
Commit

First merge of shark.nn (#9)
* poc

* tensor overloads wip

* torch types wip

* add test suite and start porting `torch.nn.Module` (see the usage sketch after this list)

* autogenerate torch_ext.py from the JIT op registry

* generate Python wrappers

* sketch functional.py

* smoke test done

* bump version

* rename package to pi

* change names/strings/comments
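
For a rough sense of the `nn.Module` surface this commit starts porting, here is a minimal usage sketch based on the `MyConv2d` example added to the README below; the `from pi import nn` import path is an assumption, since the README excerpt only shows the class definition.

```python
# Minimal sketch of the ported module API. The `from pi import nn` import
# path is an assumption -- the README excerpt below only shows the class body.
from pi import nn

class MyConv2d(nn.Module):
    def __init__(self):
        super().__init__()
        # 3 input channels, 1 output channel, 3x3 kernel (as in the README example)
        self.conv = nn.Conv2d(3, 1, 3)

    def forward(self, x):
        y = self.conv(x)
        z = y + y     # exercises the tensor `+` overload added in this commit
        return z * z  # exercises the tensor `*` overload
```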
makslevental committed Dec 30, 2022
1 parent 649d588 commit 8816fa9
Showing 95 changed files with 30,616 additions and 872 deletions.
27 changes: 13 additions & 14 deletions .github/workflows/build.yml
@@ -1,12 +1,11 @@
name: Build

on:
pull_request:
branches:
- main
push:
branches:
- main
workflow_run:
workflows: [ "Test" ]
types:
- completed
branches: [ main ]
workflow_dispatch:
branches:
- main
@@ -44,11 +43,11 @@ jobs:
--extra-index-url https://download.pytorch.org/whl/nightly/cpu
mkdir -p ${{ github.sha }}
mv wheelhouse/SharkPy*.whl ${{ github.sha }}/
mv wheelhouse/PI*.whl ${{ github.sha }}/
- name: Upload an artifact
uses: actions/upload-artifact@v3
if: github.event_name == 'push'
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
with:
if-no-files-found: error
name: build_artifact
@@ -62,7 +61,7 @@ jobs:

needs: [ build ]

if: ${{ github.event_name == 'push' }}
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -76,10 +75,10 @@ jobs:
- name: Set up a release page
id: setup_release
run: |
SHARKPY_VERSION=$(python setup.py --version)
tag_name="$SHARKPY_VERSION"
release_title="SharkPy $SHARKPY_VERSION"
echo "SharkPy $SHARKPY_VERSION created at $(date)" > body.md
PI_VERSION=$(python setup.py --version)
tag_name="$PI_VERSION"
release_title="PI $PI_VERSION"
echo "PI $PI_VERSION created at $(date)" > body.md
echo "tag_name=${tag_name}" >> $GITHUB_OUTPUT
echo "release_title=${release_title}" >> $GITHUB_OUTPUT
@@ -88,7 +87,7 @@ jobs:
with:
artifacts: "${{ github.sha }}/*.whl"
bodyFile: body.md
token: "${{ secrets.SHARK_PY_CI }}"
token: "${{ secrets.PI_CI }}"
tag: "${{ steps.setup_release.outputs.tag_name }}"
name: "${{ steps.setup_release.outputs.release_title }}"
removeArtifacts: true
53 changes: 53 additions & 0 deletions .github/workflows/test.yml
@@ -0,0 +1,53 @@
name: Test

on:
pull_request:
branches:
- main
- nn_module
push:
branches:
- main
- nn_module
workflow_dispatch:
branches:
- main
- nn_module

jobs:

test-against-torch-mlir:

runs-on: ${{ matrix.os }}

strategy:
matrix:
os: [ ubuntu-latest ]
arch: [ x86_64 ]
python_version: [ "3.10" ]

steps:
- name: Checkout
uses: actions/checkout@v2

# - name: Install linux system packages
# run: |
# sudo apt-get update
# sudo apt-get -y install ninja-build cmake clang

- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python_version }}

- name: Install
run: |
pip install . \
--pre torch-mlir torchvision \
-f https://llvm.github.io/torch-mlir/package-index/ \
--extra-index-url https://download.pytorch.org/whl/nightly/cpu \
-v
- name: Test vs. torch-mlir
run: |
PYTHONPATH=tests/torch_mlir python tests/torch_mlir/main.py
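
The `tests/torch_mlir/main.py` harness itself is not reproduced on this page. As a rough illustration of the kind of reference the "Test vs. torch-mlir" step can compare against, the upstream `torch_mlir.compile` API can lower the same model to the `torch` dialect; this is a hypothetical sketch, not the actual test contents.

```python
# Hypothetical illustration only: produce upstream torch-mlir reference IR for
# the MyConv2d example from the README. The real tests/torch_mlir/main.py is
# not shown in this diff.
import torch
import torch_mlir  # installed by the `pip install --pre torch-mlir` step above

class MyConv2d(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 1, 3)

    def forward(self, x):
        y = self.conv(x)
        z = y + y
        return z * z

# Lower to the `torch` dialect; the output resembles the MLIR shown in the
# README section of this commit, and could be diffed against pi's output.
module = torch_mlir.compile(MyConv2d(), torch.ones(1, 3, 32, 32), output_type="torch")
print(module)
```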
62 changes: 62 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,62 @@
cmake_minimum_required(VERSION 3.13.4)

if (POLICY CMP0068)
cmake_policy(SET CMP0068 NEW)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
endif ()

if (POLICY CMP0075)
cmake_policy(SET CMP0075 NEW)
endif ()

if (POLICY CMP0077)
cmake_policy(SET CMP0077 NEW)
endif ()

if (POLICY CMP0116)
cmake_policy(SET CMP0116 NEW)
endif ()

project(PI LANGUAGES CXX C)

set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)

set(CMAKE_CXX_STANDARD 17 CACHE STRING "C++ standard to conform to")

find_package(MLIR REQUIRED CONFIG)

message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")

set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)
set(MLIR_BINARY_DIR ${CMAKE_BINARY_DIR})

list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
include(TableGen)
include(AddLLVM)
include(AddMLIR)
include(HandleLLVMOptions)

include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${MLIR_INCLUDE_DIRS})
link_directories(${LLVM_BUILD_LIBRARY_DIR})
add_definitions(${LLVM_DEFINITIONS})

##################################### Bindings path hacks

include(MLIRDetectPythonEnv)
include(AddMLIRPython)
mlir_configure_python_dev_packages()
mlir_detect_pybind11_install()

set(PYTHON_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/cpp_ext) # --src-root
set(MLIR_BINARY_DIR ${CMAKE_BINARY_DIR})
# set(MLIR_TABLEGEN_EXE "" CACHE STRING "Path to mlir-tablegen")
# message(STATUS "MLIR_TABLEGEN_EXE: ${MLIR_TABLEGEN_EXE}")
set(MLIR_INCLUDE_TESTS 0)

pybind11_add_module(_mlir cpp_ext/MainModule.cpp cpp_ext/TensorValue.cpp cpp_ext/TorchTypes.cpp)
#target_link_libraries(_mlir PRIVATE MLIRIR MLIRSupport MLIRCAPIInterfaces MLIRCAPIIR)

58 changes: 38 additions & 20 deletions README.md
@@ -1,4 +1,4 @@
- [SharkPy](#sharkpy)
- [PI](#PI)
- [Installing](#installing)
- [Minimal example](#minimal-example)
- [Moderately interesting example](#moderately-interesting-example)
@@ -8,7 +8,7 @@
<img width="598" alt="image" src="https://user-images.githubusercontent.com/5657668/205545845-544fe701-79d5-43c1-beec-09763f22cc85.png">
</p>

# SharkPy
# PI

Early days of a Python frontend for MLIR.

@@ -25,10 +25,10 @@ pip install . \

and you're good to go.

Alternatively, you can install the [latest released wheel](https://github.com/nod-ai/SharkPy/releases/latest):
Alternatively, you can install the [latest released wheel](https://github.com/nod-ai/PI/releases/latest):

```shell
pip install https://github.com/nod-ai/SharkPy/releases/latest/download/SharkPy-$CURRENT_VERSION-py3-none-any.whl \
pip install https://github.com/nod-ai/PI/releases/latest/download/PI-$CURRENT_VERSION-py3-none-any.whl \
--pre torch-mlir torchvision \
-f https://llvm.github.io/torch-mlir/package-index/ \
--extra-index-url https://download.pytorch.org/whl/nightly/cpu
@@ -39,7 +39,7 @@ pip install https://github.com/nod-ai/SharkPy/releases/latest/download/SharkPy-$
[simple_kernels.py](./tests/simple_kernels.py) (in [tests](./tests)) looks like this

```python
from shark.dialects import memref, linalg
from pi.dialects import memref, linalg

def saxpy(a: float, b: float):
A = memref.AllocaOp((10, 30))
@@ -233,29 +233,47 @@ func.func private @saxpy(%arg0: f64, %arg1: f64) -> memref<10x20xf64> {
Preliminary support for the `torch-mlir` dialect is available:

```python
def torch_ops():
f64 = F64Type.get()
z = torch.ConstantFloatOp(value=FloatAttr.get(f64, 256.0))
attr = DenseFPElementsAttr(Attribute.parse("dense<0.0> : tensor<3x5xf32>"))
a = torch.ValueTensorLiteralOp(attr)
b = torch.ValueTensorLiteralOp(attr)
c = torch.AtenAddTensorOp(a.result.type, a.result, b.result, z)
return c
class MyConv2d(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 1, 3)

def forward(self, x):
y = self.conv(x)
z = y + y
w = z * z
return w
```

lowers to

```mlir
func.func private @torch_ops() -> !torch.vtensor<[3,5],f32> {
%float2.560000e02 = torch.constant.float 2.560000e+02
%0 = torch.vtensor.literal(dense<0.000000e+00> : tensor<3x5xf32>) : !torch.vtensor<[3,5],f32>
%1 = torch.vtensor.literal(dense<0.000000e+00> : tensor<3x5xf32>) : !torch.vtensor<[3,5],f32>
%2 = torch.aten.add.Tensor %0, %1, %float2.560000e02 :
!torch.vtensor<[3,5],f32>, !torch.vtensor<[3,5],f32>, !torch.float -> !torch.vtensor<[3,5],f32>
return %2 : !torch.vtensor<[3,5],f32>
module {
func.func private @simple_conv2d() -> !torch.vtensor {
%0 = torch.vtensor.literal(dense<1.000000e+00> : tensor<1x3x32x32xf32>) : !torch.vtensor<[1,3,32,32],f32>
%1 = torch.vtensor.literal(dense<1.000000e+00> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%2 = torch.vtensor.literal(dense<1.000000e+00> : tensor<1x3x3x3xf32>) : !torch.vtensor<[1,3,3,3],f32>
%int1 = torch.constant.int 1
%int1_0 = torch.constant.int 1
%3 = torch.prim.ListConstruct %int1, %int1_0 : (!torch.int, !torch.int) -> !torch.list<int>
%int0 = torch.constant.int 0
%int0_1 = torch.constant.int 0
%4 = torch.prim.ListConstruct %int0, %int0_1 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%5 = torch.prim.ListConstruct %int1_2, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_4 = torch.constant.int 1
%6 = torch.aten.conv2d %0, %2, %1, %3, %4, %5, %int1_4 : !torch.vtensor<[1,3,32,32],f32>, !torch.vtensor<[1,3,3,3],f32>, !torch.vtensor<[1],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int -> !torch.vtensor
%7 = "torch.constant.number"() {value = 1 : i64} : () -> !torch.number
%8 = torch.aten.add.Tensor %6, %6, %7 : !torch.vtensor, !torch.vtensor, !torch.number -> !torch.vtensor
%9 = torch.aten.mul.Tensor %8, %8 : !torch.vtensor, !torch.vtensor -> !torch.vtensor
return %9 : !torch.vtensor
}
}
```

This is very rough right now; to get a sense of the current status, check the [latest tests](https://github.com/nod-ai/PI/actions?query=branch%3Ann_module+) on the `nn_module` branch.

# Build Wheel

```shell
