Commit

update CI
xadupre committed Jul 6, 2023
1 parent ea52094 commit 755e6da
Showing 7 changed files with 86 additions and 34 deletions.
6 changes: 3 additions & 3 deletions README.rst
@@ -115,9 +115,9 @@ can be enabled with the following command:

::

python setup.py build_ext --inplace --enable_nvtx 1
# or
pip install -e . --config-settings="--enable_nvtx=1"
python setup.py build_ext --inplace --use_nvtx 1
# or (not working yet)
pip install -e . --config-settings="--use_nvtx=1"

Experimental cython binding for onnxruntime
+++++++++++++++++++++++++++++++++++++++++++
13 changes: 13 additions & 0 deletions _cmake/externals/FindCudaExtension.cmake
@@ -4,6 +4,14 @@
# Defines USE_NVTX to enable profiling with NVIDIA profiler.
# CUDA_VERSION must be defined as well.

if(${CMAKE_CUDA_COMPILER} STREQUAL "/usr/bin/nvcc")
message(FATAL_ERROR
"CMAKE_CUDA_COMPILER is equal to '${CMAKE_CUDA_COMPILER}', "
"CUDA_VERSION=${CUDA_VERSION}, "
"CMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}, "
"You should specify the cuda version by adding --cuda-version=...")
endif()

if(CUDA_VERSION)
find_package(CUDAToolkit ${CUDA_VERSION} EXACT)
else()
@@ -14,6 +22,10 @@ message(STATUS "CUDAToolkit_FOUND=${CUDAToolkit_FOUND}")

if(CUDAToolkit_FOUND)

message(STATUS "befor1 language CUDA_VERSION=${CUDA_VERSION}")
message(STATUS "befor1 language CMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}")
message(STATUS "befor1 language CMAKE_CUDA_COMPILER=${CMAKE_CUDA_COMPILER}")

if(CMAKE_CUDA_ARCHITECTURES STREQUAL "")
set(CMAKE_CUDA_ARCHITECTURES "native")
endif()
Expand All @@ -28,6 +40,7 @@ if(CUDAToolkit_FOUND)
endif()
endif()

message(STATUS "before language CUDA_VERSION=${CUDA_VERSION}")
message(STATUS "before language CMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}")
message(STATUS "before language CMAKE_CUDA_COMPILER=${CMAKE_CUDA_COMPILER}")
enable_language(CUDA)
10 changes: 7 additions & 3 deletions _doc/conf.py
@@ -74,7 +74,9 @@
epkg_dictionary = {
"cmake": "https://cmake.org/",
"CPUExecutionProvider": "https://onnxruntime.ai/docs/execution-providers/",
"cublasLtMatmul": "https://docs.nvidia.com/cuda/cublas/index.html?highlight=cublasltmatmul#cublasltmatmul",
"cublasLtMatmul":
"https://docs.nvidia.com/cuda/cublas/index.html?"
"highlight=cublasltmatmul#cublasltmatmul",
"CUDA": "https://developer.nvidia.com/",
"cudnn": "https://developer.nvidia.com/cudnn",
"cython": "https://cython.org/",
Expand All @@ -90,14 +92,16 @@
"onnx": "https://onnx.ai/onnx/",
"ONNX": "https://onnx.ai/",
"onnxruntime": "https://onnxruntime.ai/",
"onnxruntime-training": "https://github.com/microsoft/onnxruntime/tree/master/orttraining",
"onnxruntime-training":
"https://github.com/microsoft/onnxruntime/tree/master/orttraining",
"onnxruntime releases": "https://github.com/microsoft/onnxruntime/releases",
"onnx-array-api": (
"http://www.xavierdupre.fr/app/" "onnx-array-api/helpsphinx/index.html"
),
"onnxruntime C API": "https://onnxruntime.ai/docs/api/c/",
"onnxruntime Graph Optimizations": (
"https://onnxruntime.ai/docs/performance/model-optimizations/graph-optimizations.html"
"https://onnxruntime.ai/docs/performance/"
"model-optimizations/graph-optimizations.html"
),
"openmp": "https://www.openmp.org/",
"protobuf": "https://github.com/protocolbuffers/protobuf",
6 changes: 3 additions & 3 deletions _doc/index.rst
@@ -105,15 +105,15 @@ If not, some extensions might not be available.
::

python setup.py build_ext --inplace
# or
pip install -e . --config-settings="--enable_nvtx=1"
# or (not working yet)
pip install -e . --config-settings="--use_nvtx=1"

`NVTX <https://github.com/NVIDIA/NVTX>`_
can be enabled with the following command:

::

python setup.py build_ext --inplace --enable_nvtx 1
python setup.py build_ext --inplace --use_nvtx 1

Experimental cython binding for onnxruntime
+++++++++++++++++++++++++++++++++++++++++++
2 changes: 1 addition & 1 deletion _doc/tutorial/index.rst
@@ -37,7 +37,7 @@ with two instructions:
By default, *cmake* builds with CUDA if it is available. It can be disabled:

* ``python setup.py build_ext --inplace --with-cuda=0``, the legacy way
* ``pip install -e . --config-settings="--with-cuda=0"``, the new way
* ``pip install -e . --config-settings="--with-cuda=0"``, the new way (not fully working yet)

In case there are multiple versions of CUDA installed, option `cuda-version`
can be specified:
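For illustration only (this snippet is not part of the diff, and the version number is a placeholder), the ``cuda-version`` option and the ``CUDA_VERSION`` environment variable that ``setup.py`` now reads in ``initialize_options`` could be used as::

    python setup.py build_ext --inplace --cuda-version=11.8
    # or, relying on the environment variable read by setup.py
    CUDA_VERSION=11.8 python -m pip wheel . -v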
6 changes: 5 additions & 1 deletion azure-pipelines.yml
@@ -40,13 +40,17 @@ jobs:
cython-lint .
displayName: 'cython-lint'
- script: |
python -m pip wheel . --config-settings="--with_cuda=0" -v
# --config-settings does not work yet.
# python -m pip wheel . --config-settings="--use_cuda=0" -v
export USE_CUDA=0
python -m pip wheel . -v
displayName: 'build wheel'
- script: |
mkdir dist
cp onnx_extended*.whl dist
displayName: 'copy wheel'
- script: |
export USE_CUDA=0
python -m pip install . -v
displayName: 'install wheel'
- script: | # It fails due to ModuleNotFoundError: No module named 'onnx_extended.reference.c_ops.cpu.c_op_conv_'
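As a hedged aside (not part of the diff): since ``--config-settings`` is noted above as not working yet, the CI falls back to the ``USE_CUDA`` environment variable, which ``setup.py`` now picks up in ``initialize_options``. Reproducing that CI build locally would look roughly like::

    export USE_CUDA=0
    python -m pip wheel . -v
    python -m pip install . -v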
77 changes: 54 additions & 23 deletions setup.py
@@ -217,13 +217,13 @@ def __init__(self, name: str, library: str = "") -> None:
class cmake_build_ext(build_ext):
user_options = [
*build_ext.user_options,
("enable-nvtx=", None, "Enables compilation with NVTX events."),
(
"with-cuda=",
"use-cuda=",
None,
"If cuda is available, CUDA is "
"used by default unless this option is set to 0",
),
("use-nvtx=", None, "Enables compilation with NVTX events."),
(
"cuda-version=",
None,
@@ -251,29 +251,64 @@ class cmake_build_ext(build_ext):
]

def initialize_options(self):
self.enable_nvtx = None
self.with_cuda = None
self.use_nvtx = None
self.use_cuda = None
self.cuda_version = None
self.parallel = None
self.ort_version = DEFAULT_ORT_VERSION
self.cuda_build = "DEFAULT"

build_ext.initialize_options(self)

# boolean
b_values = {0, 1, "1", "0", True, False}
t_values = {1, "1", True}
for att in ["use_nvtx", "use_cuda"]:
v = getattr(self, att)
if v is not None:
continue
v = os.environ.get(att.upper(), None)
if v is None:
continue
if v not in b_values:
raise ValueError(f"Unable to interpret value {v} for {att.upper()!r}.")
print(f"-- setup: use env {att.upper()}={v in t_values}")
setattr(self, att, v in t_values)
if self.ort_version is None:
self.ort_version = os.environ.get("ORT_VERSION", None)
if self.ort_version not in ("", None):
print(f"-- setup: use env ORT_VERSION={self.ort_version}")
if self.cuda_build is None:
self.cuda_build = os.environ.get("CUDA_BUILD", None)
if self.cuda_build not in ("", None):
print(f"-- setup: use env CUDA_BUILD={self.cuda_build}")
if self.cuda_version is None:
self.cuda_version = os.environ.get("CUDA_VERSION", None)
if self.cuda_version not in ("", None):
print(f"-- setup: use env CUDA_VERSION={self.cuda_version}")
if self.use_nvtx is None:
self.use_nvtx = False

def finalize_options(self):
b_values = {None, 0, 1, "1", "0", True, False}
if self.enable_nvtx not in b_values:
raise ValueError(f"enable_nvtx={self.enable_nvtx!r} must be in {b_values}.")
if self.with_cuda not in b_values:
raise ValueError(f"with_cuda={self.with_cuda!r} must be in {b_values}.")
self.enable_nvtx = self.enable_nvtx in {1, "1", True, "True"}
self.with_cuda = self.with_cuda in {1, "1", True, "True", None}
b_values = {0, 1, "1", "0", True, False}
if self.use_nvtx not in b_values:
raise ValueError(f"use_nvtx={self.use_nvtx!r} must be in {b_values}.")
if self.use_cuda not in b_values:
raise ValueError(f"use_cuda={self.use_cuda!r} must be in {b_values}.")
self.use_nvtx = self.use_nvtx in {1, "1", True, "True"}
self.use_cuda = self.use_cuda in {1, "1", True, "True", None}
if self.cuda_version in (None, ""):
self.cuda_version = None
build = {"DEFAULT", "H100", "H100opt"}
if self.cuda_build not in build:
raise ValueError(f"cuda-built={self.cuda_build} not in {build}.")
raise ValueError(f"cuda-build={self.cuda_build!r} not in {build}.")

build_ext.finalize_options(self)

for opt in self.user_options:
name = opt[0]
print(f"-- setup: option {name}={getattr(self, name, None)}")

def get_cmake_args(self, cfg: str) -> List[str]:
"""
Returns the argument for cmake.
@@ -304,14 +339,12 @@ def get_cmake_args(self, cfg: str) -> List[str]:
if self.parallel is not None:
cmake_args.append(f"-j{self.parallel}")

if os.environ.get("USE_NVTX", "0") in (1, "1") or self.enable_nvtx:
if self.use_nvtx:
cmake_args.append("-DUSE_NVTX=1")
if os.environ.get("USE_CUDA", "1") in (0, "0") or not self.with_cuda:
cmake_args.append("-DUSE_CUDA=0")
else:
cmake_args.append("-DUSE_CUDA=1")
cmake_args.append(f"-DUSE_CUDA={1 if self.use_cuda else 0}")
if self.use_cuda:
cmake_args.append(f"-DCUDA_BUILD={self.cuda_build}")
cuda_version = self.cuda_version or os.environ.get("CUDA_VERSION", "")
cuda_version = self.cuda_version
if cuda_version not in (None, ""):
cmake_args.append(f"-DCUDA_VERSION={cuda_version}")

@@ -507,19 +540,17 @@ def get_ext_modules():
has_cuda = find_cuda()
if has_cuda:
add_cuda = True
if "--with-cuda" in sys.argv:
pos = sys.argv.index("--with-cuda")
if "--use-cuda" in sys.argv:
pos = sys.argv.index("--use-cuda")
if len(sys.argv) > pos + 1 and sys.argv[pos + 1] in (
"0",
0,
False,
"False",
):
add_cuda = False
elif "--with-cuda=0" in sys.argv:
elif os.environ.get("USE_CUDA", None) in {0, "0", False}:
add_cuda = False
elif "--with-cuda=1" in sys.argv or "--with-cuda=guess":
add_cuda = True
if add_cuda:
cuda_extensions.extend(
[
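To summarize the renamed options (an illustrative sketch, not an authoritative reference; the flag values are examples): ``enable_nvtx`` and ``with_cuda`` become ``use_nvtx`` and ``use_cuda``, and both can now also be set through the ``USE_NVTX`` and ``USE_CUDA`` environment variables read in ``initialize_options``. Accepted values are restricted to ``0``, ``1``, ``"0"``, ``"1"``, ``True`` and ``False``; anything else raises a ``ValueError``. For example::

    python setup.py build_ext --inplace --use_nvtx 1 --use-cuda 0
    # or, via the environment-variable fallback
    USE_NVTX=1 USE_CUDA=0 python -m pip install -e . -v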
