Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Build Docker images for AMD gpus #1041

Merged
merged 23 commits into from
Aug 31, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 32 additions & 21 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@ jobs:
matrix:
gpu_backend: ["cuda", "hip_rocm"]
fail-fast: false
env:
FF_GPU_BACKEND: ${{ matrix.gpu_backend }}
steps:
- name: Checkout Git Repository
uses: actions/checkout@v3
Expand All @@ -51,14 +53,15 @@ jobs:

- name: Install CUDA
uses: Jimver/cuda-toolkit@v0.2.11
if: ${{ matrix.gpu_backend == 'cuda' }}
id: cuda-toolkit
with:
cuda: "11.8.0"
# Disable caching of the CUDA binaries, since it does not give us any significant performance improvement
use-github-cache: "false"

- name: Install system dependencies
run: FF_GPU_BACKEND=${{ matrix.gpu_backend }} .github/workflows/helpers/install_dependencies.sh
run: .github/workflows/helpers/install_dependencies.sh

- name: Install conda and FlexFlow dependencies
uses: conda-incubator/setup-miniconda@v2
Expand All @@ -72,22 +75,25 @@ jobs:
export CUDNN_DIR="$CUDA_PATH"
export CUDA_DIR="$CUDA_PATH"
export FF_HOME=$(pwd)
export FF_GPU_BACKEND=${{ matrix.gpu_backend }}
export FF_CUDA_ARCH=70
cores_available=$(nproc --all)
n_build_cores=$(( cores_available -1 ))
if (( $n_build_cores < 1 )) ; then n_build_cores=1 ; fi
mkdir build
cd build
export FF_HIP_ARCH=gfx1100,gfx1036
export hip_version=5.6
export FF_BUILD_ALL_INFERENCE_EXAMPLES=ON

if [[ "${FF_GPU_BACKEND}" == "cuda" ]]; then
export FF_BUILD_ALL_EXAMPLES=ON
export FF_BUILD_ALL_INFERENCE_EXAMPLES=ON
export FF_BUILD_UNIT_TESTS=ON
else
export FF_BUILD_ALL_EXAMPLES=OFF
export FF_BUILD_ALL_INFERENCE_EXAMPLES=OFF
export FF_BUILD_UNIT_TESTS=OFF
fi

cores_available=$(nproc --all)
n_build_cores=$(( cores_available -1 ))
if (( $n_build_cores < 1 )) ; then n_build_cores=1 ; fi
mkdir build
cd build

../config/config.linux
make -j $n_build_cores

Expand All @@ -96,40 +102,45 @@ jobs:
export CUDNN_DIR="$CUDA_PATH"
export CUDA_DIR="$CUDA_PATH"
export FF_HOME=$(pwd)
export FF_GPU_BACKEND=${{ matrix.gpu_backend }}
export FF_CUDA_ARCH=70
cd build
export FF_HIP_ARCH=gfx1100,gfx1036
export hip_version=5.6
export FF_BUILD_ALL_INFERENCE_EXAMPLES=ON

if [[ "${FF_GPU_BACKEND}" == "cuda" ]]; then
export FF_BUILD_ALL_EXAMPLES=ON
export FF_BUILD_ALL_INFERENCE_EXAMPLES=ON
export FF_BUILD_UNIT_TESTS=ON
else
export FF_BUILD_ALL_EXAMPLES=OFF
export FF_BUILD_ALL_INFERENCE_EXAMPLES=OFF
export FF_BUILD_UNIT_TESTS=OFF
fi

cd build
../config/config.linux
sudo make install
sudo ldconfig

- name: Check availability of Python flexflow.core module
if: ${{ matrix.gpu_backend == 'cuda' }}
run: |
export LD_LIBRARY_PATH="$CUDA_PATH/lib64/stubs:$LD_LIBRARY_PATH"
sudo ln -s "$CUDA_PATH/lib64/stubs/libcuda.so" "$CUDA_PATH/lib64/stubs/libcuda.so.1"
export CPU_ONLY_TEST=1
python -c "import flexflow.core; exit()"

- name: Run C++ unit tests
if: ${{ matrix.gpu_backend == 'cuda' }}
run: |
export CUDNN_DIR="$CUDA_PATH"
export CUDA_DIR="$CUDA_PATH"
export LD_LIBRARY_PATH="$CUDA_PATH/lib64/stubs:$LD_LIBRARY_PATH"
export FF_HOME=$(pwd)
sudo ln -s "$CUDA_PATH/lib64/stubs/libcuda.so" "$CUDA_PATH/lib64/stubs/libcuda.so.1"
cd build
./tests/unit/unit-test

- name: Check availability of Python flexflow.core module
run: |
if [[ "${FF_GPU_BACKEND}" == "cuda" ]]; then
export LD_LIBRARY_PATH="$CUDA_PATH/lib64/stubs:$LD_LIBRARY_PATH"
fi
# Remove build folder to check that the installed version can run independently of the build files
rm -rf build
export CPU_ONLY_TEST=1
python -c "import flexflow.core; exit()"

makefile-build:
name: Build FlexFlow with the Makefile
runs-on: ubuntu-20.04
Expand Down
60 changes: 37 additions & 23 deletions .github/workflows/docker-build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,29 +26,42 @@ jobs:
strategy:
matrix:
gpu_backend: ["cuda", "hip_rocm"]
cuda_version: ["11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8", "12.0"]
gpu_backend_version: ["11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8", "12.0", "5.3", "5.4", "5.5", "5.6"]
# Exclude mismatched combinations: the CUDA versions (11.1–12.0) apply only to the cuda backend, and the ROCm versions (5.3–5.6) apply only to the hip_rocm backend
exclude:
- gpu_backend: "cuda"
gpu_backend_version: "5.3"
- gpu_backend: "cuda"
gpu_backend_version: "5.4"
- gpu_backend: "cuda"
gpu_backend_version: "5.5"
- gpu_backend: "cuda"
gpu_backend_version: "5.6"
- gpu_backend: "hip_rocm"
cuda_version: "11.1"
gpu_backend_version: "11.1"
- gpu_backend: "hip_rocm"
cuda_version: "11.2"
gpu_backend_version: "11.2"
- gpu_backend: "hip_rocm"
cuda_version: "11.3"
gpu_backend_version: "11.3"
- gpu_backend: "hip_rocm"
cuda_version: "11.4"
gpu_backend_version: "11.4"
- gpu_backend: "hip_rocm"
cuda_version: "11.5"
gpu_backend_version: "11.5"
- gpu_backend: "hip_rocm"
cuda_version: "11.6"
gpu_backend_version: "11.6"
- gpu_backend: "hip_rocm"
cuda_version: "11.7"
gpu_backend_version: "11.7"
- gpu_backend: "hip_rocm"
cuda_version: "12.0"
gpu_backend_version: "11.8"
- gpu_backend: "hip_rocm"
gpu_backend_version: "12.0"
fail-fast: false
env:
FF_GPU_BACKEND: ${{ matrix.gpu_backend }}
cuda_version: ${{ matrix.cuda_version }}
gpu_backend_version: ${{ matrix.gpu_backend_version }}
# one of the two variables below will be unused
cuda_version: ${{ matrix.gpu_backend_version }}
hip_version: ${{ matrix.gpu_backend_version }}
branch_name: ${{ github.head_ref || github.ref_name }}
steps:
- name: Checkout Git Repository
Expand All @@ -58,8 +71,8 @@ jobs:

- name: Free additional space on runner
env:
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' ) && env.branch_name == 'inference' }}
build_needed: ${{ matrix.gpu_backend == 'hip_rocm' || ( matrix.gpu_backend == 'cuda' && matrix.cuda_version == '11.8' ) }}
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' ) && env.branch_name == 'inference' }}
build_needed: ${{ ( matrix.gpu_backend == 'hip_rocm' && matrix.gpu_backend_version == '5.6' ) || ( matrix.gpu_backend == 'cuda' && matrix.gpu_backend_version == '11.8' ) }}
run: |
if [[ $deploy_needed == "true" || $build_needed == "true" ]]; then
.github/workflows/helpers/free_space_on_runner.sh
Expand All @@ -69,22 +82,19 @@ jobs:

- name: Build Docker container
env:
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' ) && env.branch_name == 'inference' }}
build_needed: ${{ matrix.gpu_backend == 'hip_rocm' || ( matrix.gpu_backend == 'cuda' && matrix.cuda_version == '11.8' ) }}
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' ) && env.branch_name == 'inference' }}
build_needed: ${{ ( matrix.gpu_backend == 'hip_rocm' && matrix.gpu_backend_version == '5.6' ) || ( matrix.gpu_backend == 'cuda' && matrix.gpu_backend_version == '11.8' ) }}
run: |
if [[ "${FF_GPU_BACKEND}" == "cuda" ]]; then
export FF_BUILD_ALL_INFERENCE_EXAMPLES=ON
else
export FF_BUILD_ALL_INFERENCE_EXAMPLES=OFF
fi
# On push to inference, build for all compatible architectures, so that we can publish
# a pre-built general-purpose image. In all other cases, only build for one architecture
# to save time.
if [[ $deploy_needed == "true" ]] ; then
export FF_CUDA_ARCH=all
export FF_HIP_ARCH=all
./docker/build.sh flexflow
elif [[ $build_needed == "true" ]]; then
export FF_CUDA_ARCH=70
export FF_HIP_ARCH=gfx1100,gfx1036
./docker/build.sh flexflow
else
echo "Skipping build to save time"
Expand All @@ -93,11 +103,15 @@ jobs:
- name: Check availability of Python flexflow.core module
if: ${{ matrix.gpu_backend == 'cuda' }}
env:
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' ) && env.branch_name == 'inference' }}
build_needed: ${{ matrix.gpu_backend == 'hip_rocm' || ( matrix.gpu_backend == 'cuda' && matrix.cuda_version == '11.8' ) }}
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' ) && env.branch_name == 'inference' }}
build_needed: ${{ ( matrix.gpu_backend == 'hip_rocm' && matrix.gpu_backend_version == '5.6' ) || ( matrix.gpu_backend == 'cuda' && matrix.gpu_backend_version == '11.8' ) }}
run: |
if [[ $deploy_needed == "true" || $build_needed == "true" ]]; then
docker run --env CPU_ONLY_TEST=1 --entrypoint /bin/bash flexflow-cuda-${cuda_version}:latest -c "export LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH; sudo ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1; python -c 'import flexflow.core; exit()'"
if [[ $FF_GPU_BACKEND == "cuda" ]]; then
docker run --env CPU_ONLY_TEST=1 --entrypoint /bin/bash flexflow-${FF_GPU_BACKEND}-${gpu_backend_version}:latest -c "export LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH; sudo ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1; python -c 'import flexflow.core; exit()'"
else
docker run --env CPU_ONLY_TEST=1 --entrypoint /bin/bash flexflow-${FF_GPU_BACKEND}-${gpu_backend_version}:latest -c "python -c 'import flexflow.core; exit()'"
fi
else
echo "Skipping test to save time"
fi
Expand All @@ -106,7 +120,7 @@ jobs:
if: github.repository_owner == 'flexflow'
env:
FLEXFLOW_CONTAINER_TOKEN: ${{ secrets.FLEXFLOW_CONTAINER_TOKEN }}
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' ) && env.branch_name == 'inference' }}
deploy_needed: ${{ ( github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' ) && env.branch_name == 'inference' }}
run: |
if [[ $deploy_needed == "true" ]]; then
./docker/publish.sh flexflow-environment
Expand Down
38 changes: 29 additions & 9 deletions .github/workflows/helpers/install_dependencies.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,21 +10,41 @@ echo "Installing apt dependencies..."
sudo apt-get update && sudo apt-get install -y --no-install-recommends wget binutils git zlib1g-dev libhdf5-dev && \
sudo rm -rf /var/lib/apt/lists/*

# Install CUDNN
./install_cudnn.sh

# Install HIP dependencies if needed
FF_GPU_BACKEND=${FF_GPU_BACKEND:-"cuda"}
hip_version=${hip_version:-"5.6"}
if [[ "${FF_GPU_BACKEND}" != @(cuda|hip_cuda|hip_rocm|intel) ]]; then
echo "Error, value of FF_GPU_BACKEND (${FF_GPU_BACKEND}) is invalid."
exit 1
elif [[ "$FF_GPU_BACKEND" == "hip_cuda" || "$FF_GPU_BACKEND" = "hip_rocm" ]]; then
fi
# Install CUDNN if needed
if [[ "$FF_GPU_BACKEND" == "cuda" || "$FF_GPU_BACKEND" = "hip_cuda" ]]; then
# Install CUDNN
./install_cudnn.sh
fi
# Install HIP dependencies if needed
if [[ "$FF_GPU_BACKEND" == "hip_cuda" || "$FF_GPU_BACKEND" = "hip_rocm" ]]; then
echo "FF_GPU_BACKEND: ${FF_GPU_BACKEND}. Installing HIP dependencies"
wget https://repo.radeon.com/amdgpu-install/22.20.5/ubuntu/focal/amdgpu-install_22.20.50205-1_all.deb
sudo apt-get install -y ./amdgpu-install_22.20.50205-1_all.deb
rm ./amdgpu-install_22.20.50205-1_all.deb
# Check that hip_version is one of 5.3,5.4,5.5,5.6
if [[ "$hip_version" != "5.3" && "$hip_version" != "5.4" && "$hip_version" != "5.5" && "$hip_version" != "5.6" ]]; then
echo "hip_version '${hip_version}' is not supported, please choose among {5.3, 5.4, 5.5, 5.6}"
exit 1
fi
# Compute the installer script name and download URL for the requested hip_version
AMD_GPU_SCRIPT_NAME=amdgpu-install_5.6.50600-1_all.deb
if [ "$hip_version" = "5.3" ]; then
AMD_GPU_SCRIPT_NAME=amdgpu-install_5.3.50300-1_all.deb
elif [ "$hip_version" = "5.4" ]; then
AMD_GPU_SCRIPT_NAME=amdgpu-install_5.4.50400-1_all.deb
elif [ "$hip_version" = "5.5" ]; then
AMD_GPU_SCRIPT_NAME=amdgpu-install_5.5.50500-1_all.deb
fi
AMD_GPU_SCRIPT_URL="https://repo.radeon.com/amdgpu-install/${hip_version}/ubuntu/focal/${AMD_GPU_SCRIPT_NAME}"
# Download and install AMD GPU software with ROCM and HIP support
wget "$AMD_GPU_SCRIPT_URL"
sudo apt-get install -y ./${AMD_GPU_SCRIPT_NAME}
sudo rm ./${AMD_GPU_SCRIPT_NAME}
sudo amdgpu-install -y --usecase=hip,rocm --no-dkms
sudo apt-get install -y hip-dev hipblas miopen-hip rocm-hip-sdk
sudo apt-get install -y hip-dev hipblas miopen-hip rocm-hip-sdk rocm-device-libs

# Install protobuf v3.20.x manually
sudo apt-get update -y && sudo apt-get install -y pkg-config zip g++ zlib1g-dev unzip python autoconf automake libtool curl make
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/pip-install.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ jobs:
export FF_HOME=$(pwd)
export FF_CUDA_ARCH=70
pip install . --verbose
# Remove build folder to check that the installed version can run independently of the build files
rm -rf build

- name: Check availability of Python flexflow.core module
run: |
Expand Down
24 changes: 23 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,14 @@ set_property(CACHE FF_GPU_BACKEND PROPERTY STRINGS ${FF_GPU_BACKENDS})

# option for cuda arch
set(FF_CUDA_ARCH "autodetect" CACHE STRING "Target CUDA Arch")
if (FF_CUDA_ARCH STREQUAL "")
if ((FF_GPU_BACKEND STREQUAL "cuda" OR FF_GPU_BACKEND STREQUAL "hip_cuda") AND FF_CUDA_ARCH STREQUAL "")
message(FATAL_ERROR "FF_CUDA_ARCH cannot be an empty string. Set it to `autodetect`, `all`, or pass one or multiple valid CUDA archs.")
endif()
# option for hip arch
set(FF_HIP_ARCH "all" CACHE STRING "Target HIP Arch")
if (FF_GPU_BACKEND STREQUAL "hip_rocm" AND FF_CUDA_ARCH STREQUAL "")
message(FATAL_ERROR "FF_HIP_ARCH cannot be an empty string. Set it to `all`, or pass one or multiple valid HIP archs.")
endif()

# option for nccl
option(FF_USE_NCCL "Run FlexFlow with NCCL" OFF)
Expand Down Expand Up @@ -226,6 +231,11 @@ if (FF_GPU_BACKEND STREQUAL "cuda" OR FF_GPU_BACKEND STREQUAL "hip_cuda")
include(cuda)
endif()

# HIP
if (FF_GPU_BACKEND STREQUAL "hip_rocm" OR FF_GPU_BACKEND STREQUAL "hip_cuda")
include(hip)
endif()

# CUDNN
if (FF_GPU_BACKEND STREQUAL "cuda" OR FF_GPU_BACKEND STREQUAL "hip_cuda")
include(cudnn)
Expand Down Expand Up @@ -397,6 +407,18 @@ elseif(FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "hip_rocm")

add_compile_definitions(FF_USE_HIP_ROCM)

if (FF_HIP_ARCH STREQUAL "")
message(FATAL_ERROR "FF_HIP_ARCH is undefined")
endif()
set_property(TARGET flexflow PROPERTY HIP_ARCHITECTURES "${HIP_ARCH_LIST}")

message(STATUS "FF_GPU_BACKEND: ${FF_GPU_BACKEND}")
message(STATUS "FF_HIP_ARCH: ${FF_HIP_ARCH}")
message(STATUS "HIP_ARCH_LIST: ${HIP_ARCH_LIST}")
get_property(CHECK_HIP_ARCHS TARGET flexflow PROPERTY HIP_ARCHITECTURES)
message(STATUS "CHECK_HIP_ARCHS: ${CHECK_HIP_ARCHS}")
message(STATUS "HIP_CLANG_PATH: ${HIP_CLANG_PATH}")

# The hip cmake config module defines three targets,
# hip::amdhip64, hip::host, and hip::device.
#
Expand Down
11 changes: 11 additions & 0 deletions cmake/hip.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
if (NOT FF_HIP_ARCH STREQUAL "")
if (FF_HIP_ARCH STREQUAL "all")
set(FF_HIP_ARCH "gfx900,gfx902,gfx904,gfx906,gfx908,gfx909,gfx90a,gfx90c,gfx940,gfx1010,gfx1011,gfx1012,gfx1013,gfx1030,gfx1031,gfx1032,gfx1033,gfx1034,gfx1035,gfx1036,gfx1100,gfx1101,gfx1102,gfx1103")
endif()
string(REPLACE "," " " HIP_ARCH_LIST "${FF_HIP_ARCH}")
endif()

message(STATUS "FF_HIP_ARCH: ${FF_HIP_ARCH}")
if(FF_GPU_BACKEND STREQUAL "hip_rocm")
set(HIP_CLANG_PATH ${ROCM_PATH}/llvm/bin CACHE STRING "Path to the clang compiler by ROCM" FORCE)
endif()
3 changes: 3 additions & 0 deletions cmake/legion.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -142,8 +142,11 @@ else()
set(Legion_USE_HIP ON CACHE BOOL "enable Legion_USE_HIP" FORCE)
if (FF_GPU_BACKEND STREQUAL "hip_cuda")
set(Legion_HIP_TARGET "CUDA" CACHE STRING "Legion_HIP_TARGET CUDA" FORCE)
set(Legion_CUDA_ARCH ${FF_CUDA_ARCH} CACHE STRING "Legion CUDA ARCH" FORCE)
elseif(FF_GPU_BACKEND STREQUAL "hip_rocm")
set(Legion_HIP_TARGET "ROCM" CACHE STRING "Legion HIP_TARGET ROCM" FORCE)
set(Legion_HIP_ARCH ${FF_HIP_ARCH} CACHE STRING "Legion HIP ARCH" FORCE)
message(STATUS "Legion_HIP_ARCH: ${Legion_HIP_ARCH}")
endif()
endif()
set(Legion_REDOP_COMPLEX OFF CACHE BOOL "disable complex")
Expand Down
7 changes: 6 additions & 1 deletion config/config.inc
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,11 @@ if [ -n "$FF_CUDA_ARCH" ]; then
SET_CUDA_ARCH="-DFF_CUDA_ARCH=${FF_CUDA_ARCH}"
fi

# set HIP Arch
if [ -n "$FF_HIP_ARCH" ]; then
SET_HIP_ARCH="-DFF_HIP_ARCH=${FF_HIP_ARCH}"
fi

# set CUDA dir
if [ -n "$CUDA_DIR" ]; then
SET_CUDA="-DCUDA_PATH=${CUDA_DIR}"
Expand Down Expand Up @@ -213,7 +218,7 @@ if [ -n "$FF_GPU_BACKEND" ]; then
fi
fi

CMAKE_FLAGS="-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF -DLegion_HIJACK_CUDART=OFF ${SET_CC} ${SET_CXX} ${SET_INSTALL_DIR} ${SET_INFERENCE_TESTS} ${SET_LIBTORCH_PATH} ${SET_BUILD} ${SET_CUDA_ARCH} ${SET_CUDA} ${SET_CUDNN} ${SET_PYTHON} ${SET_NCCL} ${SET_NCCL_DIR} ${SET_LEGION_NETWORKS} ${SET_EXAMPLES} ${SET_INFERENCE_EXAMPLES} ${SET_USE_PREBUILT_LEGION} ${SET_USE_PREBUILT_NCCL} ${SET_USE_ALL_PREBUILT_LIBRARIES} ${SET_BUILD_UNIT_TESTS} ${SET_AVX2} ${SET_MAX_DIM} ${SET_LEGION_MAX_RETURN_SIZE} ${SET_ROCM_PATH} ${SET_FF_GPU_BACKEND}"
CMAKE_FLAGS="-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF -DLegion_HIJACK_CUDART=OFF ${SET_CC} ${SET_CXX} ${SET_INSTALL_DIR} ${SET_INFERENCE_TESTS} ${SET_LIBTORCH_PATH} ${SET_BUILD} ${SET_CUDA_ARCH} ${SET_CUDA} ${SET_CUDNN} ${SET_HIP_ARCH} ${SET_PYTHON} ${SET_NCCL} ${SET_NCCL_DIR} ${SET_LEGION_NETWORKS} ${SET_EXAMPLES} ${SET_INFERENCE_EXAMPLES} ${SET_USE_PREBUILT_LEGION} ${SET_USE_PREBUILT_NCCL} ${SET_USE_ALL_PREBUILT_LIBRARIES} ${SET_BUILD_UNIT_TESTS} ${SET_AVX2} ${SET_MAX_DIM} ${SET_LEGION_MAX_RETURN_SIZE} ${SET_ROCM_PATH} ${SET_FF_GPU_BACKEND}"

function run_cmake() {
SRC_LOCATION=${SRC_LOCATION:=`dirname $0`/../}
Expand Down
Loading
Loading