diff --git a/.github/workflows/test_api_cuda.yaml b/.github/workflows/test_api_cuda.yaml index 8b7d18e7..eae4bb8f 100644 --- a/.github/workflows/test_api_cuda.yaml +++ b/.github/workflows/test_api_cuda.yaml @@ -24,8 +24,6 @@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: pytorch/pytorch:2.2.2-cuda12.1-cudnn8-runtime options: | @@ -34,7 +32,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,timm,diffusers] diff --git a/.github/workflows/test_api_rocm.yaml b/.github/workflows/test_api_rocm.yaml index 5da75559..d472bf13 100644 --- a/.github/workflows/test_api_rocm.yaml +++ b/.github/workflows/test_api_rocm.yaml @@ -25,6 +25,11 @@ jobs: - name: Checkout code uses: actions/checkout@v3 + - name: Target devices + run: | + echo "DEVICE: $DEVICE" + echo "DEVICE=$DEVICE" >> $GITHUB_ENV + - name: Build image run: docker build --build-arg USER_ID=$(id -u) @@ -39,7 +44,6 @@ jobs: uses: addnab/docker-run-action@v3 env: DEVICE: ${{ env.DEVICE }} - WORKSPACE: ${{ github.workspace }} with: image: opt-bench-rocm:${{ matrix.image.rocm_version }} options: | @@ -49,7 +53,7 @@ jobs: --env USE_ROCM="1" --device /dev/kfd --device /dev/dri/${{ env.DEVICE }} - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,timm,diffusers] diff --git a/.github/workflows/test_cli_cuda_onnxruntime.yaml b/.github/workflows/test_cli_cuda_onnxruntime.yaml index bee54f80..c9362244 100644 --- a/.github/workflows/test_cli_cuda_onnxruntime.yaml +++ b/.github/workflows/test_cli_cuda_onnxruntime.yaml @@ -21,8 +21,6 @@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: pytorch/pytorch:2.2.2-cuda11.8-cudnn8-runtime options: | @@ -31,7 
+29,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume $WORKSPACE:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,onnxruntime-gpu,diffusers,timm] && diff --git a/.github/workflows/test_cli_cuda_pytorch_multi_gpu.yaml b/.github/workflows/test_cli_cuda_pytorch_multi_gpu.yaml index a141f60d..84676ca2 100644 --- a/.github/workflows/test_cli_cuda_pytorch_multi_gpu.yaml +++ b/.github/workflows/test_cli_cuda_pytorch_multi_gpu.yaml @@ -24,8 +24,6 @@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: pytorch/pytorch:2.2.2-cuda12.1-cudnn8-devel options: | @@ -34,7 +32,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume $WORKSPACE:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,diffusers,timm,peft,deepspeed] diff --git a/.github/workflows/test_cli_cuda_pytorch_single_gpu.yaml b/.github/workflows/test_cli_cuda_pytorch_single_gpu.yaml index d918db0a..8430b45c 100644 --- a/.github/workflows/test_cli_cuda_pytorch_single_gpu.yaml +++ b/.github/workflows/test_cli_cuda_pytorch_single_gpu.yaml @@ -24,8 +24,6 @@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: pytorch/pytorch:2.2.2-cuda12.1-cudnn8-runtime options: | @@ -34,7 +32,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,diffusers,timm,peft,bitsandbytes,autoawq,auto-gptq] diff --git a/.github/workflows/test_cli_cuda_tensorrt_llm.yaml b/.github/workflows/test_cli_cuda_tensorrt_llm.yaml index 22f4952b..5eef9081 100644 --- a/.github/workflows/test_cli_cuda_tensorrt_llm.yaml +++ b/.github/workflows/test_cli_cuda_tensorrt_llm.yaml @@ -24,8 +24,6 
@@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: huggingface/optimum-nvidia:latest options: | @@ -34,7 +32,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing] && diff --git a/.github/workflows/test_cli_cuda_torch_ort_multi_gpu.yaml b/.github/workflows/test_cli_cuda_torch_ort_multi_gpu.yaml index 1e953002..bef93787 100644 --- a/.github/workflows/test_cli_cuda_torch_ort_multi_gpu.yaml +++ b/.github/workflows/test_cli_cuda_torch_ort_multi_gpu.yaml @@ -35,8 +35,6 @@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: opt-bench-cuda:${{ matrix.image.cuda_version }} options: | @@ -45,7 +43,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,torch-ort,deepspeed,peft] && diff --git a/.github/workflows/test_cli_cuda_torch_ort_single_gpu.yaml b/.github/workflows/test_cli_cuda_torch_ort_single_gpu.yaml index fed60cb6..7b4000f6 100644 --- a/.github/workflows/test_cli_cuda_torch_ort_single_gpu.yaml +++ b/.github/workflows/test_cli_cuda_torch_ort_single_gpu.yaml @@ -35,8 +35,6 @@ jobs: - name: Run tests uses: addnab/docker-run-action@v3 - env: - WORKSPACE: ${{ github.workspace }} with: image: opt-bench-cuda:${{ matrix.image.cuda_version }} options: | @@ -45,7 +43,7 @@ jobs: --shm-size 64G --env USE_CUDA="1" --env PROCESS_SPECIFIC_VRAM="0" - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,torch-ort,peft] && diff --git a/.github/workflows/test_cli_rocm_pytorch_multi_gpu.yaml b/.github/workflows/test_cli_rocm_pytorch_multi_gpu.yaml 
index abe069b1..547fd679 100644 --- a/.github/workflows/test_cli_rocm_pytorch_multi_gpu.yaml +++ b/.github/workflows/test_cli_rocm_pytorch_multi_gpu.yaml @@ -25,6 +25,13 @@ jobs: - name: Checkout code uses: actions/checkout@v3 + - name: Target devices + run: | + echo "DEVICE0: $DEVICE0" + echo "DEVICE1: $DEVICE1" + echo "DEVICE0=$DEVICE0" >> $GITHUB_ENV + echo "DEVICE1=$DEVICE1" >> $GITHUB_ENV + - name: Build image run: docker build --build-arg USER_ID=$(id -u) @@ -40,7 +47,6 @@ jobs: env: DEVICE0: ${{ env.DEVICE0 }} DEVICE1: ${{ env.DEVICE1 }} - WORKSPACE: ${{ github.workspace }} with: image: opt-bench-rocm:${{ matrix.image.rocm_version }} options: | @@ -51,8 +57,8 @@ jobs: --device /dev/kfd --device /dev/dri/${{ env.DEVICE0 }} --device /dev/dri/${{ env.DEVICE1 }} - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,diffusers,timm,deepspeed,peft,autoawq-rocm,auto-gptq-rocm] - pytest -x -s -k 'cli and rocm and pytorch and (dp or ddp or device_map or deepspeed)' + pytest -x -s -k 'cli and cuda and pytorch and (dp or ddp or device_map or deepspeed)' diff --git a/.github/workflows/test_cli_rocm_pytorch_single_gpu.yaml b/.github/workflows/test_cli_rocm_pytorch_single_gpu.yaml index ce00c49f..8f6bb40e 100644 --- a/.github/workflows/test_cli_rocm_pytorch_single_gpu.yaml +++ b/.github/workflows/test_cli_rocm_pytorch_single_gpu.yaml @@ -25,6 +25,11 @@ jobs: - name: Checkout code uses: actions/checkout@v3 + - name: Target devices + run: | + echo "DEVICE: $DEVICE" + echo "DEVICE=$DEVICE" >> $GITHUB_ENV + - name: Build image run: docker build --build-arg USER_ID=$(id -u) @@ -39,7 +44,6 @@ jobs: uses: addnab/docker-run-action@v3 env: DEVICE: ${{ env.DEVICE }} - WORKSPACE: ${{ github.workspace }} with: image: opt-bench-rocm:${{ matrix.image.rocm_version }} options: | @@ -49,8 +53,8 @@ jobs: --env USE_ROCM="1" --device 
/dev/kfd --device /dev/dri/${{ env.DEVICE }} - --volume ${{ env.WORKSPACE }}:/workspace + --volume ${{ github.workspace }}:/workspace --workdir /workspace run: | pip install -e .[testing,diffusers,timm,peft,autoawq-rocm,auto-gptq-rocm] - pytest -x -s -k "cli and rocm and pytorch and not (dp or ddp or device_map or deepspeed) and not (bnb or awq or torch_compile)" + pytest -x -s -k "cli and cuda and pytorch and not (dp or ddp or device_map or deepspeed) and not (bnb or awq or torch_compile)"