Looking for BASE redefinition #75
name: Conda build and test

on:
  workflow_dispatch:
  schedule:
    - cron: "5 2 * * *"
  push:

permissions: read-all

env:
  BACKEND: XPU
  TRITON_DISABLE_LINE_INFO: 1

jobs:
  integration-tests:
    name: Integration tests
    runs-on:
      - glados
      - spr
      - conda-0.0.1
    strategy:
      matrix:
        python:
          - "3.9"
          - "3.10"
    defaults:
      run:
        shell: bash --noprofile --norc -eo pipefail -c "source /home/runner/intel/oneapi/setvars.sh > /dev/null && source /home/runner/miniconda3/etc/profile.d/conda.sh && source {0}"
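        # GitHub Actions substitutes {0} with the path of the temporary script
        # generated from each step's "run:" block, so every step below executes
        # with the oneAPI and conda environments already sourced.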
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Load pip cache
        id: pip-cache
        uses: ./.github/actions/load
        env:
          # Increase this value to reset cache
          CACHE_NUMBER: 2
        with:
          path: $HOME/.cache/pip
          key: pip-${{ matrix.python }}-${{ hashFiles('python/pyproject.toml', 'python/setup.py') }}-${{ env.CACHE_NUMBER }}
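      # The local load/save composite actions appear to mirror actions/cache:
      # "load" exposes a "status" output ("miss" when the key is absent) that
      # gates the expensive rebuild steps and the matching "save" steps below.
      # Each key mixes the inputs that should invalidate the cache with a
      # manually bumped CACHE_NUMBER for forced resets.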
      - name: Get LLVM commit id
        uses: ./.github/actions/get-commit-id
        with:
          repository: intel/llvm.git
          branch: genx
          variable: LLVM_COMMIT_ID
      - name: Calculate env
        run: |
          PACKAGES_CACHE_KEY=$(echo $LLVM_COMMIT_ID ${{ hashFiles('scripts/compile-triton.sh') }} | sha256sum - | cut -d' ' -f1)
          echo "PACKAGES_CACHE_KEY=$PACKAGES_CACHE_KEY" >>$GITHUB_ENV
          echo $HOME/miniconda3/bin >>$GITHUB_PATH
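      # Lines appended to $GITHUB_ENV become environment variables in all later
      # steps, and directories appended to $GITHUB_PATH are prepended to PATH,
      # so PACKAGES_CACHE_KEY and the miniconda bin directory are available
      # from here on. The key changes whenever the pinned LLVM commit or the
      # build script changes.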
      - name: Load packages cache
        id: packages-cache
        uses: ./.github/actions/load
        env:
          # Increase this value to reset cache
          CACHE_NUMBER: 3
        with:
          path: $HOME/packages
          key: conda-packages-${{ env.PACKAGES_CACHE_KEY }}-${{ env.CACHE_NUMBER }}
      - name: Load conda cache
        id: conda-cache
        uses: ./.github/actions/load
        env:
          CACHE_NUMBER: 5
        with:
          path: $HOME/miniconda3/envs/triton
          key: conda-py${{ matrix.python }}-${{ hashFiles('scripts/triton.yml') }}-${{ env.CACHE_NUMBER }}
      - name: Update conda env
        if: ${{ steps.conda-cache.outputs.status == 'miss' }}
        run: |
          conda create -n triton --override-channels -c conda-forge python=${{ matrix.python }}.*
          conda env update -f scripts/triton.yml
          find /home/runner/intel/oneapi/ \( -name '*.so' -or -name '*.so.*' \) -exec cp -n {} $HOME/miniconda3/envs/triton/lib \;
          ln -snf /usr/include/level_zero $HOME/miniconda3/envs/triton/bin/../x86_64-conda-linux-gnu/sysroot/usr/include/level_zero
          find /usr -name libze_\* -exec cp -n {} $HOME/miniconda3/envs/triton/lib \;
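      # The three commands after "conda env update" appear to graft system
      # libraries into the fresh env: oneAPI shared objects are copied into the
      # env's lib directory ("cp -n" never overwrites conda's own files), the
      # Level Zero headers are symlinked into the conda sysroot so the
      # cross-compiler can find them, and the libze_* loader libraries are
      # copied in as well.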
      - name: Add conda info to log
        run: |
          conda info
          conda list -n triton
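      # A bare "set" dumps all shell variables into the log for debugging.
      # Building LLVM is the expensive path, so it runs only on a
      # packages-cache miss, and the result is saved back immediately so a
      # later failure cannot discard it.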
      - name: Build packages
        if: ${{ steps.packages-cache.outputs.status == 'miss' }}
        run: |
          set
          conda run --no-capture-output -n triton ./scripts/compile-triton.sh --llvm
      - name: Save packages cache
        if: ${{ steps.packages-cache.outputs.status == 'miss' }}
        uses: ./.github/actions/save
        with:
          path: ${{ steps.packages-cache.outputs.path }}
          dest: ${{ steps.packages-cache.outputs.dest }}
      - name: Build Triton
        run: |
          conda run -n triton scripts/compile-triton.sh --triton
      - name: Run core tests
        run: |
          set
          conda run -n triton scripts/test-triton.sh
      - name: Save conda cache
        if: ${{ steps.conda-cache.outputs.status == 'miss' }}
        uses: ./.github/actions/save
        with:
          path: ${{ steps.conda-cache.outputs.path }}
          dest: ${{ steps.conda-cache.outputs.dest }}
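      # The E2E step below assumes a PyTorch checkout next to the workspace
      # ("cd ../pytorch"), presumably provisioned on the self-hosted runner
      # rather than cloned by this workflow. transformers is pinned to
      # PyTorch's own CI pin so both CIs exercise the same model code.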
      - name: Run E2E test
        run: |
          conda activate triton
          # Set WORKSPACE for inductor_xpu_test.sh to make sure it creates "inductor_log" outside of the cloned pytorch directory
          export WORKSPACE=$GITHUB_WORKSPACE
          cd ../pytorch
          TRANSFORMERS_VERSION="$(<.ci/docker/ci_commit_pins/huggingface.txt)"
          pip install pyyaml pandas scipy numpy psutil pyre_extensions torchrec transformers==$TRANSFORMERS_VERSION
          # TODO: Find the fastest Hugging Face model
          $GITHUB_WORKSPACE/scripts/inductor_xpu_test.sh huggingface float32 inference accuracy xpu 0 static 1 0 AlbertForMaskedLM
          # The script above always returns 0, so we need an additional check to see if the accuracy test passed
          cat $WORKSPACE/inductor_log/*/*/*.csv
          grep AlbertForMaskedLM $WORKSPACE/inductor_log/*/*/*.csv | grep -q ,pass,
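      # "grep -q ,pass," makes the step's exit status reflect the accuracy
      # result: the CSV row for AlbertForMaskedLM must contain a "pass" field,
      # otherwise the final grep exits non-zero and the step fails.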
      - name: Save pip cache
        if: ${{ steps.pip-cache.outputs.status == 'miss' }}
        uses: ./.github/actions/save
        with:
          path: ${{ steps.pip-cache.outputs.path }}
          dest: ${{ steps.pip-cache.outputs.dest }}