name: "Run Llama-models Tests"
on:
  pull_request_target:
    types: opened
    branches:
      - 'main'
    paths:
      - 'models/**/*.py'
  workflow_dispatch:
    inputs:
      runner:
        description: 'GHA Runner Scale Set label to run workflow on.'
        required: true
        default: llama-models-fork-gha-runnes-gpu
      debug:
        description: 'Run debugging steps?'
        required: false
        default: "true"
      sleep_time:
        description: '[DEBUG] sleep time for debugging'
        required: true
        default: "0"
      api_key:
        description: 'Provider API key'
        required: false
        default: "---"
      require_model:
        description: 'Whether a model is required for this run'
        required: true
        default: "false"
      model_vision:
        description: 'Llama vision model to use for testing'
        required: false
        default: "Llama3.2-11B-Vision-Instruct"
      model_text:
        description: 'Llama text model to use for testing'
        required: false
        default: "Llama3.2-3B-Instruct"
      branch:
        description: 'Branch to check out before running tests'
        required: true
        default: "main"
env:
  TOKENIZER_PATH: "models/llama3/api/tokenizer.model"
  MODELS_PATH: "/data/llama3.2"
  VISION_MODEL_CHECKPOINT_DIR: "/data/llama3.2/${{ github.event.inputs.model_vision }}"
  TEXT_MODEL_CHECKPOINT_DIR: "/data/llama3.2/${{ github.event.inputs.model_text }}"
  API_KEY: "${{ github.event.inputs.api_key }}"
  PYTORCH_CUDA_ALLOC_CONF: expandable_segments:True
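
# NOTE: `github.event.inputs.*` is only populated for workflow_dispatch events.
# On pull_request_target runs the three input-derived values above expand to
# empty strings, so e.g. VISION_MODEL_CHECKPOINT_DIR resolves to just
# "/data/llama3.2/".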
jobs:
  execute_workflow:
    name: Execute workload on Self-Hosted GPU k8s runner
    defaults:
      run:
        shell: bash # default shell for every step in this job
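    # `cond && a || b` is the common GitHub Actions expression idiom for
    # "a if cond else b"; it is safe here because the truthy branch (a runner
    # label) is never an empty string.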
    runs-on: ${{ github.event.inputs.runner != '' && github.event.inputs.runner || 'llama-models-fork-gha-runnes-gpu' }}
    if: always()
    steps:
      ##############################
      #### INITIAL DEBUG CHECKS ####
      ##############################
      - name: "[DEBUG] ls -la mounted EFS volume"
        id: efs_volume
        continue-on-error: true
        if: github.event.inputs.debug == 'true'
        run: |
          echo "========= Content of the EFS mount ============="
          ls -la ${{ env.MODELS_PATH }}
- name: "Check if models exist in EFS volume"
id: check_if_model_exists
if: ${{ github.event.inputs.require_model == 'true' }}
run: |
MODEL_PATH=${{ env.MODELS_PATH }}
# Check if vision model is provided and exists
if [ -n "${{ inputs.model_vision }}" ]; then
if [ ! -d "${MODEL_PATH}/${{ inputs.model_vision }}" ]; then
echo "Model '${{ inputs.model_vision }}' does not exist in mounted EFS volume, Terminating workflow."
exit 1
else
echo "Content of '${{ inputs.model_vision }}' model"
ls -la "${MODEL_PATH}/${{ inputs.model_vision }}"
fi
fi
# Check if text model is provided and exists
if [ -n "${{ inputs.model_text }}" ]; then
if [ ! -d "${MODEL_PATH}/${{ inputs.model_text }}" ]; then
echo "Model '${{ inputs.model_text }}' does not exist in mounted EFS volume, Terminating workflow."
exit 1
else
echo "Content of '${{ inputs.model_text }}' model"
ls -la "${MODEL_PATH}/${{ inputs.model_text }}"
fi
fi
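      # NOTE: require_model can only be set via workflow_dispatch, so this check
      # is effectively skipped on pull_request_target runs (the empty input never
      # equals 'true').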
- name: "[DEBUG] Get runner container OS information"
id: os_info
if: ${{ github.event.inputs.debug == 'true' }}
run: |
cat /etc/os-release
      #######################
      #### CODE CHECKOUT ####
      #######################
      - name: "Checkout 'meta-llama/llama-models' repository"
        id: checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.branch }}
      - name: "[DEBUG] Content of the repository after checkout"
        id: content_after_checkout
        if: ${{ github.event.inputs.debug == 'true' }}
        run: |
          ls -la ${GITHUB_WORKSPACE}
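      # NOTE: on pull_request_target the `branch` input is empty, so checkout
      # falls back to `github.sha`, which for this event points at the head of
      # the *base* branch rather than the PR head. That is the safer default for
      # a secrets-bearing pull_request_target workflow, but it means PR runs
      # exercise the base branch unless the ref is changed deliberately.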
      ############################################################
      ####                 OPTIONAL SLEEP DEBUG               ####
      #                                                          #
      #  Use to "exec" into the test k8s POD and run tests       #
      #  manually to identify what dependencies are being used.  #
      #                                                          #
      ############################################################
      - name: "[DEBUG] sleep"
        id: sleep
        if: ${{ github.event.inputs.debug == 'true' && github.event.inputs.sleep_time != '' }}
        run: |
          sleep ${{ inputs.sleep_time }}
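      # While the step above sleeps, the runner pod can be entered for manual
      # debugging. A sketch, assuming kubectl access to the runner cluster
      # (pod and namespace names are deployment-specific placeholders):
      #   kubectl exec -it <runner-pod-name> -n <runner-namespace> -- /bin/bash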
      ##################################
      #### DEPENDENCY INSTALLATIONS ####
      ##################################
      - name: "Installing 'apt' required packages"
        id: apt_install
        run: |
          echo "[STEP] Installing 'apt' required packages"
          sudo apt update -y
          sudo apt upgrade -y
          sudo apt install python3-pip -y
- name: "Installing 'llama-models' dependencies"
id: pip_install
run: |
echo "[STEP] Installing 'llama-models' models"
pip install -U pip setuptools
pip install -r requirements.txt
pip install blobfile
pip install llama-models
pip install xmlrunner
pip install pytest
- name: "Installing specific manual_dispatch dependencies"
id: pip_install_manual
if: github.event_name == 'workflow_dispatch'
run: |
echo "[STEP] Installing specific dependencies for manual dispatch workflows"
pip install numpy
pip install torch
pip install fairscale
pip install termcolor
pip install torchvision
      ############################################
      #### AUTOMATIC TESTING ON PULL REQUESTS ####
      ############################################
      #### Run tests ####
      - name: Run Tokenizer tests
        id: run_tests_tokenizer
        working-directory: ${{ github.workspace }}
        if: github.event_name == 'pull_request_target'
        run: |
          echo "[STEP] Running Tokenizer tests on Self-Hosted k8s ARC Runner"
          python3 -m pytest models/llama3/ -k "not test_generation.py" --junitxml="${{ github.workspace }}/result.xml"
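      # `-k "not test_generation.py"` deselects every test whose node id contains
      # "test_generation.py" (pytest's -k matches keywords such as module, class,
      # and function names, so a module filename works as a keyword). A sketch of
      # an equivalent alternative, with a placeholder path since the file's real
      # location is repository-specific:
      #   python3 -m pytest models/llama3/ --ignore=<path-to-test_generation.py> --junitxml=result.xml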
      #### Create test summary ####
      - name: PR - Test Summary
        id: create_test_summary_pr
        if: github.event_name == 'pull_request_target'
        uses: test-summary/action@v2
        with:
          paths: "${{ github.workspace }}/result.xml"
          output: test-summary.md
      - name: PR - Upload Test Summary
        id: publish_test_summary_pr
        if: github.event_name == 'pull_request_target'
        uses: actions/upload-artifact@v3
        with:
          name: test-summary
          path: test-summary.md
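      # NOTE: actions/upload-artifact@v3 has been deprecated by GitHub; for a
      # single named artifact like this one, moving to @v4 should be a drop-in
      # change.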
      #### Update PR ####
      - name: Update Pull Request with a comment
        id: publish_pr_comment
        if: github.event_name == 'pull_request_target'
        uses: thollander/actions-comment-pull-request@v2
        with:
          filePath: test-summary.md
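      # Posting the comment relies on the workflow's GITHUB_TOKEN having
      # pull-requests write access. pull_request_target grants a write-scoped
      # token by default (unless repository settings restrict it), which is one
      # reason this trigger is used here instead of plain pull_request for
      # fork PRs.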
      ########################
      #### MANUAL TESTING ####
      ########################
      #### Run tests ####
      - name: "Running all PyTest tests on Self-Hosted k8s ARC Runner"
        id: pytest
        working-directory: ${{ github.workspace }}
        if: github.event_name == 'workflow_dispatch'
        run: |
          echo "[STEP] Running PyTest tests at GITHUB_WORKSPACE: ${GITHUB_WORKSPACE} (github.workspace: ${{ github.workspace }})"
          free -m
          python3 -m pytest --junitxml="${{ github.workspace }}/result.xml"
      #### Create test summary ####
      - name: Manual - Test Summary
        id: create_test_summary_manual
        if: always() && github.event_name == 'workflow_dispatch'
        uses: test-summary/action@v2
        with:
          paths: "${{ github.workspace }}/result.xml"