diff --git a/.github/workflows/gha_workflow_llama_models_tests.yml b/.github/workflows/gha_workflow_llama_models_tests.yml
index c6ee0190..aae8672f 100644
--- a/.github/workflows/gha_workflow_llama_models_tests.yml
+++ b/.github/workflows/gha_workflow_llama_models_tests.yml
@@ -2,7 +2,7 @@
 name: "Run Llama-models Tests"
 on:
   pull_request_target:
-    types: opened
+    types: ["opened"]
     branches:
       - 'main'
     paths:
@@ -13,7 +13,12 @@ on:
       runner:
         description: 'GHA Runner Scale Set label to run workflow on.'
         required: true
-        default: llama-models-fork-gha-runnes-gpu
+        default: "llama-models-fork-gha-runner-gpu"
+
+      branch:
+        description: "Branch to checkout"
+        required: true
+        default: "main"
 
       debug:
         description: 'Run debugging steps?'
@@ -25,37 +30,32 @@ on:
         required: true
         default: "0"
 
-      api_key:
-        description: 'Provider API key'
-        required: false
-        default: "---"
-
       require_model:
-        description: 'Determine whether or not a model is required for this run'
+        description: 'Is a model required?'
         required: true
-        default: "false"
+        default: "true"
 
       model_vision:
-        description: 'Llama vision model to use for testing'
+        description: 'Llama vision model ID'
         required: false
         default: "Llama3.2-11B-Vision-Instruct"
 
       model_text:
-        description: 'Llama text model to use for testing'
+        description: 'Llama text model ID'
         required: false
         default: "Llama3.2-3B-Instruct"
 
-      branch:
-        description: "Branch parameter to control which branch to checkout"
+      api_key:
+        description: 'Provider API key'
         required: true
-        default: "main"
+        default: "---"
 
 env:
   TOKENIZER_PATH: "models/llama3/api/tokenizer.model"
-  MODELS_PATH: '/data/llama3.2'
-  VISION_MODEL_CHECKPOINT_DIR: ${{ github.events.inputs.model_vision }}
-  TEXT_MODEL_CHECKPOINT_DIR: ${{ github.events.inputs.model_text }}
-  API_KEY: ${{ github.event.inputs.api_key }}
+  MODELS_PATH: "/data/llama3.2"
+  VISION_MODEL_CHECKPOINT_DIR: "/data/llama3.2/${{ inputs.model_vision }}"
+  TEXT_MODEL_CHECKPOINT_DIR: "/data/llama3.2/${{ inputs.model_text }}"
+  API_KEY: "${{ inputs.api_key }}"
 
 jobs:
   execute_workflow:
@@ -63,51 +63,50 @@ jobs:
     defaults:
       run:
        shell: bash # default shell to run all steps for a given job.
-    runs-on: ${{ github.event.inputs.runner != '' && github.event.inputs.runner || 'llama-models-fork-gha-runnes-gpu' }}
+    runs-on: ${{ inputs.runner != '' && inputs.runner || 'llama-models-fork-gha-runner-gpu' }}
+    if: always()
     steps:
 
       ##############################
       #### INITIAL DEBUG CHECKS ####
       ##############################
 
-      - name: "[DEBUG] ls -la mounted EFS volume"
-        id: efs_volume
+      - name: "[DEBUG] Check content of the EFS mount"
+        id: debug_efs_volume
         continue-on-error: true
-        if: github.event.inputs.debug == 'true'
+        if: inputs.debug == 'true'
         run: |
             echo "========= Content of the EFS mount ============="
            ls -la ${{ env.MODELS_PATH }}
 
       - name: "Check if models exist in EFS volume"
-        id: check_if_model_exists
-        #if: ${{ github.events.input.require_model == 'true' }}
+        id: check_if_models_exist
+        if: ${{ inputs.require_model == 'true' }}
        run: |
-          MODEL_PATH=${{ env.MODELS_PATH }}
-
           # Check if vision model is provided and exists
           if [ -n "${{ inputs.model_vision }}" ]; then
-            if [ ! -d "${MODEL_PATH}/${{ inputs.model_vision }}" ]; then
+            if [ ! -d "${{ env.VISION_MODEL_CHECKPOINT_DIR }}" ]; then
               echo "Model '${{ inputs.model_vision }}' does not exist in mounted EFS volume, Terminating workflow."
               exit 1
             else
               echo "Content of '${{ inputs.model_vision }}' model"
-              ls -la "${MODEL_PATH}/${{ inputs.model_vision }}"
+              ls -la "${{ env.VISION_MODEL_CHECKPOINT_DIR }}"
             fi
           fi
 
           # Check if text model is provided and exists
           if [ -n "${{ inputs.model_text }}" ]; then
-            if [ ! -d "${MODEL_PATH}/${{ inputs.model_text }}" ]; then
+            if [ ! -d "${{ env.TEXT_MODEL_CHECKPOINT_DIR }}" ]; then
               echo "Model '${{ inputs.model_text }}' does not exist in mounted EFS volume, Terminating workflow."
               exit 1
             else
               echo "Content of '${{ inputs.model_text }}' model"
-              ls -la "${MODEL_PATH}/${{ inputs.model_text }}"
+              ls -la "${{ env.TEXT_MODEL_CHECKPOINT_DIR }}"
             fi
           fi
 
       - name: "[DEBUG] Get runner container OS information"
-        id: os_info
-        if: ${{ github.event.inputs.debug == 'true' }}
+        id: debug_os_info
+        if: ${{ inputs.debug == 'true' }}
         run: |
             cat /etc/os-release
@@ -115,14 +114,14 @@ jobs:
       #### CODE CHECKOUT ####
       #######################
       - name: "Checkout 'meta-llama/llama-models' repository"
-        id: checkout
+        id: checkout_repo
         uses: actions/checkout@v4
         with:
-          ref: ${{ github.event.inputs.branch }}
+          ref: ${{ inputs.branch }}
 
       - name: "[DEBUG] Content of the repository after checkout"
-        id: content_after_checkout
-        if: ${{ github.event.inputs.debug == 'true' }}
+        id: debug_content_after_checkout
+        if: ${{ inputs.debug == 'true' }}
         run: |
             ls -la ${GITHUB_WORKSPACE}
 
@@ -134,8 +133,8 @@ jobs:
       # #
       ##########################################################
       - name: "[DEBUG] sleep"
-        id: sleep
-        if: ${{ github.event.inputs.debug == 'true' && github.event.inputs.sleep_time != '' }}
+        id: debug_sleep
+        if: ${{ inputs.debug == 'true' && inputs.sleep_time != '' }}
         run: |
             sleep ${{ inputs.sleep_time }}
 
@@ -143,7 +142,7 @@ jobs:
       #### DEPENDENCY INSTALLATIONS ####
       ##################################
       - name: "Installing 'apt' required packages"
-        id: apt_install
+        id: install_apt
         run: |
           echo "[STEP] Installing 'apt' required packages"
           sudo apt update -y
@@ -151,7 +150,7 @@ jobs:
           sudo apt install python3-pip -y
 
       - name: "Installing 'llama-models' dependencies"
-        id: pip_install
+        id: install_pip_generic
         run: |
           echo "[STEP] Installing 'llama-models' models"
           pip install -U pip setuptools
@@ -162,7 +161,7 @@ jobs:
           pip install pytest
 
       - name: "Installing specific manual_dispatch dependencies"
-        id: pip_install_manual
+        id: manual_install_pip
         if: github.event_name == 'workflow_dispatch'
         run: |
           echo "[STEP] Installing specific dependencies for manual dispatch workflows"
@@ -170,7 +169,7 @@ jobs:
           pip install torch
           pip install fairscale
           pip install termcolor
-
+          pip install torchvision
 
       ############################################
       #### AUTOMATIC TESTING ON PULL REQUESTS ####
@@ -178,26 +177,26 @@ jobs:
 
       #### Run tests ####
 
-      - name: Run Tokenizer tests
-        id: run_tests_tokenizer
-        working-directory: ${{ github.workspace }}
+      - name: "PR - Run Tests"
+        id: pr_run_tests
+        working-directory: "${{ github.workspace }}"
         if: github.event_name == 'pull_request_target'
         run: |
           echo "[STEP]Running Tokenizer tests on Self-Hosted k8s ARC Runner"
-          python3 -m pytest models/llama3/ -k "not test_generation.py" --junitxml="${{ github.workspace }}/result.xml"
+          python3 -m pytest -k "not test_generation.py" --junitxml="${{ github.workspace }}/result.xml"
 
       #### Create test summary ####
 
-      - name: PR - Test Summary
-        id: create_test_summary_pr
+      - name: "PR - Test Summary"
+        id: pr_test_summary_create
         if: github.event_name == 'pull_request_target'
         uses: test-summary/action@v2
         with:
           paths: "${{ github.workspace }}/result.xml"
           output: test-summary.md
 
-      - name: PR - Upload Test Summary
-        id: publish_test_summary_pr
+      - name: "PR - Upload Test Summary"
+        id: pr_test_summary_upload
         if: github.event_name == 'pull_request_target'
         uses: actions/upload-artifact@v3
         with:
@@ -206,8 +205,8 @@ jobs:
 
       #### Update PR request ####
 
-      - name: Update Pull Request with a comment
-        id: publish_pr_comment
+      - name: "PR - Update comment"
+        id: pr_update_comment
         if: github.event_name == 'pull_request_target'
         uses: thollander/actions-comment-pull-request@v2
         with:
@@ -220,19 +219,20 @@ jobs:
 
       #### Run tests ####
 
-      - name: "Running all PyTest tests on Self-Hosted k8s ARC Runner"
-        id: pytest
-        working-directory: ${{ github.workspace }}
+      - name: "Manual - Run Tests"
+        id: manual_run_tests
+        working-directory: "${{ github.workspace }}"
         if: github.event_name == 'workflow_dispatch'
         run: |
           echo "[STEP] Running PyTest tests at 'GITHUB_WORKSPACE' path: ${GITHUB_WORKSPACE} | path: ${{ github.workspace }}"
+          free -m
           python3 -m pytest --junitxml="${{ github.workspace }}/result.xml"
 
       #### Create test summary ####
 
-      - name: Manual - Test Summary
-        id: create_test_summary_manual
-        if: github.event_name == 'workflow_dispatch'
+      - name: "Manual - Test Summary"
+        id: manual_test_summary
+        if: always() && github.event_name == 'workflow_dispatch'
         uses: test-summary/action@v2
         with:
           paths: "${{ github.workspace }}/result.xml"
diff --git a/models/llama3/tests/api/test_generation.py b/models/llama3/tests/api/test_generation.py
index 21bb08d2..e6020e8c 100644
--- a/models/llama3/tests/api/test_generation.py
+++ b/models/llama3/tests/api/test_generation.py
@@ -7,6 +7,7 @@
 import os
 import unittest
+import pytest
 
 from pathlib import Path
 
 
@@ -77,6 +78,8 @@ class TestVisionModelInference(unittest.TestCase):
     def setUpClass(cls):
         cls.generator = build_generator("VISION_MODEL_CHECKPOINT_DIR")
 
+    @unittest.skip("Disabling vision model test")
+    @pytest.mark.skip(reason="Disabling vision model test")
     def test_run_generation(self):
         with open(
             THIS_DIR.parent.parent.parent / "scripts/resources/dog.jpg", "rb"
diff --git a/setup.py b/setup.py
index 8773b9c9..9a9d14b5 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@ def read_requirements():
 
 setup(
     name="llama_models",
-    version="0.0.39",
+    version="0.0.40",
     author="Meta Llama",
     author_email="llama-oss@meta.com",
     description="Llama models",