2025-01-11 nightly release (73f50df)
pytorchbot committed Jan 11, 2025
1 parent ff0e4d6 commit 5be0a05
Showing 525 changed files with 13,933 additions and 5,033 deletions.
2 changes: 1 addition & 1 deletion .ci/docker/ci_commit_pins/buck2.txt
@@ -1 +1 @@
-2024-05-15
+2024-12-16
2 changes: 1 addition & 1 deletion .ci/docker/ci_commit_pins/pytorch.txt
@@ -1 +1 @@
-19eff28ff3f19b50da46f5a9ff5f4d4d213806fe
+0a94bb432ed75cc2d950d81b2921363218a7e459
2 changes: 1 addition & 1 deletion .ci/docker/common/install_pytorch.sh
@@ -26,7 +26,7 @@ install_pytorch_and_domains() {

chown -R ci-user .

-export _GLIBCXX_USE_CXX11_ABI=0
+export _GLIBCXX_USE_CXX11_ABI=1
# Then build and install PyTorch
conda_run python setup.py bdist_wheel
pip_install "$(echo dist/*.whl)"
2 changes: 2 additions & 0 deletions .ci/docker/conda-env-ci.txt
@@ -1,2 +1,4 @@
cmake=3.22.1
ninja=1.10.2
+libuv
+pkg-config
95 changes: 95 additions & 0 deletions .ci/scripts/download_hf_hub.sh
@@ -0,0 +1,95 @@
#!/bin/bash

# Function to download files from the Hugging Face Hub
# Arguments:
#   1. model_id: The Hugging Face repository ID (e.g., "organization/model_name")
#   2. subdir: The optional subdirectory in the repo to look for files (pass "" if not used)
#   3. file_names: A space-separated list of filenames to be downloaded
# Returns:
#   The directory containing the downloaded files
function download_hf_files() {
  local model_id="$1"
  local subdir="$2"
  shift 2
  local file_names=("$@") # Capture all remaining arguments as an array

  local download_dir

  # Use the first file to determine the download directory
  download_dir=$(python3 -c "
from huggingface_hub import hf_hub_download
# Download the first file and get its directory
path = hf_hub_download(
    repo_id='${model_id}',
    filename='${subdir:+${subdir}/}${file_names[0]}'
)
import os
print(os.path.dirname(path))")

  if [ $? -ne 0 ]; then
    echo "Error: Failed to determine download directory from ${file_names[0]}" >&2
    return 1
  fi

  # Download the remaining files into the same directory
  for file_name in "${file_names[@]:1}"; do
    python3 -c "
from huggingface_hub import hf_hub_download
# Download the file
hf_hub_download(
    repo_id='${model_id}',
    filename='${subdir:+${subdir}/}${file_name}'
)"

    if [ $? -ne 0 ]; then
      echo "Error: Failed to download ${file_name} from ${model_id}" >&2
      return 1
    fi
  done

  # Return the directory containing the downloaded files
  echo "$download_dir"
}

# Check if the script is called directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
  # Parse arguments from CLI
  while [[ $# -gt 0 ]]; do
    case $1 in
      --model_id)
        MODEL_ID="$2"
        shift 2
        ;;
      --subdir)
        SUBDIR="$2"
        shift 2
        ;;
      --files)
        shift
        FILES_TO_DOWNLOAD=()
        while [[ $# -gt 0 && $1 != --* ]]; do
          FILES_TO_DOWNLOAD+=("$1")
          shift
        done
        ;;
      *)
        echo "Unknown option: $1" >&2
        exit 1
        ;;
    esac
  done

  # Validate required arguments
  if [ -z "$MODEL_ID" ] || [ ${#FILES_TO_DOWNLOAD[@]} -eq 0 ]; then
    echo "Usage: $0 --model_id <model_id> --subdir <subdir> --files <file1> [<file2> ...]" >&2
    exit 1
  fi

  # Call the function
  DOWNLOAD_DIR=$(download_hf_files "$MODEL_ID" "$SUBDIR" "${FILES_TO_DOWNLOAD[@]}")
  if [ $? -eq 0 ]; then
    echo "$DOWNLOAD_DIR"
  else
    exit 1
  fi
fi
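
For reference, a hypothetical invocation of this new helper, following the usage string above (the model ID, subdirectory, and file names are illustrative, not part of the commit):

DOWNLOAD_DIR=$(bash .ci/scripts/download_hf_hub.sh \
  --model_id "meta-llama/Llama-3.2-1B" \
  --subdir "original" \
  --files "params.json" "tokenizer.model")
echo "Downloaded to: ${DOWNLOAD_DIR}"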
224 changes: 224 additions & 0 deletions .ci/scripts/gather_benchmark_configs.py
@@ -0,0 +1,224 @@
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import json
import logging
import os
import re
from typing import Any, Dict

from examples.models import MODEL_NAME_TO_MODEL


# Device pools for AWS Device Farm
DEVICE_POOLS = {
    "apple_iphone_15": "arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/3b5acd2e-92e2-4778-b651-7726bafe129d",
    "apple_iphone_15+ios_18": "arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/12c8b15c-8d03-4e07-950d-0a627e7595b4",
    "samsung_galaxy_s22": "arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/e59f866a-30aa-4aa1-87b7-4510e5820dfa",
    "samsung_galaxy_s24": "arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/98f8788c-2e25-4a3c-8bb2-0d1e8897c0db",
    "google_pixel_8_pro": "arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/d65096ab-900b-4521-be8b-a3619b69236a",
}

# Predefined benchmark configurations
BENCHMARK_CONFIGS = {
    "xplat": [
        "xnnpack_q8",
        "hf_xnnpack_fp32",
        "llama3_fb16",
        "llama3_spinquant",
        "llama3_qlora",
    ],
    "android": [
        "qnn_q8",
        # TODO: Add support for llama3 htp
        # "llama3_qnn_htp",
    ],
    "ios": [
        "coreml_fp16",
        "mps",
        "llama3_coreml_ane",
    ],
}


def parse_args() -> Any:
    """
    Parse command-line arguments.

    Returns:
        argparse.Namespace: Parsed command-line arguments.

    Example:
        parse_args() -> Namespace(models=['mv3', 'meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8'],
                                  os='android',
                                  devices=['samsung_galaxy_s22'])
    """
    from argparse import ArgumentParser

    def comma_separated(value: str):
        """
        Parse a comma-separated string into a list.
        """
        return value.split(",")

    parser = ArgumentParser("Gather all benchmark configs.")
    parser.add_argument(
        "--os",
        type=str,
        choices=["android", "ios"],
        help="The target OS.",
    )
    parser.add_argument(
        "--models",
        type=comma_separated,  # Use the custom parser for comma-separated values
        help=f"Comma-separated model IDs or names. Valid values include {MODEL_NAME_TO_MODEL}.",
    )
    parser.add_argument(
        "--devices",
        type=comma_separated,  # Use the custom parser for comma-separated values
        help=f"Comma-separated device names. Available devices: {list(DEVICE_POOLS.keys())}",
    )

    return parser.parse_args()


def set_output(name: str, val: Any) -> None:
    """
    Set the output value to be used by other GitHub jobs.

    Args:
        name (str): The name of the output variable.
        val (Any): The value to set for the output variable.

    Example:
        set_output("benchmark_configs", {"include": [...]})
    """

    if os.getenv("GITHUB_OUTPUT"):
        print(f"Setting {val} to GitHub output")
        with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
            print(f"{name}={val}", file=env)
    else:
        print(f"::set-output name={name}::{val}")


def is_valid_huggingface_model_id(model_name: str) -> bool:
    """
    Validate if the model name matches the pattern for HuggingFace model IDs.

    Args:
        model_name (str): The model name to validate.

    Returns:
        bool: True if the model name matches the valid pattern, False otherwise.

    Example:
        is_valid_huggingface_model_id('meta-llama/Llama-3.2-1B') -> True
    """
    pattern = r"^[a-zA-Z0-9-_]+/[a-zA-Z0-9-_.]+$"
    return bool(re.match(pattern, model_name))


def get_benchmark_configs() -> Dict[str, Dict]:
    """
    Gather benchmark configurations for a given set of models on the target operating system and devices.

    Args:
        None

    Returns:
        Dict[str, Dict]: A dictionary containing the benchmark configurations.

    Example:
        get_benchmark_configs() -> {
            "include": [
                {
                    "model": "meta-llama/Llama-3.2-1B",
                    "config": "llama3_qlora",
                    "device_name": "apple_iphone_15",
                    "device_arn": "arn:aws:..."
                },
                {
                    "model": "mv3",
                    "config": "xnnpack_q8",
                    "device_name": "samsung_galaxy_s22",
                    "device_arn": "arn:aws:..."
                },
                ...
            ]
        }
    """
    args = parse_args()
    target_os = args.os
    devices = args.devices
    models = args.models

    benchmark_configs = {"include": []}

    for model_name in models:
        configs = []
        if is_valid_huggingface_model_id(model_name):
            if model_name.startswith("meta-llama/"):
                # LLaMA models
                repo_name = model_name.split("meta-llama/")[1]
                if "qlora" in repo_name.lower():
                    configs.append("llama3_qlora")
                elif "spinquant" in repo_name.lower():
                    configs.append("llama3_spinquant")
                else:
                    configs.append("llama3_fb16")
                configs.extend(
                    [
                        config
                        for config in BENCHMARK_CONFIGS.get(target_os, [])
                        if config.startswith("llama")
                    ]
                )
            else:
                # Non-LLaMA models
                configs.append("hf_xnnpack_fp32")
        elif model_name in MODEL_NAME_TO_MODEL:
            # ExecuTorch in-tree non-GenAI models
            configs.append("xnnpack_q8")
            configs.extend(
                [
                    config
                    for config in BENCHMARK_CONFIGS.get(target_os, [])
                    if not config.startswith("llama")
                ]
            )
        else:
            # Skip unknown models with a warning
            logging.warning(f"Unknown or invalid model name '{model_name}'. Skipping.")
            continue

        # Add configurations for each valid device
        for device in devices:
            for config in configs:
                if config == "llama3_coreml_ane" and not device.endswith("+ios_18"):
                    device = f"{device}+ios_18"
                    logging.info(
                        f"Benchmark config '{config}' only works on iOS 18+, auto-upgraded device pool to '{device}'"
                    )

                if device not in DEVICE_POOLS:
                    logging.warning(f"Unsupported device '{device}'. Skipping.")
                    continue

                record = {
                    "model": model_name,
                    "config": config,
                    "device_name": device,
                    "device_arn": DEVICE_POOLS[device],
                }
                benchmark_configs["include"].append(record)

    set_output("benchmark_configs", json.dumps(benchmark_configs))


if __name__ == "__main__":
    get_benchmark_configs()
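
For reference, a hypothetical CI invocation of this script, mirroring the example in the parse_args() docstring (the model and device choices are illustrative):

python .ci/scripts/gather_benchmark_configs.py \
  --os android \
  --models "mv3,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8" \
  --devices samsung_galaxy_s22

When GITHUB_OUTPUT is unset (i.e., outside GitHub Actions), the gathered matrix is printed via the ::set-output fallback instead of being appended to the output file.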
11 changes: 11 additions & 0 deletions .ci/scripts/setup-arm-baremetal-tools.sh
@@ -0,0 +1,11 @@
#!/bin/bash
# Copyright 2024 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# NB: This script can be used to install Arm dependencies
# Setup arm example environment (including TOSA tools)
git config --global user.email "github_executorch@arm.com"
git config --global user.name "Github Executorch"
bash examples/arm/setup.sh --i-agree-to-the-contained-eula
6 changes: 5 additions & 1 deletion .ci/scripts/setup-macos.sh
@@ -131,5 +131,9 @@ if [[ -z "${GITHUB_RUNNER:-}" ]]; then
fi

print_cmake_info
-install_executorch
+install_pytorch_and_domains
+# We build PyTorch from source here instead of using nightly. This allows CI to test against
+# the pinned commit from PyTorch
+install_executorch "use-pt-pinned-commit"
build_executorch_runner "${BUILD_TOOL}"
+do_not_use_nightly_on_ci
2 changes: 1 addition & 1 deletion .ci/scripts/test_llama.sh
@@ -208,7 +208,7 @@ prepare_artifacts_upload() {
PARAMS="params.json"
CHECKPOINT_FILE_NAME=""
touch "${PARAMS}"
if [[ "${MODEL_NAME}" == "stories110M" ]]; then
if [[ "${MODEL_NAME}" == "llama" ]] || [[ "${MODEL_NAME}" == "stories"* ]] || [[ "${MODEL_NAME}" == "tinyllama" ]]; then
CHECKPOINT_FILE_NAME="stories110M.pt"
download_stories_model_artifacts
else
1 change: 0 additions & 1 deletion .ci/scripts/test_llava.sh
@@ -41,7 +41,6 @@ EXECUTORCH_COMMON_CMAKE_ARGS=" \
  -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
  -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
  -DEXECUTORCH_BUILD_XNNPACK=ON \
-  -DEXECUTORCH_DO_NOT_USE_CXX11_ABI=ON \
  -DEXECUTORCH_XNNPACK_SHARED_WORKSPACE=ON"

cmake_install_executorch_libraries() {