Commit
add building compute-runtime UMD in benchmarks jobs
pbalcer committed Jan 17, 2025
1 parent 023a847 commit 39061e6
Showing 15 changed files with 163 additions and 40 deletions.
1 change: 1 addition & 0 deletions .github/workflows/benchmarks-reusable.yml
@@ -200,6 +200,7 @@ jobs:
--ur ${{ github.workspace }}/ur_install
--umf ${{ github.workspace }}/umf_build
--adapter ${{ matrix.adapter.str_name }}
--build-compute-runtime
${{ inputs.upload_report && '--output-html' || '' }}
${{ inputs.bench_script_params }}
15 changes: 10 additions & 5 deletions scripts/benchmarks/benches/base.py
@@ -7,7 +7,7 @@
import shutil
from pathlib import Path
from .result import Result
from .options import options
from options import options
from utils.utils import download, run
import urllib.request
import tarfile
@@ -28,17 +28,22 @@ def get_adapter_full_path():
f"could not find adapter file {adapter_path} (and in similar lib paths)"

def run_bench(self, command, env_vars, ld_library=[], add_sycl=True):
env_vars_with_forced_adapter = env_vars.copy()
env_vars = env_vars.copy()
if options.ur is not None:
env_vars_with_forced_adapter.update(
env_vars.update(
{'UR_ADAPTERS_FORCE_LOAD': Benchmark.get_adapter_full_path()})

env_vars.update(options.extra_env_vars)

ld_libraries = options.extra_ld_libraries.copy()
ld_libraries.extend(ld_library)

return run(
command=command,
env_vars=env_vars_with_forced_adapter,
env_vars=env_vars,
add_sycl=add_sycl,
cwd=options.benchmark_cwd,
ld_library=ld_library
ld_library=ld_libraries
).stdout.decode()

def create_data_path(self, name, skip_data_dir = False):
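Read together, the `run_bench` changes mean every benchmark invocation now inherits the globally configured `options.extra_env_vars` and `options.extra_ld_libraries` (populated in `main.py` when `--build-compute-runtime` is passed) on top of its own environment and library paths. A minimal sketch of the resulting merge, reusing the names from the diff above; `adapter_path` stands in for `Benchmark.get_adapter_full_path()`:

```python
# Sketch of the environment / LD_LIBRARY_PATH merge now done by run_bench.
# Order matters: per-benchmark env vars first, then the forced UR adapter,
# then the global extras (e.g. compute-runtime driver overrides).
def merged_invocation(env_vars: dict, ld_library: list, options, adapter_path=None):
    env = env_vars.copy()
    if adapter_path is not None:
        env.update({"UR_ADAPTERS_FORCE_LOAD": adapter_path})
    env.update(options.extra_env_vars)

    libs = options.extra_ld_libraries.copy()   # global library dirs first
    libs.extend(ld_library)                    # then per-benchmark additions
    return env, libs
```

Because the method copies `env_vars` up front, the caller's dictionary is never mutated.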
4 changes: 2 additions & 2 deletions scripts/benchmarks/benches/compute.py
@@ -9,7 +9,7 @@
from utils.utils import run, git_clone, create_build_path
from .base import Benchmark, Suite
from .result import Result
from .options import options
from options import options

class ComputeBench(Suite):
def __init__(self, directory):
@@ -22,7 +22,7 @@ def setup(self):
if options.sycl is None:
return

repo_path = git_clone(self.directory, "compute-benchmarks-repo", "https://github.com/intel/compute-benchmarks.git", "df38bc342641d7e83fbb4fe764a23d21d734e07b")
repo_path = git_clone(self.directory, "compute-benchmarks-repo", "https://github.com/intel/compute-benchmarks.git", "d13e5b4d8dd3d28926a74ab7f67f78c10f708a01")
build_path = create_build_path(self.directory, 'compute-benchmarks-build')

configure_command = [
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/llamacpp.py
@@ -10,7 +10,7 @@
from .base import Benchmark, Suite
from .result import Result
from utils.utils import run, create_build_path
from .options import options
from options import options
from .oneapi import get_oneapi
import os

2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/oneapi.py
@@ -5,7 +5,7 @@

from pathlib import Path
from utils.utils import download, run
from .options import options
from options import options
import os

class OneAPI:
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/syclbench.py
@@ -9,7 +9,7 @@
from utils.utils import run, git_clone, create_build_path
from .base import Benchmark, Suite
from .result import Result
from .options import options
from options import options

class SyclBench(Suite):
def __init__(self, directory):
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/test.py
@@ -8,7 +8,7 @@
from .base import Benchmark, Suite
from .result import Result
from utils.utils import run, create_build_path
from .options import options
from options import options
import os

class TestSuite(Suite):
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/umf.py
@@ -8,7 +8,7 @@
from .base import Benchmark, Suite
from .result import Result
from utils.utils import run, create_build_path
from .options import options
from options import options
from .oneapi import get_oneapi
import os
import csv
19 changes: 12 additions & 7 deletions scripts/benchmarks/benches/velocity.py
@@ -9,7 +9,7 @@
from .base import Benchmark, Suite
from .result import Result
from utils.utils import run, create_build_path
from .options import options
from options import options
from .oneapi import get_oneapi
import shutil

@@ -54,7 +54,6 @@ def __init__(self, name: str, bin_name: str, vb: VelocityBench, unit: str):
self.bench_name = name
self.bin_name = bin_name
self.unit = unit
self.code_path = os.path.join(self.vb.repo_path, self.bench_name, 'SYCL')

def download_deps(self):
return
@@ -66,6 +65,7 @@ def ld_libraries(self) -> list[str]:
return []

def setup(self):
self.code_path = os.path.join(self.vb.repo_path, self.bench_name, 'SYCL')
self.download_deps()
self.benchmark_bin = os.path.join(self.directory, self.bench_name, self.bin_name)

@@ -130,12 +130,13 @@ def parse_output(self, stdout: str) -> float:
class Bitcracker(VelocityBase):
def __init__(self, vb: VelocityBench):
super().__init__("bitcracker", "bitcracker", vb, "s")
self.data_path = os.path.join(vb.repo_path, "bitcracker", "hash_pass")

def name(self):
return "Velocity-Bench Bitcracker"

def bin_args(self) -> list[str]:
self.data_path = os.path.join(self.vb.repo_path, "bitcracker", "hash_pass")

return ["-f", f"{self.data_path}/img_win8_user_hash.txt",
"-d", f"{self.data_path}/user_passwords_60000.txt",
"-b", "60000"]
@@ -175,7 +176,6 @@ def parse_output(self, stdout: str) -> float:
class QuickSilver(VelocityBase):
def __init__(self, vb: VelocityBench):
super().__init__("QuickSilver", "qs", vb, "MMS/CTT")
self.data_path = os.path.join(vb.repo_path, "QuickSilver", "Examples", "AllScattering")

def run(self, env_vars) -> list[Result]:
# TODO: fix the crash in QuickSilver when UR_L0_USE_IMMEDIATE_COMMANDLISTS=0
@@ -191,6 +191,8 @@ def lower_is_better(self):
return False

def bin_args(self) -> list[str]:
self.data_path = os.path.join(self.vb.repo_path, "QuickSilver", "Examples", "AllScattering")

return ["-i", f"{self.data_path}/scatteringOnly.inp"]

def extra_env_vars(self) -> dict:
@@ -266,10 +268,11 @@ def parse_output(self, stdout: str) -> float:

class DLCifar(VelocityBase):
def __init__(self, vb: VelocityBench):
self.oneapi = get_oneapi()
super().__init__("dl-cifar", "dl-cifar_sycl", vb, "s")

def ld_libraries(self):
self.oneapi = get_oneapi()

return self.oneapi.ld_libraries()

def download_deps(self):
@@ -294,10 +297,11 @@ def parse_output(self, stdout: str) -> float:

class DLMnist(VelocityBase):
def __init__(self, vb: VelocityBench):
self.oneapi = get_oneapi()
super().__init__("dl-mnist", "dl-mnist-sycl", vb, "s")

def ld_libraries(self):
self.oneapi = get_oneapi()

return self.oneapi.ld_libraries()

def download_deps(self):
@@ -337,10 +341,11 @@ def parse_output(self, stdout: str) -> float:

class SVM(VelocityBase):
def __init__(self, vb: VelocityBench):
self.oneapi = get_oneapi()
super().__init__("svm", "svm_sycl", vb, "s")

def ld_libraries(self):
self.oneapi = get_oneapi()

return self.oneapi.ld_libraries()

def extra_cmake_args(self):
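The Velocity-Bench changes follow one pattern: work that used to happen in `__init__` (resolving `data_path`, calling `get_oneapi()`) now happens lazily in `setup()`, `bin_args()`, or `ld_libraries()`. Combined with the `main.py` change below, which applies the name filter before calling `setup()`, constructing a benchmark object no longer requires the repository to be cloned or oneAPI to be downloaded. A condensed, hypothetical before/after sketch of the pattern:

```python
import os

# Before: constructing the object already assumes the repo is on disk.
class EagerBench:
    def __init__(self, repo_path: str):
        self.data_path = os.path.join(repo_path, "bitcracker", "hash_pass")

# After: construction is cheap; paths are resolved only when the benchmark
# is actually selected and about to run.
class LazyBench:
    def __init__(self, repo_path: str):
        self.repo_path = repo_path

    def bin_args(self) -> list:
        data_path = os.path.join(self.repo_path, "bitcracker", "hash_pass")
        return ["-f", os.path.join(data_path, "img_win8_user_hash.txt")]
```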
2 changes: 1 addition & 1 deletion scripts/benchmarks/history.py
@@ -7,7 +7,7 @@
import json
from pathlib import Path
from benches.result import Result, BenchmarkRun
from benches.options import Compare, options
from options import Compare, options
from datetime import datetime, timezone
from utils.utils import run;

37 changes: 20 additions & 17 deletions scripts/benchmarks/main.py
@@ -11,11 +11,12 @@
from benches.llamacpp import *
from benches.umf import *
from benches.test import TestSuite
from benches.options import Compare, options
from options import Compare, options
from output_markdown import generate_markdown
from output_html import generate_html
from history import BenchmarkHistory
from utils.utils import prepare_workdir;
from utils.utils import prepare_workdir
from utils.compute_runtime import *

import argparse
import re
@@ -117,6 +118,11 @@ def process_results(results: dict[str, list[Result]], stddev_threshold_override)
def main(directory, additional_env_vars, save_name, compare_names, filter):
prepare_workdir(directory, INTERNAL_WORKDIR_VERSION)

if options.build_compute_runtime:
cr = get_compute_runtime()
options.extra_ld_libraries.extend(cr.ld_libraries())
options.extra_env_vars.update(cr.env_vars())

suites = [
ComputeBench(directory),
VelocityBench(directory),
@@ -129,15 +135,15 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
benchmarks = []

for s in suites:
print(f"Setting up {type(s).__name__}")
s.setup()
print(f"{type(s).__name__} setup complete.")

for s in suites:
benchmarks += s.benchmarks()
suite_benchmarks = s.benchmarks()
if filter:
suite_benchmarks = [benchmark for benchmark in suite_benchmarks if filter.search(benchmark.name())]

if filter:
benchmarks = [benchmark for benchmark in benchmarks if filter.search(benchmark.name())]
if suite_benchmarks:
print(f"Setting up {type(s).__name__}")
s.setup()
print(f"{type(s).__name__} setup complete.")
benchmarks += suite_benchmarks

for b in benchmarks:
print(b.name())
@@ -241,7 +247,7 @@ def validate_and_parse_env_args(env_args):
parser.add_argument("--save", type=str, help='Save the results for comparison under a specified name.')
parser.add_argument("--compare", type=str, help='Compare results against previously saved data.', action="append", default=["baseline"])
parser.add_argument("--iterations", type=int, help='Number of times to run each benchmark to select a median value.', default=options.iterations)
parser.add_argument("--stddev-threshold", type=float, help='If stddev % is above this threshold, rerun all iterations', default=options.stddev_threshold)
parser.add_argument("--stddev-threshold", type=float, help='If stddev pct is above this threshold, rerun all iterations', default=options.stddev_threshold)
parser.add_argument("--timeout", type=int, help='Timeout for individual benchmarks in seconds.', default=options.timeout)
parser.add_argument("--filter", type=str, help='Regex pattern to filter benchmarks by name.', default=None)
parser.add_argument("--epsilon", type=float, help='Threshold to consider change of performance significant', default=options.epsilon)
@@ -252,12 +258,8 @@ def validate_and_parse_env_args(env_args):
parser.add_argument("--output-html", help='Create HTML output', action="store_true", default=False)
parser.add_argument("--output-markdown", help='Create Markdown output', action="store_true", default=True)
parser.add_argument("--dry-run", help='Do not run any actual benchmarks', action="store_true", default=False)
parser.add_argument(
"--iterations-stddev",
type=int,
help="Max number of iterations of the loop calculating stddev after completed benchmark runs",
default=options.iterations_stddev,
)
parser.add_argument("--build-compute-runtime", help="Fetch and build latest supported compute runtime", action="store_true")
parser.add_argument("--iterations-stddev", type=int, help="Max number of iterations of the loop calculating stddev after completed benchmark runs", default=options.iterations_stddev)

args = parser.parse_args()
additional_env_vars = validate_and_parse_env_args(args.env)
@@ -279,6 +281,7 @@ def validate_and_parse_env_args(env_args):
options.dry_run = args.dry_run
options.umf = args.umf
options.iterations_stddev = args.iterations_stddev
options.build_compute_runtime = args.build_compute_runtime

benchmark_filter = re.compile(args.filter) if args.filter else None

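The new `utils/compute_runtime.py` module is one of the changed files not shown in this view. Based on how `main.py` uses it, `get_compute_runtime()` returns an object exposing `ld_libraries()` and `env_vars()`. A hypothetical sketch of that interface only, with the fetch-and-build logic elided and all paths and variable names assumed rather than taken from this diff:

```python
import os

class ComputeRuntime:
    """Stand-in for the object returned by get_compute_runtime()."""
    def __init__(self, install_dir: str):
        self.install_dir = install_dir  # assumed install layout

    def ld_libraries(self) -> list:
        # directories the freshly built UMD libraries live in
        return [os.path.join(self.install_dir, "lib")]

    def env_vars(self) -> dict:
        # point the Level Zero and OpenCL loaders at the locally built driver;
        # whether the real module uses these exact variables is an assumption
        return {
            "ZE_ENABLE_ALT_DRIVERS": os.path.join(self.install_dir, "lib", "libze_intel_gpu.so"),
            "OCL_ICD_FILENAMES": os.path.join(self.install_dir, "lib", "libigdrcl.so"),
        }

def get_compute_runtime() -> ComputeRuntime:
    # the real implementation fetches and builds intel/compute-runtime;
    # here we only illustrate the returned interface
    return ComputeRuntime(os.path.expanduser("~/.compute-runtime-install"))
```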
scripts/benchmarks/benches/options.py → scripts/benchmarks/options.py
@@ -1,4 +1,4 @@
from dataclasses import dataclass
from dataclasses import dataclass, field
from enum import Enum

class Compare(Enum):
@@ -27,6 +27,9 @@ class Options:
stddev_threshold: float = 0.02
epsilon: float = 0.02
iterations_stddev: int = 5
build_compute_runtime: bool = False
extra_ld_libraries: list[str] = field(default_factory=list)
extra_env_vars: dict = field(default_factory=dict)

options = Options()
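The `field(default_factory=...)` import is needed because dataclasses reject mutable defaults such as lists and dicts; with a factory, each `Options` instance gets its own fresh container. A small illustration:

```python
from dataclasses import dataclass, field

@dataclass
class Options:
    extra_ld_libraries: list = field(default_factory=list)  # fresh list per instance
    extra_env_vars: dict = field(default_factory=dict)       # fresh dict per instance

a, b = Options(), Options()
a.extra_ld_libraries.append("/opt/compute-runtime/lib")      # hypothetical path
assert b.extra_ld_libraries == []                            # b keeps its own empty list
```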

2 changes: 1 addition & 1 deletion scripts/benchmarks/output_markdown.py
@@ -5,7 +5,7 @@

import collections, re
from benches.result import Result
from benches.options import options
from options import options
import math

class OutputLine: