Only benchmark hammer matmul if it is available
Summary: as title

Reviewed By: xuzhao9, chenyang78, sijiac

Differential Revision: D56353714

fbshipit-source-id: 9597955f2285d55b3aa715262aaf4c5909e4102c
bertmaher authored and facebook-github-bot committed Apr 19, 2024
1 parent ed4df21 commit 7795b06
Showing 1 changed file with 8 additions and 4 deletions.
torchbenchmark/operators/gemm/operator.py (8 additions, 4 deletions)
@@ -1,4 +1,3 @@
-
 import csv
 import os
 import statistics
@@ -7,8 +6,6 @@
 import numpy
 import torch
 import triton
-from hammer.ops.triton.triton_matmul import triton_matmul as hstu_triton_matmul
-
 
 from torchbenchmark.util.triton_op import (
     BenchmarkOperator,
@@ -20,6 +17,13 @@
 from .data_io import parse_args, read_shapes_from_csv
 from .triton_matmul import matmul as triton_matmul
 
+try:
+    from hammer.ops.triton.triton_matmul import triton_matmul as hstu_triton_matmul
+
+    HAS_HAMMER = True
+except ImportError:
+    HAS_HAMMER = False
+
 
 BUILDIN_SHAPES = [
     (256, 256, 256, None),
@@ -106,7 +110,7 @@ def aten_matmul(self, a, b, bias) -> Callable:
         else:
             return lambda: torch.matmul(a, b)
 
-    @register_benchmark()
+    @register_benchmark(enabled=HAS_HAMMER)
     def hstu_triton_matmul(self, a, b, bias) -> Callable:
         if not bias == None:
            return lambda: hstu_triton_matmul(a, b) + bias
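The change follows the standard optional-dependency guard: attempt the import once at module load, record the outcome in a module-level flag, and let the registration decorator skip the benchmark when the dependency is missing. Below is a minimal, self-contained sketch of that pattern; fancy_lib, fast_matmul, and the toy register_benchmark stub are hypothetical stand-ins, not the actual torchbenchmark or hammer APIs.

try:
    # Optional dependency: fails cleanly with ImportError when absent.
    from fancy_lib import fast_matmul  # hypothetical package and function
    HAS_FANCY_LIB = True
except ImportError:
    HAS_FANCY_LIB = False

_BENCHMARKS = {}

def register_benchmark(enabled: bool = True):
    # Toy stand-in for the real decorator: only register when enabled.
    def decorator(fn):
        if enabled:
            _BENCHMARKS[fn.__name__] = fn
        return fn  # the function itself is left untouched either way
    return decorator

@register_benchmark(enabled=HAS_FANCY_LIB)
def fancy_matmul(a, b):
    return lambda: fast_matmul(a, b)

if __name__ == "__main__":
    # With fancy_lib missing, the registry is simply empty; nothing crashes.
    print(sorted(_BENCHMARKS))

Gating at registration time, rather than failing inside the benchmark body, keeps the rest of the gemm suite runnable on machines where the optional package is not installed.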
