diff --git a/shark/shark_benchmark_runner.py b/shark/shark_benchmark_runner.py
index fc2b3b5f22..88fa9be634 100644
--- a/shark/shark_benchmark_runner.py
+++ b/shark/shark_benchmark_runner.py
@@ -148,13 +148,6 @@ def benchmark_torch(self, modelname, device="cpu"):
         else:
             frontend_model.cpu()
             input.cpu()
-        # TODO: re-enable as soon as pytorch CUDA context issues are resolved
-        # try:
-        #     frontend_model = torch.compile(
-        #         frontend_model, mode="max-autotune", backend="inductor"
-        #     )
-        # except RuntimeError:
-        #     frontend_model = HFmodel.model
         for i in range(shark_args.num_warmup_iterations):
             frontend_model.forward(input)
 