Commit 5c0d0ce
Add --cold-start to run with fresh inductor cache.
Summary:
Add a `--cold-start` option that runs inductor with a fresh cache. We hope this will stabilize the pt2_compilation_time metric for the CI tests.

Apply the `--cold-start` option to the inductor_speedup test on AIBench to check the stability of the metric.
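For example, a cold-start benchmark run might be invoked roughly as follows (the runner script and model name are illustrative; only `--inductor` and `--cold-start` come from this code):

    python run.py resnet50 -d cuda -t train --inductor --cold-start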

Reviewed By: aaronenyeshi

Differential Revision: D56092036

fbshipit-source-id: 91e7fae258bd6a2c6336334cb19217385987fc44
xuzhao9 authored and facebook-github-bot committed Apr 13, 2024
1 parent 9d68c51 commit 5c0d0ce
Showing 2 changed files with 19 additions and 9 deletions.
9 changes: 9 additions & 0 deletions torchbenchmark/util/backends/torchdynamo.py
@@ -50,6 +50,11 @@ def parse_torchdynamo_args(dynamo_args: List[str]) -> argparse.Namespace:
         action="store_true",
         help="Measure metrics with TorchInductor",
     )
+    parser.add_argument(
+        "--cold-start",
+        action="store_true",
+        help="Use a fresh inductor and triton cachedir when running each model, to force cold-start compile.",
+    )
     parser.add_argument(
         "--inductor-compile-mode",
         default=None,
@@ -221,6 +226,10 @@ def apply_torchdynamo_args(
             "--dynamo_disable_optimizer_step is set to True, but the optimizer could not be found on this model"
         )

+    if args.cold_start:
+        from torch._inductor.utils import fresh_inductor_cache
+        fresh_inductor_context = lambda: fresh_inductor_cache()
+        model.run_contexts.append(fresh_inductor_context)
    if model.test == "train":
        if is_staged_train_test(model):
            model.forward = optimize_ctx(model.forward)
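For reference, a minimal sketch of what the appended run context does per invocation; `fresh_inductor_cache` is the real torch._inductor.utils helper used above, while the function `f` and its input are hypothetical:

    import torch
    from torch._inductor.utils import fresh_inductor_cache

    def f(x):
        return torch.sin(x) + torch.cos(x)

    compiled = torch.compile(f)
    with fresh_inductor_cache():
        # Inductor and Triton write artifacts to a temporary cache directory
        # for the duration of this block, so the compile triggered below
        # cannot hit a warm cache left over from an earlier run.
        compiled(torch.randn(8))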
19 changes: 10 additions & 9 deletions torchbenchmark/util/model.py
@@ -382,15 +382,16 @@ def _invoke_staged_train_test(self, num_batch: int) -> None:
            self.example_inputs = next(input_generator)
            # cast inputs if needed
            apply_decoration_args(self, self.dargs)
-            if optimizer is not None:
-                optimizer.zero_grad()
-            with nested(*self.forward_contexts):
-                losses = self.forward()
-            with nested(*self.backward_contexts):
-                self.backward(losses)
-            if optimizer is not None:
-                with nested(*self.optimizer_contexts):
-                    self.optimizer_step()
+            with nested(*self.run_contexts):
+                if optimizer is not None:
+                    optimizer.zero_grad()
+                with nested(*self.forward_contexts):
+                    losses = self.forward()
+                with nested(*self.backward_contexts):
+                    self.backward(losses)
+                if optimizer is not None:
+                    with nested(*self.optimizer_contexts):
+                        self.optimizer_step()
        return None

    def invoke(self) -> Optional[Tuple[torch.Tensor]]:
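For context, `run_contexts` holds zero-argument callables that each return a context manager, and `nested` enters all of them around every training iteration. A minimal sketch of that pattern, assuming an ExitStack-based helper; the actual torchbenchmark `nested` implementation may differ:

    import contextlib

    @contextlib.contextmanager
    def nested(*context_factories):
        # Enter every context produced by the factories, in order, and
        # exit them in reverse order when the block ends.
        with contextlib.ExitStack() as stack:
            for factory in context_factories:
                stack.enter_context(factory())
            yield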
