Enable compiled Adam in the benchmarks (#116093)
Summary:
Commit b697bcc583 of mlazos/compiled-adam2 at https://hud.pytorch.org/benchmark/compilers is an initial benchmark run.

This increases compile time by 20s for TorchBench and HF, and by 30s for TIMM.

I expect the compile time to come down significantly with fake tensor prop caching.
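
For context, "compiled Adam" here means letting torch.compile trace the optimizer step instead of skipping it. A minimal sketch of the pattern outside the benchmark harness (the toy model and shapes are illustrative, not part of this change; assumes a CUDA device):

import torch

# Illustrative toy model; the benchmarks use TorchBench/HF/TIMM models instead.
model = torch.nn.Linear(64, 64, device="cuda")

# capturable=True keeps Adam's step count and state updates as device tensors,
# which is what allows the whole step to be captured and compiled.
opt = torch.optim.Adam(model.parameters(), lr=0.01, capturable=True, foreach=True)

@torch.compile
def opt_step():
    opt.step()

x = torch.randn(8, 64, device="cuda")
model(x).sum().backward()
opt_step()  # the first call pays the extra compile time measured above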

X-link: pytorch/pytorch#116093
Approved by: https://github.com/janeyx99

Reviewed By: izaitsevfb, jeanschmidt

Differential Revision: D52380708

Pulled By: mlazos

fbshipit-source-id: 1dbe7200d8d55da18a05c3f08ea516316e3a8336
mlazos authored and facebook-github-bot committed Jan 2, 2024
1 parent 5be3269 commit 722b8b1
Showing 1 changed file with 9 additions and 2 deletions.
userbenchmark/dynamo/dynamobench/common.py (9 additions, 2 deletions)
@@ -235,6 +235,12 @@ class CI(NamedTuple):
     "mobilevit_s",
     "pytorch_CycleGAN_and_pix2pix",
     "vision_maskrcnn",
+    "resmlp_12_224",
+    "dlrm",
+    "resnet50",
+    "dm_nfnet_f0",
+    "pit_b_224",
+    "tf_mixnet_l",
 }


@@ -1974,7 +1980,9 @@ def init_optimizer(self, name, device, params):
             if (name in CI_USE_SGD and self.args.ci) or name in BENCHMARK_USE_SGD:
                 self.optimizer = torch.optim.SGD(params, lr=0.01, foreach=True)
             else:
-                self.optimizer = torch.optim.Adam(params, lr=0.01, foreach=True)
+                self.optimizer = torch.optim.Adam(
+                    params, lr=0.01, capturable=True, foreach=True
+                )
         else:
             self.optimizer = None

Expand Down Expand Up @@ -2165,7 +2173,6 @@ def optimizer_zero_grad(self, mod):
else:
mod.zero_grad(True)

@torch._disable_dynamo(recursive=True)
def optimizer_step(self):
if self.optimizer is not None:
self.optimizer.step()
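
Dropping @torch._disable_dynamo(recursive=True) is the companion change: the decorator previously disabled Dynamo for optimizer_step, so even a capturable Adam always ran eagerly. With it removed, the step can be traced like any other code. A hedged way to sanity-check that the step captures cleanly, assuming the opt instance from the sketch above and a recent PyTorch where torch._dynamo.explain returns an ExplainOutput:

import torch

# Gradients must already be populated, e.g. by the backward() call in the sketch above.
explanation = torch._dynamo.explain(opt.step)()
print(explanation.graph_break_count)  # 0 means the Adam step was captured in one graph

# Alternatively, running the benchmark with TORCH_LOGS="graph_breaks,recompiles" set
# will log any breaks or recompilations caused by the optimizer step.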
