Remove more torch.no_grad()
xuzhao9 committed May 3, 2024
1 parent 7278233 commit 3a26c36
Showing 15 changed files with 25 additions and 41 deletions.
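Every hunk follows the same pattern: the model's eval() method no longer opens its own torch.no_grad() or torch.inference_mode() block, so gradient tracking is presumably disabled once at the call site instead of inside each model. A minimal sketch of that call-site pattern, assuming a hypothetical run_eval helper that is not part of this commit:

import torch

def run_eval(benchmark_model):
    # Hypothetical harness-side wrapper: disable gradient tracking once,
    # around the call, rather than inside every model's eval() body.
    with torch.no_grad():
        return benchmark_model.eval()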
3 changes: 1 addition & 2 deletions torchbenchmark/models/detectron2_maskrcnn/__init__.py
@@ -102,8 +102,7 @@ def train(self):
    def eval(self) -> Tuple[torch.Tensor]:
        self.model.eval()
        idx = 0
-        with torch.no_grad():
-            out = self.model(self.example_inputs[idx])
+        out = self.model(self.example_inputs[idx])
        # retrieve output tensors
        outputs = []
        for item in out:
3 changes: 1 addition & 2 deletions torchbenchmark/models/doctr_det_predictor/__init__.py
@@ -37,6 +37,5 @@ def get_module(self):
        return self.model, (self.example_inputs,)

    def eval(self) -> Tuple[torch.Tensor]:
-        with torch.inference_mode():
-            out = self.model(self.example_inputs, return_model_output=True)
+        out = self.model(self.example_inputs, return_model_output=True)
        return (out["out_map"],)
3 changes: 1 addition & 2 deletions torchbenchmark/models/doctr_reco_predictor/__init__.py
@@ -35,6 +35,5 @@ def get_module(self):
        return self.model, (self.example_inputs,)

    def eval(self) -> Tuple[torch.Tensor]:
-        with torch.inference_mode():
-            out = self.model(self.example_inputs, return_model_output=True)
+        out = self.model(self.example_inputs, return_model_output=True)
        return (out["out_map"],)
5 changes: 2 additions & 3 deletions torchbenchmark/models/fastNLP_Bert/__init__.py
@@ -117,9 +117,8 @@ def get_module(self):
    def eval(self) -> Tuple[torch.Tensor]:
        self._mode(self.model, is_test=True)
        self._predict_func = self.model.forward
-        with torch.no_grad():
-            for batch_x, _batch_y in self.example_inputs:
-                pred_dict = self._data_forward(self._predict_func, batch_x)
+        for batch_x, _batch_y in self.example_inputs:
+            pred_dict = self._data_forward(self._predict_func, batch_x)
        # return a tuple of Tensors
        return (pred_dict["pred_start"], pred_dict["pred_end"])
3 changes: 1 addition & 2 deletions torchbenchmark/models/functorch_dp_cifar10/__init__.py
@@ -99,6 +99,5 @@ def eval(self) -> Tuple[torch.Tensor]:
        (images, ) = self.example_inputs
        model.eval()
        targets = self.example_target
-        with torch.no_grad():
-            out = model(images)
+        out = model(images)
        return (out, )
@@ -36,8 +36,7 @@ def get_module(self):
        return self.model, self.example_inputs

    def eval(self):
-        with torch.no_grad():
-            out = self.model(*self.example_inputs)
+        out = self.model(*self.example_inputs)
        return (out,)

    def train(self):
3 changes: 1 addition & 2 deletions torchbenchmark/models/nanogpt/__init__.py
@@ -64,6 +64,5 @@ def optimizer_step(self):

    def eval(self):
        self.model.eval()
-        with torch.no_grad():
-            out = self.model.generate(*self.example_inputs, self.generate_config.max_new_tokens, self.generate_config.temperature, self.generate_config.top_k)
+        out = self.model.generate(*self.example_inputs, self.generate_config.max_new_tokens, self.generate_config.temperature, self.generate_config.top_k)
        return (out,)
3 changes: 1 addition & 2 deletions torchbenchmark/models/phlippe_densenet/__init__.py
@@ -192,6 +192,5 @@ def eval(self):
        model = self.model
        (images, ) = self.example_inputs
        model.eval()
-        with torch.no_grad():
-            out = model(images)
+        out = model(images)
        return (out,)
3 changes: 1 addition & 2 deletions torchbenchmark/models/phlippe_resnet/__init__.py
@@ -155,6 +155,5 @@ def train(self):

    def eval(self):
        self.model.eval()
-        with torch.no_grad():
-            out=self.model(self.images)
+        out=self.model(self.images)
        return (out,)
9 changes: 4 additions & 5 deletions torchbenchmark/models/speech_transformer/config.py
@@ -251,9 +251,8 @@ def __init__(self, traincfg, num_eval_batch=1, device="cuda"):
            break

    def eval(self):
-        with torch.no_grad():
-            for input, input_length in self.example_inputs:
-                nbest_hyps = self.model.recognize(
-                    input, input_length, self.char_list, self
-                )
+        for input, input_length in self.example_inputs:
+            nbest_hyps = self.model.recognize(
+                input, input_length, self.char_list, self
+            )
        return nbest_hyps
10 changes: 4 additions & 6 deletions torchbenchmark/models/torch_multimodal_clip/__init__.py
@@ -77,11 +77,9 @@ def train(self):

    def eval(self):
        self.model.eval()
-
-        with torch.no_grad():
-            image_embedding, text_embedding = self.model(
-                self.image_tensor, self.text_tensor
-            )
-            score = image_embedding @ text_embedding.t()
+        image_embedding, text_embedding = self.model(
+            self.image_tensor, self.text_tensor
+        )
+        score = image_embedding @ text_embedding.t()

        return self.text[torch.argmax(score)]
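For context, the scoring logic that remains after this change builds an image-text similarity matrix and returns the caption with the highest score. A standalone sketch with made-up shapes (the embedding size and caption count are assumptions, not taken from the model):

import torch

image_embedding = torch.randn(1, 512)          # one image embedding (assumed size)
text_embedding = torch.randn(4, 512)           # four candidate caption embeddings
score = image_embedding @ text_embedding.t()   # similarity matrix, shape (1, 4)
best_caption_index = torch.argmax(score)       # index of the best-matching caption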
9 changes: 4 additions & 5 deletions torchbenchmark/models/vision_maskrcnn/__init__.py
@@ -117,10 +117,9 @@ def train(self):

    def eval(self) -> Tuple[torch.Tensor]:
        self.model.eval()
-        with torch.no_grad():
-            for _batch_id, (images, _targets) in zip(
-                range(self.NUM_OF_BATCHES), self.data_loader
-            ):
-                out = self.model(images)
+        for _batch_id, (images, _targets) in zip(
+            range(self.NUM_OF_BATCHES), self.data_loader
+        ):
+            out = self.model(images)
        out = list(map(lambda x: x.values(), out))
        return tuple(itertools.chain(*out))
3 changes: 1 addition & 2 deletions torchbenchmark/util/framework/detectron2/model_factory.py
@@ -211,8 +211,7 @@ def train(self):

    def eval(self) -> Tuple[torch.Tensor]:
        batch_id = 0
-        with torch.no_grad():
-            out = self.model(self.example_inputs[batch_id])
+        out = self.model(self.example_inputs[batch_id])
        # retrieve output tensors
        outputs = []
        for item in out:
3 changes: 1 addition & 2 deletions torchbenchmark/util/framework/diffusers/model_factory.py
@@ -53,6 +53,5 @@ def train(self):
        raise NotImplementedError(f"Train is not implemented for model {self.name}")

    def eval(self):
-        with torch.no_grad():
-            images = self.pipe(*self.example_inputs).images
+        images = self.pipe(*self.example_inputs).images
        return images
3 changes: 1 addition & 2 deletions torchbenchmark/util/framework/gnn/model_factory.py
@@ -179,8 +179,7 @@ def __init__(self, model_name, test, device, batch_size=None, extra_args=[]):

    def eval(self):
        self.model.eval()
-        with torch.no_grad():
-            return (self.model(*self.example_inputs),)
+        return (self.model(*self.example_inputs),)

    def train(self):
        # NB: This is a little different than test_basic_gnn.py, as we
