
Commit

Fix
regisss committed Sep 24, 2024
1 parent 47ad03c commit 94c23ba
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions optimum/habana/transformers/trainer.py
@@ -1584,8 +1584,8 @@ def training_step(self, model: torch.nn.Module, inputs: Dict[str, Union[torch.Te
             `torch.Tensor`: The tensor with training loss on this batch.
         """
         model.train()
-        if hasattr(self.optimizer, "train") and callable(self.optimizer.train):
-            self.optimizer.train()
+        # if hasattr(self.optimizer, "train") and callable(self.optimizer.train):
+        #     self.optimizer.train()
 
         inputs = self._prepare_inputs(inputs)
 
@@ -1824,8 +1824,8 @@ def evaluation_loop(
             self.deepspeed = self.model_wrapped
 
         model.eval()
-        if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval):
-            self.optimizer.eval()
+        # if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval):
+        #     self.optimizer.eval()
 
         # Do not use HPU graphs if the training is ongoing because it detaches gradients
         if args.use_hpu_graphs_for_inference and not self.is_in_train:
@@ -2233,8 +2233,8 @@ def prediction_loop(
         if self.is_deepspeed_enabled:
            self.deepspeed = self.model_wrapped
         model.eval()
-        if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval):
-            self.optimizer.eval()
+        # if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval):
+        #     self.optimizer.eval()
 
         # Do not use HPU graphs if the training is ongoing because it detaches gradients
         if args.use_hpu_graphs_for_inference and not self.is_in_train:
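For context, the calls commented out here appear to mirror the guard used in upstream `transformers` for optimizers that expose their own `train()`/`eval()` modes (schedule-free optimizers, for example). Below is a minimal, self-contained sketch of that guard pattern, not the Optimum Habana code itself; `ToyScheduleFreeLikeOptimizer` and `set_optimizer_mode` are hypothetical names used only for illustration.

```python
# Minimal sketch of the hasattr/callable guard from the removed lines, outside of
# any Trainer. All names below are hypothetical and exist only for illustration.
import torch


class ToyScheduleFreeLikeOptimizer(torch.optim.SGD):
    """Toy stand-in for an optimizer that, like schedule-free optimizers,
    must be switched between train and eval modes alongside the model."""

    def __init__(self, params, lr=0.1):
        super().__init__(params, lr=lr)
        self.mode = "train"

    def train(self):
        self.mode = "train"

    def eval(self):
        self.mode = "eval"


def set_optimizer_mode(optimizer, mode: str) -> None:
    # Same idea as the guard in the diff: only call optimizer.train()/optimizer.eval()
    # when the optimizer actually defines it; plain optimizers are left untouched.
    hook = getattr(optimizer, mode, None)
    if callable(hook):
        hook()


model = torch.nn.Linear(4, 2)
plain_opt = torch.optim.SGD(model.parameters(), lr=0.1)       # no train()/eval() methods
modal_opt = ToyScheduleFreeLikeOptimizer(model.parameters())  # has train()/eval() methods

set_optimizer_mode(plain_opt, "eval")  # no-op: SGD defines no eval()
set_optimizer_mode(modal_opt, "eval")  # switches the toy optimizer to eval mode
print(modal_opt.mode)                  # "eval"
```

Because the guard is a no-op for standard optimizers, disabling these calls should only matter for optimizers that actually define `train()`/`eval()` methods.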
