Update to use torch.linalg.vector_norm
loadams committed Jan 14, 2025
1 parent c283317 commit 8ff83e2
Showing 2 changed files with 3 additions and 3 deletions.
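A note on the motivation (inferred, not stated in the commit message): torch.linalg.norm computes a matrix norm for 2-D inputs (e.g. ord=2 becomes the spectral norm), while torch.linalg.vector_norm always flattens its input and computes an element-wise vector norm, which is what these gradient-norm paths intend. For the 1-D tensor produced by torch.stack(norm_groups), the two calls agree, so the change is behavior-preserving here; a minimal illustrative sketch:

import torch

# Illustrative sketch (not part of the patch): on the 1-D tensor produced by
# torch.stack(norm_groups), the old and new calls return the same value.
norm_groups = [torch.tensor(1.0), torch.tensor(2.0), torch.tensor(2.0)]
stacked = torch.stack(norm_groups)              # shape (3,)

old = torch.linalg.norm(stacked)                # 2-norm of a 1-D tensor
new = torch.linalg.vector_norm(stacked)         # same value, vector semantics made explicit
assert torch.allclose(old, new)                 # sqrt(1 + 4 + 4) == 3.0

# The difference shows up for >=2-D inputs: linalg.norm with ord=2 would compute
# a matrix (spectral) norm, while vector_norm flattens and stays element-wise.
m = torch.randn(4, 4)
assert torch.allclose(torch.linalg.vector_norm(m), torch.linalg.norm(m.flatten()))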
2 changes: 1 addition & 1 deletion deepspeed/runtime/zero/stage3.py
@@ -2101,7 +2101,7 @@ def step(self, closure=None):
            return

        norm_groups = self._get_norm_groups()
-       scaled_global_grad_norm = torch.linalg.norm(torch.stack(norm_groups))
+       scaled_global_grad_norm = torch.linalg.vector_norm(torch.stack(norm_groups))

        # Stash unscaled gradient norm
        self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
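The context line below the change explains the division that follows: gradients in this path are scaled by the loss scale, so the combined norm is divided by it to recover the unscaled gradient norm that step() stashes as _global_grad_norm. A minimal sketch of that arithmetic with made-up numbers:

import torch

# Minimal sketch (loss_scale and the per-group norms are hypothetical values):
# gradients were multiplied by loss_scale, so the combined norm is divided by
# it to recover the unscaled global gradient norm.
loss_scale = 1024.0
norm_groups = [torch.tensor(512.0), torch.tensor(1024.0)]    # scaled per-group norms

scaled_global_grad_norm = torch.linalg.vector_norm(torch.stack(norm_groups))
global_grad_norm = scaled_global_grad_norm / loss_scale      # unscaled norm to stash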
4 changes: 2 additions & 2 deletions deepspeed/runtime/zero/stage_1_and_2.py
@@ -1691,7 +1691,7 @@ def get_grad_norm_direct(self, gradients, params, norm_type=2):
                    continue
                if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
                    all_norms.append(
-                       torch.linalg.norm(g.data.double().detach(),
+                       torch.linalg.vector_norm(g.data.double().detach(),
                                          ord=norm_type).to(get_accelerator().current_device_name()))
            if len(all_norms) > 0:
                total_norm = torch.stack(all_norms).square().sum().float()
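For context on the accumulation visible in this hunk (a sketch under the assumption that the rest of get_grad_norm_direct reduces the squared sum across ranks before taking a square root): summing squared per-tensor 2-norms and then taking the root is equivalent to one vector norm over all gradient elements, so per-tensor vector_norm calls compose correctly.

import torch

# Sketch (assumption: the full implementation also all-reduces the squared sum
# across ranks before the square root). Per-tensor 2-norms, squared and summed,
# give the same result as one vector norm over the concatenated elements.
grads = [torch.randn(10), torch.randn(5, 5)]

per_tensor = [torch.linalg.vector_norm(g.double()) for g in grads]
total = torch.stack(per_tensor).square().sum().sqrt()

flat = torch.cat([g.double().flatten() for g in grads])
assert torch.allclose(total, torch.linalg.vector_norm(flat))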
@@ -1796,7 +1796,7 @@ def scaled_global_norm(self, norm_type=2):
        self._average_expert_grad_norms(norm_groups)

        # calculating L2 norm
-       return torch.linalg.norm(torch.stack(norm_groups), ord=norm_type)
+       return torch.linalg.vector_norm(torch.stack(norm_groups), ord=norm_type)

    def get_bit16_param_group(self, group_no):
        bit16_partitions = self.parallel_partitioned_bit16_groups[group_no]
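One further note on the last hunk, which forwards an explicit ord: for 1-D inputs both functions interpret ord the same way (1, 2, inf, ...), so passing norm_type through vector_norm is a drop-in change. A quick illustrative check:

import torch

# Quick check (illustrative only): for a 1-D input, ord means the same thing
# to torch.linalg.norm and torch.linalg.vector_norm.
x = torch.tensor([3.0, -4.0])
for norm_type in (1, 2, float("inf")):
    assert torch.allclose(torch.linalg.norm(x, ord=norm_type),
                          torch.linalg.vector_norm(x, ord=norm_type))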

0 comments on commit 8ff83e2
