correct error msg
haolin-nju committed Jan 6, 2025
1 parent 5e07405 commit 13a828f
Showing 2 changed files with 2 additions and 1 deletion.
1 change: 1 addition & 0 deletions chatlearn/models/vllm_module_v2.py
@@ -48,6 +48,7 @@ def __init__(self, *args, **kwargs):
        if 'worker_module_name' in kwargs and 'worker_class_name' in kwargs:
            RayWorkerWrapper.__init__(self, **kwargs) # pylint: disable=non-parent-init-called
            os.environ['VLLM_HOST_IP'] = self.get_address()

        self.tokenizer = None
        self._model = None
        self.set_vllm_pp_layer_partition()
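
For context, the line setting VLLM_HOST_IP matters because vLLM reads that environment variable when resolving the address it uses for distributed communication. A minimal sketch of the idea, assuming self.get_address() ultimately returns the Ray node IP; the standalone get_address helper below is a hypothetical stand-in, not ChatLearn's code:

```python
import os

import ray


def get_address() -> str:
    """Hypothetical stand-in for the module's self.get_address()."""
    # Resolve the IP of the node this Ray worker runs on.
    return ray.util.get_node_ip_address()


# vLLM consults VLLM_HOST_IP when picking the host address for distributed
# communication, so each worker exports its own node IP before engine setup.
os.environ['VLLM_HOST_IP'] = get_address()
```
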
2 changes: 1 addition & 1 deletion chatlearn/synchronizer/parameter_sync.py
@@ -1185,7 +1185,7 @@ def setup_rank_mapping(self):
             and get_args().runtime_args.routed_expert_regrouping_comm_type == ROUTED_EXPERT_REGROUPING_COMM_TYPE.ALLTOALL
         ):
             raise NotImplementedError(
-                "All-to-all routed expert weight is only supported when src TP size * src EP size = dst TP size. "
+                "all-to-all routed expert weight is only supported when src TP size * src EP size = dst TP size. "
                 "Please consider setting `routed_expert_regrouping_comm_type` to allgather or adjusting the model's parallel size."
             )
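
The reworded error message encodes a simple compatibility condition between the source and destination parallel layouts. A minimal illustrative check, assuming the condition is exactly as stated in the message; the function name and sample sizes below are hypothetical, not ChatLearn's API:

```python
# Illustrative check (not ChatLearn's code) for the condition named in the error
# message: all-to-all regrouping of routed expert weights assumes
# src TP size * src EP size == dst TP size.
def alltoall_routed_experts_supported(src_tp: int, src_ep: int, dst_tp: int) -> bool:
    return src_tp * src_ep == dst_tp


# Example: src TP=2, EP=4 regroups cleanly onto dst TP=8, but not onto dst TP=4,
# where `routed_expert_regrouping_comm_type` would need to be allgather instead.
assert alltoall_routed_experts_supported(2, 4, 8)
assert not alltoall_routed_experts_supported(2, 4, 4)
```
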
