From 474a3288cd5a89c64c37f1e51978a3d031a87b4a Mon Sep 17 00:00:00 2001
From: gyou2021
Date: Wed, 9 Oct 2024 23:23:16 +0800
Subject: [PATCH] Enabled Qwen2-MoE Tensor Parallelism (TP) inference (#6551)

Modified _replace in auto_tp.py: the change keeps the 'shared_expert_gate' and
'gate' layers of Qwen2-MoE as their original torch.nn.Linear type instead of
converting them to LinearLayer. As a result, their weights are not split
across multiple HPU/GPU cards, and Qwen2-MoE can run on multiple HPU/GPU
cards. Because the 'gate' weights are not split across cards, the
corresponding all-gather operations are not needed, which may improve
performance.

---------

Co-authored-by: Logan Adams <114770087+loadams@users.noreply.github.com>
---
 deepspeed/module_inject/auto_tp.py              | 3 ++-
 docs/_tutorials/automatic-tensor-parallelism.md | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)
 mode change 100644 => 100755 deepspeed/module_inject/auto_tp.py
 mode change 100644 => 100755 docs/_tutorials/automatic-tensor-parallelism.md

diff --git a/deepspeed/module_inject/auto_tp.py b/deepspeed/module_inject/auto_tp.py
old mode 100644
new mode 100755
index 52d7c95ec9d8..221d490a37d2
--- a/deepspeed/module_inject/auto_tp.py
+++ b/deepspeed/module_inject/auto_tp.py
@@ -333,7 +333,8 @@ def _replace(self, child, name, conv_linear_layer):
         weight_shape = child.weight.shape
         mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group)
         # For mixtral-7x8b, need to skip MoE gate linear replace.
-        if name == "block_sparse_moe.gate":
+        if name == "block_sparse_moe.gate" or (('mlp.shared_expert_gate' == name or 'mlp.gate' == name)
+                                               and 'qwen2_moe' in str(type(self.module))):
             return child
         # For Yuan model
         if 'Yuan' in str(self.module):
diff --git a/docs/_tutorials/automatic-tensor-parallelism.md b/docs/_tutorials/automatic-tensor-parallelism.md
old mode 100644
new mode 100755
index e1903ed05892..d5a08b27bf4d
--- a/docs/_tutorials/automatic-tensor-parallelism.md
+++ b/docs/_tutorials/automatic-tensor-parallelism.md
@@ -158,6 +158,7 @@ The following model families have been successfully tested with automatic tensor
 - plbart
 - qwen
 - qwen2
+- qwen2-moe
 - reformer
 - roberta
 - roformer
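
Below is a minimal usage sketch (not part of the diff) of how a qwen2_moe checkpoint might be run with DeepSpeed automatic tensor parallelism once this change is in place. The checkpoint name, dtype, prompt, and launch command are illustrative assumptions, not taken from this commit; the script would be launched with something like `deepspeed --num_gpus <N> run_qwen2_moe_tp.py`.

# Usage sketch (assumptions noted above): Qwen2-MoE inference with DeepSpeed automatic TP.
import os

import torch
import deepspeed
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen1.5-MoE-A2.7B"  # assumed example qwen2_moe checkpoint
world_size = int(os.getenv("WORLD_SIZE", "1"))  # set by the deepspeed launcher

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)

# replace_with_kernel_inject=False takes the automatic-TP path (auto_tp.py), which
# shards most Linear layers across ranks; with this patch, 'mlp.gate' and
# 'mlp.shared_expert_gate' stay as torch.nn.Linear and are replicated instead of sliced.
model = deepspeed.init_inference(model,
                                 tensor_parallel={"tp_size": world_size},
                                 dtype=torch.bfloat16,
                                 replace_with_kernel_inject=False)

prompt = "Give me a short introduction to large language models."
inputs = tokenizer(prompt, return_tensors="pt").to(model.module.device)
outputs = model.module.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))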