diff --git a/fastdeploy/distributed/communication.py b/fastdeploy/distributed/communication.py
index 95334f63e..67fc33e83 100644
--- a/fastdeploy/distributed/communication.py
+++ b/fastdeploy/distributed/communication.py
@@ -20,8 +20,6 @@
 import paddle
 import paddle.distributed as dist
 from paddle.distributed import fleet
 
-from fastdeploy.distributed.parallel_state import get_tensor_model_parallel_world_size
-
 _TP_AR = None
 
@@ -39,10 +37,9 @@ def use_custom_allreduce(custom_all_reduce_max_bytes: int = 8192 * 1024):
     hcg = fleet.get_hybrid_communicate_group()
     model_parallel_group = hcg.get_model_parallel_group()
     global _TP_AR
-    if get_tensor_model_parallel_world_size() > 1 and paddle.is_compiled_with_cuda():
-        from fastdeploy.distributed.custom_all_reduce import CustomAllreduce
+    from fastdeploy.distributed.custom_all_reduce import CustomAllreduce
 
-        _TP_AR = CustomAllreduce(model_parallel_group, custom_all_reduce_max_bytes)
+    _TP_AR = CustomAllreduce(model_parallel_group, custom_all_reduce_max_bytes)
 
 
 try:
diff --git a/fastdeploy/distributed/parallel_state.py b/fastdeploy/distributed/parallel_state.py
deleted file mode 100644
index a9220b743..000000000
--- a/fastdeploy/distributed/parallel_state.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-
-from paddle.distributed import fleet
-
-
-def get_tensor_model_parallel_world_size():
-    """Return world size for the tensor model parallel group."""
-    hcg = fleet.get_hybrid_communicate_group()
-    mp_size = hcg.get_model_parallel_world_size()
-    return mp_size
-
-
-def get_tensor_model_parallel_rank():
-    """Return my rank for the tensor model parallel group."""
-    hcg = fleet.get_hybrid_communicate_group()
-    mp_rank = hcg.get_model_parallel_rank()
-    return mp_rank
diff --git a/fastdeploy/worker/gpu_worker.py b/fastdeploy/worker/gpu_worker.py
index 53619f8f9..bfdc92f1d 100644
--- a/fastdeploy/worker/gpu_worker.py
+++ b/fastdeploy/worker/gpu_worker.py
@@ -68,7 +68,11 @@ class GpuWorker(WorkerBase):
             gc.collect()
             paddle.device.cuda.empty_cache()
 
-            if self.parallel_config.enable_custom_all_reduce:
+            if (
+                self.parallel_config.enable_custom_all_reduce
+                and self.parallel_config.tensor_parallel_size > 1
+                and paddle.is_compiled_with_cuda()
+            ):
                 from fastdeploy.distributed.communication import use_custom_allreduce
 
                 use_custom_allreduce()
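
Reviewer note: after this change, use_custom_allreduce() unconditionally constructs a CustomAllreduce, so the tensor-parallel-size and CUDA checks live solely at the call site in GpuWorker. A minimal sketch of the consolidated guard, assuming a parallel_config object with the fields shown in the diff (the standalone helper name maybe_enable_custom_allreduce is hypothetical, for illustration only):

import paddle


def maybe_enable_custom_allreduce(parallel_config):
    # Mirrors the new call-site guard in GpuWorker: the custom all-reduce
    # only pays off when tensor parallelism spans more than one rank and
    # Paddle was built with CUDA support.
    if (
        parallel_config.enable_custom_all_reduce
        and parallel_config.tensor_parallel_size > 1
        and paddle.is_compiled_with_cuda()
    ):
        from fastdeploy.distributed.communication import use_custom_allreduce

        use_custom_allreduce()

Any new caller of use_custom_allreduce() must now perform this check itself; invoking it with tensor_parallel_size == 1 or on a non-CUDA build would still attempt to construct the custom all-reduce buffers.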