Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-05 16:48:03 +08:00

Commit: delete parallel_state.py (#3250)
@@ -20,8 +20,6 @@ import paddle
 import paddle.distributed as dist
 from paddle.distributed import fleet

-from fastdeploy.distributed.parallel_state import get_tensor_model_parallel_world_size
-
 _TP_AR = None


@@ -39,7 +37,6 @@ def use_custom_allreduce(custom_all_reduce_max_bytes: int = 8192 * 1024):
     hcg = fleet.get_hybrid_communicate_group()
     model_parallel_group = hcg.get_model_parallel_group()
     global _TP_AR
-    if get_tensor_model_parallel_world_size() > 1 and paddle.is_compiled_with_cuda():
-        from fastdeploy.distributed.custom_all_reduce import CustomAllreduce
+    from fastdeploy.distributed.custom_all_reduce import CustomAllreduce

-        _TP_AR = CustomAllreduce(model_parallel_group, custom_all_reduce_max_bytes)
+    _TP_AR = CustomAllreduce(model_parallel_group, custom_all_reduce_max_bytes)
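The two hunks above touch the module that defines use_custom_allreduce (referenced in the last hunk of this commit as fastdeploy.distributed.communication): the import of the soon-to-be-deleted helper goes away, and the world-size/CUDA guard is dropped from the function body. For orientation, the function after the change reads roughly as follows; this is a sketch reconstructed from the hunks, so any parts of the module the hunks do not touch are omitted:

import paddle.distributed as dist
from paddle.distributed import fleet

# Module-level handle to the custom all-reduce helper, set lazily below.
_TP_AR = None


def use_custom_allreduce(custom_all_reduce_max_bytes: int = 8192 * 1024):
    hcg = fleet.get_hybrid_communicate_group()
    model_parallel_group = hcg.get_model_parallel_group()
    global _TP_AR
    # Imported lazily so the CUDA extension is only loaded when requested;
    # after this commit the caller is responsible for checking TP size and CUDA support.
    from fastdeploy.distributed.custom_all_reduce import CustomAllreduce

    _TP_AR = CustomAllreduce(model_parallel_group, custom_all_reduce_max_bytes)

Note that the function now builds a CustomAllreduce unconditionally, which is why the caller-side guard added in the last hunk matters.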
@@ -1,31 +0,0 @@
-"""
-# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-
-from paddle.distributed import fleet
-
-
-def get_tensor_model_parallel_world_size():
-    """Return world size for the tensor model parallel group."""
-    hcg = fleet.get_hybrid_communicate_group()
-    mp_size = hcg.get_model_parallel_world_size()
-    return mp_size
-
-
-def get_tensor_model_parallel_rank():
-    """Return my rank for the tensor model parallel group."""
-    hcg = fleet.get_hybrid_communicate_group()
-    mp_rank = hcg.get_model_parallel_rank()
-    return mp_rank
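The hunk above is the entire deleted fastdeploy/distributed/parallel_state.py, the file named in the commit title and imported in the first hunk. Both helpers were thin wrappers over fleet's hybrid communicate group, so callers can query it directly. A minimal sketch of the drop-in equivalent, assuming fleet has already been initialised by the caller:

from paddle.distributed import fleet

# Direct replacements for the deleted helpers.
hcg = fleet.get_hybrid_communicate_group()
tp_world_size = hcg.get_model_parallel_world_size()  # was get_tensor_model_parallel_world_size()
tp_rank = hcg.get_model_parallel_rank()  # was get_tensor_model_parallel_rank()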
@@ -68,7 +68,11 @@ class GpuWorker(WorkerBase):

         gc.collect()
         paddle.device.cuda.empty_cache()
-        if self.parallel_config.enable_custom_all_reduce:
+        if (
+            self.parallel_config.enable_custom_all_reduce
+            and self.parallel_config.tensor_parallel_size > 1
+            and paddle.is_compiled_with_cuda()
+        ):
             from fastdeploy.distributed.communication import use_custom_allreduce

             use_custom_allreduce()
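The last hunk moves the guard to the call site in GpuWorker: instead of use_custom_allreduce checking the fleet-derived world size internally, the worker now checks parallel_config.tensor_parallel_size and paddle.is_compiled_with_cuda() before calling it. A hedged sketch of the resulting pattern as a standalone function; the function name and the parallel_config argument are illustrative, and only the fields shown in the hunk are assumed:

import gc

import paddle


def maybe_enable_custom_allreduce(parallel_config) -> None:
    """Illustrative mirror of the GpuWorker logic in the hunk above."""
    gc.collect()
    paddle.device.cuda.empty_cache()
    if (
        parallel_config.enable_custom_all_reduce
        and parallel_config.tensor_parallel_size > 1
        and paddle.is_compiled_with_cuda()
    ):
        # use_custom_allreduce() no longer re-checks these conditions itself.
        from fastdeploy.distributed.communication import use_custom_allreduce

        use_custom_allreduce()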