mirror of https://github.com/PaddlePaddle/FastDeploy.git
"""
|
|
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
"""
|
|
|
|
import paddle
|
|
import paddle.distributed as dist
|
|
|
|
|
|
@paddle.jit.marker.unified
|
|
def tensor_model_parallel_all_reduce(input_: paddle.Tensor) -> paddle.Tensor:
|
|
"""All-reduce the input tensor across model parallel group."""
|
|
if paddle.in_dynamic_mode():
|
|
hcg = dist.fleet.get_hybrid_communicate_group()
|
|
mp_group = hcg.get_model_parallel_group()
|
|
dist.all_reduce(input_, group=mp_group)
|
|
else:
|
|
dist.all_reduce(input_)
|
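

# --- Usage sketch -----------------------------------------------------------
# A minimal sketch, not part of the upstream file; the launch command, GPU
# count, and parallel degrees below are illustrative assumptions. Run with:
#   python -m paddle.distributed.launch --gpus "0,1" <this_file>.py
if __name__ == "__main__":
    from paddle.distributed import fleet

    strategy = fleet.DistributedStrategy()
    # Pure two-way tensor model parallelism (no data or pipeline parallelism).
    strategy.hybrid_configs = {"dp_degree": 1, "mp_degree": 2, "pp_degree": 1}
    fleet.init(is_collective=True, strategy=strategy)

    # Each rank contributes its rank id; the all-reduce sums the contributions
    # in place, so with two ranks every rank ends up holding [1., 1., 1., 1.].
    partial = paddle.full([4], float(dist.get_rank()), dtype="float32")
    out = tensor_model_parallel_all_reduce(partial)
    print(out.numpy())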