Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-09-27 04:46:16 +08:00
fix ernie vl distributed attr. (#4217)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
@@ -23,7 +23,7 @@ from paddle import nn
 from paddle.autograd import PyLayer
 from paddle.distributed.fleet.utils import recompute
 
-from fastdeploy.model_executor.layers.utils import _set_var_distributed, get_tensor
+from fastdeploy.model_executor.layers.utils import get_tensor
 from fastdeploy.model_executor.models.ernie4_5_vl.dist_utils import (
     RowSequenceParallelLinear,
     all_gather_group,
@@ -197,19 +197,7 @@ class VariableResolutionResamplerModel(nn.Layer):
         self.after_norm = RMSNorm(out_config)
 
         if self.tensor_parallel_degree > 1:
-            for idx in [2, 3]:
-                mark_as_sequence_parallel_parameter(self.spatial_linear[idx].weight)
-                mark_as_sequence_parallel_parameter(self.spatial_linear[idx].bias)
-                _set_var_distributed(self.spatial_linear[idx].weight, split_axis=0)
-                _set_var_distributed(self.spatial_linear[idx].bias, split_axis=0)
-            if self.use_temporal_conv:
-                for idx in [0, 2, 3]:
-                    mark_as_sequence_parallel_parameter(self.temporal_linear[idx].weight)
-                    mark_as_sequence_parallel_parameter(self.temporal_linear[idx].bias)
 
-            mark_as_sequence_parallel_parameter(self.mlp.weight)
-            mark_as_sequence_parallel_parameter(self.mlp.bias)
-            mark_as_sequence_parallel_parameter(self.after_norm.weight)
             set_weight_attrs(self.spatial_linear[0].weight, {"output_dim": False})
 
     def spatial_conv_reshape(self, x, spatial_conv_size):
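
For context, the calls removed here attach parallelism metadata to parameters rather than changing the parameters themselves: `_set_var_distributed` and `mark_as_sequence_parallel_parameter` tag weights for tensor/sequence parallelism, while the surviving `set_weight_attrs(..., {"output_dim": False})` leaves a hint for weight loading. The following is a minimal, self-contained sketch of that attribute-marking pattern using toy stand-ins; it is not FastDeploy's real implementation, and the helper defined below only mimics the shape of the call seen in the diff.

# Illustrative sketch only; toy stand-ins, not FastDeploy's real helpers.
from types import SimpleNamespace


def set_weight_attrs(param, attrs):
    # Toy stand-in: attach loader/parallelism hints (e.g. "output_dim") to a parameter object.
    for key, value in attrs.items():
        setattr(param, key, value)


# Fake weight standing in for self.spatial_linear[0].weight.
weight = SimpleNamespace(shape=(8, 32))  # arbitrary shape, for illustration only

tensor_parallel_degree = 2  # hypothetical runtime setting
if tensor_parallel_degree > 1:
    # "output_dim": False signals a (hypothetical) weight loader that this
    # weight should not be sharded along its output dimension across ranks.
    set_weight_attrs(weight, {"output_dim": False})

print(getattr(weight, "output_dim", None))  # -> False when tensor parallelism is enabled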