[GCU] Support gcu platform (#2702)
baseline: e7fa57ebae
Co-authored-by: yongqiangma <xing.wo@163.com>
@@ -19,7 +19,7 @@ from typing import Optional
 
 import paddle
 from paddle import nn
-from paddle.incubate.nn.functional import fused_bias_act
+from paddle.incubate.nn.functional import fused_bias_act, swiglu
 
 from fastdeploy.config import FDConfig
 from fastdeploy.platforms import current_platform
@@ -66,6 +66,8 @@ class SiluAndMul(nn.Layer):
         if current_platform.is_cuda() or current_platform.is_xpu(
         ) or current_platform.is_iluvatar():
             self.forward = self.forward_cuda
+        elif current_platform.is_gcu():
+            self.forward = self.forward_gcu
         else:
             raise NotImplementedError
 
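For context, the new `elif` branch above follows the existing pattern of binding the platform-specific forward once at construction time, so no platform check is paid per call. Below is a minimal, self-contained sketch of that idiom; the `platform` string is a hypothetical stand-in for `fastdeploy.platforms.current_platform`, which is not imported here.

```python
class ActivationDispatchSketch:
    """Sketch of the construction-time dispatch used by SiluAndMul.

    The `platform` argument is illustrative only; the real layer calls
    current_platform.is_cuda() / is_xpu() / is_iluvatar() / is_gcu().
    """

    def __init__(self, platform: str):
        # Bind the concrete implementation once; later calls to
        # `self.forward` take no per-step branch.
        if platform in ("cuda", "xpu", "iluvatar"):
            self.forward = self.forward_cuda
        elif platform == "gcu":
            self.forward = self.forward_gcu
        else:
            raise NotImplementedError(platform)

    def forward_cuda(self, x):
        return f"fused path on {x}"    # placeholder for the fused_bias_act path

    def forward_gcu(self, x):
        return f"swiglu path on {x}"   # placeholder for the swiglu + bias path


layer = ActivationDispatchSketch("gcu")
print(layer.forward("input"))  # -> "swiglu path on input"
```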
@@ -123,3 +125,18 @@ class SiluAndMul(nn.Layer):
             quant_max_bound=self.quant_max_bound,
             quant_min_bound=self.quant_min_bound,
         )
+
+    def forward_gcu(self, x):
+        """
+        Forward propagation of the custom activation layer.
+
+        Args:
+            x (Tensor): Input tensor to the activation layer.
+
+        Returns:
+            Tensor: Output tensor.
+        """
+        out = swiglu(x)
+        if self.bias is not None:
+            out = out + self.bias
+        return out
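The GCU path relies on `swiglu` splitting the last axis of `x` into a gate half and an up half, then computing `silu(gate) * up`. A minimal check of that equivalence, assuming a Paddle build that ships `paddle.incubate.nn.functional.swiglu`; the shapes below are illustrative only:

```python
import paddle
import paddle.nn.functional as F
from paddle.incubate.nn.functional import swiglu

# Hypothetical input whose last axis holds the [gate, up] halves.
x = paddle.randn([2, 8])
gate, up = paddle.split(x, num_or_sections=2, axis=-1)

reference = F.silu(gate) * up   # unfused reference: silu(gate) * up
fused = swiglu(x)               # single-argument form splits x in half internally

print(bool(paddle.allclose(reference, fused)))  # expected: True
```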