Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Intel HPU] Support intel hpu platform (#4161)
* [Intel HPU] Support intel hpu platform
* fix some issues
* apply precommit and move AttentionBackend_HPU
* fix format issue
* correct ops import
* fix ci issue
* update code in layers
* fix code style issue
* remove dense tp moe ep mode
* fix enc_dec_block_num
* fix rebase issue
* rename hpu to gaudi in readme
* rename ForwardMeta_HPU to HPUForwardMeta
@@ -72,6 +72,8 @@ class SiluAndMul(nn.Layer):
             self.forward = self.forward_cuda
         elif current_platform.is_gcu():
             self.forward = self.forward_gcu
+        elif current_platform.is_intel_hpu():
+            self.forward = self.forward_intel_hpu
         else:
             raise NotImplementedError
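The hunk above extends the construct-time dispatch in `SiluAndMul.__init__`: the platform is probed once and `self.forward` is bound to the matching backend method, so calls pay no per-invocation branching. A minimal, self-contained sketch of that pattern follows; the `Platform` class and its `is_*` probes are illustrative stand-ins, not FastDeploy's actual `current_platform` API.

# Sketch of construct-time forward dispatch, mirroring the diff above.
# `Platform` and its is_* probes are illustrative stand-ins, not FastDeploy's API.
class Platform:
    def __init__(self, name: str):
        self.name = name

    def is_cuda(self) -> bool:
        return self.name == "cuda"

    def is_gcu(self) -> bool:
        return self.name == "gcu"

    def is_intel_hpu(self) -> bool:
        return self.name == "intel_hpu"

class ActivationSketch:
    def __init__(self, platform: Platform):
        # Bind the backend-specific implementation once; every later call
        # goes straight to the chosen method with no further branching.
        if platform.is_cuda():
            self.forward = self.forward_cuda
        elif platform.is_gcu():
            self.forward = self.forward_gcu
        elif platform.is_intel_hpu():
            self.forward = self.forward_intel_hpu
        else:
            raise NotImplementedError

    def forward_cuda(self, x):
        return ("cuda", x)

    def forward_gcu(self, x):
        return ("gcu", x)

    def forward_intel_hpu(self, x):
        return ("intel_hpu", x)

layer = ActivationSketch(Platform("intel_hpu"))
print(layer.forward(1.0))  # ('intel_hpu', 1.0)

Because the binding happens in __init__, supporting a new backend only requires one more elif branch plus the method itself, which is exactly the shape of this diff.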
@@ -147,6 +149,16 @@ class SiluAndMul(nn.Layer):
             out = out + self.bias
         return out
 
+    def forward_intel_hpu(self, x):
+        """
+        Forward propagation of the custom activation layer.
+        Args:
+            x (Tensor): Input tensor to the activation layer.
+        Returns:
+            Tensor: Output tensor.
+        """
+        return
+
 
 def get_act_fn(act_fn_name: str) -> nn.Layer:
     """Get an activation function by name."""
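The new `forward_intel_hpu` lands here as a documented stub (its body is a bare `return`). For context, the operation this layer implements on other backends is the conventional gated-SiLU: split the last dimension into gate and up halves and compute silu(gate) * up, with the optional bias added as in the `out = out + self.bias` context line above. Below is a NumPy reference sketch of those conventional semantics, assuming the usual half-split layout; FastDeploy's exact split and bias conventions may differ.

# NumPy reference for conventional silu_and_mul semantics; a sketch under the
# usual half-split assumption, not FastDeploy's actual kernel.
import numpy as np

def silu(x):
    # SiLU(x) = x * sigmoid(x)
    return x / (1.0 + np.exp(-x))

def silu_and_mul_ref(x, bias=None):
    # Split the last dimension into [gate, up] halves and gate the up half.
    d = x.shape[-1] // 2
    gate, up = x[..., :d], x[..., d:]
    out = silu(gate) * up
    if bias is not None:
        out = out + bias
    return out

x = np.random.randn(2, 8).astype(np.float32)
print(silu_and_mul_ref(x).shape)  # (2, 4): output width is half the input width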