mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 16:48:03 +08:00
[feat] support fa3 backend for pd disaggregated (#2695)
Some checks failed
Deploy GitHub Pages / deploy (push) Has been cancelled
* support fa3 backend run in pd disaggregated * support fa3 backend run in pd disaggregated * support fa3 backend run in pd disaggregated * support fa3 backend run in pd disaggregated * delete use_fast_ffn
This commit is contained in:
@@ -13,9 +13,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
"""
|
||||
cuda platform file
|
||||
"""
|
||||
|
||||
import paddle
|
||||
|
||||
@@ -65,6 +62,11 @@ class CUDAPlatform(Platform):
|
||||
return (
|
||||
"fastdeploy.model_executor.layers.attention.MLAAttentionBackend"
|
||||
)
|
||||
elif selected_backend == _Backend.FLASH_ATTN:
|
||||
logger.info("Using FLASH ATTN backend.")
|
||||
return (
|
||||
"fastdeploy.model_executor.layers.attention.FlashAttentionBackend"
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"Invalid attention backend you specified.\n"
|
||||
|
Reference in New Issue
Block a user