Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-19 15:04:47 +08:00)
【FIX】Change the name of sparse attn from moba to plas (#4006)

* Update docs
* 【docs】 update readme (#4000)
* Update docs
* update readme
* update docs
* 【FIX】Change the name of sparse attn from moba to plas (#3845)
* Update docs
* Update docs
* Update docs
* Update docs
* Rename moba to plas
* code style
* update ci
* code style
* update ci
* code style

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>

* fix max_num_seqs
* fix test load attn

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
@@ -30,9 +30,9 @@ from fastdeploy.config import (
     FDConfig,
     GraphOptimizationConfig,
     LoadConfig,
-    MobaAttentionConfig,
     ModelConfig,
     ParallelConfig,
+    PlasAttentionConfig,
     PoolerConfig,
     RunnerOption,
     SpeculativeConfig,
@@ -361,9 +361,9 @@ class EngineArgs:
     """
     Configuration for graph optimization backend execution.
     """
-    moba_attention_config: Optional[Dict[str, Any]] = None
+    plas_attention_config: Optional[Dict[str, Any]] = None
     """
-    Configuration for moba attention.
+    Configuration for plas attention.
     """

     enable_logprob: bool = False
@@ -601,9 +601,9 @@ class EngineArgs:
             help="",
         )
         model_group.add_argument(
-            "--moba-attention-config",
+            "--plas-attention-config",
             type=json.loads,
-            default=EngineArgs.moba_attention_config,
+            default=EngineArgs.plas_attention_config,
             help="",
         )
         model_group.add_argument(
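The renamed flag keeps the same parsing behavior: because it is registered with type=json.loads, the quoted JSON string passed on the command line is deserialized into the dict stored on plas_attention_config. Below is a minimal, standalone sketch of that behavior; the key "plas_block_size" is an illustrative placeholder, not a confirmed FastDeploy option name.

import argparse
import json

# Register the flag the same way the diff does: the JSON text supplied on the
# CLI becomes a Python dict via json.loads.
parser = argparse.ArgumentParser()
parser.add_argument("--plas-attention-config", type=json.loads, default=None)

# argparse maps "--plas-attention-config" to the attribute plas_attention_config.
ns = parser.parse_args(["--plas-attention-config", '{"plas_block_size": 128}'])
print(ns.plas_attention_config)  # -> {'plas_block_size': 128}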
@@ -993,17 +993,17 @@ class EngineArgs:
                 graph_optimization_args[k] = v
         return GraphOptimizationConfig(graph_optimization_args)

-    def create_moba_attention_config(self) -> MobaAttentionConfig:
+    def create_plas_attention_config(self) -> PlasAttentionConfig:
         """
-        Create and retuan a MobaAttentionConfig object based on the current settings.
+        Create and retuan a PlasAttentionConfig object based on the current settings.
         """
         attention_args = asdict(self)
-        if self.moba_attention_config is not None:
-            for k, v in self.moba_attention_config.items():
+        if self.plas_attention_config is not None:
+            for k, v in self.plas_attention_config.items():
                 attention_args[k] = v
-            return MobaAttentionConfig(attention_args)
+            return PlasAttentionConfig(attention_args)
         else:
-            return MobaAttentionConfig(None)
+            return PlasAttentionConfig(None)

     def create_early_stop_config(self) -> EarlyStopConfig:
         """
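For context, create_plas_attention_config() follows a simple overlay pattern: start from the dataclass's own fields via asdict(self), then let any user-supplied plas_attention_config entries override them before building the PlasAttentionConfig. Here is a self-contained sketch of that pattern using a stand-in dataclass and placeholder keys rather than FastDeploy's real classes.

from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional

@dataclass
class DemoArgs:
    # Stand-ins for EngineArgs fields; names are illustrative only.
    max_num_seqs: int = 256
    plas_attention_config: Optional[Dict[str, Any]] = None

    def merged_plas_args(self) -> Optional[Dict[str, Any]]:
        if self.plas_attention_config is None:
            return None  # mirrors returning PlasAttentionConfig(None)
        merged = asdict(self)
        merged.update(self.plas_attention_config)  # user-supplied keys win
        return merged

args = DemoArgs(plas_attention_config={"plas_block_size": 128})
print(args.merged_plas_args())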
@@ -1064,7 +1064,7 @@ class EngineArgs:
         scheduler_cfg = self.create_scheduler_config()
         graph_opt_cfg = self.create_graph_optimization_config()
         graph_opt_cfg.update_use_cudagraph(self.use_cudagraph)
-        moba_attention_config = self.create_moba_attention_config()
+        plas_attention_config = self.create_plas_attention_config()

         early_stop_cfg = self.create_early_stop_config()
         early_stop_cfg.update_enable_early_stop(self.enable_early_stop)
@@ -1093,7 +1093,7 @@ class EngineArgs:
             max_long_partial_prefills=self.max_long_partial_prefills,
             long_prefill_token_threshold=self.long_prefill_token_threshold,
             graph_opt_config=graph_opt_cfg,
-            moba_attention_config=moba_attention_config,
+            plas_attention_config=plas_attention_config,
             guided_decoding_backend=self.guided_decoding_backend,
             disable_any_whitespace=self.guided_decoding_disable_any_whitespace,
             early_stop_config=early_stop_cfg,
@@ -501,7 +501,7 @@ class LLMEngine:
             f" --early_stop_config '{self.cfg.early_stop_config.to_json_string()}'"
             f" --reasoning_parser {self.cfg.reasoning_parser}"
             f" --load_choices {self.cfg.load_config.load_choices}"
-            f" --moba_attention_config '{self.cfg.moba_attention_config.to_json_string()}'"
+            f" --plas_attention_config '{self.cfg.plas_attention_config.to_json_string()}'"
             f" --ips {ips}"
             f" --cache-transfer-protocol {self.cfg.cache_config.cache_transfer_protocol}"
             f" --runner {self.cfg.model_config.runner}"
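On the engine side, the renamed config is forwarded to the worker process as a single quoted JSON argument via to_json_string(), as the f-string above shows. A hedged sketch of that round trip with a stub config class follows; StubPlasConfig and its field are placeholders, not FastDeploy's actual PlasAttentionConfig.

import json
import shlex

class StubPlasConfig:
    """Placeholder with a to_json_string() method like the real config objects."""
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_json_string(self) -> str:
        return json.dumps(self.__dict__)

cfg = StubPlasConfig(plas_enabled=True)
# Quote the JSON so it survives as one shell argument in the worker command line.
cmd = f"python worker.py --plas_attention_config {shlex.quote(cfg.to_json_string())}"
print(cmd)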