Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-09-27 12:52:29 +08:00)
[SOT] Mark dynamic dims by type annotations (#2771)
* [SOT] Mark dynamic dims by type annotations
* fix conflict of forward_meta
* mark more attn backend
* fix missing annotated and add env SOT_SPECIALIZED_DIM_NUMBERS
* auto infer implicit 0 dim dynamic dim
* revert manual marked dims
* revert missing update
* auto infer can use unsafe code in warmup stage
* check -> type_match
* fix codestyle
* restore blank line
* empty commit
* add need_warmup nonlocal;
* add doc for resolver
* add missing type hints
* unquote "ForwardMeta"
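The core idea of the change is to carry the "which dims are dynamic" information in the type annotations of the forward metadata, and to have a resolver read those markers back out (with an implicit dynamic dim 0 inferred during the warmup stage). The snippet below is only an illustrative sketch of that general pattern using `typing.Annotated`; the names `DynamicDims`, `resolve_dynamic_dims`, and `ForwardMetaLike` are assumptions for this example, not the helpers actually added by this PR.

```python
# Illustrative sketch only: DynamicDims / resolve_dynamic_dims / ForwardMetaLike
# are hypothetical names, not FastDeploy's real API.
from dataclasses import dataclass
from typing import Annotated, Any, get_type_hints


@dataclass(frozen=True)
class DynamicDims:
    """Metadata attached to a field annotation, naming the dims that may vary."""
    dims: tuple[int, ...]


def resolve_dynamic_dims(cls: type) -> dict[str, tuple[int, ...]]:
    """Collect {field name -> dynamic dims} from Annotated type hints on cls."""
    result: dict[str, tuple[int, ...]] = {}
    hints = get_type_hints(cls, include_extras=True)
    for name, hint in hints.items():
        for meta in getattr(hint, "__metadata__", ()):
            if isinstance(meta, DynamicDims):
                result[name] = meta.dims
    return result


@dataclass
class ForwardMetaLike:
    # dim 0 (the batch/token dim) is marked dynamic; unmarked fields stay specialized
    input_ids: Annotated[Any, DynamicDims((0,))] = None
    seq_lens: Annotated[Any, DynamicDims((0,))] = None
    rotary_embs: Any = None


if __name__ == "__main__":
    print(resolve_dynamic_dims(ForwardMetaLike))
    # {'input_ids': (0,), 'seq_lens': (0,)}
```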
@@ -335,11 +335,11 @@ class GraphOptimizationConfig:
         cudagraph_splitting_ops = ["paddle.unified_attention"]
 
     Note: If want to use subgraph capture functionality in a dynamic graph,
-    can manually split the model into multiple layers and apply the @support_cuda_graph decorator
+    can manually split the model into multiple layers and apply the @support_graph_optimization decorator
     only to the layer where CUDA graph functionality is required.
     """
-    cudagraph_splitting_ops = Optional[list[str]]
-    """" Whether to use a full cuda graph for the entire forward pass rather than
+    cudagraph_splitting_ops: list[str] = field(default_factory=list)
+    """ Whether to use a full cuda graph for the entire forward pass rather than
     splitting certain operations such as attention into subgraphs.
     Thus this flag cannot be used together with splitting_ops."""
     full_cuda_graph: bool = True
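Besides the docstring fix, the hunk repairs a field declaration: on the removed line, `cudagraph_splitting_ops = Optional[list[str]]` is a plain assignment, so the class attribute ends up being the typing object itself and is never treated as a dataclass field. A minimal sketch of the difference, using stand-in class names rather than the real GraphOptimizationConfig:

```python
# Stand-in classes for illustration only, not FastDeploy's actual config.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class BrokenConfig:
    # As on the removed line: a plain assignment, so the class attribute is
    # literally the typing object Optional[list[str]] and no field is created.
    cudagraph_splitting_ops = Optional[list[str]]


@dataclass
class FixedConfig:
    # As on the added lines: a real annotated field with a safe mutable default.
    cudagraph_splitting_ops: list[str] = field(default_factory=list)
    full_cuda_graph: bool = True


print(BrokenConfig.cudagraph_splitting_ops)   # typing.Optional[list[str]]
print(FixedConfig().cudagraph_splitting_ops)  # []
```

`field(default_factory=list)` is used on the added line because dataclasses reject a bare mutable default such as `= []`, and a shared default list would otherwise be aliased across instances.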