mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-12-24 13:28:13 +08:00
[Executor] Default use CUDAGraph (#3594)
* add start intercept * Adjustment GraphOptConfig * pre-commit * default use cudagraph * set default value * default use cuda graph * pre-commit * fix test case bug * disable rl * fix moba attention * only support gpu * Temporarily disable PD Disaggregation * set max_num_seqs of test case as 1 * set max_num_seqs and temperature * fix max_num_batched_tokens bug * close cuda graph * success run wint2 * profile run with max_num_batched_tokens * 1.add c++ memchecker 2.success run wint2 * update a800 yaml * update docs * 1. delete check 2. fix plas attn test case * default use use_unique_memory_pool * add try-except for warmup * ban mtp, mm, rl * fix test case mock * fix ci bug * fix form_model_get_output_topp0 bug * fix ci bug * refine deepseek ci * refine code * Disable PD * fix sot yaml
This commit is contained in:
@@ -251,6 +251,7 @@ if paddle.is_compiled_with_rocm():
|
||||
)
|
||||
elif paddle.is_compiled_with_cuda():
|
||||
sources = [
|
||||
"gpu_ops/helper.cu",
|
||||
"gpu_ops/save_with_output_msg.cc",
|
||||
"gpu_ops/get_output.cc",
|
||||
"gpu_ops/get_output_msg_with_topk.cc",
|
||||
@@ -499,7 +500,7 @@ elif paddle.is_compiled_with_cuda():
|
||||
sources=sources,
|
||||
extra_compile_args={"cxx": cc_compile_args, "nvcc": nvcc_compile_args},
|
||||
libraries=["cublasLt"],
|
||||
extra_link_args=["-lcuda"],
|
||||
extra_link_args=["-lcuda", "-lnvidia-ml"],
|
||||
),
|
||||
packages=find_packages(where="third_party/DeepGEMM"),
|
||||
package_dir={"": "third_party/DeepGEMM"},
|
||||
|
||||
Reference in New Issue
Block a user