Sync v2.0 version of code to github repo

This commit is contained in:
Jiang-Jia-Jun
2025-06-29 23:29:37 +00:00
parent d151496038
commit 92c2cfa2e7
597 changed files with 78776 additions and 22905 deletions

View File

@@ -0,0 +1,8 @@
enable_chunked_prefill: True
max_model_len: 131072
max_num_seqs: 16
kv_cache_ratio: 0.75
tensor_parallel_size: 8
max_num_batched_tokens: 4096
max_num_partial_prefills: 3
max_long_partial_prefills: 3

View File

@@ -0,0 +1,5 @@
max_model_len: 131072
max_num_seqs: 40
gpu_memory_utilization: 0.9
tensor_parallel_size: 8
quantization: wint4

View File

@@ -0,0 +1,8 @@
enable_chunked_prefill: True
max_model_len: 131072
max_num_seqs: 16
kv_cache_ratio: 0.75
tensor_parallel_size: 8
max_num_batched_tokens: 4096
max_num_partial_prefills: 3
max_long_partial_prefills: 3

View File

@@ -0,0 +1,10 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 128
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 1
enable_chunked_prefill: True
max_num_batched_tokens: 384
quantization: wint4
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
max_num_batched_tokens: 32768

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 32
kv_cache_ratio: 0.5
tensor_parallel_size: 1
quantization: wint4

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
max_num_batched_tokens: 32768
quantization: wint4

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
max_num_batched_tokens: 32768
quantization: wint8

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
max_num_batched_tokens: 32768

View File

@@ -0,0 +1,12 @@
max_model_len: 32768
max_num_seqs: 256
tensor_parallel_size: 8
quantization: block_wise_fp8
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
enable_chunked_prefill: True
max_num_batched_tokens: 1024
max_num_partial_prefills: 3
max_long_partial_prefills: 3
enable_prefix_caching: True
swap_space: 200

View File

@@ -0,0 +1,11 @@
max_model_len: 32768
max_num_seqs: 256
tensor_parallel_size: 8
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
enable_chunked_prefill: True
max_num_batched_tokens: 1024
max_num_partial_prefills: 3
max_long_partial_prefills: 3
enable_prefix_caching: True
swap_space: 200

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 96
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 4

View File

@@ -0,0 +1,15 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
tensor_parallel_size: 4
cache_queue_port: 55663
enable_chunked_prefill: True
splitwise_role: decode
engine_worker_queue_port: 6678
cache_transfer_protocol: "rdma,ipc"
rdma_comm_ports: "7671,7672,7673,7674"
pd_comm_port: "2334"
max_num_batched_tokens: 384
max_num_partial_prefills: 3
max_long_partial_prefills: 3

View File

@@ -0,0 +1,12 @@
max_model_len: 32768
max_num_seqs: 16
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.9
tensor_parallel_size: 4
splitwise_role: prefill
enable_prefix_caching: True
cache_queue_port: 55664
engine_worker_queue_port: 6677
cache_transfer_protocol: "rdma,ipc"
rdma_comm_ports: "7675,7676,7677,7678"
pd_comm_port: "2333"

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_prefix_caching: True
enable_chunked_prefill: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 96
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 4

View File

@@ -0,0 +1,13 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
tensor_parallel_size: 1
data_parallel_size: 8
num_gpu_blocks_override: 1024
cache_queue_port: 55663
splitwise_role: decode
engine_worker_queue_port: 6678
cache_transfer_protocol: "rdma"
rdma_comm_ports: "7671,7672,7673,7674,7675,7676,7677,7678"
pd_comm_port: "2334"

View File

@@ -0,0 +1,13 @@
max_model_len: 32768
max_num_seqs: 16
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.9
tensor_parallel_size: 1
data_parallel_size: 8
splitwise_role: prefill
cache_queue_port: 55664
engine_worker_queue_port: 6677
num_gpu_blocks_override: 1024
cache_transfer_protocol: "rdma"
rdma_comm_ports: "7671,7672,7673,7674,7675,7676,7677,7678"
pd_comm_port: "2334"

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 96
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 4
quantization: wint4

View File

@@ -0,0 +1,13 @@
max_model_len: 32768
max_num_seqs: 128
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.7
tensor_parallel_size: 4
cache_queue_port: 55663
enable_chunked_prefill: False
enable_prefix_caching: False
splitwise_role: decode
engine_worker_queue_port: 6678
cache_transfer_protocol: "rdma,ipc"
rdma_comm_ports: "7671,7672,7673,7674"
pd_comm_port: "2334"

View File

@@ -0,0 +1,12 @@
max_model_len: 32768
max_num_seqs: 16
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.9
tensor_parallel_size: 4
splitwise_role: prefill
enable_prefix_caching: False
cache_queue_port: 55664
engine_worker_queue_port: 6677
cache_transfer_protocol: "rdma,ipc"
rdma_comm_ports: "7675,7676,7677,7678"
pd_comm_port: "2333"

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 40
tensor_parallel_size: 4
quantization: wint4
gpu_memory_utilization: 0.9

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 160
tensor_parallel_size: 8
quantization: wint4
gpu_memory_utilization: 0.9

View File

@@ -0,0 +1,8 @@
enable_prefix_caching: True
max_model_len: 32768
max_num_seqs: 128
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 4
swap_space: 200
cache_queue_port: 55664

View File

@@ -0,0 +1,15 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
tensor_parallel_size: 4
cache_queue_port: 55663
enable_chunked_prefill: True
splitwise_role: decode
engine_worker_queue_port: 6678
cache_transfer_protocol: "rdma,ipc"
rdma_comm_ports: "7671,7672,7673,7674"
pd_comm_port: "2334"
max_num_batched_tokens: 384
max_num_partial_prefills: 3
max_long_partial_prefills: 3

View File

@@ -0,0 +1,12 @@
max_model_len: 32768
max_num_seqs: 16
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.9
tensor_parallel_size: 4
splitwise_role: prefill
enable_prefix_caching: True
cache_queue_port: 55664
engine_worker_queue_port: 6677
cache_transfer_protocol: "rdma,ipc"
rdma_comm_ports: "7675,7676,7677,7678"
pd_comm_port: "2333"

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 96
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 8

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 80
tensor_parallel_size: 8
quantization: wint8
gpu_memory_utilization: 0.9

View File

@@ -0,0 +1,9 @@
enable_prefix_caching: True
max_model_len: 32768
max_num_batched_tokens: 68304
max_num_seqs: 128
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 8
swap_space: 100
cache_queue_port: 55664

View File

@@ -0,0 +1,9 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 56
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
tensor_parallel_size: 8
quantization: wint4
limit_mm_per_prompt: '{"image": 100, "video": 100}'
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,11 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 56
gpu_memory_utilization: 0.8
kv_cache_ratio: 0.8
tensor_parallel_size: 8
quantization: wint4
limit_mm_per_prompt: '{"image": 100, "video": 100}'
enable_chunked_prefill: True
max_num_batched_tokens: 384
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,9 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 36
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
tensor_parallel_size: 4
quantization: wint4
limit_mm_per_prompt: '{"image": 100, "video": 100}'
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,9 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 36
gpu_memory_utilization: 0.95
kv_cache_ratio: 0.8
tensor_parallel_size: 8
quantization: wint8
limit_mm_per_prompt: '{"image": 100, "video": 100}'
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,11 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 36
gpu_memory_utilization: 0.8
kv_cache_ratio: 0.8
tensor_parallel_size: 8
quantization: wint8
limit_mm_per_prompt: '{"image": 100, "video": 100}'
enable_chunked_prefill: True
max_num_batched_tokens: 384
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,9 @@
enable_mm: True
max_model_len: 32768
max_num_seqs: 36
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.8
tensor_parallel_size: 4
quantization: wint8
limit_mm_per_prompt: '{"image": 100, "video": 100}'
reasoning_parser: ernie-45-vl

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint8
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint8
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint4
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 96
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.71
tensor_parallel_size: 4
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,4 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wfp8afp8
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wfp8afp8

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint8

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint8
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint8
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
enable_static_graph_inference: True

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
kv_cache_ratio: 0.75
tensor_parallel_size: 1
quantization: wint4
enable_static_graph_inference: True

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
tensor_parallel_size: 1

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
tensor_parallel_size: 1

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
quantization: wint8
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
quantization: wint8
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 256
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
quantization: wint8
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 75
gpu_memory_utilization: 0.85
kv_cache_ratio: 0.75
quantization: wint4
tensor_parallel_size: 4

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 25
gpu_memory_utilization: 0.9
kv_cache_ratio: 0.75
quantization: wint8
tensor_parallel_size: 4

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 50
gpu_memory_utilization: 0.85
kv_cache_ratio: 0.75
tensor_parallel_size: 1

View File

@@ -0,0 +1,5 @@
max_model_len: 32768
max_num_seqs: 50
gpu_memory_utilization: 0.85
kv_cache_ratio: 0.75
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 50
gpu_memory_utilization: 0.8
kv_cache_ratio: 0.75
quantization: wint4
tensor_parallel_size: 1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 50
gpu_memory_utilization: 0.8
kv_cache_ratio: 0.75
quantization: wint4
tensor_parallel_size: 1

View File

@@ -0,0 +1,8 @@
top_p: 0.8
temperature: 0.8
metadata:
min_tokens: 1
max_tokens: 131071
repetition_penalty: 1.0
frequency_penalty: 0
presence_penalty: 0

View File

@@ -0,0 +1,8 @@
top_p: 0.8
temperature: 0.8
metadata:
min_tokens: 1
max_tokens: 12288
repetition_penalty: 1.0
frequency_penalty: 0
presence_penalty: 0

View File

@@ -0,0 +1,8 @@
top_p: 0.8
temperature: 0.7
metadata:
min_tokens: 1
max_tokens: 12288
repetition_penalty: 1.05
frequency_penalty: 0
presence_penalty: 0

View File

@@ -0,0 +1,8 @@
top_p: 0.8
temperature: 0.7
metadata:
min_tokens: 1
max_tokens: 12288
repetition_penalty: 1.0
frequency_penalty: 0
presence_penalty: 1.5

View File

@@ -0,0 +1,8 @@
top_p: 0.95
temperature: 0.6
metadata:
min_tokens: 1
max_tokens: 32767
repetition_penalty: 1.0
frequency_penalty: 0
presence_penalty: 0

View File

@@ -0,0 +1,6 @@
tensor_parallel_size: 8
max_model_len: 32768
max_num_seqs: 32
num_gpu_blocks_override: 4096
kv_cache_ratio: 0.5
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 32
gpu_memory_utilization: 0.9
tensor_parallel_size: 4
quantization: wint4
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 128
gpu_memory_utilization: 0.9
tensor_parallel_size: 8
quantization: wint4
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,10 @@
enable_prefix_caching: True
num_gpu_blocks_override: 8000
max_model_len: 32768
max_num_seqs: 64
gpu_memory_utilization: 0.85
kv_cache_ratio: 0.5
tensor_parallel_size: 8
swap_space: 200
cache_queue_port: 55664
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,6 @@
tensor_parallel_size: 8
max_model_len: 32768
max_num_seqs: 32
num_gpu_blocks_override: 4096
kv_cache_ratio: 0.5
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 8
gpu_memory_utilization: 0.9
tensor_parallel_size: 4
quantization: wint8
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,6 @@
max_model_len: 32768
max_num_seqs: 64
gpu_memory_utilization: 0.9
tensor_parallel_size: 8
quantization: wint8
reasoning_parser: ernie-x1

View File

@@ -0,0 +1,10 @@
enable_prefix_caching: True
num_gpu_blocks_override: 8000
max_model_len: 32768
max_num_seqs: 64
gpu_memory_utilization: 0.85
kv_cache_ratio: 0.5
tensor_parallel_size: 8
swap_space: 200
cache_queue_port: 55664
reasoning_parser: ernie-x1