[Feature] [PD Disaggregation] simplify configuration for pd-disaggregated deployment, and refactor post-init and usage for all ports (#5415)

* [feat] simplify configuration for pd-disaggregated deployment, and refactor post-init and usage for all ports

* [fix] fix some bugs

* [fix] fix rdma port for cache manager/messager

* [fix] temporarily disable the port availability check to see if it passes the CI test

* [feat] simplify args for multi api server

* [fix] fix dp

* [fix] fix port for xpu

* [fix] add tests for ports post processing & fix ci

* [test] fix test_multi_api_server

* [fix] fix rdma_comm_ports args for multi_api_server

* [fix] fix test_common_engine

* [fix] fix test_cache_transfer_manager

* [chore] automatically set FD_ENABLE_MULTI_API_SERVER

* [fix] prevent api server from creating engine_args twice

* [fix] fix test_run_batch

* [fix] fix test_metrics

* [fix] fix splitwise connector init

* [test] add test_rdma_transfer and test_expert_service

* [fix] fix code syntax

* [fix] fix test_rdma_transfer and the build-wheel-with-RDMA script
Author: Yonghua Li
Date: 2025-12-17 15:50:42 +08:00
Committed by: GitHub
Parent: cdc0004894
Commit: 0c8c6369ed

34 changed files with 1323 additions and 409 deletions
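The behavior exercised by the test below: each port option can now be given once as a comma-separated list covering all data-parallel ranks, and FDConfig's post-init slices out the entries belonging to the local rank. A hypothetical launch configuration in that style (the keys match the test's config dicts; the concrete port values and the dict-style invocation are illustrative assumptions, not taken from this diff):

    args = {
        "data_parallel_size": 4,
        "tensor_parallel_size": 2,
        # one port per data-parallel rank
        "engine_worker_queue_port": "8100,8101,8102,8103",
        "cache_queue_port": "8200,8201,8202,8203",
        "pd_comm_port": "8300,8301,8302,8303",
        # data_parallel_size * tensor_parallel_size ports, one per worker
        "rdma_comm_ports": "8400,8401,8402,8403,8404,8405,8406,8407",
    }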


@@ -14,6 +14,7 @@
 # limitations under the License.
 """
+import random
 import unittest
 from unittest.mock import Mock
@@ -132,6 +133,59 @@ class TestConfig(unittest.TestCase):
         fd_config.init_cache_info()
         assert fd_config.register_info is not None
 
+    def test_fdconfig_postprocess_ports(self):
+        data_parallel_size = 4
+        tensor_parallel_size = 2
+        local_data_parallel_id = random.randint(0, data_parallel_size - 1)
+        engine_worker_queue_ports = [random.randint(8000, 65535) for _ in range(data_parallel_size)]
+        cache_queue_ports = [random.randint(8000, 65535) for _ in range(data_parallel_size)]
+        pd_comm_ports = [random.randint(8000, 65535) for _ in range(data_parallel_size)]
+        rdma_comm_ports = [random.randint(8000, 65535) for _ in range(data_parallel_size * tensor_parallel_size)]
+        parallel_config = ParallelConfig(
+            {
+                "engine_worker_queue_port": ",".join(map(str, engine_worker_queue_ports)),
+                "data_parallel_size": data_parallel_size,
+                "tensor_parallel_size": tensor_parallel_size,
+                "local_data_parallel_id": local_data_parallel_id,
+            }
+        )
+        graph_opt_config = GraphOptimizationConfig({})
+        cache_config = CacheConfig(
+            {
+                "cache_queue_port": ",".join(map(str, cache_queue_ports)),
+                "pd_comm_port": ",".join(map(str, pd_comm_ports)),
+                "rdma_comm_ports": ",".join(map(str, rdma_comm_ports)),
+            }
+        )
+        load_config = LoadConfig({})
+        scheduler_config = SchedulerConfig({})
+        model_config: Mock = Mock()
+        model_config.max_model_len = 512
+        fd_config = FDConfig(
+            parallel_config=parallel_config,
+            graph_opt_config=graph_opt_config,
+            cache_config=cache_config,
+            load_config=load_config,
+            scheduler_config=scheduler_config,
+            model_config=model_config,
+            ips="0.0.0.0",
+            test_mode=True,
+        )
+        assert (
+            fd_config.parallel_config.local_engine_worker_queue_port
+            == engine_worker_queue_ports[local_data_parallel_id]
+        )
+        assert fd_config.cache_config.local_cache_queue_port == cache_queue_ports[local_data_parallel_id]
+        assert fd_config.cache_config.local_pd_comm_port == pd_comm_ports[local_data_parallel_id]
+        assert (
+            fd_config.cache_config.local_rdma_comm_ports
+            == rdma_comm_ports[
+                local_data_parallel_id * tensor_parallel_size : (local_data_parallel_id + 1) * tensor_parallel_size
+            ]
+        )
 
 
 if __name__ == "__main__":
     unittest.main()
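What the assertions encode, as a minimal standalone sketch (select_local_ports is a hypothetical helper written for illustration, not FastDeploy's actual implementation): scalar port options carry one entry per data-parallel rank, while rdma_comm_ports carries tensor_parallel_size consecutive entries per rank, one for each tensor-parallel worker.

    def select_local_ports(ports_csv: str, local_dp_id: int, tp_size: int = 1) -> list[int]:
        # Split the comma-separated list and keep the slice that belongs
        # to this data-parallel rank (tp_size consecutive entries).
        ports = [int(p) for p in ports_csv.split(",")]
        return ports[local_dp_id * tp_size : (local_dp_id + 1) * tp_size]

    # engine_worker_queue_port / cache_queue_port / pd_comm_port: one port per DP rank
    assert select_local_ports("8100,8101,8102,8103", local_dp_id=2) == [8102]
    # rdma_comm_ports with dp=4, tp=2: rank 2 owns ports[4:6]
    assert select_local_ports("8400,8401,8402,8403,8404,8405,8406,8407", 2, 2) == [8404, 8405]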