[Fix] Fix VL when importing fastdeploy and fix RL config rank bug (#2953)

* support vl ori_vocab_size

* support trainer_degree in name_mapping

* fix

* fix import error

* fix local rank
Author: gaoziyuan
Date: 2025-07-22 19:40:27 +08:00
Committed by: GitHub
Parent: 580460046f
Commit: 535a15ab8f
2 changed files with 20 additions and 15 deletions


@@ -43,8 +43,7 @@ def import_custom_ops(package, module_name, global_ns):
             logger.warning(f"Failed to import op {func_name}: {e}")
     except Exception:
-        logger.warning(
-            f"Ops of {package} import failed, it may be not compiled.")
+        logger.warning(f"Ops of {package} import failed, it may be not compiled.")
     preprocess_static_op(global_ns)
@@ -71,6 +70,7 @@ def wrap_unified_op(original_cpp_ext_op, original_custom_op):
         original_cpp_ext_op: Original C++ extension operator function.
         original_custom_op: Original custom operator function.
     """
+    try:
         @paddle.jit.marker.unified
         @functools.wraps(original_custom_op)
@@ -85,6 +85,9 @@ def wrap_unified_op(original_cpp_ext_op, original_custom_op):
                 return res
             return original_custom_op(*args, **kwargs)
+    except:
+        unified_op = None
+        logger.warning("Paddle version not support JIT mode.")
     return unified_op
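In effect, `wrap_unified_op` now degrades gracefully: on Paddle builds where `paddle.jit.marker.unified` does not exist, the decoration raises at definition time, the except branch sets the result to None, and the caller can fall back to the plain op. A minimal sketch of the resulting control flow (the inner body is abridged from the diff; the logger setup is an assumption):

```python
import functools
import logging

import paddle

logger = logging.getLogger(__name__)  # assumed; the real module defines its own logger


def wrap_unified_op(original_cpp_ext_op, original_custom_op):
    """Return a unified op, or None when the Paddle build lacks JIT markers."""
    try:
        @paddle.jit.marker.unified
        @functools.wraps(original_custom_op)
        def unified_op(*args, **kwargs):
            # Dynamic graph: call the C++ extension op directly.
            if paddle.in_dynamic_mode():
                res = original_cpp_ext_op(*args, **kwargs)
                if isinstance(res, list) and len(res) == 1:
                    return res[0]  # unwrap single-output lists
                return res
            # Static graph: fall through to the registered custom op.
            return original_custom_op(*args, **kwargs)
    except:  # noqa: E722 -- bare except mirrors the patch; old Paddle raises AttributeError above
        unified_op = None
        logger.warning("Paddle version not support JIT mode.")
    return unified_op
```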


@@ -58,6 +58,7 @@ class RolloutModelConfig:
         disable_any_whitespace: bool = True,
         enable_logprob: bool = False,
         graph_optimization_config: str = None,
+        local_rank: int = 0
     ):
         # Required parameters
         self.model_name_or_path = model_name_or_path
@@ -98,10 +99,11 @@ class RolloutModelConfig:
         self.disable_any_whitespace = disable_any_whitespace
         self.enable_logprob = enable_logprob
         self.graph_optimization_config = graph_optimization_config
+        self.local_rank = local_rank
 
     def __str__(self):
         return "\n".join(f"{k}: {v}" for k, v in self.__dict__.items())
 
     def initialize(self):
         """Initialize the final fd config"""
-        return initialize_fd_config(self, ranks=self.tensor_parallel_size, local_rank=0)
+        return initialize_fd_config(self, ranks=self.tensor_parallel_size, local_rank=self.local_rank)
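The rank bug this fixes: `RolloutModelConfig.initialize()` previously hard-coded `local_rank=0`, so every tensor-parallel rollout worker built its fd config as if it were rank 0. With the new constructor parameter, each worker forwards its own rank. A hypothetical usage sketch (the import path and the other constructor arguments shown are assumptions, not taken from the diff):

```python
# Hypothetical: each RL rollout worker passes its own node-local rank.
from fastdeploy.rl.rollout_config import RolloutModelConfig  # assumed module path

config = RolloutModelConfig(
    model_name_or_path="/path/to/model",  # required parameter per the diff
    tensor_parallel_size=4,               # attribute referenced by initialize()
    local_rank=2,                         # new in this patch; was implicitly 0
)
fd_config = config.initialize()  # now forwards local_rank=2 to initialize_fd_config
```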