Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-09 02:20:17 +08:00
[Fix] Fix VL when importing fastdeploy and fix RL config rank bug (#2953)
* support vl ori_vacab_size
* support trainer_degree in name_mapping
* fix
* fix import error
* fix local rank
@@ -43,8 +43,7 @@ def import_custom_ops(package, module_name, global_ns):
             logger.warning(f"Failed to import op {func_name}: {e}")
 
     except Exception:
-        logger.warning(
-            f"Ops of {package} import failed, it may be not compiled.")
+        logger.warning(f"Ops of {package} import failed, it may be not compiled.")
 
     preprocess_static_op(global_ns)
 
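This hunk only collapses the warning onto one line, but for orientation, here is a minimal hedged sketch of the guarded-import pattern that import_custom_ops implements: each op from an optionally compiled package is copied into the caller's namespace, and a missing or uncompiled package only produces a warning. The body below is an illustrative reconstruction, not the upstream code; the __all__ handling and module resolution are assumptions.

import importlib
import logging

logger = logging.getLogger("fastdeploy")


def import_custom_ops(package, module_name, global_ns):
    """Pull every op exported by `module_name` into `global_ns`, warning on failure."""
    try:
        module = importlib.import_module(module_name)
        for func_name in getattr(module, "__all__", dir(module)):
            try:
                global_ns[func_name] = getattr(module, func_name)
            except Exception as e:
                logger.warning(f"Failed to import op {func_name}: {e}")
    except Exception:
        logger.warning(f"Ops of {package} import failed, it may be not compiled.")


# Hypothetical usage: "my_ext.ops" does not exist here, so this deliberately
# exercises the warning path instead of raising an ImportError.
import_custom_ops("my_ext", "my_ext.ops", globals())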
@@ -71,20 +70,24 @@ def wrap_unified_op(original_cpp_ext_op, original_custom_op):
         original_cpp_ext_op: Original C++ extension operator function.
         original_custom_op: Original custom operator function.
     """
+    try:
 
-    @paddle.jit.marker.unified
-    @functools.wraps(original_custom_op)
-    def unified_op(*args, **kwargs):
-        if paddle.in_dynamic_mode():
-            res = original_cpp_ext_op(*args, **kwargs)
-            if res is None:
-                return None
-            # TODO(DrRyanHuang): Remove this if when we align the implementation of custom op and C++ extension
-            if isinstance(res, list) and len(res) == 1:
-                return res[0]
-            return res
-        return original_custom_op(*args, **kwargs)
+        @paddle.jit.marker.unified
+        @functools.wraps(original_custom_op)
+        def unified_op(*args, **kwargs):
+            if paddle.in_dynamic_mode():
+                res = original_cpp_ext_op(*args, **kwargs)
+                if res is None:
+                    return None
+                # TODO(DrRyanHuang): Remove this if when we align the implementation of custom op and C++ extension
+                if isinstance(res, list) and len(res) == 1:
+                    return res[0]
+                return res
+            return original_custom_op(*args, **kwargs)
 
+    except:
+        unified_op = None
+        logger.warning("Paddle version not support JIT mode.")
     return unified_op
 
 
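The new try/except guards the decorator paddle.jit.marker.unified, which older Paddle builds do not provide; on those builds unified_op becomes None and a warning is logged instead of the import failing. Below is a self-contained sketch of the dispatch the wrapped unified_op performs, with Paddle stubbed out so it runs anywhere; the stub names are assumptions, not the upstream API.

import functools


def wrap_unified_op_sketch(cpp_ext_op, custom_op, in_dynamic_mode):
    """Illustrative only: mirrors the dispatch, minus @paddle.jit.marker.unified."""

    @functools.wraps(custom_op)
    def unified_op(*args, **kwargs):
        if in_dynamic_mode():
            res = cpp_ext_op(*args, **kwargs)
            if res is None:
                return None
            # Custom ops may return a single output wrapped in a list; unwrap it
            # so both paths hand back the same shape of result.
            if isinstance(res, list) and len(res) == 1:
                return res[0]
            return res
        return custom_op(*args, **kwargs)

    return unified_op


# In "dynamic mode" the C++-extension path runs and the one-element list is unwrapped.
op = wrap_unified_op_sketch(lambda x: [x + 1], lambda x: x + 1, lambda: True)
assert op(41) == 42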
@@ -58,6 +58,7 @@ class RolloutModelConfig:
         disable_any_whitespace: bool = True,
         enable_logprob: bool = False,
         graph_optimization_config: str = None,
+        local_rank: int = 0
     ):
         # Required parameters
         self.model_name_or_path = model_name_or_path
@@ -98,10 +99,11 @@ class RolloutModelConfig:
         self.disable_any_whitespace = disable_any_whitespace
         self.enable_logprob = enable_logprob
         self.graph_optimization_config = graph_optimization_config
+        self.local_rank = local_rank
 
     def __str__(self):
         return "\n".join(f"{k}: {v}" for k, v in self.__dict__.items())
 
     def initialize(self):
         """Initialize the final fd config"""
-        return initialize_fd_config(self, ranks=self.tensor_parallel_size, local_rank=0)
+        return initialize_fd_config(self, ranks=self.tensor_parallel_size, local_rank=self.local_rank)
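Together, the two RolloutModelConfig hunks plumb local_rank from the constructor through to initialize_fd_config, which previously hard-coded local_rank=0 for every rollout worker. A minimal sketch of the resulting call pattern, assuming only the fields visible in this diff (initialize_fd_config is stubbed and the other required constructor arguments are omitted):

# Hypothetical stand-ins: only the fields visible in this diff are modelled, and
# initialize_fd_config is stubbed so the local_rank plumbing can be shown end to end.
class RolloutModelConfig:
    def __init__(self, model_name_or_path, tensor_parallel_size=1, local_rank=0):
        self.model_name_or_path = model_name_or_path
        self.tensor_parallel_size = tensor_parallel_size
        self.local_rank = local_rank  # new: stored instead of hard-coding 0 later

    def initialize(self):
        """Build the final fd config for this worker's own rank."""
        return initialize_fd_config(self, ranks=self.tensor_parallel_size, local_rank=self.local_rank)


def initialize_fd_config(config, ranks, local_rank):
    # Stub for the real initializer; returns just enough to show the plumbing.
    return {"model": config.model_name_or_path, "ranks": ranks, "local_rank": local_rank}


# e.g. the worker with local rank 3 in an 8-way tensor-parallel group:
cfg = RolloutModelConfig("path/to/model", tensor_parallel_size=8, local_rank=3)
assert cfg.initialize()["local_rank"] == 3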