mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 08:37:06 +08:00
[Other] FastDeploy supports set_model_buffer function for encrypted model (#930)
* Update keypointdetection result docs * Update im.copy() to im in examples * Update new Api, fastdeploy::vision::Visualize to fastdeploy::vision * Update SwapBackgroundSegmentation && SwapBackgroundMatting to SwapBackground * Update README_CN.md * Update README_CN.md * Support set_model_buffer function
This commit is contained in:
@@ -222,11 +222,29 @@ class RuntimeOption:
|
||||
|
||||
:param model_path: (str)Path of model file
|
||||
:param params_path: (str)Path of parameters file
|
||||
:param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX
|
||||
:param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
|
||||
"""
|
||||
return self._option.set_model_path(model_path, params_path,
|
||||
model_format)
|
||||
|
||||
def set_model_buffer(self,
                     model_buffer,
                     model_buffer_size,
                     params_buffer,
                     params_buffer_size,
                     model_format=ModelFormat.PADDLE):
    """Specify the memory buffer of model and parameter. Used when model and params are loaded directly from memory.

    :param model_buffer: (bytes)The memory buffer of model
    :param model_buffer_size: (unsigned int)The size of the model data
    :param params_buffer: (bytes)The memory buffer of the combined parameters file
    :param params_buffer_size: (unsigned int)The size of the combined parameters data
    :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
    """
    # Delegate straight to the underlying native RuntimeOption binding;
    # it owns validation of the buffers and sizes.
    return self._option.set_model_buffer(model_buffer, model_buffer_size,
                                         params_buffer, params_buffer_size,
                                         model_format)
|
||||
|
||||
def use_gpu(self, device_id=0):
|
||||
"""Inference with Nvidia GPU
|
||||
|
||||
|
Reference in New Issue
Block a user