[Other] FastDeploy supports set_model_buffer function for encrypted model (#930)

* Update keypointdetection result docs

* Update im.copy() to im in examples

* Update to new API: fastdeploy::vision::Visualize to fastdeploy::vision

* Update SwapBackgroundSegmentation && SwapBackgroundMatting to SwapBackground

* Update README_CN.md

* Update README_CN.md

* Support set_model_buffer function
Author: huangjianhui
Date: 2022-12-21 14:21:28 +08:00
Committed by: GitHub
Parent: b42ec302e6
Commit: 291db315c8
6 changed files with 110 additions and 12 deletions

@@ -222,11 +222,29 @@ class RuntimeOption:
         :param model_path: (str)Path of model file
         :param params_path: (str)Path of parameters file
-        :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX
+        :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
         """
         return self._option.set_model_path(model_path, params_path,
                                            model_format)
 
+    def set_model_buffer(self,
+                         model_buffer,
+                         model_buffer_size,
+                         params_buffer,
+                         params_buffer_size,
+                         model_format=ModelFormat.PADDLE):
+        """Specify the memory buffers of the model and parameters. Used when the model and parameters are loaded directly from memory.
+        :param model_buffer: (bytes)The memory buffer of the model
+        :param model_buffer_size: (unsigned int)The size of the model data
+        :param params_buffer: (bytes)The memory buffer of the combined parameters file
+        :param params_buffer_size: (unsigned int)The size of the combined parameters data
+        :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
+        """
+        return self._option.set_model_buffer(model_buffer, model_buffer_size,
+                                             params_buffer, params_buffer_size,
+                                             model_format)
+
     def use_gpu(self, device_id=0):
         """Inference with Nvidia GPU