mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 16:48:03 +08:00
[Other] FastDeploy TensorRT && ONNX backend support to load model from memory (#1130)
* Update all backends load model from buffer * Delete redundant code * Format code style * Format code style * Delete redundant code * Delete redundant code * Add some FDASSERTs * Update load model from memory when cloning engine * Update clone engine code * Update set_model_buffer api parameters with char pointer * Release memory buffer variables after finish init backends * Fix conflict * Fix bug
This commit is contained in:
@@ -229,20 +229,14 @@ class RuntimeOption:
|
||||
|
||||
def set_model_buffer(self,
                     model_buffer,
                     params_buffer="",
                     model_format=ModelFormat.PADDLE):
    """Specify the memory buffer of model and parameters. Used when the model
    and params are loaded directly from memory instead of from disk files.

    :param model_buffer: (bytes)The memory buffer of the model
    :param params_buffer: (bytes)The memory buffer of the combined parameters
                          file; leave empty when the model has no separate
                          parameters file
    :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
    """
    # Sizes are no longer passed: the underlying C++ option derives buffer
    # lengths from the buffers themselves (see commit #1130).
    return self._option.set_model_buffer(model_buffer, params_buffer,
                                         model_format)
|
||||
|
||||
def use_gpu(self, device_id=0):
|
||||
|
Reference in New Issue
Block a user