[Other] Optimize load model from memory function (#1205)

Optimize the model-loading options in RuntimeOption (consolidate SetModelPath / SetModelBuffer).
This commit is contained in:
Jason
2023-02-01 15:50:38 +08:00
committed by GitHub
parent 29e93fa2dc
commit b4e322af63
5 changed files with 29 additions and 49 deletions

View File

@@ -21,39 +21,19 @@ namespace fastdeploy {
/// \brief Set the model and parameter file paths to load from disk.
/// \param model_path Path to the model file.
/// \param params_path Path to the parameter file (used by Paddle models;
///        pass an empty string for formats without a separate params file).
/// \param format Model format; must be PADDLE, ONNX or TORCHSCRIPT.
void RuntimeOption::SetModelPath(const std::string& model_path,
                                 const std::string& params_path,
                                 const ModelFormat& format) {
  // Reject unsupported formats up front instead of storing them silently.
  FDASSERT(format == ModelFormat::PADDLE || format == ModelFormat::ONNX ||
               format == ModelFormat::TORCHSCRIPT,
           "The model format only can be "
           "ModelFormat::PADDLE/ModelFormat::ONNX/ModelFormat::TORCHSCRIPT.");
  // Assign once for every supported format; the per-format branches that
  // previously duplicated these assignments were redundant.
  model_file = model_path;
  params_file = params_path;
  model_format = format;
  // The model will be read from files, not from an in-memory buffer.
  model_from_memory_ = false;
}
/// \brief Set the model and parameters from in-memory buffers.
/// \param model_buffer Serialized model content held in memory.
/// \param params_buffer Serialized parameter content (meaningful for Paddle
///        models; pass an empty string otherwise).
/// \param format Model format; must be PADDLE or ONNX.
void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
                                   const std::string& params_buffer,
                                   const ModelFormat& format) {
  // Loading from memory is only supported for Paddle and ONNX models.
  FDASSERT(format == ModelFormat::PADDLE || format == ModelFormat::ONNX,
           "The model format only can be "
           "ModelFormat::PADDLE/ModelFormat::ONNX.");
  // Reuse model_file/params_file to carry the raw buffer contents; the
  // model_from_memory_ flag tells the backend to treat them as data rather
  // than file paths, replacing the old dedicated model_buffer_/params_buffer_
  // members.
  model_file = model_buffer;
  params_file = params_buffer;
  model_format = format;
  model_from_memory_ = true;
}
void RuntimeOption::UseGpu(int gpu_id) {