Merge branch 'develop' into set_stream_infer-shareExData
@@ -35,6 +35,7 @@ void BindOption(pybind11::module& m) {
       .def(pybind11::init())
       .def("set_model_path", &RuntimeOption::SetModelPath)
       .def("set_model_buffer", &RuntimeOption::SetModelBuffer)
+      .def("set_encryption_key", &RuntimeOption::SetEncryptionKey)
       .def("use_gpu", &RuntimeOption::UseGpu)
       .def("use_cpu", &RuntimeOption::UseCpu)
       .def("use_rknpu2", &RuntimeOption::UseRKNPU2)
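The hunk above adds a single binding: set_encryption_key becomes callable
from Python and forwards to RuntimeOption::SetEncryptionKey. A minimal,
self-contained sketch of the same pybind11 pattern (the Option/set_key
names are illustrative stand-ins, not FastDeploy's actual module):

    // sketch_bindings.cc -- illustrative pybind11 binding, not FastDeploy code
    #include <pybind11/pybind11.h>
    #include <string>

    struct Option {
      std::string key_;
      void SetKey(const std::string& key) { key_ = key; }
    };

    PYBIND11_MODULE(demo, m) {
      pybind11::class_<Option>(m, "Option")
          .def(pybind11::init())             // default constructor, as above
          .def("set_key", &Option::SetKey);  // mirrors set_encryption_key
    }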
@@ -104,7 +104,33 @@ bool AutoSelectBackend(RuntimeOption& option) {
 bool Runtime::Init(const RuntimeOption& _option) {
   option = _option;
 
+  // decrypt encrypted model
+  if ("" != option.encryption_key_) {
+#ifdef ENABLE_ENCRYPTION
+    if (option.model_from_memory_) {
+      option.model_file = Decrypt(option.model_file, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        option.params_file =
+            Decrypt(option.params_file, option.encryption_key_);
+      }
+    } else {
+      std::string model_buffer = "";
+      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
+               "Fail to read binary from model file");
+      option.model_file = Decrypt(model_buffer, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        std::string params_buffer = "";
+        FDASSERT(ReadBinaryFromFile(option.params_file, &params_buffer),
+                 "Fail to read binary from parameter file");
+        option.params_file = Decrypt(params_buffer, option.encryption_key_);
+      }
+      option.model_from_memory_ = true;
+    }
+#else
+    FDERROR << "The FastDeploy didn't compile with encryption function."
+            << std::endl;
+#endif
+  }
   // Choose default backend by model format and device if backend is not
   // specified
   if (option.backend == Backend::UNKNOWN) {
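Taken together, the block added above gives Runtime::Init two decryption
paths: when the model was loaded from memory, model_file/params_file already
hold the encrypted buffers and are decrypted in place; otherwise the files are
first read into buffers, decrypted, and model_from_memory_ is set to true so
later stages treat the decrypted strings as an in-memory model. A hedged
caller-side sketch (the header path, file names, and key are assumptions for
illustration, not taken from this diff):

    #include "fastdeploy/runtime/runtime.h"  // assumed header location

    int main() {
      fastdeploy::RuntimeOption option;
      // Illustrative names for files produced by an encryption step.
      option.SetModelPath("model.pdmodel.encrypted", "model.pdiparams.encrypted");
      option.SetEncryptionKey("<key-from-encryption-step>");  // placeholder key
      fastdeploy::Runtime runtime;
      // In builds without ENABLE_ENCRYPTION, Init logs FDERROR and the
      // model bytes are left encrypted.
      return runtime.Init(option) ? 0 : -1;
    }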
@@ -23,6 +23,9 @@
 #include "fastdeploy/core/fd_tensor.h"
 #include "fastdeploy/runtime/runtime_option.h"
 #include "fastdeploy/utils/perf.h"
+#ifdef ENABLE_ENCRYPTION
+#include "fastdeploy/encryption/include/decrypt.h"
+#endif
 
 /** \brief All C++ FastDeploy APIs are defined inside this namespace
  *
@@ -36,6 +36,15 @@ void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
   model_from_memory_ = true;
 }
 
+void RuntimeOption::SetEncryptionKey(const std::string& encryption_key) {
+#ifdef ENABLE_ENCRYPTION
+  encryption_key_ = encryption_key;
+#else
+  FDERROR << "The FastDeploy didn't compile with encryption function."
+          << std::endl;
+#endif
+}
+
 void RuntimeOption::UseGpu(int gpu_id) {
 #ifdef WITH_GPU
   device = Device::GPU;
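The setter above is a compile-time feature gate: the key is stored only when
the library was built with ENABLE_ENCRYPTION defined, and otherwise the call
degrades to an error log. The same pattern in isolation (illustrative names,
with std::cerr standing in for FDERROR):

    #include <iostream>
    #include <string>

    static std::string g_key;  // stand-in for RuntimeOption::encryption_key_

    void SetKeyIfSupported(const std::string& key) {
    #ifdef ENABLE_ENCRYPTION   // assumed to be defined by the build system
      g_key = key;             // feature compiled in: accept the key
    #else
      std::cerr << "built without encryption support" << std::endl;
    #endif
    }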
@@ -59,6 +59,12 @@ struct FASTDEPLOY_DECL RuntimeOption {
                     const std::string& params_buffer = "",
                     const ModelFormat& format = ModelFormat::PADDLE);
 
+  /** \brief When loading an encrypted model, encryption_key is required to decrypt the model
+   *
+   * \param[in] encryption_key The key for decrypting the model
+   */
+  void SetEncryptionKey(const std::string& encryption_key);
+
   /// Use cpu to inference, the runtime will inference on CPU by default
   void UseCpu();
   /// Use Nvidia GPU to inference
@@ -179,6 +185,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// format of input model
   ModelFormat model_format = ModelFormat::PADDLE;
 
+  std::string encryption_key_ = "";
+
   // for cpu inference
   // default will let the backend choose their own default value
   int cpu_thread_num = -1;
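The empty-string default on encryption_key_ doubles as the sentinel that
Runtime::Init checks ("" != option.encryption_key_) before attempting any
decryption. A tiny sketch of that convention (OptionLike is illustrative):

    #include <string>

    struct OptionLike {
      std::string encryption_key_ = "";  // empty means "model not encrypted"
    };

    bool NeedsDecryption(const OptionLike& opt) {
      return !opt.encryption_key_.empty();  // same test as "" != key
    }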