Add decrypt function to load encrypted model

Author: felixhjh
Date: 2023-02-09 14:04:10 +00:00
Parent: 05be955187
Commit: 942cee83d7
5 changed files with 45 additions and 1 deletion


@@ -35,6 +35,7 @@ void BindOption(pybind11::module& m) {
       .def(pybind11::init())
       .def("set_model_path", &RuntimeOption::SetModelPath)
       .def("set_model_buffer", &RuntimeOption::SetModelBuffer)
+      .def("set_encryption_key", &RuntimeOption::SetEncryptionKey)
       .def("use_gpu", &RuntimeOption::UseGpu)
       .def("use_cpu", &RuntimeOption::UseCpu)
       .def("use_rknpu2", &RuntimeOption::UseRKNPU2)


@@ -104,7 +104,31 @@ bool AutoSelectBackend(RuntimeOption& option) {
 bool Runtime::Init(const RuntimeOption& _option) {
   option = _option;
+  if ("" != option.encryption_key_) {
+#ifdef ENABLE_ENCRYPTION
+    if (option.model_from_memory_) {
+      option.model_file = Decrypt(option.model_file, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        option.params_file = Decrypt(option.params_file, option.encryption_key_);
+      }
+    } else {
+      std::string model_buffer = "";
+      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
+               "Fail to read binary from model file");
+      option.model_file = Decrypt(model_buffer, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        std::string params_buffer = "";
+        FDASSERT(ReadBinaryFromFile(option.params_file, &params_buffer),
+                 "Fail to read binary from parameter file");
+        option.params_file = Decrypt(params_buffer, option.encryption_key_);
+      }
+      option.model_from_memory_ = true;
+    }
+#else
+    FDERROR << "FastDeploy was not compiled with the encryption function."
+            << std::endl;
+#endif
+  }
   // Choose default backend by model format and device if backend is not
   // specified
   if (option.backend == Backend::UNKNOWN) {
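
For orientation, here is a minimal Python sketch of the in-memory branch above (not part of this commit): the caller supplies already-encrypted buffers through set_model_buffer and the key through set_encryption_key, and Runtime::Init then decrypts the buffers in place. The file names and key are placeholders, and passing the option straight to fd.Runtime is assumed to work as in FastDeploy's existing runtime examples.

```python
import fastdeploy as fd

# Ciphertext produced by FastDeploy's encryption tooling (paths are placeholders).
with open("model.encrypted", "rb") as f:
    model_buffer = f.read()
with open("params.encrypted", "rb") as f:
    params_buffer = f.read()

option = fd.RuntimeOption()
# set_model_buffer marks model_from_memory_ as true, so Init() takes the
# "decrypt the buffers directly" branch shown above.
option.set_model_buffer(model_buffer, params_buffer, fd.ModelFormat.PADDLE)
option.set_encryption_key("my-secret-key")  # requires a build with ENABLE_ENCRYPTION

runtime = fd.Runtime(option)  # assumed constructor usage
```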


@@ -23,6 +23,9 @@
#include "fastdeploy/core/fd_tensor.h" #include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/runtime/runtime_option.h" #include "fastdeploy/runtime/runtime_option.h"
#include "fastdeploy/utils/perf.h" #include "fastdeploy/utils/perf.h"
#ifdef ENABLE_ENCRYPTION
#include "fastdeploy/encryption/include/decrypt.h"
#endif
/** \brief All C++ FastDeploy APIs are defined inside this namespace /** \brief All C++ FastDeploy APIs are defined inside this namespace
* *


@@ -36,6 +36,15 @@ void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
   model_from_memory_ = true;
 }
 
+void RuntimeOption::SetEncryptionKey(const std::string& encryption_key) {
+#ifdef ENABLE_ENCRYPTION
+  encryption_key_ = encryption_key;
+#else
+  FDERROR << "FastDeploy was not compiled with the encryption function."
+          << std::endl;
+#endif
+}
+
 void RuntimeOption::UseGpu(int gpu_id) {
 #ifdef WITH_GPU
   device = Device::GPU;
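
A small usage sketch, not from this commit: because SetEncryptionKey degrades to an error log when ENABLE_ENCRYPTION is off, a reasonable pattern is to keep the key out of source control and set it only when one is provided. The environment-variable name below is an arbitrary example, not a FastDeploy convention.

```python
import os

import fastdeploy as fd

# Variable name is illustrative only; any secret store would do.
key = os.environ.get("MODEL_DECRYPTION_KEY", "")

option = fd.RuntimeOption()
if key:
    # On builds without ENABLE_ENCRYPTION this call only prints the FDERROR
    # shown above and leaves the option unchanged; it does not raise.
    option.set_encryption_key(key)
```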


@@ -187,6 +187,13 @@ class RuntimeOption:
         return self._option.set_model_buffer(model_buffer, params_buffer,
                                              model_format)
 
+    def set_encryption_key(self, encryption_key):
+        """When loading an encrypted model, encryption_key is required to decrypt the model.
+
+        :param encryption_key: (str) The key for decrypting the model
+        """
+        return self._option.set_encryption_key(encryption_key)
+
     def use_gpu(self, device_id=0):
         """Inference with Nvidia GPU