diff --git a/fastdeploy/runtime/option_pybind.cc b/fastdeploy/runtime/option_pybind.cc
index 1c786459b..7af90d831 100644
--- a/fastdeploy/runtime/option_pybind.cc
+++ b/fastdeploy/runtime/option_pybind.cc
@@ -35,6 +35,7 @@ void BindOption(pybind11::module& m) {
       .def(pybind11::init())
       .def("set_model_path", &RuntimeOption::SetModelPath)
       .def("set_model_buffer", &RuntimeOption::SetModelBuffer)
+      .def("set_encryption_key", &RuntimeOption::SetEncryptionKey)
       .def("use_gpu", &RuntimeOption::UseGpu)
       .def("use_cpu", &RuntimeOption::UseCpu)
       .def("use_rknpu2", &RuntimeOption::UseRKNPU2)
diff --git a/fastdeploy/runtime/runtime.cc b/fastdeploy/runtime/runtime.cc
index 70714e4f0..2a00dfda4 100644
--- a/fastdeploy/runtime/runtime.cc
+++ b/fastdeploy/runtime/runtime.cc
@@ -104,7 +104,36 @@ bool AutoSelectBackend(RuntimeOption& option) {
 
 bool Runtime::Init(const RuntimeOption& _option) {
   option = _option;
-
+  // If an encryption key was provided, decrypt the model (and parameter)
+  // buffers in place before any backend reads them.
+  if (!option.encryption_key_.empty()) {
+#ifdef ENABLE_ENCRYPTION
+    if (option.model_from_memory_) {
+      // Buffers are already in memory: decrypt them directly.
+      option.model_file = Decrypt(option.model_file, option.encryption_key_);
+      if (!option.params_file.empty()) {
+        option.params_file = Decrypt(option.params_file, option.encryption_key_);
+      }
+    } else {
+      // Read the encrypted files from disk, decrypt into memory, and continue
+      // as if the model had been supplied as an in-memory buffer.
+      std::string model_buffer;
+      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
+               "Fail to read binary from model file");
+      option.model_file = Decrypt(model_buffer, option.encryption_key_);
+      if (!option.params_file.empty()) {
+        std::string params_buffer;
+        FDASSERT(ReadBinaryFromFile(option.params_file, &params_buffer),
+                 "Fail to read binary from parameter file");
+        option.params_file = Decrypt(params_buffer, option.encryption_key_);
+      }
+      option.model_from_memory_ = true;
+    }
+#else
+    FDERROR << "The FastDeploy didn't compile with encryption function."
+            << std::endl;
+#endif
+  }
   // Choose default backend by model format and device if backend is not
   // specified
   if (option.backend == Backend::UNKNOWN) {
diff --git a/fastdeploy/runtime/runtime.h b/fastdeploy/runtime/runtime.h
index 6e7dc9629..772773007 100755
--- a/fastdeploy/runtime/runtime.h
+++ b/fastdeploy/runtime/runtime.h
@@ -23,6 +23,9 @@
 #include "fastdeploy/core/fd_tensor.h"
 #include "fastdeploy/runtime/runtime_option.h"
 #include "fastdeploy/utils/perf.h"
+#ifdef ENABLE_ENCRYPTION
+#include "fastdeploy/encryption/include/decrypt.h"
+#endif
 
 /** \brief All C++ FastDeploy APIs are defined inside this namespace
  *
diff --git a/fastdeploy/runtime/runtime_option.cc b/fastdeploy/runtime/runtime_option.cc
index c09352d58..8568b3b7f 100644
--- a/fastdeploy/runtime/runtime_option.cc
+++ b/fastdeploy/runtime/runtime_option.cc
@@ -36,6 +36,17 @@ void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
   model_from_memory_ = true;
 }
 
+// Store the key used to decrypt an encrypted model; effective only when
+// FastDeploy was built with ENABLE_ENCRYPTION.
+void RuntimeOption::SetEncryptionKey(const std::string& encryption_key) {
+#ifdef ENABLE_ENCRYPTION
+  encryption_key_ = encryption_key;
+#else
+  FDERROR << "The FastDeploy didn't compile with encryption function."
+          << std::endl;
+#endif
+}
+
 void RuntimeOption::UseGpu(int gpu_id) {
 #ifdef WITH_GPU
   device = Device::GPU;
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index cd7b6641b..1d2fc1c1d 100644
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -187,6 +187,13 @@ class RuntimeOption:
         return self._option.set_model_buffer(model_buffer, params_buffer,
                                              model_format)
 
+    def set_encryption_key(self, encryption_key):
+        """When loading an encrypted model, encryption_key is required to decrypt the model.
+
+        :param encryption_key: (str)The key for decrypting the model
+        """
+        return self._option.set_encryption_key(encryption_key)
+
     def use_gpu(self, device_id=0):
         """Inference with Nvidia GPU