diff --git a/fastdeploy/runtime/option_pybind.cc b/fastdeploy/runtime/option_pybind.cc
index 1c786459b..7af90d831 100644
--- a/fastdeploy/runtime/option_pybind.cc
+++ b/fastdeploy/runtime/option_pybind.cc
@@ -35,6 +35,7 @@ void BindOption(pybind11::module& m) {
       .def(pybind11::init())
       .def("set_model_path", &RuntimeOption::SetModelPath)
       .def("set_model_buffer", &RuntimeOption::SetModelBuffer)
+      .def("set_encryption_key", &RuntimeOption::SetEncryptionKey)
       .def("use_gpu", &RuntimeOption::UseGpu)
       .def("use_cpu", &RuntimeOption::UseCpu)
       .def("use_rknpu2", &RuntimeOption::UseRKNPU2)
diff --git a/fastdeploy/runtime/runtime.cc b/fastdeploy/runtime/runtime.cc
index 70714e4f0..67774a306 100644
--- a/fastdeploy/runtime/runtime.cc
+++ b/fastdeploy/runtime/runtime.cc
@@ -104,7 +104,33 @@ bool AutoSelectBackend(RuntimeOption& option) {
 
 bool Runtime::Init(const RuntimeOption& _option) {
   option = _option;
-
+  // Decrypt the model and parameter buffers if an encryption key is set
+  if (!option.encryption_key_.empty()) {
+#ifdef ENABLE_ENCRYPTION
+    if (option.model_from_memory_) {
+      option.model_file = Decrypt(option.model_file, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        option.params_file =
+            Decrypt(option.params_file, option.encryption_key_);
+      }
+    } else {
+      std::string model_buffer = "";
+      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
+               "Failed to read binary from model file");
+      option.model_file = Decrypt(model_buffer, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        std::string params_buffer = "";
+        FDASSERT(ReadBinaryFromFile(option.params_file, &params_buffer),
+                 "Failed to read binary from parameter file");
+        option.params_file = Decrypt(params_buffer, option.encryption_key_);
+      }
+      option.model_from_memory_ = true;
+    }
+#else
+    FDERROR << "FastDeploy was not compiled with the encryption function."
+            << std::endl;
+#endif
+  }
   // Choose default backend by model format and device if backend is not
   // specified
   if (option.backend == Backend::UNKNOWN) {
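For readers skimming the hunk above: the decrypt-at-init control flow is easiest to see in a short sketch. The following is a hedged Python paraphrase of the C++ logic, not FastDeploy API; `decrypt` and `read_binary` are hypothetical stand-ins for the C++ `Decrypt` and `ReadBinaryFromFile` helpers.

```python
def init_with_decryption(option, decrypt, read_binary):
    # Sketch of Runtime::Init's new decryption step (assumes ENABLE_ENCRYPTION).
    if option.encryption_key:
        if option.model_from_memory:
            # Model/params were passed as buffers: decrypt them in place.
            option.model_file = decrypt(option.model_file, option.encryption_key)
            if option.params_file:
                option.params_file = decrypt(option.params_file,
                                             option.encryption_key)
        else:
            # Model/params were passed as paths: read the encrypted bytes first.
            option.model_file = decrypt(read_binary(option.model_file),
                                        option.encryption_key)
            if option.params_file:
                option.params_file = decrypt(read_binary(option.params_file),
                                             option.encryption_key)
            # The decrypted content now lives in memory, so flip the flag.
            option.model_from_memory = True
```

Note the one-way flag flip in the file-path branch: after decryption the runtime always proceeds as if the model had been supplied from memory.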
diff --git a/fastdeploy/runtime/runtime.h b/fastdeploy/runtime/runtime.h
index 6e7dc9629..fa8b8f198 100755
--- a/fastdeploy/runtime/runtime.h
+++ b/fastdeploy/runtime/runtime.h
@@ -23,6 +23,9 @@
 #include "fastdeploy/core/fd_tensor.h"
 #include "fastdeploy/runtime/runtime_option.h"
 #include "fastdeploy/utils/perf.h"
+#ifdef ENABLE_ENCRYPTION
+#include "fastdeploy/encryption/include/decrypt.h"
+#endif
 
 /** \brief All C++ FastDeploy APIs are defined inside this namespace
 *
@@ -99,7 +102,7 @@ struct FASTDEPLOY_DECL Runtime {
    */
   double GetProfileTime() {
     return backend_->benchmark_result_.time_of_runtime;
-  }
+  }
 
 private:
  void CreateOrtBackend();
diff --git a/fastdeploy/runtime/runtime_option.cc b/fastdeploy/runtime/runtime_option.cc
index c09352d58..d074a9603 100644
--- a/fastdeploy/runtime/runtime_option.cc
+++ b/fastdeploy/runtime/runtime_option.cc
@@ -36,6 +36,15 @@ void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
   model_from_memory_ = true;
 }
 
+void RuntimeOption::SetEncryptionKey(const std::string& encryption_key) {
+#ifdef ENABLE_ENCRYPTION
+  encryption_key_ = encryption_key;
+#else
+  FDERROR << "FastDeploy was not compiled with the encryption function."
+          << std::endl;
+#endif
+}
+
 void RuntimeOption::UseGpu(int gpu_id) {
 #ifdef WITH_GPU
   device = Device::GPU;
diff --git a/fastdeploy/runtime/runtime_option.h b/fastdeploy/runtime/runtime_option.h
index c45dd2fe7..a36ac5459 100755
--- a/fastdeploy/runtime/runtime_option.h
+++ b/fastdeploy/runtime/runtime_option.h
@@ -59,6 +59,12 @@ struct FASTDEPLOY_DECL RuntimeOption {
                       const std::string& params_buffer = "",
                       const ModelFormat& format = ModelFormat::PADDLE);
 
+  /** \brief When loading an encrypted model, the encryption key is required to decrypt it
+   *
+   * \param[in] encryption_key The key used to decrypt the model
+   */
+  void SetEncryptionKey(const std::string& encryption_key);
+
   /// Use cpu to inference, the runtime will inference on CPU by default
   void UseCpu();
   /// Use Nvidia GPU to inference
@@ -178,6 +184,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// format of input model
   ModelFormat model_format = ModelFormat::PADDLE;
 
+  std::string encryption_key_ = "";
+
   // for cpu inference
   // default will let the backend choose their own default value
   int cpu_thread_num = -1;
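Note that `SetEncryptionKey` only stores the key on the option; the actual decryption happens later in `Runtime::Init` (see the runtime.cc hunk above). A minimal usage sketch in Python, assuming the encrypted files and `encryption_key.txt` produced by the tutorial below; the paths are illustrative:

```python
import fastdeploy as fd

# The key written out by encrypt.py (path is illustrative).
key = open("ResNet50_vd_infer_encrypt/encryption_key.txt").read().strip()

# Mode 1: point the option at the encrypted files on disk;
# Runtime::Init reads and decrypts them.
option = fd.RuntimeOption()
option.set_model_path("ResNet50_vd_infer_encrypt/__model__.encrypted",
                      "ResNet50_vd_infer_encrypt/__params__.encrypted")
option.set_encryption_key(key)

# Mode 2: pass encrypted buffers directly; they are decrypted in place.
option2 = fd.RuntimeOption()
with open("ResNet50_vd_infer_encrypt/__model__.encrypted") as m, \
        open("ResNet50_vd_infer_encrypt/__params__.encrypted") as p:
    option2.set_model_buffer(m.read(), p.read())
option2.set_encryption_key(key)
```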
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index cd7b6641b..a9004a15a 100644
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -187,6 +187,12 @@ class RuntimeOption:
         return self._option.set_model_buffer(model_buffer, params_buffer,
                                              model_format)
 
+    def set_encryption_key(self, encryption_key):
+        """When loading an encrypted model, the encryption key is required to decrypt it
+        :param encryption_key: (str)The key used to decrypt the model
+        """
+        return self._option.set_encryption_key(encryption_key)
+
     def use_gpu(self, device_id=0):
         """Inference with Nvidia GPU
 
@@ -583,10 +589,12 @@ class RuntimeOption:
                        replica_num=1,
                        available_memory_proportion=1.0,
                        enable_half_partial=False):
-        logging.warning("`RuntimeOption.set_ipu_config` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.set_ipu_config()` instead.")
-        self._option.paddle_infer_option.set_ipu_config(enable_fp16, replica_num,
-                                                        available_memory_proportion,
-                                                        enable_half_partial)
+        logging.warning(
+            "`RuntimeOption.set_ipu_config` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.set_ipu_config()` instead."
+        )
+        self._option.paddle_infer_option.set_ipu_config(
+            enable_fp16, replica_num, available_memory_proportion,
+            enable_half_partial)
 
     @property
     def poros_option(self):
@@ -657,7 +665,8 @@ class RuntimeOption:
                 continue
             if hasattr(getattr(self._option, attr), "__call__"):
                 continue
-            message += "  {} : {}\t\n".format(attr, getattr(self._option, attr))
+            message += "  {} : {}\t\n".format(attr,
+                                              getattr(self._option, attr))
         message.strip("\n")
         message += ")"
         return message
diff --git a/tutorials/encrypt_model/README.md b/tutorials/encrypt_model/README.md
new file mode 100644
index 000000000..755671686
--- /dev/null
+++ b/tutorials/encrypt_model/README.md
@@ -0,0 +1,49 @@
+English | [中文](README_CN.md)
+
+# Generate an encrypted model with FastDeploy
+
+This directory provides `encrypt.py` to quickly encrypt the model and parameter files of ResNet50_vd.
+
+## Encryption
+```bash
+# Download the example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/tutorials/encrypt_model
+
+# Download the ResNet50_vd model file
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
+tar -xvf ResNet50_vd_infer.tgz
+
+python encrypt.py --model_file ResNet50_vd_infer/inference.pdmodel --params_file ResNet50_vd_infer/inference.pdiparams --encrypted_model_dir ResNet50_vd_infer_encrypt
+```
+>> **Note** After encryption completes, the ResNet50_vd_infer_encrypt folder will be generated, containing three files: `__model__.encrypted`, `__params__.encrypted`, and `encryption_key.txt`, where `encryption_key.txt` contains the encryption key. You also need to copy the `inference_cls.yaml` configuration file from the original folder to ResNet50_vd_infer_encrypt for subsequent deployment.
+
+### Python encryption interface
+
+Use the encryption interface as follows
+```python
+import fastdeploy as fd
+
+# When no key is given, a key is generated automatically;
+# otherwise, the file is encrypted with the given key.
+model_file = open("ResNet50_vd_infer/inference.pdmodel", "rb")
+params_file = open("ResNet50_vd_infer/inference.pdiparams", "rb")
+encrypted_model, key = fd.encryption.encrypt(model_file.read())
+# Reuse the same key to encrypt the parameter file
+encrypted_params, key = fd.encryption.encrypt(params_file.read(), key)
+```
+
+### Deploy the encrypted model with FastDeploy (decryption)
+
+With the following settings, FastDeploy can deploy the encrypted model
+```python
+import fastdeploy as fd
+option = fd.RuntimeOption()
+option.set_encryption_key(key)
+```
+
+```C++
+fastdeploy::RuntimeOption option;
+option.SetEncryptionKey(key);
+```
+>> **Note** For more details about RuntimeOption, please refer to the [RuntimeOption Python documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html) and the [RuntimeOption C++ documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
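To make the README's two snippets concrete end to end, here is a hedged sketch that wires the decryption key into an actual model load. It assumes the encrypted directory produced by the tutorial and the standard `fd.vision.classification.PaddleClasModel` loader; adjust the paths and model class to your setup.

```python
import fastdeploy as fd

encrypted_dir = "ResNet50_vd_infer_encrypt"
key = open(encrypted_dir + "/encryption_key.txt").read()

option = fd.RuntimeOption()
option.set_encryption_key(key)  # decryption happens inside Runtime::Init

# The config file was copied from the original model folder (see the note above).
model = fd.vision.classification.PaddleClasModel(
    encrypted_dir + "/__model__.encrypted",
    encrypted_dir + "/__params__.encrypted",
    encrypted_dir + "/inference_cls.yaml",
    runtime_option=option)
```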
diff --git a/tutorials/encrypt_model/README_CN.md b/tutorials/encrypt_model/README_CN.md
new file mode 100644
index 000000000..c2f80ffd4
--- /dev/null
+++ b/tutorials/encrypt_model/README_CN.md
@@ -0,0 +1,51 @@
+[English](README.md) | 中文
+
+# Generate an encrypted model with FastDeploy
+
+This directory provides `encrypt.py` to quickly encrypt the model and parameter files of ResNet50_vd.
+
+FastDeploy supports a symmetric encryption scheme: the model is encrypted with the symmetric encryption algorithm (AES) from OpenSSL, which also produces the key.
+
+## Encryption
+```bash
+# Download the example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/tutorials/encrypt_model
+
+# Download the ResNet50_vd model file
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
+tar -xvf ResNet50_vd_infer.tgz
+
+python encrypt.py --model_file ResNet50_vd_infer/inference.pdmodel --params_file ResNet50_vd_infer/inference.pdiparams --encrypted_model_dir ResNet50_vd_infer_encrypt
+```
+>> **Note** After encryption completes, the ResNet50_vd_infer_encrypt folder will be generated, containing three files: `__model__.encrypted`, `__params__.encrypted`, and `encryption_key.txt`, where `encryption_key.txt` contains the encryption key. You also need to copy the `inference_cls.yaml` configuration file from the original folder to ResNet50_vd_infer_encrypt for subsequent deployment.
+
+### Python encryption interface
+
+Use the encryption interface via the following settings
+```python
+import fastdeploy as fd
+
+# When no key is given, a key is generated automatically;
+# otherwise, the file is encrypted with the given key.
+model_file = open("ResNet50_vd_infer/inference.pdmodel", "rb")
+params_file = open("ResNet50_vd_infer/inference.pdiparams", "rb")
+encrypted_model, key = fd.encryption.encrypt(model_file.read())
+# Reuse the same key to encrypt the parameter file
+encrypted_params, key = fd.encryption.encrypt(params_file.read(), key)
+```
+
+### Deploy the encrypted model with FastDeploy
+
+With the following settings, FastDeploy can run inference on the encrypted model
+```python
+import fastdeploy as fd
+option = fd.RuntimeOption()
+option.set_encryption_key(key)
+```
+
+```C++
+fastdeploy::RuntimeOption option;
+option.SetEncryptionKey(key);
+```
+>> **Note** For more details about RuntimeOption, please refer to the [RuntimeOption Python documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html) and the [RuntimeOption C++ documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
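If you want to sanity-check a key/file pair without standing up a runtime, a decrypt round trip is a quick test. This sketch assumes a `fd.encryption.decrypt(data, key)` counterpart is exposed alongside `fd.encryption.encrypt`; if your build does not ship it, rely on `set_encryption_key` plus a normal model load instead.

```python
import fastdeploy as fd

plain = open("ResNet50_vd_infer/inference.pdmodel", "rb").read()
encrypted, key = fd.encryption.encrypt(plain)

# Round trip: decrypting with the same key should reproduce the input.
# (fd.encryption.decrypt is assumed here; see the lead-in.)
restored = fd.encryption.decrypt(encrypted, key)
print("round trip ok:", restored == plain)
```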
diff --git a/tutorials/encrypt_model/encrypt.py b/tutorials/encrypt_model/encrypt.py
new file mode 100644
index 000000000..f4d80ed2f
--- /dev/null
+++ b/tutorials/encrypt_model/encrypt.py
@@ -0,0 +1,46 @@
+import fastdeploy as fd
+import os
+
+
+def parse_arguments():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--encrypted_model_dir",
+        required=False,
+        help="Path of the output directory for the encrypted model.")
+    parser.add_argument(
+        "--model_file", required=True, help="Path of the model file.")
+    parser.add_argument(
+        "--params_file",
+        required=True,
+        help="Path of the parameters file.")
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_arguments()
+    model_buffer = open(args.model_file, 'rb')
+    params_buffer = open(args.params_file, 'rb')
+    encrypted_model, key = fd.encryption.encrypt(model_buffer.read())
+    # Use the same key to encrypt the parameter file
+    encrypted_params, key = fd.encryption.encrypt(params_buffer.read(), key)
+    encrypted_model_dir = "encrypt_model_dir"
+    if args.encrypted_model_dir:
+        encrypted_model_dir = args.encrypted_model_dir
+    model_buffer.close()
+    params_buffer.close()
+    os.makedirs(encrypted_model_dir, exist_ok=True)
+    with open(os.path.join(encrypted_model_dir, "__model__.encrypted"),
+              "w") as f:
+        f.write(encrypted_model)
+
+    with open(os.path.join(encrypted_model_dir, "__params__.encrypted"),
+              "w") as f:
+        f.write(encrypted_params)
+
+    with open(os.path.join(encrypted_model_dir, "encryption_key.txt"),
+              "w") as f:
+        f.write(key)
+    print("encryption key: ", key)
+    print("encryption success")
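One practical follow-up the script leaves to the user: copying the inference config next to the encrypted files, as both READMEs note. A small hedged helper, with paths assuming the ResNet50_vd tutorial layout:

```python
import os
import shutil

src_dir = "ResNet50_vd_infer"
dst_dir = "ResNet50_vd_infer_encrypt"

# encrypt.py writes __model__.encrypted, __params__.encrypted and
# encryption_key.txt; the deployment config still has to be copied over.
shutil.copy(os.path.join(src_dir, "inference_cls.yaml"), dst_dir)
for name in sorted(os.listdir(dst_dir)):
    print("ready for deployment:", os.path.join(dst_dir, name))
```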