Optimize OCR system code (#209)

* Support PPYOLOE Plus model

* Optimize OCR system code

* Modify example code

* Fix patchelf for OpenVINO

* Optimize OCR demo code

* Remove debug code

* Update OCR demo code

Co-authored-by: Jack Zhou <zhoushunjie@baidu.com>
Author: Jason (committed by GitHub)
Date: 2022-09-14 09:46:03 +08:00
Parent: 1452275efe
Commit: 0dd9ecee65
39 changed files with 1041 additions and 909 deletions


@@ -0,0 +1,16 @@
# FastDeploy Runtime Inference Examples
| Example | Language | Description |
| :------- | :------- | :---- |
| python/infer_paddle_paddle_inference.py | Python | Inference of a Paddle model with Paddle Inference on CPU/GPU |
| python/infer_paddle_tensorrt.py | Python | Inference of a Paddle model with TensorRT on GPU |
| python/infer_paddle_openvino.py | Python | Inference of a Paddle model with OpenVINO on CPU |
| python/infer_paddle_onnxruntime.py | Python | Inference of a Paddle model with ONNX Runtime on CPU/GPU |
| python/infer_onnx_openvino.py | Python | Inference of an ONNX model with OpenVINO on CPU |
| python/infer_onnx_tensorrt.py | Python | Inference of an ONNX model with TensorRT on GPU |
| cpp/infer_paddle_paddle_inference.cc | C++ | Inference of a Paddle model with Paddle Inference on CPU/GPU |
| cpp/infer_paddle_tensorrt.cc | C++ | Inference of a Paddle model with TensorRT on GPU |
| cpp/infer_paddle_openvino.cc | C++ | Inference of a Paddle model with OpenVINO on CPU |
| cpp/infer_paddle_onnxruntime.cc | C++ | Inference of a Paddle model with ONNX Runtime on CPU/GPU |
| cpp/infer_onnx_openvino.cc | C++ | Inference of an ONNX model with OpenVINO on CPU |
| cpp/infer_onnx_tensorrt.cc | C++ | Inference of an ONNX model with TensorRT on GPU |
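
All of these examples follow the same four-step pattern: download a model, configure a `RuntimeOption`, build a `Runtime`, and call `infer` with a dict of named inputs. A minimal sketch of that shared flow, using the `mobilenetv2.onnx` model and OpenVINO backend from the Python examples below:

```
import fastdeploy as fd
import numpy as np

# 1. Download the model
fd.download("https://bj.bcebos.com/fastdeploy/models/mobilenetv2.onnx", path=".")
# 2. Configure the runtime: model path, backend, device
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2.onnx", model_format="onnx")
option.use_openvino_backend()
# 3. Build the runtime
runtime = fd.Runtime(option)
# 4. Run inference on a dict of {input_name: ndarray}
input_name = runtime.get_input_info(0).name
results = runtime.infer({input_name: np.random.rand(1, 3, 224, 224).astype("float32")})
print(results[0].shape)
```

Swapping the backend (e.g. `use_ort_backend()`, `use_trt_backend()`) or the device (`use_cpu()`, `use_gpu(0)`) is the only change between the variants listed above.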


@@ -0,0 +1,39 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import numpy as np
# Download the model
model_url = "https://bj.bcebos.com/fastdeploy/models/mobilenetv2.onnx"
fd.download(model_url, path=".")
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2.onnx", model_format="onnx")
option.use_openvino_backend()
# Build the runtime
runtime = fd.Runtime(option)
# Get the model's input name
input_name = runtime.get_input_info(0).name
# Run inference with random input data
results = runtime.infer({
input_name: np.random.rand(1, 3, 224, 224).astype("float32")
})
print(results[0].shape)


@@ -0,0 +1,41 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import numpy as np
# Download the model
model_url = "https://bj.bcebos.com/fastdeploy/models/mobilenetv2.onnx"
fd.download(model_url, path=".")
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2.onnx", model_format="onnx")
# **** GPU configuration ****
option.use_gpu(0)
option.use_trt_backend()
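# Optionally pin TensorRT dynamic input shapes (min/opt/max). This mirrors
# option.set_trt_input_shape(...) as used in the OCR infer.py in this PR; the
# tensor name "inputs" and the fixed 224x224 shapes here are illustrative
# assumptions and must match the model's actual input:
# option.set_trt_input_shape("inputs", [1, 3, 224, 224], [1, 3, 224, 224],
#                            [1, 3, 224, 224])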
# Build the runtime
runtime = fd.Runtime(option)
# Get the model's input name
input_name = runtime.get_input_info(0).name
# Run inference with random input data
results = runtime.infer({
input_name: np.random.rand(1, 3, 224, 224).astype("float32")
})
print(results[0].shape)


@@ -0,0 +1,47 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import numpy as np
# Download and decompress the model
model_url = "https://bj.bcebos.com/fastdeploy/models/mobilenetv2.tgz"
fd.download_and_decompress(model_url)
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2/inference.pdmodel",
"mobilenetv2/inference.pdiparams")
# **** CPU configuration ****
option.use_cpu()
option.use_ort_backend()
option.set_cpu_thread_num(12)
# **** GPU configuration ****
# To run on GPU instead, uncomment the line below
# option.use_gpu(0)
# Build the runtime
runtime = fd.Runtime(option)
# Get the model's input name
input_name = runtime.get_input_info(0).name
# Run inference with random input data
results = runtime.infer({
input_name: np.random.rand(1, 3, 224, 224).astype("float32")
})
print(results[0].shape)


@@ -0,0 +1,42 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import numpy as np
# Download and decompress the model
model_url = "https://bj.bcebos.com/fastdeploy/models/mobilenetv2.tgz"
fd.download_and_decompress(model_url)
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2/inference.pdmodel",
"mobilenetv2/inference.pdiparams")
option.use_cpu()
option.use_openvino_backend()
option.set_cpu_thread_num(12)
# Build the runtime
runtime = fd.Runtime(option)
# Get the model's input name
input_name = runtime.get_input_info(0).name
# Run inference with random input data
results = runtime.infer({
input_name: np.random.rand(1, 3, 224, 224).astype("float32")
})
print(results[0].shape)


@@ -0,0 +1,47 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import numpy as np
# Download and decompress the model
model_url = "https://bj.bcebos.com/fastdeploy/models/mobilenetv2.tgz"
fd.download_and_decompress(model_url)
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2/inference.pdmodel",
"mobilenetv2/inference.pdiparams")
# **** CPU configuration ****
option.use_cpu()
option.use_paddle_backend()
option.set_cpu_thread_num(12)
# **** GPU configuration ****
# To run on GPU instead, uncomment the line below
# option.use_gpu(0)
# Build the runtime
runtime = fd.Runtime(option)
# Get the model's input name
input_name = runtime.get_input_info(0).name
# Run inference with random input data
results = runtime.infer({
input_name: np.random.rand(1, 3, 224, 224).astype("float32")
})
print(results[0].shape)


@@ -0,0 +1,42 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import numpy as np
# Download and decompress the model
model_url = "https://bj.bcebos.com/fastdeploy/models/mobilenetv2.tgz"
fd.download_and_decompress(model_url)
option = fd.RuntimeOption()
option.set_model_path("mobilenetv2/inference.pdmodel",
"mobilenetv2/inference.pdiparams")
# **** GPU configuration ****
option.use_gpu(0)
option.use_trt_backend()
# Build the runtime
runtime = fd.Runtime(option)
# Get the model's input name
input_name = runtime.get_input_info(0).name
# Run inference with random input data
results = runtime.infer({
input_name: np.random.rand(1, 3, 224, 224).astype("float32")
})
print(results[0].shape)


@@ -28,10 +28,9 @@ tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar.gz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ch_PP-OCRv2_rec_infer.tar.gz
tar -xvf ch_PP-OCRv2_rec_infer.tar.gz
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/doc/imgs/12.jpg
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/utils/ppocr_keys_v1.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
# Inference on CPU
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
@@ -39,8 +38,6 @@ wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
# Inference with TensorRT on GPU
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
# OCR also supports any combination of the det/cls/rec models; for example, to run without the cls model, pass an empty string in its place:
./infer_demo ./ch_PP-OCRv2_det_infer "" ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
```
After running, the visualized result is shown in the figure below
@@ -53,9 +50,9 @@ wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/
### PPOCRSystemv2 Class
```
fastdeploy::application::ocrsystem::PPOCRSystemv2(fastdeploy::vision::ocr::DBDetector* ocr_det = nullptr,
fastdeploy::vision::ocr::Classifier* ocr_cls = nullptr,
fastdeploy::vision::ocr::Recognizer* ocr_rec = nullptr);
fastdeploy::application::ocrsystem::PPOCRSystemv2(fastdeploy::vision::ocr::DBDetector* det_model,
fastdeploy::vision::ocr::Classifier* cls_model,
fastdeploy::vision::ocr::Recognizer* rec_model);
```
PPOCRSystemv2 is initialized by chaining the detection, classification, and recognition models
@@ -66,6 +63,18 @@ PPOCRSystemv2 is initialized by chaining the detection, classification, and recognition models
> * **Classifier**(model): the classification model in OCR
> * **Recognizer**(model): the recognition model in OCR
```
fastdeploy::application::ocrsystem::PPOCRSystemv2(fastdeploy::vision::ocr::DBDetector* det_model,
fastdeploy::vision::ocr::Recognizer* rec_model);
```
PPOCRSystemv2 initialized by chaining the detection and recognition models only (no classifier)
**Parameters**
> * **DBDetector**(model): the detection model in OCR
> * **Recognizer**(model): the recognition model in OCR
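The same two construction patterns are exposed through the Python bindings. A minimal sketch, assuming the `fd.vision.ocr` wrapper names used in the infer.py example later in this PR (model paths are placeholders; a `runtime_option` can be passed as in infer.py):

```
import fastdeploy as fd

det_model = fd.vision.ocr.DBDetector(
    "det/inference.pdmodel", "det/inference.pdiparams")
cls_model = fd.vision.ocr.Classifier(
    "cls/inference.pdmodel", "cls/inference.pdiparams")
rec_model = fd.vision.ocr.Recognizer(
    "rec/inference.pdmodel", "rec/inference.pdiparams", "ppocr_keys_v1.txt")

# Full pipeline: det + cls + rec
ocr_system = fd.vision.ocr.PPOCRSystemv2(
    det_model=det_model, cls_model=cls_model, rec_model=rec_model)
# The classifier is optional; per the infer.py comments, cls_model may be None
# ocr_system = fd.vision.ocr.PPOCRSystemv2(
#     det_model=det_model, cls_model=None, rec_model=rec_model)
```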
#### Predict Function
> ```


@@ -19,11 +19,7 @@ const char sep = '\\';
const char sep = '/';
#endif
void CpuInfer(const std::string& det_model_dir,
const std::string& cls_model_dir,
const std::string& rec_model_dir,
const std::string& rec_label_file,
const std::string& image_file) {
void InitAndInfer(const std::string& det_model_dir,
                  const std::string& cls_model_dir,
                  const std::string& rec_model_dir,
                  const std::string& rec_label_file,
                  const std::string& image_file,
                  const fastdeploy::RuntimeOption& option) {
auto det_model_file = det_model_dir + sep + "inference.pdmodel";
auto det_params_file = det_model_dir + sep + "inference.pdiparams";
@@ -32,238 +28,32 @@ void CpuInfer(const std::string& det_model_dir,
auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
auto rec_label = rec_label_file;
fastdeploy::vision::ocr::DBDetector det_model;
fastdeploy::vision::ocr::Classifier cls_model;
fastdeploy::vision::ocr::Recognizer rec_model;
auto det_model = fastdeploy::vision::ocr::DBDetector(
    det_model_file, det_params_file, option);
auto cls_model = fastdeploy::vision::ocr::Classifier(
    cls_model_file, cls_params_file, option);
auto rec_model = fastdeploy::vision::ocr::Recognizer(
    rec_model_file, rec_params_file, rec_label_file, option);
if (!det_model_dir.empty()) {
auto det_option = fastdeploy::RuntimeOption();
det_option.UseCpu();
det_model = fastdeploy::vision::ocr::DBDetector(
det_model_file, det_params_file, det_option);
assert(det_model.Initialized());
assert(cls_model.Initialized());
assert(rec_model.Initialized());
if (!det_model.Initialized()) {
std::cerr << "Failed to initialize det_model." << std::endl;
return;
}
}
if (!cls_model_dir.empty()) {
auto cls_option = fastdeploy::RuntimeOption();
cls_option.UseCpu();
cls_model = fastdeploy::vision::ocr::Classifier(
cls_model_file, cls_params_file, cls_option);
if (!cls_model.Initialized()) {
std::cerr << "Failed to initialize cls_model." << std::endl;
return;
}
}
if (!rec_model_dir.empty()) {
auto rec_option = fastdeploy::RuntimeOption();
rec_option.UseCpu();
rec_model = fastdeploy::vision::ocr::Recognizer(
rec_model_file, rec_params_file, rec_label, rec_option);
if (!rec_model.Initialized()) {
std::cerr << "Failed to initialize rec_model." << std::endl;
return;
}
}
auto ocrv2_app = fastdeploy::application::ocrsystem::PPOCRSystemv2(
&det_model, &cls_model, &rec_model);
// The classification model is optional, so the OCR system can also be chained without it:
// auto ocr_system_v2 = fastdeploy::application::ocrsystem::PPOCRSystemv2(&det_model, &rec_model);
auto ocr_system_v2 = fastdeploy::application::ocrsystem::PPOCRSystemv2(
    &det_model, &cls_model, &rec_model);
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::OCRResult res;
// Run prediction
if (!ocrv2_app.Predict(&im, &res)) {
fastdeploy::vision::OCRResult result;
if (!ocr_system_v2.Predict(&im, &result)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// Print the prediction result
std::cout << res.Str() << std::endl;
std::cout << result.Str() << std::endl;
// Visualize the result
auto vis_img = fastdeploy::vision::Visualize::VisOcr(im_bak, res);
cv::imwrite("vis_result.jpg", vis_img);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& det_model_dir,
const std::string& cls_model_dir,
const std::string& rec_model_dir,
const std::string& rec_label_file,
const std::string& image_file) {
auto det_model_file = det_model_dir + sep + "inference.pdmodel";
auto det_params_file = det_model_dir + sep + "inference.pdiparams";
auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
auto rec_label = rec_label_file;
fastdeploy::vision::ocr::DBDetector det_model;
fastdeploy::vision::ocr::Classifier cls_model;
fastdeploy::vision::ocr::Recognizer rec_model;
// Prepare the models
if (!det_model_dir.empty()) {
auto det_option = fastdeploy::RuntimeOption();
det_option.UseGpu();
det_model = fastdeploy::vision::ocr::DBDetector(
det_model_file, det_params_file, det_option);
if (!det_model.Initialized()) {
std::cerr << "Failed to initialize det_model." << std::endl;
return;
}
}
if (!cls_model_dir.empty()) {
auto cls_option = fastdeploy::RuntimeOption();
cls_option.UseGpu();
cls_model = fastdeploy::vision::ocr::Classifier(
cls_model_file, cls_params_file, cls_option);
if (!cls_model.Initialized()) {
std::cerr << "Failed to initialize cls_model." << std::endl;
return;
}
}
if (!rec_model_dir.empty()) {
auto rec_option = fastdeploy::RuntimeOption();
rec_option.UseGpu();
rec_option.UsePaddleBackend();  // The OCRv2 rec model is not yet supported by
                                // the ORT backend or Paddle Inference v2.3.2
rec_model = fastdeploy::vision::ocr::Recognizer(
rec_model_file, rec_params_file, rec_label, rec_option);
if (!rec_model.Initialized()) {
std::cerr << "Failed to initialize rec_model." << std::endl;
return;
}
}
auto ocrv2_app = fastdeploy::application::ocrsystem::PPOCRSystemv2(
&det_model, &cls_model, &rec_model);
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::OCRResult res;
// Run prediction
if (!ocrv2_app.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// Print the prediction result
std::cout << res.Str() << std::endl;
// Visualize the result
auto vis_img = fastdeploy::vision::Visualize::VisOcr(im_bak, res);
cv::imwrite("vis_result.jpg", vis_img);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void TrtInfer(const std::string& det_model_dir,
const std::string& cls_model_dir,
const std::string& rec_model_dir,
const std::string& rec_label_file,
const std::string& image_file) {
auto det_model_file = det_model_dir + sep + "inference.pdmodel";
auto det_params_file = det_model_dir + sep + "inference.pdiparams";
auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
auto rec_label = rec_label_file;
fastdeploy::vision::ocr::DBDetector det_model;
fastdeploy::vision::ocr::Classifier cls_model;
fastdeploy::vision::ocr::Recognizer rec_model;
// Prepare the models
if (!det_model_dir.empty()) {
auto det_option = fastdeploy::RuntimeOption();
det_option.UseGpu();
det_option.UseTrtBackend();
det_option.SetTrtInputShape("x", {1, 3, 50, 50}, {1, 3, 640, 640},
{1, 3, 960, 960});
det_model = fastdeploy::vision::ocr::DBDetector(
det_model_file, det_params_file, det_option);
if (!det_model.Initialized()) {
std::cerr << "Failed to initialize det_model." << std::endl;
return;
}
}
if (!cls_model_dir.empty()) {
auto cls_option = fastdeploy::RuntimeOption();
cls_option.UseGpu();
cls_option.UseTrtBackend();
cls_option.SetTrtInputShape("x", {1, 3, 48, 192});
cls_model = fastdeploy::vision::ocr::Classifier(
cls_model_file, cls_params_file, cls_option);
if (!cls_model.Initialized()) {
std::cerr << "Failed to initialize cls_model." << std::endl;
return;
}
}
if (!rec_model_dir.empty()) {
auto rec_option = fastdeploy::RuntimeOption();
rec_option.UseGpu();
rec_option.UseTrtBackend();
rec_option.SetTrtInputShape("x", {1, 3, 48, 10}, {1, 3, 48, 320},
{1, 3, 48, 2000});
rec_model = fastdeploy::vision::ocr::Recognizer(
rec_model_file, rec_params_file, rec_label, rec_option);
if (!rec_model.Initialized()) {
std::cerr << "Failed to initialize rec_model." << std::endl;
return;
}
}
auto ocrv2_app = fastdeploy::application::ocrsystem::PPOCRSystemv2(
&det_model, &cls_model, &rec_model);
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::OCRResult res;
// Run prediction
if (!ocrv2_app.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// Print the prediction result
std::cout << res.Str() << std::endl;
// Visualize the result
auto vis_img = fastdeploy::vision::Visualize::VisOcr(im_bak, res);
cv::imwrite("vis_result.jpg", vis_img);
auto vis_im = fastdeploy::vision::Visualize::VisOcr(im_bak, result);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -282,12 +72,23 @@ int main(int argc, char* argv[]) {
return -1;
}
if (std::atoi(argv[6]) == 0) {
CpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
} else if (std::atoi(argv[6]) == 1) {
GpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
} else if (std::atoi(argv[6]) == 2) {
TrtInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
fastdeploy::RuntimeOption option;
int flag = std::atoi(argv[6]);
if (flag == 0) {
option.UseCpu();
} else if (flag == 1) {
option.UseGpu();
} else if (flag == 2) {
option.UseGpu();
option.UseTrtBackend();
}
std::string det_model_dir = argv[1];
std::string cls_model_dir = argv[2];
std::string rec_model_dir = argv[3];
std::string rec_label_file = argv[4];
std::string test_image = argv[5];
InitAndInfer(det_model_dir, cls_model_dir, rec_model_dir, rec_label_file, test_image, option);
return 0;
}
}


@@ -19,9 +19,9 @@ tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar.gz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ch_PP-OCRv2_rec_infer.tar.gz
tar -xvf ch_PP-OCRv2_rec_infer.tar.gz
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/utils/ppocr_keys_v1.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
# Download the deployment example code
@@ -33,9 +33,7 @@ python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2
# Inference on GPU
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu
# Inference with TensorRT on GPU
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --det_use_trt True --cls_use_trt True --rec_use_trt True
# OCR also supports any combination of the det/cls/rec models; for example, to run without the cls model, pass an empty string to --cls_model:
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model "" --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
```
After running, the visualized result is shown in the figure below


@@ -1,3 +1,17 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
@@ -21,7 +35,6 @@ def parse_arguments():
"--rec_label_file",
required=True,
help="Path of Recognization model of PPOCR.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
parser.add_argument(
@@ -30,114 +43,83 @@ def parse_arguments():
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--det_use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
"--backend",
type=str,
default="default",
help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
)
parser.add_argument(
"--cls_use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
"--device_id",
type=int,
default=0,
help="Define which GPU card used to run model.")
parser.add_argument(
"--rec_use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
"--cpu_thread_num",
type=int,
default=9,
help="Number of threads while inference on CPU.")
return parser.parse_args()
def build_det_option(args):
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
option.use_gpu(0)
if args.det_use_trt:
option.set_cpu_thread_num(args.cpu_thread_num)
if args.backend.lower() == "trt":
assert args.device.lower(
) == "gpu", "TensorRT backend require inference on device GPU."
option.use_trt_backend()
# det_max_side_len defaults to 960; if you change the DET model's max_side_len, change this value to match
det_max_side_len = 960
option.set_trt_input_shape("x", [1, 3, 50, 50], [1, 3, 640, 640],
[1, 3, det_max_side_len, det_max_side_len])
return option
def build_cls_option(args):
option = fd.RuntimeOption()
option.use_paddle_backend()
if args.device.lower() == "gpu":
option.use_gpu()
if args.cls_use_trt:
option.use_trt_backend()
option.set_trt_input_shape("x", [1, 3, 32, 100])
return option
def build_rec_option(args):
option = fd.RuntimeOption()
option.use_paddle_backend()
if args.device.lower() == "gpu":
option.use_gpu()
if args.rec_use_trt:
option.use_trt_backend()
option.set_trt_input_shape("x", [1, 3, 48, 10], [1, 3, 48, 320],
[1, 3, 48, 2000])
elif args.backend.lower() == "ort":
option.use_ort_backend()
elif args.backend.lower() == "paddle":
option.use_paddle_backend()
elif args.backend.lower() == "openvino":
assert args.device.lower(
) == "cpu", "OpenVINO backend require inference on device CPU."
option.use_openvino_backend()
return option
args = parse_arguments()
# Det model
# Detection model: detects text boxes
det_model_file = os.path.join(args.det_model, "inference.pdmodel")
det_params_file = os.path.join(args.det_model, "inference.pdiparams")
# Cls model
# Classification model: text direction classification (optional)
cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
# Rec model
# Recognition model: text recognition
rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
rec_label_file = args.rec_label_file
# Defaults
det_model = fd.vision.ocr.DBDetector()
cls_model = fd.vision.ocr.Classifier()
rec_model = fd.vision.ocr.Recognizer()
# The same deployment configuration is used for all three models
# Users can also configure each model separately as needed
runtime_option = build_option(args)
# Model initialization
if (len(args.det_model) != 0):
det_runtime_option = build_det_option(args)
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=det_runtime_option)
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=runtime_option)
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=runtime_option)
rec_model = fd.vision.ocr.Recognizer(
rec_model_file,
rec_params_file,
rec_label_file,
runtime_option=runtime_option)
if (len(args.cls_model) != 0):
cls_runtime_option = build_cls_option(args)
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=cls_runtime_option)
if (len(args.rec_model) != 0):
rec_runtime_option = build_rec_option(args)
rec_model = fd.vision.ocr.Recognizer(
rec_model_file,
rec_params_file,
rec_label_file,
runtime_option=rec_runtime_option)
ppocrsysv2 = fd.vision.ocr.PPOCRSystemv2(
ocr_det=det_model._model,
ocr_cls=cls_model._model,
ocr_rec=rec_model._model)
# Create the OCR system by chaining the 3 models; cls_model is optional and can be set to None if not needed
ocr_system = fd.vision.ocr.PPOCRSystemv2(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
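# Per the comment above, cls_model can be set to None to chain only detection
# and recognition (a sketch using the same kwargs as above):
# ocr_system = fd.vision.ocr.PPOCRSystemv2(
#     det_model=det_model, cls_model=None, rec_model=rec_model)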
# Load the test image
im = cv2.imread(args.image)
# Predict and print the result
result = ppocrsysv2.predict(im)
result = ocr_system.predict(im)
print(result)
# Visualize the result
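# A minimal visualization sketch; the Python helper name below is an
# assumption mirroring the C++ fastdeploy::vision::Visualize::VisOcr used in this PR:
# vis_im = fd.vision.vis_ppocr(im, result)
# cv2.imwrite("vis_result.jpg", vis_im)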


@@ -28,10 +28,9 @@ tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar.gz
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
tar -xvf ch_PP-OCRv3_rec_infer.tar
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/doc/imgs/12.jpg
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/utils/ppocr_keys_v1.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
# Inference on CPU
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
@@ -39,8 +38,6 @@ wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
# Inference with TensorRT on GPU
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
# OCR also supports any combination of the det/cls/rec models; for example, to run without the cls model, pass an empty string in its place:
./infer_demo ./ch_PP-OCRv3_det_infer "" ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
```
After running, the visualized result is shown in the figure below
@@ -53,12 +50,12 @@ wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/
### PPOCRSystemv3 Class
```
fastdeploy::application::ocrsystem::PPOCRSystemv3(fastdeploy::vision::ocr::DBDetector* ocr_det = nullptr,
fastdeploy::vision::ocr::Classifier* ocr_cls = nullptr,
fastdeploy::vision::ocr::Recognizer* ocr_rec = nullptr);
fastdeploy::application::ocrsystem::PPOCRSystemv3(fastdeploy::vision::ocr::DBDetector* det_model,
fastdeploy::vision::ocr::Classifier* cls_model,
fastdeploy::vision::ocr::Recognizer* rec_model);
```
PPOCRSystemv3 is initialized by chaining the detection, classification, and recognition models
**Parameters**
@@ -66,6 +63,17 @@ PPOCRSystemv3 is initialized by chaining the detection, classification, and recognition models
> * **Classifier**(model): the classification model in OCR
> * **Recognizer**(model): the recognition model in OCR
```
fastdeploy::application::ocrsystem::PPOCRSystemv3(fastdeploy::vision::ocr::DBDetector* det_model,
fastdeploy::vision::ocr::Recognizer* rec_model);
```
PPOCRSystemv3 initialized by chaining the detection and recognition models only (no classifier)
**Parameters**
> * **DBDetector**(model): the detection model in OCR
> * **Recognizer**(model): the recognition model in OCR
#### Predict Function
> ```


@@ -19,11 +19,7 @@ const char sep = '\\';
const char sep = '/';
#endif
void CpuInfer(const std::string& det_model_dir,
const std::string& cls_model_dir,
const std::string& rec_model_dir,
const std::string& rec_label_file,
const std::string& image_file) {
void InitAndInfer(const std::string& det_model_dir,
                  const std::string& cls_model_dir,
                  const std::string& rec_model_dir,
                  const std::string& rec_label_file,
                  const std::string& image_file,
                  const fastdeploy::RuntimeOption& option) {
auto det_model_file = det_model_dir + sep + "inference.pdmodel";
auto det_params_file = det_model_dir + sep + "inference.pdiparams";
@@ -32,235 +28,32 @@ void CpuInfer(const std::string& det_model_dir,
auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
auto rec_label = rec_label_file;
fastdeploy::vision::ocr::DBDetector det_model;
fastdeploy::vision::ocr::Classifier cls_model;
fastdeploy::vision::ocr::Recognizer rec_model;
auto det_model = fastdeploy::vision::ocr::DBDetector(
    det_model_file, det_params_file, option);
auto cls_model = fastdeploy::vision::ocr::Classifier(
    cls_model_file, cls_params_file, option);
auto rec_model = fastdeploy::vision::ocr::Recognizer(
    rec_model_file, rec_params_file, rec_label_file, option);
if (!det_model_dir.empty()) {
auto det_option = fastdeploy::RuntimeOption();
det_option.UseCpu();
det_model = fastdeploy::vision::ocr::DBDetector(
det_model_file, det_params_file, det_option);
assert(det_model.Initialized());
assert(cls_model.Initialized());
assert(rec_model.Initialized());
if (!det_model.Initialized()) {
std::cerr << "Failed to initialize det_model." << std::endl;
return;
}
}
if (!cls_model_dir.empty()) {
auto cls_option = fastdeploy::RuntimeOption();
cls_option.UseCpu();
cls_model = fastdeploy::vision::ocr::Classifier(
cls_model_file, cls_params_file, cls_option);
if (!cls_model.Initialized()) {
std::cerr << "Failed to initialize cls_model." << std::endl;
return;
}
}
if (!rec_model_dir.empty()) {
auto rec_option = fastdeploy::RuntimeOption();
rec_option.UseCpu();
rec_model = fastdeploy::vision::ocr::Recognizer(
rec_model_file, rec_params_file, rec_label, rec_option);
if (!rec_model.Initialized()) {
std::cerr << "Failed to initialize rec_model." << std::endl;
return;
}
}
auto ocrv3_app = fastdeploy::application::ocrsystem::PPOCRSystemv3(
&det_model, &cls_model, &rec_model);
// The classification model is optional, so the OCR system can also be chained without it:
// auto ocr_system_v3 = fastdeploy::application::ocrsystem::PPOCRSystemv3(&det_model, &rec_model);
auto ocr_system_v3 = fastdeploy::application::ocrsystem::PPOCRSystemv3(
    &det_model, &cls_model, &rec_model);
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::OCRResult res;
// Run prediction
if (!ocrv3_app.Predict(&im, &res)) {
fastdeploy::vision::OCRResult result;
if (!ocr_system_v3.Predict(&im, &result)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// Print the prediction result
std::cout << res.Str() << std::endl;
std::cout << result.Str() << std::endl;
// Visualize the result
auto vis_img = fastdeploy::vision::Visualize::VisOcr(im_bak, res);
cv::imwrite("vis_result.jpg", vis_img);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& det_model_dir,
const std::string& cls_model_dir,
const std::string& rec_model_dir,
const std::string& rec_label_file,
const std::string& image_file) {
auto det_model_file = det_model_dir + sep + "inference.pdmodel";
auto det_params_file = det_model_dir + sep + "inference.pdiparams";
auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
auto rec_label = rec_label_file;
fastdeploy::vision::ocr::DBDetector det_model;
fastdeploy::vision::ocr::Classifier cls_model;
fastdeploy::vision::ocr::Recognizer rec_model;
// Prepare the models
if (!det_model_dir.empty()) {
auto det_option = fastdeploy::RuntimeOption();
det_option.UseGpu();
det_model = fastdeploy::vision::ocr::DBDetector(
det_model_file, det_params_file, det_option);
if (!det_model.Initialized()) {
std::cerr << "Failed to initialize det_model." << std::endl;
return;
}
}
if (!cls_model_dir.empty()) {
auto cls_option = fastdeploy::RuntimeOption();
cls_option.UseGpu();
cls_model = fastdeploy::vision::ocr::Classifier(
cls_model_file, cls_params_file, cls_option);
if (!cls_model.Initialized()) {
std::cerr << "Failed to initialize cls_model." << std::endl;
return;
}
}
if (!rec_model_dir.empty()) {
auto rec_option = fastdeploy::RuntimeOption();
rec_option.UseGpu();
rec_model = fastdeploy::vision::ocr::Recognizer(
rec_model_file, rec_params_file, rec_label, rec_option);
if (!rec_model.Initialized()) {
std::cerr << "Failed to initialize rec_model." << std::endl;
return;
}
}
auto ocrv3_app = fastdeploy::application::ocrsystem::PPOCRSystemv3(
&det_model, &cls_model, &rec_model);
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::OCRResult res;
// Run prediction
if (!ocrv3_app.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// Print the prediction result
std::cout << res.Str() << std::endl;
// Visualize the result
auto vis_img = fastdeploy::vision::Visualize::VisOcr(im_bak, res);
cv::imwrite("vis_result.jpg", vis_img);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void TrtInfer(const std::string& det_model_dir,
const std::string& cls_model_dir,
const std::string& rec_model_dir,
const std::string& rec_label_file,
const std::string& image_file) {
auto det_model_file = det_model_dir + sep + "inference.pdmodel";
auto det_params_file = det_model_dir + sep + "inference.pdiparams";
auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
auto rec_label = rec_label_file;
fastdeploy::vision::ocr::DBDetector det_model;
fastdeploy::vision::ocr::Classifier cls_model;
fastdeploy::vision::ocr::Recognizer rec_model;
// Prepare the models
if (!det_model_dir.empty()) {
auto det_option = fastdeploy::RuntimeOption();
det_option.UseGpu();
det_option.UseTrtBackend();
det_option.SetTrtInputShape("x", {1, 3, 50, 50}, {1, 3, 640, 640},
{1, 3, 960, 960});
det_model = fastdeploy::vision::ocr::DBDetector(
det_model_file, det_params_file, det_option);
if (!det_model.Initialized()) {
std::cerr << "Failed to initialize det_model." << std::endl;
return;
}
}
if (!cls_model_dir.empty()) {
auto cls_option = fastdeploy::RuntimeOption();
cls_option.UseGpu();
cls_option.UseTrtBackend();
cls_option.SetTrtInputShape("x", {1, 3, 48, 192});
cls_model = fastdeploy::vision::ocr::Classifier(
cls_model_file, cls_params_file, cls_option);
if (!cls_model.Initialized()) {
std::cerr << "Failed to initialize cls_model." << std::endl;
return;
}
}
if (!rec_model_dir.empty()) {
auto rec_option = fastdeploy::RuntimeOption();
rec_option.UseGpu();
rec_option.UseTrtBackend();
rec_option.SetTrtInputShape("x", {1, 3, 48, 10}, {1, 3, 48, 320},
{1, 3, 48, 2000});
rec_model = fastdeploy::vision::ocr::Recognizer(
rec_model_file, rec_params_file, rec_label, rec_option);
if (!rec_model.Initialized()) {
std::cerr << "Failed to initialize rec_model." << std::endl;
return;
}
}
auto ocrv3_app = fastdeploy::application::ocrsystem::PPOCRSystemv3(
&det_model, &cls_model, &rec_model);
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::OCRResult res;
// Run prediction
if (!ocrv3_app.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// Print the prediction result
std::cout << res.Str() << std::endl;
// Visualize the result
auto vis_img = fastdeploy::vision::Visualize::VisOcr(im_bak, res);
cv::imwrite("vis_result.jpg", vis_img);
auto vis_im = fastdeploy::vision::Visualize::VisOcr(im_bak, result);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -279,12 +72,23 @@ int main(int argc, char* argv[]) {
return -1;
}
if (std::atoi(argv[6]) == 0) {
CpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
} else if (std::atoi(argv[6]) == 1) {
GpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
} else if (std::atoi(argv[6]) == 2) {
TrtInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
fastdeploy::RuntimeOption option;
int flag = std::atoi(argv[6]);
if (flag == 0) {
option.UseCpu();
} else if (flag == 1) {
option.UseGpu();
} else if (flag == 2) {
option.UseGpu();
option.UseTrtBackend();
}
std::string det_model_dir = argv[1];
std::string cls_model_dir = argv[2];
std::string rec_model_dir = argv[3];
std::string rec_label_file = argv[4];
std::string test_image = argv[5];
InitAndInfer(det_model_dir, cls_model_dir, rec_model_dir, rec_label_file, test_image, option);
return 0;
}
}


@@ -19,10 +19,9 @@ tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar.gz
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
tar xvf ch_PP-OCRv3_rec_infer.tar
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/doc/imgs/12.jpg
wget https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.6/ppocr/utils/ppocr_keys_v1.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
@@ -33,9 +32,7 @@ python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2
# Inference on GPU
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu
# Inference with TensorRT on GPU
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --det_use_trt True --cls_use_trt True --rec_use_trt True
# OCR also supports any combination of the det/cls/rec models; for example, to run without the cls model, pass an empty string to --cls_model:
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model "" --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
```
After running, the visualized result is shown in the figure below


@@ -1,3 +1,17 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
@@ -21,7 +35,6 @@ def parse_arguments():
"--rec_label_file",
required=True,
help="Path of Recognization model of PPOCR.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
parser.add_argument(
@@ -30,112 +43,82 @@ def parse_arguments():
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--det_use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
"--backend",
type=str,
default="default",
help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
)
parser.add_argument(
"--cls_use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
"--device_id",
type=int,
default=0,
help="Define which GPU card used to run model.")
parser.add_argument(
"--rec_use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
"--cpu_thread_num",
type=int,
default=9,
help="Number of threads while inference on CPU.")
return parser.parse_args()
def build_det_option(args):
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
option.use_gpu(0)
if args.det_use_trt:
option.set_cpu_thread_num(args.cpu_thread_num)
if args.backend.lower() == "trt":
assert args.device.lower(
) == "gpu", "TensorRT backend require inference on device GPU."
option.use_trt_backend()
# det_max_side_len defaults to 960; if you change the DET model's max_side_len, change this value to match
det_max_side_len = 960
option.set_trt_input_shape("x", [1, 3, 50, 50], [1, 3, 640, 640],
[1, 3, det_max_side_len, det_max_side_len])
return option
def build_cls_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.cls_use_trt:
option.use_trt_backend()
option.set_trt_input_shape("x", [1, 3, 32, 100])
return option
def build_rec_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.rec_use_trt:
option.use_trt_backend()
option.set_trt_input_shape("x", [1, 3, 48, 10], [1, 3, 48, 320],
[1, 3, 48, 2000])
elif args.backend.lower() == "ort":
option.use_ort_backend()
elif args.backend.lower() == "paddle":
option.use_paddle_backend()
elif args.backend.lower() == "openvino":
assert args.device.lower(
) == "cpu", "OpenVINO backend require inference on device CPU."
option.use_openvino_backend()
return option
args = parse_arguments()
# Det model
# Detection model: detects text boxes
det_model_file = os.path.join(args.det_model, "inference.pdmodel")
det_params_file = os.path.join(args.det_model, "inference.pdiparams")
# Cls model
# Classification model: text direction classification (optional)
cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
# Rec model
# Recognition model: text recognition
rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
rec_label_file = args.rec_label_file
# Defaults
det_model = fd.vision.ocr.DBDetector()
cls_model = fd.vision.ocr.Classifier()
rec_model = fd.vision.ocr.Recognizer()
# The same deployment configuration is used for all three models
# Users can also configure each model separately as needed
runtime_option = build_option(args)
# Model initialization
if (len(args.det_model) != 0):
det_runtime_option = build_det_option(args)
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=det_runtime_option)
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=runtime_option)
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=runtime_option)
rec_model = fd.vision.ocr.Recognizer(
rec_model_file,
rec_params_file,
rec_label_file,
runtime_option=runtime_option)
if (len(args.cls_model) != 0):
cls_runtime_option = build_cls_option(args)
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=cls_runtime_option)
if (len(args.rec_model) != 0):
rec_runtime_option = build_rec_option(args)
rec_model = fd.vision.ocr.Recognizer(
rec_model_file,
rec_params_file,
rec_label_file,
runtime_option=rec_runtime_option)
ppocrsysv3 = fd.vision.ocr.PPOCRSystemv3(
ocr_det=det_model._model,
ocr_cls=cls_model._model,
ocr_rec=rec_model._model)
# Create the OCR system by chaining the 3 models; cls_model is optional and can be set to None if not needed
ocr_system = fd.vision.ocr.PPOCRSystemv3(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
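# As with v2, cls_model can be set to None to chain only detection and
# recognition (a sketch using the same kwargs as above):
# ocr_system = fd.vision.ocr.PPOCRSystemv3(
#     det_model=det_model, cls_model=None, rec_model=rec_model)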
# Load the test image
im = cv2.imread(args.image)
# Predict and print the result
result = ppocrsysv3.predict(im)
result = ocr_system.predict(im)
print(result)