[Streamer] Basic framework, YAML parser, ppyoloe demo and video decoder demo (#863)

* support trt installed in sys dir

* streamer init

* link elements and run

* create source bin

* add config

* restruct dirs

* set property

* link elements

* integrate perf

* add bbox parser

* parse yaml to string, video decoder

* use try pull for decoder and nits

* streamer ppyoloe cpp

* update readme

* video decoder cpp dir

* add cn readme

* update readme

* cmake nits

* refactor perf measurement
This commit is contained in:
Wang Xinyu
2022-12-19 10:14:17 +08:00
committed by GitHub
parent 95beb2bbf6
commit 81eaeddbd0
35 changed files with 1635 additions and 13 deletions

View File

@@ -0,0 +1,30 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cmake_minimum_required must come before project(): it sets the policy
# defaults that project() relies on.
cmake_minimum_required(VERSION 3.10)
project(streamer_ppyoloe C CXX)

# Path of the downloaded FastDeploy SDK, passed by the user as
# -DFASTDEPLOY_INSTALL_DIR=... (option() is for booleans only, so a
# CACHE PATH variable is the correct way to accept a directory).
set(FASTDEPLOY_INSTALL_DIR "" CACHE PATH "Path of downloaded fastdeploy sdk.")
if(NOT FASTDEPLOY_INSTALL_DIR)
  message(FATAL_ERROR
          "FASTDEPLOY_INSTALL_DIR is required, e.g. "
          "cmake .. -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-sdk")
endif()

# Brings in FASTDEPLOY_INCS and the FastDeploy link settings.
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# GStreamer app library, discovered through pkg-config.
find_package(PkgConfig REQUIRED)
pkg_check_modules(GSTAPP REQUIRED gstreamer-app-1.0)

# libfd_streamer is produced by the streamer's own top-level build/ tree;
# no imported target exists for it, so link_directories is still needed here.
link_directories(${PROJECT_SOURCE_DIR}/../../../build/)

add_executable(streamer_demo main.cc)

# Target-scoped usage requirements instead of directory-scoped
# include_directories(), so nothing leaks to other targets.
target_include_directories(streamer_demo PRIVATE
  ${FASTDEPLOY_INCS}
  ${PROJECT_SOURCE_DIR}/../../../src/   # FD streamer headers (fd_streamer.h)
  ${GSTAPP_INCLUDE_DIRS}
)
target_link_libraries(streamer_demo PRIVATE fd_streamer)

View File

@@ -0,0 +1 @@
README_EN.md

View File

@@ -0,0 +1,44 @@
简体中文 | [English](README_EN.md)
# FastDeploy Streamer PP-YOLOE C++ Example
## 编译和运行
1. 需要先编译FastDeploy Streamer, 请参考[README](../../../README.md)
2. 编译Example
```
mkdir build && cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=[PATH-OF-FASTDEPLOY-INSTALL-DIR]
make -j
```
3. 下载模型
```
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco_onnx_without_scale_factor.tgz
tar xvf ppyoloe_crn_l_300e_coco_onnx_without_scale_factor.tgz
mv ppyoloe_crn_l_300e_coco_onnx_without_scale_factor/ model/
```
4. 运行
```
cp ../nvinfer_config.txt .
cp ../streamer_cfg.yml .
./streamer_demo
```
## 导出ONNX模型不包含NMS和scale factor
```
# 导出Paddle推理模型,设置 exclude_nms=True 和 trt=True
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection
python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams exclude_nms=True trt=True --output_dir inference_model
# 转换为ONNX
paddle2onnx --model_dir inference_model/ppyoloe_crn_l_300e_coco/ --model_filename model.pdmodel --params_filename model.pdiparams --save_file ppyoloe.onnx --deploy_backend tensorrt --enable_dev_version True
# 裁剪ONNX删除scale factor
git clone https://github.com/PaddlePaddle/Paddle2ONNX.git
cd Paddle2ONNX
python tools/onnx/prune_onnx_model.py --model ../PaddleDetection/ppyoloe.onnx --output_names concat_14.tmp_0 p2o.Mul.245 --save_file ppyoloe_without_scale_factor.onnx
```

View File

@@ -0,0 +1,44 @@
English | [简体中文](README_CN.md)
# FastDeploy Streamer PP-YOLOE C++ Example
## Build and Run
1. Build FastDeploy Streamer first, see [README](../../../README.md)
2. Build Example
```
mkdir build && cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=[PATH-OF-FASTDEPLOY-INSTALL-DIR]
make -j
```
3. Download model
```
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco_onnx_without_scale_factor.tgz
tar xvf ppyoloe_crn_l_300e_coco_onnx_without_scale_factor.tgz
mv ppyoloe_crn_l_300e_coco_onnx_without_scale_factor/ model/
```
4. Run
```
cp ../nvinfer_config.txt .
cp ../streamer_cfg.yml .
./streamer_demo
```
## Export ONNX excluding scale_factor and NMS
```
# Export inference model with exclude_nms=True and trt=True
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection
python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams exclude_nms=True trt=True --output_dir inference_model
# Convert to ONNX
paddle2onnx --model_dir inference_model/ppyoloe_crn_l_300e_coco/ --model_filename model.pdmodel --params_filename model.pdiparams --save_file ppyoloe.onnx --deploy_backend tensorrt --enable_dev_version True
# Prune ONNX to delete scale factor
git clone https://github.com/PaddlePaddle/Paddle2ONNX.git
cd Paddle2ONNX
python tools/onnx/prune_onnx_model.py --model ../PaddleDetection/ppyoloe.onnx --output_names concat_14.tmp_0 p2o.Mul.245 --save_file ppyoloe_without_scale_factor.onnx
```

View File

@@ -0,0 +1,22 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fd_streamer.h"
int main(int argc, char* argv[]) {
auto streamer = fastdeploy::streamer::FDStreamer();
streamer.Init("streamer_cfg.yml");
streamer.Run();
return 0;
}

View File

@@ -0,0 +1,23 @@
# DeepStream nvinfer element configuration for the PP-YOLOE streamer demo.
# Referenced from streamer_cfg.yml via config-file-path.
[property]
# Frames per inference batch (same value as the nvstreammux batch-size
# in streamer_cfg.yml).
batch-size=4
# ~1/255: scales 8-bit pixel values into [0, 1] before inference.
net-scale-factor=0.0039215697906911373
# 0=RGB, 1=BGR
model-color-format=0
# NOTE(review): nvinfer presumably builds/caches the TensorRT engine at
# model-engine-file from the ONNX model — confirm against DeepStream docs.
onnx-file=model/ppyoloe.onnx
model-engine-file=model/ppyoloe.onnx_b4_gpu0_fp32.engine
labelfile-path=model/labels.txt
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=0
# COCO has 80 classes.
num-detected-classes=80
gie-unique-id=1
network-type=0
## 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering)
cluster-mode=2
maintain-aspect-ratio=1
# Custom bounding-box parser exported by the FD streamer library below.
parse-bbox-func-name=NvDsInferParseCustomPPYOLOE
custom-lib-path=../../../../build/libfd_streamer.so
# Per-class post-processing thresholds (applied to all classes).
[class-attrs-all]
nms-iou-threshold=0.45
pre-cluster-threshold=0.25
topk=300

View File

@@ -0,0 +1,46 @@
# FastDeploy Streamer pipeline configuration.
# Four file sources -> nvstreammux -> nvinfer (PP-YOLOE) -> nvtracker ->
# 2x2 tiler -> on-screen display -> encoded file sink (out.mp4).
# NOTE: nesting restored — the keys below each element belong under it.
app:
  type: video_analytics
  enable-perf-measurement: true      # periodically report pipeline perf
  perf-measurement-interval-sec: 5   # reporting interval, seconds

# Input sources; each URI is attached to a muxer pad "mux.sink_<N>".
nvurisrcbin_list:
  uri-list:
  - file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_ride_bike.mov
  - file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_ride_bike.mov
  - file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_ride_bike.mov
  - file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_ride_bike.mov
  pad-prefix: mux.sink_
  gpu-id: 0

# Batches the four streams into a single buffer for inference.
nvstreammux:
  name: mux
  gpu-id: 0
  batch-size: 4        # matches batch-size in nvinfer_config.txt
  width: 1920
  height: 1080
  batched-push-timeout: 40000 # 40ms

# Inference element; detector settings live in the referenced file.
nvinfer:
  gpu-id: 0
  config-file-path: nvinfer_config.txt

# Multi-object tracker (NvDCF library shipped with DeepStream).
nvtracker:
  gpu-id: 0
  tracker-width: 640
  tracker-height: 640
  ll-lib-file: /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
  ll-config-file: /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
  enable-batch-process: true

# Composites the four streams into a 2x2 grid.
nvmultistreamtiler:
  gpu-id: 0
  rows: 2
  columns: 2

# Draws bounding boxes / labels on the frames.
nvosdbin:
  gpu-id: 0

# Encodes the tiled output and writes it to a file.
nvvideoencfilesinkbin:
  gpu-id: 0
  bitrate: 4000
  output-file: out.mp4