mirror of
https://github.com/Ascend/ascend_community_projects.git
synced 2025-09-26 20:01:17 +08:00
19
HelmetIdentification_V2/CMakeLists.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
# Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
|
||||
|
||||
# Minimum CMake version
|
||||
cmake_minimum_required(VERSION 3.5.1)
|
||||
|
||||
# Project name
|
||||
project(HelmetIdentification)
|
||||
|
||||
# Configure the environment variable MX_SDK_HOME, e.g. /home/xxxxxxx/MindX_SDK/mxVision; it can be checked on the remote environment with the env command
|
||||
set(MX_SDK_HOME $ENV{MX_SDK_HOME})
|
||||
|
||||
if (NOT DEFINED ENV{MX_SDK_HOME})
|
||||
set(MX_SDK_HOME "/usr/local/Ascend/mindx_sdk")
|
||||
message(STATUS "set default MX_SDK_HOME: ${MX_SDK_HOME}")
|
||||
else ()
|
||||
message(STATUS "env MX_SDK_HOME: ${MX_SDK_HOME}")
|
||||
endif()
|
||||
|
||||
add_subdirectory("./src")
|
BIN
HelmetIdentification_V2/images/flow.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 77 KiB |
BIN
HelmetIdentification_V2/images/make.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 13 KiB |
BIN
HelmetIdentification_V2/images/map_calculate.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 5.5 KiB |
BIN
HelmetIdentification_V2/images/rate.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 101 KiB |
BIN
HelmetIdentification_V2/images/result0.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 678 KiB |
BIN
HelmetIdentification_V2/images/warn.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 29 KiB |
12
HelmetIdentification_V2/model/Helmet_yolov5.cfg
Normal file
@@ -0,0 +1,12 @@
|
||||
CLASS_NUM=3
|
||||
BIASES_NUM=18
|
||||
BIASES=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326
|
||||
SCORE_THRESH=0.4
|
||||
OBJECTNESS_THRESH=0.3
|
||||
IOU_THRESH=0.5
|
||||
YOLO_TYPE=3
|
||||
ANCHOR_DIM=3
|
||||
|
||||
YOLO_VERSION=5
|
||||
FRAMEWORK=PyTorch
|
||||
MODEL_TYPE=2
|
14
HelmetIdentification_V2/model/act-env.sh
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This is used to convert onnx model file to .om model file.
|
||||
export install_path=/usr/local/Ascend/ascend-toolkit/latest
|
||||
export PATH=/usr/local/python3.9.2/bin:${install_path}/arm64-linux/atc/ccec_compiler/bin:${install_path}/arm64-linux/atc/bin:$PATH
|
||||
export PYTHONPATH=${install_path}/arm64-linux/atc/python/site-packages:${install_path}/arm64-linux/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/arm64-linux/atc/python/site-packages/schedule_search.egg
|
||||
export LD_LIBRARY_PATH=${install_path}/arm64-linux/atc/lib64:$LD_LIBRARY_PATH
|
||||
export ASCEND_OPP_PATH=${install_path}/opp
|
||||
export Home="./path/"
|
||||
# Home is set to the path where the model is located
|
||||
|
||||
# Execute, transform YOLOv5 model.
|
||||
atc --model="${Home}"/YOLOv5_s.onnx --framework=5 --output="${Home}"/YOLOv5_s --insert_op_conf=./aipp_YOLOv5.config --input_format=NCHW --log=info --soc_version=Ascend310 --input_shape="images:1,3,640,640"
|
||||
# --model is the path where onnx is located. --output is the path where the output of the converted model is located
|
37
HelmetIdentification_V2/model/aipp_YOLOv5.config
Normal file
@@ -0,0 +1,37 @@
|
||||
aipp_op{
|
||||
aipp_mode:static
|
||||
input_format : YUV420SP_U8
|
||||
|
||||
src_image_size_w : 640
|
||||
src_image_size_h : 640
|
||||
|
||||
crop: false
|
||||
load_start_pos_h : 0
|
||||
load_start_pos_w : 0
|
||||
crop_size_w : 640
|
||||
crop_size_h: 640
|
||||
|
||||
csc_switch : true
|
||||
rbuv_swap_switch : false
|
||||
|
||||
# color space conversion
|
||||
matrix_r0c0: 256
|
||||
matrix_r0c1: 0
|
||||
matrix_r0c2: 359
|
||||
matrix_r1c0: 256
|
||||
matrix_r1c1: -88
|
||||
matrix_r1c2: -183
|
||||
matrix_r2c0: 256
|
||||
matrix_r2c1: 454
|
||||
matrix_r2c2: 0
|
||||
input_bias_0: 0
|
||||
input_bias_1: 128
|
||||
input_bias_2: 128
|
||||
|
||||
# mean subtraction and normalization
|
||||
min_chn_0 : 0
|
||||
min_chn_1 : 0
|
||||
min_chn_2 : 0
|
||||
var_reci_chn_0: 0.003921568627451
|
||||
var_reci_chn_1: 0.003921568627451
|
||||
var_reci_chn_2: 0.003921568627451}
|
3
HelmetIdentification_V2/model/imgclass.names
Normal file
@@ -0,0 +1,3 @@
|
||||
person
|
||||
head
|
||||
helmet
|
283
HelmetIdentification_V2/readme.md
Normal file
@@ -0,0 +1,283 @@
# Helmet Detection

## 1 Introduction

A safety helmet is an essential piece of personal protective equipment at work: it protects the head against falling objects, impacts and collisions. By checking whether every person is wearing a helmet, the system can raise an alarm for anyone who is not.

This project supports real-time analysis of two video streams. The main flow is:

Two external input videos are received on separate channels; the H.264 streams are decoded into YUV images, which are then resized, run through inference, post-processed, and tracked/deduplicated in sequence. Inference uses a YOLOv5 model to detect helmets, and repeated detections of the same person without a helmet are deduplicated. Finally the detection results are written out as two streams, and an alarm is raised for every person found without a helmet.
### 1.1 Supported Products

This project uses the Ascend Atlas 200DK as its main hardware platform.

### 1.2 Supported Versions

The matching CANN version is [6.0.RC1](https://www.hiascend.com/software/cann/commercial), and the MindX SDK version is [3.0.RC2](https://www.hiascend.com/software/Mindx-sdk).

For preparation before installing the MindX SDK, see the [installation guide](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/quickStart/1-1安装SDK开发套件.md) in the user guide.
### 1.3 Software Design Overview

The pipeline is organised into the subsystems listed below; the function of each subsystem is described in Table 1.1.

Table 1.1 Functions of the subsystems:

| No. | Subsystem | Function |
| ---- | ---------- | ------------------------------------------------------------ |
| 1 | Video decoding | Decode image frames from the H.264 video stream |
| 2 | Frame resizing | Resize the input frames to the width and height required by the YOLO model while keeping the aspect ratio |
| 3 | Model inference | Feed the resized image into the model and obtain the result (as tensors) |
| 4 | Post-processing | Call the post-processing interface to compute, from the inference output, the positions of the detected boxes in the original image |
| 5 | Tracking and deduplication | Track the boxes across frames of the same video stream using the Hungarian algorithm and Kalman filtering |
| 6 | Result output | If a person without a helmet is detected and the tracking module judges it is not a duplicate, print an alarm and save the detection results, split into two outputs by input stream |
### 1.4 Code Directory Structure

The project is named HelmetIdentification_V2 and is organised as follows:

```
.
├── model
│   ├── Helmet_yolov5.cfg      // YOLOv5 post-processing configuration
│   ├── act-env.sh             // script that sets the conversion environment variables and runs ATC
│   ├── aipp_YOLOv5.config     // AIPP configuration used during model conversion
│   ├── imgclass.names         // label file
│   └── YOLOv5_s.om            // model file (download or convert it yourself)
├── src
│   ├── CMakeLists.txt         // CMake file
│   ├── DataType.h             // custom data structures
│   ├── Hungarian.cpp          // implementation of the Hungarian algorithm
│   ├── Hungarian.h            // header for the Hungarian algorithm
│   ├── KalmanTracker.cpp      // implementation of the Kalman filter
│   ├── KalmanTracker.h        // header for the Kalman filter
│   ├── MOTConnection.cpp      // implementation of the tracking/matching logic
│   ├── MOTConnection.h        // header for the tracking/matching logic
│   ├── cropResizePaste.hpp    // custom aspect-ratio-preserving resize
│   ├── main-image.cpp         // reads images to test accuracy
│   ├── main.cpp               // main entry point of helmet detection
│   └── utils.h                // utility header for multi-threading
├── result
│   ├── one                    // folder holding the results of the first input (must be created)
│   └── two                    // folder holding the results of the second input (must be created)
└── CMakeLists.txt             // CMake file
```
### 1.5 Flow Chart

![Flow chart](images/flow.jpg)

### 1.6 Features and Applicable Scenarios

This project detects persons who are not wearing safety helmets; the input video must be in H.264 format.
## 2 Environment Dependencies

### 2.1 Software Dependencies

The required software and versions are listed below:

| Software | Version | Description | How to obtain |
| ------------------- | ------- | ----------------------------- | --------------------------------------------------------- |
| mxVision | 3.0.RC2 | mxVision package | [link](https://www.hiascend.com/software/Mindx-sdk) |
| Ascend-CANN-toolkit | 6.0.RC1 | Ascend-cann-toolkit development kit | [link](https://www.hiascend.com/software/cann/commercial) |
| Ubuntu | 18.04 | | |
### 2.2 Environment Variables

Before building and running the project, set the environment variables.

MindX SDK environment variables:

. ${SDK-path}/set_env.sh

CANN environment variables:

. ${ascend-toolkit-path}/set_env.sh

Where:

SDK-path: installation path of the mxVision SDK

ascend-toolkit-path: installation path of CANN
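For example, with a typical default installation the two scripts can be sourced as follows (the install prefixes below are assumptions; adjust them to your environment):

```shell
# assumed install locations -- adjust them to your environment
. /usr/local/Ascend/mindx_sdk/mxVision/set_env.sh
. /usr/local/Ascend/ascend-toolkit/set_env.sh

# check that the SDK/CANN variables are now visible
env | grep -E "MX_SDK_HOME|ASCEND"
```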
## 3 Model Conversion

For setting up the ATC tool environment needed for model conversion, see: https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/600alpha001/infacldevg/atctool/atlasatc_16_0004.html

This project uses the YOLOv5_s model for the detection task. The onnx model can be downloaded from this [link](https://gitee.com/link?target=https%3A%2F%2Fmindx.sdk.obs.cn-north-4.myhuaweicloud.com%2Fmindxsdk-referenceapps%2520%2Fcontrib%2FHelmetIdentification%2Fmodel.zip) and then converted with the ATC tool.

The steps are: upload the onnx model to any directory on the server, then run the act-env.sh script to convert the onnx model into an om model.

Run the following command:

```
sh act-env.sh
```

The script contains the atc command:

```
--model=${Home}/YOLOv5_s.onnx --framework=5 --output=${Home}/YOLOv5_s --insert_op_conf=./aipp_YOLOv5.config --input_format=NCHW --log=info --soc_version=Ascend310 --input_shape="images:1,3,640,640"
```

Its parameters are described in the table below:
| Parameter | Description |
| ---------------- | ------------------------------------------------------------ |
| --framework | Type of the original framework. The value 5 means an ONNX model; only operators of opset v11 in the ai.onnx operator domain are supported. Operators from other opset versions (e.g. opset v9) can be converted into opset v11 onnx operators through PyTorch |
| --model | Path and file name of the original model |
| --output | For an open-source framework model, the path and file name of the converted offline model |
| --soc_version | Chip version used for the conversion. The Ascend AI processor version can be looked up under the ATC installation path in "/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/atc/data/platform_config"; the name of the ".ini" file is the corresponding ${soc_version} |
| --insert_op_conf | Path and file name of the configuration file for inserted operators, for example the AIPP preprocessing operator |
| --input_shape | Shape of the model input data |
| --out_nodes | Output nodes. If no output node (operator name) is specified, the model output defaults to the operators of the last layer; otherwise the specified nodes are used |

The --insert_op_conf parameter is the path of the AIPP preprocessing operator configuration file. This file, aipp_YOLOv5.config, preprocesses the input image before it enters the model and is stored in the model directory of the source code.
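As a concrete sketch, assuming the ATC environment variables are already set (see section 2.2, or the exports at the top of act-env.sh) and the onnx file has been placed in the model directory, the same conversion can also be run by hand:

```shell
cd model
export Home=$(pwd)   # act-env.sh expects Home to point at the directory containing YOLOv5_s.onnx
atc --model="${Home}"/YOLOv5_s.onnx --framework=5 --output="${Home}"/YOLOv5_s \
    --insert_op_conf=./aipp_YOLOv5.config --input_format=NCHW --log=info \
    --soc_version=Ascend310 --input_shape="images:1,3,640,640"
```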
## 4 Building and Running

The project covers both a performance test and an accuracy test, so two main files are provided; keep whichever one you need. The steps for the performance test and the accuracy test are described separately below.

### 4.1 Performance Test

**Step 1** Adjust parameters

1. Upload the HelmetIdentification_V2 folder and its code to any directory on the server; do not change the folder structure.

2. Enter the src folder and edit its CMakeLists.txt:

* Line 24 must be changed to the include folder of the ascend-toolkit installation in your environment.

* Line 35 must be changed to the lib64 folder of the ascend-toolkit installation in your environment.

Note: lines 34 and 36 should be the same on every server; do not change them unless an error occurs.
**Step 2** Choose the corresponding main file

The performance test uses main.cpp,
so main-image.cpp must be removed.
This can be done with the following commands:

```shell
cd src
rm ./main-image.cpp
```
**Step 3** Build

Enter the HelmetIdentification_V2 folder and run the following commands to do an out-of-source build:

```shell
mkdir build
cd build
cmake ..
make
```

Output like the following means the build succeeded.

![make](images/make.png)
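If cmake cannot find the MindX SDK headers, make sure MX_SDK_HOME is exported before configuring; otherwise the top-level CMakeLists.txt falls back to a hard-coded default. A minimal sketch (the path below is only that fallback value; replace it with your actual mxVision installation path):

```shell
# only needed when set_env.sh has not already exported MX_SDK_HOME
export MX_SDK_HOME=/usr/local/Ascend/mindx_sdk
cmake .. && make
```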
**Step 4** Run and check the output

Go back from the build folder to the HelmetIdentification_V2 folder.

On the first run, create the result folder and the one and two folders inside it to hold the results. Make sure you are at the HelmetIdentification_V2 level, then run:

```
mkdir result
cd result
mkdir one
mkdir two
```
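Equivalently, the three folders can be created with a single command from the HelmetIdentification_V2 folder:

```shell
mkdir -p result/one result/two
```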
Go back to the HelmetIdentification_V2 folder and run the following command to measure the processing frame rate:

```
./main ${video0Path} ${video0width} ${video0height} [${video1Path} ${video1width} ${video1height}]
```

video0Path and video1Path are the paths of the two input videos; each must currently be an H.264 (.264) file, and its width and height are passed as the two arguments that follow it (default width 1920, height 1080).
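For example, a run on two hypothetical test clips could look like this (the clip paths are placeholders; use your own .264 files):

```shell
./main ./videos/test0.264 1920 1080 ./videos/test1.264 1920 1080
```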
If a frame is found to contain a person without a helmet, an alarm message is printed and the image is saved.

![warn](images/warn.png)

The images are saved in the HelmetIdentification_V2/result folder, which contains two sub-folders: one holds the results of the first input and two holds the results of the second input.

The image files are named `result[frameId].jpg`, where frameId is the index of the frame in the input video.

While running, the program prints the **frame rate** of the tracking/deduplication thread (the number of images processed per second), for example:

![rate](images/rate.jpg)
### 4.2 Accuracy Test

**Step 1** Adjust parameters

1. Upload the HelmetIdentification_V2 folder and its code to any directory on the server; do not change the folder structure.
2. Enter the src folder and edit its CMakeLists.txt:
   Line 24 must be changed to the include folder of the ascend-toolkit installation in your environment.
   Line 35 must be changed to the lib64 folder of the ascend-toolkit installation in your environment.
   Note: lines 34 and 36 should be the same on every server; do not change them unless an error occurs.
**Step 2** Choose the corresponding main file

The accuracy test uses main-image.cpp,
so delete main.cpp and then rename main-image.cpp to main.cpp.
This can be done with the following commands:

```sh
cd src
rm ./main.cpp
cp ./main-image.cpp ./main.cpp
rm ./main-image.cpp
```
**Step 3** Build

Enter the HelmetIdentification_V2 folder and run the following commands to do an out-of-source build:

```shell
mkdir build
cd build
cmake ..
make
```

Output like the following means the build succeeded.

![make](images/make.png)
**Step 4** Run and check the output

**The image dataset used for the accuracy test must be downloaded separately**: [Safety-Helmet-Wearing-Dataset](https://gitee.com/link?target=https%3A%2F%2Fmindx.sdk.obs.cn-north-4.myhuaweicloud.com%2Fmindxsdk-referenceapps%20%2Fcontrib%2FHelmetIdentification%2Fdata.zip)

Go back from the build folder to the HelmetIdentification_V2 folder and run the following command to read an image and run detection on it:

```
./main ${imageName}
```

imageName is the image name (<font color=red>without the extension</font>).

For example, `./main 000023` **reads 000023.jpg, creates 000023.txt and writes the detection results (the coordinates of the detected boxes) into it**.

Only one image can be processed per run. After all images have been processed, use `map_calculate.py` from the V1 version to compute the accuracy; it prints the detailed metrics (as shown below) and records precision, recall and mAP in the **output/output.txt** file.
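Since each run handles a single image, a simple loop can drive the whole test set. The directory names below follow the paths hard-coded in main-image.cpp (dataSet/TestImages for inputs, dataSet/V2txt for the generated txt files):

```shell
mkdir -p dataSet/V2txt   # main-image.cpp writes its txt results here
for img in dataSet/TestImages/*.jpg; do
    name=$(basename "$img" .jpg)
    ./main "$name"       # reads dataSet/TestImages/<name>.jpg, writes dataSet/V2txt/<name>.txt
done
```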

The command is:

```
python3 map_calculate.py --label_path ${label_path} --npu_txt_path ${txt_path} -na -np
```

* label_path is the path of the dataset labels; see the [V1 version](https://gitee.com/ascend/mindxsdk-referenceapps/tree/master/contrib/HelmetIdentification#32-%E7%B2%BE%E5%BA%A6%E6%B5%8B%E8%AF%95) for how to generate them.
* txt_path is the directory holding the result files generated above; put all the txt files into one folder, and map_calculate.py will match each file name against the corresponding file under label_path for comparison.
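A concrete invocation might look like the following; both directory arguments are placeholders for wherever your labels and generated txt files actually live:

```shell
python3 map_calculate.py --label_path ./dataSet/labels --npu_txt_path ./dataSet/V2txt -na -np
```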
47
HelmetIdentification_V2/src/CMakeLists.txt
Normal file
@@ -0,0 +1,47 @@
|
||||
# CMake lowest version requirement
|
||||
cmake_minimum_required(VERSION 3.5.1)
|
||||
# project information
|
||||
project(Individual)
|
||||
|
||||
# Compile options
|
||||
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=mindxsdk_private)
|
||||
add_compile_options(-std=c++11 -fPIC -fstack-protector-all -Wall -D_FORTIFY_SOURCE=2 -O2)
|
||||
|
||||
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../../")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "-g")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,relro,-z,now,-z,noexecstack -s -pie -pthread")
|
||||
set(CMAKE_SKIP_RPATH TRUE)
|
||||
|
||||
SET(CMAKE_BUILD_TYPE "Debug")
|
||||
SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
|
||||
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
|
||||
|
||||
# Header path
|
||||
include_directories(
|
||||
${MX_SDK_HOME}/include/
|
||||
${MX_SDK_HOME}/opensource/include/
|
||||
${MX_SDK_HOME}/opensource/include/opencv4/
|
||||
/home/HwHiAiUser/Ascend/ascend-toolkit/latest/include/
|
||||
./
|
||||
)
|
||||
|
||||
# add host lib path
|
||||
link_directories(
|
||||
${MX_SDK_HOME}/lib/
|
||||
${MX_SDK_HOME}/lib/modelpostprocessors
|
||||
${MX_SDK_HOME}/opensource/lib/
|
||||
${MX_SDK_HOME}/opensource/lib64/
|
||||
/usr/lib/aarch64-linux-gnu/
|
||||
/home/HwHiAiUser/Ascend/ascend-toolkit/latest/lib64/
|
||||
/usr/local/Ascend/driver/lib64/
|
||||
./
|
||||
)
|
||||
|
||||
|
||||
aux_source_directory(. sourceList)
|
||||
|
||||
add_executable(main ${sourceList})
|
||||
|
||||
target_link_libraries(main mxbase opencv_world boost_filesystem glog avformat avcodec avutil cpprest yolov3postprocess ascendcl acl_dvpp_mpi)
|
||||
|
||||
install(TARGETS main DESTINATION ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
|
94
HelmetIdentification_V2/src/DataType.h
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MXBASE_HELMETIDENTIFICATION_DATATYPE_H
|
||||
#define MXBASE_HELMETIDENTIFICATION_DATATYPE_H
|
||||
|
||||
#include <memory>
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
|
||||
namespace ascendVehicleTracking {
|
||||
#define DVPP_ALIGN_UP(x, align) ((((x) + ((align)-1)) / (align)) * (align))
|
||||
|
||||
const int MODULE_QUEUE_SIZE = 1000;
|
||||
|
||||
enum FrameMode {
|
||||
FRAME_MODE_SEARCH = 0,
|
||||
FRAME_MODE_REG
|
||||
};
|
||||
|
||||
struct DataBuffer {
|
||||
std::shared_ptr<uint8_t> deviceData;
|
||||
std::shared_ptr<uint8_t> hostData;
|
||||
uint32_t dataSize; // buffer size
|
||||
DataBuffer() : deviceData(nullptr), hostData(nullptr), dataSize(0) {}
|
||||
};
|
||||
|
||||
struct DetectInfo {
|
||||
int32_t classId;
|
||||
float confidence;
|
||||
float minx; // x value of left-top point
|
||||
float miny; // y value of left-top point
|
||||
float height;
|
||||
float width;
|
||||
};
|
||||
|
||||
enum TraceFlag {
|
||||
NEW_VEHICLE = 0,
|
||||
TRACkED_VEHICLE,
|
||||
LOST_VEHICLE
|
||||
};
|
||||
|
||||
struct TraceInfo {
|
||||
int32_t id;
|
||||
TraceFlag flag;
|
||||
int32_t survivalTime; // how long it has been since the object was first detected, unit: detection periods
|
||||
int32_t detectedTime; // how long the vehicle has been detected, unit: detection periods
|
||||
std::chrono::time_point<std::chrono::high_resolution_clock> createTime;
|
||||
};
|
||||
|
||||
struct TrackLet {
|
||||
TraceInfo info;
|
||||
// reserved: kalman status parameter
|
||||
int32_t lostTime; // undetected time for tracked vehicle
|
||||
std::vector<DataBuffer> shortFeature; // nearest 10 frame
|
||||
};
|
||||
|
||||
struct VehicleQuality {
|
||||
float score;
|
||||
};
|
||||
|
||||
struct Coordinate2D {
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
};
|
||||
}
|
||||
// namespace ascendVehicleTracking
|
||||
|
||||
struct AttrT {
|
||||
AttrT(std::string name, std::string value) : name(std::move(name)), value(std::move(value)) {}
|
||||
std::string name = {};
|
||||
std::string value = {};
|
||||
};
|
||||
|
||||
#endif
|
165
HelmetIdentification_V2/src/Hungarian.cpp
Normal file
@@ -0,0 +1,165 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Hungarian.h"
|
||||
#include <cstring>
|
||||
#include <sys/time.h>
|
||||
#include "MxBase/Log/Log.h"
|
||||
|
||||
namespace {
|
||||
const int INF = 0x3f3f3f3f;
|
||||
const int VISITED = 1;
|
||||
const int HUNGARIAN_CONTENT = 7;
|
||||
const int X_MATCH_OFFSET = 0;
|
||||
const int Y_MATCH_OFFSET = 1;
|
||||
const int X_VALUE_OFFSET = 2;
|
||||
const int Y_VALUE_OFFSET = 3;
|
||||
const int SLACK_OFFSET = 4;
|
||||
const int X_VISIT_OFFSET = 5;
|
||||
const int Y_VISIT_OFFSET = 6;
|
||||
}
|
||||
|
||||
APP_ERROR HungarianHandleInit(HungarianHandle &handle, int row, int cols)
|
||||
{
|
||||
handle.max = (row > cols) ? row : cols;
|
||||
auto adjMat = std::shared_ptr<int>();
|
||||
adjMat.reset(new int[handle.max * handle.max], std::default_delete<int[]>());
|
||||
if (adjMat == nullptr) {
|
||||
LogFatal << "HungarianHandleInit new failed";
|
||||
return APP_ERR_ACL_FAILURE;
|
||||
}
|
||||
|
||||
handle.adjMat = adjMat;
|
||||
|
||||
void* ptr[HUNGARIAN_CONTENT] = {nullptr};
|
||||
for (int i = 0; i < HUNGARIAN_CONTENT; ++i) {
|
||||
ptr[i] = malloc(handle.max * sizeof(int));
|
||||
if (ptr[i] == nullptr) {
|
||||
LogFatal << "HungarianHandleInit Malloc failed";
|
||||
return APP_ERR_ACL_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
handle.xMatch.reset((int *)ptr[X_MATCH_OFFSET], free);
|
||||
handle.yMatch.reset((int *)ptr[Y_MATCH_OFFSET], free);
|
||||
handle.xValue.reset((int *)ptr[X_VALUE_OFFSET], free);
|
||||
handle.yValue.reset((int *)ptr[Y_VALUE_OFFSET], free);
|
||||
handle.slack.reset((int *)ptr[SLACK_OFFSET], free);
|
||||
handle.xVisit.reset((int *)ptr[X_VISIT_OFFSET], free);
|
||||
handle.yVisit.reset((int *)ptr[Y_VISIT_OFFSET], free);
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
|
||||
static void HungarianInit(HungarianHandle &handle, const std::vector<std::vector<int>> &cost, int rows, int cols)
|
||||
{
|
||||
int i, j, value;
|
||||
if (rows > cols) {
|
||||
handle.transpose = true;
|
||||
handle.cols = rows;
|
||||
handle.rows = cols;
|
||||
handle.resX = handle.yMatch.get();
|
||||
handle.resY = handle.xMatch.get();
|
||||
} else {
|
||||
handle.transpose = false;
|
||||
handle.rows = rows;
|
||||
handle.cols = cols;
|
||||
handle.resX = handle.xMatch.get();
|
||||
handle.resY = handle.yMatch.get();
|
||||
}
|
||||
|
||||
for (i = 0; i < handle.rows; ++i) {
|
||||
handle.xValue.get()[i] = 0;
|
||||
handle.xMatch.get()[i] = -1;
|
||||
for (j = 0; j < handle.cols; ++j) {
|
||||
if (handle.transpose) {
|
||||
value = cost[j][i];
|
||||
} else {
|
||||
value = cost[i][j];
|
||||
}
|
||||
handle.adjMat.get()[i * handle.cols + j] = value;
|
||||
if (handle.xValue.get()[i] < value) {
|
||||
handle.xValue.get()[i] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < handle.cols; ++i) {
|
||||
handle.yValue.get()[i] = 0;
|
||||
handle.yMatch.get()[i] = -1;
|
||||
}
|
||||
}
|
||||
|
||||
static bool Match(HungarianHandle &handle, int id)
|
||||
{
|
||||
int j, delta;
|
||||
handle.xVisit.get()[id] = VISITED;
|
||||
for (j = 0; j < handle.cols; ++j) {
|
||||
if (handle.yVisit.get()[j] == VISITED) {
|
||||
continue;
|
||||
}
|
||||
delta = handle.xValue.get()[id] + handle.yValue.get()[j] - handle.adjMat.get()[id * handle.cols + j];
|
||||
if (delta == 0) {
|
||||
handle.yVisit.get()[j] = VISITED;
|
||||
if (handle.yMatch.get()[j] == -1 || Match(handle, handle.yMatch.get()[j])) {
|
||||
handle.yMatch.get()[j] = id;
|
||||
handle.xMatch.get()[id] = j;
|
||||
return true;
|
||||
}
|
||||
} else if (delta < handle.slack.get()[j]) {
|
||||
handle.slack.get()[j] = delta;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int HungarianSolve(HungarianHandle &handle, const std::vector<std::vector<int>> &cost, int rows, int cols)
|
||||
{
|
||||
HungarianInit(handle, cost, rows, cols);
|
||||
int i, j, delta;
|
||||
for (i = 0; i < handle.rows; ++i) {
|
||||
while (true) {
|
||||
std::fill(handle.xVisit.get(), handle.xVisit.get() + handle.rows, 0);
|
||||
std::fill(handle.yVisit.get(), handle.yVisit.get() + handle.cols, 0);
|
||||
for (j = 0; j < handle.cols; ++j) {
|
||||
handle.slack.get()[j] = INF;
|
||||
}
|
||||
if (Match(handle, i)) {
|
||||
break;
|
||||
}
|
||||
delta = INF;
|
||||
for (j = 0; j < handle.cols; ++j) {
|
||||
if (handle.yVisit.get()[j] != VISITED && delta > handle.slack.get()[j]) {
|
||||
delta = handle.slack.get()[j];
|
||||
}
|
||||
}
|
||||
if (delta == INF) {
|
||||
LogDebug << "Hungarian solve is invalid!";
|
||||
return -1;
|
||||
}
|
||||
for (j = 0; j < handle.rows; ++j) {
|
||||
if (handle.xVisit.get()[j] == VISITED) {
|
||||
handle.xValue.get()[j] -= delta;
|
||||
}
|
||||
}
|
||||
for (j = 0; j < handle.cols; ++j) {
|
||||
if (handle.yVisit.get()[j] == VISITED) {
|
||||
handle.yValue.get()[j] += delta;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return handle.rows;
|
||||
}
|
46
HelmetIdentification_V2/src/Hungarian.h
Normal file
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MXBASE_HELMETIDENTIFICATION_HUNGARIAN_H
|
||||
#define MXBASE_HELMETIDENTIFICATION_HUNGARIAN_H
|
||||
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "DataType.h"
|
||||
#include "MxBase/PostProcessBases/PostProcessDataType.h"
|
||||
#include "MxBase/ErrorCode/ErrorCodes.h"
|
||||
|
||||
struct HungarianHandle {
|
||||
int rows;
|
||||
int cols;
|
||||
int max;
|
||||
int *resX;
|
||||
int *resY;
|
||||
bool transpose;
|
||||
std::shared_ptr<int> adjMat;
|
||||
std::shared_ptr<int> xMatch;
|
||||
std::shared_ptr<int> yMatch;
|
||||
std::shared_ptr<int> xValue;
|
||||
std::shared_ptr<int> yValue;
|
||||
std::shared_ptr<int> slack;
|
||||
std::shared_ptr<int> xVisit;
|
||||
std::shared_ptr<int> yVisit;
|
||||
};
|
||||
|
||||
APP_ERROR HungarianHandleInit(HungarianHandle &handle, int row, int cols);
|
||||
int HungarianSolve(HungarianHandle &handle, const std::vector<std::vector<int>> &cost, int rows, int cols);
|
||||
|
||||
#endif
|
143
HelmetIdentification_V2/src/KalmanTracker.cpp
Normal file
@@ -0,0 +1,143 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "KalmanTracker.h"
|
||||
|
||||
namespace ascendVehicleTracking
|
||||
{
|
||||
namespace
|
||||
{
|
||||
const int OFFSET = 2;
|
||||
const int MULTIPLE = 2;
|
||||
}
|
||||
|
||||
/*
|
||||
* The SORT algorithm uses a linear constant velocity model,which assumes 7
|
||||
* states, including
|
||||
* x coordinate of bounding box center
|
||||
* y coordinate of bounding box center
|
||||
* area of bounding box
|
||||
* aspect ratio of w to h
|
||||
* velocity of x
|
||||
* velocity of y
|
||||
* variation rate of area
|
||||
*
|
||||
* The aspect ratio is considered to be unchanged, so there is no additive item
|
||||
* for aspect ratio in the transitionMatrix
|
||||
*
|
||||
*
|
||||
* Kalman filter equation step by step
|
||||
* (1) X(k|k-1)=AX(k-1|k-1)+BU(k)
|
||||
* X(k|k-1) is the predicted state(statePre),X(k-1|k-1) is the k-1 statePost,A
|
||||
* is transitionMatrix, B is controlMatrix, U(k) is control state, in SORT U(k) is 0.
|
||||
*
|
||||
* (2) P(k|k-1)=AP(k-1|k-1)A'+Q
|
||||
* P(k|k-1) is the predicted errorCovPre, P(k-1|k-1) is the k-1 errorCovPost,
|
||||
* Q is processNoiseCov
|
||||
*
|
||||
* (3) Kg(k)=P(k|k-1)H'/(HP(k|k-1)H'+R)
|
||||
* Kg(k) is the kalman gain, the ratio of estimate variance in total variance,
|
||||
* H is the measurementMatrix,R is the measurementNoiseCov
|
||||
*
|
||||
* (4) X(k|k)=X(k|k-1)+Kg(k)(Z(k)-HX(k|k-1))
|
||||
* X(k|k) is the k statePost, Z(k) is the measurement of K, in SORT Z(k) is
|
||||
* the detection result of k
|
||||
*
|
||||
* (5) P(k|k)=(1-Kg(k)H)P(k|k-1)
|
||||
* P(k|k) is the errorCovPost
|
||||
*/
|
||||
void KalmanTracker::CvKalmanInit(MxBase::ObjectInfo initRect)
|
||||
{
|
||||
const int stateDim = 7;
|
||||
const int measureDim = 4;
|
||||
cvkalmanfilter_ = cv::KalmanFilter(stateDim, measureDim, 0); // zero control
|
||||
measurement_ = cv::Mat::zeros(measureDim, 1, CV_32F); // 4 measurements, Z(k), according to detection results
|
||||
// A, will not be updated
|
||||
cvkalmanfilter_.transitionMatrix = (cv::Mat_<float>(stateDim, stateDim) << 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1);
|
||||
cvkalmanfilter_.measurementMatrix = (cv::Mat_<float>(measureDim, stateDim) << 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0);
|
||||
cv::setIdentity(cvkalmanfilter_.measurementMatrix); // H, will not be updated
|
||||
cv::setIdentity(cvkalmanfilter_.processNoiseCov, cv::Scalar::all(1e-2)); // Q, will not be updated
|
||||
cv::setIdentity(cvkalmanfilter_.measurementNoiseCov, cv::Scalar::all(1e-1)); // R, will not be updated
|
||||
cv::setIdentity(cvkalmanfilter_.errorCovPost, cv::Scalar::all(1)); // P(k-1|k-1), will be updated
|
||||
|
||||
// initialize state vector with bounding box in
|
||||
// [center_x,center_y,area,ratio]
|
||||
// style, the velocity is 0
|
||||
// X(k-1|k-1)
|
||||
cvkalmanfilter_.statePost.at<float>(0, 0) = initRect.x0 + (initRect.x1 - initRect.x0) / MULTIPLE;
|
||||
cvkalmanfilter_.statePost.at<float>(1, 0) = initRect.y0 + (initRect.y1 - initRect.y0) / MULTIPLE;
|
||||
cvkalmanfilter_.statePost.at<float>(OFFSET, 0) = (initRect.x1 - initRect.x0) * (initRect.y1 - initRect.y0);
|
||||
cvkalmanfilter_.statePost.at<float>(OFFSET + 1, 0) = (initRect.x1 - initRect.x0) / (initRect.y1 - initRect.y0);
|
||||
}
|
||||
|
||||
// Predict the bounding box.
|
||||
MxBase::ObjectInfo KalmanTracker::Predict()
|
||||
{
|
||||
// predict
|
||||
// return X(k|k-1)=AX(k-1|k-1), and update
|
||||
// P(k|k-1) <- AP(k-1|k-1)A'+Q
|
||||
MxBase::ObjectInfo detectInfo = {};
|
||||
cv::Mat predictState = cvkalmanfilter_.predict();
|
||||
float *pData = (float *)(predictState.data);
|
||||
float w = sqrt((*(pData + OFFSET)) * (*(pData + OFFSET + 1)));
|
||||
if (w < DBL_EPSILON)
|
||||
{
|
||||
detectInfo.x0 = 0;
|
||||
detectInfo.y0 = 0;
|
||||
detectInfo.x1 = 0;
|
||||
detectInfo.y1 = 0;
|
||||
detectInfo.classId = 0;
|
||||
return detectInfo;
|
||||
}
|
||||
if (w == 0)
|
||||
{
|
||||
MxBase::ObjectInfo w0DetectInfo = {};
|
||||
return w0DetectInfo;
|
||||
}
|
||||
float h = (*(pData + OFFSET)) / w;
|
||||
float x = (*pData) - w / MULTIPLE;
|
||||
float y = (*(pData + 1)) - h / MULTIPLE;
|
||||
if (x < 0 && (*pData) > 0)
|
||||
{
|
||||
x = 0;
|
||||
}
|
||||
if (y < 0 && (*(pData + 1)) > 0)
|
||||
{
|
||||
y = 0;
|
||||
}
|
||||
detectInfo.x0 = x;
|
||||
detectInfo.y0 = y;
|
||||
detectInfo.x1 = x + w;
|
||||
detectInfo.y1 = y + h;
|
||||
return detectInfo;
|
||||
}
|
||||
|
||||
// Update the state using observed bounding box
|
||||
void KalmanTracker::Update(MxBase::ObjectInfo stateMat)
|
||||
{
|
||||
// measurement_, update Z(k)
|
||||
float *pData = (float *)(measurement_.data);
|
||||
*pData = stateMat.x0 + (stateMat.x1 - stateMat.x0) / MULTIPLE;
|
||||
*(pData + 1) = stateMat.y0 + (stateMat.y1 - stateMat.y0) / MULTIPLE;
|
||||
*(pData + OFFSET) = (stateMat.x1 - stateMat.x0) * (stateMat.y1 - stateMat.y0);
|
||||
*(pData + OFFSET + 1) = (stateMat.x1 - stateMat.x0) / (stateMat.y1 - stateMat.y0);
|
||||
// update, do the following steps:
|
||||
// Kg(k): P(k|k-1)H'/(HP(k|k-1)H'+R)
|
||||
// X(k|k): X(k|k-1)+Kg(k)(Z(k)-HX(k|k-1))
|
||||
// P(k|k): (1-Kg(k)H)P(k|k-1)
|
||||
cvkalmanfilter_.correct(measurement_);
|
||||
}
|
||||
} // namespace
|
39
HelmetIdentification_V2/src/KalmanTracker.h
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MXBASE_HELMETIDENTIFICATION_KALMANTRACKER_H
|
||||
#define MXBASE_HELMETIDENTIFICATION_KALMANTRACKER_H
|
||||
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/video/tracking.hpp"
|
||||
#include "DataType.h"
|
||||
#include "MxBase/PostProcessBases/PostProcessDataType.h"
|
||||
|
||||
namespace ascendVehicleTracking {
|
||||
class KalmanTracker {
|
||||
public:
|
||||
KalmanTracker() {}
|
||||
~KalmanTracker() {}
|
||||
void CvKalmanInit(MxBase::ObjectInfo initRect);
|
||||
MxBase::ObjectInfo Predict();
|
||||
void Update(MxBase::ObjectInfo stateMat);
|
||||
private:
|
||||
cv::KalmanFilter cvkalmanfilter_ = {};
|
||||
cv::Mat measurement_ = {};
|
||||
};
|
||||
} // namespace ascendVehicleTracking
|
||||
|
||||
#endif
|
262
HelmetIdentification_V2/src/MOTConnection.cpp
Normal file
@@ -0,0 +1,262 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "MOTConnection.h"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "MxBase/Log/Log.h"
|
||||
#include "MxBase/ErrorCode/ErrorCodes.h"
|
||||
#include "Hungarian.h"
|
||||
|
||||
namespace ascendVehicleTracking
|
||||
{
|
||||
namespace
|
||||
{
|
||||
// convert double to int
|
||||
const int FLOAT_TO_INT = 1000;
|
||||
const int MULTIPLE = 0;
|
||||
const double SIMILARITY_THRESHOLD = 0.66;
|
||||
const int MULTIPLE_IOU = 6;
|
||||
const float NORM_EPS = 1e-10;
|
||||
const double TIME_COUNTS = 1000.0;
|
||||
const double COST_TIME_MS_THRESHOLD = 10.;
|
||||
const float WIDTH_RATE_THRESH = 1.f;
|
||||
const float HEIGHT_RATE_THRESH = 1.f;
|
||||
const float X_DIST_RATE_THRESH = 1.3f;
|
||||
const float Y_DIST_RATE_THRESH = 1.f;
|
||||
} // namespace
|
||||
|
||||
// compute the IoU of two bounding boxes
|
||||
float CalIOU(MxBase::ObjectInfo detect1, MxBase::ObjectInfo detect2)
|
||||
{
|
||||
cv::Rect_<float> bbox1(detect1.x0, detect1.y0, detect1.x1 - detect1.x0, detect1.y1 - detect1.y0);
|
||||
cv::Rect_<float> bbox2(detect2.x0, detect2.y0, detect2.x1 - detect2.x0, detect2.y1 - detect2.y0);
|
||||
float intersectionArea = (bbox1 & bbox2).area();
|
||||
float unionArea = bbox1.area() + bbox2.area() - intersectionArea;
|
||||
if (unionArea < DBL_EPSILON)
|
||||
{
|
||||
return 0.f;
|
||||
}
|
||||
return (intersectionArea / unionArea);
|
||||
}
|
||||
|
||||
// compute the similarity of two bounding boxes from consecutive frames
|
||||
float CalSimilarity(const TraceLet &traceLet, const MxBase::ObjectInfo &objectInfo, const int &method, const double &kIOU)
|
||||
{
|
||||
return CalIOU(traceLet.detectInfo, objectInfo);
|
||||
}
|
||||
|
||||
// filter out matches whose IoU is below the threshold
|
||||
void MOTConnection::FilterLowThreshold(const HungarianHandle &hungarianHandleObj,
|
||||
const std::vector<std::vector<int>> &disMatrix, std::vector<cv::Point> &matchedTracedDetected,
|
||||
std::vector<bool> &detectVehicleFlagVec)
|
||||
{
|
||||
for (unsigned int i = 0; i < traceList_.size(); ++i)
|
||||
{
|
||||
if ((hungarianHandleObj.resX[i] != -1) &&
|
||||
(disMatrix[i][hungarianHandleObj.resX[i]] >= (trackThreshold_ * FLOAT_TO_INT)))
|
||||
{
|
||||
matchedTracedDetected.push_back(cv::Point(i, hungarianHandleObj.resX[i]));
|
||||
detectVehicleFlagVec[hungarianHandleObj.resX[i]] = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
traceList_[i].info.flag = LOST_VEHICLE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update the trackers that were not matched
|
||||
void MOTConnection::UpdateUnmatchedTraceLet(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos)
|
||||
{
|
||||
for (auto itr = traceList_.begin(); itr != traceList_.end();)
|
||||
{
|
||||
if ((*itr).info.flag != LOST_VEHICLE)
|
||||
{
|
||||
++itr;
|
||||
continue;
|
||||
}
|
||||
|
||||
(*itr).lostAge++;
|
||||
(*itr).kalman.Update((*itr).detectInfo);
|
||||
|
||||
if ((*itr).lostAge < lostThreshold_)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
itr = traceList_.erase(itr);
|
||||
}
|
||||
}
|
||||
|
||||
// update the matched trackers
|
||||
void MOTConnection::UpdateMatchedTraceLet(const std::vector<cv::Point> &matchedTracedDetected,
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> &objInfos)
|
||||
{
|
||||
for (unsigned int i = 0; i < matchedTracedDetected.size(); ++i)
|
||||
{
|
||||
int traceIndex = matchedTracedDetected[i].x;
|
||||
int detectIndex = matchedTracedDetected[i].y;
|
||||
if (traceList_[traceIndex].info.survivalTime > MULTIPLE)
|
||||
{
|
||||
traceList_[traceIndex].info.flag = TRACkED_VEHICLE;
|
||||
}
|
||||
traceList_[traceIndex].info.survivalTime++;
|
||||
traceList_[traceIndex].info.detectedTime++;
|
||||
traceList_[traceIndex].lostAge = 0;
|
||||
traceList_[traceIndex].detectInfo = objInfos[0][detectIndex];
|
||||
traceList_[traceIndex].kalman.Update(objInfos[0][detectIndex]);
|
||||
}
|
||||
}
|
||||
// register unmatched detections as new trackers
|
||||
void MOTConnection::AddNewDetectedVehicle(std::vector<MxBase::ObjectInfo> &unmatchedVehicleObjectQueue)
|
||||
{
|
||||
using Time = std::chrono::high_resolution_clock;
|
||||
for (auto &vehicleObject : unmatchedVehicleObjectQueue)
|
||||
{
|
||||
// add new detected into traceList
|
||||
TraceLet traceLet;
|
||||
generatedId_++;
|
||||
traceLet.info.id = generatedId_;
|
||||
traceLet.info.survivalTime = 1;
|
||||
traceLet.info.detectedTime = 1;
|
||||
traceLet.lostAge = 0;
|
||||
traceLet.info.flag = NEW_VEHICLE;
|
||||
traceLet.detectInfo = vehicleObject;
|
||||
traceLet.info.createTime = Time::now();
|
||||
|
||||
traceLet.kalman.CvKalmanInit(vehicleObject);
|
||||
traceList_.push_back(traceLet);
|
||||
}
|
||||
}
|
||||
|
||||
void MOTConnection::UpdateTraceLetAndFrame(const std::vector<cv::Point> &matchedTracedDetected,
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> &objInfos, std::vector<MxBase::ObjectInfo> &unmatchedVehicleObjectQueue)
|
||||
{
|
||||
UpdateMatchedTraceLet(matchedTracedDetected, objInfos); // update the matched trackers
|
||||
AddNewDetectedVehicle(unmatchedVehicleObjectQueue); // register unmatched detections as new trackers
|
||||
UpdateUnmatchedTraceLet(objInfos); // update the trackers that were not matched
|
||||
}
|
||||
|
||||
void MOTConnection::TrackObjectPredict()
|
||||
{
|
||||
// every traceLet should do kalman predict
|
||||
for (auto &traceLet : traceList_)
|
||||
{
|
||||
traceLet.detectInfo = traceLet.kalman.Predict(); // box predicted by the Kalman filter
|
||||
}
|
||||
}
|
||||
|
||||
void MOTConnection::TrackObjectUpdate(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos,
|
||||
std::vector<cv::Point> &matchedTracedDetected, std::vector<MxBase::ObjectInfo> &unmatchedVehicleObjectQueue)
|
||||
{
|
||||
if (objInfos[0].size() > 0)
|
||||
{
|
||||
LogDebug << "[frame id = " << 1 << "], trace size =" << traceList_.size() << "detect size = " << objInfos[0].size() << "";
|
||||
// init vehicle matched flag
|
||||
std::vector<bool> detectVehicleFlagVec;
|
||||
for (unsigned int i = 0; i < objInfos[0].size(); ++i)
|
||||
{
|
||||
detectVehicleFlagVec.push_back(false);
|
||||
}
|
||||
// calculate the associated matrix
|
||||
std::vector<std::vector<int>> disMatrix;
|
||||
disMatrix.clear();
|
||||
disMatrix.resize(traceList_.size(), std::vector<int>(objInfos[0].size(), 0));
|
||||
for (unsigned int j = 0; j < objInfos[0].size(); ++j)
|
||||
{
|
||||
for (unsigned int i = 0; i < traceList_.size(); ++i)
|
||||
{
|
||||
// compute the IoU
|
||||
float sim = CalSimilarity(traceList_[i], objInfos[0][j], method_, kIOU_); // method_=1, kIOU_=1.0
|
||||
disMatrix[i][j] = (int)(sim * FLOAT_TO_INT);
|
||||
}
|
||||
}
|
||||
|
||||
// solve the assignment problem using the Hungarian algorithm
|
||||
HungarianHandle hungarianHandleObj = {};
|
||||
HungarianHandleInit(hungarianHandleObj, traceList_.size(), objInfos[0].size());
|
||||
HungarianSolve(hungarianHandleObj, disMatrix, traceList_.size(), objInfos[0].size());
|
||||
// filter out pairs that were matched but have low IoU
|
||||
FilterLowThreshold(hungarianHandleObj, disMatrix, matchedTracedDetected, detectVehicleFlagVec);
|
||||
LogDebug << "matchedTracedDetected = " << matchedTracedDetected.size() << "";
|
||||
// fill unmatchedVehicleObjectQueue
|
||||
for (unsigned int i = 0; i < detectVehicleFlagVec.size(); ++i)
|
||||
{
|
||||
if (detectVehicleFlagVec[i] == false)
|
||||
{
|
||||
unmatchedVehicleObjectQueue.push_back(objInfos[0][i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
APP_ERROR MOTConnection::ProcessSort(std::vector<std::vector<MxBase::ObjectInfo>> &objInfos, size_t frameId)
|
||||
{
|
||||
std::vector<MxBase::ObjectInfo> unmatchedVehicleObjectQueue;
|
||||
std::vector<cv::Point> matchedTracedDetected;
|
||||
if (objInfos[0].size() == 0)
|
||||
{
|
||||
return APP_ERR_COMM_FAILURE;
|
||||
}
|
||||
|
||||
if (traceList_.size() > 0)
|
||||
{
|
||||
// every traceLet should do kalman predict
|
||||
TrackObjectPredict(); // Kalman filter prediction
|
||||
TrackObjectUpdate(objInfos, matchedTracedDetected, unmatchedVehicleObjectQueue); // pick out matched tracks and unmatched detections
|
||||
}
|
||||
else
|
||||
{
|
||||
// traceList is empty, all the vehicle detected in the new frame are unmatched.
|
||||
if (objInfos[0].size() > 0)
|
||||
{
|
||||
for (unsigned int i = 0; i < objInfos[0].size(); ++i)
|
||||
{
|
||||
unmatchedVehicleObjectQueue.push_back(objInfos[0][i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update all the tracelet in the tracelist per frame
|
||||
UpdateTraceLetAndFrame(matchedTracedDetected, objInfos, unmatchedVehicleObjectQueue); // update the trackers with the matched tracks and unmatched detections
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
|
||||
// get the detection boxes after tracking
|
||||
APP_ERROR MOTConnection::GettrackResult(std::vector<MxBase::ObjectInfo> &objInfos_)
|
||||
{
|
||||
if (traceList_.size() > 0)
|
||||
{
|
||||
for (auto &traceLet : traceList_)
|
||||
{
|
||||
traceLet.detectInfo.classId = traceLet.info.id;
|
||||
objInfos_.push_back(traceLet.detectInfo);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
return APP_ERR_COMM_FAILURE;
|
||||
}
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
}
|
||||
// namespace ascendVehicleTracking
|
78
HelmetIdentification_V2/src/MOTConnection.h
Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MXBASE_HELMETIDENTIFICATION_MOTCONNECTION_H
|
||||
#define MXBASE_HELMETIDENTIFICATION_MOTCONNECTION_H
|
||||
|
||||
#include <thread>
|
||||
#include <list>
|
||||
#include <utility>
|
||||
#include "KalmanTracker.h"
|
||||
#include "Hungarian.h"
|
||||
#include "DataType.h"
|
||||
#include "MxBase/ErrorCode/ErrorCodes.h"
|
||||
#include "MxBase/DvppWrapper/DvppWrapper.h"
|
||||
#include "MxBase/MemoryHelper/MemoryHelper.h"
|
||||
#include "MxBase/DeviceManager/DeviceManager.h"
|
||||
#include "MxBase/Tensor/TensorBase/TensorBase.h"
|
||||
#include "MxBase/PostProcessBases/PostProcessDataType.h"
|
||||
#include "MxBase/postprocess/include/ObjectPostProcessors/Yolov3PostProcess.h"
|
||||
|
||||
namespace ascendVehicleTracking {
|
||||
struct TraceLet {
|
||||
TraceInfo info = {};
|
||||
int32_t lostAge = 0;
|
||||
KalmanTracker kalman;
|
||||
std::list<std::pair<DataBuffer, float>> shortFeatureQueue;
|
||||
MxBase::ObjectInfo detectInfo = {};
|
||||
};
|
||||
|
||||
class MOTConnection {
|
||||
public:
|
||||
APP_ERROR ProcessSort(std::vector<std::vector<MxBase::ObjectInfo>> &objInfos, size_t frameId);
|
||||
APP_ERROR GettrackResult(std::vector<MxBase::ObjectInfo> &objInfos_);
|
||||
|
||||
private:
|
||||
double trackThreshold_ = 0.3;
|
||||
double kIOU_ = 1.0;
|
||||
int32_t method_ = 1;
|
||||
int32_t lostThreshold_ = 3;
|
||||
uint32_t maxNumberFeature_ = 0;
|
||||
int32_t generatedId_ = 0;
|
||||
std::vector<TraceLet> traceList_ = {};
|
||||
|
||||
private:
|
||||
|
||||
void FilterLowThreshold(const HungarianHandle &hungarianHandleObj, const std::vector<std::vector<int>> &disMatrix,
|
||||
std::vector<cv::Point> &matchedTracedDetected, std::vector<bool> &detectVehicleFlagVec);
|
||||
|
||||
void UpdateUnmatchedTraceLet(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos);
|
||||
|
||||
void UpdateMatchedTraceLet(const std::vector<cv::Point> &matchedTracedDetected,
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> &objInfos);
|
||||
|
||||
void AddNewDetectedVehicle(std::vector<MxBase::ObjectInfo> &unmatchedVehicleObjectQueue);
|
||||
|
||||
void UpdateTraceLetAndFrame(const std::vector<cv::Point> &matchedTracedDetected,
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> &objInfos, std::vector<MxBase::ObjectInfo> &unmatchedVehicleObjectQueue);
|
||||
|
||||
void TrackObjectPredict();
|
||||
void TrackObjectUpdate(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos,
|
||||
std::vector<cv::Point> &matchedTracedDetected, std::vector<MxBase::ObjectInfo> &unmatchedVehicleObjectQueue);
|
||||
};
|
||||
} // namespace ascendVehicleTracking
|
||||
|
||||
#endif
|
122
HelmetIdentification_V2/src/cropResizePaste.hpp
Normal file
@@ -0,0 +1,122 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef CRP_H
|
||||
#define CRP_H
|
||||
|
||||
#include "MxBase/E2eInfer/Image/Image.h"
|
||||
#include "MxBase/E2eInfer/Rect/Rect.h"
|
||||
#include "MxBase/E2eInfer/Size/Size.h"
|
||||
|
||||
#include "acl/dvpp/hi_dvpp.h"
|
||||
#include "acl/acl.h"
|
||||
#include "acl/acl_rt.h"
|
||||
|
||||
#define CONVER_TO_PODD(NUM) (((NUM) % 2 != 0) ? (NUM) : ((NUM)-1))
|
||||
#define CONVER_TO_EVEN(NUM) (((NUM) % 2 == 0) ? (NUM) : ((NUM)-1))
|
||||
#define DVPP_ALIGN_UP(x, align) ((((x) + ((align)-1)) / (align)) * (align))
|
||||
|
||||
MxBase::Rect GetPasteRect(uint32_t inputWidth, uint32_t inputHeight, uint32_t outputWidth, uint32_t outputHeight)
|
||||
{
|
||||
bool widthRatioLarger = true;
|
||||
if (outputWidth == 0)
|
||||
{
|
||||
LogError << "outputWidth equals to 0.";
|
||||
}
|
||||
if (outputHeight == 0)
|
||||
{
|
||||
LogError << "outputHeight equals to 0.";
|
||||
}
|
||||
float resizeRatio = static_cast<float>(inputWidth) / outputWidth;
|
||||
if (resizeRatio < (static_cast<float>(inputHeight) / outputHeight))
|
||||
{
|
||||
resizeRatio = static_cast<float>(inputHeight) / outputHeight;
|
||||
widthRatioLarger = false;
|
||||
}
|
||||
|
||||
// (x0, y0) is the top-left corner and (x1, y1) the bottom-right corner, in image coordinates
|
||||
uint32_t x0;
|
||||
uint32_t y0;
|
||||
uint32_t x1;
|
||||
uint32_t y1;
|
||||
if (widthRatioLarger)
|
||||
{
|
||||
// the width ratio of the original image dominates (image relatively wider)
|
||||
x0 = 0;
|
||||
y0 = 0;
|
||||
x1 = outputWidth-1;
|
||||
y1 = inputHeight / resizeRatio - 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
// the height ratio of the original image dominates (image relatively taller)
|
||||
x0 = 0;
|
||||
y0 = 0;
|
||||
x1 = inputWidth / resizeRatio - 1;
|
||||
y1 = outputHeight - 1;
|
||||
}
|
||||
x0 = DVPP_ALIGN_UP(CONVER_TO_EVEN(x0), 16); // align to 16
|
||||
x1 = DVPP_ALIGN_UP((x1 - x0 + 1), 16) + x0 - 1;
|
||||
y0 = CONVER_TO_EVEN(y0);
|
||||
y1 = CONVER_TO_PODD(y1);
|
||||
MxBase::Rect res(x0, y0, x1, y1);
|
||||
return res;
|
||||
}
|
||||
|
||||
MxBase::Image ConstructImage(uint32_t resizeWidth, uint32_t resizeHeight)
|
||||
{
|
||||
void *addr;
|
||||
uint32_t dataSize = resizeWidth * resizeHeight * 3 / 2;
|
||||
auto ret = hi_mpi_dvpp_malloc(0, &addr, dataSize);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "hi_mpi_dvpp_malloc fail :" << ret;
|
||||
}
|
||||
// the third argument was changed from 128 to 0
|
||||
ret = aclrtMemset(addr, dataSize, 0, dataSize);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "aclrtMemset fail :" << ret;
|
||||
}
|
||||
std::shared_ptr<uint8_t> data((uint8_t *)addr, hi_mpi_dvpp_free);
|
||||
MxBase::Size imageSize(resizeWidth, resizeHeight);
|
||||
MxBase::Image pastedImg(data, dataSize, 0, imageSize);
|
||||
return pastedImg;
|
||||
}
|
||||
|
||||
std::pair<MxBase::Rect, MxBase::Rect> GenerateRect(uint32_t originalWidth, uint32_t originalHeight, uint32_t resizeWidth, uint32_t resizeHeight)
|
||||
{
|
||||
uint32_t x1 = CONVER_TO_PODD(originalWidth - 1);
|
||||
uint32_t y1 = CONVER_TO_PODD(originalHeight - 1);
|
||||
MxBase::Rect cropRect(0, 0, x1, y1);
|
||||
MxBase::Rect pasteRect = GetPasteRect(originalWidth, originalHeight, resizeWidth, resizeHeight);
|
||||
std::pair<MxBase::Rect, MxBase::Rect> cropPasteRect(cropRect, pasteRect);
|
||||
return cropPasteRect;
|
||||
}
|
||||
|
||||
MxBase::Image resizeKeepAspectRatioFit(uint32_t originalWidth, uint32_t originalHeight, uint32_t resizeWidth, uint32_t resizeHeight, MxBase::Image &decodeImage, MxBase::ImageProcessor& imageProcessor)
|
||||
{
|
||||
std::pair<MxBase::Rect, MxBase::Rect> cropPasteRect = GenerateRect(originalWidth, originalHeight, resizeWidth, resizeHeight);
|
||||
MxBase::Image resizeImage = ConstructImage(resizeWidth, resizeHeight);
|
||||
auto ret = imageProcessor.CropAndPaste(decodeImage, cropPasteRect, resizeImage);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "CropAndPaste fail :" << ret;
|
||||
}
|
||||
return resizeImage;
|
||||
}
|
||||
|
||||
#endif // CRP_H
|
377
HelmetIdentification_V2/src/main-image.cpp
Normal file
@@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "utils.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <time.h>
|
||||
using namespace std;
|
||||
|
||||
// change this to USE_200DK when running on the 200DK
|
||||
#define USE_DVPP
|
||||
|
||||
APP_ERROR readImage(std::string imgPath, MxBase::Image &image, MxBase::ImageProcessor &imageProcessor)
|
||||
{
|
||||
APP_ERROR ret;
|
||||
#ifdef USE_DVPP
|
||||
// if USE DVPP
|
||||
ret = imageProcessor.Decode(imgPath, image);
|
||||
#endif
|
||||
#ifdef USE_200DK
|
||||
std::shared_ptr<uint8_t> dataPtr;
|
||||
uint32_t dataSize;
|
||||
// Get image data to memory, this method can be substituted or designed by yourself!
|
||||
std::ifstream file;
|
||||
file.open(imgPath.c_str(), std::ios::binary);
|
||||
if (!file)
|
||||
{
|
||||
LogInfo << "Invalid file.";
|
||||
return APP_ERR_COMM_INVALID_PARAM;
|
||||
}
|
||||
std::stringstream buffer;
|
||||
buffer << file.rdbuf();
|
||||
std::string content = buffer.str();
|
||||
|
||||
char *p = (char *)malloc(content.size());
|
||||
memcpy(p, content.data(), content.size());
|
||||
auto deleter = [](void *p) -> void
|
||||
{
|
||||
free(p);
|
||||
p = nullptr;
|
||||
};
|
||||
|
||||
dataPtr.reset(static_cast<uint8_t *>((void *)(p)), deleter);
|
||||
dataSize = content.size();
|
||||
file.close();
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "Getimage failed, ret=" << ret;
|
||||
return ret;
|
||||
}
|
||||
ret = imageProcessor.Decode(dataPtr, dataSize, image, MxBase::ImageFormat::YUV_SP_420);
|
||||
// endif
|
||||
#endif
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "Decode failed, ret=" << ret;
|
||||
return ret;
|
||||
}
|
||||
return ret;
}
|
||||
|
||||
void postProcess(std::vector<MxBase::Tensor> modelOutputs, std::shared_ptr<MxBase::Yolov3PostProcess> postProcessorDptr,
|
||||
MxBase::Size originalSize, MxBase::Size resizeSize, std::string outFileName)
|
||||
{
|
||||
MxBase::ResizedImageInfo imgInfo;
|
||||
auto shape = modelOutputs[0].GetShape();
|
||||
imgInfo.widthOriginal = originalSize.width;
|
||||
imgInfo.heightOriginal = originalSize.height;
|
||||
imgInfo.widthResize = resizeSize.width;
|
||||
imgInfo.heightResize = resizeSize.height;
|
||||
imgInfo.resizeType = MxBase::RESIZER_MS_KEEP_ASPECT_RATIO;
|
||||
float resizeRate = originalSize.width > originalSize.height ? (originalSize.width * 1.0 / videoInfo::YOLOV5_RESIZE) : (originalSize.height * 1.0 / videoInfo::YOLOV5_RESIZE);
|
||||
imgInfo.keepAspectRatioScaling = 1 / resizeRate;
|
||||
std::vector<MxBase::ResizedImageInfo> imageInfoVec = {};
|
||||
imageInfoVec.push_back(imgInfo);
|
||||
// make postProcess inputs
|
||||
std::vector<MxBase::TensorBase> tensors;
|
||||
for (size_t i = 0; i < modelOutputs.size(); i++)
|
||||
{
|
||||
MxBase::MemoryData memoryData(modelOutputs[i].GetData(), modelOutputs[i].GetByteSize());
|
||||
MxBase::TensorBase tensorBase(memoryData, true, modelOutputs[i].GetShape(), MxBase::TENSOR_DTYPE_INT32);
|
||||
tensors.push_back(tensorBase);
|
||||
}
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> objectInfos;
|
||||
postProcessorDptr->Process(tensors, objectInfos, imageInfoVec);
|
||||
std::cout << "===---> Size of objectInfos is " << objectInfos.size() << std::endl;
|
||||
std::ofstream out(outFileName);
|
||||
for (size_t i = 0; i < objectInfos.size(); i++)
|
||||
{
|
||||
std::cout << "objectInfo-" << i << " , Size:" << objectInfos[i].size() << std::endl;
|
||||
for (size_t j = 0; j < objectInfos[i].size(); j++)
|
||||
{
|
||||
std::cout << std::endl
|
||||
<< "*****objectInfo-" << i << ":" << j << std::endl;
|
||||
std::cout << "x0 is " << objectInfos[i][j].x0 << std::endl;
|
||||
std::cout << "y0 is " << objectInfos[i][j].y0 << std::endl;
|
||||
std::cout << "x1 is " << objectInfos[i][j].x1 << std::endl;
|
||||
std::cout << "y1 is " << objectInfos[i][j].y1 << std::endl;
|
||||
std::cout << "confidence is " << objectInfos[i][j].confidence << std::endl;
|
||||
std::cout << "classId is " << objectInfos[i][j].classId << std::endl;
|
||||
std::cout << "className is " << objectInfos[i][j].className << std::endl;
|
||||
|
||||
// 这里的out是输出流,不是std::cout
|
||||
out << objectInfos[i][j].className;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].confidence;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].x0;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].y0;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].x1;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].y1;
|
||||
out << "\n";
|
||||
}
|
||||
}
|
||||
out.close();
|
||||
}
|
||||
|
||||
APP_ERROR main(int argc, char *argv[])
|
||||
{
|
||||
APP_ERROR ret;
|
||||
|
||||
// global init
|
||||
ret = MxBase::MxInit();
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "MxInit failed, ret=" << ret << ".";
|
||||
}
|
||||
// 检测是否输入了文件路径
|
||||
if (argc <= 1)
|
||||
{
|
||||
LogWarn << "Please input image path, such as 'test.jpg'.";
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
std::string num = argv[1];
|
||||
|
||||
// imageProcess init
|
||||
MxBase::ImageProcessor imageProcessor(videoInfo::DEVICE_ID);
|
||||
// model init
|
||||
MxBase::Model yoloModel(videoInfo::modelPath, videoInfo::DEVICE_ID);
|
||||
// postprocessor init
|
||||
std::map<std::string, std::string> postConfig;
|
||||
postConfig.insert(std::pair<std::string, std::string>("postProcessConfigPath", videoInfo::configPath));
|
||||
postConfig.insert(std::pair<std::string, std::string>("labelPath", videoInfo::labelPath));
|
||||
std::shared_ptr<MxBase::Yolov3PostProcess> postProcessorDptr = std::make_shared<MxBase::Yolov3PostProcess>();
|
||||
postProcessorDptr->Init(postConfig);
|
||||
|
||||
std::string imgPath = "dataSet/TestImages/" + num + ".jpg";
|
||||
// 读取图片
|
||||
MxBase::Image image;
|
||||
readImage(imgPath, image, imageProcessor);
|
||||
|
||||
// 缩放图片
|
||||
MxBase::Size originalSize = image.GetOriginalSize();
|
||||
MxBase::Size resizeSize = MxBase::Size(videoInfo::YOLOV5_RESIZE, videoInfo::YOLOV5_RESIZE);
|
||||
MxBase::Image resizedImage = resizeKeepAspectRatioFit(originalSize.width, originalSize.height, resizeSize.width, resizeSize.height, image, imageProcessor);
|
||||
|
||||
// 模型推理
|
||||
MxBase::Tensor tensorImg = resizedImage.ConvertToTensor();
|
||||
tensorImg.ToDevice(videoInfo::DEVICE_ID);
|
||||
std::vector<MxBase::Tensor> inputs;
|
||||
inputs.push_back(tensorImg);
|
||||
std::vector<MxBase::Tensor> modelOutputs = yoloModel.Infer(inputs);
|
||||
for (auto output : modelOutputs)
|
||||
{
|
||||
output.ToHost();
|
||||
}
|
||||
|
||||
// 后处理
|
||||
std::string outFileName = "dataSet/V2txt/" + num + ".txt";
|
||||
postProcess(modelOutputs, postProcessorDptr, originalSize, resizeSize, outFileName);
|
||||
|
||||
return APP_ERR_OK;
|
||||
}/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "utils.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <time.h>
|
||||
using namespace std;
|
||||
|
||||
// If running on an Atlas 200DK, change this to USE_200DK
|
||||
#define USE_DVPP
|
||||
|
||||
APP_ERROR readImage(std::string imgPath, MxBase::Image &image, MxBase::ImageProcessor &imageProcessor)
|
||||
{
|
||||
APP_ERROR ret = APP_ERR_OK;
|
||||
#ifdef USE_DVPP
|
||||
// if USE DVPP
|
||||
ret = imageProcessor.Decode(imgPath, image);
|
||||
#endif
|
||||
#ifdef USE_200DK
|
||||
std::shared_ptr<uint8_t> dataPtr;
|
||||
uint32_t dataSize;
|
||||
// Get image data to memory, this method can be substituted or designed by yourself!
|
||||
std::ifstream file;
|
||||
file.open(imgPath.c_str(), std::ios::binary);
|
||||
if (!file)
|
||||
{
|
||||
LogInfo << "Invalid file.";
|
||||
return APP_ERR_COMM_INVALID_PARAM;
|
||||
}
|
||||
std::stringstream buffer;
|
||||
buffer << file.rdbuf();
|
||||
std::string content = buffer.str();
|
||||
|
||||
char *p = (char *)malloc(content.size());
|
||||
memcpy(p, content.data(), content.size());
|
||||
auto deleter = [](void *p) -> void
|
||||
{
|
||||
free(p);
|
||||
p = nullptr;
|
||||
};
|
||||
|
||||
dataPtr.reset(static_cast<uint8_t *>((void *)(p)), deleter);
|
||||
dataSize = content.size();
|
||||
file.close();
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "Getimage failed, ret=" << ret;
|
||||
return ret;
|
||||
}
|
||||
ret = imageProcessor.Decode(dataPtr, dataSize, image, MxBase::ImageFormat::YUV_SP_420);
|
||||
// endif
|
||||
#endif
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "Decode failed, ret=" << ret;
|
||||
return ret;
|
||||
}
|
||||
return APP_ERR_OK;
}
|
||||
|
||||
void postProcess(std::vector<MxBase::Tensor> modelOutputs, std::shared_ptr<MxBase::Yolov3PostProcess> postProcessorDptr,
|
||||
MxBase::Size originalSize, MxBase::Size resizeSize, std::string outFileName)
|
||||
{
|
||||
MxBase::ResizedImageInfo imgInfo;
|
||||
auto shape = modelOutputs[0].GetShape();
|
||||
imgInfo.widthOriginal = originalSize.width;
|
||||
imgInfo.heightOriginal = originalSize.height;
|
||||
imgInfo.widthResize = resizeSize.width;
|
||||
imgInfo.heightResize = resizeSize.height;
|
||||
imgInfo.resizeType = MxBase::RESIZER_MS_KEEP_ASPECT_RATIO;
|
||||
float resizeRate = originalSize.width > originalSize.height ? (originalSize.width * 1.0 / videoInfo::YOLOV5_RESIZE) : (originalSize.height * 1.0 / videoInfo::YOLOV5_RESIZE);
|
||||
imgInfo.keepAspectRatioScaling = 1 / resizeRate;
|
||||
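// For example, a 1920x1080 source gives resizeRate = 1920 / 640 = 3.0 and
// keepAspectRatioScaling = 1 / 3.0 ≈ 0.333; the post-processor uses this factor
// to map boxes from the 640x640 letterboxed input back to the original coordinates.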
std::vector<MxBase::ResizedImageInfo> imageInfoVec = {};
|
||||
imageInfoVec.push_back(imgInfo);
|
||||
// make postProcess inputs
|
||||
std::vector<MxBase::TensorBase> tensors;
|
||||
for (size_t i = 0; i < modelOutputs.size(); i++)
|
||||
{
|
||||
MxBase::MemoryData memoryData(modelOutputs[i].GetData(), modelOutputs[i].GetByteSize());
|
||||
MxBase::TensorBase tensorBase(memoryData, true, modelOutputs[i].GetShape(), MxBase::TENSOR_DTYPE_INT32);
|
||||
tensors.push_back(tensorBase);
|
||||
}
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> objectInfos;
|
||||
postProcessorDptr->Process(tensors, objectInfos, imageInfoVec);
|
||||
std::cout << "===---> Size of objectInfos is " << objectInfos.size() << std::endl;
|
||||
std::ofstream out(outFileName);
|
||||
for (size_t i = 0; i < objectInfos.size(); i++)
|
||||
{
|
||||
std::cout << "objectInfo-" << i << " , Size:" << objectInfos[i].size() << std::endl;
|
||||
for (size_t j = 0; j < objectInfos[i].size(); j++)
|
||||
{
|
||||
std::cout << std::endl
|
||||
<< "*****objectInfo-" << i << ":" << j << std::endl;
|
||||
std::cout << "x0 is " << objectInfos[i][j].x0 << std::endl;
|
||||
std::cout << "y0 is " << objectInfos[i][j].y0 << std::endl;
|
||||
std::cout << "x1 is " << objectInfos[i][j].x1 << std::endl;
|
||||
std::cout << "y1 is " << objectInfos[i][j].y1 << std::endl;
|
||||
std::cout << "confidence is " << objectInfos[i][j].confidence << std::endl;
|
||||
std::cout << "classId is " << objectInfos[i][j].classId << std::endl;
|
||||
std::cout << "className is " << objectInfos[i][j].className << std::endl;
|
||||
|
||||
// 'out' here is the file output stream, not std::cout
|
||||
out << objectInfos[i][j].className;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].confidence;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].x0;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].y0;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].x1;
|
||||
out << " ";
|
||||
out << objectInfos[i][j].y1;
|
||||
out << "\n";
|
||||
}
|
||||
}
|
||||
out.close();
|
||||
}
|
||||
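// Each line written to the txt file has the form:
//     className confidence x0 y0 x1 y1
// (one detection per line).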
|
||||
APP_ERROR main(int argc, char *argv[])
|
||||
{
|
||||
APP_ERROR ret;
|
||||
|
||||
// global init
|
||||
ret = MxBase::MxInit();
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "MxInit failed, ret=" << ret << ".";
|
||||
}
|
||||
// Check whether an input image was specified on the command line
|
||||
if (argc <= 1)
|
||||
{
|
||||
LogWarn << "Please input image path, such as 'test.jpg'.";
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
std::string num = argv[1];
|
||||
|
||||
// imageProcess init
|
||||
MxBase::ImageProcessor imageProcessor(videoInfo::DEVICE_ID);
|
||||
// model init
|
||||
MxBase::Model yoloModel(videoInfo::modelPath, videoInfo::DEVICE_ID);
|
||||
// postprocessor init
|
||||
std::map<std::string, std::string> postConfig;
|
||||
postConfig.insert(std::pair<std::string, std::string>("postProcessConfigPath", videoInfo::configPath));
|
||||
postConfig.insert(std::pair<std::string, std::string>("labelPath", videoInfo::labelPath));
|
||||
std::shared_ptr<MxBase::Yolov3PostProcess> postProcessorDptr = std::make_shared<MxBase::Yolov3PostProcess>();
|
||||
postProcessorDptr->Init(postConfig);
|
||||
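// The Yolov3PostProcess post-processor class is reused for the YOLOv5 model here;
// its behaviour is driven by the config file (videoInfo::configPath) and the label
// file (videoInfo::labelPath) passed in postConfig above.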
|
||||
std::string imgPath = "dataSet/TestImages/" + num + ".jpg";
|
||||
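// argv[1] is the image name without extension: for example, passing "7" reads
// dataSet/TestImages/7.jpg and writes the detections to dataSet/V2txt/7.txt.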
// Read the image
|
||||
MxBase::Image image;
|
||||
readImage(imgPath, image, imageProcessor);
|
||||
|
||||
// Resize the image
|
||||
MxBase::Size originalSize = image.GetOriginalSize();
|
||||
MxBase::Size resizeSize = MxBase::Size(videoInfo::YOLOV5_RESIZE, videoInfo::YOLOV5_RESIZE);
|
||||
MxBase::Image resizedImage = resizeKeepAspectRatioFit(originalSize.width, originalSize.height, resizeSize.width, resizeSize.height, image, imageProcessor);
|
||||
|
||||
// Model inference
|
||||
MxBase::Tensor tensorImg = resizedImage.ConvertToTensor();
|
||||
tensorImg.ToDevice(videoInfo::DEVICE_ID);
|
||||
std::vector<MxBase::Tensor> inputs;
|
||||
inputs.push_back(tensorImg);
|
||||
std::vector<MxBase::Tensor> modelOutputs = yoloModel.Infer(inputs);
|
||||
for (auto output : modelOutputs)
|
||||
{
|
||||
output.ToHost();
|
||||
}
|
||||
|
||||
// Post-processing
|
||||
std::string outFileName = "dataSet/V2txt/" + num + ".txt";
|
||||
postProcess(modelOutputs, postProcessorDptr, originalSize, resizeSize, outFileName);
|
||||
|
||||
return APP_ERR_OK;
|
||||
}
|
359
HelmetIdentification_V2/src/main.cpp
Normal file
359
HelmetIdentification_V2/src/main.cpp
Normal file
@@ -0,0 +1,359 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "utils.h"
|
||||
|
||||
extern "C"
|
||||
{
|
||||
#include "libavformat/avformat.h"
|
||||
}
|
||||
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <fstream>
|
||||
#include "unistd.h"
|
||||
#include <memory>
|
||||
#include <queue>
|
||||
#include <thread>
|
||||
#include "boost/filesystem.hpp"
|
||||
|
||||
#include "MxBase/DeviceManager/DeviceManager.h"
|
||||
#include "MxBase/DvppWrapper/DvppWrapper.h"
|
||||
#include "MxBase/MemoryHelper/MemoryHelper.h"
|
||||
#include "opencv2/opencv.hpp"
|
||||
#include "MxBase/postprocess/include/ObjectPostProcessors/Yolov3PostProcess.h"
|
||||
|
||||
#include "MxBase/E2eInfer/ImageProcessor/ImageProcessor.h"
|
||||
#include "MxBase/E2eInfer/VideoDecoder/VideoDecoder.h"
|
||||
#include "MxBase/E2eInfer/VideoEncoder/VideoEncoder.h"
|
||||
#include "MxBase/E2eInfer/DataType.h"
|
||||
|
||||
#include <time.h>
|
||||
#include <sys/stat.h>
|
||||
using namespace std;
|
||||
using namespace videoInfo;
|
||||
namespace frameConfig
|
||||
{
|
||||
size_t channelId0 = 0;
|
||||
size_t channelId1 = 1;
|
||||
size_t frameCountChannel0 = 300;
|
||||
size_t frameCountChannel1 = 300;
|
||||
size_t skipIntervalChannel0 = 2;
|
||||
size_t skipIntervalChannel1 = 2;
|
||||
|
||||
int arg_one_video = 4;
|
||||
int arg_two_video = 7;
|
||||
int arg1 = 1;
|
||||
int arg2 = 2;
|
||||
int arg3 = 3;
|
||||
int arg4 = 4;
|
||||
int arg5 = 5;
|
||||
int arg6 = 6;
|
||||
|
||||
// AVFormatContext for the channel-0 input file
|
||||
AVFormatContext *pFormatCtx0 = nullptr;
|
||||
// AVFormatContext for the channel-1 input file
|
||||
AVFormatContext *pFormatCtx1 = nullptr;
|
||||
}
|
||||
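// arg_one_video / arg_two_video are the expected argc values: 4 means one video
// (program, path, width, height) and 7 means two videos; arg1..arg6 are the argv
// indices read in checkArg() below.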
|
||||
// Pull the stream with FFmpeg
|
||||
AVFormatContext *CreateFormatContext(std::string filePath)
|
||||
{
|
||||
LogInfo << "start to CreatFormatContext!";
|
||||
// create the format context used to pull the stream
|
||||
AVFormatContext *formatContext = nullptr;
|
||||
AVDictionary *options = nullptr;
|
||||
|
||||
LogInfo << "start to avformat_open_input!";
|
||||
int ret = avformat_open_input(&formatContext, filePath.c_str(), nullptr, &options);
|
||||
if (options != nullptr)
|
||||
{
|
||||
av_dict_free(&options);
|
||||
}
|
||||
if (ret != 0)
|
||||
{
|
||||
LogError << "Couldn't open input stream " << filePath.c_str() << " ret=" << ret;
|
||||
return nullptr;
|
||||
}
|
||||
ret = avformat_find_stream_info(formatContext, nullptr);
|
||||
if (ret != 0)
|
||||
{
|
||||
LogError << "Couldn't open input stream information";
|
||||
return nullptr;
|
||||
}
|
||||
return formatContext;
|
||||
}
|
||||
|
||||
// The actual stream-pulling functions
|
||||
void PullStream0(std::string filePath)
|
||||
{
|
||||
av_register_all();
|
||||
avformat_network_init();
|
||||
frameConfig::pFormatCtx0 = avformat_alloc_context();
|
||||
frameConfig::pFormatCtx0 = CreateFormatContext(filePath);
|
||||
av_dump_format(frameConfig::pFormatCtx0, 0, filePath.c_str(), 0);
|
||||
}
|
||||
void PullStream1(std::string filePath)
|
||||
{
|
||||
av_register_all();
|
||||
avformat_network_init();
|
||||
frameConfig::pFormatCtx1 = avformat_alloc_context();
|
||||
frameConfig::pFormatCtx1 = CreateFormatContext(filePath);
|
||||
av_dump_format(frameConfig::pFormatCtx1, 0, filePath.c_str(), 0);
|
||||
}
|
||||
|
||||
// Video-decode callback (sample code; it runs in the tests but cannot be reused as-is)
|
||||
APP_ERROR CallBackVdec(MxBase::Image &decodedImage, uint32_t channelId, uint32_t frameId, void *userData)
|
||||
{
|
||||
FrameImage frameImage;
|
||||
frameImage.image = decodedImage;
|
||||
frameImage.channelId = channelId;
|
||||
frameImage.frameId = frameId;
|
||||
|
||||
videoInfo::g_threadsMutex_frameImageQueue.lock();
|
||||
videoInfo::frameImageQueue.push(frameImage);
|
||||
videoInfo::g_threadsMutex_frameImageQueue.unlock();
|
||||
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
|
||||
// Fetch one frame (packet) from the H.264 stream
|
||||
void GetFrame(AVPacket &pkt, FrameImage &frameImage, AVFormatContext *pFormatCtx)
|
||||
{
|
||||
av_init_packet(&pkt);
|
||||
int ret = av_read_frame(pFormatCtx, &pkt);
|
||||
if (ret != 0)
|
||||
{
|
||||
LogInfo << "[StreamPuller] channel Read frame failed, continue!";
|
||||
if (ret == AVERROR_EOF)
|
||||
{
|
||||
LogInfo << "[StreamPuller] channel StreamPuller is EOF, over!";
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (pkt.size <= 0)
|
||||
{
|
||||
LogError << "Invalid pkt.size: " << pkt.size;
|
||||
return;
|
||||
}
|
||||
|
||||
// send to the device
|
||||
auto hostDeleter = [](void *dataPtr) -> void {};
|
||||
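// The deleter is deliberately a no-op: the shared_ptr below only wraps the buffer
// produced by MxbsMallocAndCopy and does not take ownership of it.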
MxBase::MemoryData data(pkt.size, MxBase::MemoryData::MEMORY_HOST);
|
||||
MxBase::MemoryData src((void *)(pkt.data), pkt.size, MxBase::MemoryData::MEMORY_HOST);
|
||||
APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(data, src);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "MxbsMallocAndCopy failed!";
|
||||
}
|
||||
std::shared_ptr<uint8_t> imageData((uint8_t *)data.ptrData, hostDeleter);
|
||||
|
||||
MxBase::Image subImage(imageData, pkt.size);
|
||||
frameImage.image = subImage;
|
||||
|
||||
LogDebug << "'channelId = " << frameImage.channelId << ", frameId = " << frameImage.frameId << " , dataSize = " << frameImage.image.GetDataSize();
|
||||
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Video-decode thread. frameCount: total number of frames to read; skipInterval: frame-skipping interval
|
||||
void VdecThread0(size_t frameCount, size_t skipInterval, int32_t channelId, uint32_t src_width, uint32_t src_height)
|
||||
{
|
||||
AVPacket pkt;
|
||||
uint32_t frameId = 0;
|
||||
// Decoder parameters
|
||||
MxBase::VideoDecodeConfig config;
|
||||
MxBase::VideoDecodeCallBack cPtr = CallBackVdec;
|
||||
config.width = src_width;
|
||||
config.height = src_height;
|
||||
config.callbackFunc = cPtr;
|
||||
// Frame-skipping control
|
||||
config.skipInterval = skipInterval;
|
||||
|
||||
std::shared_ptr<MxBase::VideoDecoder> videoDecoder = std::make_shared<MxBase::VideoDecoder>(config, videoInfo::DEVICE_ID, channelId);
|
||||
for (size_t i = 0; i < frameCount; i++)
|
||||
{
|
||||
MxBase::Image subImage;
|
||||
FrameImage frame;
|
||||
frame.channelId = channelId;
|
||||
frame.frameId = frameId;
|
||||
frame.image = subImage;
|
||||
GetFrame(pkt, frame, frameConfig::pFormatCtx0);
|
||||
APP_ERROR ret = videoDecoder->Decode(frame.image.GetData(), frame.image.GetDataSize(), frameId, &videoInfo::frameImageQueue);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "videoDecoder Decode failed. ret is: " << ret;
|
||||
}
|
||||
frameId += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Video-decode thread. frameCount: total number of frames to read; skipInterval: frame-skipping interval
|
||||
void VdecThread1(size_t frameCount, size_t skipInterval, int32_t channelId, uint32_t src_width, uint32_t src_height)
|
||||
{
|
||||
AVPacket pkt;
|
||||
uint32_t frameId = 0;
|
||||
// Decoder parameters
|
||||
MxBase::VideoDecodeConfig config;
|
||||
MxBase::VideoDecodeCallBack cPtr = CallBackVdec;
|
||||
config.width = src_width;
|
||||
config.height = src_height;
|
||||
config.callbackFunc = cPtr;
|
||||
// Frame-skipping control
|
||||
config.skipInterval = skipInterval;
|
||||
|
||||
std::shared_ptr<MxBase::VideoDecoder> videoDecoder = std::make_shared<MxBase::VideoDecoder>(config, videoInfo::DEVICE_ID, channelId);
|
||||
for (size_t i = 0; i < frameCount; i++)
|
||||
{
|
||||
MxBase::Image subImage;
|
||||
FrameImage frame;
|
||||
frame.channelId = channelId;
|
||||
frame.frameId = frameId;
|
||||
frame.image = subImage;
|
||||
GetFrame(pkt, frame, frameConfig::pFormatCtx1);
|
||||
APP_ERROR ret = videoDecoder->Decode(frame.image.GetData(), frame.image.GetDataSize(), frameId, &videoInfo::frameImageQueue);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "videoDecoder Decode failed. ret is: " << ret;
|
||||
}
|
||||
frameId += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Check the file (does it exist? is it a directory?)
|
||||
bool checkFile(std::string &fileName)
|
||||
{
|
||||
// Check whether the video file exists
|
||||
ifstream f(fileName.c_str());
|
||||
if (!f.good())
|
||||
{
|
||||
LogError << "file not exists! " << fileName;
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
// If it exists, also make sure it is not a directory
|
||||
struct stat s;
|
||||
if (stat(fileName.c_str(), &s) == 0)
|
||||
{
|
||||
if (s.st_mode & S_IFDIR)
|
||||
{
|
||||
LogError << fileName << " is a directory!";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool checkArg(int argc, char *argv[])
|
||||
{
|
||||
// Check whether video paths were provided
|
||||
if (argc <= 1)
|
||||
{
|
||||
LogWarn << "Please input video path, such as './video_sample test.264'.";
|
||||
return false;
|
||||
}
|
||||
else if (argc == frameConfig::arg_one_video)
|
||||
{
|
||||
std::string videoPath0 = argv[frameConfig::arg1];
|
||||
if (!checkFile(videoPath0))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
uint32_t src_width0 = (uint32_t)stoul(argv[frameConfig::arg2]);
|
||||
uint32_t src_height0 = (uint32_t)stoul(argv[frameConfig::arg3]);
|
||||
LogWarn << "videoPath0: " << videoPath0 << ", src_width0: " << src_width0 << ", src_height0: " << src_height0;
|
||||
PullStream0(videoPath0);
|
||||
std::thread threadVdec0(VdecThread0, frameConfig::frameCountChannel0, frameConfig::skipIntervalChannel0, frameConfig::channelId0, src_width0, src_height0);
|
||||
threadVdec0.join();
|
||||
}
|
||||
else if (argc == frameConfig::arg_two_video)
|
||||
{
|
||||
std::string videoPath0 = argv[frameConfig::arg1];
|
||||
std::string videoPath1 = argv[frameConfig::arg4];
|
||||
if (!checkFile(videoPath0))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
if (!checkFile(videoPath1))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t src_width0 = (uint32_t)stoul(argv[frameConfig::arg2]);
|
||||
uint32_t src_height0 = (uint32_t)stoul(argv[frameConfig::arg3]);
|
||||
LogWarn << "videoPath0: " << videoPath0 << ", src_width0: " << src_width0 << ", src_height0: " << src_height0;
|
||||
PullStream0(videoPath0);
|
||||
std::thread threadVdec0(VdecThread0, frameConfig::frameCountChannel0, frameConfig::skipIntervalChannel0, frameConfig::channelId0, src_width0, src_height0);
|
||||
|
||||
uint32_t src_width1 = (uint32_t)stoul(argv[frameConfig::arg5]);
|
||||
uint32_t src_height1 = (uint32_t)stoul(argv[frameConfig::arg6]);
|
||||
LogWarn << "videoPath1: " << videoPath1 << ", src_width1: " << src_width1 << ", src_height1: " << src_height1;
|
||||
PullStream1(videoPath1);
|
||||
std::thread threadVdec1(VdecThread1, frameConfig::frameCountChannel1, frameConfig::skipIntervalChannel1, frameConfig::channelId1, src_width1, src_height1);
|
||||
|
||||
// Joining threadVdec0 right after creating it would queue all channel-0 frames before the channel-1 frames
|
||||
threadVdec0.join();
|
||||
threadVdec1.join();
|
||||
}
|
||||
else
|
||||
{
|
||||
LogWarn << "usage: ./main test1.264 test1_width test1_height [test2.264 test2_width test2_height]";
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
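// Typical invocations matching the usage string above, with illustrative sizes:
//     ./main test1.264 1920 1080
//     ./main test1.264 1920 1080 test2.264 1920 1080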
|
||||
APP_ERROR main(int argc, char *argv[])
|
||||
{
|
||||
// Initialization
|
||||
APP_ERROR ret = MxBase::MxInit();
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "MxInit failed, ret=" << ret << ".";
|
||||
}
|
||||
if (!checkArg(argc, argv))
|
||||
{
|
||||
return APP_ERR_OK;
|
||||
}
|
||||
|
||||
std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now();
|
||||
size_t target_count = videoInfo::frameImageQueue.size();
|
||||
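// target_count is read after the decode threads have been joined inside checkArg(),
// so it is the total number of frames queued for this run; each worker thread below
// is joined before the next one starts, so the four stages run one after another.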
|
||||
// Resize thread
|
||||
std::thread resizeThread(resizeMethod, start_time, target_count);
|
||||
resizeThread.join();
|
||||
|
||||
// Inference thread
|
||||
std::thread inferThread(inferMethod, start_time, target_count);
|
||||
inferThread.join();
|
||||
|
||||
// Post-processing thread
|
||||
std::thread postprocessThread(postprocessMethod, start_time, target_count);
|
||||
postprocessThread.join();
|
||||
|
||||
// Tracking and de-duplication thread
|
||||
std::thread trackThread(trackMethod, start_time, target_count);
|
||||
trackThread.join();
|
||||
|
||||
return APP_ERR_OK;
|
||||
}
|
332
HelmetIdentification_V2/src/utils.h
Normal file
332
HelmetIdentification_V2/src/utils.h
Normal file
@@ -0,0 +1,332 @@
|
||||
/*
|
||||
* Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MXBASE_HELMETIDENTIFICATION_UTILS_H
|
||||
#define MXBASE_HELMETIDENTIFICATION_UTILS_H
|
||||
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <fstream>
|
||||
#include "unistd.h"
|
||||
#include <memory>
|
||||
#include <queue>
|
||||
#include <thread>
|
||||
#include "boost/filesystem.hpp"
|
||||
|
||||
#include "MxBase/DeviceManager/DeviceManager.h"
|
||||
#include "MxBase/DvppWrapper/DvppWrapper.h"
|
||||
#include "MxBase/MemoryHelper/MemoryHelper.h"
|
||||
#include "opencv2/opencv.hpp"
|
||||
#include "MxBase/postprocess/include/ObjectPostProcessors/Yolov3PostProcess.h"
|
||||
|
||||
#include "MxBase/E2eInfer/ImageProcessor/ImageProcessor.h"
|
||||
#include "MxBase/E2eInfer/VideoDecoder/VideoDecoder.h"
|
||||
#include "MxBase/E2eInfer/VideoEncoder/VideoEncoder.h"
|
||||
#include "MxBase/E2eInfer/DataType.h"
|
||||
|
||||
#include "MxBase/MxBase.h"
|
||||
#include "MOTConnection.h"
|
||||
#include "cropResizePaste.hpp"
|
||||
#include "chrono"
|
||||
#include "time.h"
|
||||
|
||||
struct FrameImage
|
||||
{
|
||||
MxBase::Image image;
|
||||
uint32_t frameId = 0;
|
||||
uint32_t channelId = 0;
|
||||
};
|
||||
|
||||
namespace videoInfo
|
||||
{
|
||||
const uint32_t YUV_BYTE_NU = 3;
|
||||
const uint32_t YUV_BYTE_DE = 2;
|
||||
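// YUV420SP (NV12) stores a full-resolution Y plane plus a half-height interleaved UV
// plane, so a decoded frame occupies height * 3 / 2 rows of width bytes; these two
// constants express that 3/2 factor when the buffer is wrapped in a cv::Mat later on.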
const uint32_t YOLOV5_RESIZE = 640;
|
||||
|
||||
// Label of the target class to detect
|
||||
const std::string TARGET_CLASS_NAME = "head";
|
||||
// chrono durations here are in milliseconds; divide by 1000 to convert to seconds
|
||||
double MS_PPE_SECOND = 1000.0;
|
||||
|
||||
const uint32_t DEVICE_ID = 0;
|
||||
// Paths relative to the compiled executable
|
||||
std::string labelPath = "./model/imgclass.names";
|
||||
std::string configPath = "./model/Helmet_yolov5.cfg";
|
||||
std::string modelPath = "./model/YOLOv5_s.om";
|
||||
|
||||
// Queue of decoded video-frame images
|
||||
std::queue<FrameImage> frameImageQueue;
|
||||
// Mutex protecting frameImageQueue
|
||||
std::mutex g_threadsMutex_frameImageQueue;
|
||||
|
||||
// Vector of original (pre-resize) images
|
||||
std::vector<FrameImage> realImageVector;
|
||||
// Vector of resized images
|
||||
std::vector<FrameImage> resizedImageVector;
|
||||
// Mutex protecting resizedImageVector
|
||||
std::mutex g_threadsMutex_resizedImageVector;
|
||||
|
||||
// Vector of model-inference output tensors
|
||||
std::vector<std::vector<MxBase::Tensor>> inferOutputVector;
|
||||
// Mutex protecting inferOutputVector
|
||||
std::mutex g_threadsMutex_inferOutputVector;
|
||||
|
||||
// Post-processed objectInfos, conceptually map<channelId, map<frameId, vector<ObjectInfo>>>
|
||||
std::vector<std::vector<std::vector<MxBase::ObjectInfo>>> postprocessOutputVector;
|
||||
// Mutex protecting postprocessOutputVector
|
||||
std::mutex g_threadsMutex_postprocessOutputVector;
|
||||
}
|
||||
namespace fs = boost::filesystem;
|
||||
|
||||
// Resize thread
|
||||
void resizeMethod(std::chrono::high_resolution_clock::time_point start_time, size_t target_count)
|
||||
{
|
||||
std::chrono::high_resolution_clock::time_point resize_start_time = std::chrono::high_resolution_clock::now();
|
||||
|
||||
MxBase::ImageProcessor imageProcessor(videoInfo::DEVICE_ID);
|
||||
|
||||
size_t resize_count = 0;
|
||||
while (resize_count < target_count)
|
||||
{
|
||||
if (videoInfo::frameImageQueue.empty())
|
||||
{
|
||||
continue;
|
||||
}
|
||||
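// Busy-wait until the decode callback has pushed at least one frame into the queue.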
// Take one frame and resize it
|
||||
FrameImage frame = videoInfo::frameImageQueue.front();
|
||||
MxBase::Image image = frame.image;
|
||||
MxBase::Size originalSize = image.GetOriginalSize();
|
||||
MxBase::Size resizeSize = MxBase::Size(videoInfo::YOLOV5_RESIZE, videoInfo::YOLOV5_RESIZE);
|
||||
MxBase::Image outputImage = resizeKeepAspectRatioFit(originalSize.width, originalSize.height, resizeSize.width, resizeSize.height, image, imageProcessor);
|
||||
// First, push the resized image into the resized-image vector
|
||||
FrameImage resizedFrame;
|
||||
resizedFrame.channelId = frame.channelId;
|
||||
resizedFrame.frameId = frame.frameId;
|
||||
resizedFrame.image = outputImage;
|
||||
videoInfo::g_threadsMutex_resizedImageVector.lock();
|
||||
frame.image.ToHost();
|
||||
videoInfo::realImageVector.push_back(frame);
|
||||
videoInfo::resizedImageVector.push_back(resizedFrame);
|
||||
videoInfo::g_threadsMutex_resizedImageVector.unlock();
|
||||
// Then pop the original frame from the input queue
|
||||
videoInfo::g_threadsMutex_frameImageQueue.lock();
|
||||
videoInfo::frameImageQueue.pop();
|
||||
videoInfo::g_threadsMutex_frameImageQueue.unlock();
|
||||
// Advance the counter
|
||||
resize_count++;
|
||||
}
|
||||
}
|
||||
|
||||
// Inference thread
|
||||
void inferMethod(std::chrono::high_resolution_clock::time_point start_time, size_t target_count)
|
||||
{
|
||||
std::chrono::high_resolution_clock::time_point infer_start_time = std::chrono::high_resolution_clock::now();
|
||||
|
||||
std::shared_ptr<MxBase::Model> modelDptr = std::make_shared<MxBase::Model>(videoInfo::modelPath, videoInfo::DEVICE_ID);
|
||||
|
||||
size_t infer_count = 0;
|
||||
while (infer_count < target_count)
|
||||
{
|
||||
if (infer_count >= videoInfo::resizedImageVector.size())
|
||||
{
|
||||
continue;
|
||||
}
|
||||
// Fetch an image from the resized-image vector
|
||||
FrameImage resizedFrame = videoInfo::resizedImageVector[infer_count];
|
||||
std::vector<MxBase::Tensor> modelOutputs;
|
||||
MxBase::Tensor tensorImg = resizedFrame.image.ConvertToTensor();
|
||||
tensorImg.ToDevice(videoInfo::DEVICE_ID);
|
||||
std::vector<MxBase::Tensor> inputs;
|
||||
inputs.push_back(tensorImg);
|
||||
modelOutputs = modelDptr->Infer(inputs);
|
||||
for (auto output : modelOutputs)
|
||||
{
|
||||
output.ToHost();
|
||||
}
|
||||
|
||||
// Store the inference outputs in the vector
|
||||
videoInfo::g_threadsMutex_inferOutputVector.lock();
|
||||
videoInfo::inferOutputVector.push_back(modelOutputs);
|
||||
videoInfo::g_threadsMutex_inferOutputVector.unlock();
|
||||
// Advance the counter
|
||||
infer_count++;
|
||||
}
|
||||
}
|
||||
|
||||
// Post-processing thread
|
||||
void postprocessMethod(std::chrono::high_resolution_clock::time_point start_time, size_t target_count)
|
||||
{
|
||||
std::chrono::high_resolution_clock::time_point postprocess_start_time = std::chrono::high_resolution_clock::now();
|
||||
|
||||
std::map<std::string, std::string> postConfig;
|
||||
postConfig.insert(std::pair<std::string, std::string>("postProcessConfigPath", videoInfo::configPath));
|
||||
postConfig.insert(std::pair<std::string, std::string>("labelPath", videoInfo::labelPath));
|
||||
std::shared_ptr<MxBase::Yolov3PostProcess> postProcessorDptr = std::make_shared<MxBase::Yolov3PostProcess>();
|
||||
if (postProcessorDptr == nullptr)
|
||||
{
|
||||
LogError << "init postProcessor failed, nullptr";
|
||||
}
|
||||
postProcessorDptr->Init(postConfig);
|
||||
|
||||
size_t postprocess_count = 0;
|
||||
while (postprocess_count < target_count)
|
||||
{
|
||||
if (postprocess_count >= videoInfo::inferOutputVector.size())
|
||||
{
|
||||
continue;
|
||||
}
|
||||
// Fetch the original image info needed for the computation
|
||||
MxBase::Size originalSize = videoInfo::realImageVector[postprocess_count].image.GetOriginalSize();
|
||||
// Fetch the inference outputs from the vector
|
||||
std::vector<MxBase::Tensor> modelOutputs = videoInfo::inferOutputVector[postprocess_count];
|
||||
FrameImage resizedFrame = videoInfo::resizedImageVector[postprocess_count];
|
||||
// The new post-processing procedure
|
||||
MxBase::ResizedImageInfo imgInfo;
|
||||
auto shape = modelOutputs[0].GetShape();
|
||||
imgInfo.widthOriginal = originalSize.width;
|
||||
imgInfo.heightOriginal = originalSize.height;
|
||||
imgInfo.widthResize = videoInfo::YOLOV5_RESIZE;
|
||||
imgInfo.heightResize = videoInfo::YOLOV5_RESIZE;
|
||||
imgInfo.resizeType = MxBase::RESIZER_MS_KEEP_ASPECT_RATIO;
|
||||
// Because YOLOv5 expects a 640*640 input, comparing the original height and width is enough (see the GetPasteRect function in cropResizePaste.hpp if this is unclear)
|
||||
float resizeRate = originalSize.width > originalSize.height ? (originalSize.width * 1.0 / videoInfo::YOLOV5_RESIZE) : (originalSize.height * 1.0 / videoInfo::YOLOV5_RESIZE);
|
||||
imgInfo.keepAspectRatioScaling = 1 / resizeRate;
|
||||
std::vector<MxBase::ResizedImageInfo> imageInfoVec = {};
|
||||
imageInfoVec.push_back(imgInfo);
|
||||
// make postProcess inputs
|
||||
std::vector<MxBase::TensorBase> tensors;
|
||||
for (size_t i = 0; i < modelOutputs.size(); i++)
|
||||
{
|
||||
MxBase::MemoryData memoryData(modelOutputs[i].GetData(), modelOutputs[i].GetByteSize());
|
||||
MxBase::TensorBase tensorBase(memoryData, true, modelOutputs[i].GetShape(), MxBase::TENSOR_DTYPE_INT32);
|
||||
tensors.push_back(tensorBase);
|
||||
}
|
||||
// Run post-processing
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> objectInfos;
|
||||
postProcessorDptr->Process(tensors, objectInfos, imageInfoVec);
|
||||
|
||||
// Store the post-processing results in the vector
|
||||
videoInfo::g_threadsMutex_postprocessOutputVector.lock();
|
||||
videoInfo::postprocessOutputVector.push_back(objectInfos);
|
||||
videoInfo::g_threadsMutex_postprocessOutputVector.unlock();
|
||||
// Advance the counter
|
||||
postprocess_count++;
|
||||
}
|
||||
}
|
||||
|
||||
// Tracking and de-duplication thread
|
||||
void trackMethod(std::chrono::high_resolution_clock::time_point start_time, size_t target_count)
|
||||
{
|
||||
std::chrono::high_resolution_clock::time_point track_start_time = std::chrono::high_resolution_clock::now();
|
||||
|
||||
std::shared_ptr<ascendVehicleTracking::MOTConnection> tracker0 = std::make_shared<ascendVehicleTracking::MOTConnection>();
|
||||
if (tracker0 == nullptr)
|
||||
{
|
||||
LogError << "init tracker0 failed, nullptr";
|
||||
}
|
||||
std::shared_ptr<ascendVehicleTracking::MOTConnection> tracker1 = std::make_shared<ascendVehicleTracking::MOTConnection>();
|
||||
if (tracker1 == nullptr)
|
||||
{
|
||||
LogError << "init tracker1 failed, nullptr";
|
||||
}
|
||||
// Used for computing the frame rate
|
||||
size_t old_count = 0;
|
||||
std::chrono::high_resolution_clock::time_point count_time;
|
||||
std::chrono::high_resolution_clock::time_point old_count_time = std::chrono::high_resolution_clock::now();
|
||||
size_t one_step = 2;
|
||||
size_t track_count = 0;
|
||||
while (track_count < target_count)
|
||||
{
|
||||
// Compute the frame rate
|
||||
// If count_time - old_count_time exceeds one_step seconds, count the frames processed in this step and divide by the step length
|
||||
count_time = std::chrono::high_resolution_clock::now();
|
||||
if (std::chrono::duration_cast<std::chrono::milliseconds>(count_time - old_count_time).count() / videoInfo::MS_PPE_SECOND > one_step)
|
||||
{
|
||||
old_count_time = count_time;
|
||||
LogInfo << "rate: " << (track_count - old_count) / one_step * 1.0;
|
||||
old_count = track_count;
|
||||
}
|
||||
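// For example, 50 frames handled within a 2-second window are reported as a rate of
// 50 / 2 = 25 frames per second.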
// The actual processing logic follows
|
||||
if (track_count >= videoInfo::postprocessOutputVector.size())
|
||||
{
|
||||
continue;
|
||||
}
|
||||
// Take post-processing results for tracking and de-duplication
|
||||
std::vector<std::vector<MxBase::ObjectInfo>> objectInfos = videoInfo::postprocessOutputVector[track_count];
|
||||
FrameImage frame = videoInfo::realImageVector[track_count];
|
||||
MxBase::Size originalSize = frame.image.GetOriginalSize();
|
||||
|
||||
// Use a different tracker depending on the channelId
|
||||
std::vector<MxBase::ObjectInfo> objInfos_ = {};
|
||||
if (frame.channelId == 0)
|
||||
{
|
||||
tracker0->ProcessSort(objectInfos, frame.frameId);
|
||||
APP_ERROR ret = tracker0->GettrackResult(objInfos_);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "No tracker0";
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
tracker1->ProcessSort(objectInfos, frame.frameId);
|
||||
APP_ERROR ret = tracker1->GettrackResult(objInfos_);
|
||||
if (ret != APP_ERR_OK)
|
||||
{
|
||||
LogError << "No tracker1";
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t video_height = originalSize.height;
|
||||
uint32_t video_width = originalSize.width;
|
||||
// Initialize the OpenCV image matrices
|
||||
cv::Mat imgYuv = cv::Mat(video_height * videoInfo::YUV_BYTE_NU / videoInfo::YUV_BYTE_DE, video_width, CV_8UC1, frame.image.GetData().get());
|
||||
cv::Mat imgBgr = cv::Mat(video_height, video_width, CV_8UC3);
|
||||
// Color-space conversion
|
||||
cv::cvtColor(imgYuv, imgBgr, cv::COLOR_YUV420sp2RGB);
|
||||
std::vector<MxBase::ObjectInfo> info;
|
||||
bool headFlag = false;
|
||||
for (uint32_t i = 0; i < objInfos_.size(); i++)
|
||||
{
|
||||
if (objInfos_[i].className == videoInfo::TARGET_CLASS_NAME)
|
||||
{
|
||||
headFlag = true;
|
||||
LogWarn << "Warning:Not wearing a helmet, channelId:" << frame.channelId << ", frameId:" << frame.frameId;
|
||||
// (blue, green, red)
|
||||
const cv::Scalar color = cv::Scalar(0, 0, 255);
|
||||
// width for rectangle
|
||||
const uint32_t thickness = 2;
|
||||
// draw the rectangle
|
||||
cv::rectangle(imgBgr,
|
||||
cv::Rect(objInfos_[i].x0, objInfos_[i].y0, objInfos_[i].x1 - objInfos_[i].x0, objInfos_[i].y1 - objInfos_[i].y0),
|
||||
color, thickness);
|
||||
}
|
||||
}
|
||||
// If a "head" detection is present, save the frame as an image
|
||||
if (headFlag)
|
||||
{
|
||||
// Save the Mat image to the specified location.
|
||||
std::string outPath = frame.channelId == 0 ? "one" : "two";
|
||||
std::string fileName = "./result/" + outPath + "/result" + std::to_string(frame.frameId) + ".jpg";
|
||||
cv::imwrite(fileName, imgBgr);
|
||||
}
|
||||
|
||||
// Advance the counter
|
||||
track_count++;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|