Fixed some coding-standard issues and reworked the visualization part of the code
@@ -136,10 +136,10 @@ export PYTHONPATH=${MX_SDK_HOME}/python:$PYTHONPATH

The third-party software dependencies involved in inference are listed in the table below.

| Dependency | Version | Description | Tutorial |
| -------- | ---------- | ------------------------------ | ------------------------------------------------------------ |
| live555 | 1.09 | Converts video to an RTSP stream | [link](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md) |
| ffmpeg | 2021-07-21 | Converts MP4 video to H.264 video | [link](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/pc%E7%AB%AFffmpeg%E5%AE%89%E8%A3%85%E6%95%99%E7%A8%8B.md#https://ffmpeg.org/download.html) |
| Dependency | Version | Description | Tutorial |
| -------- | ---------- | ------------------------------ | ------------------------------------------------------- |
| live555 | 1.09 | Converts video to an RTSP stream | [link](https://gitee.com/ascend/mindxsdk-referenceapps) |
| ffmpeg | 2021-07-21 | Converts MP4 video to H.264 video | [link](https://gitee.com/ascend/mindxsdk-referenceapps) |

@@ -191,7 +191,7 @@ ATC run success, welcome to the next use.

## 5 Preparation

Install live555 and ffmpeg as described in the **Software Dependencies** part of Section 3, and convert the MP4 video to H.264 format by following the [Live555 offline video to RTSP guide](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/%E5%8F%82%E8%80%83%E8%B5%84%E6%96%99/Live555%E7%A6%BB%E7%BA%BF%E8%A7%86%E9%A2%91%E8%BD%ACRTSP%E8%AF%B4%E6%98%8E%E6%96%87%E6%A1%A3.md). Upload the generated .264 video to the `live/mediaServer` directory, then modify the mxpi_rtspsrc0 entry in the `video.pipeline` file under the `AlphaPose/pipeline` directory.
Install live555 and ffmpeg as described in the **Software Dependencies** part of Section 3, and convert the MP4 video to H.264 format by following the [Live555 offline video to RTSP guide](https://gitee.com/ascend/mindxsdk-referenceapps). Upload the generated .264 video to the `live/mediaServer` directory, then modify the mxpi_rtspsrc0 entry in the `video.pipeline` file under the `AlphaPose/pipeline` directory.

```
"mxpi_rtspsrc0": {
@@ -248,9 +248,7 @@ bash run.sh image
bash run.sh video --speedtest
```

The result is shown in the figure below: every 10 frames, the terminal prints the current frame count, the running time of the previous 10 frames, and the average frame rate over those 10 frames.


After the command runs, the terminal prints the current frame count, the running time of the previous 10 frames, and the average frame rate over those 10 frames, once every 10 frames. In testing, performance reaches about 19 fps, which meets the performance requirement.

[^Note]: The input video frame rate should be above 25 fps; otherwise full performance cannot be achieved. Also, because AlphaPose estimates human keypoints in a top-down manner, the actual inference speed is negatively correlated with the number of people in the video: the more people, the longer inference takes. The speed shown above was measured with a frame size of 720*1280 and a single person in the video.
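
For reference, the per-10-frame statistics described above can be reproduced with a simple timing loop. This is only an illustrative sketch, not the project's actual video.py logic; the frame iterator and the helper name are assumptions.

```
import time

FPS_LOG_INTERVAL = 10  # print statistics every 10 frames, as described above

def log_speed(frame_iter):
    """Hypothetical helper: print frame count, elapsed time, and fps every 10 frames."""
    start = time.time()
    for frame_count, _frame in enumerate(frame_iter, start=1):
        # ... per-frame inference would happen here ...
        if frame_count % FPS_LOG_INTERVAL == 0:
            elapsed = time.time() - start
            fps = FPS_LOG_INTERVAL / elapsed if elapsed > 0 else float("inf")
            print(f"frame {frame_count}: last {FPS_LOG_INTERVAL} frames took "
                  f"{elapsed:.3f}s, average {fps:.2f} fps")
            start = time.time()
```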
@@ -278,7 +276,7 @@ bash run.sh video --speedtest

└── video.py
```

3. Carry out steps 1 to 3 of Section 6, **Compile and Run**, to complete the preparation, then enter the `AlphaPose` directory and run the following command there:

@@ -286,9 +284,7 @@ bash run.sh video --speedtest
bash run.sh evaluate
```

After the command finishes, it prints the evaluation results in COCO format and generates the detection result file val2017_keypoint_detect_result.json. The output is shown in the figure below:

After the command finishes, it prints the evaluation results in COCO format and generates the detection result file val2017_keypoint_detect_result.json. In testing, the mAP on the COCO validation set is 0.732, which meets the accuracy requirement.
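
The COCO-style summary can also be recomputed offline from the generated result file with pycocotools. The sketch below is illustrative only; the ground-truth annotation path is an assumption and must match your local COCO val2017 layout.

```
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

ANN_FILE = "annotations/person_keypoints_val2017.json"  # assumed local path to COCO ground truth
RES_FILE = "val2017_keypoint_detect_result.json"         # file produced by `bash run.sh evaluate`

coco_gt = COCO(ANN_FILE)                                  # load ground-truth keypoint annotations
coco_dt = coco_gt.loadRes(RES_FILE)                       # load the detection results
coco_eval = COCOeval(coco_gt, coco_dt, iouType="keypoints")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()                                     # prints the AP/AR table; the first AP is the reported mAP
```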
## 8 FAQ

@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -398,6 +398,50 @@ static void PoseMergeFast(const cv::Mat &predsPick, const std::vector<cv::Mat> &
    }
}

/**
 * @brief Delete the selected person
 * @param tmpSize - The current number of persons
 * @param pickId - The id of the picked person
 * @param finalDists - Distance between each remaining pose and the picked pose
 * @param numMatchKeypoints - Number of keypoints of each remaining pose that match the picked pose
 * @param keypointPreds - Source data containing the keypoint positions
 * @param keypointScores - Source data containing the keypoint scores
 * @param humanIds - A collection of person ids
 * @param humanScores - A collection of person scores
 * @param confidence - A collection of person confidences
 * @param mergeIds - A collection of the poses to merge
 */
static void DeletePerson(int tmpSize, int pickId,
                         std::vector<float> &finalDists, std::vector<int> &numMatchKeypoints,
                         std::vector<cv::Mat> &keypointPreds, std::vector<cv::Mat> &keypointScores,
                         std::vector<int> &humanIds, std::vector<float> &humanScores,
                         std::vector<float> &confidence, std::vector<int> &mergeIds)
{
    int count = 0;
    float gma = 22.48;
    for (int i = 0; i < tmpSize; i++) {
        if ((finalDists[i] > gma) || (numMatchKeypoints[i] > MATCH_THREAD)) {
            int deleteId = i - count;
            int mergeId = humanIds[deleteId];
            mergeIds.push_back(mergeId);
            keypointPreds.erase(keypointPreds.begin() + deleteId);
            keypointScores.erase(keypointScores.begin() + deleteId);
            humanIds.erase(humanIds.begin() + deleteId);
            humanScores.erase(humanScores.begin() + deleteId);
            confidence.erase(confidence.begin() + deleteId);
            count++;
        }
    }
    if (mergeIds.size() == 0) {
        int deleteId = pickId;
        int mergeId = humanIds[deleteId];
        mergeIds.push_back(mergeId);
        keypointPreds.erase(keypointPreds.begin() + deleteId);
        keypointScores.erase(keypointScores.begin() + deleteId);
        humanIds.erase(humanIds.begin() + deleteId);
        humanScores.erase(humanScores.begin() + deleteId);
        confidence.erase(confidence.begin() + deleteId);
    }
}

/**
 * @brief Get final pose
 * @param maxValue - The max value of the picked object's keypoint scores
@@ -499,24 +543,15 @@ APP_ERROR MxpiAlphaposePostProcess::ExtractKeypointsInfo(const std::vector<std::
 * @param personScores - Target data containing the information of person's score
 * @return APP_ERROR
 */
APP_ERROR MxpiAlphaposePostProcess::PoseNms(std::vector<cv::Mat> &keypointPreds,
                                            std::vector<cv::Mat> &keypointScores,
                                            std::vector<std::vector<float> > &objectBoxes,
                                            std::vector<cv::Mat> &finalPoses,
                                            std::vector<cv::Mat> &finalScores,
                                            std::vector<float> &personScores)
APP_ERROR MxpiAlphaposePostProcess::PoseNms(std::vector<cv::Mat> &keypointPreds, std::vector<cv::Mat> &keypointScores,
                                            std::vector<std::vector<float> > &objectBoxes, std::vector<cv::Mat> &finalPoses,
                                            std::vector<cv::Mat> &finalScores, std::vector<float> &personScores)
{
    float gma = 22.48;
    float alpha = 0.1;
    int batchNum = keypointScores.size();
    std::vector<float> confidence(batchNum);
    std::vector<float> boxWidth(batchNum);
    std::vector<float> boxHeight(batchNum);
    std::vector<float> refDists(batchNum);
    std::vector<float> humanScores(batchNum);
    std::vector<int> humanIds(batchNum);
    std::vector<float> finalDists(batchNum);
    std::vector<int> numMatchKeypoints(batchNum);
    std::vector<float> confidence(batchNum), boxWidth(batchNum), boxHeight(batchNum);
    std::vector<float> refDists(batchNum), humanScores(batchNum), finalDists(batchNum);
    std::vector<int> humanIds(batchNum), numMatchKeypoints(batchNum);
    for (int i = 0; i < batchNum; i++) {
        for (int j = 0; j < keypointScores[i].rows; j++) {
            float *ptr = keypointScores[i].ptr<float>(j);
@@ -554,47 +589,16 @@ APP_ERROR MxpiAlphaposePostProcess::PoseNms(std::vector<cv::Mat> &keypointPreds,
        PCKMatch(pickId, refDist, keypointPreds, numMatchKeypoints);
        // Delete humans who have more than MATCH_THREAD keypoints overlap and high similarity
        std::vector<int> mergeIds = {};
        std::vector<int> deleteIds = {};
        int count = 0;
        for (int i = 0; i < tmpSize; i++) {
            if ((finalDists[i] > gma) || (numMatchKeypoints[i] > MATCH_THREAD)) {
                int deleteId = i - count;
                int mergeId = humanIds[deleteId];
                mergeIds.push_back(mergeId);
                keypointPreds.erase(keypointPreds.begin() + deleteId);
                keypointScores.erase(keypointScores.begin() + deleteId);
                humanIds.erase(humanIds.begin() + deleteId);
                humanScores.erase(humanScores.begin() + deleteId);
                confidence.erase(confidence.begin() + deleteId);
                count++;
            }
        }
        if (mergeIds.size() == 0) {
            int deleteId = pickId;
            int mergeId = humanIds[deleteId];
            mergeIds.push_back(mergeId);
            keypointPreds.erase(keypointPreds.begin() + deleteId);
            keypointScores.erase(keypointScores.begin() + deleteId);
            humanIds.erase(humanIds.begin() + deleteId);
            humanScores.erase(humanScores.begin() + deleteId);
            confidence.erase(confidence.begin() + deleteId);
        }

        DeletePerson(tmpSize, pickId, finalDists, numMatchKeypoints, keypointPreds, keypointScores,
                     humanIds, humanScores, confidence, mergeIds);
        double maxValue;
        cv::Point maxIdx;
        cv::minMaxLoc(scoresPick, NULL, &maxValue, NULL, &maxIdx);
        if (maxValue >= SCORE_THREAD) {
            cv::Mat mergePose(KEY_POINTS_NUM, POSE_COORD_NUM, CV_32FC1, Scalar(0));
            cv::Mat mergeScore(KEY_POINTS_NUM, SCORE_COORD_NUM, CV_32FC1, Scalar(0));
            if (mergeIds.size() == 1) {
                mergePose = originKeypointPreds[mergeIds[0]].clone();
                mergeScore = originKeypointScores[mergeIds[0]].clone();
            } else {
                PoseMergeFast(predsPick, originKeypointPreds, originKeypointScores,
                              refDist, mergeIds, mergePose, mergeScore);
            }
            GetFinalPose(maxValue, confidencePick, mergePose,
                         mergeScore, finalPoses, finalScores, personScores);
            PoseMergeFast(predsPick, originKeypointPreds, originKeypointScores, refDist, mergeIds, mergePose, mergeScore);
            GetFinalPose(maxValue, confidencePick, mergePose, mergeScore, finalPoses, finalScores, personScores);
        }
    }
    return APP_ERR_OK;
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,9 +11,9 @@
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.mitations under the License.
# limitations under the License.

set -e
set -e

current_folder="$( cd "$(dirname "$0")" ;pwd -P )"
@@ -21,7 +21,7 @@ function build_plugin() {
    build_path=${current_folder}/build
    plugin_path=${MX_SDK_HOME}/lib/plugins
    plugin_name="libmxpi_alphaposepostprocess.so"

    if [ -d "$build_path" ]; then
        rm -rf "$build_path"
    else
@@ -31,7 +31,7 @@ function build_plugin() {
    cd "$build_path"
    cmake ..
    make -j4

    chmod 440 "$plugin_name"
    if [ -e "${plugin_path}/${plugin_name}" ];
    then rm -rf "${plugin_path}/${plugin_name}"
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,7 @@
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.mitations under the License.
# limitations under the License.

set -e
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,7 @@
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.mitations under the License.
# limitations under the License.

set -e
@@ -1,3 +1,17 @@
#!/bin/bash
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

MODE=$1
@@ -1,23 +1,19 @@
#!/usr/bin/env python
# coding=utf-8

"""

Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import stat
@@ -84,8 +80,9 @@ def main():
    personlist = []
    for i, _ in enumerate(range(person_num)):
        person = pose_out_list.personInfoVec[i]
        keypoints_score = np.zeros((17, 1), dtype = np.float32)
        keypoints_pre = np.zeros((17, 2), dtype = np.float32)
        keypoints_num = 17
        keypoints_score = np.zeros((keypoints_num, 1), dtype = np.float32)
        keypoints_pre = np.zeros((keypoints_num, 2), dtype = np.float32)
        for j in range(len(person.keyPoints)):
            keypoints_score[j][0] = person.keyPoints[j].score
            keypoints_pre[j][0] = person.keyPoints[j].x
@@ -93,7 +90,7 @@ def main():
        score = np.array(person.confidence)
        personlist.append({
            'keypoints': keypoints_pre,
            'kp_score': keypoints_score,
            'keypoints_score': keypoints_score,
            'proposal_score': score
        })
    # Read the original image
@@ -105,7 +102,7 @@ def main():
    # Save key point information to JSON file
    for i, _ in enumerate(range(person_num)):
        personlist[i]['keypoints'] = personlist[i]['keypoints'].tolist()
        personlist[i]['kp_score'] = personlist[i]['kp_score'].tolist()
        personlist[i]['keypoints_score'] = personlist[i]['keypoints_score'].tolist()
        personlist[i]['proposal_score'] = personlist[i]['proposal_score'].tolist()
    flags = os.O_WRONLY | os.O_CREAT  # Sets how files are read and written
    modes = stat.S_IWUSR | stat.S_IRUSR  # Set file permissions
@@ -1,5 +1,7 @@
# ------------------------------------------------------------------------------
# Copyright 2021 Huawei Technologies Co., Ltd
#!/usr/bin/env python
# coding=utf-8

# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,97 +14,138 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import cv2
import math
import numpy as np
import cv2

YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
CYAN = (255, 255, 0)
PURPLE = (255, 0, 255)
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)


def visualize(frame, result, dataset='coco'):
    '''
    frame: frame image
    result: result of predictions
    dataset: coco or mpii

    return rendered image
    '''
    if dataset == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]

        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191),
                   (127, 77, 255), (77, 255, 127), (0, 255, 255)]
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif dataset == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError
def visualize_fast(frame, result):
    line_pair = [
        (0, 1), (0, 2), (1, 3), (2, 4),
        (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
        (17, 11), (17, 12),
        (11, 13), (12, 14), (13, 15), (14, 16)
    ]
    # Nose, LEye, REye, LEar, REar
    # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
    # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
    point_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]
    line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                  (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                  (77, 222, 255), (255, 156, 127),
                  (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]

    img = frame
    for human in result:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        kp_preds = np.concatenate((kp_preds, np.expand_dims((kp_preds[5, :]+kp_preds[6, :]) / 2.0, 0)))
        kp_scores = np.concatenate((kp_scores, np.expand_dims((kp_scores[5, :]+kp_scores[6, :]) / 2.0, 0)))
        preds = human['keypoints']
        scores = human['keypoints_score']
        # Get the keypoint between the left shoulder and the right shoulder
        lshoulder_index = 5
        rshoulder_index = 6
        middle_preds = np.expand_dims((preds[lshoulder_index, :]+preds[rshoulder_index, :]) / 2.0, 0)
        middle_scores = np.expand_dims((scores[lshoulder_index, :]+scores[rshoulder_index, :]) / 2.0, 0)
        preds = np.concatenate((preds, middle_preds))
        scores = np.concatenate((scores, middle_scores))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
        for n in range(scores.shape[0]):
            if scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (int(cor_x), int(cor_y))
            bg = img.copy()
            cv2.circle(bg, (int(cor_x), int(cor_y)), 2, p_color[n], -1)
            # Now create a mask of logo and create its inverse mask also
            transparency = float(max(0, min(1, kp_scores[n])))
            img = cv2.addWeighted(bg, transparency, img, 1-transparency, 0)
        for i, (start_pair, end_pair) in enumerate(l_pair):
            point_x, point_y = int(preds[n, 0]), int(preds[n, 1])
            part_line[n] = (int(point_x), int(point_y))
            if n < len(point_color):
                cv2.circle(img, (point_x, point_y), 3, point_color[n], -1)
            else:
                cv2.circle(img, (point_x, point_y), 1, (255, 255, 255), 2)
        # Draw limbs
        for i, (start_pair, end_pair) in enumerate(line_pair):
            if start_pair in part_line and end_pair in part_line:
                start_xy = part_line.get(start_pair)
                end_xy = part_line.get(end_pair)
                bg = img.copy()
                start_point = part_line.get(start_pair)
                end_point = part_line.get(end_pair)
                if i < len(line_color):
                    cv2.line(img, start_point, end_point, line_color[i],
                             2 * int(scores[start_pair] + scores[start_pair]) + 1)
                else:
                    cv2.line(img, start_point, end_point, (255, 255, 255), 1)
    return img

                coord_x = (start_xy[0], end_xy[0])
                coord_y = (start_xy[1], end_xy[1])

def visualize(frame, result):
    line_pair = [
        (0, 1), (0, 2), (1, 3), (2, 4),
        (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
        (17, 11), (17, 12),
        (11, 13), (12, 14), (13, 15), (14, 16)
    ]
    # Nose, LEye, REye, LEar, REar
    # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
    # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
    point_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]
    line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                  (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                  (77, 222, 255), (255, 156, 127),
                  (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]

    img = frame
    for human in result:
        part_line = {}
        preds = human['keypoints']
        scores = human['keypoints_score']
        # Get the keypoint between the left shoulder and the right shoulder
        lshoulder_index = 5
        rshoulder_index = 6
        middle_preds = np.expand_dims((preds[lshoulder_index, :]+preds[rshoulder_index, :]) / 2.0, 0)
        middle_scores = np.expand_dims((scores[lshoulder_index, :]+scores[rshoulder_index, :]) / 2.0, 0)
        preds = np.concatenate((preds, middle_preds))
        scores = np.concatenate((scores, middle_scores))
        # Draw keypoints
        for n in range(scores.shape[0]):
            if scores[n] <= 0.05:
                continue
            point_x, point_y = int(preds[n, 0]), int(preds[n, 1])
            part_line[n] = (int(point_x), int(point_y))
            point_img = img.copy()
            cv2.circle(point_img, (int(point_x), int(point_y)), 2, point_color[n], -1)
            # Now create a mask of logo and create its inverse mask also
            transparency = float(max(0, min(1, scores[n])))
            img = cv2.addWeighted(point_img, transparency, img, 1-transparency, 0)
        # Draw limbs
        for i, (start_pair, end_pair) in enumerate(line_pair):
            if start_pair in part_line and end_pair in part_line:
                start_point = part_line.get(start_pair)
                end_point = part_line.get(end_pair)
                line_img = img.copy()

                coord_x = (start_point[0], end_point[0])
                coord_y = (start_point[1], end_point[1])
                coord_mx = np.mean(coord_x)
                coord_my = np.mean(coord_y)
                length = ((coord_y[0] - coord_y[1]) ** 2 + (coord_x[0] - coord_x[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(coord_y[0] - coord_y[1], coord_x[0] - coord_x[1]))
                stickwidth = (kp_scores[start_pair] + kp_scores[end_pair]) + 1
                length = ((start_point[1] - end_point[1]) ** 2 +
                          (start_point[0] - end_point[0]) ** 2) ** 0.5
                angle_degrees = math.atan2(start_point[1] - end_point[1], start_point[0] - end_point[0])
                angle = math.degrees(angle_degrees)
                stick_width = (scores[start_pair] + scores[end_pair]) + 1
                polygon = cv2.ellipse2Poly((int(coord_mx), int(coord_my)), (int(length/2),
                                           int(stickwidth)), int(angle), 0, 360, 1)
                                           int(stick_width)), int(angle), 0, 360, 1)
                if i < len(line_color):
                    cv2.fillConvexPoly(bg, polygon, line_color[i])
                    cv2.fillConvexPoly(line_img, polygon, line_color[i])
                else:
                    cv2.line(bg, start_xy, end_xy, (255, 255, 255), 1)
                if n < len(p_color):
                    transparency = float(max(0, min(1, 0.5 * (kp_scores[start_pair] + kp_scores[end_pair])-0.1)))
                    cv2.line(line_img, start_point, end_point, (255, 255, 255), 1)
                if n < len(point_color):
                    transparency = float(max(0, min(1, 0.5 * (scores[start_pair] + scores[end_pair])-0.1)))
                else:
                    transparency = float(max(0, min(1, (kp_scores[start_pair] + kp_scores[end_pair]))))
                    transparency = float(max(0, min(1, (scores[start_pair] + scores[end_pair]))))

                img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
                img = cv2.addWeighted(line_img, transparency, img, 1 - transparency, 0)
    return img
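
For reference, a minimal usage sketch of the `visualize` function above. The image path and the random keypoint values are placeholders for illustration only; in the project, the `result` list is built from the plugin output as shown in main.py.

```
import cv2
import numpy as np

from utils.visualization import visualize

frame = cv2.imread("test.jpg")  # hypothetical input image
person = {
    # 17 COCO keypoints, one (x, y) row per keypoint; dummy values for illustration
    'keypoints': np.random.rand(17, 2).astype(np.float32) * 100,
    # one confidence score per keypoint, shape (17, 1)
    'keypoints_score': np.random.rand(17, 1).astype(np.float32),
}
rendered = visualize(frame, [person])  # result is a list of per-person dicts
cv2.imwrite("visualize_result.jpg", rendered)
```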
@@ -1,23 +1,19 @@
#!/usr/bin/env python
# coding=utf-8

"""

Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
@@ -30,7 +26,7 @@ import numpy as np

import MxpiDataType_pb2 as MxpiDataType
from StreamManagerApi import StreamManagerApi, StringVector
from utils.visualization import visualize
from utils.visualization import visualize, visualize_fast
sys.path.append("../proto/")
import mxpiAlphaposeProto_pb2 as mxpialphaposeproto
@@ -102,8 +98,9 @@ def main():
    personlist = []
    for i, _ in enumerate(range(person_num)):
        person = pose_out_list.personInfoVec[i]
        keypoints_score = np.zeros((17, 1), dtype = np.float32)
        keypoints_pre = np.zeros((17, 2), dtype = np.float32)
        keypoints_num = 17
        keypoints_score = np.zeros((keypoints_num, 1), dtype = np.float32)
        keypoints_pre = np.zeros((keypoints_num, 2), dtype = np.float32)
        for j in range(len(person.keyPoints)):
            keypoints_score[j][0] = person.keyPoints[j].score
            keypoints_pre[j][0] = person.keyPoints[j].x
@@ -112,7 +109,7 @@
        personlist.append({
            'frame': frame_count,
            'keypoints': keypoints_pre,
            'kp_score': keypoints_score,
            'keypoints_score': keypoints_score,
            'proposal_score': score,
        })
    # Whether to conduct speed tests
@@ -124,7 +121,7 @@
    # Save key point information to JSON file
    for i, _ in enumerate(range(person_num)):
        personlist[i]['keypoints'] = personlist[i]['keypoints'].tolist()
        personlist[i]['kp_score'] = personlist[i]['kp_score'].tolist()
        personlist[i]['keypoints_score'] = personlist[i]['keypoints_score'].tolist()
        personlist[i]['proposal_score'] = personlist[i]['proposal_score'].tolist()
    flags = os.O_WRONLY | os.O_APPEND | os.O_CREAT  # Sets how files are read and written
    modes = stat.S_IWUSR | stat.S_IRUSR  # Set file permissions
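
The `flags`/`modes` pair above is the usual pattern for writing the result file with restricted permissions via os.open and os.fdopen. A minimal sketch, with a hypothetical output path:

```
import json
import os
import stat

flags = os.O_WRONLY | os.O_APPEND | os.O_CREAT  # write-only, append, create if missing
modes = stat.S_IWUSR | stat.S_IRUSR             # owner read/write only
# "keypoint_result.json" is a placeholder; the project writes its own result file.
with os.fdopen(os.open("keypoint_result.json", flags, modes), "a") as fout:
    json.dump({"example": "person list would go here"}, fout)
    fout.write("\n")
```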