Update evaluation function to support calculating average inference time (#106)

* Update README.md

* Update README.md

* Update README.md

* Create README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Add inference-time calculation to evaluation and fix some bugs

* Update classification __init__

* Move to ppseg

Co-authored-by: Jason <jiangjiajun@baidu.com>
This commit is contained in:
huangjianhui
2022-08-12 17:42:09 +08:00
committed by GitHub
parent 724d3dfc85
commit 32047016d6
12 changed files with 124 additions and 62 deletions

View File

@@ -0,0 +1,59 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/segmentation/ppseg/model.h"
namespace fastdeploy {
namespace vision {
namespace segmentation {
// Convert a segmentation inference output tensor into a single FP32 cv::Mat.
//
// Tensor layout is assumed to be NHW(C): shape[1] = height, shape[2] = width,
// and — only when contain_score_map is true — shape[3] = channel count
// (model exported without argmax).  TODO confirm batch dim shape[0] == 1.
//
// \param mat               [out] receives the converted FP32 matrix.
// \param infer_result      [in] model output; dtype must be INT64 (argmax
//                          labels) or FP32 (score map); any other dtype
//                          leaves `mat` untouched.
// \param contain_score_map true when the output keeps per-class scores
//                          (exported with --without_argmax).
void FDTensor2FP32CVMat(cv::Mat& mat, FDTensor& infer_result,
                        bool contain_score_map) {
  // With argmax applied the output has a single label channel.
  int channel = 1;
  int height = infer_result.shape[1];
  int width = infer_result.shape[2];
  if (contain_score_map) {
    // Output without argmax is NHWC; the last dim holds per-class scores.
    channel = infer_result.shape[3];
  }
  if (infer_result.dtype == FDDataType::INT64) {
    FDWARNING << "The PaddleSeg model is exported with argmax. Inference "
                 "result type is " +
                     Str(infer_result.dtype) +
                     ". If you want smoother edges in the segmentation "
                     "image, please export the model with --without_argmax "
                     "--with_softmax."
              << std::endl;
    // INT64 labels cannot alias an FP32 cv::Mat, so copy element-wise with a
    // widening cast.  `channel` is 1 in this branch (argmax output).
    // NOTE: was `mat.at<float_t>` — `float_t` may be `double` on some
    // platforms, which would misindex a CV_32F matrix; use `float` explicitly.
    const int64_t* infer_result_buffer =
        static_cast<const int64_t*>(infer_result.Data());
    mat = cv::Mat(height, width, CV_32FC(channel));
    int index = 0;
    for (int i = 0; i < height; i++) {
      for (int j = 0; j < width; j++) {
        mat.at<float>(i, j) =
            static_cast<float>(infer_result_buffer[index++]);
      }
    }
  } else if (infer_result.dtype == FDDataType::FP32) {
    // FP32 output can be wrapped directly without copying.  `mat` borrows
    // infer_result's buffer, so the tensor must outlive the matrix.
    mat = cv::Mat(height, width, CV_32FC(channel), infer_result.Data());
  }
}
} // namespace segmentation
} // namespace vision
} // namespace fastdeploy

View File

@@ -143,8 +143,7 @@ bool PaddleSegModel::Postprocess(
Mat* mat = nullptr;
if (is_resized) {
cv::Mat temp_mat;
utils::FDTensor2FP32CVMat(temp_mat, infer_result,
result->contain_score_map);
FDTensor2FP32CVMat(temp_mat, infer_result, result->contain_score_map);
// original image shape
auto iter_ipt = (*im_info).find("input_shape");

View File

@@ -38,6 +38,9 @@ class FASTDEPLOY_DECL PaddleSegModel : public FastDeployModel {
std::vector<std::shared_ptr<Processor>> processors_;
std::string config_file_;
};
// Convert segmentation inference output `infer_result` into a FP32 cv::Mat
// `mat`.  When `contain_score_map` is true the tensor is treated as NHWC with
// per-class scores; otherwise as single-channel argmax labels (INT64 input is
// cast element-wise, FP32 input is wrapped without copying).
void FDTensor2FP32CVMat(cv::Mat& mat, FDTensor& infer_result,
                        bool contain_score_map);
} // namespace segmentation
} // namespace vision
} // namespace fastdeploy