Files
FastDeploy/fastdeploy/vision/faceid/contrib/adaface/postprocessor.cc
Zheng_Bicheng ec67f8ee6d [Model] Refactor insightface models (#919)
* 重构insightface代码

* 重写insightface example代码

* 重写insightface example代码

* 删除多余代码

* 修改预处理代码

* 修改文档

* 修改文档

* 恢复误删除的文件

* 修改cpp example

* 修改cpp example

* 测试python代码

* 测试python代码

* 测试python代码

* 测试python代码

* 测试python代码

* 测试python代码

* 测试python代码

* 跑通python代码

* 修复重复初始化的bug

* 更新adaface的python代码

* 修复c++重复初始化的问题

* 修复c++重复初始化的问题

* 修复Python重复初始化的问题

* 新增preprocess的几个参数的获取方式

* 修复注释的错误

* 按照要求修改

* 修改文档中的图片为图片压缩包

* 修改编译完成后程序的提示

* 更新错误include

* 删除无用文件

* 更新文档
2022-12-26 21:01:58 +08:00

68 lines
2.5 KiB
C++
Executable File

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/faceid/contrib/adaface/postprocessor.h"
#include "fastdeploy/vision/utils/utils.h"
namespace fastdeploy {
namespace vision {
namespace faceid {
// Construct the postprocessor. By default the raw embedding produced by the
// model is returned as-is; L2 normalization is only applied when the
// l2_normalize flag is switched on by the caller.
AdaFacePostprocessor::AdaFacePostprocessor() : l2_normalize_(false) {}
/// \brief Convert the raw inference output into face-recognition results.
///
/// \param[in]  infer_result Output tensors from the runtime. Insightface /
///             AdaFace models are expected to produce exactly one FP32
///             tensor holding the face embedding.
/// \param[out] results One FaceRecognitionResult per batch item; each holds
///             a copy of the embedding vector.
/// \return true on success, false when the output layout or dtype is not
///         what this postprocessor supports.
bool AdaFacePostprocessor::Run(std::vector<FDTensor>& infer_result,
                               std::vector<FaceRecognitionResult>* results) {
  // Validate the tensor count BEFORE indexing infer_result[0]; the original
  // order dereferenced the vector first and, worse, fell through on a size
  // mismatch instead of failing.
  if (infer_result.size() != 1) {
    FDERROR << "The default number of output tensor "
               "must be 1 according to insightface." << std::endl;
    return false;
  }
  if (infer_result[0].dtype != FDDataType::FP32) {
    FDERROR << "Only support post process with float32 data." << std::endl;
    return false;
  }
  // shape is [batch, embedding_dim]; use size_t to match the loop index.
  size_t batch = infer_result[0].shape[0];
  results->resize(batch);
  for (size_t bs = 0; bs < batch; ++bs) {
    FDTensor& embedding_tensor = infer_result.at(bs);
    FDASSERT((embedding_tensor.shape[0] == 1), "Only support batch = 1 now.");
    if (embedding_tensor.dtype != FDDataType::FP32) {
      FDERROR << "Only support post process with float32 data." << std::endl;
      return false;
    }
    (*results)[bs].Clear();
    (*results)[bs].Resize(embedding_tensor.Numel());
    // Copy the raw embedding vector directly without L2 normalize
    // post process. Let the user decide whether to normalize or not.
    // Will call utils::L2Normlize() method to perform L2
    // normalize if l2_normalize was set as 'true'.
    std::memcpy((*results)[bs].embedding.data(),
                embedding_tensor.Data(),
                embedding_tensor.Nbytes());
    if (l2_normalize_) {
      auto norm_embedding = utils::L2Normalize((*results)[bs].embedding);
      std::memcpy((*results)[bs].embedding.data(),
                  norm_embedding.data(),
                  embedding_tensor.Nbytes());
    }
  }
  return true;
}
} // namespace faceid
} // namespace vision
} // namespace fastdeploy