Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-22 08:09:28 +08:00
[Model] [Part 1] Refactor PaddleClas module (#508)
* Split PaddleClas Module refactor
* Split PaddleClas Module refactor
* fix bug
@@ -21,7 +21,6 @@
#include "fastdeploy/vision/common/processors/crop.h"
#include "fastdeploy/vision/common/processors/hwc2chw.h"
#include "fastdeploy/vision/common/processors/limit_by_stride.h"
#include "fastdeploy/vision/common/processors/limit_long.h"
#include "fastdeploy/vision/common/processors/limit_short.h"
#include "fastdeploy/vision/common/processors/normalize.h"
#include "fastdeploy/vision/common/processors/normalize_and_permute.h"
@@ -36,89 +35,12 @@
namespace fastdeploy {
namespace vision {

inline void FuseNormalizeCast(
    std::vector<std::shared_ptr<Processor>>* processors) {
  // Fuse Normalize and Cast<Float>
  int cast_index = -1;
  for (size_t i = 0; i < processors->size(); ++i) {
    if ((*processors)[i]->Name() == "Cast") {
      if (i == 0) {
        continue;
      }
      if ((*processors)[i - 1]->Name() != "Normalize" &&
          (*processors)[i - 1]->Name() != "NormalizeAndPermute") {
        continue;
      }
      cast_index = i;
    }
  }
  if (cast_index < 0) {
    return;
  }
  if (dynamic_cast<Cast*>((*processors)[cast_index].get())->GetDtype() !=
      "float") {
    return;
  }
  processors->erase(processors->begin() + cast_index);
  FDINFO << (*processors)[cast_index - 1]->Name() << " and Cast are fused to "
         << (*processors)[cast_index - 1]->Name()
         << " in preprocessing pipeline." << std::endl;
}

inline void FuseNormalizeHWC2CHW(
    std::vector<std::shared_ptr<Processor>>* processors) {
  // Fuse Normalize and HWC2CHW to NormalizeAndPermute
  int hwc2chw_index = -1;
  for (size_t i = 0; i < processors->size(); ++i) {
    if ((*processors)[i]->Name() == "HWC2CHW") {
      if (i == 0) {
        continue;
      }
      if ((*processors)[i - 1]->Name() != "Normalize") {
        continue;
      }
      hwc2chw_index = i;
    }
  }

  if (hwc2chw_index < 0) {
    return;
  }

  // Get alpha and beta of Normalize
  std::vector<float> alpha =
      dynamic_cast<Normalize*>((*processors)[hwc2chw_index - 1].get())
          ->GetAlpha();
  std::vector<float> beta =
      dynamic_cast<Normalize*>((*processors)[hwc2chw_index - 1].get())
          ->GetBeta();

  // Delete Normalize and HWC2CHW
  processors->erase(processors->begin() + hwc2chw_index);
  processors->erase(processors->begin() + hwc2chw_index - 1);

  // Add NormalizeAndPermute
  std::vector<float> mean({0.0, 0.0, 0.0});
  std::vector<float> std({1.0, 1.0, 1.0});
  processors->push_back(std::make_shared<NormalizeAndPermute>(mean, std));

  // Set alpha and beta
  auto processor = dynamic_cast<NormalizeAndPermute*>(
      (*processors)[hwc2chw_index - 1].get());

  processor->SetAlpha(alpha);
  processor->SetBeta(beta);
  FDINFO << "Normalize and HWC2CHW are fused to NormalizeAndPermute "
            " in preprocessing pipeline."
         << std::endl;
}

inline void FuseTransforms(
    std::vector<std::shared_ptr<Processor>>* processors) {
  FuseNormalizeCast(processors);
  FuseNormalizeHWC2CHW(processors);
}
void FuseTransforms(std::vector<std::shared_ptr<Processor>>* processors);
// Fuse Normalize + Cast(Float) to Normalize
void FuseNormalizeCast(std::vector<std::shared_ptr<Processor>>* processors);
// Fuse Normalize + HWC2CHW to NormalizeAndPermute
void FuseNormalizeHWC2CHW(std::vector<std::shared_ptr<Processor>>* processors);

} // namespace vision
} // namespace fastdeploy
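
For context, a minimal usage sketch of the fusion pass declared above. The include path and the OptimizePipeline wrapper are assumptions for illustration and are not part of this commit; only FuseTransforms, Processor, and the fastdeploy::vision namespace come from the header shown in the diff. The processor vector is assumed to have been populated elsewhere, e.g. by a model's preprocessing setup.

#include <memory>
#include <vector>

#include "fastdeploy/vision/common/processors/transform.h"  // assumed header path

// Illustrative wrapper: run the fusion pass over an already-built pipeline.
// FuseTransforms first removes a redundant Cast-to-float that directly follows
// Normalize / NormalizeAndPermute, then merges Normalize + HWC2CHW into a
// single NormalizeAndPermute carrying the original alpha/beta.
void OptimizePipeline(
    std::vector<std::shared_ptr<fastdeploy::vision::Processor>>* processors) {
  fastdeploy::vision::FuseTransforms(processors);
}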