Fix missing trt_backend option and remove unneeded data layout check in Cast (#14)

* update .gitignore

* Added a check for the cmake include dir

* fixed the missing trt_backend option when initializing from a TRT engine

* removed the unneeded data layout check and added a dtype pre-check in Cast

* changed RGB2BGR to BGR2RGB in the ppcls model

* Update CMakeLists.txt
DefTruth
2022-07-12 15:14:30 +08:00
committed by GitHub
parent 57697f3f18
commit a45f189b56
5 changed files with 38 additions and 15 deletions

.gitignore (vendored): 11 additions

@@ -1 +1,12 @@
 fastdeploy/libs/lib*
+build
+cmake-build-debug
+cmake-build-release
+.vscode
+FastDeploy.cmake
+fastdeploy/core/config.h
+build-debug.sh
+*dist
+fastdeploy.egg-info
+.setuptools-cmake-build
+fastdeploy/version.py


@@ -52,7 +52,8 @@ std::vector<int> toVec(const nvinfer1::Dims& dim) {
   return out;
 }
-bool TrtBackend::InitFromTrt(const std::string& trt_engine_file) {
+bool TrtBackend::InitFromTrt(const std::string& trt_engine_file,
+                             const TrtBackendOption& option) {
   if (initialized_) {
     FDERROR << "TrtBackend is already initlized, cannot initialize again."
             << std::endl;


@@ -69,7 +69,8 @@ class TrtBackend : public BaseBackend {
   bool InitFromOnnx(const std::string& model_file,
                     const TrtBackendOption& option = TrtBackendOption(),
                     bool from_memory_buffer = false);
-  bool InitFromTrt(const std::string& trt_engine_file);
+  bool InitFromTrt(const std::string& trt_engine_file,
+                   const TrtBackendOption& option = TrtBackendOption());
   bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);
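
With the new overload, loading a serialized TensorRT engine can reuse the same TrtBackendOption that InitFromOnnx already accepts. A minimal usage sketch follows, assuming the classes live in the fastdeploy namespace; the header path and the gpu_id field are guesses for illustration and are not part of this diff:

#include "fastdeploy/backends/tensorrt/trt_backend.h"

#include <string>
#include <vector>

int main() {
  fastdeploy::TrtBackendOption option;  // default-constructible, as the new default argument shows
  option.gpu_id = 0;                    // hypothetical field, shown only to illustrate why passing an option matters

  fastdeploy::TrtBackend backend;
  // Before this commit the overload had no option parameter, so a serialized
  // engine was always loaded with default settings.
  if (!backend.InitFromTrt("model.trt", option)) {
    return -1;
  }

  std::vector<fastdeploy::FDTensor> inputs;   // fill with preprocessed tensors
  std::vector<fastdeploy::FDTensor> outputs;
  backend.Infer(inputs, &outputs);
  return 0;
}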


@@ -18,30 +18,40 @@ namespace fastdeploy {
 namespace vision {
 bool Cast::CpuRun(Mat* mat) {
-  if (mat->layout != Layout::CHW) {
-    FDERROR << "Cast: The input data must be Layout::HWC format!" << std::endl;
-    return false;
-  }
   cv::Mat* im = mat->GetCpuMat();
+  int c = im->channels();
   if (dtype_ == "float") {
-    im->convertTo(*im, CV_32FC(im->channels()));
+    if (im->type() != CV_32FC(c)) {
+      im->convertTo(*im, CV_32FC(c));
+    }
   } else if (dtype_ == "double") {
-    im->convertTo(*im, CV_64FC(im->channels()));
+    if (im->type() != CV_64FC(c)) {
+      im->convertTo(*im, CV_64FC(c));
+    }
   } else {
     FDLogger() << "[WARN] Cast not support for " << dtype_
                << " now! will skip this operation."
                << std::endl;
   }
   return true;
 }
 #ifdef ENABLE_OPENCV_CUDA
 bool Cast::GpuRun(Mat* mat) {
-  if (mat->layout != Layout::CHW) {
-    FDERROR << "Cast: The input data must be Layout::HWC format!" << std::endl;
-    return false;
-  }
   cv::cuda::GpuMat* im = mat->GetGpuMat();
+  int c = im->channels();
   if (dtype_ == "float") {
-    im->convertTo(*im, CV_32FC(im->channels()));
+    if (im->type() != CV_32FC(c)) {
+      im->convertTo(*im, CV_32FC(c));
+    }
   } else if (dtype_ == "double") {
-    im->convertTo(*im, CV_64FC(im->channels()));
+    if (im->type() != CV_64FC(c)) {
+      im->convertTo(*im, CV_64FC(c));
+    }
   } else {
     FDLogger() << "[WARN] Cast not support for " << dtype_
                << " now! will skip this operation."
                << std::endl;
   }
   return true;
 }
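
The layout check is replaced by a dtype pre-check: convertTo is only called when the Mat is not already in the requested type, so an image that is already float32 (or float64) passes through untouched. A standalone sketch of the same guard using plain OpenCV, with an illustrative function name that is not part of FastDeploy:

#include <opencv2/core.hpp>

// Cast an image to float32 in place, but only if it is not float32 already.
void CastToFloatIfNeeded(cv::Mat* im) {
  int c = im->channels();
  if (im->type() != CV_32FC(c)) {
    im->convertTo(*im, CV_32FC(c));  // allocate and convert only on a type mismatch
  }
}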


@@ -44,7 +44,7 @@ bool Model::BuildPreprocessPipelineFromConfig() {
     return false;
   }
   auto preprocess_cfg = cfg["PreProcess"]["transform_ops"];
-  processors_.push_back(std::make_shared<RGB2BGR>());
+  processors_.push_back(std::make_shared<BGR2RGB>());
   for (const auto& op : preprocess_cfg) {
     FDASSERT(op.IsMap(),
              "Require the transform information in yaml be Map type.");