// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/common/processors/manager.h"
|
|
|
|
namespace fastdeploy {
|
|
namespace vision {
|
|
|
|
ProcessorManager::~ProcessorManager() {
#ifdef WITH_GPU
  // Release the CUDA stream created by UseCuda(), if any.
  if (stream_) cudaStreamDestroy(stream_);
#endif
}
void ProcessorManager::UseCuda(bool enable_cv_cuda, int gpu_id) {
#ifdef WITH_GPU
  if (gpu_id >= 0) {
    device_id_ = gpu_id;
    FDASSERT(cudaSetDevice(device_id_) == cudaSuccess,
             "[ERROR] Error occurs while setting cuda device.");
  }
  FDASSERT(cudaStreamCreate(&stream_) == cudaSuccess,
           "[ERROR] Error occurs while creating cuda stream.");
  DefaultProcLib::default_lib = ProcLib::CUDA;
#else
  FDASSERT(false, "FastDeploy didn't compile with WITH_GPU.");
#endif

  // CV-CUDA is opt-in and overrides the plain CUDA processor library.
  if (enable_cv_cuda) {
#ifdef ENABLE_CVCUDA
    DefaultProcLib::default_lib = ProcLib::CVCUDA;
#else
    FDASSERT(false, "FastDeploy didn't compile with CV-CUDA.");
#endif
  }
}
bool ProcessorManager::CudaUsed() {
  return (DefaultProcLib::default_lib == ProcLib::CUDA ||
          DefaultProcLib::default_lib == ProcLib::CVCUDA);
}
bool ProcessorManager::Run(std::vector<FDMat>* images,
                           std::vector<FDTensor>* outputs) {
  if (!initialized_) {
    FDERROR << "The preprocessor is not initialized." << std::endl;
    return false;
  }
  if (images->size() == 0) {
    FDERROR << "The size of input images should be greater than 0."
            << std::endl;
    return false;
  }

  // When the CUDA/CV-CUDA backend is active, bind the manager's stream to
  // every input mat so all GPU processors run on the same stream.
  for (size_t i = 0; i < images->size(); ++i) {
    if (CudaUsed()) {
      SetStream(&((*images)[i]));
    }
  }

  bool ret = Apply(images, outputs);

  // Wait for the asynchronous GPU work to finish before returning.
  if (CudaUsed()) {
    SyncStream();
  }
  return ret;
}
}  // namespace vision
}  // namespace fastdeploy
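
// A minimal usage sketch (illustrative only, not compiled into this file):
// "MyPreprocessor" below is a hypothetical ProcessorManager subclass that
// implements Apply(). Callers select the CUDA/CV-CUDA backend once via
// UseCuda(), then reuse Run() per batch; Run() binds the manager's stream to
// each input mat and synchronizes before returning.
//
//   MyPreprocessor preprocessor;
//   preprocessor.UseCuda(/*enable_cv_cuda=*/true, /*gpu_id=*/0);
//   std::vector<FDMat> mats;        // wrap the input images as FDMat first
//   std::vector<FDTensor> outputs;
//   bool ok = preprocessor.Run(&mats, &outputs);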