Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[CVCUDA] Update CV-CUDA to v0.2.1, add vision processor C++ tutorial (#1678)
* update cvcuda 0.2.0 -> 0.2.1
* add cpp tutorials demo
* fix reviewed problem
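At a glance, the processor-side change is that CreateCvCudaTensorWrapData now returns a std::shared_ptr<nvcv::TensorWrapData> instead of an nvcv::TensorWrapData by value, so every CV-CUDA operator call site dereferences the wrapper it gets back. A minimal before/after sketch of that call pattern, using only names that appear in the hunks below (not a complete function):

// Before this commit: the helper returned the wrapper by value and the
// operator took it directly.
//   auto dst_tensor =
//       CreateCvCudaTensorWrapData(*(mat->output_cache), mat->layout);
//   cvcuda_convert_op_(mat->Stream(), src_tensor, dst_tensor, 1.0f, 0.0f);

// After: the helper returns std::shared_ptr<nvcv::TensorWrapData>, so the
// shared_ptr is dereferenced at each operator call.
auto dst_tensor =
    CreateCvCudaTensorWrapData(*(mat->output_cache), mat->layout);
cvcuda_convert_op_(mat->Stream(), *src_tensor, *dst_tensor, 1.0f, 0.0f);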
@@ -96,7 +96,7 @@ bool Cast::ImplByCvCuda(FDMat* mat) {
   auto dst_tensor =
       CreateCvCudaTensorWrapData(*(mat->output_cache), mat->layout);
 
-  cvcuda_convert_op_(mat->Stream(), src_tensor, dst_tensor, 1.0f, 0.0f);
+  cvcuda_convert_op_(mat->Stream(), *src_tensor, *dst_tensor, 1.0f, 0.0f);
 
   mat->SetTensor(mat->output_cache);
   mat->mat_type = ProcLib::CVCUDA;

@@ -70,7 +70,7 @@ bool CenterCrop::ImplByCvCuda(FDMat* mat) {
   int offset_x = static_cast<int>((mat->Width() - width_) / 2);
   int offset_y = static_cast<int>((mat->Height() - height_) / 2);
   NVCVRectI crop_roi = {offset_x, offset_y, width_, height_};
-  cvcuda_crop_op_(mat->Stream(), src_tensor, dst_tensor, crop_roi);
+  cvcuda_crop_op_(mat->Stream(), *src_tensor, *dst_tensor, crop_roi);
 
   mat->SetTensor(mat->output_cache);
   mat->SetWidth(width_);

@@ -43,8 +43,8 @@ nvcv::ImageFormat CreateCvCudaImageFormat(FDDataType type, int channel,
   return nvcv::FMT_BGRf32;
 }
 
-nvcv::TensorWrapData CreateCvCudaTensorWrapData(const FDTensor& tensor,
-                                                Layout layout) {
+std::shared_ptr<nvcv::TensorWrapData> CreateCvCudaTensorWrapData(
+    const FDTensor& tensor, Layout layout) {
   FDASSERT(tensor.shape.size() == 3,
            "When create CVCUDA tensor from FD tensor,"
            "tensor shape should be 3-Dim,");

@@ -76,7 +76,7 @@ nvcv::TensorWrapData CreateCvCudaTensorWrapData(const FDTensor& tensor,
   nvcv::TensorDataStridedCuda tensor_data(
       nvcv::TensorShape{req.shape, req.rank, req.layout},
       nvcv::DataType{req.dtype}, buf);
-  return nvcv::TensorWrapData(tensor_data);
+  return std::make_shared<nvcv::TensorWrapData>(tensor_data, nullptr);
 }
 
 void* GetCvCudaTensorDataPtr(const nvcv::TensorWrapData& tensor) {

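A hedged reading of the change just above: the wrapped memory is still owned by the FDTensor, and returning the wrapper through a std::shared_ptr gives call sites a handle they can hold across the operator launch without copying the nvcv::TensorWrapData itself; the trailing nullptr passed to the constructor is presumably an empty cleanup callback, an inference from the CV-CUDA v0.2.1 API rather than something stated in the commit. A short usage sketch assembled from names in this diff; the input_cache member and the function/operator names not shown in the diff are assumptions for illustration:

// Sketch only: how a processor's ImplByCvCuda-style method would use the new
// helper. convert_op stands in for the cvcuda_convert_op_ member (a
// cvcuda::ConvertTo) seen in the Cast hunk above.
bool CastByCvCudaSketch(FDMat* mat, cvcuda::ConvertTo& convert_op) {
  // Both calls now return std::shared_ptr<nvcv::TensorWrapData>.
  auto src_tensor =
      CreateCvCudaTensorWrapData(*(mat->input_cache), mat->layout);  // input_cache: assumed member
  auto dst_tensor =
      CreateCvCudaTensorWrapData(*(mat->output_cache), mat->layout);

  // The operator receives the dereferenced wrappers, matching the hunks above.
  convert_op(mat->Stream(), *src_tensor, *dst_tensor, 1.0f, 0.0f);

  mat->SetTensor(mat->output_cache);
  mat->mat_type = ProcLib::CVCUDA;
  return true;
}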
@@ -27,7 +27,7 @@ namespace vision {
 
 nvcv::ImageFormat CreateCvCudaImageFormat(FDDataType type, int channel,
                                           bool interleaved = true);
-nvcv::TensorWrapData CreateCvCudaTensorWrapData(const FDTensor& tensor,
+std::shared_ptr<nvcv::TensorWrapData> CreateCvCudaTensorWrapData(const FDTensor& tensor,
                                                 Layout layout = Layout::HWC);
 void* GetCvCudaTensorDataPtr(const nvcv::TensorWrapData& tensor);
 nvcv::ImageWrapData CreateImageWrapData(const FDTensor& tensor);

@@ -75,7 +75,7 @@ bool HWC2CHW::ImplByCvCuda(FDMat* mat) {
   auto dst_tensor =
       CreateCvCudaTensorWrapData(*(mat->output_cache), Layout::CHW);
 
-  cvcuda_reformat_op_(mat->Stream(), src_tensor, dst_tensor);
+  cvcuda_reformat_op_(mat->Stream(), *src_tensor, *dst_tensor);
 
   mat->layout = Layout::CHW;
   mat->SetTensor(mat->output_cache);

@@ -133,7 +133,7 @@ bool Pad::ImplByCvCuda(FDMat* mat) {
                        "output_cache", Device::GPU);
   auto dst_tensor = CreateCvCudaTensorWrapData(*(mat->output_cache));
 
-  cvcuda_pad_op_(mat->Stream(), src_tensor, dst_tensor, top_, left_,
+  cvcuda_pad_op_(mat->Stream(), *src_tensor, *dst_tensor, top_, left_,
                  NVCV_BORDER_CONSTANT, value);
 
   mat->SetTensor(mat->output_cache);

@@ -206,7 +206,7 @@ static bool PadHWCByCvCuda(cvcuda::CopyMakeBorder& pad_op, FDMat* mat,
                        "output_cache", Device::GPU);
   auto dst_tensor = CreateCvCudaTensorWrapData(*(mat->output_cache));
 
-  pad_op(mat->Stream(), src_tensor, dst_tensor, 0, 0, NVCV_BORDER_CONSTANT,
+  pad_op(mat->Stream(), *src_tensor, *dst_tensor, 0, 0, NVCV_BORDER_CONSTANT,
          border_value);
 
   mat->SetTensor(mat->output_cache);

@@ -238,8 +238,8 @@ static bool PadCHWByCvCuda(cvcuda::CopyMakeBorder& pad_op, FDMat* mat,
                    input->device, input->device_id);
     auto dst_tensor = CreateCvCudaTensorWrapData(dst);
 
-    pad_op(mat->Stream(), src_tensor, dst_tensor, 0, 0, NVCV_BORDER_CONSTANT,
-           border_value);
+    pad_op(mat->Stream(), (*src_tensor), (*dst_tensor), 0, 0,
+           NVCV_BORDER_CONSTANT, border_value);
   }
   mat->SetTensor(mat->output_cache);
   mat->mat_type = ProcLib::CVCUDA;

@@ -146,7 +146,7 @@ bool Resize::ImplByCvCuda(FDMat* mat) {
   auto dst_tensor = CreateCvCudaTensorWrapData(*(mat->output_cache));
 
   // CV-CUDA Interp value is compatible with OpenCV
-  cvcuda_resize_op_(mat->Stream(), src_tensor, dst_tensor,
+  cvcuda_resize_op_(mat->Stream(), *src_tensor, *dst_tensor,
                     CreateCvCudaInterp(interp_));
 
   mat->SetTensor(mat->output_cache);

@@ -95,7 +95,7 @@ bool ResizeByShort::ImplByCvCuda(FDMat* mat) {
                        "output_cache", Device::GPU);
   auto dst_tensor = CreateCvCudaTensorWrapData(*(mat->output_cache));
 
-  cvcuda_resize_op_(mat->Stream(), src_tensor, dst_tensor,
+  cvcuda_resize_op_(mat->Stream(), *src_tensor, *dst_tensor,
                     CreateCvCudaInterp(interp_));
 
   mat->SetTensor(mat->output_cache);

@@ -167,7 +167,7 @@ bool StridePad::ImplByCvCuda(FDMat* mat) {
                        "output_cache", Device::GPU);
   auto dst_tensor = CreateCvCudaTensorWrapData(*(mat->output_cache));
 
-  cvcuda_pad_op_(mat->Stream(), src_tensor, dst_tensor, 0, 0,
+  cvcuda_pad_op_(mat->Stream(), *src_tensor, *dst_tensor, 0, 0,
                  NVCV_BORDER_CONSTANT, value);
 
   mat->SetTensor(mat->output_cache);