Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-05 16:48:03 +08:00
[Other] Add FDTensor function Pad (#532)
* Add InferShape func for all the vision processors
* Fix infer shape of limit short
* Fix infer shape bug of stride_pad
* Revert modification of the processors
* Add function Pad
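For orientation, here is a minimal usage sketch of the new Pad function, modeled on the unit test added in this commit. The headers, FDTensor calls, and Pad signature are taken from the diff itself; the standalone main() wrapper and the 0.5f pad value are illustrative assumptions, not part of the commit.

#include <vector>

#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/pad.h"

int main() {
  std::vector<float> data = {2, 4, 3, 7, 1, 5};
  fastdeploy::FDTensor input, output;
  // Wrap the existing buffer as a 2x3 FP32 tensor (no copy), as the unit test below does.
  input.SetExternalData({2, 3}, fastdeploy::FDDataType::FP32, data.data());
  // Pad one element on each side of both dimensions with the value 0.5,
  // yielding a 4x5 output tensor.
  fastdeploy::Pad(input, &output, {1, 1, 1, 1}, 0.5f);
  return 0;
}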
fastdeploy/function/pad.cc (new file, 126 lines)
@@ -0,0 +1,126 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/function/pad.h"

#include <cstdlib>

#include "fastdeploy/function/eigen.h"
#include "fastdeploy/utils/utils.h"

namespace fastdeploy {

// Eigen-based padding kernel for a tensor of fixed rank.
template <typename T, int Rank>
struct PadEigen {
  using Array = std::array<std::pair<int64_t, int64_t>, Rank>;
  using Array32Bit = std::array<std::pair<int, int>, Rank>;
  using InType = Eigen::TensorMap<
      Eigen::Tensor<const T, Rank, Eigen::RowMajor, Eigen::DenseIndex>>;
  using InType32BitIndex =
      Eigen::TensorMap<Eigen::Tensor<const T, Rank, Eigen::RowMajor, int>,
                       Eigen::Aligned>;
  using OutType = Eigen::TensorMap<
      Eigen::Tensor<T, Rank, Eigen::RowMajor, Eigen::DenseIndex>>;
  using OutType32BitIndex =
      Eigen::TensorMap<Eigen::Tensor<T, Rank, Eigen::RowMajor, int>,
                       Eigen::Aligned>;

  static void Eval(const Eigen::DefaultDevice& dev, OutType out,
                   const InType& in, const Array& padding, const T value) {
    out.device(dev) = in.pad(padding, value);
  }

  static void Eval32(const Eigen::DefaultDevice& dev, OutType32BitIndex out,
                     const InType32BitIndex& in, const Array32Bit& padding,
                     const T value) {
    out.device(dev) = in.pad(padding, value);
  }
};

// Converts the flat pads vector into per-dimension (before, after) pairs
// and runs the Eigen kernel for rank D.
template <typename T, size_t D>
void PadFunction(const std::vector<int>& pads, const FDTensor& src,
                 T pad_value, FDTensor* out) {
  std::array<std::pair<int64_t, int64_t>, D> paddings;

  for (size_t i = 0; i < paddings.size(); ++i) {
    paddings[i].first = pads[i * 2];
    paddings[i].second = pads[i * 2 + 1];
  }

  auto src_tensor = EigenTensor<T, D>::From(src);
  auto out_tensor = EigenTensor<T, D>::From(*out);

  const auto& dev = *EigenDeviceWrapper::GetInstance()->GetDevice();
  PadEigen<T, D>::Eval(dev, out_tensor, src_tensor, paddings, pad_value);
}

// Dispatches to the rank-specific PadFunction instantiation.
template <typename T>
void PaddingFunctor(int rank, const std::vector<int>& pads, T pad_value,
                    const FDTensor& src, FDTensor* out) {
  switch (rank) {
    case 1:
      PadFunction<T, 1>(pads, src, pad_value, out);
      break;
    case 2:
      PadFunction<T, 2>(pads, src, pad_value, out);
      break;
    case 3:
      PadFunction<T, 3>(pads, src, pad_value, out);
      break;
    case 4:
      PadFunction<T, 4>(pads, src, pad_value, out);
      break;
    case 5:
      PadFunction<T, 5>(pads, src, pad_value, out);
      break;
    case 6:
      PadFunction<T, 6>(pads, src, pad_value, out);
      break;
    default:
      FDASSERT(false,
               "Pad only supports tensors with no more than 6 dimensions "
               "currently.");
  }
}

// Computes the padded output shape, allocates the output, then pads.
template <typename T>
void PadKernel(const FDTensor& x, const std::vector<int>& paddings,
               const T& pad_value, FDTensor* out) {
  std::vector<int64_t> new_shape(x.shape.size());
  for (size_t i = 0; i < x.shape.size(); ++i) {
    new_shape[i] = x.shape[i] + paddings[2 * i] + paddings[2 * i + 1];
  }
  out->Allocate(new_shape, x.dtype);
  PaddingFunctor<T>(x.shape.size(), paddings, pad_value, x, out);
}

void Pad(const FDTensor& x, FDTensor* out, const std::vector<int>& pads,
         float value) {
  FDASSERT(pads.size() == x.shape.size() * 2,
           "Size of pads:%zu must be 2 times of rank:%zu.", pads.size(),
           x.shape.size());
  FDTensor out_tmp;
  FD_VISIT_ALL_TYPES(x.dtype, "PadKernel",
                     ([&] { PadKernel<data_t>(x, pads, value, &out_tmp); }));
  *out = std::move(out_tmp);
}

}  // namespace fastdeploy
fastdeploy/function/pad.h (new file, 30 lines)
@@ -0,0 +1,30 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/core/fd_tensor.h"

namespace fastdeploy {

/** Execute the pad operation on the input FDTensor along the given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param pads The padding size for each dimension; for a 3-D tensor, pads
           should be [1d-left, 1d-right, 2d-left, 2d-right, 3d-left, 3d-right].
    @param pad_value The value used to fill the padded regions of the output tensor.
*/
FASTDEPLOY_DECL void Pad(const FDTensor& x, FDTensor* out,
                         const std::vector<int>& pads, float pad_value = 0);

}  // namespace fastdeploy
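To make the pads layout above concrete: in the 2-D unit tests added below, pads = {1, 1, 1, 1} adds one row before and after the first dimension and one column before and after the second, so a [2, 3] input becomes [2 + 1 + 1, 3 + 1 + 1] = [4, 5]. In general, the output extent of dimension i is shape[i] + pads[2 * i] + pads[2 * i + 1], which matches the shape computation in PadKernel.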
fastdeploy/vision/common/processors/resize_by_long.cc (deleted file, 58 lines)
@@ -1,58 +0,0 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision/common/processors/resize_by_long.h"

namespace fastdeploy {
namespace vision {

bool ResizeByLong::ImplByOpenCV(Mat* mat) {
  cv::Mat* im = mat->GetOpenCVMat();
  int origin_w = im->cols;
  int origin_h = im->rows;
  double scale = GenerateScale(origin_w, origin_h);
  if (use_scale_) {
    cv::resize(*im, *im, cv::Size(), scale, scale, interp_);
  } else {
    int width = static_cast<int>(round(scale * im->cols));
    int height = static_cast<int>(round(scale * im->rows));
    cv::resize(*im, *im, cv::Size(width, height), 0, 0, interp_);
  }
  mat->SetWidth(im->cols);
  mat->SetHeight(im->rows);
  return true;
}

double ResizeByLong::GenerateScale(const int origin_w, const int origin_h) {
  int im_size_max = std::max(origin_w, origin_h);
  int im_size_min = std::min(origin_w, origin_h);
  double scale = 1.0f;
  if (target_size_ == -1) {
    if (im_size_max > max_size_) {
      scale = static_cast<double>(max_size_) / static_cast<double>(im_size_max);
    }
  } else {
    scale =
        static_cast<double>(target_size_) / static_cast<double>(im_size_max);
  }
  return scale;
}

bool ResizeByLong::Run(Mat* mat, int target_size, int interp, bool use_scale,
                       int max_size, ProcLib lib) {
  auto r = ResizeByLong(target_size, interp, use_scale, max_size);
  return r(mat, lib);
}
}  // namespace vision
}  // namespace fastdeploy
fastdeploy/vision/common/processors/resize_by_long.h (deleted file, 46 lines)
@@ -1,46 +0,0 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/vision/common/processors/base.h"

namespace fastdeploy {
namespace vision {

class ResizeByLong : public Processor {
 public:
  ResizeByLong(int target_size, int interp = 1, bool use_scale = true,
               int max_size = -1) {
    target_size_ = target_size;
    max_size_ = max_size;
    interp_ = interp;
    use_scale_ = use_scale;
  }
  bool ImplByOpenCV(Mat* mat);
  std::string Name() { return "ResizeByLong"; }

  static bool Run(Mat* mat, int target_size, int interp = 1,
                  bool use_scale = true, int max_size = -1,
                  ProcLib lib = ProcLib::DEFAULT);

 private:
  double GenerateScale(const int origin_w, const int origin_h);
  int target_size_;
  int max_size_;
  int interp_;
  bool use_scale_;
};
}  // namespace vision
}  // namespace fastdeploy
@@ -27,7 +27,6 @@
 #include "fastdeploy/vision/common/processors/pad.h"
 #include "fastdeploy/vision/common/processors/pad_to_size.h"
 #include "fastdeploy/vision/common/processors/resize.h"
-#include "fastdeploy/vision/common/processors/resize_by_long.h"
 #include "fastdeploy/vision/common/processors/resize_by_short.h"
 #include "fastdeploy/vision/common/processors/stride_pad.h"
 #include "fastdeploy/vision/common/processors/warp_affine.h"
tests/function/test_pad.cc (new file, 92 lines)
@@ -0,0 +1,92 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <numeric>
#include <vector>
#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/pad.h"

#include "glog/logging.h"
#include "gtest/gtest.h"
#include "gtest_utils.h"

namespace fastdeploy {

TEST(fastdeploy, pad_2d) {
  FDTensor input, output;
  CheckShape check_shape;
  CheckData check_data;
  CheckType check_type;

  std::vector<float> inputs = {2, 4, 3,
                               7, 1, 5};
  std::vector<float> expected_result = {2.2, 2.2, 2.2, 2.2, 2.2,
                                        2.2, 2,   4,   3,   2.2,
                                        2.2, 7,   1,   5,   2.2,
                                        2.2, 2.2, 2.2, 2.2, 2.2};
  input.SetExternalData({2, 3}, FDDataType::FP32, inputs.data());

  Pad(input, &output, {1, 1, 1, 1}, 2.2);
  check_shape(output.shape, {4, 5});
  check_data(reinterpret_cast<const float*>(output.Data()),
             expected_result.data(), expected_result.size());
  check_type(input.dtype, output.dtype);
}

TEST(fastdeploy, pad_2d_int32_t) {
  FDTensor input, output;
  CheckShape check_shape;
  CheckData check_data;
  CheckType check_type;

  std::vector<int32_t> inputs = {2, 4, 3,
                                 7, 1, 5};
  std::vector<int32_t> expected_result = {2, 2, 2, 2, 2,
                                          2, 2, 4, 3, 2,
                                          2, 7, 1, 5, 2,
                                          2, 2, 2, 2, 2};
  input.SetExternalData({2, 3}, FDDataType::INT32, inputs.data());

  Pad(input, &output, {1, 1, 1, 1}, 2.2);
  check_shape(output.shape, {4, 5});
  check_data(reinterpret_cast<const int32_t*>(output.Data()),
             expected_result.data(), expected_result.size());
  check_type(input.dtype, output.dtype);
}

// TEST(fastdeploy, transpose_5d) {
//   FDTensor input, output;
//   CheckShape check_shape;
//   CheckData check_data;
//
//   std::vector<int64_t> input_shape = {2, 1, 3, 1, 2};
//   auto total_size = std::accumulate(input_shape.begin(), input_shape.end(), 1,
//                                     std::multiplies<int64_t>());
//   std::vector<int> inputs(total_size, 1);
//   std::iota(inputs.begin(), inputs.end(), 1);
//   std::vector<int> expected_result = {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12};
//   input.SetExternalData(input_shape, FDDataType::INT32, inputs.data());
//
//   Transpose(input, &output, {0, 1, 4, 3, 2});
//   check_shape(output.shape, {2, 1, 2, 1, 3});
//   check_data(reinterpret_cast<const int*>(output.Data()),
//              expected_result.data(), expected_result.size());
//
//   Transpose(input, &input, {0, 1, 4, 3, 2});
//   check_shape(input.shape, {2, 1, 2, 1, 3});
//   check_data(reinterpret_cast<const int*>(input.Data()), expected_result.data(),
//              expected_result.size());
// }

}  // namespace fastdeploy