From e93bf6e35c96d6f69490e7203d8824f345e4ac03 Mon Sep 17 00:00:00 2001
From: Jason
Date: Tue, 8 Nov 2022 21:45:31 +0800
Subject: [PATCH] [Other] Add FDTensor function Pad (#532)

* Add InferShape func for all the vision processors

* fix infer shape of limit short

* Fix infer shape bug of stride_pad

* revert modify of processor

* add function pad
---
 fastdeploy/function/pad.cc                    | 126 ++++++++++++++++++
 fastdeploy/function/pad.h                     |  30 +++++
 .../common/processors/resize_by_long.cc       |  58 --------
 .../vision/common/processors/resize_by_long.h |  46 -------
 .../vision/common/processors/transform.h      |   1 -
 tests/function/test_pad.cc                    |  92 +++++++++++++
 6 files changed, 248 insertions(+), 105 deletions(-)
 create mode 100644 fastdeploy/function/pad.cc
 create mode 100644 fastdeploy/function/pad.h
 delete mode 100644 fastdeploy/vision/common/processors/resize_by_long.cc
 delete mode 100644 fastdeploy/vision/common/processors/resize_by_long.h
 create mode 100644 tests/function/test_pad.cc

diff --git a/fastdeploy/function/pad.cc b/fastdeploy/function/pad.cc
new file mode 100644
index 000000000..42585781b
--- /dev/null
+++ b/fastdeploy/function/pad.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/function/pad.h"
+
+#include <algorithm>
+
+#include "fastdeploy/function/eigen.h"
+#include "fastdeploy/utils/utils.h"
+
+namespace fastdeploy {
+
+template <typename T, size_t Rank>
+struct PadEigen {
+  using Array = std::array<std::pair<int64_t, int64_t>, Rank>;
+  using Array32Bit = std::array<std::pair<int, int>, Rank>;
+  using InType = Eigen::TensorMap<
+      Eigen::Tensor<const T, Rank, Eigen::RowMajor, Eigen::DenseIndex>>;
+  using InType32BitIndex =
+      Eigen::TensorMap<Eigen::Tensor<const T, Rank, Eigen::RowMajor, int>,
+                       Eigen::Aligned>;
+  using OutType = Eigen::TensorMap<
+      Eigen::Tensor<T, Rank, Eigen::RowMajor, Eigen::DenseIndex>>;
+  using OutType32BitIndex =
+      Eigen::TensorMap<Eigen::Tensor<T, Rank, Eigen::RowMajor, int>,
+                       Eigen::Aligned>;
+
+  static void Eval(const Eigen::DefaultDevice& dev, OutType out,
+                   const InType& in, const Array& padding, const T value) {
+    out.device(dev) = in.pad(padding, value);
+  }
+
+  static void Eval32(const Eigen::DefaultDevice& dev, OutType32BitIndex out,
+                     const InType32BitIndex& in, const Array32Bit& padding,
+                     const T value) {
+    out.device(dev) = in.pad(padding, value);
+  }
+};
+
+template <typename T, size_t D>
+void PadFunction(const std::vector<int>& pads, const FDTensor& src,
+                 T pad_value, FDTensor* out) {
+  std::array<std::pair<int64_t, int64_t>, D> paddings;
+
+  for (size_t i = 0; i < paddings.size(); ++i) {
+    paddings[i].first = pads[i * 2];
+    paddings[i].second = pads[i * 2 + 1];
+  }
+
+  auto src_tensor = EigenTensor<T, D>::From(src);
+  auto out_tensor = EigenTensor<T, D>::From(*out);
+
+  const auto& dev = *EigenDeviceWrapper::GetInstance()->GetDevice();
+  PadEigen<T, D>::Eval(dev, out_tensor, src_tensor, paddings, pad_value);
+}
+
+template <typename T>
+void PaddingFunctor(int rank, const std::vector<int>& pads, T pad_value,
+                    const FDTensor& src, FDTensor* out) {
+  switch (rank) {
+    case 1:
+      PadFunction<T, 1>(pads, src, pad_value, out);
+      break;
+    case 2:
+      PadFunction<T, 2>(pads, src, pad_value, out);
+      break;
+    case 3:
+      PadFunction<T, 3>(pads, src, pad_value, out);
+      break;
+    case 4:
+      PadFunction<T, 4>(pads, src, pad_value, out);
+      break;
+    case 5:
+      PadFunction<T, 5>(pads, src, pad_value, out);
+      break;
+    case 6:
+      PadFunction<T, 6>(pads, src, pad_value, out);
+      break;
+    default:
+      FDASSERT(false,
+               "Pad only supports tensors with no more than 6 dimensions "
+               "currently.");
+  }
+}
+
+template <typename T>
+void PadKernel(const FDTensor& x, const std::vector<int>& paddings,
+               const T& pad_value, FDTensor* out) {
+  std::vector<int64_t> new_shape(x.shape.size());
+  for (size_t i = 0; i < x.shape.size(); ++i) {
+    new_shape[i] = x.shape[i] + paddings[2 * i] + paddings[2 * i + 1];
+  }
+  out->Allocate(new_shape, x.dtype);
+  PaddingFunctor<T>(x.shape.size(), paddings, pad_value, x, out);
+}
+
+void Pad(const FDTensor& x, FDTensor* out, const std::vector<int>& pads,
+         float value) {
+  FDASSERT(pads.size() == x.shape.size() * 2,
+           "Size of pads:%zu must be 2 times of rank:%zu.", pads.size(),
+           x.shape.size());
+  FDTensor out_tmp;
+  FD_VISIT_ALL_TYPES(x.dtype, "PadKernel",
+                     ([&] { PadKernel<data_t>(x, pads, value, &out_tmp); }));
+  *out = std::move(out_tmp);
+}
+
+}  // namespace fastdeploy
diff --git a/fastdeploy/function/pad.h b/fastdeploy/function/pad.h
new file mode 100644
index 000000000..fef7e5c5c
--- /dev/null
+++ b/fastdeploy/function/pad.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/core/fd_tensor.h"
+
+namespace fastdeploy {
+
+/** Execute the pad operation on the input FDTensor along the given dims.
+    @param x The input tensor.
+    @param out The output tensor, which stores the result.
+    @param pads The padding size for each dimension; for a 3-D tensor the pads
+           should be [1d-left, 1d-right, 2d-left, 2d-right, 3d-left, 3d-right].
+    @param pad_value The value used to fill the padded area of the output
+           tensor.
+*/
+FASTDEPLOY_DECL void Pad(const FDTensor& x, FDTensor* out,
+                         const std::vector<int>& pads, float pad_value = 0);
+
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/common/processors/resize_by_long.cc b/fastdeploy/vision/common/processors/resize_by_long.cc
deleted file mode 100644
index 20edca413..000000000
--- a/fastdeploy/vision/common/processors/resize_by_long.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/common/processors/resize_by_long.h"
-
-namespace fastdeploy {
-namespace vision {
-
-bool ResizeByLong::ImplByOpenCV(Mat* mat) {
-  cv::Mat* im = mat->GetOpenCVMat();
-  int origin_w = im->cols;
-  int origin_h = im->rows;
-  double scale = GenerateScale(origin_w, origin_h);
-  if (use_scale_) {
-    cv::resize(*im, *im, cv::Size(), scale, scale, interp_);
-  } else {
-    int width = static_cast<int>(round(scale * im->cols));
-    int height = static_cast<int>(round(scale * im->rows));
-    cv::resize(*im, *im, cv::Size(width, height), 0, 0, interp_);
-  }
-  mat->SetWidth(im->cols);
-  mat->SetHeight(im->rows);
-  return true;
-}
-
-double ResizeByLong::GenerateScale(const int origin_w, const int origin_h) {
-  int im_size_max = std::max(origin_w, origin_h);
-  int im_size_min = std::min(origin_w, origin_h);
-  double scale = 1.0f;
-  if (target_size_ == -1) {
-    if (im_size_max > max_size_) {
-      scale = static_cast<double>(max_size_) / static_cast<double>(im_size_max);
-    }
-  } else {
-    scale =
-        static_cast<double>(target_size_) / static_cast<double>(im_size_max);
-  }
-  return scale;
-}
-
-bool ResizeByLong::Run(Mat* mat, int target_size, int interp, bool use_scale,
-                       int max_size, ProcLib lib) {
-  auto r = ResizeByLong(target_size, interp, use_scale, max_size);
-  return r(mat, lib);
-}
-}  // namespace vision
-}  // namespace fastdeploy
diff --git a/fastdeploy/vision/common/processors/resize_by_long.h b/fastdeploy/vision/common/processors/resize_by_long.h
deleted file mode 100644
index c288e07a4..000000000
--- a/fastdeploy/vision/common/processors/resize_by_long.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include "fastdeploy/vision/common/processors/base.h"
-
-namespace fastdeploy {
-namespace vision {
-
-class ResizeByLong : public Processor {
- public:
-  ResizeByLong(int target_size, int interp = 1, bool use_scale = true,
-               int max_size = -1) {
-    target_size_ = target_size;
-    max_size_ = max_size;
-    interp_ = interp;
-    use_scale_ = use_scale;
-  }
-  bool ImplByOpenCV(Mat* mat);
-  std::string Name() { return "ResizeByLong"; }
-
-  static bool Run(Mat* mat, int target_size, int interp = 1,
-                  bool use_scale = true, int max_size = -1,
-                  ProcLib lib = ProcLib::DEFAULT);
-
- private:
-  double GenerateScale(const int origin_w, const int origin_h);
-  int target_size_;
-  int max_size_;
-  int interp_;
-  bool use_scale_;
-};
-}  // namespace vision
-}  // namespace fastdeploy
diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h
index 2a914fff7..7ba58167d 100644
--- a/fastdeploy/vision/common/processors/transform.h
+++ b/fastdeploy/vision/common/processors/transform.h
@@ -27,7 +27,6 @@
 #include "fastdeploy/vision/common/processors/pad.h"
 #include "fastdeploy/vision/common/processors/pad_to_size.h"
 #include "fastdeploy/vision/common/processors/resize.h"
-#include "fastdeploy/vision/common/processors/resize_by_long.h"
 #include "fastdeploy/vision/common/processors/resize_by_short.h"
 #include "fastdeploy/vision/common/processors/stride_pad.h"
 #include "fastdeploy/vision/common/processors/warp_affine.h"
diff --git a/tests/function/test_pad.cc b/tests/function/test_pad.cc
new file mode 100644
index 000000000..bec7b8252
--- /dev/null
+++ b/tests/function/test_pad.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <vector>
+#include "fastdeploy/core/fd_tensor.h"
+#include "fastdeploy/function/pad.h"
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+namespace fastdeploy {
+
+TEST(fastdeploy, pad_2d) {
+  FDTensor input, output;
+  CheckShape check_shape;
+  CheckData check_data;
+  CheckType check_type;
+
+  std::vector<float> inputs = {2, 4, 3,
+                               7, 1, 5};
+  std::vector<float> expected_result = {2.2, 2.2, 2.2, 2.2, 2.2,
+                                        2.2, 2,   4,   3,   2.2,
+                                        2.2, 7,   1,   5,   2.2,
+                                        2.2, 2.2, 2.2, 2.2, 2.2};
+  input.SetExternalData({2, 3}, FDDataType::FP32, inputs.data());
+
+  Pad(input, &output, {1, 1, 1, 1}, 2.2);
+  check_shape(output.shape, {4, 5});
+  check_data(reinterpret_cast<float*>(output.Data()), expected_result.data(),
+             expected_result.size());
+  check_type(input.dtype, output.dtype);
+}
+
+TEST(fastdeploy, pad_2d_int32_t) {
+  FDTensor input, output;
+  CheckShape check_shape;
+  CheckData check_data;
+  CheckType check_type;
+
+  std::vector<int32_t> inputs = {2, 4, 3,
+                                 7, 1, 5};
+  std::vector<int32_t> expected_result = {2, 2, 2, 2, 2,
+                                          2, 2, 4, 3, 2,
+                                          2, 7, 1, 5, 2,
+                                          2, 2, 2, 2, 2};
+  input.SetExternalData({2, 3}, FDDataType::INT32, inputs.data());
+
+  Pad(input, &output, {1, 1, 1, 1}, 2.2);
+  check_shape(output.shape, {4, 5});
+  check_data(reinterpret_cast<int32_t*>(output.Data()), expected_result.data(),
+             expected_result.size());
+  check_type(input.dtype, output.dtype);
+}
+
+// TEST(fastdeploy, transpose_5d) {
+//   FDTensor input, output;
+//   CheckShape check_shape;
+//   CheckData check_data;
+//
+//   std::vector<int64_t> input_shape = {2, 1, 3, 1, 2};
+//   auto total_size = std::accumulate(input_shape.begin(), input_shape.end(),
+//                                     1, std::multiplies<int>());
+//   std::vector<int> inputs(total_size, 1);
+//   std::iota(inputs.begin(), inputs.end(), 1);
+//   std::vector<int> expected_result = {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12};
+//   input.SetExternalData(input_shape, FDDataType::INT32, inputs.data());
+//
+//   Transpose(input, &output, {0, 1, 4, 3, 2});
+//   check_shape(output.shape, {2, 1, 2, 1, 3});
+//   check_data(reinterpret_cast<int*>(output.Data()), expected_result.data(),
+//              expected_result.size());
+//
+//   Transpose(input, &input, {0, 1, 4, 3, 2});
+//   check_shape(input.shape, {2, 1, 2, 1, 3});
+//   check_data(reinterpret_cast<int*>(input.Data()), expected_result.data(),
+//              expected_result.size());
+// }
+
+}  // namespace fastdeploy
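
Usage note (not part of the patch): a minimal sketch of how the new fastdeploy::Pad
function added above can be called outside the test harness. It follows the same
SetExternalData / Data() pattern as test_pad.cc; the standalone main(), the sample
values, and the printing loop are illustrative assumptions, not code from this PR.

// Pad a 2x3 float tensor by one element on every side, filling with -1.
#include <iostream>
#include <vector>

#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/pad.h"

int main() {
  // Row-major 2x3 input; SetExternalData wraps the buffer without copying it.
  std::vector<float> data = {1, 2, 3, 4, 5, 6};
  fastdeploy::FDTensor input, output;
  input.SetExternalData({2, 3}, fastdeploy::FDDataType::FP32, data.data());

  // pads has two entries per dimension, in dimension order:
  // {dim0-left, dim0-right, dim1-left, dim1-right} for a 2-D tensor.
  fastdeploy::Pad(input, &output, {1, 1, 1, 1}, -1.0f);

  // output.shape is now {4, 5}; the border elements hold the pad value -1.
  const float* out_ptr = reinterpret_cast<float*>(output.Data());
  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 5; ++j) {
      std::cout << out_ptr[i * 5 + j] << " ";
    }
    std::cout << "\n";
  }
  return 0;
}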