diff --git a/fastdeploy/function/clip.cc b/fastdeploy/function/clip.cc
new file mode 100644
index 000000000..bede9e56a
--- /dev/null
+++ b/fastdeploy/function/clip.cc
@@ -0,0 +1,58 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/function/clip.h"
+#include <algorithm>
+
+namespace fastdeploy {
+namespace function {
+
+template <typename T> class ClipFunctor {
+ public:
+  explicit ClipFunctor(const T min, const T max) : min_(min), max_(max) {}
+  T operator()(const T x) const {
+    return x < min_ ? min_ : x > max_ ? max_ : x;
+  }
+
+ private:
+  T min_;
+  T max_;
+};
+
+template <typename T>
+void ClipKernel(const FDTensor& x, double min, double max, FDTensor* out) {
+  T max_ = static_cast<T>(max);
+  T min_ = static_cast<T>(min);
+
+  FDASSERT(min_ < max_,
+           "max should be greater than min. But received min = %f, "
+           "max = %f",
+           static_cast<float>(min_), static_cast<float>(max_));
+
+  out->Allocate(x.Shape(), x.Dtype());
+  const T* x_data = reinterpret_cast<const T*>(x.Data());
+
+  int64_t numel = x.Numel();
+  T* out_data = reinterpret_cast<T*>(out->Data());
+
+  std::transform(x_data, x_data + numel, out_data, ClipFunctor<T>(min_, max_));
+}
+
+void Clip(const FDTensor& x, double min, double max, FDTensor* out) {
+  FD_VISIT_INT_FLOAT_TYPES(x.dtype, "ClipKernel",
+                           ([&] { ClipKernel<data_t>(x, min, max, out); }));
+}
+
+}  // namespace function
+}  // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/function/clip.h b/fastdeploy/function/clip.h
new file mode 100644
index 000000000..fce6aa67e
--- /dev/null
+++ b/fastdeploy/function/clip.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/core/fd_tensor.h"
+
+namespace fastdeploy {
+namespace function {
+
+/** This operator clips all elements of the input into the range [min, max]. Supports float32, float64, int32, int64.
+    @param x The input tensor.
+    @param min The lower bound.
+    @param max The upper bound.
+    @param out The output tensor which stores the result.
+*/
+FASTDEPLOY_DECL void Clip(const FDTensor& x, double min, double max,
+                          FDTensor* out);
+
+}  // namespace function
+}  // namespace fastdeploy
diff --git a/tests/function/test_clip.cc b/tests/function/test_clip.cc
new file mode 100644
index 000000000..77dc46cf2
--- /dev/null
+++ b/tests/function/test_clip.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/core/fd_tensor.h"
+#include "fastdeploy/function/clip.h"
+#include "glog/logging.h"
+#include "gtest_utils.h"
+#include "gtest/gtest.h"
+#include <array>
+#include <vector>
+
+namespace fastdeploy {
+namespace function {
+
+std::vector<float> CreateTestData() {
+  // Shape: [2, 3, 4]
+  std::vector<float> x_data = {
+      0.8428625,  0.6461913, 0.13740455, 0.11430702, 0.659926,  0.535816,
+      0.7429162,  0.8456049, 0.21228176, 0.29970083, 0.8621713, 0.40894133,
+      0.12684688, 0.1566195, 0.42884097, 0.8476526,  0.2458633, 0.669046,
+      0.87888306, 0.6762589, 0.666453,   0.32523027, 0.4139388, 0.8341406};
+  return x_data;
+}
+
+TEST(fastdeploy, clip) {
+  CheckShape check_shape;
+  CheckData check_data;
+  FDTensor x, y;
+  auto test_data = CreateTestData();
+  x.SetExternalData({2, 3, 4}, FDDataType::FP32, test_data.data());
+
+  Clip(x, /* min = */ 0.2, /* max = */ 0.8, &y);
+  std::vector<float> result = {
+      0.8,      0.646191, 0.2, 0.2,      0.659926, 0.535816, 0.742916, 0.8,
+      0.212282, 0.299701, 0.8, 0.408941, 0.2,      0.2,      0.428841, 0.8,
+      0.245863, 0.669046, 0.8, 0.676259, 0.666453, 0.32523,  0.413939, 0.8};
+  check_shape(y.shape, {2, 3, 4});
+  check_data(reinterpret_cast<float*>(y.Data()), result.data(),
+             result.size());
+}
+
+}  // namespace function
+}  // namespace fastdeploy
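For reference, below is a minimal usage sketch (not part of the diff) showing how the new Clip function could be called outside the gtest harness. It mirrors the unit test above and assumes only the FDTensor calls shown there (SetExternalData binding an external FP32 buffer, Data() returning a raw pointer); treat it as illustrative rather than canonical.

// Usage sketch: clamp a small FP32 tensor to [0.2, 0.8].
// Assumes the FDTensor API as used in tests/function/test_clip.cc above.
#include <vector>

#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/clip.h"

int main() {
  std::vector<float> data = {0.1f, 0.5f, 0.9f, 1.2f};

  fastdeploy::FDTensor x, y;
  // Bind the external buffer without copying, as done in the unit test.
  x.SetExternalData({4}, fastdeploy::FDDataType::FP32, data.data());

  // Elements below 0.2 saturate to 0.2, elements above 0.8 saturate to 0.8.
  fastdeploy::function::Clip(x, /* min = */ 0.2, /* max = */ 0.8, &y);

  const float* out = reinterpret_cast<const float*>(y.Data());
  // out now holds {0.2, 0.5, 0.8, 0.8}.
  return 0;
}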