[Other] Add namespace for functions (#538)

Jason authored 2022-11-09 13:57:53 +08:00 (committed by GitHub)
parent 4706a7c32a
commit f2fed7959b
27 changed files with 63 additions and 61 deletions

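Note: the pattern applied throughout this commit is a nested namespace. Every free tensor function (Softmax, Max, ArgMax, Concat, Pad, Transpose, CudaCast, ...) moves from fastdeploy:: into fastdeploy::function::, and every call site gains the extra qualifier. A minimal standalone sketch of the pattern (illustrative names, not the FastDeploy sources):

    #include <iostream>

    namespace fastdeploy {
    namespace function {  // new nested namespace grouping the free functions
    void Softmax() { std::cout << "function::Softmax\n"; }
    }  // namespace function
    }  // namespace fastdeploy

    int main() {
      // Call sites add one qualifier; this was fastdeploy::Softmax() before.
      fastdeploy::function::Softmax();
      return 0;
    }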
View File

@@ -187,11 +187,11 @@ struct ErnieForSequenceClassificationPredictor {
                    std::vector<SeqClsResult>* seq_cls_results) {
     const auto& logits = outputs[0];
     fastdeploy::FDTensor probs;
-    fastdeploy::Softmax(logits, &probs);
+    fastdeploy::function::Softmax(logits, &probs);
     fastdeploy::FDTensor labels, confidences;
-    fastdeploy::Max(probs, &confidences, {-1});
-    fastdeploy::ArgMax(probs, &labels, -1);
+    fastdeploy::function::Max(probs, &confidences, {-1});
+    fastdeploy::function::ArgMax(probs, &labels, -1);
     if (labels.Numel() != confidences.Numel()) {
       return false;
     }

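The hunk above chains function::Max and function::ArgMax over the last axis to pull a confidence and a label out of each row of the probability tensor. A plain-C++ sketch of that reduction, without the FDTensor API:

    #include <cstddef>
    #include <vector>

    // For each row of a [rows x cols] probability matrix, find the best class
    // id and its probability: what ArgMax/Max along axis -1 compute here.
    void MaxAndArgMax(const std::vector<float>& probs, std::size_t rows,
                      std::size_t cols, std::vector<int>* labels,
                      std::vector<float>* confidences) {
      labels->resize(rows);
      confidences->resize(rows);
      for (std::size_t r = 0; r < rows; ++r) {
        std::size_t best = 0;
        for (std::size_t c = 1; c < cols; ++c) {
          if (probs[r * cols + c] > probs[r * cols + best]) best = c;
        }
        (*labels)[r] = static_cast<int>(best);
        (*confidences)[r] = probs[r * cols + best];
      }
    }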
View File

@@ -318,7 +318,7 @@ bool TrtBackend::Infer(std::vector<FDTensor>& inputs,
       casted_output_tensors_[(*outputs)[i].name].Resize((*outputs)[i].shape, (*outputs)[i].dtype,
                                                         (*outputs)[i].name, Device::GPU);
-      CudaCast(output_tensor, &casted_output_tensors_[(*outputs)[i].name], stream_);
+      function::CudaCast(output_tensor, &casted_output_tensors_[(*outputs)[i].name], stream_);
     } else {
       casted_output_tensors_[(*outputs)[i].name].SetExternalData(
           (*outputs)[i].shape, model_output_dtype,
@@ -392,7 +392,7 @@ void TrtBackend::SetInputs(const std::vector<FDTensor>& inputs) {
       input_tensor.SetExternalData(item.shape, FDDataType::INT32,
                                    inputs_device_buffer_[item.name].data(),
                                    Device::GPU);
-      CudaCast(item, &input_tensor, stream_);
+      function::CudaCast(item, &input_tensor, stream_);
     } else {
       // no copy
       inputs_device_buffer_[item.name].SetExternalData(dims, item.Data());

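Both hunks keep a persistent staging tensor per binding (casted_output_tensors_, inputs_device_buffer_) and only invoke function::CudaCast when the model dtype and the FDTensor dtype disagree. A sketch of the buffer-reuse idea with hypothetical names (not the actual TrtBackend members):

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // One reusable staging buffer per tensor name, so repeated Infer() calls
    // don't reallocate; resize() is cheap once capacity has grown.
    std::map<std::string, std::vector<int32_t>> casted_buffers;

    std::vector<int32_t>& StagingFor(const std::string& name, std::size_t numel) {
      auto& buf = casted_buffers[name];
      buf.resize(numel);
      return buf;
    }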
View File

@@ -21,7 +21,7 @@
 #include "fastdeploy/utils/utils.h"
 namespace fastdeploy {
+namespace function {
 std::string Str(const std::vector<int64_t>& shape) {
   std::ostringstream oss;
   oss << "[ " << shape[0];
@@ -121,4 +121,5 @@ void Concat(const std::vector<FDTensor>& x, FDTensor* out, int axis) {
   *out = std::move(out_temp);
 }
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -17,6 +17,7 @@
 #include "fastdeploy/core/fd_tensor.h"
 namespace fastdeploy {
+namespace function {
 /** Excute the concatenate operation for input FDTensor along given axis.
     @param x The input tensor.
@@ -27,4 +28,5 @@ namespace fastdeploy {
 FASTDEPLOY_DECL void Concat(const std::vector<FDTensor>& x, FDTensor* out,
                             int axis = 0);
+}  // namespace function
 }  // namespace fastdeploy

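concat.h documents Concat as joining FDTensors along one axis. On row-major data the copy reduces to one inner block per input for each outer slice; a standalone sketch for equal-shaped float inputs (a simplification, not the implementation in concat.cc):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Concatenate equal-shaped row-major tensors along `axis`.
    // outer = product of dims before axis; inner = product of dims from axis on.
    std::vector<float> ConcatAlongAxis(const std::vector<std::vector<float>>& xs,
                                       const std::vector<std::size_t>& shape,
                                       std::size_t axis) {
      std::size_t outer = 1, inner = 1;
      for (std::size_t i = 0; i < axis; ++i) outer *= shape[i];
      for (std::size_t i = axis; i < shape.size(); ++i) inner *= shape[i];
      std::vector<float> out(outer * inner * xs.size());
      float* dst = out.data();
      for (std::size_t o = 0; o < outer; ++o) {
        for (const auto& x : xs) {  // each input contributes one inner block per outer slice
          const float* src = x.data() + o * inner;
          dst = std::copy(src, src + inner, dst);
        }
      }
      return out;
    }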
View File

@@ -15,7 +15,7 @@
 #include "fastdeploy/function/cuda_cast.h"
 namespace fastdeploy {
+namespace function {
 template <typename T_IN, typename T_OUT>
 __global__ void CudaCastKernel(const T_IN* in, T_OUT* out, int edge) {
   int position = blockDim.x * blockIdx.x + threadIdx.x;
@@ -42,4 +42,5 @@ void CudaCast(const FDTensor& in, FDTensor* out, cudaStream_t stream) {
   }
 }
+}  // namespace function
 }  // namespace fastdeploy

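CudaCastKernel converts one element per thread (position = blockDim.x * blockIdx.x + threadIdx.x, guarded by edge); judging from the TrtBackend hunks above, it bridges tensors to the INT32 bindings the TensorRT backend sets up. The serial analogue of the same elementwise cast, for reference (a sketch, not part of the commit):

    // CPU analogue of CudaCastKernel: loop index i plays the role of one
    // CUDA thread's `position`; `edge` is the element count.
    template <typename TIn, typename TOut>
    void CastCpu(const TIn* in, TOut* out, int edge) {
      for (int i = 0; i < edge; ++i) out[i] = static_cast<TOut>(in[i]);
    }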
View File

@@ -17,7 +17,7 @@
 #include "fastdeploy/core/fd_tensor.h"
 namespace fastdeploy {
+namespace function {
 /** Cast the type of the data in GPU buffer.
     @param in The input tensor.
     @param out The output tensor
@@ -25,5 +25,5 @@
 */
 FASTDEPLOY_DECL void CudaCast(const FDTensor& in, FDTensor* out,
                               cudaStream_t stream);
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -15,7 +15,7 @@
 #include "fastdeploy/function/eigen.h"
 namespace fastdeploy {
+namespace function {
 std::shared_ptr<EigenDeviceWrapper> EigenDeviceWrapper::instance_ = nullptr;
 std::shared_ptr<EigenDeviceWrapper> EigenDeviceWrapper::GetInstance() {
@@ -29,4 +29,5 @@ const Eigen::DefaultDevice* EigenDeviceWrapper::GetDevice() const {
   return &device_;
 }
+}
 }  // namespace fastdeploy

View File

@@ -22,6 +22,7 @@
 #include "unsupported/Eigen/CXX11/Tensor"
 namespace fastdeploy {
+namespace function {
 // EigenDim converts shape into Eigen::DSizes.
 template <int D>
 struct EigenDim {
@@ -135,4 +136,5 @@ class EigenDeviceWrapper {
   static std::shared_ptr<EigenDeviceWrapper> instance_;
 };
+}  // namespace function
 }  // namespace fastdeploy

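Per the comment above, EigenDim's job is to turn the runtime shape vector into Eigen's rank-templated Eigen::DSizes. A sketch of that conversion, assuming the usual DSizes array interface from unsupported/Eigen/CXX11/Tensor:

    #include <cstdint>
    #include <vector>
    #include "unsupported/Eigen/CXX11/Tensor"

    // Convert a runtime shape to a rank-D Eigen::DSizes, as EigenDim does.
    template <int D>
    Eigen::DSizes<int64_t, D> ToDSizes(const std::vector<int64_t>& shape) {
      Eigen::DSizes<int64_t, D> dims;
      for (int i = 0; i < D; ++i) dims[i] = shape[i];
      return dims;
    }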
View File

@@ -20,7 +20,7 @@
 #include "fastdeploy/utils/utils.h"
 namespace fastdeploy {
+namespace function {
 template <typename T, int Rank>
 struct PadEigen {
   using Array = std::array<std::pair<int64_t, int64_t>, Rank>;
@@ -123,4 +123,5 @@ void Pad(const FDTensor& x, FDTensor* out, const std::vector<int>& pads, float v
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -17,7 +17,7 @@
 #include "fastdeploy/core/fd_tensor.h"
 namespace fastdeploy {
+namespace function {
 /** Excute the pad operation for input FDTensor along given dims.
     @param x The input tensor.
     @param out The output tensor which stores the result.
@@ -27,4 +27,5 @@ namespace fastdeploy {
 FASTDEPLOY_DECL void Pad(const FDTensor& x, FDTensor* out,
                          const std::vector<int>& pads, float pad_value = 0);
+}
 }  // namespace fastdeploy

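Pad (declared above with a default pad_value of 0) enlarges the tensor and writes the input inside margins given per dimension. The 2-D case as a standalone sketch (the real pad.cc dispatches to Eigen's pad expression via PadEigen):

    #include <cstddef>
    #include <vector>

    // Pad a rows x cols matrix with (top, bottom, left, right) margins
    // filled with pad_value.
    std::vector<float> Pad2D(const std::vector<float>& x, std::size_t rows,
                             std::size_t cols, std::size_t top, std::size_t bottom,
                             std::size_t left, std::size_t right, float pad_value) {
      const std::size_t out_rows = rows + top + bottom;
      const std::size_t out_cols = cols + left + right;
      std::vector<float> out(out_rows * out_cols, pad_value);
      for (std::size_t r = 0; r < rows; ++r)
        for (std::size_t c = 0; c < cols; ++c)
          out[(r + top) * out_cols + (c + left)] = x[r * cols + c];
      return out;
    }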
View File

@@ -23,7 +23,7 @@
 #include "fastdeploy/utils/utils.h"
 namespace fastdeploy {
+namespace function {
 template <typename T, size_t D, size_t R_D, typename Functor>
 void ReduceFunctor(const FDTensor& input, FDTensor* output,
                    const std::vector<int64_t>& dims, bool keep_dim) {
@@ -402,4 +402,5 @@ void ArgMin(const FDTensor& x, FDTensor* out, int64_t axis,
   }));
 }
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -17,7 +17,7 @@
 #include "fastdeploy/core/fd_tensor.h"
 namespace fastdeploy {
+namespace function {
 /** Excute the maximum operation for input FDTensor along given dims.
     @param x The input tensor.
     @param out The output tensor which stores the result.
@@ -123,4 +123,5 @@ FASTDEPLOY_DECL void ArgMin(const FDTensor& x, FDTensor* out, int64_t axis,
                             FDDataType output_dtype = FDDataType::INT64,
                             bool keep_dim = false, bool flatten = false);
+}  // namespace function
 }  // namespace fastdeploy

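reduce.h declares the whole reduce family (Max, ArgMax, ArgMin, ...), all collapsing the chosen dims and optionally keeping them as size 1 (keep_dim). The striding that the ReduceFunctor dispatch ultimately performs, sketched for a single-axis max on row-major data:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Max-reduce over one axis: element (o, a, i) of the input lives at
    // (o * axis_len + a) * inner + i; the axis index a is scanned out.
    std::vector<float> ReduceMaxAxis(const std::vector<float>& x,
                                     const std::vector<std::size_t>& shape,
                                     std::size_t axis) {
      std::size_t outer = 1, inner = 1;
      for (std::size_t i = 0; i < axis; ++i) outer *= shape[i];
      for (std::size_t i = axis + 1; i < shape.size(); ++i) inner *= shape[i];
      const std::size_t axis_len = shape[axis];
      std::vector<float> out(outer * inner);
      for (std::size_t o = 0; o < outer; ++o)
        for (std::size_t i = 0; i < inner; ++i) {
          float best = x[o * axis_len * inner + i];
          for (std::size_t a = 1; a < axis_len; ++a)
            best = std::max(best, x[(o * axis_len + a) * inner + i]);
          out[o * inner + i] = best;
        }
      return out;
    }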
View File

@@ -16,7 +16,7 @@
 #include "fastdeploy/function/eigen.h"
 namespace fastdeploy {
+namespace function {
 //////// Max Functor ///////
 struct MaxFunctor {
   template <typename X, typename Y, typename Dim>
@@ -73,4 +73,5 @@ struct ProdFunctor {
   }
 };
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -21,6 +21,7 @@
 #include "fastdeploy/utils/utils.h"
 namespace fastdeploy {
+namespace function {
 template <typename T>
 struct ValueClip {
   T operator()(const T& x) const {
@@ -124,4 +124,5 @@ void Softmax(const FDTensor& x, FDTensor* out, int axis) {
       ([&] { SoftmaxKernel<data_t>(x, &out_tmp, axis); }));
   *out = std::move(out_tmp);
 }
+}  // namespace function
 }  // namespace fastdeploy

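ValueClip and the SoftmaxKernel it feeds implement the standard overflow guard: exponentiate x - max(x) rather than x, which leaves the result unchanged mathematically but keeps exp() in range. The same idea on one row, standalone:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Numerically stable softmax of a single row: shifting by the row max
    // changes nothing mathematically but prevents exp() overflow.
    void SoftmaxRow(std::vector<float>* row) {
      const float mx = *std::max_element(row->begin(), row->end());
      float sum = 0.f;
      for (float& v : *row) { v = std::exp(v - mx); sum += v; }
      for (float& v : *row) v /= sum;
    }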
View File

@@ -17,7 +17,7 @@
 #include "fastdeploy/core/fd_tensor.h"
 namespace fastdeploy {
+namespace function {
 /** Excute the softmax operation for input FDTensor along given dims.
     @param x The input tensor.
     @param out The output tensor which stores the result.
@@ -25,4 +25,5 @@ namespace fastdeploy {
 */
 FASTDEPLOY_DECL void Softmax(const FDTensor& x, FDTensor* out, int axis = -1);
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -17,7 +17,7 @@
 #include "fastdeploy/utils/utils.h"
 namespace fastdeploy {
+namespace function {
 template <typename T>
 struct TransposeNormalKernel {
   void operator()(const FDTensor& in, FDTensor* out,
@@ -121,4 +121,5 @@ void Transpose(const FDTensor& x, FDTensor* out,
   *out = std::move(out_temp);
 }
+}  // namespace function
 }  // namespace fastdeploy

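Transpose permutes axes by remapping strides (TransposeNormalKernel above is the generic path). A compact sketch of the idea: decode each output offset into indices, then re-encode it through the permuted input strides:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // General N-D transpose on row-major data: output axis k takes input
    // axis perm[k], so the output shape at k is shape[perm[k]].
    std::vector<float> TransposeND(const std::vector<float>& x,
                                   const std::vector<int64_t>& shape,
                                   const std::vector<int64_t>& perm) {
      const std::size_t rank = shape.size();
      std::vector<std::size_t> in_strides(rank, 1), out_strides(rank, 1);
      for (std::size_t i = rank - 1; i > 0; --i)
        in_strides[i - 1] = in_strides[i] * static_cast<std::size_t>(shape[i]);
      for (std::size_t i = rank - 1; i > 0; --i)
        out_strides[i - 1] = out_strides[i] * static_cast<std::size_t>(shape[perm[i]]);
      std::vector<float> out(x.size());
      for (std::size_t pos = 0; pos < out.size(); ++pos) {
        std::size_t rem = pos, src = 0;
        for (std::size_t k = 0; k < rank; ++k) {
          const std::size_t idx = rem / out_strides[k];  // index along output axis k
          rem %= out_strides[k];
          src += idx * in_strides[perm[k]];              // same index on input axis perm[k]
        }
        out[pos] = x[src];
      }
      return out;
    }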
View File

@@ -17,7 +17,7 @@
 #include "fastdeploy/core/fd_tensor.h"
 namespace fastdeploy {
+namespace function {
 /** Excute the transpose operation for input FDTensor along given dims.
     @param x The input tensor.
     @param out The output tensor which stores the result.
@@ -25,4 +25,5 @@ namespace fastdeploy {
 */
 FASTDEPLOY_DECL void Transpose(const FDTensor& x, FDTensor* out,
                                const std::vector<int64_t>& dims);
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -87,7 +87,7 @@ bool ResNet::Postprocess(FDTensor& infer_result,
   // 1. Softmax 2. Choose topk labels 3. Put the result into ClassifyResult variable.
   int num_classes = infer_result.shape[1];
-  Softmax(infer_result, &infer_result);
+  function::Softmax(infer_result, &infer_result);
   const float* infer_result_buffer = reinterpret_cast<float*>(infer_result.Data());
   topk = std::min(num_classes, topk);
   result->label_ids =

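ResNet::Postprocess follows function::Softmax with a top-k pick over the class scores. One way to sketch that selection, sorting only the k best indices:

    #include <algorithm>
    #include <numeric>
    #include <vector>

    // Indices of the k largest scores, best first; k is clamped the same
    // way as std::min(num_classes, topk) above.
    std::vector<int> TopK(const std::vector<float>& scores, int k) {
      k = std::min<int>(k, static_cast<int>(scores.size()));
      std::vector<int> idx(scores.size());
      std::iota(idx.begin(), idx.end(), 0);
      std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                        [&](int a, int b) { return scores[a] > scores[b]; });
      idx.resize(k);
      return idx;
    }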
View File

@@ -75,7 +75,7 @@ bool YOLOv5Cls::Postprocess(const FDTensor& infer_result,
                             ClassifyResult* result, int topk) {
   // Softmax
   FDTensor infer_result_softmax;
-  Softmax(infer_result, &infer_result_softmax, 1);
+  function::Softmax(infer_result, &infer_result_softmax, 1);
   int num_classes = infer_result_softmax.shape[1];
   const float* infer_result_buffer =
       reinterpret_cast<const float*>(infer_result_softmax.Data());

View File

@@ -99,7 +99,11 @@ bool PaddleClasPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTenso
     (*images)[i].ShareWithTensor(&(tensors[i]));
     tensors[i].ExpandDim(0);
   }
-  Concat(tensors, &((*outputs)[0]), 0);
+  if (tensors.size() == 1) {
+    (*outputs)[0] = std::move(tensors[0]);
+  } else {
+    function::Concat(tensors, &((*outputs)[0]), 0);
+  }
   return true;
 }

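Besides the namespace rename, this hunk adds a fast path: a batch of one is moved into the output instead of funneled through Concat, so the single tensor's buffer is transferred rather than copied. The same idea in miniature (hypothetical helper, not FastDeploy API):

    #include <utility>
    #include <vector>

    // With one part, moving transfers the storage for free; only a real
    // multi-part batch pays for the concatenating copy.
    std::vector<float> MergeBatch(std::vector<std::vector<float>>&& parts) {
      if (parts.size() == 1) return std::move(parts[0]);
      std::vector<float> out;
      for (auto& p : parts) out.insert(out.end(), p.begin(), p.end());
      return out;
    }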
View File

@@ -170,7 +170,7 @@ bool PPMatting::Postprocess(
     return false;
   }
   std::vector<int64_t> dim{0, 2, 3, 1};
-  Transpose(alpha_tensor, &alpha_tensor, dim);
+  function::Transpose(alpha_tensor, &alpha_tensor, dim);
   alpha_tensor.Squeeze(0);
   Mat mat = Mat::Create(alpha_tensor);

View File

@@ -220,7 +220,7 @@ bool PaddleSegModel::Postprocess(
   }
   if (!is_with_softmax && apply_softmax) {
-    Softmax(*infer_result, infer_result, 1);
+    function::Softmax(*infer_result, infer_result, 1);
   }
   if (!is_with_argmax) {
@@ -228,7 +228,7 @@ bool PaddleSegModel::Postprocess(
     result->contain_score_map = true;
     std::vector<int64_t> dim{0, 2, 3, 1};
-    Transpose(*infer_result, infer_result, dim);
+    function::Transpose(*infer_result, infer_result, dim);
   }
   // batch always 1, so ignore
   infer_result->shape = {infer_height, infer_width, infer_channel};
@@ -284,11 +284,11 @@ bool PaddleSegModel::Postprocess(
   std::vector<int64_t> reduce_dim{-1};
   // argmax
   if (is_resized) {
-    ArgMax(new_infer_result, &argmax_infer_result, -1, FDDataType::INT32);
-    Max(new_infer_result, &max_score_result, reduce_dim);
+    function::ArgMax(new_infer_result, &argmax_infer_result, -1, FDDataType::INT32);
+    function::Max(new_infer_result, &max_score_result, reduce_dim);
   } else {
-    ArgMax(*infer_result, &argmax_infer_result, -1, FDDataType::INT32);
-    Max(*infer_result, &max_score_result, reduce_dim);
+    function::ArgMax(*infer_result, &argmax_infer_result, -1, FDDataType::INT32);
+    function::Max(*infer_result, &max_score_result, reduce_dim);
   }
   argmax_infer_result_buffer =
       static_cast<int32_t*>(argmax_infer_result.Data());

View File

@@ -21,7 +21,7 @@
 #include "gtest_utils.h"
 namespace fastdeploy {
+namespace function {
 TEST(fastdeploy, concat1) {
   CheckShape check_shape;
   std::vector<FDTensor> inputs(3);
@@ -77,4 +77,5 @@ TEST(fastdeploy, concat5) {
   check_shape(output.shape, {5, 6, 4, 5});
 }
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -22,7 +22,7 @@
 #include "gtest_utils.h"
 namespace fastdeploy {
+namespace function {
 TEST(fastdeploy, pad_2d) {
   FDTensor input, output;
   CheckShape check_shape;
@@ -65,28 +65,5 @@ TEST(fastdeploy, pad_2d_int32_t) {
   check_type(input.dtype, output.dtype);
 }
-//TEST(fastdeploy, transpose_5d) {
-//  FDTensor input, output;
-//  CheckShape check_shape;
-//  CheckData check_data;
-//
-//  std::vector<int64_t> input_shape = {2, 1, 3, 1, 2};
-//  auto total_size = std::accumulate(input_shape.begin(), input_shape.end(), 1,
-//                                    std::multiplies<int64_t>());
-//  std::vector<int> inputs(total_size, 1);
-//  std::iota(inputs.begin(), inputs.end(), 1);
-//  std::vector<int> expected_result = {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12};
-//  input.SetExternalData(input_shape, FDDataType::INT32, inputs.data());
-//
-//  Transpose(input, &output, {0, 1, 4, 3, 2});
-//  check_shape(output.shape, {2, 1, 2, 1, 3});
-//  check_data(reinterpret_cast<const int*>(output.Data()),
-//             expected_result.data(), expected_result.size());
-//
-//  Transpose(input, &input, {0, 1, 4, 3, 2});
-//  check_shape(input.shape, {2, 1, 2, 1, 3});
-//  check_data(reinterpret_cast<const int*>(input.Data()), expected_result.data(),
-//             expected_result.size());
-//}
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -21,7 +21,7 @@
 #include "gtest_utils.h"
 namespace fastdeploy {
+namespace function {
 TEST(fastdeploy, reduce_max) {
   FDTensor input, output;
   CheckShape check_shape;
@@ -371,4 +371,5 @@ TEST(fastdeploy, reduce_argmin) {
              expected_result_noaxis.data(), expected_result_noaxis.size());
 }
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -20,7 +20,7 @@
 #include "gtest_utils.h"
 namespace fastdeploy {
+namespace function {
 TEST(fastdeploy, softmax) {
   FDTensor input, input1, output;
   CheckShape check_shape;
@@ -57,4 +57,5 @@ TEST(fastdeploy, softmax) {
              expected_result_axis1.data(), expected_result_axis1.size());
 }
+}  // namespace function
 }  // namespace fastdeploy

View File

@@ -22,7 +22,7 @@
 #include "gtest_utils.h"
 namespace fastdeploy {
+namespace function {
 TEST(fastdeploy, transpose_2d) {
   FDTensor input, output;
   CheckShape check_shape;
@@ -67,4 +67,5 @@ TEST(fastdeploy, transpose_5d) {
              expected_result.size());
 }
+}  // namespace function
 }  // namespace fastdeploy