[Functions] Add quantile function (#700)

* Add sort function

* Add isfinite function

* Upgrade isinf and isnan functions

* Add Scalar to FDTensor

* Add floor, ceil function

* Add cast functions

* Update out_tmp

* Update quantile

* Add gather/scatter along axis functions

* Finish quantile function

* Add quantile unittest

* Refresh code style for test source code

* Add comments

* Add full function

* Add Scalar to FDTensor

* Add full unittest

* Add functions headers

* Move FDTensor operators to the fastdeploy namespace
Author: Jack Zhou (2022-11-28 09:51:40 +08:00), committed by GitHub
Parent: 4e74ac06fb
Commit: 129dda7809
37 changed files with 1567 additions and 75 deletions
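
The commit message above lists the building blocks a quantile kernel typically combines (sort, floor, ceil, gather/scatter along axis): sort the data, then linearly interpolate between the two order statistics neighboring the requested quantile position. The following is only a minimal standalone sketch of that interpolation scheme, not the FDTensor/axis-based implementation added in this commit:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative only: q-quantile (0 <= q <= 1) of a non-empty vector using
// linear interpolation between neighboring order statistics.
double QuantileSketch(std::vector<double> data, double q) {
  std::sort(data.begin(), data.end());
  double pos = q * static_cast<double>(data.size() - 1);
  std::size_t lo = static_cast<std::size_t>(std::floor(pos));
  std::size_t hi = static_cast<std::size_t>(std::ceil(pos));
  double frac = pos - static_cast<double>(lo);
  return data[lo] + frac * (data[hi] - data[lo]);
}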

fastdeploy/core/fd_scalar.h (new file, 121 lines)

@@ -0,0 +1,121 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cstdint>
#include <limits>
#include "fastdeploy/core/fd_type.h"
#include "fastdeploy/core/float16.h"
namespace fastdeploy {
class Scalar {
public:
// Constructors support implicit conversion from the listed types.
Scalar() : Scalar(0) {}
Scalar(double val) : dtype_(FDDataType::FP64) { // NOLINT
data_.f64 = val;
}
Scalar(float val) : dtype_(FDDataType::FP32) { // NOLINT
data_.f32 = val;
}
Scalar(float16 val) : dtype_(FDDataType::FP16) { // NOLINT
data_.f16 = val;
}
Scalar(int64_t val) : dtype_(FDDataType::INT64) { // NOLINT
data_.i64 = val;
}
Scalar(int32_t val) : dtype_(FDDataType::INT32) { // NOLINT
data_.i32 = val;
}
Scalar(int16_t val) : dtype_(FDDataType::INT16) { // NOLINT
data_.i16 = val;
}
Scalar(int8_t val) : dtype_(FDDataType::INT8) { // NOLINT
data_.i8 = val;
}
Scalar(uint8_t val) : dtype_(FDDataType::UINT8) { // NOLINT
data_.ui8 = val;
}
Scalar(bool val) : dtype_(FDDataType::BOOL) { // NOLINT
data_.b = val;
}
// The compatibility method for fluid operators;
// it will be removed in the future.
explicit Scalar(const std::string& str_value) : dtype_(FDDataType::FP64) {
if (str_value == "inf") {
data_.f64 = std::numeric_limits<double>::infinity();
} else if (str_value == "-inf") {
data_.f64 = -std::numeric_limits<double>::infinity();
} else if (str_value == "nan") {
data_.f64 = std::numeric_limits<double>::quiet_NaN();
} else {
data_.f64 = std::stod(str_value);
}
}
template <typename RT> inline RT to() const {
switch (dtype_) {
case FDDataType::FP32:
return static_cast<RT>(data_.f32);
case FDDataType::FP64:
return static_cast<RT>(data_.f64);
case FDDataType::FP16:
return static_cast<RT>(data_.f16);
case FDDataType::INT32:
return static_cast<RT>(data_.i32);
case FDDataType::INT64:
return static_cast<RT>(data_.i64);
case FDDataType::INT16:
return static_cast<RT>(data_.i16);
case FDDataType::INT8:
return static_cast<RT>(data_.i8);
case FDDataType::UINT8:
return static_cast<RT>(data_.ui8);
case FDDataType::BOOL:
return static_cast<RT>(data_.b);
default:
FDASSERT(false, "Invalid enum scalar data type `%s`.",
Str(dtype_).c_str());
}
}
FDDataType dtype() const { return dtype_; }
private:
FDDataType dtype_;
union data {
bool b;
int8_t i8;
int16_t i16;
int32_t i32;
int64_t i64;
uint8_t ui8;
float16 f16;
float f32;
double f64;
} data_;
};
} // namespace fastdeploy
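
A short usage sketch of the Scalar class defined above; the driver function is illustrative only and not part of this commit:

#include <cstdint>
#include "fastdeploy/core/fd_scalar.h"

void ScalarDemo() {
  fastdeploy::Scalar a = 1.5f;                      // implicit Scalar(float) -> FP32
  fastdeploy::Scalar b = static_cast<int64_t>(42);  // implicit Scalar(int64_t) -> INT64
  fastdeploy::Scalar inf("inf");                    // string form parses to FP64 infinity

  double x = a.to<double>();                        // 1.5, cast from the stored f32
  int32_t y = b.to<int32_t>();                      // 42
  bool is_fp64 = (inf.dtype() == fastdeploy::FDDataType::FP64);  // true
  (void)x; (void)y; (void)is_fp64;
}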

fastdeploy/core/fd_tensor.cc

@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/core/fd_scalar.h"
#include "fastdeploy/core/float16.h"
#include "fastdeploy/utils/utils.h"
#include <algorithm>
#include <cstring>
#ifdef WITH_GPU
@@ -344,6 +346,40 @@ void FDTensor::CopyBuffer(void* dst, const void* src, size_t nbytes,
}
FDTensor::FDTensor(const std::string& tensor_name) { name = tensor_name; }
FDTensor::FDTensor(const char* tensor_name) { name = tensor_name; }
FDTensor::FDTensor(const Scalar& scalar) {
Allocate({1}, scalar.dtype());
switch (scalar.dtype()) {
case FDDataType::BOOL:
(reinterpret_cast<bool*>(Data()))[0] = scalar.to<bool>();
break;
case FDDataType::UINT8:
(reinterpret_cast<uint8_t*>(Data()))[0] = scalar.to<uint8_t>();
break;
case FDDataType::INT8:
(reinterpret_cast<int8_t*>(Data()))[0] = scalar.to<int8_t>();
break;
case FDDataType::INT16:
(reinterpret_cast<int16_t*>(Data()))[0] = scalar.to<int16_t>();
break;
case FDDataType::INT32:
(reinterpret_cast<int*>(Data()))[0] = scalar.to<int>();
break;
case FDDataType::INT64:
(reinterpret_cast<int64_t*>(Data()))[0] = scalar.to<int64_t>();
break;
case FDDataType::FP16:
(reinterpret_cast<float16*>(Data()))[0] = scalar.to<float16>();
break;
case FDDataType::FP32:
(reinterpret_cast<float*>(Data()))[0] = scalar.to<float>();
break;
case FDDataType::FP64:
(reinterpret_cast<double*>(Data()))[0] = scalar.to<double>();
break;
}
}
FDTensor::FDTensor(const FDTensor& other)
: shape(other.shape), name(other.name), dtype(other.dtype),
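
The new constructor above wraps a Scalar into a one-element FDTensor whose dtype follows the scalar. A small illustrative sketch (the demo function is hypothetical, not part of this commit):

#include "fastdeploy/core/fd_scalar.h"
#include "fastdeploy/core/fd_tensor.h"

void ScalarTensorDemo() {
  // Scalar(3.14f) selects FP32, so the tensor is a single-element FP32 buffer.
  fastdeploy::FDTensor t(fastdeploy::Scalar(3.14f));
  float value = (reinterpret_cast<float*>(t.Data()))[0];  // 3.14f
  (void)value;
}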

fastdeploy/core/fd_tensor.h

@@ -23,6 +23,8 @@
namespace fastdeploy {
struct Scalar;
struct FASTDEPLOY_DECL FDTensor {
// std::vector<int8_t> data;
void* buffer_ = nullptr;
@@ -126,6 +128,8 @@ struct FASTDEPLOY_DECL FDTensor {
FDTensor() {}
explicit FDTensor(const std::string& tensor_name);
explicit FDTensor(const char* tensor_name);
// Deep copy
FDTensor(const FDTensor& other);
// Move constructor
@@ -136,6 +140,9 @@ struct FASTDEPLOY_DECL FDTensor {
// Move assignment
FDTensor& operator=(FDTensor&& other);
// Scalar to FDTensor
explicit FDTensor(const Scalar& scalar);
~FDTensor() { FreeFn(); }
static void CopyBuffer(void* dst, const void* src, size_t nbytes,