Mirror of https://github.com/PaddlePaddle/FastDeploy.git
@@ -57,14 +57,14 @@ const void* FDTensor::Data() const {
   return data.data();
 }
 
-void FDTensor::SetExternalData(const std::vector<int>& new_shape,
+void FDTensor::SetExternalData(const std::vector<int64_t>& new_shape,
                                const FDDataType& data_type, void* data_buffer) {
   dtype = data_type;
   shape.assign(new_shape.begin(), new_shape.end());
   external_data_ptr = data_buffer;
 }
 
-void FDTensor::Allocate(const std::vector<int>& new_shape,
+void FDTensor::Allocate(const std::vector<int64_t>& new_shape,
                         const FDDataType& data_type,
                         const std::string& tensor_name) {
   dtype = data_type;
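A minimal caller-side sketch of the changed SetExternalData signature. The buffer, the shape values, the include path, and the FP32 enumerator are assumptions for illustration, not part of the patch:

#include <cstdint>
#include <vector>

#include "fastdeploy/core/fd_tensor.h"  // assumed header location

// Sketch only: wrap an existing, caller-owned float buffer with the new
// std::vector<int64_t> shape parameter.
void WrapExistingBuffer(fastdeploy::FDTensor* tensor, float* host_buffer) {
  std::vector<int64_t> shape = {1, 3, 224, 224};  // illustrative dimensions
  // The tensor only borrows host_buffer; the caller keeps ownership and must
  // keep the allocation alive for as long as the tensor is used.
  tensor->SetExternalData(shape, fastdeploy::FDDataType::FP32, host_buffer);
}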
@@ -59,13 +59,14 @@ struct FASTDEPLOY_DECL FDTensor {
   // Set user memory buffer for Tensor, the memory is managed by
   // the user it self, but the Tensor will share the memory with user
   // So take care with the user buffer
-  void SetExternalData(const std::vector<int>& new_shape,
+  void SetExternalData(const std::vector<int64_t>& new_shape,
                        const FDDataType& data_type, void* data_buffer);
 
   // Initialize Tensor
   // Include setting attribute for tensor
   // and allocate cpu memory buffer
-  void Allocate(const std::vector<int>& new_shape, const FDDataType& data_type,
+  void Allocate(const std::vector<int64_t>& new_shape,
+                const FDDataType& data_type,
                 const std::string& tensor_name = "");
 
   // Total size of tensor memory buffer in bytes
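The Allocate overload declared here follows the same pattern; a short sketch with made-up shape and tensor name, again assuming the public header path:

#include "fastdeploy/core/fd_tensor.h"  // assumed header location

void AllocateScores() {
  // Sketch only: the tensor owns a CPU buffer sized from the int64_t shape
  // {8, 1000}; the tensor_name "scores" is illustrative.
  fastdeploy::FDTensor scores;
  scores.Allocate({8, 1000}, fastdeploy::FDDataType::FP32, "scores");
}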
@@ -54,12 +54,8 @@ void Softmax(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) {
     }
   };
 
-  std::vector<int32_t> output_shape;
-  for (int i = 0; i < input.shape.size(); ++i) {
-    output_shape.push_back(input.shape[i]);
-  }
-  output->Allocate(output_shape, input.dtype);
-  int label_num = output_shape.back();
+  output->Allocate(input.shape, input.dtype);
+  int label_num = output->shape.back();
   int batch_size = input.Numel() / label_num;
   int offset = 0;
   const T* input_ptr = reinterpret_cast<const T*>(input.Data());
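For context on the batch_size = Numel() / label_num split the function relies on, here is a self-contained sketch of softmax over the last axis on a flat buffer. It uses only the standard library with illustrative values; it is not the FastDeploy implementation:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Softmax over the last axis: the flat buffer is treated as batch_size rows
// of label_num scores, the same layout the FDTensor version assumes.
void SoftmaxLastAxis(const std::vector<float>& in, std::vector<float>* out,
                     int64_t label_num) {
  int64_t batch_size = static_cast<int64_t>(in.size()) / label_num;
  out->resize(in.size());
  for (int64_t b = 0; b < batch_size; ++b) {
    const float* row = in.data() + b * label_num;
    float* dst = out->data() + b * label_num;
    float max_v = *std::max_element(row, row + label_num);  // numerical stability
    float sum = 0.0f;
    for (int64_t i = 0; i < label_num; ++i) {
      dst[i] = std::exp(row[i] - max_v);
      sum += dst[i];
    }
    for (int64_t i = 0; i < label_num; ++i) {
      dst[i] /= sum;
    }
  }
}

int main() {
  std::vector<float> logits = {1.0f, 2.0f, 3.0f, 1.0f, 1.0f, 1.0f};  // 2 x 3
  std::vector<float> probs;
  SoftmaxLastAxis(logits, &probs, 3);
  for (float p : probs) std::cout << p << " ";
  std::cout << std::endl;
  return 0;
}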
@@ -73,11 +69,8 @@ void Softmax(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) {
 // Only useful for axis = -1
 template <typename T>
 void Max(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) {
-  std::vector<int32_t> output_shape;
-  for (int i = 0; i < input.shape.size() - 1; ++i) {
-    output_shape.push_back(input.shape[i]);
-  }
-  output_shape.push_back(1);
+  auto output_shape = input.shape;
+  output_shape.back() = 1;
   output->Allocate(output_shape, input.dtype);
   int batch_size = output->Numel();
   int label_num = input.shape.back();
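Likewise for Max: the output keeps the input shape with the last dimension collapsed to 1, which is what output_shape.back() = 1 expresses. A standalone sketch of that reduction with plain standard-library types and illustrative values:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Max over the last axis: the output shape equals the input shape except the
// last dimension becomes 1, mirroring `output_shape.back() = 1` in the patch.
std::vector<float> MaxLastAxis(const std::vector<float>& in,
                               std::vector<int64_t> shape) {
  int64_t label_num = shape.back();
  shape.back() = 1;  // reduced output shape, kept only to illustrate the idea
  int64_t batch_size = static_cast<int64_t>(in.size()) / label_num;
  std::vector<float> out(batch_size);
  for (int64_t b = 0; b < batch_size; ++b) {
    const float* row = in.data() + b * label_num;
    out[b] = *std::max_element(row, row + label_num);
  }
  return out;
}

int main() {
  std::vector<float> scores = {0.1f, 0.7f, 0.2f, 0.4f, 0.5f, 0.1f};  // 2 x 3
  for (float v : MaxLastAxis(scores, {2, 3})) std::cout << v << " ";
  std::cout << std::endl;
  return 0;
}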