Unify the data type of tensor shape (#74)

int32_t -> int64_t: change the element type of tensor shape vectors (e.g. the `new_shape` parameters of `FDTensor::SetExternalData` and `FDTensor::Allocate`) from `std::vector<int>`/`int32_t` to `std::vector<int64_t>`, so shape handling uses one consistent 64-bit type throughout.
This commit is contained in:
Jack Zhou
2022-08-08 14:02:56 +08:00
committed by GitHub
parent 0587c4bbac
commit 32c2154bbe
3 changed files with 10 additions and 16 deletions

View File

@@ -57,14 +57,14 @@ const void* FDTensor::Data() const {
return data.data();
}
void FDTensor::SetExternalData(const std::vector<int>& new_shape,
void FDTensor::SetExternalData(const std::vector<int64_t>& new_shape,
const FDDataType& data_type, void* data_buffer) {
dtype = data_type;
shape.assign(new_shape.begin(), new_shape.end());
external_data_ptr = data_buffer;
}
void FDTensor::Allocate(const std::vector<int>& new_shape,
void FDTensor::Allocate(const std::vector<int64_t>& new_shape,
const FDDataType& data_type,
const std::string& tensor_name) {
dtype = data_type;

View File

@@ -59,13 +59,14 @@ struct FASTDEPLOY_DECL FDTensor {
// Set user memory buffer for Tensor, the memory is managed by
// the user it self, but the Tensor will share the memory with user
// So take care with the user buffer
void SetExternalData(const std::vector<int>& new_shape,
void SetExternalData(const std::vector<int64_t>& new_shape,
const FDDataType& data_type, void* data_buffer);
// Initialize Tensor
// Include setting attribute for tensor
// and allocate cpu memory buffer
void Allocate(const std::vector<int>& new_shape, const FDDataType& data_type,
void Allocate(const std::vector<int64_t>& new_shape,
const FDDataType& data_type,
const std::string& tensor_name = "");
// Total size of tensor memory buffer in bytes

View File

@@ -54,12 +54,8 @@ void Softmax(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) {
}
};
std::vector<int32_t> output_shape;
for (int i = 0; i < input.shape.size(); ++i) {
output_shape.push_back(input.shape[i]);
}
output->Allocate(output_shape, input.dtype);
int label_num = output_shape.back();
output->Allocate(input.shape, input.dtype);
int label_num = output->shape.back();
int batch_size = input.Numel() / label_num;
int offset = 0;
const T* input_ptr = reinterpret_cast<const T*>(input.Data());
@@ -73,11 +69,8 @@ void Softmax(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) {
// Only useful for axis = -1
template <typename T>
void Max(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) {
std::vector<int32_t> output_shape;
for (int i = 0; i < input.shape.size() - 1; ++i) {
output_shape.push_back(input.shape[i]);
}
output_shape.push_back(1);
auto output_shape = input.shape;
output_shape.back() = 1;
output->Allocate(output_shape, input.dtype);
int batch_size = output->Numel();
int label_num = input.shape.back();
@@ -229,4 +222,4 @@ int main() {
}
best_path.PrintInfo();
return 0;
}
}