diff --git a/csrcs/fastdeploy/core/fd_tensor.cc b/csrcs/fastdeploy/core/fd_tensor.cc index c278763ca..c6f7a4739 100644 --- a/csrcs/fastdeploy/core/fd_tensor.cc +++ b/csrcs/fastdeploy/core/fd_tensor.cc @@ -57,14 +57,14 @@ const void* FDTensor::Data() const { return data.data(); } -void FDTensor::SetExternalData(const std::vector<int>& new_shape, +void FDTensor::SetExternalData(const std::vector<int64_t>& new_shape, const FDDataType& data_type, void* data_buffer) { dtype = data_type; shape.assign(new_shape.begin(), new_shape.end()); external_data_ptr = data_buffer; } -void FDTensor::Allocate(const std::vector<int>& new_shape, +void FDTensor::Allocate(const std::vector<int64_t>& new_shape, const FDDataType& data_type, const std::string& tensor_name) { dtype = data_type; diff --git a/csrcs/fastdeploy/core/fd_tensor.h b/csrcs/fastdeploy/core/fd_tensor.h index 14c5a1142..84e8c7ff0 100644 --- a/csrcs/fastdeploy/core/fd_tensor.h +++ b/csrcs/fastdeploy/core/fd_tensor.h @@ -59,13 +59,14 @@ struct FASTDEPLOY_DECL FDTensor { // Set user memory buffer for Tensor, the memory is managed by // the user it self, but the Tensor will share the memory with user // So take care with the user buffer - void SetExternalData(const std::vector<int>& new_shape, + void SetExternalData(const std::vector<int64_t>& new_shape, const FDDataType& data_type, void* data_buffer); // Initialize Tensor // Include setting attribute for tensor // and allocate cpu memory buffer - void Allocate(const std::vector<int>& new_shape, const FDDataType& data_type, + void Allocate(const std::vector<int64_t>& new_shape, + const FDDataType& data_type, const std::string& tensor_name = ""); // Total size of tensor memory buffer in bytes diff --git a/examples/text/ernie_tokencls.cc b/examples/text/ernie_tokencls.cc index 4df1f5705..1f04bbb66 100644 --- a/examples/text/ernie_tokencls.cc +++ b/examples/text/ernie_tokencls.cc @@ -54,12 +54,8 @@ void Softmax(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) { } }; - std::vector<int> output_shape; - for (int i = 0; i < 
input.shape.size(); ++i) { - output_shape.push_back(input.shape[i]); - } - output->Allocate(output_shape, input.dtype); - int label_num = output_shape.back(); + output->Allocate(input.shape, input.dtype); + int label_num = output->shape.back(); int batch_size = input.Numel() / label_num; int offset = 0; const T* input_ptr = reinterpret_cast<const T*>(input.Data()); @@ -73,11 +69,8 @@ void Softmax(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) { // Only useful for axis = -1 template <typename T> void Max(const fastdeploy::FDTensor& input, fastdeploy::FDTensor* output) { - std::vector<int> output_shape; - for (int i = 0; i < input.shape.size() - 1; ++i) { - output_shape.push_back(input.shape[i]); - } - output_shape.push_back(1); + auto output_shape = input.shape; + output_shape.back() = 1; output->Allocate(output_shape, input.dtype); int batch_size = output->Numel(); int label_num = input.shape.back(); @@ -229,4 +222,4 @@ int main() { } best_path.PrintInfo(); return 0; -} \ No newline at end of file +}