Upgrade FDASSERT string format (#121)

* upgrade FDASSERT

* Add c_str() for reduce

* upgrade FDASSERT flag

* upgrade flags

* upgrade lu

* fix tests cmake

* Fix pybind FDASSERT
Jack Zhou
2022-08-17 10:03:41 +08:00
committed by GitHub
parent 82868d2c4d
commit 13a3d6f9f5
15 changed files with 63 additions and 56 deletions
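The change in a nutshell: FDASSERT messages move from std::string concatenation to printf-style format strings, and every std::string argument is now passed through .c_str() so it can travel through the C varargs of snprintf. A minimal before/after sketch, condensed from the first hunk below:

    // Before: message assembled with std::string operator+.
    FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
                                  std::to_string(score_size) + ".");

    // After: printf-style format string; the value is a trailing argument.
    FDASSERT(score_size == 3,
             "Require rank of input scores be 3, but now it's %d.", score_size);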


@@ -162,11 +162,11 @@ void MultiClassNMS::Compute(const float* boxes_data, const float* scores_data,
   int64_t out_dim = box_dim + 2;
   int num_nmsed_out = 0;
-  FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
-                                std::to_string(score_size) + ".");
+  FDASSERT(score_size == 3,
+           "Require rank of input scores be 3, but now it's %d.", score_size);
   FDASSERT(boxes_dim[2] == 4,
-           "Require the 3-dimension of input boxes be 4, but now it's " +
-               std::to_string(boxes_dim[2]) + ".");
+           "Require the 3-dimension of input boxes be 4, but now it's %lld.",
+           box_dim);
   out_num_rois_data.resize(batch_size);
   std::vector<std::map<int, std::vector<int>>> all_indices;


@@ -178,11 +178,11 @@ void MultiClassNmsKernel::Compute(OrtKernelContext* context) {
   int64_t out_dim = box_dim + 2;
   int num_nmsed_out = 0;
-  FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
-                                std::to_string(score_size) + ".");
+  FDASSERT(score_size == 3,
+           "Require rank of input scores be 3, but now it's %d.", score_size);
   FDASSERT(boxes_dim[2] == 4,
-           "Require the 3-dimension of input boxes be 4, but now it's " +
-               std::to_string(boxes_dim[2]) + ".");
+           "Require the 3-dimension of input boxes be 4, but now it's %lld.",
+           box_dim);
   std::vector<int64_t> out_num_rois_dims = {batch_size};
   OrtValue* out_num_rois = ort_.KernelContext_GetOutput(
       context, 2, out_num_rois_dims.data(), out_num_rois_dims.size());


@@ -191,8 +191,10 @@ void OrtBackend::CopyToCpu(const Ort::Value& value, FDTensor* tensor) {
            numel * sizeof(double));
     tensor->dtype = FDDataType::FP64;
   } else {
-    FDASSERT(false, "Unrecognized data type of " + std::to_string(data_type) +
-                        " while calling OrtBackend::CopyToCpu().");
+    FDASSERT(
+        false,
+        "Unrecognized data type of %d while calling OrtBackend::CopyToCpu().",
+        data_type);
   }
 }
@@ -237,9 +239,9 @@ bool OrtBackend::Infer(std::vector<FDTensor>& inputs,
 }
 TensorInfo OrtBackend::GetInputInfo(int index) {
-  FDASSERT(index < NumInputs(), "The index:" + std::to_string(index) +
-                                    " should less than the number of inputs:" +
-                                    std::to_string(NumInputs()) + ".");
+  FDASSERT(index < NumInputs(),
+           "The index: %d should less than the number of inputs: %d.", index,
+           NumInputs());
   TensorInfo info;
   info.name = inputs_desc_[index].name;
   info.shape.assign(inputs_desc_[index].shape.begin(),
@@ -250,9 +252,8 @@ TensorInfo OrtBackend::GetInputInfo(int index) {
 TensorInfo OrtBackend::GetOutputInfo(int index) {
   FDASSERT(index < NumOutputs(),
-           "The index:" + std::to_string(index) +
-               " should less than the number of outputs:" +
-               std::to_string(NumOutputs()) + ".");
+           "The index: %d should less than the number of outputs: %d.", index,
+           NumOutputs());
   TensorInfo info;
   info.name = outputs_desc_[index].name;
   info.shape.assign(outputs_desc_[index].shape.begin(),


@@ -34,7 +34,7 @@ int FDDataTypeSize(const FDDataType& data_type) {
   } else if (data_type == FDDataType::UINT8) {
     return sizeof(uint8_t);
   } else {
-    FDASSERT(false, "Unexpected data type: " + Str(data_type));
+    FDASSERT(false, "Unexpected data type: %s", Str(data_type).c_str());
   }
   return -1;
 }
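A note on the .c_str() calls added here and in the hunks below: Str() returns a std::string, and passing a class object like std::string through the C varargs of snprintf has undefined behavior (it does not decay to a C string), so a %s specifier must be backed by a const char*. A sketch of the distinction, illustrative rather than taken from the diff:

    std::string name = Str(data_type);
    // FDASSERT(false, "Unexpected data type: %s", name);        // UB: std::string in varargs
    FDASSERT(false, "Unexpected data type: %s", name.c_str());   // OK: const char* matches %s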


@@ -89,12 +89,12 @@ bool FastDeployModel::InitRuntime() {
             << Str(runtime_option.backend) << "." << std::endl;
   if (use_gpu) {
     FDASSERT(valid_gpu_backends.size() > 0,
-             "There's no valid gpu backend for " + ModelName() + ".");
+             "There's no valid gpu backend for %s.", ModelName().c_str());
     FDWARNING << "FastDeploy will choose " << Str(valid_gpu_backends[0])
               << " for model inference." << std::endl;
   } else {
     FDASSERT(valid_gpu_backends.size() > 0,
-             "There's no valid cpu backend for " + ModelName() + ".");
+             "There's no valid cpu backend for %s.", ModelName().c_str());
     FDWARNING << "FastDeploy will choose " << Str(valid_cpu_backends[0])
               << " for model inference." << std::endl;
   }


@@ -310,15 +310,16 @@ void ArgMinMax(const FDTensor& x, FDTensor* out, int64_t axis,
   const auto& x_dims = x.shape;
   int64_t x_rank = x_dims.size();
   FDASSERT(axis >= -x_rank,
-           "'axis'(%d) must be greater than or equal to -Rank(X)(%d).", axis,
-           -x_rank);
+           "'axis'(%lld) must be greater than or equal to -Rank(X)(%lld).",
+           axis, -x_rank);
   FDASSERT(axis < x_rank,
-           "'axis'(%d) must be less than or equal to Rank(X)(%d).", axis,
+           "'axis'(%lld) must be less than or equal to Rank(X)(%lld).", axis,
            x_rank);
   FDASSERT(output_dtype == FDDataType::INT32 || FDDataType::INT64,
            "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
            "received [%s].",
-           Str(FDDataType::INT32), Str(FDDataType::INT64), Str(output_dtype));
+           Str(FDDataType::INT32).c_str(), Str(FDDataType::INT64).c_str(),
+           Str(output_dtype).c_str());
   if (axis < 0) axis += x_rank;
   if (output_dtype == FDDataType::INT32) {
     int64_t all_element_num = 0;
@@ -330,7 +331,7 @@ void ArgMinMax(const FDTensor& x, FDTensor* out, int64_t axis,
     }
     FDASSERT(all_element_num <= std::numeric_limits<int>::max(),
              "The element num of the argmin/argmax input at axis is "
-             "%d, is larger than int32 maximum value:%d, you must "
+             "%lld, is larger than int32 maximum value:%d, you must "
              "set the dtype of argmin/argmax to 'int64'.",
              all_element_num, std::numeric_limits<int>::max());
   }


@@ -117,7 +117,7 @@ void Softmax(const FDTensor& x, FDTensor* out, int axis) {
   FDASSERT(
       std::abs(axis) < x.shape.size(),
       "The absolute given axis should be smaller than the input's "
-      "dimension. Expected absolute axis is smaller than %d, but receive %d.",
+      "dimension. Expected absolute axis is smaller than %lu, but receive %d.",
       x.shape.size(), std::abs(axis));
   FD_VISIT_FLOAT_TYPES(x.dtype, "SoftmaxKernel",
                        ([&] { SoftmaxKernel<data_t>(x, out, axis); }));


@@ -95,12 +95,12 @@ void Transpose(const FDTensor& x, FDTensor* out,
   size_t dims_size = dims.size();
   FDASSERT(dims_size == x.shape.size(),
            "The input tensor's dimension should be equal to the dims's size. "
-           "Expect dims size is %d, but receive %d.",
+           "Expect dims size is %lu, but receive %lu.",
            x.shape.size(), dims_size);
   std::vector<int> count(dims_size, 0);
   for (size_t i = 0; i < dims_size; i++) {
     FDASSERT(dims[i] >= 0,
-             "The dims should be greater than or equal to 0, but receive %d.",
+             "The dims should be greater than or equal to 0, but receive %lld.",
              dims[i]);
     FDASSERT(dims[i] < static_cast<int>(dims_size) && ++count[dims[i]] == 1,
              "Each element of Attribute axis should be a unique value range "


@@ -33,8 +33,8 @@ pybind11::dtype FDDataTypeToNumpyDataType(const FDDataType& fd_dtype) {
   } else if (fd_dtype == FDDataType::UINT8) {
     dt = pybind11::dtype::of<uint8_t>();
   } else {
-    FDASSERT(false, "The function doesn't support data type of " +
-                        Str(fd_dtype) + ".");
+    FDASSERT(false, "The function doesn't support data type of %s.",
+             Str(fd_dtype).c_str());
   }
   return dt;
 }
@@ -73,7 +73,8 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,
 pybind11::array TensorToPyArray(const FDTensor& tensor) {
   auto numpy_dtype = FDDataTypeToNumpyDataType(tensor.dtype);
   auto out = pybind11::array(numpy_dtype, tensor.shape);
-  memcpy(out.mutable_data(), tensor.Data(), tensor.Numel() * FDDataTypeSize(tensor.dtype));
+  memcpy(out.mutable_data(), tensor.Data(),
+         tensor.Numel() * FDDataTypeSize(tensor.dtype));
   return out;
 }


@@ -33,8 +33,8 @@ pybind11::dtype FDDataTypeToNumpyDataType(const FDDataType& fd_dtype) {
   } else if (fd_dtype == FDDataType::UINT8) {
     dt = pybind11::dtype::of<uint8_t>();
   } else {
-    FDASSERT(false, "The function doesn't support data type of " +
-                        Str(fd_dtype) + ".");
+    FDASSERT(false, "The function doesn't support data type of %s.",
+             Str(fd_dtype).c_str());
   }
   return dt;
 }


@@ -86,14 +86,13 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
   FDLogger(true, "[INFO]") << __REL_FILE__ << "(" << __LINE__ \
                            << ")::" << __FUNCTION__ << "\t"
 #define FDASSERT(condition, format, ...) \
   if (!(condition)) { \
-    std::string format_string(format); \
-    int n = std::snprintf(nullptr, 0, format_string.data(), ##__VA_ARGS__); \
+    int n = std::snprintf(nullptr, 0, format, ##__VA_ARGS__); \
     std::vector<char> buffer(n + 1); \
-    std::snprintf(buffer.data(), n + 1, format_string.data(), ##__VA_ARGS__); \
+    std::snprintf(buffer.data(), n + 1, format, ##__VA_ARGS__); \
     FDERROR << buffer.data() << std::endl; \
     std::abort(); \
   }
 ///////// Basic Marco ///////////
@@ -128,7 +127,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
             false, \
             "Invalid enum data type. Expect to accept data type BOOL, INT32, " \
             "INT64, FP32, FP64, but receive type %s.", \
-            Str(__dtype__)); \
+            Str(__dtype__).c_str()); \
     } \
   }()
@@ -148,7 +147,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
                "Invalid enum data type. Expect to accept data type INT32, " \
                "INT64, FP32, FP64, but receive type %s.", \
-               Str(__dtype__)); \
+               Str(__dtype__).c_str()); \
     } \
   }()
@@ -164,7 +163,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
               "Invalid enum data type. Expect to accept data type FP32, " \
              "FP64, but receive type %s.", \
-              Str(__dtype__)); \
+              Str(__dtype__).c_str()); \
     } \
   }()
@@ -180,7 +179,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
      FDASSERT(false, \
               "Invalid enum data type. Expect to accept data type INT32, " \
              "INT64, but receive type %s.", \
-              Str(__dtype__)); \
+              Str(__dtype__).c_str()); \
     } \
   }()
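The slimmed-down macro keeps the standard two-pass snprintf idiom: a first call with a null buffer only measures the formatted length, which then sizes the real buffer. Passing the format literal straight to snprintf (instead of routing it through format_string.data()) also lets compilers apply -Wformat-style checking at the expanded call site. The same pattern as a standalone function — a hypothetical FormatString helper, not anything in the diff:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Arguments must be varargs-safe (no std::string; pass .c_str() instead).
    template <typename... Args>
    std::string FormatString(const char* format, Args... args) {
      // Pass 1: with a null buffer, snprintf returns the length needed.
      int n = std::snprintf(nullptr, 0, format, args...);
      if (n < 0) return "";             // formatting error
      std::vector<char> buffer(n + 1);  // +1 for the trailing '\0'
      // Pass 2: format for real into the correctly sized buffer.
      std::snprintf(buffer.data(), n + 1, format, args...);
      return std::string(buffer.data(), n);
    }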


@@ -97,8 +97,9 @@ FDDataType Mat::Type() {
   } else if (type == 1) {
     return FDDataType::INT8;
   } else if (type == 2) {
-    FDASSERT(false, "While calling Mat::Type(), get UINT16 type which is not "
-                    "supported now.");
+    FDASSERT(false,
+             "While calling Mat::Type(), get UINT16 type which is not "
+             "supported now.");
   } else if (type == 3) {
     return FDDataType::INT16;
   } else if (type == 4) {
@@ -108,10 +109,12 @@ FDDataType Mat::Type() {
   } else if (type == 6) {
     return FDDataType::FP64;
   } else {
-    FDASSERT(false, "While calling Mat::Type(), get type = " +
-                        std::to_string(type) + ", which is not expected!.");
+    FDASSERT(
+        false,
+        "While calling Mat::Type(), get type = %d, which is not expected!.",
+        type);
   }
 }
 }  // namespace vision
 }  // namespace fastdeploy


@@ -99,8 +99,8 @@ bool PPYOLOE::BuildPreprocessPipelineFromConfig() {
       auto target_size = op["target_size"].as<std::vector<int>>();
       int interp = op["interp"].as<int>();
       FDASSERT(target_size.size(),
-               "Require size of target_size be 2, but now it's " +
-                   std::to_string(target_size.size()) + ".");
+               "Require size of target_size be 2, but now it's %lu.",
+               target_size.size());
       if (!keep_ratio) {
         int width = target_size[1];
         int height = target_size[0];


@@ -127,8 +127,8 @@ bool PaddleSegModel::Postprocess(
   // 3. shape: 2-D HW
   FDASSERT(infer_result.dtype == FDDataType::INT64 ||
                infer_result.dtype == FDDataType::FP32,
-           "Require the data type of output is int64 or fp32, but now it's " +
-               Str(infer_result.dtype) + ".");
+           "Require the data type of output is int64 or fp32, but now it's %s.",
+           Str(infer_result.dtype).c_str());
   result->Clear();
   if (infer_result.shape.size() == 4) {


@@ -25,9 +25,11 @@ function(cc_test_build TARGET_NAME)
       target_link_libraries(${TARGET_NAME} PUBLIC ${PYTHON_LIBRARIES})
     endif()
     set(EXTERNAL_LIB "")
-  else(WIN32)
+  elseif(APPLE)
+    set(EXTERNAL_LIB "-ldl -lpthread")
+  else()
     set(EXTERNAL_LIB "-lrt -ldl -lpthread")
-  endif(WIN32)
+  endif()
   get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
   target_link_libraries(${TARGET_NAME} PUBLIC ${cc_test_DEPS} ${os_dependency_modules} fastdeploy_gtest_main gtest glog ${EXTERNAL_LIB})
   add_dependencies(${TARGET_NAME} ${cc_test_DEPS} gtest)
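For context on this last hunk: the old else(WIN32) branch linked -lrt on every non-Windows platform, but macOS ships no separate librt (its clock and AIO functions live in libSystem), so the tests failed to link there; the new elseif(APPLE) branch keeps -ldl -lpthread and drops -lrt, leaving the -lrt path to Linux only.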