Upgrade FDASSERT string format (#121)

* Upgrade FDASSERT

* Add c_str() in reduce

* Upgrade FDASSERT flag

* Upgrade flags

* Upgrade %lu specifiers

* Fix tests CMake

* Fix pybind FDASSERT

Author: Jack Zhou
Date: 2022-08-17 10:03:41 +08:00
Committed by: GitHub
Parent: 82868d2c4d
Commit: 13a3d6f9f5
15 changed files with 63 additions and 56 deletions
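
In short, this commit switches FDASSERT from std::string concatenation to printf-style format strings: the macro now hands `format` straight to std::snprintf, so call sites pass a string literal with format specifiers (%d, %s, %lld, %lu) followed by the arguments, and std::string values are converted with .c_str(). A minimal before/after sketch of a call site (the `rank` variable is illustrative, not taken from the diff):

// Before: the message is built by std::string concatenation.
FDASSERT(rank == 3, "Require rank be 3, but now it's " + std::to_string(rank) + ".");

// After: printf-style format string; std::string arguments need .c_str().
FDASSERT(rank == 3, "Require rank be 3, but now it's %d.", rank);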

View File

@@ -162,11 +162,11 @@ void MultiClassNMS::Compute(const float* boxes_data, const float* scores_data,
   int64_t out_dim = box_dim + 2;
   int num_nmsed_out = 0;
-  FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
-                                std::to_string(score_size) + ".");
+  FDASSERT(score_size == 3,
+           "Require rank of input scores be 3, but now it's %d.", score_size);
   FDASSERT(boxes_dim[2] == 4,
-           "Require the 3-dimension of input boxes be 4, but now it's " +
-               std::to_string(boxes_dim[2]) + ".");
+           "Require the 3-dimension of input boxes be 4, but now it's %lld.",
+           box_dim);
   out_num_rois_data.resize(batch_size);
   std::vector<std::map<int, std::vector<int>>> all_indices;

View File

@@ -178,11 +178,11 @@ void MultiClassNmsKernel::Compute(OrtKernelContext* context) {
   int64_t out_dim = box_dim + 2;
   int num_nmsed_out = 0;
-  FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
-                                std::to_string(score_size) + ".");
+  FDASSERT(score_size == 3,
+           "Require rank of input scores be 3, but now it's %d.", score_size);
   FDASSERT(boxes_dim[2] == 4,
-           "Require the 3-dimension of input boxes be 4, but now it's " +
-               std::to_string(boxes_dim[2]) + ".");
+           "Require the 3-dimension of input boxes be 4, but now it's %lld.",
+           box_dim);
   std::vector<int64_t> out_num_rois_dims = {batch_size};
   OrtValue* out_num_rois = ort_.KernelContext_GetOutput(
       context, 2, out_num_rois_dims.data(), out_num_rois_dims.size());

View File

@@ -191,8 +191,10 @@ void OrtBackend::CopyToCpu(const Ort::Value& value, FDTensor* tensor) {
            numel * sizeof(double));
     tensor->dtype = FDDataType::FP64;
   } else {
-    FDASSERT(false, "Unrecognized data type of " + std::to_string(data_type) +
-                        " while calling OrtBackend::CopyToCpu().");
+    FDASSERT(
+        false,
+        "Unrecognized data type of %d while calling OrtBackend::CopyToCpu().",
+        data_type);
   }
 }
@@ -237,9 +239,9 @@ bool OrtBackend::Infer(std::vector<FDTensor>& inputs,
 }
 
 TensorInfo OrtBackend::GetInputInfo(int index) {
-  FDASSERT(index < NumInputs(), "The index:" + std::to_string(index) +
-                                    " should less than the number of inputs:" +
-                                    std::to_string(NumInputs()) + ".");
+  FDASSERT(index < NumInputs(),
+           "The index: %d should less than the number of inputs: %d.", index,
+           NumInputs());
   TensorInfo info;
   info.name = inputs_desc_[index].name;
   info.shape.assign(inputs_desc_[index].shape.begin(),
@@ -250,9 +252,8 @@ TensorInfo OrtBackend::GetInputInfo(int index) {
 
 TensorInfo OrtBackend::GetOutputInfo(int index) {
   FDASSERT(index < NumOutputs(),
-           "The index:" + std::to_string(index) +
-               " should less than the number of outputs:" +
-               std::to_string(NumOutputs()) + ".");
+           "The index: %d should less than the number of outputs: %d.", index,
+           NumOutputs());
   TensorInfo info;
   info.name = outputs_desc_[index].name;
   info.shape.assign(outputs_desc_[index].shape.begin(),

View File

@@ -34,7 +34,7 @@ int FDDataTypeSize(const FDDataType& data_type) {
   } else if (data_type == FDDataType::UINT8) {
     return sizeof(uint8_t);
   } else {
-    FDASSERT(false, "Unexpected data type: " + Str(data_type));
+    FDASSERT(false, "Unexpected data type: %s", Str(data_type).c_str());
   }
   return -1;
 }
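
Note that Str(data_type) returns a temporary std::string. Because FDASSERT is a macro, Str(data_type).c_str() is re-expanded at each internal snprintf call, and each temporary lives to the end of the statement that uses it, so the pointer is never dangling. A standalone sketch of the same lifetime rule (Describe is an illustrative stand-in for Str, not a FastDeploy function):

#include <cstdio>
#include <string>

std::string Describe(int code) { return "code-" + std::to_string(code); }

int main() {
  // The temporary returned by Describe() lives until the end of this
  // statement, so the pointer from .c_str() is valid inside printf.
  std::printf("Unexpected data type: %s\n", Describe(42).c_str());
  return 0;
}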

View File

@@ -89,12 +89,12 @@ bool FastDeployModel::InitRuntime() {
             << Str(runtime_option.backend) << "." << std::endl;
   if (use_gpu) {
     FDASSERT(valid_gpu_backends.size() > 0,
-             "There's no valid gpu backend for " + ModelName() + ".");
+             "There's no valid gpu backend for %s.", ModelName().c_str());
     FDWARNING << "FastDeploy will choose " << Str(valid_gpu_backends[0])
               << " for model inference." << std::endl;
   } else {
     FDASSERT(valid_gpu_backends.size() > 0,
-             "There's no valid cpu backend for " + ModelName() + ".");
+             "There's no valid cpu backend for %s.", ModelName().c_str());
     FDWARNING << "FastDeploy will choose " << Str(valid_cpu_backends[0])
               << " for model inference." << std::endl;
   }

View File

@@ -310,15 +310,16 @@ void ArgMinMax(const FDTensor& x, FDTensor* out, int64_t axis,
   const auto& x_dims = x.shape;
   int64_t x_rank = x_dims.size();
   FDASSERT(axis >= -x_rank,
-           "'axis'(%d) must be greater than or equal to -Rank(X)(%d).", axis,
-           -x_rank);
+           "'axis'(%lld) must be greater than or equal to -Rank(X)(%lld).",
+           axis, -x_rank);
   FDASSERT(axis < x_rank,
-           "'axis'(%d) must be less than or equal to Rank(X)(%d).", axis,
+           "'axis'(%lld) must be less than or equal to Rank(X)(%lld).", axis,
            x_rank);
   FDASSERT(output_dtype == FDDataType::INT32 || FDDataType::INT64,
            "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
            "received [%s].",
-           Str(FDDataType::INT32), Str(FDDataType::INT64), Str(output_dtype));
+           Str(FDDataType::INT32).c_str(), Str(FDDataType::INT64).c_str(),
+           Str(output_dtype).c_str());
   if (axis < 0) axis += x_rank;
   if (output_dtype == FDDataType::INT32) {
     int64_t all_element_num = 0;
@@ -330,7 +331,7 @@ void ArgMinMax(const FDTensor& x, FDTensor* out, int64_t axis,
     }
     FDASSERT(all_element_num <= std::numeric_limits<int>::max(),
              "The element num of the argmin/argmax input at axis is "
-             "%d, is larger than int32 maximum value:%d, you must "
+             "%lld, is larger than int32 maximum value:%d, you must "
              "set the dtype of argmin/argmax to 'int64'.",
              all_element_num, std::numeric_limits<int>::max());
   }

View File

@@ -117,7 +117,7 @@ void Softmax(const FDTensor& x, FDTensor* out, int axis) {
   FDASSERT(
       std::abs(axis) < x.shape.size(),
       "The absolute given axis should be smaller than the input's "
-      "dimension. Expected absolute axis is smaller than %d, but receive %d.",
+      "dimension. Expected absolute axis is smaller than %lu, but receive %d.",
       x.shape.size(), std::abs(axis));
   FD_VISIT_FLOAT_TYPES(x.dtype, "SoftmaxKernel",
                        ([&] { SoftmaxKernel<data_t>(x, out, axis); }));

View File

@@ -95,12 +95,12 @@ void Transpose(const FDTensor& x, FDTensor* out,
   size_t dims_size = dims.size();
   FDASSERT(dims_size == x.shape.size(),
            "The input tensor's dimension should be equal to the dims's size. "
-           "Expect dims size is %d, but receive %d.",
+           "Expect dims size is %lu, but receive %lu.",
            x.shape.size(), dims_size);
   std::vector<int> count(dims_size, 0);
   for (size_t i = 0; i < dims_size; i++) {
     FDASSERT(dims[i] >= 0,
-             "The dims should be greater than or equal to 0, but receive %d.",
+             "The dims should be greater than or equal to 0, but receive %lld.",
              dims[i]);
     FDASSERT(dims[i] < static_cast<int>(dims_size) && ++count[dims[i]] == 1,
              "Each element of Attribute axis should be a unique value range "

View File

@@ -33,8 +33,8 @@ pybind11::dtype FDDataTypeToNumpyDataType(const FDDataType& fd_dtype) {
   } else if (fd_dtype == FDDataType::UINT8) {
     dt = pybind11::dtype::of<uint8_t>();
   } else {
-    FDASSERT(false, "The function doesn't support data type of " +
-                        Str(fd_dtype) + ".");
+    FDASSERT(false, "The function doesn't support data type of %s.",
+             Str(fd_dtype).c_str());
   }
   return dt;
 }
@@ -73,7 +73,8 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,
 
 pybind11::array TensorToPyArray(const FDTensor& tensor) {
   auto numpy_dtype = FDDataTypeToNumpyDataType(tensor.dtype);
   auto out = pybind11::array(numpy_dtype, tensor.shape);
-  memcpy(out.mutable_data(), tensor.Data(), tensor.Numel() * FDDataTypeSize(tensor.dtype));
+  memcpy(out.mutable_data(), tensor.Data(),
+         tensor.Numel() * FDDataTypeSize(tensor.dtype));
   return out;
 }

View File

@@ -33,8 +33,8 @@ pybind11::dtype FDDataTypeToNumpyDataType(const FDDataType& fd_dtype) {
   } else if (fd_dtype == FDDataType::UINT8) {
     dt = pybind11::dtype::of<uint8_t>();
   } else {
-    FDASSERT(false, "The function doesn't support data type of " +
-                        Str(fd_dtype) + ".");
+    FDASSERT(false, "The function doesn't support data type of %s.",
+             Str(fd_dtype).c_str());
   }
   return dt;
 }

View File

@@ -88,10 +88,9 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
 #define FDASSERT(condition, format, ...) \
   if (!(condition)) { \
-    std::string format_string(format); \
-    int n = std::snprintf(nullptr, 0, format_string.data(), ##__VA_ARGS__); \
+    int n = std::snprintf(nullptr, 0, format, ##__VA_ARGS__); \
     std::vector<char> buffer(n + 1); \
-    std::snprintf(buffer.data(), n + 1, format_string.data(), ##__VA_ARGS__); \
+    std::snprintf(buffer.data(), n + 1, format, ##__VA_ARGS__); \
     FDERROR << buffer.data() << std::endl; \
     std::abort(); \
   }
@@ -128,7 +127,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
         false, \
         "Invalid enum data type. Expect to accept data type BOOL, INT32, " \
         "INT64, FP32, FP64, but receive type %s.", \
-        Str(__dtype__)); \
+        Str(__dtype__).c_str()); \
     } \
   }()
@@ -148,7 +147,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
                "Invalid enum data type. Expect to accept data type INT32, " \
                "INT64, FP32, FP64, but receive type %s.", \
-               Str(__dtype__)); \
+               Str(__dtype__).c_str()); \
     } \
   }()
@@ -164,7 +163,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
      FDASSERT(false, \
               "Invalid enum data type. Expect to accept data type FP32, " \
               "FP64, but receive type %s.", \
-              Str(__dtype__)); \
+              Str(__dtype__).c_str()); \
     } \
   }()
@@ -180,7 +179,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
                "Invalid enum data type. Expect to accept data type INT32, " \
                "INT64, but receive type %s.", \
-               Str(__dtype__)); \
+               Str(__dtype__).c_str()); \
     } \
   }()
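
The macro's two-pass std::snprintf idiom is unchanged: the first call with a null buffer only computes the required length, and the second writes into a buffer sized from that result. What changed is that `format` is now forwarded as-is, so it must be a C string rather than something convertible to std::string, which is why every call site above switched to literals plus .c_str(). The same idiom in free-function form, as a sketch (FormatMessage is an illustrative name, not part of FastDeploy):

#include <cstdio>
#include <string>
#include <vector>

// Two-pass snprintf: measure, then format into an exactly sized buffer.
template <typename... Args>
std::string FormatMessage(const char* format, Args... args) {
  int n = std::snprintf(nullptr, 0, format, args...);  // length only
  std::vector<char> buffer(n + 1);                     // +1 for the '\0'
  std::snprintf(buffer.data(), n + 1, format, args...);
  return std::string(buffer.data());
}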

View File

@@ -97,7 +97,8 @@ FDDataType Mat::Type() {
   } else if (type == 1) {
     return FDDataType::INT8;
   } else if (type == 2) {
-    FDASSERT(false, "While calling Mat::Type(), get UINT16 type which is not "
+    FDASSERT(false,
+             "While calling Mat::Type(), get UINT16 type which is not "
              "supported now.");
   } else if (type == 3) {
     return FDDataType::INT16;
@@ -108,8 +109,10 @@ FDDataType Mat::Type() {
   } else if (type == 6) {
     return FDDataType::FP64;
   } else {
-    FDASSERT(false, "While calling Mat::Type(), get type = " +
-                        std::to_string(type) + ", which is not expected!.");
+    FDASSERT(
+        false,
+        "While calling Mat::Type(), get type = %d, which is not expected!.",
+        type);
   }
 }

View File

@@ -99,8 +99,8 @@ bool PPYOLOE::BuildPreprocessPipelineFromConfig() {
       auto target_size = op["target_size"].as<std::vector<int>>();
       int interp = op["interp"].as<int>();
       FDASSERT(target_size.size(),
-               "Require size of target_size be 2, but now it's " +
-                   std::to_string(target_size.size()) + ".");
+               "Require size of target_size be 2, but now it's %lu.",
+               target_size.size());
       if (!keep_ratio) {
         int width = target_size[1];
         int height = target_size[0];

View File

@@ -127,8 +127,8 @@ bool PaddleSegModel::Postprocess(
   // 3. shape: 2-D HW
   FDASSERT(infer_result.dtype == FDDataType::INT64 ||
                infer_result.dtype == FDDataType::FP32,
-           "Require the data type of output is int64 or fp32, but now it's " +
-               Str(infer_result.dtype) + ".");
+           "Require the data type of output is int64 or fp32, but now it's %s.",
+           Str(infer_result.dtype).c_str());
   result->Clear();
   if (infer_result.shape.size() == 4) {

View File

@@ -25,9 +25,11 @@ function(cc_test_build TARGET_NAME)
       target_link_libraries(${TARGET_NAME} PUBLIC ${PYTHON_LIBRARIES})
     endif()
     set(EXTERNAL_LIB "")
-  else(WIN32)
+  elseif(APPLE)
+    set(EXTERNAL_LIB "-ldl -lpthread")
+  else()
     set(EXTERNAL_LIB "-lrt -ldl -lpthread")
-  endif(WIN32)
+  endif()
   get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
   target_link_libraries(${TARGET_NAME} PUBLIC ${cc_test_DEPS} ${os_dependency_modules} fastdeploy_gtest_main gtest glog ${EXTERNAL_LIB})
   add_dependencies(${TARGET_NAME} ${cc_test_DEPS} gtest)