Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-27 18:41:02 +08:00)
[Android] Add android aar package (#416)
* [Android] Add Android build docs and demo (#26)
* [Backend] Add override flag to lite backend
* [Docs] Add Android C++ SDK build docs
* [Doc] Fix android_build_docs typos
* Update CMakeLists.txt
* Update android.md
* [Doc] Add PicoDet Android demo docs
* [Doc] Update PicoDet Android demo docs
* [Doc] Update PaddleClasModel Android demo docs
* [Doc] Update fastdeploy android jni docs
* [Doc] Update fastdeploy android jni usage docs
* [Android] Init fastdeploy android jar package
* [Backend] Support int8 option for lite backend
* [Model] Add Backend::Lite to Paddle models
* [Backend] Use CopyFromCpu for lite backend
* [Android] Package jni srcs and java api into aar
* Update infer.cc
* Update infer.cc
* [Android] Update package build.gradle
* [Android] Update android app examples
* [Android] Update android detection app
@@ -42,7 +42,11 @@ FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
 void LiteBackend::BuildOption(const LiteBackendOption& option) {
   option_ = option;
   std::vector<paddle::lite_api::Place> valid_places;
-  if (option.enable_fp16) {
+  if (option_.enable_int8) {
+    valid_places.push_back(
+        paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
+  }
+  if (option_.enable_fp16) {
     paddle::lite_api::MobileConfig check_fp16_config;
     // Determine whether the device supports the FP16
     // instruction set (or whether it is an arm device
@@ -58,12 +62,12 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
   valid_places.push_back(
       paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
   config_.set_valid_places(valid_places);
-  if (option.threads > 0) {
-    config_.set_threads(option.threads);
+  if (option_.threads > 0) {
+    config_.set_threads(option_.threads);
   }
-  if (option.power_mode > 0) {
+  if (option_.power_mode > 0) {
     config_.set_power_mode(
-        static_cast<paddle::lite_api::PowerMode>(option.power_mode));
+        static_cast<paddle::lite_api::PowerMode>(option_.power_mode));
   }
 }
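For context: Paddle Lite selects kernels by walking valid_places in order, so the push order above is a precision priority. A minimal sketch of the list BuildOption assembles when both toggles are on; the fp16 push sits in the lines elided from the hunk, so PRECISION(kFP16) is an assumption here, not quoted from the patch:

#include <vector>
#include "paddle_api.h"  // Paddle Lite C++ API header; path may vary by distribution

// Sketch only: the precision priority implied by the diff above when
// enable_int8 and enable_fp16 are both true. Earlier places win.
void BuildPlacesSketch(paddle::lite_api::MobileConfig& config) {
  std::vector<paddle::lite_api::Place> valid_places;
  valid_places.push_back(
      paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});   // try int8 first
  valid_places.push_back(
      paddle::lite_api::Place{TARGET(kARM), PRECISION(kFP16)});   // then fp16 (assumed; elided in the hunk)
  valid_places.push_back(
      paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});  // fp32 fallback
  config.set_valid_places(valid_places);
}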
@@ -136,14 +140,13 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
 std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }

 bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
-                       std::vector<FDTensor>* outputs) {
+                        std::vector<FDTensor>* outputs) {
   if (inputs.size() != inputs_desc_.size()) {
     FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
             << ") should keep same with the inputs of this model("
             << inputs_desc_.size() << ")." << std::endl;
     return false;
   }
-
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto iter = inputs_order_.find(inputs[i].name);
     if (iter == inputs_order_.end()) {
@@ -152,12 +155,29 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
       return false;
     }
     auto tensor = predictor_->GetInput(iter->second);
-    tensor->Resize(inputs[i].shape);
-    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
-                                inputs[i].Nbytes(),
-                                paddle::lite_api::TargetType::kARM);
+    // Adjust dims only; allocation is lazy.
+    tensor->Resize(inputs[i].shape);
+    if (inputs[i].dtype == FDDataType::FP32) {
+      tensor->CopyFromCpu<float, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const float*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT32) {
+      tensor->CopyFromCpu<int, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT8) {
+      tensor->CopyFromCpu<int8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::UINT8) {
+      tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const uint8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else {
+      FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
+    }
   }

   predictor_->Run();

   outputs->resize(outputs_desc_.size());
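Why the switch matters: ShareExternalMemory aliases the caller's buffer, so the FDTensor had to stay alive and unmodified until Run() completed, while CopyFromCpu pays one memcpy but decouples the Lite tensor from the caller's memory. The four branches differ only in the element type; a hypothetical helper (CopyInputToLite is illustrative, not part of the patch) could factor that out:

// Hypothetical helper, assuming the same FDTensor::CpuData() and
// Paddle Lite CopyFromCpu APIs used in the diff above.
template <typename T>
void CopyInputToLite(paddle::lite_api::Tensor* tensor, const FDTensor& fd) {
  tensor->CopyFromCpu<T, paddle::lite_api::TargetType::kARM>(
      reinterpret_cast<const T*>(fd.CpuData()));
}

// The FP32 branch would then shrink to:
//   CopyInputToLite<float>(tensor.get(), inputs[i]);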
@@ -37,6 +37,8 @@ struct LiteBackendOption {
   int power_mode = 3;
   // enable fp16
   bool enable_fp16 = false;
+  // enable int8
+  bool enable_int8 = false;
   // optimized model dir for CxxConfig
   std::string optimized_model_dir = "";
   // TODO(qiuyanjun): support more options for lite backend.
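Taken together, the backend-facing option now carries both precision toggles. A direct-construction sketch with illustrative values (Runtime::CreateLiteBackend performs this mapping from RuntimeOption, as a later hunk shows):

// Sketch: a LiteBackendOption with the new int8 toggle enabled.
LiteBackendOption lite_option;
lite_option.threads = 4;          // illustrative thread count
lite_option.enable_int8 = true;   // new field from this patch
lite_option.enable_fp16 = false;
lite_option.power_mode = 3;       // 3 == LITE_POWER_NO_BIND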
@@ -321,7 +321,17 @@ void RuntimeOption::EnableLiteFP16() {
   lite_enable_fp16 = true;
 }

-void RuntimeOption::DisableLiteFP16() { lite_enable_fp16 = false; }
+void RuntimeOption::DisableLiteFP16() {
+  lite_enable_fp16 = false;
+}
+
+void RuntimeOption::EnableLiteInt8() {
+  lite_enable_int8 = true;
+}
+
+void RuntimeOption::DisableLiteInt8() {
+  lite_enable_int8 = false;
+}

 void RuntimeOption::SetLitePowerMode(LitePowerMode mode) {
   lite_power_mode = mode;
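At the user level, these setters are meant to be called on a RuntimeOption before constructing a model. A minimal usage sketch; UseCpu()/UseLiteBackend() are the existing FastDeploy backend selectors, though exact names may differ across versions:

#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseCpu();
  option.UseLiteBackend();  // route inference through Paddle Lite
  option.EnableLiteInt8();  // new in this patch: prefer int8 kernels
  option.EnableLiteFP16();  // fp16 where int8 kernels are unavailable
  option.SetLitePowerMode(fastdeploy::LitePowerMode::LITE_POWER_NO_BIND);
  // ... pass `option` into a model constructor as custom_option.
  return 0;
}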
@@ -650,6 +660,7 @@ void Runtime::CreateLiteBackend() {
 #ifdef ENABLE_LITE_BACKEND
   auto lite_option = LiteBackendOption();
   lite_option.threads = option.cpu_thread_num;
+  lite_option.enable_int8 = option.lite_enable_int8;
   lite_option.enable_fp16 = option.lite_enable_fp16;
   lite_option.power_mode = static_cast<int>(option.lite_power_mode);
   lite_option.optimized_model_dir = option.lite_optimized_model_dir;
@@ -173,6 +173,16 @@ struct FASTDEPLOY_DECL RuntimeOption {
    */
   void DisableLiteFP16();

+  /**
+   * @brief Enable int8 precision while using the Paddle Lite backend
+   */
+  void EnableLiteInt8();
+
+  /**
+   * @brief Disable int8 precision, change back to full precision (float32)
+   */
+  void DisableLiteInt8();
+
   /**
    * @brief Set power mode while using Paddle Lite as inference backend, mode(0: LITE_POWER_HIGH; 1: LITE_POWER_LOW; 2: LITE_POWER_FULL; 3: LITE_POWER_NO_BIND, 4: LITE_POWER_RAND_HIGH; 5: LITE_POWER_RAND_LOW, refer [paddle lite](https://paddle-lite.readthedocs.io/zh/latest/api_reference/cxx_api_doc.html#set-power-mode) for more details)
    */
@@ -260,6 +270,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
   // 3: LITE_POWER_NO_BIND 4: LITE_POWER_RAND_HIGH
   // 5: LITE_POWER_RAND_LOW
   LitePowerMode lite_power_mode = LitePowerMode::LITE_POWER_NO_BIND;
+  // enable int8 or not
+  bool lite_enable_int8 = false;
   // enable fp16 or not
   bool lite_enable_fp16 = false;
   // optimized model dir for CxxConfig
@@ -24,7 +24,7 @@ MaskRCNN::MaskRCNN(const std::string& model_file,
                    const RuntimeOption& custom_option,
                    const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER};
+  valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -23,7 +23,7 @@ PPYOLO::PPYOLO(const std::string& model_file, const std::string& params_file,
                const RuntimeOption& custom_option,
                const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER};
   has_nms_ = true;
   runtime_option = custom_option;
@@ -14,7 +14,7 @@ PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
                  const RuntimeOption& custom_option,
                  const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
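With Backend::LITE now listed in valid_cpu_backends, these detectors can run on the Paddle Lite backend on ARM devices. A hedged end-to-end sketch for PPYOLOE; file paths are placeholders, and the constructor signature follows the diff above:

#include <iostream>
#include "fastdeploy/vision.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseCpu();
  option.UseLiteBackend();  // now a valid CPU backend for PPYOLOE

  // Placeholder paths; an Android app would load these from its assets.
  auto model = fastdeploy::vision::detection::PPYOLOE(
      "ppyoloe/model.pdmodel", "ppyoloe/model.pdiparams",
      "ppyoloe/infer_cfg.yml", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize PPYOLOE." << std::endl;
    return -1;
  }
  return 0;
}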
@@ -24,7 +24,7 @@ FasterRCNN::FasterRCNN(const std::string& model_file,
                        const RuntimeOption& custom_option,
                        const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER};
+  valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER};
   has_nms_ = true;
   runtime_option = custom_option;
@@ -23,7 +23,7 @@ YOLOv3::YOLOv3(const std::string& model_file, const std::string& params_file,
                const RuntimeOption& custom_option,
                const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -24,7 +24,7 @@ PaddleYOLOX::PaddleYOLOX(const std::string& model_file,
                          const RuntimeOption& custom_option,
                          const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -30,7 +30,7 @@ InsightFaceRecognitionModel::InsightFaceRecognitionModel(
     valid_cpu_backends = {Backend::ORT};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }
   runtime_option = custom_option;
@@ -16,7 +16,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file,
                        const RuntimeOption& custom_option,
                        const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+  valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -25,7 +25,7 @@ PPMatting::PPMatting(const std::string& model_file,
                      const RuntimeOption& custom_option,
                      const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -30,7 +30,7 @@ Classifier::Classifier(const std::string& model_file,
                           Backend::OPENVINO};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }
   runtime_option = custom_option;
@@ -30,7 +30,7 @@ DBDetector::DBDetector(const std::string& model_file,
                           Backend::OPENVINO};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }
@@ -110,7 +110,6 @@ bool PPOCRv2::Predict(cv::Mat* img,
     if (nullptr != classifier_ && result->cls_labels[i] % 2 == 1 && result->cls_scores[i] > classifier_->cls_thresh) {
       cv::rotate(image_list[i], image_list[i], 1);
     }
-
     if (nullptr != recognizer_ && !Recognize(&(image_list[i]), result)) {
       FDERROR << "Failed to recognize cropped image of index " << i << "." << std::endl;
       return false;
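A note on the magic number in the hunk above: the literal 1 passed to cv::rotate is cv::ROTATE_180, i.e. crops the angle classifier flags as upside-down (odd label with a confident score) are flipped before recognition. An equivalent, more readable form:

#include <opencv2/opencv.hpp>

// Equivalent to cv::rotate(img, img, 1) in the hunk above: flip a text
// crop that the direction classifier marked as upside-down.
void FlipUpsideDownCrop(cv::Mat& img) {
  cv::rotate(img, img, cv::ROTATE_180);
}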
@@ -48,7 +48,7 @@ Recognizer::Recognizer(const std::string& model_file,
                           Backend::OPENVINO};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }
@@ -26,7 +26,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
                                const RuntimeOption& custom_option,
                                const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -106,7 +106,7 @@ bool PaddleSegModel::BuildPreprocessPipelineFromConfig() {
           << "Please refer to https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_export.md"
           << " to export model with fixed input shape."
           << std::endl;
-    valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
+    valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER};
   }
   if (input_height != -1 && input_width != -1 && !yml_contain_resize_op) {