[Android] Add android aar package (#416)

* [Android] Add Android build docs and demo (#26)

* [Backend] Add override flag to lite backend

* [Docs] Add Android C++ SDK build docs

* [Doc] fix android_build_docs typos

* Update CMakeLists.txt

* Update android.md

* [Doc] Add PicoDet Android demo docs

* [Doc] Update PicoDet Android demo docs

* [Doc] Update PaddleClasModel Android demo docs

* [Doc] Update fastdeploy android jni docs

* [Doc] Update fastdeploy android jni usage docs

* [Android] init fastdeploy android jar package

* [Backend] support int8 option for lite backend

* [Model] add Backend::Lite to paddle model

* [Backend] use CopyFromCpu for lite backend.

* [Android] package jni srcs and java api into aar

* Update infer.cc

* Update infer.cc

* [Android] Update package build.gradle

* [Android] Update android app examples

* [Android] update android detection app
Authored by DefTruth on 2022-10-26 17:01:14 +08:00, committed by GitHub.
Parent: b064ddf7ed · Commit: a51e5a6e55
137 changed files with 4664 additions and 37 deletions
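Before the diff, a usage sketch of what this change enables on the app side. UseLiteBackend(), SetCpuThreadNum(), and EnableLiteFP16() match fastdeploy::RuntimeOption as I understand it at the time of this commit; the int8 toggle is an assumption and its real name may differ.

// Usage sketch (hedged): routing a PicoDet model through the Lite backend
// with the options this PR wires up. The int8 toggle below is an assumption.
#include "fastdeploy/vision.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseCpu();
  option.UseLiteBackend();     // select the Paddle Lite backend
  option.SetCpuThreadNum(4);   // -> LiteBackendOption::threads
  option.EnableLiteFP16();     // -> LiteBackendOption::enable_fp16
  // option.EnableLiteInt8();  // assumed counterpart for enable_int8

  auto model = fastdeploy::vision::detection::PicoDet(
      "picodet.pdmodel", "picodet.pdiparams", "infer_cfg.yml", option);
  return model.Initialized() ? 0 : 1;
}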


@@ -42,7 +42,11 @@ FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
 void LiteBackend::BuildOption(const LiteBackendOption& option) {
   option_ = option;
   std::vector<paddle::lite_api::Place> valid_places;
-  if (option.enable_fp16) {
+  if (option_.enable_int8) {
+    valid_places.push_back(
+        paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
+  }
+  if (option_.enable_fp16) {
     paddle::lite_api::MobileConfig check_fp16_config;
     // Determine whether the device supports the FP16
     // instruction set (or whether it is an arm device
@@ -58,12 +62,12 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
   valid_places.push_back(
       paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
   config_.set_valid_places(valid_places);
-  if (option.threads > 0) {
-    config_.set_threads(option.threads);
+  if (option_.threads > 0) {
+    config_.set_threads(option_.threads);
   }
-  if (option.power_mode > 0) {
+  if (option_.power_mode > 0) {
     config_.set_power_mode(
-        static_cast<paddle::lite_api::PowerMode>(option.power_mode));
+        static_cast<paddle::lite_api::PowerMode>(option_.power_mode));
   }
 }
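For reference, a minimal standalone sketch of the Paddle Lite setup that BuildOption assembles above, assuming the Lite C++ API from paddle_api.h. Place ordering encodes precision preference, so the int8 place is pushed before the fp32 fallback; all calls below are ones the hunks themselves use.

// Minimal sketch, assuming Paddle Lite's C++ API (paddle_api.h).
#include <vector>
#include "paddle_api.h"

void ConfigureLite(paddle::lite_api::CxxConfig* config, bool enable_int8) {
  std::vector<paddle::lite_api::Place> valid_places;
  if (enable_int8) {
    // Listed first, so int8 kernels are preferred where the model allows.
    valid_places.push_back(
        paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
  }
  valid_places.push_back(
      paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
  config->set_valid_places(valid_places);
  config->set_threads(4);
  config->set_power_mode(paddle::lite_api::PowerMode::LITE_POWER_NO_BIND);
}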
@@ -136,14 +140,13 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
 std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }
 
 bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
-                       std::vector<FDTensor>* outputs) {
+                        std::vector<FDTensor>* outputs) {
   if (inputs.size() != inputs_desc_.size()) {
     FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
             << ") should keep same with the inputs of this model("
             << inputs_desc_.size() << ")." << std::endl;
     return false;
   }
-
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto iter = inputs_order_.find(inputs[i].name);
     if (iter == inputs_order_.end()) {
@@ -152,12 +155,29 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
       return false;
     }
     auto tensor = predictor_->GetInput(iter->second);
-    tensor->Resize(inputs[i].shape);
-    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
-                                inputs[i].Nbytes(),
-                                paddle::lite_api::TargetType::kARM);
+    // Adjust dims only, allocate lazy.
+    tensor->Resize(inputs[i].shape);
+    if (inputs[i].dtype == FDDataType::FP32) {
+      tensor->CopyFromCpu<float, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const float*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT32) {
+      tensor->CopyFromCpu<int, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT8) {
+      tensor->CopyFromCpu<int8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::UINT8) {
+      tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const uint8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else {
+      FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
+    }
   }
 
   predictor_->Run();
+
   outputs->resize(outputs_desc_.size());
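Design note: replacing ShareExternalMemory with CopyFromCpu gives up zero-copy input binding for an explicit copy into Lite-owned memory, so the predictor no longer depends on the FDTensor buffer staying alive and suitably aligned across Run(). The four dtype branches follow one pattern; a hypothetical helper (illustrative name, not part of this commit) could collapse them, relying only on calls visible in the diff above:

// Hypothetical helper sketch; CopyInputFromCpu is an illustrative name.
template <typename T>
static void CopyInputFromCpu(paddle::lite_api::Tensor* tensor,
                             FDTensor& input) {
  tensor->CopyFromCpu<T, paddle::lite_api::TargetType::kARM>(
      reinterpret_cast<const T*>(input.CpuData()));
}

// e.g. inside the loop above: CopyInputFromCpu<float>(tensor.get(), inputs[i]);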