Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-04 16:22:57 +08:00

* [cmake] support Android arm64-v8a & armeabi-v7a native c++ sdk
* [cmake] fixed patchelf download on mac and android
* [lite] Add threads and power_mode option support
* [pybind] update runtime pybind for lite power mode
* [python] Add set_lite_power_mode api to runtime
* [Lite] add lite enable_fp16 option support
* [lite] add more options for lite backend.
* [cmake] fixed Paddle Lite typo
* [runtime] format LitePowerMode enum comments
* [runtime] format lite option comments
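The commits above wire thread-count, power-mode, and FP16 switches into the Paddle Lite backend declared in the header below. As a minimal sketch (not part of the repository), the new options might be filled in and handed to the backend like this; the include path and the model file names are assumptions, while the option fields and method signatures come from the header itself:

#include "fastdeploy/backends/lite/lite_backend.h"  // assumed header path

int main() {
  fastdeploy::LiteBackendOption option;
  option.threads = 4;         // number of CPU threads for inference
  option.power_mode = 0;      // 0 == LITE_POWER_HIGH (default is 3, LITE_POWER_NO_BIND)
  option.enable_fp16 = true;  // enable FP16 kernels where the device supports them

  fastdeploy::LiteBackend backend;
  // Hypothetical model paths; InitFromPaddle is declared in the header below.
  bool ok = backend.InitFromPaddle("model.pdmodel", "model.pdiparams", option);
  return ok ? 0 : 1;
}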
80 lines · 2.5 KiB · C++
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "fastdeploy/backends/backend.h"
#include "paddle_api.h"  // NOLINT

namespace fastdeploy {

struct LiteBackendOption {
  // Number of CPU threads used for inference
  int threads = 1;
  // Paddle Lite power mode:
  // 0: LITE_POWER_HIGH
  // 1: LITE_POWER_LOW
  // 2: LITE_POWER_FULL
  // 3: LITE_POWER_NO_BIND
  // 4: LITE_POWER_RAND_HIGH
  // 5: LITE_POWER_RAND_LOW
  int power_mode = 3;
  // Enable FP16 inference if the hardware supports it
  bool enable_fp16 = false;
  // Directory to save the optimized model produced by CxxConfig
  std::string optimized_model_dir = "";
  // TODO(qiuyanjun): support more options for lite backend,
  // such as different device targets (kARM/kXPU/kNPU/...).
};

// Convert data type from Paddle Lite to FastDeploy
FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype);

class LiteBackend : public BaseBackend {
 public:
  LiteBackend() {}
  virtual ~LiteBackend() = default;

  void BuildOption(const LiteBackendOption& option);

  bool InitFromPaddle(const std::string& model_file,
                      const std::string& params_file,
                      const LiteBackendOption& option = LiteBackendOption());

  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);

  int NumInputs() const { return inputs_desc_.size(); }

  int NumOutputs() const { return outputs_desc_.size(); }

  TensorInfo GetInputInfo(int index);
  TensorInfo GetOutputInfo(int index);
  std::vector<TensorInfo> GetInputInfos() override;
  std::vector<TensorInfo> GetOutputInfos() override;

 private:
  paddle::lite_api::CxxConfig config_;
  std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor_;
  std::vector<TensorInfo> inputs_desc_;
  std::vector<TensorInfo> outputs_desc_;
  std::map<std::string, int> inputs_order_;
  LiteBackendOption option_;
  bool supported_fp16_ = false;
};

} // namespace fastdeploy
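For completeness, a hedged sketch of driving the class above once it has been initialized (for example as in the snippet near the top of this page). Only declarations visible in this header (NumInputs, GetInputInfo, Infer, FDTensor, TensorInfo) are used; the include path is an assumption, and how an FDTensor is actually populated depends on the API pulled in through fastdeploy/backends/backend.h, so it is only indicated by a comment:

#include <iostream>
#include <vector>

#include "fastdeploy/backends/lite/lite_backend.h"  // assumed header path

// Runs one forward pass on an already-initialized LiteBackend.
bool RunOnce(fastdeploy::LiteBackend* backend) {
  std::vector<fastdeploy::FDTensor> inputs(backend->NumInputs());
  for (int i = 0; i < backend->NumInputs(); ++i) {
    const fastdeploy::TensorInfo info = backend->GetInputInfo(i);
    // Populate inputs[i] according to `info`; FDTensor comes in via the
    // fastdeploy/backends/backend.h include above, and its fill API is
    // not shown in this header.
  }
  std::vector<fastdeploy::FDTensor> outputs;
  if (!backend->Infer(inputs, &outputs)) {
    std::cerr << "Paddle Lite inference failed" << std::endl;
    return false;
  }
  std::cout << "Received " << outputs.size() << " output tensors" << std::endl;
  return true;
}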