FastDeploy/fastdeploy/runtime/backends/ort/ort_backend.h
yunyaoXYY c38b7d4377 [Backend] Support onnxruntime DirectML inference. (#1304)
* Fix links in readme

* Update PPOCRv2/v3 examples

* Update auto compression configs

* Add new quantization support for PaddleClas model

* Update quantized YOLOv6s model download link

* Improve PPOCR comments

* Add English doc for quantization

* Fix PPOCR rec model bug

* Add new PaddleSeg quantization support

* Add Ascend model list

* Support DirectML in onnxruntime

* Remove DirectML vision model example

* Improve OnnxRuntime DirectML

* Fix OpenCV CMake on Windows

* Recheck code style
2023-02-17 10:53:51 +08:00

85 lines · 2.7 KiB · C++
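Note: the header below only pulls in dml_provider_factory.h when WITH_DIRECTML is defined; the actual hookup of the DirectML execution provider happens where the session options are built. As a rough sketch of what that enablement typically looks like against ONNX Runtime's public API (the helper function is hypothetical and not necessarily how FastDeploy wires it; the ORT calls themselves are real):

// Sketch only: enabling the DirectML execution provider on a session.
// EnableDirectML is an illustrative helper, not FastDeploy code.
#include "onnxruntime_cxx_api.h"
#ifdef WITH_DIRECTML
#include "dml_provider_factory.h"
#endif

void EnableDirectML(Ort::SessionOptions& session_options, int device_id) {
#ifdef WITH_DIRECTML
  // The DirectML EP requires memory pattern optimization to be disabled
  // and sequential execution mode.
  session_options.DisableMemPattern();
  session_options.SetExecutionMode(ORT_SEQUENTIAL);
  Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_DML(
      session_options, device_id));
#endif
}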

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "fastdeploy/runtime/backends/backend.h"
#include "fastdeploy/runtime/backends/ort/option.h"
#include "onnxruntime_cxx_api.h"  // NOLINT

#ifdef WITH_DIRECTML
#include "dml_provider_factory.h"  // NOLINT
#endif

namespace fastdeploy {

// Describes one model input or output as reported by ONNX Runtime.
struct OrtValueInfo {
  std::string name;
  std::vector<int64_t> shape;
  ONNXTensorElementDataType dtype;
};

// ONNX Runtime implementation of BaseBackend.
class OrtBackend : public BaseBackend {
 public:
  OrtBackend() {}
  virtual ~OrtBackend() = default;

  // Translates an OrtBackendOption into ONNX Runtime session options.
  bool BuildOption(const OrtBackendOption& option);

  bool Init(const RuntimeOption& option);

  // Runs inference. When copy_to_fd is true, outputs are copied into
  // FDTensor-owned memory; otherwise the FDTensors reference the
  // backend's buffers.
  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs,
             bool copy_to_fd = true) override;

  int NumInputs() const override { return inputs_desc_.size(); }
  int NumOutputs() const override { return outputs_desc_.size(); }

  TensorInfo GetInputInfo(int index) override;
  TensorInfo GetOutputInfo(int index) override;
  std::vector<TensorInfo> GetInputInfos() override;
  std::vector<TensorInfo> GetOutputInfos() override;

  static std::vector<OrtCustomOp*> custom_operators_;
  void InitCustomOperators();

 private:
  bool InitFromPaddle(const std::string& model_buffer,
                      const std::string& params_buffer,
                      const OrtBackendOption& option = OrtBackendOption(),
                      bool verbose = false);

  bool InitFromOnnx(const std::string& model_buffer,
                    const OrtBackendOption& option = OrtBackendOption());

  Ort::Env env_;
  Ort::Session session_{nullptr};
  Ort::SessionOptions session_options_;
  std::shared_ptr<Ort::IoBinding> binding_;
  std::vector<OrtValueInfo> inputs_desc_;
  std::vector<OrtValueInfo> outputs_desc_;

#ifndef NON_64_PLATFORM
  // Paddle custom ops are only registered on 64-bit platforms.
  Ort::CustomOpDomain custom_op_domain_ = Ort::CustomOpDomain("Paddle");
#endif

  OrtBackendOption option_;

  void OrtValueToFDTensor(const Ort::Value& value, FDTensor* tensor,
                          const std::string& name, bool copy_to_fd);
};

}  // namespace fastdeploy
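For context, a minimal sketch of how this interface could be driven directly. Real deployments normally go through fastdeploy::Runtime rather than the backend class; the option setters below follow FastDeploy's public RuntimeOption API, "model.onnx" is a placeholder, and tensor preparation is elided:

// Sketch only: exercising OrtBackend through the interface declared above.
#include <vector>
#include "fastdeploy/runtime/backends/ort/ort_backend.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseOrtBackend();
  option.SetModelPath("model.onnx", "", fastdeploy::ModelFormat::ONNX);

  fastdeploy::OrtBackend backend;
  if (!backend.Init(option)) return -1;

  // Inspect the I/O contract the backend discovered from the model.
  for (int i = 0; i < backend.NumInputs(); ++i) {
    fastdeploy::TensorInfo info = backend.GetInputInfo(i);
    // info.name / info.shape / info.dtype describe the expected input.
  }

  std::vector<fastdeploy::FDTensor> inputs;  // fill per GetInputInfo()
  std::vector<fastdeploy::FDTensor> outputs;
  if (!backend.Infer(inputs, &outputs)) return -1;
  return 0;
}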