Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 00:57:33 +08:00

* Add poros backend * Add torch lib * Add python3 lib * set c++ 14 for poros * fixed bugs * fixed grammer bugs * fixed grammer bugs * fixed code bugs * fixed code bugs * fixed CreatePorosValue bug * Add AtType2String for Log * fixed trt_option * fixed poros.cmake path * fixed grammer bug * fixed grammer bug * fixed ambiguous reference * fixed ambiguous reference * fixed reference error * fixed include files * rm ENABLE_TRT_BACKEND in poros * update CMakeLists.txt * fixed CMakeLists.txt * Add libtorch.so in CMakeLists.txt * Fixed CMakeLists.txt * Fixed CMakeLists.txt * Fixed copy bug * Fixed copy bug * Fixed copy bug * Fixed Cmake * Fixed Cmake * debug * debug * debug * debug * debug * debug * debug utils * debug utils * copy to cpu * rm log info * test share mem * test share mem * test share mem * test multi outputs * test multi outputs * test multi outputs * test multi outputs * test multi outputs * test multi outputs * test multi outputs * time cost * time cost * fixed bug * time collect * mem copy * mem copy * rm time log * rm share mem * fixed multi inputs bug * add set_input_dtypes func * add SetInputDtypes * fixed bug * fixed bug * fixed prewarm data order * debug * debug * debug * debug * debug * debug * debug * debug * debug * debug * debug * fixed bug * Add compile func * Add compile func * Add compile func * Add is_dynamic option * Add is_dynamic option * Add is_dynamic option * Add is_dynamic option * rm infer log * add cuda11.6 poros lib * fixed bug * fixed bug * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * fixed multi outputs * rm logs * test * test * test * add test log * add test log * add test log * add test log * support cpu * support cpu * support cpu * support cpu * support member variable definition * rm useless log * fixed name * resolve conflict * resolve conflict * resolve conflict * fixed cmake * add GetInputInfos&GetOutputInfos * add GetInputInfos&GetOutputInfos * fixed bug * fixed runtime.py * add compile func * add np * deal with comments * rm to_inter func * add property
85 lines
2.4 KiB
C++
Executable File
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>

// from pytorch
#include "torch/script.h"
#include "torch/csrc/jit/ir/ir.h"
#include "ATen/core/interned_strings.h"

#include "plugin_create.h"

namespace baidu {
namespace mirana {
namespace poros {

/**
 * the base engine class
 * every registered engine should inherit from this IEngine
 **/
struct PorosGraph {
    torch::jit::Graph* graph = NULL;
    torch::jit::Node* node = NULL;
};

typedef uint64_t EngineID;

class IEngine : public IPlugin, public torch::CustomClassHolder {
public:
    virtual ~IEngine() {}

    /**
     * @brief init; initialization is considered successful only if init succeeds
     * @return int
     * @retval 0 => success, <0 => fail
     **/
    virtual int init() = 0;

    /**
     * @brief During compilation, convert the subgraph into the corresponding engine's graph structure and store it inside the engine, so that it can be run by excute_engine at runtime
     * @param [in] sub_graph : subgraph
     * @return [res]int
     * @retval 0 => success, <0 => fail
     **/
    virtual int transform(const PorosGraph& sub_graph) = 0;

    /**
     * @brief Runtime execution logic for the subgraph
     * @param [in] inputs : input tensors
     * @return [res] output tensors
     **/
    virtual std::vector<at::Tensor> excute_engine(const std::vector<at::Tensor>& inputs) = 0;

    virtual void register_module_attribute(const std::string& name, torch::jit::Module& module) = 0;

    // Identifier of the engine
    virtual const std::string who_am_i() = 0;

    // Whether the node is supported by the current engine
    bool is_node_supported(const torch::jit::Node* node);

public:
    std::pair<uint64_t, uint64_t> _num_io;  // Number of input/output parameters
    EngineID _id;
};

} // namespace poros
} // namespace mirana
} // namespace baidu
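
For illustration only (not part of this header): a minimal sketch of how a concrete engine might derive from IEngine. It assumes this header is included as "iengine.h" and that the IPlugin base from plugin_create.h adds no further pure-virtual methods; the class name MyEngine and all method bodies are hypothetical placeholders, not the real Poros TensorRT engine.

// NOTE: illustrative sketch only, not part of the Poros sources.
#include <string>
#include <vector>

#include "iengine.h"  // assumed include path for this header

namespace baidu {
namespace mirana {
namespace poros {

class MyEngine : public IEngine {
public:
    // Resource setup; return 0 only when the engine is ready to use.
    int init() override { return 0; }

    // Convert the TorchScript subgraph into this engine's own graph
    // representation and keep it for later execution.
    int transform(const PorosGraph& sub_graph) override {
        if (sub_graph.graph == nullptr) {
            return -1;  // nothing to compile
        }
        _num_io = {sub_graph.graph->inputs().size(), sub_graph.graph->outputs().size()};
        // ... build and store the engine-internal graph here ...
        return 0;
    }

    // Run the compiled subgraph; this placeholder just echoes its inputs.
    std::vector<at::Tensor> excute_engine(const std::vector<at::Tensor>& inputs) override {
        return inputs;
    }

    // A real engine would attach itself to the module here so that the
    // serialized TorchScript module can find the compiled engine again;
    // left as a no-op in this sketch.
    void register_module_attribute(const std::string& name,
                                   torch::jit::Module& module) override {
        (void)name;
        (void)module;
    }

    const std::string who_am_i() override { return "MyEngine"; }
};

} // namespace poros
} // namespace mirana
} // namespace baidu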