Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 09:07:10 +08:00
[Serving][Backend] Backend support zero_copy_infer and Serving reduce the output memory copy (#703)
* backend add zero copy infer interface
* fix bug
* fix bug
* fix bug
* paddle ipu
@@ -187,7 +187,8 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
 std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }
 
 bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
-                        std::vector<FDTensor>* outputs) {
+                        std::vector<FDTensor>* outputs,
+                        bool copy_to_fd) {
   if (inputs.size() != inputs_desc_.size()) {
     FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
             << ") should keep same with the inputs of this model("
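The new copy_to_fd flag lets the serving path skip the extra output memcpy: when it is false, the backend can hand back references to its own output buffers instead of copying them into caller-owned FDTensor storage. Below is a minimal, self-contained sketch of that idea; the simplified FDTensor struct and the helpers CopyFrom/ShareExternal are hypothetical stand-ins for illustration, not the actual FastDeploy API.

// Sketch only: illustrates a copy_to_fd switch between copying and zero-copy output.
// The simplified FDTensor here is NOT the real FastDeploy class.
#include <cstddef>
#include <vector>

struct FDTensor {                      // hypothetical, minimal stand-in
  void* external_data = nullptr;       // zero-copy path: points at backend-owned memory
  std::vector<char> owned_data;        // copy path: tensor owns the bytes

  // Copy nbytes from src into tensor-owned storage (costs one extra memcpy).
  void CopyFrom(const void* src, size_t nbytes) {
    const char* p = static_cast<const char*>(src);
    owned_data.assign(p, p + nbytes);
    external_data = nullptr;
  }
  // Zero-copy: just remember the backend's buffer, no memcpy.
  void ShareExternal(void* src) { external_data = src; }
};

bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs,
           bool copy_to_fd) {
  (void)inputs;                        // inputs are not used in this sketch
  // Pretend the backend wrote its result into this internal buffer.
  static char backend_buf[1024];
  const size_t nbytes = sizeof(backend_buf);

  outputs->resize(1);
  if (copy_to_fd) {
    (*outputs)[0].CopyFrom(backend_buf, nbytes);   // safe for callers that outlive the backend run
  } else {
    (*outputs)[0].ShareExternal(backend_buf);      // serving path: no output copy
  }
  return true;
}

The trade-off is lifetime: with copy_to_fd set to false the returned tensors are only valid while the backend's internal buffers are, which is presumably acceptable inside the serving runtime, where the response is serialized before the next Infer call.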