Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-06 00:57:33 +08:00)
[CVCUDA] Utilize CV-CUDA batch processing function (#1223)
* norm and permute batch processing
* move cache to mat, batch processors
* get batched tensor logic, resize on cpu logic
* fix cpu compile error
* remove vector mat api
* nits
* add comments
* nits
* fix batch size
* move initial resize on cpu option to use_cuda api
* fix pybind
* processor manager pybind
* rename mat and matbatch
* move initial resize on cpu to ppcls preprocessor

Co-authored-by: Jason <jiangjiajun@baidu.com>
@@ -16,44 +16,40 @@ from __future__ import absolute_import
 import logging
 from .... import FastDeployModel, ModelFormat
 from .... import c_lib_wrap as C
+from ...common import ProcessorManager
 
 
-class PaddleClasPreprocessor:
+class PaddleClasPreprocessor(ProcessorManager):
     def __init__(self, config_file):
         """Create a preprocessor for PaddleClasModel from configuration file
 
         :param config_file: (str)Path of configuration file, e.g resnet50/inference_cls.yaml
         """
-        self._preprocessor = C.vision.classification.PaddleClasPreprocessor(
+        super(PaddleClasPreprocessor, self).__init__()
+        self._manager = C.vision.classification.PaddleClasPreprocessor(
             config_file)
 
-    def run(self, input_ims):
-        """Preprocess input images for PaddleClasModel
-
-        :param: input_ims: (list of numpy.ndarray)The input image
-        :return: list of FDTensor
-        """
-        return self._preprocessor.run(input_ims)
-
-    def use_cuda(self, enable_cv_cuda=False, gpu_id=-1):
-        """Use CUDA preprocessors
-
-        :param: enable_cv_cuda: Whether to enable CV-CUDA
-        :param: gpu_id: GPU device id
-        """
-        return self._preprocessor.use_cuda(enable_cv_cuda, gpu_id)
-
     def disable_normalize(self):
         """
         This function will disable normalize in preprocessing step.
         """
-        self._preprocessor.disable_normalize()
+        self._manager.disable_normalize()
 
     def disable_permute(self):
         """
         This function will disable hwc2chw in preprocessing step.
         """
-        self._preprocessor.disable_permute()
+        self._manager.disable_permute()
 
+    def initial_resize_on_cpu(self, v):
+        """
+        When the initial operator is Resize, and input image size is large,
+        maybe it's better to run resize on CPU, because the HostToDevice memcpy
+        is time consuming. Set this True to run the initial resize on CPU.
+
+        :param: v: True or False
+        """
+        self._manager.initial_resize_on_cpu(v)
+
 
 class PaddleClasPostprocessor:
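For reference, a minimal usage sketch of the reworked preprocessor API. The run(), use_cuda() and initial_resize_on_cpu() methods and their parameters come from the hunk above; the top-level fastdeploy import path, the config path and the image files are illustrative assumptions.

import cv2
import fastdeploy as fd

# Hypothetical config path; any exported PaddleClas inference_cls.yaml would be used here.
preprocessor = fd.vision.classification.PaddleClasPreprocessor(
    "resnet50/inference_cls.yaml")

# Run preprocessing on the GPU; enable_cv_cuda=True additionally uses CV-CUDA,
# which can normalize and permute the whole batch in a single call.
preprocessor.use_cuda(enable_cv_cuda=True, gpu_id=0)

# Optional: keep the initial Resize on the CPU so large images are shrunk
# before the HostToDevice memcpy instead of after it.
preprocessor.initial_resize_on_cpu(True)

# Preprocess a batch of images; the result is a list of FDTensor.
ims = [cv2.imread("img1.jpg"), cv2.imread("img2.jpg")]
outputs = preprocessor.run(ims)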
python/fastdeploy/vision/common/__init__.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+from .manager import ProcessorManager
python/fastdeploy/vision/common/manager.py (new file, 36 lines)
@@ -0,0 +1,36 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+
+class ProcessorManager:
+    def __init__(self):
+        self._manager = None
+
+    def run(self, input_ims):
+        """Process input image
+
+        :param: input_ims: (list of numpy.ndarray) The input images
+        :return: list of FDTensor
+        """
+        return self._manager.run(input_ims)
+
+    def use_cuda(self, enable_cv_cuda=False, gpu_id=-1):
+        """Use CUDA processors
+
+        :param: enable_cv_cuda: True: use CV-CUDA, False: use CUDA only
+        :param: gpu_id: GPU device id
+        """
+        return self._manager.use_cuda(enable_cv_cuda, gpu_id)
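The base class above captures the shared run()/use_cuda() plumbing; a subclass only has to construct the underlying C++ manager, exactly as PaddleClasPreprocessor does in the first hunk. A minimal sketch of that pattern follows; the subclass name is hypothetical, and the absolute imports mirror the relative ones used in the diff.

from fastdeploy import c_lib_wrap as C
from fastdeploy.vision.common import ProcessorManager


class MyClasPreprocessor(ProcessorManager):
    """Hypothetical preprocessor built on ProcessorManager, for illustration only."""

    def __init__(self, config_file):
        super(MyClasPreprocessor, self).__init__()
        # Assign the underlying C++ processor manager; PaddleClasPreprocessor
        # is the manager this commit wires up, reused here purely as an example.
        self._manager = C.vision.classification.PaddleClasPreprocessor(config_file)


# run() and use_cuda() are inherited from ProcessorManager:
# preproc = MyClasPreprocessor("resnet50/inference_cls.yaml")
# preproc.use_cuda(enable_cv_cuda=True, gpu_id=0)
# tensors = preproc.run([some_numpy_image])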