mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 08:37:06 +08:00
[Backend] Add Huawei Ascend NPU deploy using PaddleLite CANN. (#757)
* Add Huawei Ascend NPU deploy through PaddleLite CANN * Add NNAdapter interface for paddlelite * Modify Huawei Ascend Cmake * Update way for compiling Huawei Ascend NPU deployment * remove UseLiteBackend in UseCANN * Support compile python whlee * Change names of nnadapter API * Add nnadapter pybind and remove useless API * Support Python deployment on Huawei Ascend NPU * Add models suppor for ascend * Add PPOCR rec reszie for ascend * fix conflict for ascend * Rename CANN to Ascend * Rename CANN to Ascend * Improve ascend * fix ascend bug * improve ascend docs * improve ascend docs * improve ascend docs * Improve Ascend * Improve Ascend * Move ascend python demo * Imporve ascend * Improve ascend * Improve ascend * Improve ascend * Improve ascend * Imporve ascend * Imporve ascend * Improve ascend
This commit is contained in:
@@ -301,6 +301,11 @@ class RuntimeOption:
|
||||
rknpu2_core=rknpu2.CoreMask.RKNN_NPU_CORE_0):
|
||||
return self._option.use_rknpu2(rknpu2_name, rknpu2_core)
|
||||
|
||||
def use_ascend(self):
    """Select Huawei Ascend NPU as the inference device.

    Simply forwards to the wrapped C++ runtime-option object and
    returns whatever it returns.
    """
    runtime_option = self._option
    return runtime_option.use_ascend()
|
||||
|
||||
def set_cpu_thread_num(self, thread_num=-1):
|
||||
"""Set number of threads if inference with CPU
|
||||
|
||||
@@ -355,6 +360,46 @@ class RuntimeOption:
|
||||
"""
|
||||
return self.use_lite_backend()
|
||||
|
||||
def set_lite_device_names(self, device_names):
    """Set the nnadapter device names used by the Paddle Lite backend.

    Forwards `device_names` unchanged to the wrapped option object.
    """
    opt = self._option
    return opt.set_lite_device_names(device_names)
|
||||
|
||||
def set_lite_context_properties(self, context_properties):
    """Set the nnadapter context properties for the Paddle Lite backend.

    Forwards `context_properties` unchanged to the wrapped option object.
    """
    opt = self._option
    return opt.set_lite_context_properties(context_properties)
|
||||
|
||||
def set_lite_model_cache_dir(self, model_cache_dir):
    """Set the nnadapter model cache directory for the Paddle Lite backend.

    Forwards `model_cache_dir` unchanged to the wrapped option object.
    """
    opt = self._option
    return opt.set_lite_model_cache_dir(model_cache_dir)
|
||||
|
||||
def set_lite_dynamic_shape_info(self, dynamic_shape_info):
    """Set the nnadapter dynamic-shape info for the Paddle Lite backend.

    Forwards `dynamic_shape_info` unchanged to the wrapped option object.
    """
    opt = self._option
    return opt.set_lite_dynamic_shape_info(dynamic_shape_info)
|
||||
|
||||
def set_lite_subgraph_partition_path(self, subgraph_partition_path):
    """Set the nnadapter subgraph-partition file path for the Paddle Lite
    backend.

    Forwards `subgraph_partition_path` unchanged to the wrapped option
    object.
    """
    opt = self._option
    return opt.set_lite_subgraph_partition_path(subgraph_partition_path)
|
||||
|
||||
def set_lite_subgraph_partition_config_buffer(self,
                                              subgraph_partition_buffer):
    """Set the nnadapter subgraph-partition config buffer for the Paddle
    Lite backend.

    Forwards `subgraph_partition_buffer` unchanged to the wrapped option
    object.
    """
    opt = self._option
    return opt.set_lite_subgraph_partition_config_buffer(
        subgraph_partition_buffer)
|
||||
|
||||
def set_lite_mixed_precision_quantization_config_path(
        self, mixed_precision_quantization_config_path):
    """Set the nnadapter mixed-precision quantization config path for the
    Paddle Lite backend.

    Forwards `mixed_precision_quantization_config_path` unchanged to the
    wrapped option object.
    """
    opt = self._option
    return opt.set_lite_mixed_precision_quantization_config_path(
        mixed_precision_quantization_config_path)
|
||||
|
||||
def set_paddle_mkldnn(self, use_mkldnn=True):
|
||||
"""Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default.
|
||||
"""
|
||||
|
@@ -508,6 +508,17 @@ class RecognizerPreprocessor:
|
||||
"""
|
||||
return self._preprocessor.run(input_ims)
|
||||
|
||||
@property
def static_shape(self):
    """Proxy for the underlying preprocessor's `static_shape` flag."""
    return self._preprocessor.static_shape


@static_shape.setter
def static_shape(self, value):
    # Only real booleans are accepted; forwarded to the C++ preprocessor.
    value_is_bool = isinstance(value, bool)
    assert value_is_bool, "The value to set `static_shape` must be type of bool."
    self._preprocessor.static_shape = value
|
||||
|
||||
@property
def is_scale(self):
    """Proxy for the underlying preprocessor's `is_scale` flag."""
    preprocessor = self._preprocessor
    return preprocessor.is_scale
|
||||
@@ -626,6 +637,17 @@ class Recognizer(FastDeployModel):
|
||||
def postprocessor(self, value):
|
||||
self._model.postprocessor = value
|
||||
|
||||
@property
def static_shape(self):
    """Proxy for the wrapped model's preprocessor `static_shape` flag."""
    return self._model.preprocessor.static_shape


@static_shape.setter
def static_shape(self, value):
    # Only real booleans are accepted; forwarded through the model's
    # preprocessor.
    value_is_bool = isinstance(value, bool)
    assert value_is_bool, "The value to set `static_shape` must be type of bool."
    self._model.preprocessor.static_shape = value
|
||||
|
||||
@property
def is_scale(self):
    """Proxy for the wrapped model's preprocessor `is_scale` flag."""
    preprocessor = self._model.preprocessor
    return preprocessor.is_scale
|
||||
|
@@ -56,6 +56,7 @@ if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
|
||||
# Build-time feature switches, each overridable via an environment
# variable of the same name; everything defaults to "OFF".
setup_configs = {}
setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND", "OFF")
setup_configs["WITH_ASCEND"] = os.getenv("WITH_ASCEND", "OFF")
setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND", "OFF")
|
||||
|
Reference in New Issue
Block a user