[Backend] Add Huawei Ascend NPU deployment using Paddle Lite CANN (#757)

* Add Huawei Ascend NPU deployment through Paddle Lite CANN

* Add NNAdapter interface for Paddle Lite

* Modify Huawei Ascend CMake

* Update way for compiling Huawei Ascend NPU deployment

* remove UseLiteBackend in UseCANN

* Support compiling Python wheel

* Change names of nnadapter API

* Add nnadapter pybind and remove useless API

* Support Python deployment on Huawei Ascend NPU

* Add model support for ascend

* Add PPOCR rec resize for ascend

* fix conflict for ascend

* Rename CANN to Ascend

* Rename CANN to Ascend

* Improve ascend

* fix ascend bug

* improve ascend docs

* improve ascend docs

* improve ascend docs

* Improve Ascend

* Improve Ascend

* Move ascend python demo

* Improve ascend

* Improve ascend

* Improve ascend

* Improve ascend

* Improve ascend

* Improve ascend

* Improve ascend

* Improve ascend

Authored by yunyaoXYY on 2022-12-26 10:18:34 +08:00, committed by GitHub.
Parent: 2d3d941372
Commit: d45382e3cc
42 changed files with 714 additions and 29 deletions


@@ -301,6 +301,11 @@ class RuntimeOption:
                    rknpu2_core=rknpu2.CoreMask.RKNN_NPU_CORE_0):
        return self._option.use_rknpu2(rknpu2_name, rknpu2_core)

    def use_ascend(self):
        """Inference with Huawei Ascend NPU
        """
        return self._option.use_ascend()

    def set_cpu_thread_num(self, thread_num=-1):
        """Set number of threads if inference with CPU
@@ -355,6 +360,46 @@ class RuntimeOption:
        """
        return self.use_lite_backend()

    def set_lite_device_names(self, device_names):
        """Set nnadapter device name for Paddle Lite backend.
        """
        return self._option.set_lite_device_names(device_names)

    def set_lite_context_properties(self, context_properties):
        """Set nnadapter context properties for Paddle Lite backend.
        """
        return self._option.set_lite_context_properties(context_properties)

    def set_lite_model_cache_dir(self, model_cache_dir):
        """Set nnadapter model cache dir for Paddle Lite backend.
        """
        return self._option.set_lite_model_cache_dir(model_cache_dir)

    def set_lite_dynamic_shape_info(self, dynamic_shape_info):
        """Set nnadapter dynamic shape info for Paddle Lite backend.
        """
        return self._option.set_lite_dynamic_shape_info(dynamic_shape_info)

    def set_lite_subgraph_partition_path(self, subgraph_partition_path):
        """Set nnadapter subgraph partition path for Paddle Lite backend.
        """
        return self._option.set_lite_subgraph_partition_path(
            subgraph_partition_path)

    def set_lite_subgraph_partition_config_buffer(self,
                                                  subgraph_partition_buffer):
        """Set nnadapter subgraph partition buffer for Paddle Lite backend.
        """
        return self._option.set_lite_subgraph_partition_config_buffer(
            subgraph_partition_buffer)

    def set_lite_mixed_precision_quantization_config_path(
            self, mixed_precision_quantization_config_path):
        """Set nnadapter mixed precision quantization config path for Paddle Lite backend.
        """
        return self._option.set_lite_mixed_precision_quantization_config_path(
            mixed_precision_quantization_config_path)

    def set_paddle_mkldnn(self, use_mkldnn=True):
        """Enable/Disable MKLDNN while using Paddle Inference backend, MKLDNN is enabled by default.
        """