[Build] Support build with source code of Paddle2ONNX (#1559)

* Add notes for tensors

* Optimize some APIs

* move some warnings

* Support build with Paddle2ONNX

* Add protobuf support

* Fix compile on mac

* add clean package script

* Add paddle2onnx code

* remove submodule

* Add onnx code

* remove softlink

* add onnx code

* fix error

* Add cmake file

* fix patchelf

* update paddle2onnx

* Delete .gitmodules

---------

Co-authored-by: PaddleCI <paddle_ci@example.com>
Co-authored-by: pangyoki <pangyoki@126.com>
Co-authored-by: jiangjiajun <jiangjiajun@baidu.com>
Author: Jason
Date: 2023-03-17 10:03:22 +08:00
Committed by: GitHub
Parent: f568c59698
Commit: 6343b0db47
5052 changed files with 222092 additions and 32 deletions

CMakeLists.txt (modified)

@@ -45,8 +45,10 @@ if(NOT MSVC)
   endif()
 endif(NOT MSVC)
+include(${PROJECT_SOURCE_DIR}/cmake/build_tools.cmake)
 if(UNIX AND (NOT APPLE) AND (NOT ANDROID) AND (NOT WITH_TIMVX))
-  include(${PROJECT_SOURCE_DIR}/cmake/patchelf.cmake)
+  download_patchelf()
+  set(PATCHELF_EXE ${THIRD_PARTY_PATH}/patchelf/bin/patchelf)
 endif()
@@ -423,9 +425,17 @@ if(ENABLE_ENCRYPTION)
 endif()
 if(ENABLE_PADDLE2ONNX)
+  set(BUILD_PADDLE2ONNX ON)
   add_definitions(-DENABLE_PADDLE2ONNX)
+  if(BUILD_PADDLE2ONNX)
+    download_protobuf()
+    include(${PROJECT_SOURCE_DIR}/cmake/build_paddle2onnx.cmake)
+    list(APPEND ALL_DEPLOY_SRCS ${PADDLE2ONNX_ALL_SRCS})
+    list(APPEND DEPEND_LIBS p2o_paddle_proto onnx)
+  else()
     include(${PROJECT_SOURCE_DIR}/cmake/paddle2onnx.cmake)
     list(APPEND DEPEND_LIBS external_paddle2onnx)
+  endif()
 endif(ENABLE_PADDLE2ONNX)
 if(WITH_CAPI)

FastDeploy.cmake.in (modified)

@@ -30,6 +30,7 @@ set(ENABLE_OPENVINO_BACKEND @ENABLE_OPENVINO_BACKEND@)
 set(ENABLE_POROS_BACKEND @ENABLE_POROS_BACKEND@)
 set(ENABLE_TRT_BACKEND @ENABLE_TRT_BACKEND@)
 set(ENABLE_PADDLE2ONNX @ENABLE_PADDLE2ONNX@)
+set(BUILD_PADDLE2ONNX @BUILD_PADDLE2ONNX@)
 set(ENABLE_VISION @ENABLE_VISION@)
 set(ENABLE_FLYCV @ENABLE_FLYCV@)
@@ -346,8 +347,10 @@ if(ENABLE_PADDLE2ONNX)
   if(ANDROID)
     message(FATAL_ERROR "Not support fastdeploy-paddle2onnx APIs with Android now!")
   endif()
+  if(NOT BUILD_PADDLE2ONNX)
     find_library(PADDLE2ONNX_LIB paddle2onnx ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle2onnx/lib NO_DEFAULT_PATH)
     list(APPEND FASTDEPLOY_LIBS ${PADDLE2ONNX_LIB})
+  endif()
 endif()
 if(WITH_KUNLUNXIN)

cmake/build_paddle2onnx.cmake (new file, 41 lines)

@@ -0,0 +1,41 @@
add_definitions(-DMAX_ONNX_OPSET_VERSION=16)
add_definitions(-DPADDLE2ONNX_LIB)

# Third dependency: onnx
if(NOT TARGET onnx_proto)
  if(NOT ONNX_NAMESPACE)
    set(ONNX_NAMESPACE "paddle2onnx")
  endif()
  add_definitions("-DONNX_NAMESPACE=${ONNX_NAMESPACE}")
  set(MSVC_STATIC_CRT ON)
  if(ONNX_CUSTOM_PROTOC_PATH)
    if(WIN32)
      if(MSVC_STATIC_CRT)
        # MT
        set(ONNX_USE_MSVC_STATIC_RUNTIME ON)
      else()
        # MD
        set(ONNX_USE_MSVC_STATIC_RUNTIME OFF)
      endif()
      set(ONNX_CUSTOM_PROTOC_PATH "${ONNX_CUSTOM_PROTOC_PATH};$ENV{PATH}")
    else()
      set(ONNX_CUSTOM_PROTOC_PATH "${ONNX_CUSTOM_PROTOC_PATH}:$ENV{PATH}")
    endif()
    set(ENV{PATH} ${ONNX_CUSTOM_PROTOC_PATH})
  endif()
  set(CMAKE_POSITION_INDEPENDENT_CODE ON)
  add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/onnx)
endif()

include_directories(${PROJECT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR}/third_party/onnx)
include_directories(${PROJECT_SOURCE_DIR}/third_party/optimizer)

add_subdirectory(${PROJECT_SOURCE_DIR}/paddle2onnx/proto)

file(GLOB_RECURSE PADDLE2ONNX_ALL_SRCS ${PROJECT_SOURCE_DIR}/paddle2onnx/*.cc ${PROJECT_SOURCE_DIR}/third_party/optimizer/onnxoptimizer/*.cc)
list(REMOVE_ITEM PADDLE2ONNX_ALL_SRCS ${PROJECT_SOURCE_DIR}/paddle2onnx/cpp2py_export.cc ${PROJECT_SOURCE_DIR}/third_party/optimizer/onnxoptimizer/cpp2py_export.cc)

cmake/build_tools.cmake (new file, 57 lines)

@@ -0,0 +1,57 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function(download_patchelf)
  if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
    set(PATCHELF_EXE "patchelf")
    if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/patchelf-0.15.0-aarch64.tar.gz)
      download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/patchelf-0.15.0-aarch64.tar.gz ${THIRD_PARTY_PATH}/patchelf)
    else()
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/patchelf-0.15.0-x86_64.tar.gz)
      download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/patchelf-0.15.0-x86_64.tar.gz ${THIRD_PARTY_PATH}/patchelf)
    endif()
  endif()
endfunction()

function(download_protobuf)
  if(WIN32)
    if(NOT CMAKE_CL_64)
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/protobuf-win-x86-3.16.0.zip)
    else()
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/protobuf-win-x64-3.16.0.zip)
    endif()
    set(ORIGIN_ENV_PATH "$ENV{PATH}")
    download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/protobuf-win-3.16.0.tgz ${THIRD_PARTY_PATH}/protobuf)
    set(ENV{PATH} "${THIRD_PARTY_PATH}\\protobuf\\bin;${ORIGIN_ENV_PATH}")
  elseif(APPLE)
    if(CURRENT_OSX_ARCH MATCHES "arm64")
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/protobuf-osx-arm64-3.16.0.tgz)
    else()
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/protobuf-osx-x86_64-3.16.0.tgz)
    endif()
    set(ORIGIN_ENV_PATH "$ENV{PATH}")
    download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/protobuf-osx-3.16.0.tgz ${THIRD_PARTY_PATH}/protobuf)
    set(ENV{PATH} "${THIRD_PARTY_PATH}/protobuf/bin/:${ORIGIN_ENV_PATH}")
  else()
    if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/protobuf-linux-aarch64-3.16.0.tgz)
    else()
      set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/protobuf-linux-x64-3.16.0.tgz)
    endif()
    set(ORIGIN_ENV_PATH "$ENV{PATH}")
    download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/protobuf-linux-3.16.0.tgz ${THIRD_PARTY_PATH}/protobuf)
    set(ENV{PATH} "${THIRD_PARTY_PATH}/protobuf/bin/:${ORIGIN_ENV_PATH}")
  endif()
endfunction()


@@ -134,6 +134,7 @@ else()
 endif(PADDLEINFERENCE_DIRECTORY)
 if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
+  message("?????????????? ${PATCHELF_EXE}")
   add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "PATCHELF_EXE=${PATCHELF_EXE} python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME})
 endif()

cmake/patchelf.cmake (deleted file)

@@ -1,26 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
  set(PATCHELF_EXE "patchelf")
  if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
    set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/patchelf-0.15.0-aarch64.tar.gz)
    download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/patchelf-0.15.0-aarch64.tar.gz ${THIRD_PARTY_PATH}/patchelf)
    set(PATCHELF_EXE ${THIRD_PARTY_PATH}/patchelf/bin/patchelf)
  else()
    set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/patchelf-0.15.0-x86_64.tar.gz)
    download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/patchelf-0.15.0-x86_64.tar.gz ${THIRD_PARTY_PATH}/patchelf)
    set(PATCHELF_EXE ${THIRD_PARTY_PATH}/patchelf/bin/patchelf)
  endif()
endif()


@@ -27,7 +27,6 @@ int main(int argc, char* argv[]) {
   // https://baidu-paddle.github.io/fastdeploy-api/cpp/html/structfastdeploy_1_1RuntimeOption.html
   fd::RuntimeOption runtime_option;
   runtime_option.SetModelPath(model_file, params_file);
-  runtime_option.UsePaddleInferBackend();
   runtime_option.UseCpu();
   // If need to configure Paddle Inference backend for more option, we can configure runtime_option.paddle_infer_option

paddle2onnx/__init__.py (new executable file, 65 lines)

@@ -0,0 +1,65 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle2onnx.utils import logging
from . import command
from .convert import dygraph2onnx
from .convert import program2onnx
from .version import version
from .version import git_version
__version__ = version
__commit_id__ = git_version
def run_convert(model, input_shape_dict=None, scope=None, opset_version=9):
logging.warning(
"[Deprecated] `paddle2onnx.run_convert` will be deprecated in the future version, the recommended usage is `paddle2onnx.export`"
)
from paddle2onnx.legacy import run_convert
return run_convert(model, input_shape_dict, scope, opset_version)
def export(model_file,
params_file="",
save_file=None,
opset_version=11,
auto_upgrade_opset=True,
verbose=True,
enable_onnx_checker=True,
enable_experimental_op=True,
enable_optimize=True,
custom_op_info=None,
deploy_backend="onnxruntime",
calibration_file="",
external_file="",
export_fp16_model=False):
import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o
deploy_backend = deploy_backend.lower()
if custom_op_info is None:
onnx_model_str = c_p2o.export(
model_file, params_file, opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize, {},
deploy_backend, calibration_file, external_file, export_fp16_model)
else:
onnx_model_str = c_p2o.export(
model_file, params_file, opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize,
custom_op_info, deploy_backend, calibration_file, external_file,
export_fp16_model)
if save_file is not None:
with open(save_file, "wb") as f:
f.write(onnx_model_str)
else:
return onnx_model_str
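
The export() wrapper above forwards to the bundled C++ converter and returns the serialized ONNX proto when save_file is None. A minimal usage sketch, assuming a Paddle inference model at the placeholder paths inference/model.pdmodel and inference/model.pdiparams (not part of this commit):

# Sketch: calling the new-style export API defined above.
# "inference/model.pdmodel" and "inference/model.pdiparams" are placeholder paths.
import paddle2onnx

onnx_bytes = paddle2onnx.export(
    model_file="inference/model.pdmodel",
    params_file="inference/model.pdiparams",
    save_file=None,  # with save_file=None the serialized ONNX proto is returned
    opset_version=11,
    auto_upgrade_opset=True,
    enable_onnx_checker=True)

# The returned bytes can be written (or post-processed) by the caller.
with open("inference/model.onnx", "wb") as f:
    f.write(onnx_bytes)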

paddle2onnx/command.py (new executable file, 273 lines)

@@ -0,0 +1,273 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from six import text_type as _text_type
import argparse
import ast
import sys
import os
from paddle2onnx.utils import logging
def str2list(v):
if len(v) == 0:
return None
v = v.replace(" ", "")
v = eval(v)
return v
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
"-m",
type=_text_type,
default=None,
help="PaddlePaddle model directory, if params stored in single file, you need define '--model_filename' and 'params_filename'."
)
parser.add_argument(
"--model_filename",
"-mf",
type=_text_type,
default=None,
help="PaddlePaddle model's network file name, which under directory seted by --model_dir"
)
parser.add_argument(
"--params_filename",
"-pf",
type=_text_type,
default=None,
help="PaddlePaddle model's param file name(param files combined in single file), which under directory seted by --model_dir."
)
parser.add_argument(
"--save_file",
"-s",
type=_text_type,
default=None,
help="file path to save onnx model")
parser.add_argument(
"--opset_version",
"-ov",
type=int,
default=9,
help="set onnx opset version to export")
parser.add_argument(
"--input_shape_dict",
"-isd",
type=_text_type,
default="None",
help="define input shapes, e.g --input_shape_dict=\"{'image':[1, 3, 608, 608]}\" or" \
"--input_shape_dict=\"{'image':[1, 3, 608, 608], 'im_shape': [1, 2], 'scale_factor': [1, 2]}\"")
parser.add_argument(
"--enable_dev_version",
type=ast.literal_eval,
default=True,
help="whether to use new version of Paddle2ONNX which is under developing, default True"
)
parser.add_argument(
"--deploy_backend",
"-d",
type=_text_type,
default="onnxruntime",
choices=["onnxruntime", "tensorrt", "rknn", "others"],
help="Quantize model deploy backend, default onnxruntime.")
parser.add_argument(
"--save_calibration_file",
type=_text_type,
default="calibration.cache",
help="The calibration cache for TensorRT deploy, default calibration.cache."
)
parser.add_argument(
"--enable_onnx_checker",
type=ast.literal_eval,
default=True,
help="whether check onnx model validity, default True")
parser.add_argument(
"--enable_paddle_fallback",
type=ast.literal_eval,
default=False,
help="whether use PaddleFallback for custom op, default is False")
parser.add_argument(
"--version",
"-v",
action="store_true",
default=False,
help="get version of paddle2onnx")
parser.add_argument(
"--output_names",
"-on",
type=str2list,
default=None,
help="define output names, e.g --output_names=\"[\"output1\"]\" or \
--output_names=\"[\"output1\", \"output2\", \"output3\"]\" or \
--output_names=\"{\"Paddleoutput\":\"Onnxoutput\"}\"")
parser.add_argument(
"--enable_auto_update_opset",
type=ast.literal_eval,
default=True,
help="whether enable auto_update_opset, default is True")
parser.add_argument(
"--external_filename",
type=_text_type,
default=None,
help="The filename of external_data when the model is bigger than 2G.")
parser.add_argument(
"--export_fp16_model",
type=ast.literal_eval,
default=False,
help="Whether export FP16 model for ORT-GPU, default False")
return parser
def c_paddle_to_onnx(model_file,
params_file="",
save_file=None,
opset_version=7,
auto_upgrade_opset=True,
verbose=True,
enable_onnx_checker=True,
enable_experimental_op=True,
enable_optimize=True,
deploy_backend="onnxruntime",
calibration_file="",
external_file="",
export_fp16_model=False):
import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o
onnx_model_str = c_p2o.export(
model_file, params_file, opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize, {},
deploy_backend, calibration_file, external_file, export_fp16_model)
if save_file is not None:
with open(save_file, "wb") as f:
f.write(onnx_model_str)
else:
return onnx_model_str
def program2onnx(model_dir,
save_file,
model_filename=None,
params_filename=None,
opset_version=9,
enable_onnx_checker=False,
operator_export_type="ONNX",
input_shape_dict=None,
output_names=None,
auto_update_opset=True):
logging.warning(
"[Deprecated] `paddle2onnx.command.program2onnx` will be deprecated in the future version, the recommended usage is `paddle2onnx.export`"
)
from paddle2onnx.legacy.command import program2onnx
return program2onnx(model_dir, save_file, model_filename, params_filename,
opset_version, enable_onnx_checker,
operator_export_type, input_shape_dict, output_names,
auto_update_opset)
def main():
if len(sys.argv) < 2:
logging.info("Use \"paddle2onnx -h\" to print the help information")
logging.info(
"For more information, please follow our github repo below:")
logging.info("Github: https://github.com/PaddlePaddle/paddle2onnx.git")
return
parser = arg_parser()
args = parser.parse_args()
if args.version:
import paddle2onnx
logging.info("paddle2onnx-{} with python>=3.6, paddlepaddle>=2.0.0".
format(paddle2onnx.__version__))
return
assert args.model_dir is not None, "--model_dir should be defined while translating paddle model to onnx"
assert args.save_file is not None, "--save_file should be defined while translating paddle model to onnx"
input_shape_dict = eval(args.input_shape_dict)
operator_export_type = "ONNX"
if args.enable_paddle_fallback:
logging.warning(
"[Deprecated] The flag `--enable_paddle_fallback` will be deprecated, and only works while `--enable_dev_version False` now."
)
operator_export_type = "PaddleFallback"
if args.output_names is not None and args.enable_dev_version:
logging.warning(
"[Deprecated] The flag `--output_names` is deprecated, if you need to modify the output name, please refer to this tool https://github.com/jiangjiajun/PaddleUtils/tree/main/onnx "
)
if not isinstance(args.output_names, (list, dict)):
raise TypeError(
"The output_names should be 'list' or 'dict', but received type is %s."
% type(args.output_names))
if input_shape_dict is not None and args.enable_dev_version:
logging.warning(
"[Deprecated] The flag `--input_shape_dict` is deprecated, if you need to modify the input shape of PaddlePaddle model, please refer to this tool https://github.com/jiangjiajun/PaddleUtils/tree/main/paddle "
)
if args.enable_dev_version:
model_file = os.path.join(args.model_dir, args.model_filename)
if args.params_filename is None:
params_file = ""
else:
params_file = os.path.join(args.model_dir, args.params_filename)
if args.external_filename is None:
args.external_filename = "external_data"
base_path = os.path.dirname(args.save_file)
if base_path and not os.path.exists(base_path):
os.mkdir(base_path)
external_file = os.path.join(base_path, args.external_filename)
calibration_file = args.save_calibration_file
c_paddle_to_onnx(
model_file=model_file,
params_file=params_file,
save_file=args.save_file,
opset_version=args.opset_version,
auto_upgrade_opset=args.enable_auto_update_opset,
verbose=True,
enable_onnx_checker=args.enable_onnx_checker,
enable_experimental_op=True,
enable_optimize=True,
deploy_backend=args.deploy_backend,
calibration_file=calibration_file,
external_file=external_file,
export_fp16_model=args.export_fp16_model)
logging.info("===============Make PaddlePaddle Better!================")
logging.info("A little survey: https://iwenjuan.baidu.com/?code=r8hu2s")
return
program2onnx(
args.model_dir,
args.save_file,
args.model_filename,
args.params_filename,
opset_version=args.opset_version,
enable_onnx_checker=args.enable_onnx_checker,
operator_export_type=operator_export_type,
input_shape_dict=input_shape_dict,
output_names=args.output_names,
auto_update_opset=args.enable_auto_update_opset)
logging.info("===============Make PaddlePaddle Better!================")
logging.info("A little survey: https://iwenjuan.baidu.com/?code=r8hu2s")
if __name__ == "__main__":
main()
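
Given the argument parser above, the same conversion can be driven from the shell or programmatically. A sketch of the programmatic route, with placeholder model paths:

# Sketch: invoking the CLI entry point defined above from Python.
# Equivalent to: paddle2onnx -m inference -mf model.pdmodel -pf model.pdiparams -s model.onnx -ov 11
import sys
from paddle2onnx.command import main

sys.argv = [
    "paddle2onnx",
    "--model_dir", "inference",            # placeholder model directory
    "--model_filename", "model.pdmodel",
    "--params_filename", "model.pdiparams",
    "--save_file", "model.onnx",
    "--opset_version", "11",
]
main()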

paddle2onnx/convert.py (new executable file, 83 lines)

@@ -0,0 +1,83 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle2onnx.utils import logging
def export_onnx(paddle_graph,
save_file,
opset_version=9,
enable_onnx_checker=False,
operator_export_type="ONNX",
verbose=False,
auto_update_opset=True,
output_names=None):
from paddle2onnx.legacy.convert import export_onnx
return export_onnx(paddle_graph, save_file, opset_version, opset_version,
enable_onnx_checker, operator_export_type, verbose,
auto_update_opset, output_names)
def dygraph2onnx(layer, save_file, input_spec=None, opset_version=9, **configs):
if "enable_dev_version" in configs and not configs["enable_dev_version"]:
from paddle2onnx.legacy.convert import dygraph2onnx
return dygraph2onnx(layer, save_file, input_spec, opset_version,
**configs)
import os
import paddle2onnx
import paddle
dirname = os.path.split(save_file)[0]
paddle_model_dir = os.path.join(dirname,
"paddle_model_static_onnx_temp_dir")
model_file = os.path.join(paddle_model_dir, "model.pdmodel")
params_file = os.path.join(paddle_model_dir, "model.pdiparams")
if os.path.exists(paddle_model_dir):
if os.path.isfile(paddle_model_dir):
logging.info("File {} exists, will remove it.".format(
paddle_model_dir))
os.remove(paddle_model_dir)
if os.path.isfile(model_file):
os.remove(model_file)
if os.path.isfile(params_file):
os.remove(params_file)
paddle.jit.save(layer, os.path.join(paddle_model_dir, "model"), input_spec)
logging.info("Static PaddlePaddle model saved in {}.".format(
paddle_model_dir))
if not os.path.isfile(params_file):
params_file = ""
if save_file is None:
return paddle2onnx.export(model_file, params_file, save_file,
opset_version)
else:
paddle2onnx.export(model_file, params_file, save_file, opset_version)
logging.info("ONNX model saved in {}.".format(save_file))
def program2onnx(program,
scope,
save_file,
feed_var_names=None,
target_vars=None,
opset_version=9,
enable_onnx_checker=False,
operator_export_type="ONNX",
auto_update_opset=True,
**configs):
from paddle2onnx.legacy.convert import program2onnx
return program2onnx(program, scope, save_file, feed_var_names, target_vars,
opset_version, enable_onnx_checker,
operator_export_type, auto_update_opset, **configs)
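
dygraph2onnx above first materializes the dygraph Layer as a static model via paddle.jit.save in a temporary directory, then re-exports it through paddle2onnx.export. A sketch of calling it directly; the TinyNet layer and output path are illustrative only:

# Sketch: exporting a dygraph Layer through dygraph2onnx as defined above.
import paddle
from paddle2onnx.convert import dygraph2onnx

class TinyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(8, 4)

    def forward(self, x):
        return self.fc(x)

net = TinyNet()
net.eval()
# input_spec fixes the exported input signature for paddle.jit.save.
spec = [paddle.static.InputSpec(shape=[None, 8], dtype="float32", name="x")]
dygraph2onnx(net, "output/tiny_net.onnx", input_spec=spec, opset_version=11)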

paddle2onnx/convert_to_fp16.py (new executable file, 38 lines)

@@ -0,0 +1,38 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import sys
from paddle2onnx.utils import logging
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_model_path',
required=True,
help='The path of input onnx model file.')
parser.add_argument(
'--output_model_path',
required=True,
help='The file path to write optimized onnx model file.')
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o
c_p2o.convert_to_fp16(args.input_model_path, args.output_model_path)
logging.info("FP16 model saved in {}.".format(args.output_model_path))

paddle2onnx/converter.cc (new file, 287 lines)

@@ -0,0 +1,287 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/converter.h"
#include <fstream>
#include <iostream>
#include <set>
#include <string>
#include "paddle2onnx/mapper/exporter.h"
#include "paddle2onnx/optimizer/convert_fp32_to_fp16.h"
namespace paddle2onnx {
PADDLE2ONNX_DECL bool IsExportable(const char* model, const char* params,
int32_t opset_version,
bool auto_upgrade_opset, bool verbose,
bool enable_onnx_checker,
bool enable_experimental_op,
bool enable_optimize, CustomOp* ops,
int op_count, const char* deploy_backend) {
auto parser = PaddleParser();
if (!parser.Init(model, params)) {
return false;
}
paddle2onnx::ModelExporter me;
std::set<std::string> unsupported_ops;
if (!me.CheckIfOpSupported(parser, &unsupported_ops,
enable_experimental_op)) {
return false;
}
// Add custom operator information
if (ops != nullptr && op_count > 0) {
for (int i = 0; i < op_count; ++i) {
std::string op_name(ops[i].op_name, strlen(ops[i].op_name));
std::string export_op_name(ops[i].export_op_name,
strlen(ops[i].export_op_name));
if (export_op_name == "paddle2onnx_null") {
export_op_name = op_name;
}
me.custom_ops[op_name] = export_op_name;
}
}
if (me.GetMinOpset(parser, false) < 0) {
return false;
}
std::string calibration_str;
std::string onnx_model =
me.Run(parser, opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize,
deploy_backend, &calibration_str);
if (onnx_model.empty()) {
P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
return false;
}
if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
calibration_str.empty()) {
P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
"deploy backend when export quantize model."
<< std::endl;
return false;
}
return true;
}
PADDLE2ONNX_DECL bool IsExportable(const void* model_buffer, int model_size,
const void* params_buffer, int params_size,
int32_t opset_version,
bool auto_upgrade_opset, bool verbose,
bool enable_onnx_checker,
bool enable_experimental_op,
bool enable_optimize, CustomOp* ops,
int op_count, const char* deploy_backend) {
auto parser = PaddleParser();
if (!parser.Init(model_buffer, model_size, params_buffer, params_size)) {
return false;
}
paddle2onnx::ModelExporter me;
std::set<std::string> unsupported_ops;
if (!me.CheckIfOpSupported(parser, &unsupported_ops,
enable_experimental_op)) {
return false;
}
// Add custom operator information
if (ops != nullptr && op_count > 0) {
for (int i = 0; i < op_count; ++i) {
std::string op_name(ops[i].op_name, strlen(ops[i].op_name));
std::string export_op_name(ops[i].export_op_name,
strlen(ops[i].export_op_name));
if (export_op_name == "paddle2onnx_null") {
export_op_name = op_name;
}
me.custom_ops[op_name] = export_op_name;
}
}
if (me.GetMinOpset(parser, false) < 0) {
return false;
}
std::string calibration_str;
std::string onnx_model =
me.Run(parser, opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize,
deploy_backend, &calibration_str);
if (onnx_model.empty()) {
P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
return false;
}
if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
calibration_str.empty()) {
P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
"deploy backend when export quantize model."
<< std::endl;
return false;
}
return true;
}
PADDLE2ONNX_DECL bool Export(
const char* model, const char* params, char** out, int* out_size,
int32_t opset_version, bool auto_upgrade_opset, bool verbose,
bool enable_onnx_checker, bool enable_experimental_op, bool enable_optimize,
CustomOp* ops, int op_count, const char* deploy_backend,
char** calibration_cache, int* calibration_size, const char* external_file,
bool* save_external, bool export_fp16_model) {
auto parser = PaddleParser();
P2OLogger(verbose) << "Start to parsing Paddle model..." << std::endl;
if (!parser.Init(model, params)) {
P2OLogger(verbose) << "Paddle model parsing failed." << std::endl;
return false;
}
paddle2onnx::ModelExporter me;
// Add custom operator information
if (ops != nullptr && op_count > 0) {
for (int i = 0; i < op_count; ++i) {
std::string op_name(ops[i].op_name, strlen(ops[i].op_name));
std::string export_op_name(ops[i].export_op_name,
strlen(ops[i].export_op_name));
if (export_op_name == "paddle2onnx_null") {
export_op_name = op_name;
}
me.custom_ops[op_name] = export_op_name;
}
}
std::string calibration_str;
std::string result = me.Run(
parser, opset_version, auto_upgrade_opset, verbose, enable_onnx_checker,
enable_experimental_op, enable_optimize, deploy_backend, &calibration_str,
external_file, save_external, export_fp16_model);
if (result.empty()) {
P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
return false;
}
if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
calibration_str.empty()) {
P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
"deploy backend when export quantize model."
<< std::endl;
return false;
}
*out_size = result.size();
*out = new char[*out_size]();
memcpy(*out, result.data(), *out_size);
if (calibration_str.size()) {
*calibration_size = calibration_str.size();
*calibration_cache = new char[*calibration_size]();
memcpy(*calibration_cache, calibration_str.data(), *calibration_size);
}
return true;
}
PADDLE2ONNX_DECL bool Export(
const void* model_buffer, int64_t model_size, const void* params_buffer,
int64_t params_size, char** out, int* out_size, int32_t opset_version,
bool auto_upgrade_opset, bool verbose, bool enable_onnx_checker,
bool enable_experimental_op, bool enable_optimize, CustomOp* ops,
int op_count, const char* deploy_backend, char** calibration_cache,
int* calibration_size, const char* external_file, bool* save_external,
bool export_fp16_model) {
auto parser = PaddleParser();
P2OLogger(verbose) << "Start to parsing Paddle model..." << std::endl;
if (!parser.Init(model_buffer, model_size, params_buffer, params_size)) {
P2OLogger(verbose) << "Paddle model parsing failed." << std::endl;
return false;
}
paddle2onnx::ModelExporter me;
// Add custom operator information
if (ops != nullptr && op_count > 0) {
for (int i = 0; i < op_count; ++i) {
std::string op_name(ops[i].op_name, strlen(ops[i].op_name));
std::string export_op_name(ops[i].export_op_name,
strlen(ops[i].export_op_name));
if (export_op_name == "paddle2onnx_null") {
export_op_name = op_name;
}
me.custom_ops[op_name] = export_op_name;
}
}
std::string calibration_str;
std::string result = me.Run(
parser, opset_version, auto_upgrade_opset, verbose, enable_onnx_checker,
enable_experimental_op, enable_optimize, deploy_backend, &calibration_str,
external_file, save_external, export_fp16_model);
if (result.empty()) {
P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
return false;
}
if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
calibration_str.empty()) {
P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
"deploy backend when export quantize model."
<< std::endl;
return false;
}
*out_size = result.size();
*out = new char[*out_size]();
memcpy(*out, result.data(), *out_size);
if (calibration_str.size()) {
*calibration_size = calibration_str.size();
*calibration_cache = new char[*calibration_size]();
memcpy(*calibration_cache, calibration_str.data(), *calibration_size);
}
return true;
}
PADDLE2ONNX_DECL bool ConvertFP32ToFP16(const char* onnx_model, int model_size,
char** out_model, int* out_model_size) {
std::string onnx_proto(onnx_model, onnx_model + model_size);
ONNX_NAMESPACE::ModelProto model;
model.ParseFromString(onnx_proto);
P2OLogger(true) << "Convert FP32 ONNX model to FP16." << std::endl;
ConvertFp32ToFp16 convert;
convert.Convert(&model);
// save external data file for big model
std::string external_data_file;
if (model.ByteSizeLong() > INT_MAX) {
external_data_file = "external_data";
}
paddle2onnx::ModelExporter me;
if (external_data_file.size()) {
me.SaveExternalData(model.mutable_graph(), external_data_file);
}
// check model
me.ONNXChecker(model, true);
std::string result;
if (!model.SerializeToString(&result)) {
P2OLogger(true)
<< "Error happenedd while optimizing the exported ONNX model."
<< std::endl;
return false;
}
*out_model_size = result.size();
*out_model = new char[*out_model_size]();
memcpy(*out_model, result.data(), *out_model_size);
return true;
}
ModelTensorInfo::~ModelTensorInfo() {
if (shape != nullptr) {
delete[] shape;
shape = nullptr;
rank = 0;
}
}
} // namespace paddle2onnx

paddle2onnx/converter.h (new executable file, 130 lines)

@@ -0,0 +1,130 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#if defined(_WIN32)
#ifdef PADDLE2ONNX_LIB
#define PADDLE2ONNX_DECL __declspec(dllexport)
#else
#define PADDLE2ONNX_DECL __declspec(dllimport)
#endif // PADDLE2ONNX_LIB
#else
#define PADDLE2ONNX_DECL __attribute__((visibility("default")))
#endif // _WIN32
namespace paddle2onnx {
struct PADDLE2ONNX_DECL CustomOp {
char op_name[100] = "null";
// if export_op_name is set to "paddle2onnx_null",
// it will automatically be changed to `op_name`
char export_op_name[100] = "paddle2onnx_null";
};
PADDLE2ONNX_DECL bool IsExportable(
const char* model, const char* params, int32_t opset_version = 11,
bool auto_upgrade_opset = true, bool verbose = false,
bool enable_onnx_checker = true, bool enable_experimental_op = false,
bool enable_optimize = true, CustomOp* ops = nullptr, int op_count = 0,
const char* deploy_backend = "onnxruntime");
PADDLE2ONNX_DECL bool IsExportable(
const void* model_buffer, int model_size, const void* params_buffer,
int params_size, int32_t opset_version = 11, bool auto_upgrade_opset = true,
bool verbose = false, bool enable_onnx_checker = true,
bool enable_experimental_op = false, bool enable_optimize = true,
CustomOp* ops = nullptr, int op_count = 0,
const char* deploy_backend = "onnxruntime");
PADDLE2ONNX_DECL bool Export(
const char* model, const char* params, char** out, int* out_size,
int32_t opset_version = 11, bool auto_upgrade_opset = true,
bool verbose = false, bool enable_onnx_checker = true,
bool enable_experimental_op = false, bool enable_optimize = true,
CustomOp* ops = nullptr, int op_count = 0,
const char* deploy_backend = "onnxruntime",
char** calibration_cache = nullptr, int* calibration_size = 0,
const char* external_file = "", bool* save_external = nullptr,
bool export_fp16_model = false);
PADDLE2ONNX_DECL bool Export(
const void* model_buffer, int64_t model_size, const void* params_buffer,
int64_t params_size, char** out, int* out_size, int32_t opset_version = 11,
bool auto_upgrade_opset = true, bool verbose = false,
bool enable_onnx_checker = true, bool enable_experimental_op = false,
bool enable_optimize = true, CustomOp* ops = nullptr, int op_count = 0,
const char* deploy_backend = "onnxruntime",
char** calibration_cache = nullptr, int* calibration_size = 0,
const char* external_file = "", bool* save_external = nullptr,
bool export_fp16_model = false);
// The following are for internal use only and may be removed later
struct PADDLE2ONNX_DECL ModelTensorInfo {
char name[100] = "";
int64_t* shape = nullptr;
int32_t rank = 0;
// 0: float32
// 1: double
// 2: uint8
// 3: int8
// 4: int32
// 5: int64
// 6: float16
int32_t dtype = 0;
~ModelTensorInfo();
};
struct PADDLE2ONNX_DECL NMSParameters {
int64_t background_label = -1;
int64_t keep_top_k = 300;
float nms_eta = 1.0;
float nms_threshold = 0.7;
float score_threshold = 0.01;
int64_t nms_top_k = 10000;
bool normalized = true;
};
struct PADDLE2ONNX_DECL OnnxReader {
OnnxReader(const char* model_buffer, int buffer_size);
// suppose the maximum number of inputs/outputs is 100
// suppose the longest string of inputs/outputs is 200
// suppose the biggest rank will be less than 10
ModelTensorInfo inputs[100];
ModelTensorInfo outputs[100];
int num_inputs;
int num_outputs;
};
PADDLE2ONNX_DECL bool RemoveMultiClassNMS(const char* onnx_model,
int model_size, char** out_model,
int* out_model_size);
PADDLE2ONNX_DECL bool ConvertFP32ToFP16(const char* onnx_model, int model_size,
char** out_model, int* out_model_size);
struct PADDLE2ONNX_DECL PaddleReader {
PaddleReader(const char* model_buffer, int buffer_size);
// suppose the maximum number of inputs/outputs is 100
// suppose the longest string of inputs/outputs is 200
ModelTensorInfo inputs[100];
ModelTensorInfo outputs[100];
int num_inputs;
int num_outputs;
bool has_nms = false;
bool is_quantize_model = false;
NMSParameters nms_params;
};
} // namespace paddle2onnx

paddle2onnx/cpp2py_export.cc (new file, 128 lines)

@@ -0,0 +1,128 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>
#include "paddle2onnx/converter.h"
#include "paddle2onnx/mapper/exporter.h"
#include "paddle2onnx/optimizer/paddle2onnx_optimizer.h"
namespace paddle2onnx {
typedef std::map<std::string, std::string> CustomOpInfo;
PYBIND11_MODULE(paddle2onnx_cpp2py_export, m) {
m.doc() = "Paddle2ONNX: export PaddlePaddle to ONNX";
m.def("export", [](const std::string& model_filename,
const std::string& params_filename, int opset_version = 9,
bool auto_upgrade_opset = true, bool verbose = true,
bool enable_onnx_checker = true,
bool enable_experimental_op = true,
bool enable_optimize = true,
const CustomOpInfo& info = CustomOpInfo(),
const std::string& deploy_backend = "onnxruntime",
const std::string& calibration_file = "",
const std::string& external_file = "",
const bool& export_fp16_model = false) {
P2OLogger(verbose) << "Start to parse PaddlePaddle model..." << std::endl;
P2OLogger(verbose) << "Model file path: " << model_filename << std::endl;
P2OLogger(verbose) << "Paramters file path: " << params_filename
<< std::endl;
if (info.size() == 0) {
char* out = nullptr;
int size = 0;
char* calibration_cache = nullptr;
int cache_size = 0;
bool save_external;
if (!Export(model_filename.c_str(), params_filename.c_str(), &out, &size,
opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize,
nullptr, 0, deploy_backend.c_str(), &calibration_cache,
&cache_size, external_file.c_str(), &save_external,
export_fp16_model)) {
P2OLogger(verbose) << "Paddle model convert failed." << std::endl;
return pybind11::bytes("");
}
if (cache_size) {
std::string calibration_cache_str(calibration_cache,
calibration_cache + cache_size);
std::ofstream cache_file;
cache_file.open(calibration_file, std::ios::out);
cache_file << calibration_cache_str;
delete calibration_cache;
calibration_cache = nullptr;
P2OLogger(verbose) << "TensorRT calibration cache path: "
<< calibration_file << std::endl;
}
std::string onnx_proto(out, out + size);
delete out;
out = nullptr;
return pybind11::bytes(onnx_proto);
}
std::vector<CustomOp> ops;
ops.resize(info.size());
int index = 0;
for (auto& item : info) {
strcpy(ops[index].op_name, item.first.c_str());
strcpy(ops[index].export_op_name, item.second.c_str());
index += 1;
}
char* out = nullptr;
int size = 0;
char* calibration_cache = nullptr;
int cache_size = 0;
bool save_external;
if (!Export(model_filename.c_str(), params_filename.c_str(), &out, &size,
opset_version, auto_upgrade_opset, verbose, enable_onnx_checker,
enable_experimental_op, enable_optimize, ops.data(),
info.size(), deploy_backend.c_str(), &calibration_cache,
&cache_size, external_file.c_str(), &save_external,
export_fp16_model)) {
P2OLogger(verbose) << "Paddle model convert failed." << std::endl;
return pybind11::bytes("");
}
if (cache_size) {
std::string calibration_cache_str(calibration_cache,
calibration_cache + cache_size);
std::ofstream cache_file;
cache_file.open(calibration_file, std::ios::out);
cache_file << calibration_cache_str;
delete calibration_cache;
calibration_cache = nullptr;
P2OLogger(verbose) << "TensorRT calibration cache path: "
<< calibration_file << std::endl;
}
std::string onnx_proto(out, out + size);
delete out;
out = nullptr;
return pybind11::bytes(onnx_proto);
});
m.def(
"optimize",
[](const std::string& model_path, const std::string& optimized_model_path,
const std::map<std::string, std::vector<int>>& shape_infos) {
ONNX_NAMESPACE::optimization::OptimizePaddle2ONNX(
model_path, optimized_model_path, shape_infos);
});
m.def("convert_to_fp16", [](const std::string& fp32_model_path,
const std::string& fp16_model_path) {
paddle2onnx::optimization::Paddle2ONNXFP32ToFP16(fp32_model_path,
fp16_model_path);
});
}
} // namespace paddle2onnx
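
The export binding above accepts a CustomOpInfo map from Paddle operator names to exported ONNX operator names (the sentinel "paddle2onnx_null" means "reuse the Paddle name"). A sketch of calling the binding positionally, following the lambda signature above; the custom op name and paths are illustrative:

# Sketch: calling the pybind11 export binding with a custom-op mapping.
# "my_custom_relu" is an illustrative Paddle op name, not one registered by this commit.
import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o

onnx_bytes = c_p2o.export(
    "inference/model.pdmodel",    # model_filename (placeholder path)
    "inference/model.pdiparams",  # params_filename
    11,                           # opset_version
    True,                         # auto_upgrade_opset
    True,                         # verbose
    True,                         # enable_onnx_checker
    True,                         # enable_experimental_op
    True,                         # enable_optimize
    {"my_custom_relu": "Relu"},   # CustomOpInfo: paddle op -> exported op name
    "onnxruntime",                # deploy_backend
    "",                           # calibration_file
    "",                           # external_file
    False)                        # export_fp16_model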

paddle2onnx/legacy/__init__.py (new executable file, 128 lines)

@@ -0,0 +1,128 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
__version__ = "0.9.6"
import paddle
from .convert import dygraph2onnx, program2onnx
from .op_mapper import register_op_mapper
from typing import TypeVar
from paddle2onnx.utils import logging
from paddle2onnx.legacy.op_mapper import OpMapper
from . import command
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl',
'copy_cross_scope'
}
def process_old_ops_desc(model):
for i in range(len(model.blocks[0].ops)):
if model.blocks[0].ops[i].type == "matmul":
if not model.blocks[0].ops[i].has_attr("head_number"):
model.blocks[0].ops[i]._set_attr("head_number", 1)
elif model.blocks[0].ops[i].type == "yolo_box":
if not model.blocks[0].ops[i].has_attr("iou_aware"):
model.blocks[0].ops[i]._set_attr("iou_aware", False)
if not model.blocks[0].ops[i].has_attr("iou_aware_factor"):
model.blocks[0].ops[i]._set_attr("iou_aware_factor", 0.5)
def get_all_registered_ops(save_file=None):
ops = list(OpMapper.OPSETS.keys())
logging.warning("The number of all registered OPs is: {}".format(len(ops)))
if save_file is None:
return
with open(save_file, "w") as f:
logging.warning("All registered OPs will be written to the file: {}".
format(save_file))
f.write("Total OPs num: {} \n".format(len(ops)))
for index in range(len(ops)):
op = ops[index]
f.write(str(index + 1) + ". " + op + "\n")
return
def run_convert(model, input_shape_dict=None, scope=None, opset_version=9):
paddle_version = paddle.__version__
if isinstance(model, paddle.static.Program):
process_old_ops_desc(model)
if input_shape_dict is not None:
model_version = model.desc._version()
major_ver = model_version // 1000000
minor_ver = (model_version - major_ver * 1000000) // 1000
patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
if model_version != paddle_version:
logging.warning(
"The model is saved by paddlepaddle v{}, but now your paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model".
format(model_version, paddle_version))
for k, v in input_shape_dict.items():
model.blocks[0].var(k).desc.set_shape(v)
for i in range(len(model.blocks[0].ops)):
if model.blocks[0].ops[i].type in OP_WITHOUT_KERNEL_SET:
continue
model.blocks[0].ops[i].desc.infer_shape(model.blocks[0].desc)
if scope is None:
scope = paddle.static.global_scope()
input_names = list()
output_vars = list()
for i in range(len(model.blocks[0].ops)):
if model.blocks[0].ops[i].type == "feed":
input_names.append(model.blocks[0].ops[i].output("Out")[0])
if model.blocks[0].ops[i].type == "fetch":
output_vars.append(model.blocks[0].var(model.blocks[0].ops[i]
.input("X")[0]))
return program2onnx(
model,
scope,
save_file=None,
feed_var_names=input_names,
target_vars=output_vars,
opset_version=opset_version,
enable_onnx_checker=True)
elif isinstance(model, paddle.jit.TranslatedLayer):
process_old_ops_desc(model.program())
model_version = model.program().desc._version()
major_ver = model_version // 1000000
minor_ver = (model_version - major_ver * 1000000) // 1000
patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
if model_version != paddle_version:
logging.warning(
"The model is saved by paddlepaddle v{}, but now your paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model".
format(model_version, paddle_version))
if input_shape_dict is not None:
for k, v in input_shape_dict.items():
model.program().blocks[0].var(k).desc.set_shape(v)
for i in range(len(model.program().blocks[0].ops)):
if model.program().blocks[0].ops[
i].type in OP_WITHOUT_KERNEL_SET:
continue
model.program().blocks[0].ops[i].desc.infer_shape(model.program(
).blocks[0].desc)
return dygraph2onnx(model, save_file=None, opset_version=opset_version)
else:
raise Exception(
"Only support model loaded from paddle.static.load_inference_model() or paddle.jit.load()"
)

paddle2onnx/legacy/command.py (new executable file, 287 lines)

@@ -0,0 +1,287 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from six import text_type as _text_type
import argparse
import ast
import sys
import os
import paddle.fluid as fluid
from paddle2onnx.utils import logging
def str2list(v):
if len(v) == 0:
return None
v = v.replace(" ", "")
v = eval(v)
return v
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
"-m",
type=_text_type,
default=None,
help="PaddlePaddle model directory, if params stored in single file, you need define '--model_filename' and 'params_filename'."
)
parser.add_argument(
"--model_filename",
"-mf",
type=_text_type,
default=None,
help="PaddlePaddle model's network file name, which under directory seted by --model_dir"
)
parser.add_argument(
"--params_filename",
"-pf",
type=_text_type,
default=None,
help="PaddlePaddle model's param file name(param files combined in single file), which under directory seted by --model_dir."
)
parser.add_argument(
"--save_file",
"-s",
type=_text_type,
default=None,
help="file path to save onnx model")
parser.add_argument(
"--opset_version",
"-ov",
type=int,
default=9,
help="set onnx opset version to export")
parser.add_argument(
"--input_shape_dict",
"-isd",
type=_text_type,
default="None",
help="define input shapes, e.g --input_shape_dict=\"{'image':[1, 3, 608, 608]}\" or" \
"--input_shape_dict=\"{'image':[1, 3, 608, 608], 'im_shape': [1, 2], 'scale_factor': [1, 2]}\"")
parser.add_argument(
"--enable_dev_version",
type=ast.literal_eval,
default=False,
help="whether to use new version of Paddle2ONNX which is under developing, default False"
)
parser.add_argument(
"--enable_onnx_checker",
type=ast.literal_eval,
default=True,
help="whether check onnx model validity, default True")
parser.add_argument(
"--enable_paddle_fallback",
type=ast.literal_eval,
default=False,
help="whether use PaddleFallback for custom op, default is False")
parser.add_argument(
"--version",
"-v",
action="store_true",
default=False,
help="get version of paddle2onnx")
parser.add_argument(
"--output_names",
"-on",
type=str2list,
default=None,
help="define output names, e.g --output_names=\"[\"output1\"]\" or \
--output_names=\"[\"output1\", \"output2\", \"output3\"]\" or \
--output_names=\"{\"Paddleoutput\":\"Onnxoutput\"}\"")
parser.add_argument(
"--enable_auto_update_opset",
type=ast.literal_eval,
default=True,
help="whether enable auto_update_opset, default is True")
return parser
def c_paddle_to_onnx(model_file,
params_file="",
save_file=None,
opset_version=7,
auto_upgrade_opset=True,
verbose=True,
enable_onnx_checker=True,
enable_experimental_op=True,
enable_optimize=True):
import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o
onnx_model_str = c_p2o.export(
model_file, params_file, opset_version, auto_upgrade_opset, verbose,
enable_onnx_checker, enable_experimental_op, enable_optimize)
if save_file is not None:
with open(save_file, "wb") as f:
f.write(onnx_model_str)
else:
return onnx_model_str
def program2onnx(model_dir,
save_file,
model_filename=None,
params_filename=None,
opset_version=9,
enable_onnx_checker=False,
operator_export_type="ONNX",
input_shape_dict=None,
output_names=None,
auto_update_opset=True):
try:
import paddle
except:
logging.error(
"paddlepaddle not installed, use \"pip install paddlepaddle\"")
v0, v1, v2 = paddle.__version__.split('.')
if v0 == '0' and v1 == '0' and v2 == '0':
logging.warning("You are use develop version of paddlepaddle")
elif int(v0) <= 1 and int(v1) < 8:
raise ImportError("paddlepaddle>=1.8.0 is required")
import paddle2onnx as p2o
# convert model save with 'paddle.fluid.io.save_inference_model'
if hasattr(paddle, 'enable_static'):
paddle.enable_static()
exe = fluid.Executor(fluid.CPUPlace())
if model_filename is None and params_filename is None:
[program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
model_dir, exe)
else:
[program, feed_var_names, fetch_vars] = fluid.io.load_inference_model(
model_dir,
exe,
model_filename=model_filename,
params_filename=params_filename)
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl',
'copy_cross_scope'
}
if input_shape_dict is not None:
import paddle2onnx
paddle2onnx.legacy.process_old_ops_desc(program)
paddle_version = paddle.__version__
model_version = program.desc._version()
major_ver = model_version // 1000000
minor_ver = (model_version - major_ver * 1000000) // 1000
patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
if model_version != paddle_version:
logging.warning(
"The model is saved by paddlepaddle v{}, but now your paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model".
format(model_version, paddle_version))
for k, v in input_shape_dict.items():
program.blocks[0].var(k).desc.set_shape(v)
for i in range(len(program.blocks[0].ops)):
if program.blocks[0].ops[i].type in OP_WITHOUT_KERNEL_SET:
continue
program.blocks[0].ops[i].desc.infer_shape(program.blocks[0].desc)
p2o.program2onnx(
program,
fluid.global_scope(),
save_file,
feed_var_names=feed_var_names,
target_vars=fetch_vars,
opset_version=opset_version,
enable_onnx_checker=enable_onnx_checker,
operator_export_type=operator_export_type,
auto_update_opset=auto_update_opset,
output_names=output_names)
def main():
if len(sys.argv) < 2:
logging.info("Use \"paddle2onnx -h\" to print the help information")
logging.info(
"For more information, please follow our github repo below:")
logging.info("Github: https://github.com/PaddlePaddle/paddle2onnx.git")
return
parser = arg_parser()
args = parser.parse_args()
if args.version:
import paddle2onnx
logging.info("paddle2onnx-{} with python>=2.7, paddlepaddle>=1.8.0".
format(paddle2onnx.__version__))
return
assert args.model_dir is not None, "--model_dir should be defined while translating paddle model to onnx"
assert args.save_file is not None, "--save_file should be defined while translating paddle model to onnx"
input_shape_dict = eval(args.input_shape_dict)
operator_export_type = "ONNX"
if args.enable_paddle_fallback:
operator_export_type = "PaddleFallback"
if args.output_names is not None:
if not isinstance(args.output_names, (list, dict)):
raise TypeError(
"The output_names should be 'list' or 'dict', but received type is %s."
% type(args.output_names))
if args.enable_dev_version:
if args.enable_paddle_fallback:
logging.warn(
"--enable_paddle_fallback is deprecated while --enable_dev_version=True."
)
if args.output_names is not None:
logging.warn(
"--output_names is deprecated while --enable_dev_version=True.")
if input_shape_dict is not None:
logging.warn(
"--input_shape_dict is deprecated while --enable_dev_version=True."
)
model_file = os.path.join(args.model_dir, args.model_filename)
if args.params_filename is None:
params_file = ""
else:
params_file = os.path.join(args.model_dir, args.params_filename)
return c_paddle_to_onnx(
model_file=model_file,
params_file=params_file,
save_file=args.save_file,
opset_version=args.opset_version,
auto_upgrade_opset=args.enable_auto_update_opset,
verbose=True,
enable_onnx_checker=args.enable_onnx_checker,
enable_experimental_op=True,
enable_optimize=True)
program2onnx(
args.model_dir,
args.save_file,
args.model_filename,
args.params_filename,
opset_version=args.opset_version,
enable_onnx_checker=args.enable_onnx_checker,
operator_export_type=operator_export_type,
input_shape_dict=input_shape_dict,
output_names=args.output_names,
auto_update_opset=args.enable_auto_update_opset)
if __name__ == "__main__":
main()


@@ -0,0 +1,15 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .constant import PRODUCER
from .constant import NodeDomain


@@ -0,0 +1,24 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PRODUCER = 'PaddlePaddle'
ONNX_HELPER_VERSION = '1.7.0'
class NodeDomain():
ONNX = 'onnx'
PADDLE = 'paddle'
CUSTOM = 'custom'
RAW = 'raw'


@@ -0,0 +1,84 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid.core as core
from onnx import helper
from onnx import TensorProto
ONNX = TensorProto
DTYPE_PADDLE_ONNX_MAP = {
TensorProto.FLOAT16: core.VarDesc.VarType.FP16,
TensorProto.FLOAT: core.VarDesc.VarType.FP32,
TensorProto.DOUBLE: core.VarDesc.VarType.FP64,
TensorProto.INT16: core.VarDesc.VarType.INT16,
TensorProto.INT32: core.VarDesc.VarType.INT32,
TensorProto.INT64: core.VarDesc.VarType.INT64,
TensorProto.BOOL: core.VarDesc.VarType.BOOL,
TensorProto.UINT8: core.VarDesc.VarType.UINT8,
core.VarDesc.VarType.FP16: TensorProto.FLOAT16,
core.VarDesc.VarType.FP32: TensorProto.FLOAT,
core.VarDesc.VarType.FP64: TensorProto.DOUBLE,
core.VarDesc.VarType.INT16: TensorProto.INT16,
core.VarDesc.VarType.INT32: TensorProto.INT32,
core.VarDesc.VarType.INT64: TensorProto.INT64,
core.VarDesc.VarType.BOOL: TensorProto.BOOL,
core.VarDesc.VarType.UINT8: TensorProto.UINT8,
}
DTYPE_PADDLE_NUMPY_MAP = {
np.float32: core.VarDesc.VarType.FP32,
np.float64: core.VarDesc.VarType.FP64,
np.int16: core.VarDesc.VarType.INT16,
np.int32: core.VarDesc.VarType.INT32,
np.int64: core.VarDesc.VarType.INT64,
np.bool_: core.VarDesc.VarType.BOOL,
core.VarDesc.VarType.FP32: np.float32,
core.VarDesc.VarType.FP64: np.float64,
core.VarDesc.VarType.INT16: np.int16,
core.VarDesc.VarType.INT32: np.int32,
core.VarDesc.VarType.INT64: np.int64,
core.VarDesc.VarType.BOOL: np.bool_
}
DTYPE_PADDLE_STR_MAP = {
core.VarDesc.VarType.FP32: 'float32',
core.VarDesc.VarType.FP64: 'float64',
core.VarDesc.VarType.INT16: 'int16',
core.VarDesc.VarType.INT32: 'int32',
core.VarDesc.VarType.INT64: 'int64',
core.VarDesc.VarType.BOOL: 'bool',
'float32': core.VarDesc.VarType.FP32,
'float64': core.VarDesc.VarType.FP64,
'int16': core.VarDesc.VarType.INT16,
'int32': core.VarDesc.VarType.INT32,
'int64': core.VarDesc.VarType.INT64,
'bool': core.VarDesc.VarType.BOOL
}
DTYPE_ONNX_STR_MAP = {
TensorProto.FLOAT: 'float32',
TensorProto.DOUBLE: 'float64',
TensorProto.INT16: 'int16',
TensorProto.INT32: 'int32',
TensorProto.INT64: 'int64',
TensorProto.BOOL: 'bool',
'float32': TensorProto.FLOAT,
'float64': TensorProto.DOUBLE,
'int16': TensorProto.INT16,
'int32': TensorProto.INT32,
'int64': TensorProto.INT64,
'bool': TensorProto.BOOL,
}
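
A small lookup sketch (not part of the diff) showing that the maps above are deliberately bidirectional, so one dict converts in either direction:

from paddle.fluid import core
from onnx import TensorProto

onnx_dtype = DTYPE_PADDLE_ONNX_MAP[core.VarDesc.VarType.FP32]  # TensorProto.FLOAT
paddle_dtype = DTYPE_PADDLE_ONNX_MAP[TensorProto.FLOAT]        # core.VarDesc.VarType.FP32
assert DTYPE_ONNX_STR_MAP[onnx_dtype] == 'float32'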


@@ -0,0 +1,19 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
OP_MAPPING_WAITTING = 0
OP_MAPPING_NO_REGISTER = 1
OP_MAPPING_NO_VERSION = 2
OP_MAPPING_SUCCESSED = 3
OP_MAPPING_FAILED = 4

paddle2onnx/legacy/convert.py Executable file

@@ -0,0 +1,206 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import paddle
import numpy as np
from paddle.fluid.framework import Variable
from paddle2onnx.utils import check_model, logging
from paddle2onnx.legacy.graph import PaddleGraph, ONNXGraph
from paddle2onnx.legacy.passes import PassManager
def export_onnx(paddle_graph,
save_file,
opset_version=9,
enable_onnx_checker=False,
operator_export_type="ONNX",
verbose=False,
auto_update_opset=True,
output_names=None):
onnx_graph = ONNXGraph.build(paddle_graph, opset_version,
operator_export_type, verbose,
auto_update_opset)
onnx_graph = PassManager.run_pass(
onnx_graph, ['dumplicate_names_pass', 'inplace_node_pass'])
onnx_proto = onnx_graph.export_proto(enable_onnx_checker, output_names)
if save_file is None:
return onnx_proto
path, _ = os.path.split(save_file)
if path != '' and not os.path.isdir(path):
os.makedirs(path)
with open(save_file, 'wb') as f:
f.write(onnx_proto.SerializeToString())
logging.info("ONNX model saved in {}".format(save_file))
def program2onnx(program,
scope,
save_file,
feed_var_names=None,
target_vars=None,
opset_version=9,
enable_onnx_checker=False,
operator_export_type="ONNX",
auto_update_opset=True,
**configs):
from paddle import fluid
if hasattr(paddle, 'enable_static'):
paddle.enable_static()
if isinstance(program, paddle.fluid.framework.Program):
if feed_var_names is not None:
if isinstance(feed_var_names, six.string_types):
feed_var_names = [feed_var_names]
else:
if not (bool(feed_var_names) and all(
isinstance(name, six.string_types)
for name in feed_var_names)):
raise TypeError("'feed_var_names' should be a list of str.")
if target_vars is not None:
if isinstance(target_vars, Variable):
target_vars = [target_vars]
else:
if not (bool(target_vars) and
all(isinstance(var, Variable) for var in target_vars)):
raise TypeError(
"'target_vars' should be a list of variable.")
paddle_graph = PaddleGraph.build_from_program(program, feed_var_names,
target_vars, scope)
output_names = None
if 'output_names' in configs:
output_names = configs['output_names']
if output_names is not None and not isinstance(output_names,
(list, dict)):
raise TypeError(
"The output_names should be 'list' or dict, but received type is %s."
% type(output_names))
return export_onnx(
paddle_graph,
save_file,
opset_version,
enable_onnx_checker,
operator_export_type,
auto_update_opset=auto_update_opset,
output_names=output_names)
else:
raise TypeError(
"the input 'program' should be 'Program', but received type is %s."
% type(program))
def dygraph2onnx(layer, save_file, input_spec=None, opset_version=9, **configs):
from paddle.nn import Layer
from paddle.fluid import core
from paddle.fluid.framework import Variable
from paddle.fluid.dygraph.dygraph_to_static import program_translator
from paddle.fluid import dygraph
if not isinstance(layer, Layer):
raise TypeError(
"the input 'layer' should be 'Layer', 'TranslatedLayer', but received type is %s."
% type(layer))
inner_input_spec = None
if input_spec is not None:
if not isinstance(input_spec, list):
raise TypeError(
"The input input_spec should be 'list', but received type is %s."
% type(input_spec))
inner_input_spec = []
for var in input_spec:
if isinstance(var, paddle.static.InputSpec):
inner_input_spec.append(var)
elif isinstance(var, (core.VarBase, Variable)):
inner_input_spec.append(
paddle.static.InputSpec.from_tensor(var))
else:
raise TypeError(
"The element in input_spec list should be 'Variable' or `paddle.static.InputSpec`, but received element's type is %s."
% type(var))
output_spec = None
if 'output_spec' in configs:
output_spec = configs['output_spec']
if not isinstance(output_spec, list):
raise TypeError(
"The output_spec should be 'list', but received type is %s." %
type(output_spec))
for var in output_spec:
if not isinstance(var, (core.VarBase, Variable)):
raise TypeError(
"The element in output_spec list should be 'Variable', but received element's type is %s."
% type(var))
verbose = False
if 'verbose' in configs:
if isinstance(configs['verbose'], bool):
verbose = configs['verbose']
else:
raise TypeError(
"The verbose should be 'bool', but received type is %s." %
type(configs['verbose']))
enable_onnx_checker = False
if 'enable_onnx_checker' in configs:
if isinstance(configs['enable_onnx_checker'], bool):
enable_onnx_checker = configs['enable_onnx_checker']
else:
raise TypeError(
"The 'enable_onnx_checker' should be 'bool', but received type is %s."
% type(configs['enable_onnx_checker']))
operator_export_type = "ONNX"
enable_paddle_fallback = False
if 'enable_paddle_fallback' in configs:
if isinstance(configs['enable_paddle_fallback'], bool):
enable_paddle_fallback = configs['enable_paddle_fallback']
if enable_paddle_fallback:
operator_export_type = "PaddleFallback"
else:
raise TypeError(
"The 'enable_paddle_fallback' should be 'bool', but received type is %s."
% type(configs['enable_paddle_fallback']))
paddle_graph = PaddleGraph.build_from_dygraph(layer, inner_input_spec,
output_spec)
if 'get_paddle_graph' in configs:
return paddle_graph
auto_update_opset = True
if 'auto_update_opset' in configs:
if isinstance(configs['auto_update_opset'], bool):
auto_update_opset = configs['auto_update_opset']
else:
raise TypeError(
"The auto_update_opset should be 'bool', but received type is %s."
% type(configs['auto_update_opset']))
output_names = None
if 'output_names' in configs:
output_names = configs['output_names']
if not isinstance(output_names, (list, dict)):
raise TypeError(
"The output_names should be 'list' or dict, but received type is %s."
% type(output_names))
return export_onnx(paddle_graph, save_file, opset_version,
enable_onnx_checker, operator_export_type, verbose,
auto_update_opset, output_names)
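
A minimal export sketch (not part of the diff), assuming a trivial Layer, exercising the dygraph2onnx signature defined above:

import paddle

class TinyNet(paddle.nn.Layer):
    def forward(self, x):
        return paddle.nn.functional.relu(x)

spec = [paddle.static.InputSpec(shape=[None, 3], dtype='float32', name='x')]
dygraph2onnx(TinyNet(), 'tiny.onnx', input_spec=spec, opset_version=11)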


@@ -0,0 +1,17 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .graph import Graph, Node
from .paddle_graph import PaddleGraph, PaddleNode
from .onnx_graph import ONNXGraph, ONNXNode


@@ -0,0 +1,271 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import warnings
import numpy as np
import inspect
import six
import paddle
from paddle.fluid.io import _get_valid_program
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from collections import OrderedDict
from paddle.fluid import dygraph
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid import core
from paddle.fluid import layers
from paddle.nn import Layer
from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard
from paddle.fluid.dygraph.layers import Layer
from paddle2onnx.utils import logging
from paddle2onnx.legacy.graph.graph_helper import prepend_feed_ops, append_fetch_ops
def _get_input_var_names(inputs, input_spec):
    name_none_error = "The %s's name is None. " \
        "When using jit.save, please set InputSpec's name in " \
        "to_static(input_spec=[]) and jit.save(input_spec=[]) " \
        "and make sure they are consistent."
    name_no_exists_error = "The tensor `%s` does not exist. " \
        "Please make sure the name of InputSpec or example Tensor " \
        "in input_spec is the same as the name of InputSpec in " \
        "`to_static` decorated on the Layer.forward method."
result_list = []
input_var_names = [
var.name for var in flatten(inputs) if isinstance(var, Variable)
]
if input_spec is None:
# no prune
return input_var_names
else:
        # filter out non-tensor type spec infos.
input_spec = [
spec for spec in input_spec
if isinstance(spec, paddle.static.InputSpec)
]
if len(input_spec) == len(input_var_names):
# no prune
result_list = input_var_names
# if input spec name not in input_var_names, only raise warning
for spec in input_spec:
if spec.name is None:
warnings.warn(name_none_error % spec)
elif spec.name not in input_var_names:
warnings.warn(name_no_exists_error % spec.name)
else:
# do nothing
pass
else:
# prune
for spec in input_spec:
if spec.name is None:
# name is None, the input_spec only can be InputSpec
raise ValueError(name_none_error % spec)
elif spec.name not in input_var_names:
# the input_spec can be `InputSpec` or `VarBase`
raise ValueError(name_no_exists_error % spec.name)
else:
result_list.append(spec.name)
return result_list
def _get_output_vars(outputs, output_spec):
    name_no_exists_error = "The tensor `%s` does not exist. " \
        "Please make sure the name of example Tensor " \
        "in configs.output_spec is the output tensor of " \
        "Layer.forward method."
result_list = []
output_vars_dict = OrderedDict()
for var in flatten(outputs):
if isinstance(var, Variable):
output_vars_dict[var.name] = var
if output_spec is None:
result_list = output_vars_dict.values()
elif output_spec is not None and len(output_spec) == len(output_vars_dict):
result_list = output_vars_dict.values()
for var in output_spec:
if var.name not in output_vars_dict:
warnings.warn(name_no_exists_error % var.name)
else:
for var in output_spec:
if var.name not in output_vars_dict:
raise ValueError(name_no_exists_error % var.name)
else:
result_list.append(output_vars_dict[var.name])
return result_list
@dygraph.base.switch_to_static_graph
def get_program(layer, input_spec, output_spec, **configs):
paddle.jit.set_verbosity(0)
prog_translator = ProgramTranslator()
if not prog_translator.enable_to_static:
        raise RuntimeError(
            "Paddle2ONNX doesn't work when ProgramTranslator.enable is set to False."
        )
if not isinstance(layer, Layer):
raise TypeError(
"The input of paddle2onnx should be 'Layer', but received input type is %s."
% type(layer))
if isinstance(layer, paddle.DataParallel):
inner_layer = layer._layers
else:
inner_layer = layer
# avoid change user given input_spec
inner_input_spec = None
if input_spec is not None:
for attr_func in dir(inner_layer):
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func,
StaticFunction) and 'forward' != attr_func:
raise ValueError(
"If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
% type(input_spec))
if not isinstance(input_spec, (list, tuple)):
raise TypeError(
"The input input_spec should be 'list', but received input_spec's type is %s."
% type(input_spec))
inner_input_spec = []
for var in flatten(input_spec):
if isinstance(var, paddle.static.InputSpec):
inner_input_spec.append(var)
elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
inner_input_spec.append(
paddle.static.InputSpec.from_tensor(var))
else:
# NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
inner_input_spec.append(var)
extra_var_info = dict()
functions = dir(inner_layer)
for attr_func in functions:
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func, StaticFunction):
concrete_program = static_func.concrete_program_specify_input_spec(
inner_input_spec)
elif 'forward' == attr_func:
# transform in jit.save, if input_spec is incomplete, declarative will throw error
# inner_input_spec is list[InputSpec], it should be packed with same structure
# as original input_spec here.
if inner_input_spec:
inner_input_spec = pack_sequence_as(input_spec,
inner_input_spec)
static_forward = declarative(
inner_layer.forward, input_spec=inner_input_spec)
concrete_program = static_forward.concrete_program
# the input_spec has been used in declarative, which is equal to
# @declarative with input_spec and jit.save without input_spec,
# avoid needless warning
inner_input_spec = None
else:
continue
input_var_names = _get_input_var_names(concrete_program.inputs,
inner_input_spec)
# NOTE(chenweihang): [ Get output variables ]
# the rule is like [ Get input variables name ]. For output var,
# we only support VarBase spec, and actually, we only need the
        # var name of output, and we don't recommend using output_spec
output_vars = _get_output_vars(concrete_program.outputs, output_spec)
feeded_var_names = input_var_names
target_vars = output_vars
main_program = concrete_program.main_program.clone()
export_for_deployment = True
if isinstance(feeded_var_names, six.string_types):
feeded_var_names = [feeded_var_names]
elif export_for_deployment:
if len(feeded_var_names) > 0:
# TODO(paddle-dev): polish these code blocks
if not (bool(feeded_var_names) and all(
isinstance(name, six.string_types)
for name in feeded_var_names)):
raise ValueError("'feed_var_names' should be a list of str.")
if isinstance(target_vars, Variable):
target_vars = [target_vars]
elif export_for_deployment:
if not (bool(target_vars) and
all(isinstance(var, Variable) for var in target_vars)):
raise ValueError("'target_vars' should be a list of Variable.")
main_program = _get_valid_program(main_program)
# remind user to set auc_states to zeros if the program contains auc op
all_ops = main_program.global_block().ops
for op in all_ops:
# clear device of Op
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
op._set_attr(device_attr_name, "")
if op.type == 'auc':
warnings.warn(
"please ensure that you have set the auc states to zeros before saving inference model"
)
break
with program_guard(main_program):
uniq_target_vars = []
for i, var in enumerate(target_vars):
uniq_target_vars.append(var)
target_vars = uniq_target_vars
target_var_name_list = [var.name for var in target_vars]
origin_program = main_program.clone()
main_program = main_program.clone()
global_block = main_program.global_block()
need_to_remove_op_index = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
need_to_remove_op_index.append(i)
for index in need_to_remove_op_index[::-1]:
global_block._remove_op(index)
main_program.desc.flush()
main_program = main_program._prune_with_input(
feeded_var_names=feeded_var_names, targets=target_vars)
main_program = main_program._inference_optimize(prune_read_op=True)
fetch_var_names = [v.name for v in target_vars]
for target_v in target_vars:
if not main_program.global_block().has_var(target_v.name):
main_program.global_block().create_var(
name=target_v.name,
shape=target_v.shape,
dtype=target_v.dtype,
persistable=target_v.persistable)
prepend_feed_ops(main_program, feeded_var_names)
append_fetch_ops(main_program, fetch_var_names)
main_program.desc._set_version()
paddle.fluid.core.save_op_version_info(main_program.desc)
main_program._copy_dist_param_info_from(origin_program)
return main_program, feeded_var_names, target_vars
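
A naming sketch (not part of the diff): the error messages above expect every InputSpec to carry a name matching the forward argument it feeds, e.g.:

import paddle

# 'x' must match the parameter name in the to_static-decorated forward
x_spec = paddle.static.InputSpec(
    shape=[None, 3, 224, 224], dtype='float32', name='x')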

paddle2onnx/legacy/graph/graph.py Executable file

@@ -0,0 +1,287 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import copy
import six
import collections
from paddle2onnx.legacy.constant import NodeDomain
class Node(object):
def __init__(self,
op_type,
inputs,
outputs,
attrs,
layer_name,
domain=NodeDomain.RAW):
self.domain = domain
self.type = op_type
self.attrs = attrs
self.layer_name = layer_name
self.set_inputs(inputs)
self.set_outputs(outputs)
def __hash__(self):
return hash(self.layer_name)
def __eq__(self, other):
if self.layer_name == other.layer_name:
return True
return False
def __str__(self):
node_str = ''
attrs = ''
for key, value in self.attrs.items():
attrs += ', ' + key + '=' + str(value)
node_str += " {} = {}::{}(inputs={}{}) \n".format(
self.outputs, self.domain, self.type, self.inputs, attrs)
return node_str
def input(self, idx=None):
if idx is None:
return self.inputs
return self.inputs[idx]
def output(self, idx=None):
if idx is None:
return self.outputs
return self.outputs[idx]
def attr(self, name):
if name in self.attrs:
return self.attrs[name]
return None
def set_inputs(self, inputs):
if isinstance(inputs, list):
self.inputs = [
ipt.layer_name if isinstance(ipt, Node) else ipt
for ipt in inputs
]
elif isinstance(inputs, six.string_types):
self.inputs = [inputs]
elif isinstance(inputs, Node):
self.inputs = [inputs.layer_name]
else:
raise TypeError(
'Inputs of node must be type: list, Node, or String but got {}'.
format(type(inputs)))
def set_outputs(self, outputs):
if isinstance(outputs, list):
self.outputs = [
opt.layer_name if isinstance(opt, Node) else opt
for opt in outputs
]
elif isinstance(outputs, six.string_types):
self.outputs = [outputs]
elif isinstance(outputs, Node):
self.outputs = [outputs.layer_name]
else:
raise TypeError(
'Outputs of node must be type: list, Node, or String but got {}'.
format(type(outputs)))
class Graph(object):
def __init__(self):
self.parameters = {}
self.node_map = collections.OrderedDict()
self.input_nodes = list()
self.output_nodes = list()
self.op_type_count = dict()
    def __hash__(self):
        # Graph keeps no separate id field, so object identity serves as the hash
        return hash(id(self))
    def __eq__(self, other):
        return self is other
def __str__(self):
graph_str = 'graph { \n'
for node in self.input_nodes:
graph_str += " input: {} \n".format(node.layer_name)
for node in self.output_nodes:
graph_str += " output: {} \n \n".format(node.layer_name)
for name, node in self.node_map.items():
graph_str += node.__str__()
graph_str += ' }'
return graph_str
def set_output_nodes(self, node_list):
if isinstance(node_list, list):
self.output_nodes = node_list
else:
raise TypeError(
'output_nodes of Graph must be type: list, but got {}'.format(
type(node_list)))
def set_node_map(self, node_map):
if isinstance(node_map, dict):
self.node_map = node_map
self.generate_topo_sort()
        else:
            raise TypeError('node_map of Graph must be type: dict, but got {}'.
                            format(type(node_map)))
def set_input_nodes(self, node_list):
if isinstance(node_list, list):
self.input_nodes = node_list
else:
raise TypeError(
'input_nodes of Graph must be type: list, but got {}'.format(
type(node_list)))
def set_parameters(self, parameters):
if isinstance(parameters, dict):
self.parameters = parameters
else:
raise TypeError(
'parameters of Graph must be type: dict, but got {}'.format(
type(parameters)))
def generate_node_name(self, op_type):
if op_type in self.op_type_count:
self.op_type_count[op_type] += 1
else:
self.op_type_count[op_type] = 1
        # layer_name needs to follow https://github.com/onnx/onnx/blob/master/docs/OpConventions.md
layer_name = op_type + '_' + str(self.op_type_count[op_type] - 1)
return layer_name
def insert_node(self, node):
if node.type not in ['feed', 'fetch']:
self.node_map[node.layer_name] = node
def make_node(self,
op_type,
inputs=None,
outputs=None,
attrs=None,
layer_name=None,
domain=None,
**kw):
if layer_name is None:
layer_name = self.generate_node_name(op_type)
if attrs is None:
attrs = kw
attrs.update(kw)
if inputs is None:
inputs = []
if outputs is None:
outputs = [layer_name]
        node = Node(op_type, inputs, outputs, attrs, layer_name, domain)
self.insert_node(node)
return node
def update_node(self,
node,
op_type=None,
inputs=None,
outputs=None,
attrs=None,
block=None,
move_to_end=True,
domain=None,
**kw):
if op_type is not None:
node.type = op_type
if inputs is not None:
node.set_inputs(inputs)
if outputs is not None:
node.set_outputs(outputs)
if attrs is None:
attrs = kw
attrs.update(kw)
node.attrs = attrs
if domain is not None:
node.domain = domain
if move_to_end:
self.node_map.pop(node.layer_name)
self.node_map[node.layer_name] = node
return node
    def get_node(self, name, copy=False):
        if name not in self.node_map:
            raise TypeError('Node with name:{} not in graph'.format(name))
        if copy:
            # the `copy` argument shadows the copy module, so import it locally
            import copy as copy_module
            node = copy_module.copy(self.node_map[name])
        else:
            node = self.node_map[name]
        return node
def remove_node_by_name(self, name):
if name in self.node_map:
node = self.node_map.pop(name)
return node
raise TypeError('Node with name:{} not in graph'.format(name))
def remove_node(self, node):
if isinstance(node, Node):
node = self.remove_node_by_name(node.layer_name)
return node
else:
node = self.remove_node_by_name(node)
return node
    def get_output_nodes_of_node(self, node):
        # Graph keeps no edge_map attribute; derive the consumers of
        # `node` from the adjacency map built over node_map instead
        adjacency_map = self.get_adjacency_map()
        if node in adjacency_map:
            return adjacency_map[node]
        elif self.get_node(node.layer_name, copy=False):
            return []
        else:
            raise KeyError('Node with layer_name {} not in graph'.format(
                node.layer_name))
def get_adjacency_map(self):
adjacency_map = {}
for layer_name, current_node in self.node_map.items():
inputs = current_node.inputs
for ipt in inputs:
for layer_name, node in self.node_map.items():
if current_node == node:
continue
outputs = node.outputs
if ipt in outputs:
if node not in adjacency_map:
adjacency_map[node] = set([current_node])
else:
adjacency_map[node].add(current_node)
return adjacency_map
def get_topo_sort_list(self):
topo_sort_list = list()
adjacency_map = self.get_adjacency_map()
for layer_name, node in self.node_map.items():
if node not in adjacency_map:
topo_sort_list.append(node)
idx = 0
while idx < len(topo_sort_list):
current_node = topo_sort_list[idx]
for input_node, output_nodes in adjacency_map.items():
if current_node in output_nodes:
adjacency_map[input_node].remove(current_node)
if len(adjacency_map[input_node]) == 0:
topo_sort_list.append(input_node)
idx += 1
return topo_sort_list[::-1]
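
A toy sketch (not part of the diff) wiring two nodes and recovering a topological order with the helpers above:

g = Graph()
a = g.make_node('relu', inputs='x', outputs='relu_out')
b = g.make_node('tanh', inputs=a.outputs, outputs='tanh_out')
assert [n.type for n in g.get_topo_sort_list()] == ['relu', 'tanh']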


@@ -0,0 +1,83 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import paddle
import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import Variable, program_guard
from paddle2onnx.utils import logging
def prepend_feed_ops(inference_program,
feed_target_names,
feed_holder_name='feed'):
if len(feed_target_names) == 0:
return
global_block = inference_program.global_block()
feed_var = global_block.create_var(
name=feed_holder_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True)
for i, name in enumerate(feed_target_names):
if not global_block.has_var(name):
raise ValueError(
"The feed_var_names[{i}]: '{name}' doesn't exist in pruned inference program. "
"Please check whether '{name}' is a valid feed_var name, or remove it from feed_var_names "
"if '{name}' is not involved in the fetch_vars calculation.".
format(
i=i, name=name))
out = global_block.var(name)
global_block._prepend_op(
type='feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i})
def append_fetch_ops(inference_program,
fetch_target_names,
fetch_holder_name='fetch'):
global_block = inference_program.global_block()
fetch_var = global_block.create_var(
name=fetch_holder_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True)
for i, name in enumerate(fetch_target_names):
global_block.append_op(
type='fetch',
inputs={'X': [name]},
outputs={'Out': [fetch_var]},
attrs={'col': i})
def get_program(program, feed_var_names, fetch_vars):
global_block = program.global_block()
need_to_remove_op_index = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
need_to_remove_op_index.append(i)
for index in need_to_remove_op_index[::-1]:
global_block._remove_op(index)
program.desc.flush()
program = program._prune_with_input(
feeded_var_names=feed_var_names, targets=fetch_vars)
program = program._inference_optimize(prune_read_op=True)
fetch_var_names = [v.name for v in fetch_vars]
prepend_feed_ops(program, feed_var_names)
append_fetch_ops(program, fetch_var_names)
return program
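
A call-pattern sketch (not part of the diff); main_program and the 'x'/'out' names are placeholders for a pruned inference Program and its feed/fetch variables:

prepend_feed_ops(main_program, ['x'])
append_fetch_ops(main_program, ['out'])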


@@ -0,0 +1,333 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import copy
import collections
import numpy as np
from paddle2onnx.legacy.graph import Node, Graph
from paddle2onnx.legacy.constant import NodeDomain, PRODUCER, dtypes
from paddle2onnx.legacy.op_mapper import OpMapper
from onnx import helper
from paddle2onnx.utils import check_model, logging
class ONNXNode(Node):
def __init__(self, op_type, inputs, outputs, attrs, layer_name, domain):
super(ONNXNode, self).__init__(op_type, inputs, outputs, attrs,
layer_name, domain)
self.domain = domain
self.onnx_node = self.make_onnx_node()
def make_onnx_constant_node(self):
dtype = self.attr('dtype')
value = self.attr('value')
if isinstance(value, list):
dims = (len(value), )
elif value is None:
dims = ()
value = []
else:
dims = ()
value = [value]
if 'dims' in self.attrs:
dims = self.attrs['dims']
tensor = helper.make_tensor(
name=self.layer_name, data_type=dtype, dims=dims, vals=value)
onnx_node = helper.make_node(
self.type, inputs=self.inputs, outputs=self.outputs, value=tensor)
return onnx_node
def make_onnx_node(self):
if self.type in ['Constant', 'ConstantOfShape']:
onnx_node = self.make_onnx_constant_node()
else:
onnx_node = helper.make_node(
self.type,
inputs=self.inputs,
outputs=self.outputs,
name=self.layer_name,
domain=self.domain,
**self.attrs)
return onnx_node
class ONNXGraph(Graph):
def __init__(self,
paddle_graph,
opset_version,
operator_export_type="ONNX",
block=None,
auto_update_opset=True):
super(ONNXGraph, self).__init__()
self.opset_version = opset_version
self.operator_export_type = operator_export_type
self.ctx = paddle_graph
self.custom = []
if auto_update_opset:
self.update_opset_version()
def __str__(self):
graph_str = 'graph { \n'
for node in self.input_nodes:
graph_str += " input: {} \n".format(node)
for node in self.output_nodes:
graph_str += " output: {} \n \n".format(node)
for name, node in self.node_map.items():
graph_str += node.__str__()
graph_str += ' }'
return graph_str
    def make_node(self,
                  op_type,
                  inputs=None,
                  outputs=None,
                  attrs=None,
                  layer_name=None,
                  domain=None,
                  **kw):
if layer_name is None:
layer_name = self.generate_node_name(op_type)
if domain is not None:
if domain not in self.custom:
self.custom.append(domain)
if attrs is None:
attrs = kw
attrs.update(kw)
if inputs is None:
inputs = []
real_outputs = None
if outputs is None:
real_outputs = [layer_name]
elif isinstance(outputs, int):
real_outputs = []
for i in range(outputs):
real_outputs.append(self.generate_node_name(op_type))
elif isinstance(outputs, list):
real_outputs = []
if len(outputs) == 0:
real_outputs = [layer_name]
else:
for opt in outputs:
if isinstance(opt, Node):
real_outputs.append(opt.layer_name)
elif isinstance(opt, int):
real_outputs.append(self.generate_node_name(op_type))
else:
real_outputs.append(opt)
else:
real_outputs = outputs
node = ONNXNode(op_type, inputs, real_outputs, attrs, layer_name,
domain)
self.insert_node(node)
if len(node.outputs) == 1:
return node.outputs[0]
else:
return node.outputs
def update_node(self,
node,
op_type=None,
inputs=None,
outputs=None,
attrs=None,
**kw):
if op_type is None:
op_type = node.type
if inputs is None:
inputs = node.inputs
if outputs is None:
outputs = node.outputs
if attrs is None:
attrs = node.attrs
attrs.update(kw)
node = ONNXNode(op_type, inputs, outputs, attrs, node.layer_name,
node.domain)
self.insert_node(node)
return node
def build_parameters(self, parameters):
# build weight nodes
for name, param in parameters.items():
weight = param['data']
            if not isinstance(weight, np.ndarray):
weight = np.array(weight)
tensor = helper.make_tensor(
name=name,
dims=param['shape'],
data_type=dtypes.DTYPE_PADDLE_ONNX_MAP[param['dtype']],
vals=weight.flatten().tolist())
node = helper.make_node(
'Constant', inputs=[], outputs=[name], value=tensor)
self.parameters[name] = node
def build_input_nodes(self, input_nodes):
# build input nodes
for ipt in input_nodes:
self.add_input_node(ipt.layer_name,
ipt.attr('shape'), ipt.attr('dtype'))
def build_output_nodes(self, output_nodes):
# build output nodes
for opt in output_nodes:
self.add_output_node(opt.layer_name,
opt.attr('shape'), opt.attr('dtype'))
def update_opset_version(self):
node_map = self.ctx.node_map
self.opset_version = OpMapper.get_recommend_opset_version(
node_map, self.opset_version)
def build_op_nodes(self, node_map):
OpMapper.check_support_status(node_map, self.opset_version)
# build op nodes
for name, node in list(node_map.items()):
OpMapper.mapping(self, node, self.operator_export_type)
def make_value_info(self, name, shape, dtype):
tensor_info = helper.make_tensor_value_info(
name=name,
shape=shape,
elem_type=dtypes.DTYPE_PADDLE_ONNX_MAP[dtype])
return tensor_info
def add_input_node(self, name, shape, dtype):
vi = self.make_value_info(name, shape, dtype)
self.input_nodes.append(vi)
def add_output_node(self, name, shape, dtype):
vi = self.make_value_info(name, shape, dtype)
self.output_nodes.append(vi)
def find_index(self, node_inout, name):
for i in range(len(node_inout)):
if node_inout[i] == name:
return i
return -1
def change_output_names(self, onnx_proto, output_names):
logging.info("The output of the ONNX model is set to: {}".format(
output_names))
if isinstance(output_names, list):
assert len(output_names) == len(
onnx_proto.graph.output
), "The provided output names are inconsistent with the output number of the onnx model when output_names is list"
origin_output_names = []
for i in range(len(onnx_proto.graph.output)):
origin_output_names.append(onnx_proto.graph.output[i].name)
onnx_proto.graph.output[i].name = output_names[i]
for i in range(len(onnx_proto.graph.node)):
node = onnx_proto.graph.node[i]
# Prevent changed names from being changed again
output_visited_node = []
input_visited_node = []
for j in range(len(origin_output_names)):
if origin_output_names[j] in node.output:
index = self.find_index(node.output,
origin_output_names[j])
if index in output_visited_node:
continue
output_visited_node.append(index)
onnx_proto.graph.node[i].output[index] = output_names[j]
if origin_output_names[j] in node.input:
index = self.find_index(node.input,
origin_output_names[j])
if index in input_visited_node:
continue
input_visited_node.append(index)
onnx_proto.graph.node[i].input[index] = output_names[j]
if isinstance(output_names, dict):
for i in range(len(onnx_proto.graph.output)):
for key, value in output_names.items():
if onnx_proto.graph.output[i].name == key:
onnx_proto.graph.output[i].name = value
break
for i in range(len(onnx_proto.graph.node)):
node = onnx_proto.graph.node[i]
# Prevent changed names from being changed again
output_visited_node = []
input_visited_node = []
for key, value in output_names.items():
if key in node.output:
index = self.find_index(node.output, key)
if index in output_visited_node:
continue
output_visited_node.append(index)
onnx_proto.graph.node[i].output[index] = value
if key in node.input:
index = self.find_index(node.input, key)
if index in input_visited_node:
continue
input_visited_node.append(index)
onnx_proto.graph.node[i].input[index] = value
return onnx_proto
def export_proto(self, enable_onnx_checker=False, output_names=None):
op_nodes = [node.onnx_node for node in self.node_map.values()]
weight_nodes = [node for node in self.parameters.values()]
onnx_graph = helper.make_graph(
nodes=weight_nodes + op_nodes,
name='paddle-onnx',
initializer=[],
inputs=self.input_nodes,
outputs=self.output_nodes)
opset_imports = [helper.make_opsetid("", self.opset_version)]
for custom_domain in self.custom:
opset_imports.append(helper.make_opsetid(custom_domain, 1))
onnx_proto = helper.make_model(
onnx_graph, producer_name=PRODUCER, opset_imports=opset_imports)
if output_names is not None:
onnx_proto = self.change_output_names(onnx_proto, output_names)
if enable_onnx_checker:
check_model(onnx_proto)
return onnx_proto
@staticmethod
def build(paddle_graph,
opset_version,
operator_export_type="ONNX",
verbose=False,
auto_update_opset=True):
onnx_graph = ONNXGraph(
paddle_graph,
opset_version=opset_version,
operator_export_type=operator_export_type,
auto_update_opset=auto_update_opset)
onnx_graph.build_parameters(paddle_graph.parameters)
onnx_graph.build_input_nodes(paddle_graph.input_nodes)
onnx_graph.build_output_nodes(paddle_graph.output_nodes)
onnx_graph.build_op_nodes(paddle_graph.node_map)
return onnx_graph
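
An end-to-end sketch (not part of the diff) mirroring the flow export_onnx in convert.py follows, assuming paddle_graph was built beforehand:

onnx_graph = ONNXGraph.build(paddle_graph, opset_version=11)
proto = onnx_graph.export_proto(enable_onnx_checker=True)
with open('model.onnx', 'wb') as f:
    f.write(proto.SerializeToString())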


@@ -0,0 +1,303 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import copy
import collections
import numpy as np
import paddle
from paddle import fluid
from paddle.fluid import dygraph
from paddle.fluid.framework import Operator
from paddle2onnx.legacy.graph import Node, Graph
from paddle2onnx.legacy.constant import NodeDomain
from paddle2onnx.utils import logging
class PaddleNode(Node):
def __init__(self, paddle_op, inputs, outputs, attrs, layer_name, block):
super(PaddleNode, self).__init__(paddle_op.type, inputs, outputs, attrs,
layer_name, NodeDomain.PADDLE)
self.paddle_op = paddle_op
self.block = block
def __str__(self):
node_str = ''
attrs = ''
for key, value in self.attrs.items():
if key == 'op_callstack':
continue
attrs += ', ' + key + '=' + str(value)
node_str += " {} = {}::{}(inputs={}{}) \n".format(
self.outputs, self.domain, self.type, self.inputs, attrs)
return node_str
@property
def input_names(self):
return [name for name in self.inputs.keys()]
@property
def output_names(self):
return [name for name in self.outputs.keys()]
def input(self, name, idx=None):
if name not in self.inputs:
return None
if idx is None:
return self.inputs[name]
if len(self.inputs[name]) <= idx:
return None
return self.inputs[name][idx]
def output(self, name, idx=None):
if idx is None:
return self.outputs[name]
return self.outputs[name][idx]
def output_shape(self, name, idx):
return self.block.var(self.output(name, idx)).shape
def input_shape(self, name, idx):
return self.block.var(self.input(name, idx)).shape
def input_var(self, name, idx):
return self.block.var(self.input(name, idx))
def input_dtype(self, name, idx):
return self.block.var(self.input(name, idx)).dtype
def output_dtype(self, name, idx):
return self.block.var(self.output(name, idx)).dtype
def attr(self, name, default=None):
if name in self.attrs:
return self.attrs[name]
return default
def set_inputs(self, inputs):
if isinstance(inputs, dict):
# input of node in paddle, which stored by dict
self.inputs = inputs
else:
raise TypeError('Inputs of node must be type: dict, but got {}'.
format(type(inputs)))
def set_outputs(self, outputs):
if isinstance(outputs, dict):
# output of node in paddle, which stored by dict
self.outputs = outputs
else:
raise TypeError('Outputs of node must be type: dict, but got {}'.
format(type(outputs)))
class PaddleGraph(Graph):
def __init__(self, program, parameters, feed_var_names, fetch_vars):
super(PaddleGraph, self).__init__()
self.build_graph(program, parameters, feed_var_names, fetch_vars)
def make_node(self,
op,
inputs=None,
outputs=None,
attrs=None,
block=None,
layer_name=None,
**kw):
if layer_name is None:
layer_name = self.generate_node_name(op.type)
if attrs is None:
attrs = kw
attrs.update(kw)
if inputs is None:
inputs = {}
if outputs is None:
outputs = {'Out': layer_name}
node = PaddleNode(op, inputs, outputs, attrs, layer_name, block)
self.insert_node(node)
return node
def add_input_node(self, inputs, block=None):
for ipt in inputs:
# parse feed_names
layer_name = ipt
var = block.var(ipt)
attrs = {}
attrs['shape'] = var.shape
attrs['dtype'] = var.dtype
node = Node('feed', [], [layer_name], attrs, layer_name)
self.input_nodes.append(node)
def add_output_node(self, outputs, block=None):
from paddle.fluid.framework import Variable
for opt in outputs:
# parse fetch_target_vars
layer_name = opt.name
attrs = {}
attrs['shape'] = opt.shape
attrs['dtype'] = opt.dtype
node = Node('fetch', [layer_name], [], attrs, layer_name)
self.output_nodes.append(node)
def get_adjacency_map(self):
adjacency_map = {}
for layer_name, current_node in self.node_map.items():
inputs = current_node.inputs.values()
inputs = [x for j in inputs for x in j]
for ipt in inputs:
for layer_name, node in self.node_map.items():
if current_node == node:
continue
outputs = node.outputs.values()
outputs = [x for j in outputs for x in j]
if ipt in outputs:
if node not in adjacency_map:
adjacency_map[node] = set([current_node])
else:
adjacency_map[node].add(current_node)
return adjacency_map
def build_graph(self,
program,
parameters,
feed_var_names=None,
target_vars=None):
self.program = program
self.set_parameters(parameters)
self.add_input_node(feed_var_names, program.global_block())
self.add_output_node(target_vars, program.global_block())
for block in program.blocks:
for i, op in enumerate(block.ops):
if op.type in ['feed', 'fetch']:
continue
else:
inputs = {}
outputs = {}
for ipt in op.input_names:
inputs[ipt] = op.input(ipt)
for opt in op.output_names:
outputs[opt] = op.output(opt)
node = self.make_node(op, inputs, outputs,
op.all_attrs(), block)
@staticmethod
def build_from_program(program,
feed_var_names=None,
fetch_vars=None,
scope=None):
parameters_dict = {}
vars = program.global_block().vars
for name in vars:
var = program.global_block().var(name)
if name.endswith('feed') or name.endswith('fetch'):
continue
if not var.persistable:
continue
parameters_dict[name] = {
'data': np.array(scope.var(name).get_tensor()),
'dtype': var.dtype,
'shape': var.shape
}
graph = PaddleGraph(program, parameters_dict, feed_var_names,
fetch_vars)
return graph
@staticmethod
def build_from_dygraph(layer, input_spec=None, output_spec=None):
from paddle.nn import Layer
from paddle.fluid import core
from paddle.fluid.framework import Variable
from paddle2onnx.legacy.graph import dygraph_helper as dg_helper
if isinstance(layer, dygraph.TranslatedLayer):
program = layer.program()
parameters_dict = {}
pruned_vars = program.global_block().vars
for param in layer.parameters():
if param.name.endswith('feed') or param.name.endswith('fetch'):
continue
if not param.persistable:
continue
if param.name in pruned_vars:
parameters_dict[param.name] = {
'data': np.array(param.value().get_tensor()),
'dtype': param.dtype,
'shape': param.shape
}
for param in layer.buffers():
if param.name.endswith('feed') or param.name.endswith('fetch'):
continue
if not param.value().get_tensor()._is_initialized():
continue
if param.name in pruned_vars:
parameters_dict[param.name] = {
'data': np.array(param.value().get_tensor()),
'dtype': param.dtype,
'shape': param.shape
}
            if input_spec is not None:
                logging.warning(
                    "Although input_spec is specified, TranslatedLayer does not support pruning, so the complete network will be exported."
                )
                input_spec = layer._input_spec()
            if output_spec is not None:
                logging.warning(
                    "Although output_spec is specified, TranslatedLayer does not support pruning, so the complete network will be exported."
                )
feed_var_names = [ipt.name for ipt in layer._input_spec()]
fetch_vars = [
program.global_block().var(opt.name)
for opt in layer._output_spec()
]
graph = PaddleGraph(program, parameters_dict, feed_var_names,
fetch_vars)
return graph
elif isinstance(layer, Layer):
program, feed_var_names, fetch_vars = dg_helper.get_program(
layer, input_spec, output_spec)
parameters_dict = {}
pruned_vars = program.global_block().vars
for param in layer.parameters():
if param.name.endswith('feed') or param.name.endswith('fetch'):
continue
if not param.persistable:
continue
if param.name in pruned_vars:
parameters_dict[param.name] = {
'data': np.array(param.value().get_tensor()),
'dtype': param.dtype,
'shape': param.shape
}
for param in layer.buffers():
if param.name.endswith('feed') or param.name.endswith('fetch'):
continue
if not param.value().get_tensor()._is_initialized():
continue
if param.name in pruned_vars:
parameters_dict[param.name] = {
'data': np.array(param.value().get_tensor()),
'dtype': param.dtype,
'shape': param.shape
}
graph = PaddleGraph(program, parameters_dict, feed_var_names,
fetch_vars)
return graph
else:
raise TypeError(
"The input Layer should be 'Layer' or 'TranslatedLayer', but received type is %s."
% type(layer))
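
A sketch (not part of the diff) of the static-graph entry point, assuming program and fetch_vars come from a previously loaded inference model:

import paddle

paddle.enable_static()
graph = PaddleGraph.build_from_program(
    program,
    feed_var_names=['x'],  # placeholder feed name
    fetch_vars=fetch_vars,
    scope=paddle.static.global_scope())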


@@ -0,0 +1,39 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .op_mapper import OpMapper, register_op_mapper, CustomPaddleOp, register_custom_paddle_op
from . import nn
from . import math
from . import activation
from . import tensor
from . import logic
from . import search
from .detection import yolo_box
from .detection import multiclass_nms
from .detection import prior_box
from .detection import density_prior_box
from .detection import box_coder
from .sequence import im2sequence
from .custom_paddle_op import deformable_conv
from .custom_paddle_op import anchor_generator
from .custom_paddle_op import generate_proposals
from .custom_paddle_op import collect_fpn_proposals
from .custom_paddle_op import distribute_fpn_proposals
from .custom_paddle_op import box_clip
from .custom_paddle_op import grid_sampler


@@ -0,0 +1,269 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import math
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
import paddle
@op_mapper(
['relu', 'tanh', 'log', 'sigmoid', 'sqrt'],
mapper_dict={
'relu': 'Relu',
'tanh': 'Tanh',
'log': 'Log',
'sigmoid': 'Sigmoid',
'sqrt': 'Sqrt',
})
class ActivationOps():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
onnx_type = kw['mapper_dict'][node.type]
onnx_node = graph.make_node(
onnx_type, inputs=node.input('X'), outputs=node.output('Out'))
@op_mapper('silu')
class Silu():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
x = node.input('X')[0]
out = graph.make_node('Sigmoid', inputs=[x])
graph.make_node('Mul', inputs=[x, out], outputs=node.output('Out'))
@op_mapper('leaky_relu')
class LeakyRelu():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
onnx_node = graph.make_node(
'LeakyRelu',
inputs=[node.input('X')[0]],
outputs=node.output('Out'),
alpha=node.attr('alpha'))
@op_mapper('softplus')
class Softplus():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
beta = node.attr('beta')
threshold = node.attr('threshold')
if np.isclose(beta, 1.0, 1e-06, 1e-06) and \
np.isclose(threshold, 20.0, 1e-06, 1e-06):
onnx_node = graph.make_node(
'Softplus',
inputs=[node.input('X')[0]],
outputs=node.output('Out'))
else:
raise Exception("[ERROR] Operator softplus " \
"only supported while beta==1.0 and threshold==20.0")
@op_mapper('prelu')
class PRelu():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
slope_shape = node.input_shape('Alpha', 0)
input_shape = node.input_shape('X', 0)
slope_node = node.input('Alpha')[0]
if len(input_shape) != len(slope_shape):
assert len(
slope_shape) == 1, "Slope shape is not expected for prelu"
broadcast_shape = [-1] + [1] * (len(input_shape) - 2)
broadcast_shape = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=broadcast_shape)
slope_node = graph.make_node(
'Reshape', inputs=[node.input('Alpha')[0], broadcast_shape])
x = node.input('X')[0]
x_dtype = node.input_dtype('X', 0)
slope_dtype = node.input_dtype('Alpha', 0)
if slope_dtype != paddle.float32:
slope_node = graph.make_node(
'Cast', inputs=[slope_node], to=dtypes.ONNX.FLOAT)
if x_dtype != paddle.float32:
x = graph.make_node('Cast', inputs=[x], to=dtypes.ONNX.FLOAT)
onnx_node = graph.make_node('PRelu', inputs=[x, slope_node])
graph.make_node(
'Cast',
inputs=[onnx_node],
outputs=node.output('Out'),
to=dtypes.DTYPE_PADDLE_ONNX_MAP[x_dtype])
else:
onnx_node = graph.make_node(
'PRelu', inputs=[x, slope_node], outputs=node.output('Out'))
@op_mapper('relu6')
class Relu6():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
mapper_helper.clip_helper(graph, node,
node.input('X', 0),
node.attr('threshold'), 0.0,
node.output('Out', 0))
@op_mapper('gelu')
class Gelu():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
input = node.input('X', 0)
x_dtype = node.input_dtype('X', 0)
# onnxruntime only support float32 Erf
if x_dtype != paddle.float32:
input = graph.make_node(
'Cast', inputs=[input], to=dtypes.ONNX.FLOAT)
sqrt2 = graph.make_node(
'Constant', dtype=dtypes.ONNX.FLOAT, value=[1.4142135623730951])
zero_point_five = graph.make_node(
'Constant', dtype=dtypes.ONNX.FLOAT, value=[0.5])
one = graph.make_node('Constant', dtype=dtypes.ONNX.FLOAT, value=[1])
x = graph.make_node('Div', inputs=[input, sqrt2])
x = graph.make_node('Erf', inputs=x)
x = graph.make_node('Add', inputs=[x, one])
x = graph.make_node('Mul', inputs=[input, x])
if x_dtype != paddle.float32:
mul_node = graph.make_node('Mul', inputs=[x, zero_point_five])
graph.make_node(
'Cast',
inputs=[mul_node],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[x_dtype],
outputs=node.output('Out'))
else:
graph.make_node(
'Mul', inputs=[x, zero_point_five], outputs=node.output('Out'))
@op_mapper('selu')
class Selu():
support_opset_version_range = (7, 15)
@classmethod
def opset_6(cls, graph, node, **kw):
graph.make_node(
'Selu',
inputs=node.input('X'),
alpha=node.attr('alpha'),
gamma=node.attr('scale'),
outputs=node.output('Out'))
@op_mapper('hard_sigmoid')
class HardSigmoid():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
slope = node.attr('slope')
offset = node.attr('offset')
graph.make_node(
'HardSigmoid',
inputs=node.input('X'),
outputs=node.output('Out'),
alpha=slope,
beta=offset)
@op_mapper('swish')
class Swish():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
x = node.input('X')[0]
if math.fabs(node.attr("beta") - 1.0) > 1e-05:
beta_node = graph.make_node(
'Constant',
attrs={'dtype': dtypes.ONNX.FLOAT,
'value': [node.attr('beta')]})
x = graph.make_node(
'Mul', inputs=[x, beta_node])
sigmoid_node = graph.make_node('Sigmoid', inputs=[x])
graph.make_node(
'Mul',
inputs=[x, sigmoid_node],
outputs=node.output('Out'))
@op_mapper('mish')
class Mish():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
inputs = node.input('X', 0)
dtype = node.input_dtype("X", 0)
if dtype != paddle.float32:
inputs = graph.make_node(
'Cast', inputs=[inputs], to=dtypes.ONNX.FLOAT)
dtype = paddle.float32
threshold = node.attr('threshold')
assert np.fabs(
threshold - 20
) < 1e-4, "In mish OP, the threshold only supports 20, no other values are supported"
softplus_node = graph.make_node('Softplus', inputs=[inputs])
tanh_node = graph.make_node('Tanh', inputs=[softplus_node])
if node.input_dtype("X", 0) != paddle.float32:
mul_node = graph.make_node('Mul', inputs=[inputs, tanh_node])
inputs = graph.make_node(
'Cast',
inputs=[mul_node],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype("X", 0)],
outputs=node.output('Out'))
else:
graph.make_node(
'Mul', inputs=[inputs, tanh_node], outputs=node.output('Out'))
@op_mapper('hard_swish')
class HardSwish():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
scale_node = graph.make_node(
'Constant',
attrs={'dtype': dtypes.ONNX.FLOAT,
'value': node.attr('scale')})
offset_node = graph.make_node(
'Constant',
attrs={'dtype': dtypes.ONNX.FLOAT,
'value': node.attr('offset')})
node0 = graph.make_node('Add', inputs=[node.input('X')[0], offset_node])
node1 = mapper_helper.clip_helper(graph, node, node0,
node.attr('threshold'), 0.0)
node2 = graph.make_node('Mul', inputs=[node.input('X')[0], node1])
node3 = graph.make_node(
'Div', inputs=[node2, scale_node], outputs=node.output('Out'))
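
For reference, the Erf-based decomposition that the Gelu mapper above emits can be checked standalone. This is a minimal numpy sketch, not part of the diff, assuming scipy is available for erf:

import numpy as np
from scipy.special import erf

def gelu_reference(x):
    # mirrors the emitted ONNX graph: Div(sqrt(2)) -> Erf -> Add(1) -> Mul(x) -> Mul(0.5)
    return x * (erf(x / np.sqrt(2.0)) + 1.0) * 0.5

print(gelu_reference(np.linspace(-3, 3, 7).astype("float32")))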

View File

@@ -0,0 +1,13 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,97 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class AnchorGenerator(CustomPaddleOp):
def __init__(self, node, **kw):
super(AnchorGenerator, self).__init__(node)
#self.x_shape = node.input_shape('Input', 0)
self.anchor_sizes = node.attr('anchor_sizes')
self.aspect_ratios = node.attr('aspect_ratios')
self.offset = node.attr('offset')
self.strides = node.attr('stride')
self.variances = node.attr('variances')
self.shapes = self.compute_shapes()
def compute_shapes(self):
shapes = list()
for r in range(len(self.aspect_ratios)):
ar = self.aspect_ratios[r]
for s in range(len(self.anchor_sizes)):
anchor_size = self.anchor_sizes[s]
area = self.strides[0] * self.strides[1]
area_ratios = area / ar
base_w = np.floor(np.sqrt(area_ratios) + 0.5)
base_h = np.floor(base_w * ar + 0.5)
scale_w = anchor_size / self.strides[0]
scale_h = anchor_size / self.strides[1]
w = scale_w * base_w
h = scale_h * base_h
shapes.append([
-0.5 * (w - 1), -0.5 * (h - 1), 0.5 * (w - 1), 0.5 * (h - 1)
])
return shapes
def forward(self):
input_feature = self.input('Input', 0)
input_shape = paddle.shape(input_feature)
n, c, h, w = paddle.tensor.split(input_shape, num_or_sections=4)
x_ctr = paddle.arange(start=0, end=w, step=1, dtype=input_feature.dtype)
y_ctr = paddle.arange(start=0, end=h, step=1, dtype=input_feature.dtype)
x_ctr = x_ctr * self.strides[0] + self.offset * (self.strides[0] - 1)
y_ctr = y_ctr * self.strides[1] + self.offset * (self.strides[1] - 1)
tensor_one = paddle.ones(shape=[1], dtype='int64')
tensor_len_shape = paddle.full(
shape=[1], fill_value=len(self.shapes), dtype='int64')
x_ctr = paddle.reshape(x_ctr, shape=(1, -1))
y_ctr = paddle.reshape(y_ctr, shape=(1, -1))
x_ctr = paddle.tile(x_ctr, repeat_times=(h, tensor_one))
y_ctr = paddle.tile(y_ctr, repeat_times=(w, tensor_one))
y_ctr = paddle.transpose(y_ctr, perm=[1, 0])
centers = paddle.stack([x_ctr, y_ctr], axis=-1)
centers = paddle.tensor.unsqueeze(centers, axis=[2])
centers = paddle.tile(centers, repeat_times=(1, 1, len(self.shapes), 2))
shape_tensor = paddle.assign(np.array(self.shapes).astype('float32'))
anchors = centers + shape_tensor
variance_tensor = paddle.assign(
np.asarray(self.variances).astype('float32'))
vars = paddle.reshape(variance_tensor, shape=[1, 1, 1, -1])
vars = paddle.tile(
vars, repeat_times=(h, w, tensor_len_shape, tensor_one))
return {'Anchors': [anchors], 'Variances': [vars]}
@op_mapper('anchor_generator')
class Anchors_generator:
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'anchor_generator',
inputs=node.input('Input'),
outputs=node.output('Anchors') + node.output('Variances'),
anchor_sizes = node.attr('anchor_sizes'),
aspect_ratios = node.attr('aspect_ratios'),
offset = node.attr('offset'),
strides = node.attr('stride'),
variances = node.attr('variances'),
domain = 'custom')
register_custom_paddle_op('anchor_generator', AnchorGenerator)
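
The anchor shape table built by compute_shapes above can be reproduced in isolation. A hedged numpy sketch for a single (aspect_ratio, anchor_size) pair; the stride and size values are made up for illustration:

import numpy as np

def anchor_shape(ar, anchor_size, strides):
    # same arithmetic as AnchorGenerator.compute_shapes for one entry
    area = strides[0] * strides[1]
    base_w = np.floor(np.sqrt(area / ar) + 0.5)
    base_h = np.floor(base_w * ar + 0.5)
    w = (anchor_size / strides[0]) * base_w
    h = (anchor_size / strides[1]) * base_h
    return [-0.5 * (w - 1), -0.5 * (h - 1), 0.5 * (w - 1), 0.5 * (h - 1)]

print(anchor_shape(ar=0.5, anchor_size=32.0, strides=[16.0, 16.0]))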

View File

@@ -0,0 +1,56 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class BoxClip(CustomPaddleOp):
def __init__(self, node, **kw):
super(BoxClip, self).__init__(node)
def forward(self):
input = self.input('Input', 0)
im_info = self.input('ImInfo', 0)
im_info = paddle.reshape(im_info, shape=[3])
h, w, s = paddle.tensor.split(im_info, axis=0, num_or_sections=3)
tensor_one = paddle.full(shape=[1], dtype='float32', fill_value=1.0)
tensor_zero = paddle.full(shape=[1], dtype='float32', fill_value=0.0)
h = paddle.subtract(h, tensor_one)
w = paddle.subtract(w, tensor_one)
xmin, ymin, xmax, ymax = paddle.tensor.split(
input, axis=-1, num_or_sections=4)
xmin = paddle.maximum(paddle.minimum(xmin, w), tensor_zero)
ymin = paddle.maximum(paddle.minimum(ymin, h), tensor_zero)
xmax = paddle.maximum(paddle.minimum(xmax, w), tensor_zero)
ymax = paddle.maximum(paddle.minimum(ymax, h), tensor_zero)
cliped_box = paddle.concat([xmin, ymin, xmax, ymax], axis=-1)
return {'Output': [cliped_box]}
@op_mapper('box_clip')
class Boxclip:
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'box_clip',
inputs=node.input('Input')+node.input('ImInfo'),
outputs=node.output('Output'),
domain = 'custom')
register_custom_paddle_op('box_clip', BoxClip)
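
The clipping rule in BoxClip.forward is simple enough to state as a hedged numpy sketch (corners clamped to [0, size - 1]; the box values below are hypothetical):

import numpy as np

def box_clip(boxes, im_h, im_w):
    # boxes: (N, 4) as [xmin, ymin, xmax, ymax]
    h, w = im_h - 1.0, im_w - 1.0
    xmin = np.clip(boxes[:, 0], 0.0, w)
    ymin = np.clip(boxes[:, 1], 0.0, h)
    xmax = np.clip(boxes[:, 2], 0.0, w)
    ymax = np.clip(boxes[:, 3], 0.0, h)
    return np.stack([xmin, ymin, xmax, ymax], axis=-1)

print(box_clip(np.array([[-5.0, 3.0, 120.0, 90.0]]), im_h=100.0, im_w=100.0))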

View File

@@ -0,0 +1,55 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class CollectFpnProposals(CustomPaddleOp):
def __init__(self, node, **kw):
super(CollectFpnProposals, self).__init__(node)
self.post_nms_top_n = node.attr('post_nms_topN')
def forward(self):
multi_level_rois = self.input('MultiLevelRois')
multi_level_scores = self.input('MultiLevelScores')
multi_level_rois = paddle.concat(multi_level_rois, axis=0)
multi_level_scores = paddle.concat(multi_level_scores, axis=0)
proposal_num = paddle.shape(multi_level_scores)[0]
post_nms_top_n_tensor = paddle.assign(
np.array([self.post_nms_top_n]).astype('int32'))
k_candidate = paddle.concat([proposal_num, post_nms_top_n_tensor])
k = paddle.min(k_candidate)
scores, index = paddle.topk(multi_level_scores, k=k, axis=0)
rois = paddle.gather(multi_level_rois, index, axis=0)
return {"FpnRois": [rois]}
@op_mapper('collect_fpn_proposals')
class Collectfpnproposals:
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'collect_fpn_proposals',
inputs=node.input('MultiLevelRois')+ node.input('MultiLevelScores'),
outputs=node.output('FpnRois'),
post_nms_top_n = node.attr('post_nms_topN'),
domain = 'custom')
register_custom_paddle_op('collect_fpn_proposals', CollectFpnProposals)
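
CollectFpnProposals.forward above boils down to a concat-then-top-k. A hedged numpy sketch with made-up shapes:

import numpy as np

def collect_fpn_proposals(rois_per_level, scores_per_level, post_nms_top_n):
    rois = np.concatenate(rois_per_level, axis=0)
    scores = np.concatenate(scores_per_level, axis=0)
    k = min(scores.shape[0], post_nms_top_n)   # k = min(N, post_nms_top_n)
    order = np.argsort(-scores)[:k]            # indices of the top-k scores
    return rois[order]

rois = [np.random.rand(5, 4), np.random.rand(3, 4)]
scores = [np.random.rand(5), np.random.rand(3)]
print(collect_fpn_proposals(rois, scores, post_nms_top_n=4).shape)  # (4, 4)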

View File

@@ -0,0 +1,296 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx import utils
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class DeformConv2d(CustomPaddleOp):
def check_attribute(self, node):
utils.compare_attr_between_dims(
node.attr('strides'), (0, 1), 'strides', 'equal')
utils.compare_attr_between_dims(
node.attr('paddings'), (0, 1), 'paddings', 'equal')
utils.compare_attr_between_dims(
node.input_shape('Offset', 0), (2, 3), 'Offset', 'equal')
utils.compare_attr(
node.attr('deformable_groups'), 1, 'deformable_groups', 'equal')
def __init__(self, node, **kw):
super(DeformConv2d, self).__init__(node)
self.check_attribute(node)
self.in_channel = node.input_shape('Input', 0)[1]
self.offset_channel = node.input_shape('Offset', 0)[1]
self.stride = node.attr('strides')[0]
self.padding = node.attr('paddings')
if len(self.padding) == 2:
self.padding += self.padding
self.groups = node.attr('groups')
self.dilation = node.attr('dilations')[0]
self.padded_x_h = node.input_shape('Input', 0)[2]
self.padded_x_w = node.input_shape('Input', 0)[3]
if self.padded_x_h > 0:
self.padded_x_h = self.padded_x_h + self.padding[0] + self.padding[1]
if self.padded_x_w > 0:
self.padded_x_w = self.padded_x_w + self.padding[2] + self.padding[3]
self.kernel_size = node.input_shape('Filter', 0)[2]
self.N = self.kernel_size**2
self.num_filters = node.input_shape('Filter', 0)[0]
def forward(self):
input = self.input('Input', 0)
weight = self.input('Filter', 0)
mask = self.input('Mask', 0)
offset = self.input('Offset', 0)
input = layers.pad2d(input, self.padding)
input_shape = paddle.shape(input)
if self.padded_x_h < 0 or self.padded_x_w < 0:
self.padded_x_h = input_shape[2]
self.padded_x_w = input_shape[3]
offset_x = paddle.strided_slice(
offset,
axes=[1],
starts=[0],
ends=[self.offset_channel],
strides=[2])
offset_y = paddle.strided_slice(
offset,
axes=[1],
starts=[1],
ends=[self.offset_channel],
strides=[2])
offset = paddle.concat([offset_x, offset_y], axis=1)
offset_shape = paddle.shape(offset)
offset_h = offset_shape[2]
offset_w = offset_shape[3]
coordinate = self.get_offset_coordinate(offset, 'float32', offset_shape)
coordinate = coordinate.transpose((0, 2, 3, 1))
coord_lt, coord_rb, coord_lb, coord_rt = self.get_bilinear_corner_coordinate(
coordinate, self.padded_x_h, self.padded_x_w)
# clip coordinate
coordinate = paddle.concat(
[
paddle.clip(coordinate[:, :, :, :self.N], 0,
self.padded_x_h - 1),
paddle.clip(coordinate[:, :, :, self.N:], 0,
self.padded_x_w - 1)
],
axis=-1)
cof_lt, cof_rb, cof_lb, cof_rt = self.get_bilinear_coefficient(
coord_lt, coord_rb, coord_lb, coord_rt, coordinate)
feature_lt = self.get_feature_by_coordinate(input, coord_lt, offset_h,
offset_w, self.padded_x_w)
feature_rb = self.get_feature_by_coordinate(input, coord_rb, offset_h,
offset_w, self.padded_x_w)
feature_lb = self.get_feature_by_coordinate(input, coord_lb, offset_h,
offset_w, self.padded_x_w)
feature_rt = self.get_feature_by_coordinate(input, coord_rt, offset_h,
offset_w, self.padded_x_w)
feature_after_deformation = paddle.unsqueeze(cof_lt, 1) * feature_lt + \
paddle.unsqueeze(cof_rb, 1) * feature_rb + \
paddle.unsqueeze(cof_lb, 1) * feature_lb + \
paddle.unsqueeze(cof_rt, 1) * feature_rt
# modulation
if mask is not None:
mask = paddle.transpose(mask, (0, 2, 3, 1))
mask = paddle.unsqueeze(mask, 1)
mask = paddle.tile(mask, [1, self.in_channel, 1, 1, 1])
feature_after_deformation *= mask
feature_after_deformation = self.reshape_feature(
feature_after_deformation, offset_h, offset_w)
out = paddle.nn.functional.conv2d(
feature_after_deformation,
weight,
stride=self.kernel_size,
groups=self.groups)
return {'Output': [out]}
def get_offset_coordinate(self, offset, dtype, offset_shape):
kernel_grid_origin_x = paddle.arange(
0,
self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1),
step=self.dilation,
dtype=dtype)
kernel_grid_origin_x = kernel_grid_origin_x.unsqueeze(1)
kernel_grid_origin_x = paddle.tile(kernel_grid_origin_x,
[1, self.kernel_size])
kernel_grid_origin_y = paddle.arange(
0,
self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1),
step=self.dilation,
dtype=dtype)
kernel_grid_origin_y = kernel_grid_origin_y.unsqueeze(0)
kernel_grid_origin_y = paddle.tile(kernel_grid_origin_y,
[self.kernel_size, 1])
kernel_grid_origin_x = paddle.reshape(kernel_grid_origin_x, [-1])
kernel_grid_origin_y = paddle.reshape(kernel_grid_origin_y, [-1])
kernel_grid_origin = paddle.concat(
[kernel_grid_origin_x, kernel_grid_origin_y], -1)
kernel_grid_origin = paddle.reshape(kernel_grid_origin,
(1, 2 * self.N, 1, 1))
kernel_offset_x = paddle.arange(
0, offset_shape[2] * self.stride, step=self.stride, dtype=dtype)
kernel_offset_x = kernel_offset_x.unsqueeze(1)
kernel_offset_x = paddle.expand(kernel_offset_x, offset_shape[2:])
kernel_offset_y = paddle.arange(
0, offset_shape[3] * self.stride, step=self.stride, dtype=dtype)
kernel_offset_y = kernel_offset_y.unsqueeze(0)
kernel_offset_y = paddle.expand(kernel_offset_y, offset_shape[2:])
kernel_offset_x = kernel_offset_x.unsqueeze([0, 1])
kernel_offset_x = paddle.tile(kernel_offset_x, (1, self.N, 1, 1))
kernel_offset_y = kernel_offset_y.unsqueeze([0, 1])
kernel_offset_y = paddle.tile(kernel_offset_y, (1, self.N, 1, 1))
kernel_offset = paddle.concat([kernel_offset_x, kernel_offset_y], 1)
offset = offset + paddle.cast(kernel_offset, 'float32') + paddle.cast(
kernel_grid_origin, 'float32')
return offset
def get_bilinear_corner_coordinate(self, coord, padded_h, padded_w):
coord_lt = coord.floor()
coord_rb = coord_lt + 1
coord_lt = paddle.cast(
paddle.concat(
[
paddle.clip(coord_lt[:, :, :, :self.N], 0, padded_h - 1),
paddle.clip(coord_lt[:, :, :, self.N:], 0, padded_w - 1)
],
axis=-1),
dtype='int64')
coord_rb = paddle.cast(
paddle.concat(
[
paddle.clip(coord_rb[:, :, :, :self.N], 0, padded_h - 1),
paddle.clip(coord_rb[:, :, :, self.N:], 0, padded_w - 1)
],
axis=-1),
dtype='int64')
coord_lb = paddle.concat(
[coord_lt[:, :, :, :self.N], coord_rb[:, :, :, self.N:]], axis=-1)
coord_rt = paddle.concat(
[coord_rb[:, :, :, :self.N], coord_lt[:, :, :, self.N:]], axis=-1)
return coord_lt, coord_rb, coord_lb, coord_rt
def get_bilinear_coefficient(self, coord_lt, coord_rb, coord_lb, coord_rt,
p):
cof_lt = (1 + (paddle.cast(
coord_lt[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N])
) * (1 + paddle.cast(
coord_lt[:, :, :, self.N:], dtype='float32') -
p[:, :, :, self.N:])
cof_rb = (1 - (paddle.cast(
coord_rb[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N])
) * (1 - (paddle.cast(
coord_rb[:, :, :, self.N:], dtype='float32') -
p[:, :, :, self.N:]))
cof_lb = (1 + (paddle.cast(
coord_lb[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N])
) * (1 - (paddle.cast(
coord_lb[:, :, :, self.N:], dtype='float32') -
p[:, :, :, self.N:]))
cof_rt = (1 - (paddle.cast(
coord_rt[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N])
) * (1 + paddle.cast(
coord_rt[:, :, :, self.N:], dtype='float32') -
p[:, :, :, self.N:])
return cof_lt, cof_rb, cof_lb, cof_rt
def get_feature_by_coordinate(self, x, coord, offset_h, offset_w,
padded_x_w):
x = paddle.reshape(x, [0, 0, -1])
index = paddle.cast(
coord[:, :, :, :self.N] * padded_x_w,
dtype='int64') + coord[:, :, :, self.N:] # offset_x*w + offset_y
index = paddle.unsqueeze(index, 1)
index = paddle.tile(index, [1, self.in_channel, 1, 1, 1])
index = paddle.reshape(index, (0, 0, -1))
x_range = list(range(3))
dim = 2
x_range[0] = dim
x_range[dim] = 0
x_swaped = paddle.transpose(x, perm=x_range)
index_range = list(range(3))
index_range[0] = dim
index_range[dim] = 0
index_swaped = paddle.transpose(index, perm=index_range)
x_shape = layers.shape(x_swaped)
index_shape = layers.shape(index_swaped)
prod = paddle.prod(x_shape[1:], keepdim=True)
x_swaped_flattend = paddle.reshape(x_swaped, [-1])
index_swaped_flattend = paddle.reshape(index_swaped, [-1])
index_swaped_flattend *= prod
bias = paddle.arange(start=0, end=prod, step=1, dtype='float32')
bias = paddle.tile(bias, index_shape[0])
index_swaped_flattend += bias
gathered = paddle.gather(x_swaped_flattend, index_swaped_flattend)
gathered = paddle.reshape(gathered, layers.shape(index_swaped))
x_offset = paddle.transpose(gathered, perm=x_range)
x_offset = paddle.reshape(
x_offset, (-1, self.in_channel, offset_h, offset_w, self.N))
return x_offset
def reshape_feature(self, x_offset, offset_h, offset_w):
x_offset = paddle.concat(
[
paddle.reshape(x_offset[:, :, :, :, s:s + self.kernel_size], (
-1, self.in_channel, offset_h, offset_w * self.kernel_size))
for s in range(0, self.N, self.kernel_size)
],
axis=-1)
x_offset = paddle.reshape(x_offset, (-1, self.in_channel,
offset_h * self.kernel_size,
offset_w * self.kernel_size))
return x_offset
@op_mapper('deformable_conv')
class Deformconv2d:
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'deformable_conv',
inputs=node.input('Input')+node.input('Filter')+node.input('Mask')+node.input('Offset'),
outputs=node.output('Output'),
stride = node.attr('strides'),
padding = node.attr('paddings'),
groups = node.attr('groups'),
dilation = node.attr('dilations'),
deformable_groups = node.attr('deformable_groups'),
domain = 'custom')
register_custom_paddle_op('deformable_conv', DeformConv2d)
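
The four corner weights in get_bilinear_coefficient reduce to products of the fractional parts of the sampling point (since lt = floor(p) and rb = lt + 1, ignoring clipping at the borders). A hedged numpy sketch of that identity, with axis labels chosen only for illustration:

import numpy as np

def bilinear_weights(pa, pb):
    # cof_lt = (1-fa)(1-fb), cof_rb = fa*fb, cof_lb = (1-fa)*fb,
    # cof_rt = fa*(1-fb); the four weights always sum to 1
    fa, fb = pa - np.floor(pa), pb - np.floor(pb)
    return (1 - fa) * (1 - fb), fa * fb, (1 - fa) * fb, fa * (1 - fb)

print(bilinear_weights(2.3, 4.6))  # weights sum to 1.0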

View File

@@ -0,0 +1,100 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class DistributeFpnProposals(CustomPaddleOp):
def __init__(self, node, **kw):
super(DistributeFpnProposals, self).__init__(node)
self.max_level = node.attr('max_level')
self.min_level = node.attr('min_level')
self.refer_level = node.attr('refer_level')
self.refer_scale = node.attr('refer_scale')
self.pixel_offset = node.attr('pixel_offset')
def bbox_area(self, boxes):
offset = 1 if self.pixel_offset else 0
xmin, ymin, xmax, ymax = paddle.tensor.split(
boxes, axis=1, num_or_sections=4)
width = xmax - xmin + offset
height = ymax - ymin + offset
areas = width * height
return areas
def forward(self):
fpn_rois = self.input('FpnRois', 0)
areas = self.bbox_area(fpn_rois)
scale = paddle.sqrt(areas)
num_level = self.max_level - self.min_level + 1
target_level = paddle.log(scale / self.refer_scale + 1e-06) / np.log(2)
target_level = paddle.floor(self.refer_level + target_level)
target_level = paddle.clip(
target_level, min=self.min_level, max=self.max_level)
rois = list()
rois_idx_order = list()
rois_num_per_level = list()
for level in range(self.min_level, self.max_level + 1):
level_tensor = paddle.full_like(target_level, fill_value=level)
res = paddle.equal(target_level, level_tensor)
res = paddle.squeeze(res, axis=1)
res = paddle.cast(res, dtype='int32')
index = paddle.nonzero(res)
roi = paddle.gather(fpn_rois, index, axis=0)
rois.append(roi)
rois_idx_order.append(index)
rois_num_per_level.append(paddle.shape(roi)[0])
rois_idx_order = paddle.concat(rois_idx_order, axis=0)
size = paddle.shape(rois_idx_order)[0]
_, rois_idx_restore = paddle.topk(
rois_idx_order, axis=0, sorted=True, largest=False, k=size)
rois_idx_restore = paddle.cast(rois_idx_restore, dtype='int32')
if len(self.input('RoisNum')) > 0:
# trick: to keep rois num
rois_num_per_level[0] += self.input('RoisNum', 0) * 0
return {
'MultiFpnRois': rois,
'RestoreIndex': [rois_idx_restore],
'MultiLevelRoIsNum': rois_num_per_level
}
else:
return {'MultiFpnRois': rois, 'RestoreIndex': [rois_idx_restore]}
@op_mapper('distribute_fpn_proposals')
class Distributefpnproposals:
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'distribute_fpn_proposals',
inputs=node.input('FpnRois'),
outputs=node.output('MultiFpnRois') + node.output('RestoreIndex'),
max_level=node.attr('max_level'),
min_level=node.attr('min_level'),
refer_level=node.attr('refer_level'),
refer_scale=node.attr('refer_scale'),
domain='custom')
register_custom_paddle_op('distribute_fpn_proposals', DistributeFpnProposals)
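
The level-assignment rule used in DistributeFpnProposals.forward is the standard FPN formula. A hedged numpy sketch, with the usual refer values as assumed defaults:

import numpy as np

def target_levels(areas, refer_level=4, refer_scale=224, min_level=2, max_level=5):
    # level = clip(floor(refer_level + log2(sqrt(area) / refer_scale)), min, max)
    scale = np.sqrt(areas)
    lvl = np.floor(refer_level + np.log2(scale / refer_scale + 1e-6))
    return np.clip(lvl, min_level, max_level).astype("int64")

print(target_levels(np.array([32.0**2, 224.0**2, 800.0**2])))  # [2 4 5]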

View File

@@ -0,0 +1,223 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
import math
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
BBOX_CLIP_DEFAULT = math.log(1000.0 / 16.0)
class GenerateProposals(CustomPaddleOp):
def __init__(self, node, **kw):
paddle.enable_static()
super(GenerateProposals, self).__init__(node)
self.eta = node.attr('eta')
self.min_size = node.attr('min_size')
self.nms_thresh = node.attr('nms_thresh')
self.post_nms_topN = node.attr('post_nms_topN')
self.pre_nms_topN = node.attr('pre_nms_topN')
self.type = node.type
if self.type == 'generate_proposals_v2':
self.pixel_offset = node.attr('pixel_offset')
else:
self.pixel_offset = True
def filter_boxes(self, boxes, im_w, im_h, im_s, min_size):
min_size = max(min_size, 1.0)
xmin, ymin, xmax, ymax = paddle.tensor.split(
boxes, axis=1, num_or_sections=4)
x_ctr = (xmax + xmin) / 2 + 0.5
y_ctr = (ymax + ymin) / 2 + 0.5
ws = (xmax - xmin) / im_s + 1
hs = (ymax - ymin) / im_s + 1
min_size = np.asarray([min_size], dtype='float32')
min_size = paddle.assign(min_size)
valid_flag_ws = paddle.greater_equal(ws, min_size)
valid_flag_hs = paddle.greater_equal(hs, min_size)
valid_flag_x = paddle.less_equal(x_ctr, im_w)
valid_flag_y = paddle.less_equal(y_ctr, im_h)
valid_flag = paddle.logical_and(valid_flag_ws, valid_flag_hs)
valid_flag = paddle.logical_and(valid_flag, valid_flag_x)
valid_flag = paddle.logical_and(valid_flag, valid_flag_y)
valid_flag = paddle.squeeze(valid_flag, axis=1)
valid_inds = paddle.nonzero(valid_flag)
return valid_inds
def filter_boxes_v2(self, boxes, im_w, im_h, min_size, pixel_offset=True):
min_size = max(min_size, 1.0)
xmin, ymin, xmax, ymax = paddle.tensor.split(
boxes, axis=1, num_or_sections=4)
offset = 1 if pixel_offset else 0
ws = (xmax - xmin) + offset
hs = (ymax - ymin) + offset
min_size = np.asarray([min_size], dtype='float32')
min_size = paddle.assign(min_size)
valid_flag_ws = paddle.greater_equal(ws, min_size)
valid_flag_hs = paddle.greater_equal(hs, min_size)
valid_flag = paddle.logical_and(valid_flag_ws, valid_flag_hs)
if pixel_offset:
x_ctr = xmin + ws / 2
y_ctr = ymin + hs / 2
valid_flag_x = paddle.less_equal(x_ctr, im_w)
valid_flag_y = paddle.less_equal(y_ctr, im_h)
valid_flag = paddle.logical_and(valid_flag, valid_flag_x)
valid_flag = paddle.logical_and(valid_flag, valid_flag_y)
valid_flag = paddle.squeeze(valid_flag, axis=1)
valid_inds = paddle.nonzero(valid_flag)
return valid_inds
def clip_tiled_boxes(self, im_w, im_h, input_boxes, pixel_offset=True):
offset = 1 if pixel_offset else 0
xmin, ymin, xmax, ymax = paddle.tensor.split(
input_boxes, axis=1, num_or_sections=4)
xmin = paddle.clip(xmin, max=im_w - offset, min=0)
ymin = paddle.clip(ymin, max=im_h - offset, min=0)
xmax = paddle.clip(xmax, max=im_w - offset, min=0)
ymax = paddle.clip(ymax, max=im_h - offset, min=0)
input_boxes = paddle.concat([xmin, ymin, xmax, ymax], axis=1)
return input_boxes
def box_encode(self, anchors, bbox_deltas, variances, pixel_offset=True):
offset = 1 if pixel_offset else 0
anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax = paddle.tensor.split(
anchors, axis=1, num_or_sections=4)
anchor_width = anchor_xmax - anchor_xmin + offset
anchor_height = anchor_ymax - anchor_ymin + offset
anchor_center_x = anchor_xmin + 0.5 * anchor_width
anchor_center_y = anchor_ymin + 0.5 * anchor_height
var_center_x, var_center_y, var_width, var_height = paddle.tensor.split(
variances, axis=1, num_or_sections=4)
delta_center_x, delta_center_y, delta_width, delta_height = paddle.tensor.split(
bbox_deltas, axis=1, num_or_sections=4)
bbox_center_x = var_center_x * delta_center_x * anchor_width + anchor_center_x
bbox_center_y = var_center_y * delta_center_y * anchor_height + anchor_center_y
bbox_width = paddle.exp(
paddle.clip(
var_width * delta_width, max=BBOX_CLIP_DEFAULT)) * anchor_width
bbox_height = paddle.exp(
paddle.clip(
var_height * delta_height,
max=BBOX_CLIP_DEFAULT)) * anchor_height
proposal_xmin = bbox_center_x - bbox_width / 2
proposal_ymin = bbox_center_y - bbox_height / 2
proposal_xmax = bbox_center_x + bbox_width / 2 - offset
proposal_ymax = bbox_center_y + bbox_height / 2 - offset
proposal = paddle.concat(
[proposal_xmin, proposal_ymin, proposal_xmax, proposal_ymax],
axis=1)
return proposal
def proposal_for_single_sample(self, anchors, bbox_deltas, im_info, scores,
variances):
proposal_num = paddle.shape(scores)[0]
pre_nms_top_n_tensor = paddle.assign(
np.asarray(
[self.pre_nms_topN], dtype='int32'))
k_candidate = paddle.concat([proposal_num, pre_nms_top_n_tensor])
k = paddle.min(k_candidate)
scores, index = paddle.topk(scores, k=k, axis=0)
bbox_deltas = paddle.gather(bbox_deltas, index, axis=0)
anchors = paddle.gather(anchors, index, axis=0)
variances = paddle.gather(variances, index, axis=0)
proposal = self.box_encode(anchors, bbox_deltas, variances,
self.pixel_offset)
if self.type == "generate_proposals_v2":
im_h, im_w = paddle.tensor.split(im_info, axis=1, num_or_sections=2)
else:
im_h, im_w, im_s = paddle.tensor.split(
im_info, axis=1, num_or_sections=3)
proposal = self.clip_tiled_boxes(im_w, im_h, proposal,
self.pixel_offset)
if self.type == "generate_proposals_v2":
keep = self.filter_boxes_v2(proposal, im_w, im_h, self.min_size,
self.pixel_offset)
else:
keep = self.filter_boxes(proposal, im_w, im_h, im_s, self.min_size)
tail_proposal = paddle.zeros(shape=[1, 4], dtype=proposal.dtype)
proposal_num = paddle.shape(proposal)[0]
tail_keep = paddle.reshape(proposal_num, shape=[1, 1])
tail_keep = paddle.cast(tail_keep, dtype=keep.dtype)
tail_scores = paddle.zeros(shape=[1, 1], dtype=scores.dtype)
# proposal = paddle.concat([proposal, tail_proposal])
# keep = paddle.concat([keep, tail_keep])
# scores = paddle.concat([scores, tail_scores])
bbox_sel = paddle.gather(proposal, keep, axis=0)
scores_sel = paddle.gather(scores, keep, axis=0)
proposal = paddle.unsqueeze(bbox_sel, axis=0)
scores = paddle.transpose(scores_sel, perm=[1, 0])
scores = paddle.unsqueeze(scores, axis=0)
out = layers.multiclass_nms(
proposal,
scores,
background_label=-1,
nms_top_k=self.pre_nms_topN,
score_threshold=-10000.,
keep_top_k=self.post_nms_topN,
nms_threshold=self.nms_thresh,
normalized=False if self.pixel_offset else True,
nms_eta=self.eta)
label, scores, proposal = paddle.tensor.split(
out, axis=1, num_or_sections=[1, 1, 4])
return scores, proposal
def forward(self):
anchors = self.input('Anchors', 0)
bboxdeltas = self.input('BboxDeltas', 0)
if self.type == 'generate_proposals_v2':
iminfo = self.input('ImShape', 0)
else:
iminfo = self.input('ImInfo', 0)
scores = self.input('Scores', 0)
variances = self.input('Variances', 0)
bboxdeltas = paddle.transpose(bboxdeltas, perm=[0, 2, 3, 1])
bboxdeltas = paddle.reshape(bboxdeltas, [-1, 4])
scores = paddle.transpose(scores, perm=[0, 2, 3, 1])
scores = paddle.reshape(scores, [-1, 1])
anchors = paddle.reshape(anchors, [-1, 4])
variances = paddle.reshape(variances, [-1, 4])
new_scores, proposals = self.proposal_for_single_sample(
anchors, bboxdeltas, iminfo, scores, variances)
if len(self.node.outputs) == 3:
rois_num = paddle.shape(new_scores)[0]
return {
'RpnRoiProbs': [new_scores],
'RpnRois': [proposals],
'RpnRoisNum': [rois_num]
}
else:
return {'RpnRoiProbs': [new_scores], 'RpnRois': [proposals]}
register_custom_paddle_op('generate_proposals', GenerateProposals)
register_custom_paddle_op('generate_proposals_v2', GenerateProposals)

View File

@@ -0,0 +1,151 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class GridSampler(CustomPaddleOp):
def __init__(self, node, **kw):
super(GridSampler, self).__init__(node)
self.padding_mode = node.attr('padding_mode')
self.mode = node.attr('mode')
self.align_corners = node.attr('align_corners')
def paddle_bilinear_grid_sample(self, im, grid, align_corners=False):
# this code references: https://mmcv.readthedocs.io/en/latest/_modules/mmcv/ops/point_sample.html
im_shape = paddle.shape(im)
n, c, h, w = paddle.split(im_shape, num_or_sections=4)
grid_shape = paddle.shape(grid)
gn, gh, gw, _ = paddle.split(grid_shape, num_or_sections=4)
# n, c, h, w = im.shape
# gn, gh, gw, _ = grid.shape
# assert n == gn
x = grid[:, :, :, 0]
y = grid[:, :, :, 1]
if align_corners:
x = ((x + 1) / 2) * (w - 1)
y = ((y + 1) / 2) * (h - 1)
else:
x = ((x + 1) * w - 1) / 2
y = ((y + 1) * h - 1) / 2
x = paddle.reshape(x, [n, -1])
y = paddle.reshape(y, [n, -1])
x0 = paddle.floor(x).astype('int64')
y0 = paddle.floor(y).astype('int64')
x1 = x0 + 1
y1 = y0 + 1
x1_cast = x1.astype(grid.dtype)
x0_cast = x0.astype(grid.dtype)
y1_cast = y1.astype(grid.dtype)
y0_cast = y0.astype(grid.dtype)
wa = paddle.unsqueeze(((x1_cast - x) * (y1_cast - y)), 1)
wb = paddle.unsqueeze(((x1_cast - x) * (y - y0_cast)), 1)
wc = paddle.unsqueeze(((x - x0_cast) * (y1_cast - y)), 1)
wd = paddle.unsqueeze(((x - x0_cast) * (y - y0_cast)), 1)
# Apply the grid_sample default zero padding
im_padded = paddle.nn.functional.pad(im,
pad=[1, 1, 1, 1],
mode='constant',
value=0)
if im_padded.dtype != im.dtype:
im_padded = paddle.cast(im_padded, im.dtype)
padded_h = h + 2
padded_w = w + 2
# save points positions after padding
x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1
# Clip coordinates to padded image size
tensor_zero = paddle.full(shape=[1], dtype='int64', fill_value=0.0)
tensor_padded_w = paddle.full(
shape=[1], dtype='int64', fill_value=padded_w - 1)
tensor_padded_h = paddle.full(
shape=[1], dtype='int64', fill_value=padded_h - 1)
x0 = paddle.where(x0 < 0, tensor_zero, x0)
x0 = paddle.where(x0 > padded_w - 1, tensor_padded_w, x0)
x1 = paddle.where(x1 < 0, tensor_zero, x1)
x1 = paddle.where(x1 > padded_w - 1, tensor_padded_w, x1)
y0 = paddle.where(y0 < 0, tensor_zero, y0)
y0 = paddle.where(y0 > padded_h - 1, tensor_padded_h, y0)
y1 = paddle.where(y1 < 0, tensor_zero, y1)
y1 = paddle.where(y1 > padded_h - 1, tensor_padded_h, y1)
im_padded = paddle.reshape(im_padded, [n, c, -1])
x0_y0 = paddle.expand(
paddle.unsqueeze((x0 + y0 * padded_w), 1), [-1, c, -1])
x0_y1 = paddle.expand(
paddle.unsqueeze((x0 + y1 * padded_w), 1), [-1, c, -1])
x1_y0 = paddle.expand(
paddle.unsqueeze((x1 + y0 * padded_w), 1), [-1, c, -1])
x1_y1 = paddle.expand(
paddle.unsqueeze((x1 + y1 * padded_w), 1), [-1, c, -1])
Ia = self.paddle_gather(im_padded, 2, x0_y0)
Ib = self.paddle_gather(im_padded, 2, x0_y1)
Ic = self.paddle_gather(im_padded, 2, x1_y0)
Id = self.paddle_gather(im_padded, 2, x1_y1)
return paddle.reshape((Ia * wa + Ib * wb + Ic * wc + Id * wd),
[n, c, gh, gw])
def paddle_gather(self, x, dim, index):
# index_shape = index.shape
index_shape = paddle.shape(index)
x_shape = paddle.shape(x)
index_flatten = index.flatten()
if dim < 0:
dim = len(x.shape) + dim
nd_index = []
for k in range(len(x.shape)):
if k == dim:
nd_index.append(index_flatten)
else:
reshape_shape = [1] * len(x.shape)
x_shape_k = x_shape[k]
# x_shape_k = x.shape[k]
reshape_shape[k] = x_shape_k
x_arange = paddle.arange(x_shape_k, dtype=index.dtype)
x_arange = x_arange.reshape(reshape_shape)
dim_index = paddle.expand(x_arange, index_shape).flatten()
nd_index.append(dim_index)
ind2 = paddle.transpose(paddle.stack(nd_index), [1, 0]).astype("int64")
paddle_out = paddle.gather_nd(x, ind2).reshape(index_shape)
return paddle_out
def forward(self):
input = self.input('X', 0)
grid = self.input('Grid', 0)
if self.mode != 'bilinear' or self.padding_mode != 'zeros':
raise Exception(
"grid_sample only is supported with mode should be 'bilinear' and padding_mode should be 'zeros'"
)
res = self.paddle_bilinear_grid_sample(
input, grid, align_corners=self.align_corners)
return {'Output': [res]}
register_custom_paddle_op('grid_sampler', GridSampler)
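
The coordinate unnormalization at the top of paddle_bilinear_grid_sample maps grid values from [-1, 1] into pixel space. A hedged numpy sketch:

import numpy as np

def unnormalize(coord, size, align_corners=False):
    # align_corners: -1 -> 0 and 1 -> size-1; otherwise pixel-center convention
    if align_corners:
        return (coord + 1.0) / 2.0 * (size - 1)
    return ((coord + 1.0) * size - 1.0) / 2.0

print(unnormalize(np.array([-1.0, 0.0, 1.0]), size=8))  # [-0.5  3.5  7.5]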

View File

@@ -0,0 +1,13 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,363 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
@op_mapper('box_coder')
class BoxCoder():
"""
We decode the prior box into the target box; only the decode mode of
this op is converted.
"""
support_opset_version_range = (7, 12)
@classmethod
def opset_7(cls, graph, node, **kw):
input_names = node.input_names
t_size = node.input_shape('TargetBox', 0)
p_size = node.input_shape('PriorBox', 0)
# get the output name
result_name = node.output('OutputBox', 0)
# n is the batch size, m is the box count of target_boxes
n = t_size[0]
m = t_size[0]
axis = int(node.attr('axis'))
# norm
norm = bool(node.attr('box_normalized'))
name_slice_x1 = node.output('OutputBox')[0] + "@x1"
name_slice_y1 = node.output('OutputBox')[0] + "@y1"
name_slice_x2 = node.output('OutputBox')[0] + "@x2"
name_slice_y2 = node.output('OutputBox')[0] + "@y2"
# make onnx tensors to save the intermediate results
name_slice_indices = [
[node.output('OutputBox')[0] + "@slice_" + str(i)]
for i in range(1, 3)
]
node_slice_indices = [None for i in range(1, 3)]
# create the constant values 1 and 2 used for slicing
for i in range(1, 3):
tmp_node = graph.make_node(
'Constant',
inputs=[],
outputs=name_slice_indices[i - 1],
dtype=dtypes.ONNX.FLOAT,
dims=(),
value=[i])
# make node split data
name_box_split = [
name_slice_x1, name_slice_y1, name_slice_x2, name_slice_y2
]
split_shape = list(p_size)
split_shape[-1] = 1
node_split_prior_node = graph.make_node(
'Split',
inputs=node.input('PriorBox'),
outputs=name_box_split,
axis=1)
# make nodes to get the center for decoding
final_outputs_vars = []
if not norm:
name_centor_w_tmp = [node.output('OutputBox')[0] + "@centor_w_tmp"]
name_centor_h_tmp = [node.output('OutputBox')[0] + "@centor_h_tmp"]
node_centor_w_tmp = None
node_centor_h_tmp = None
name_centor_tmp_list = [name_centor_w_tmp, name_centor_h_tmp]
node_centor_tmp_list = [node_centor_w_tmp, node_centor_h_tmp]
count = 2
for (name, op_node) in zip(name_centor_tmp_list,
node_centor_tmp_list):
tmp_node = graph.make_node('Add',
inputs=[node.output('OutputBox')[0] + "@slice_" + str(1)]\
+ [name_box_split[count]],
outputs=name)
count = count + 1
if not norm:
inputs_sub = [[name_centor_w_tmp[0], name_box_split[0]],
[name_centor_h_tmp[0], name_box_split[1]]]
else:
inputs_sub = [[name_box_split[2], name_box_split[0]],
[name_box_split[3], name_box_split[1]]]
outputs_sub = [result_name + "@pb_w", result_name + "@pb_h"]
for i in range(0, 2):
tmp_node = graph.make_node(
'Sub', inputs=inputs_sub[i], outputs=[outputs_sub[i]])
# use the prior_box height and width to get the center x, y
name_half_value = [result_name + "@half_value"]
node_half_value = graph.make_node(
'Constant',
inputs=[],
outputs=name_half_value,
dtype=dtypes.ONNX.FLOAT,
dims=(),
value=[0.5])
outputs_half_wh = [[result_name + "@pb_w_half"],
[result_name + "@pb_h_half"]]
inputs_half_wh = [[result_name + "@pb_w", name_half_value[0]],
[result_name + "@pb_h", name_half_value[0]]]
for i in range(0, 2):
tmp_node = graph.make_node(
'Mul', inputs=inputs_half_wh[i], outputs=outputs_half_wh[i])
inputs_centor_xy = [[outputs_half_wh[0][0], name_slice_x1],
[outputs_half_wh[1][0], name_slice_y1]]
outputs_centor_xy = [[result_name + "@pb_x"], [result_name + "@pb_y"]]
# finally compute the center x, y
for i in range(0, 2):
tmp_node = graph.make_node(
'Add', inputs=inputs_centor_xy[i], outputs=outputs_centor_xy[i])
# reshape the data
shape = (1, split_shape[0]) if axis == 0 else (split_shape[0], 1)
# need to reshape the data
inputs_transpose_pb = [
[result_name + "@pb_w"],
[result_name + "@pb_h"],
[result_name + "@pb_x"],
[result_name + "@pb_y"],
]
outputs_transpose_pb = [
[result_name + "@pb_w_transpose"],
[result_name + "@pb_h_transpose"],
[result_name + "@pb_x_transpose"],
[result_name + "@pb_y_transpose"],
]
if axis == 0:
name_reshape_pb = [result_name + "@pb_transpose"]
# reshape the data
for i in range(0, 4):
tmp_node = graph.make_node(
'Transpose',
inputs=inputs_transpose_pb[i],
outputs=outputs_transpose_pb[i])
# decode the box according to the target_box and variance
name_variance_raw = [result_name + "@variance_raw"]
name_variance_unsqueeze = [result_name + "@variance_unsqueeze"]
shape = []
# make node to extend the data
var_split_axis = 0
var_split_inputs_name = []
if 'PriorBoxVar' in input_names and len(node.input('PriorBoxVar')) > 0:
if axis == 1:
raise Exception(
"The op box_coder has variable do not support aixs broadcast"
)
axes = []
var_split_inputs_name = [result_name + "@variance_split"]
tmp_node = graph.make_node(
'Transpose',
inputs=node.input('PriorBoxVar'),
outputs=var_split_inputs_name)
var_split_axis = 0
else:
variances = [1.0, 1.0, 1.0, 1.0]
if 'variance' in node.attrs and len(node.attr('variance')) > 0:
variances = [float(var) for var in node.attr('variance')]
node_variance_create = graph.make_node(
'Constant',
inputs=[],
outputs=name_variance_raw,
dtype=dtypes.ONNX.FLOAT,
dims=[len(variances)],
value=variances)
var_split_axis = 0
var_split_inputs_name = name_variance_raw
# decode the result
outputs_split_variance = [
result_name + "@variance_split" + str(i) for i in range(0, 4)
]
outputs_split_targebox = [
result_name + "@targebox_split" + str(i) for i in range(0, 4)
]
node_split_var = graph.make_node(
'Split',
inputs=var_split_inputs_name,
outputs=outputs_split_variance,
axis=var_split_axis)
node_split_target = graph.make_node(
'Split',
inputs=node.input('TargetBox'),
outputs=outputs_split_targebox,
axis=2)
outputs_squeeze_targebox = [
result_name + "@targebox_squeeze" + str(i) for i in range(0, 4)
]
for (input_name, output_name) in zip(outputs_split_targebox,
outputs_squeeze_targebox):
tmp_node = mapper_helper.squeeze_helper(graph, input_name, [2],
[output_name])
output_shape_step1 = list(t_size)[:-1]
inputs_tb_step1 = [
[outputs_squeeze_targebox[0], outputs_split_variance[0]],
[outputs_squeeze_targebox[1], outputs_split_variance[1]],
[outputs_squeeze_targebox[2], outputs_split_variance[2]],
[outputs_squeeze_targebox[3], outputs_split_variance[3]]
]
outputs_tb_step1 = [[result_name + "@decode_x_step1"],
[result_name + "@decode_y_step1"],
[result_name + "@decode_w_step1"],
[result_name + "@decode_h_step1"]]
for input_step1, output_step_1 in zip(inputs_tb_step1,
outputs_tb_step1):
tmp_node = graph.make_node(
'Mul', inputs=input_step1, outputs=output_step_1)
if axis == 0:
inputs_tbxy_step2 = [[
outputs_tb_step1[0][0], outputs_transpose_pb[0][0]
], [outputs_tb_step1[1][0], outputs_transpose_pb[1][0]]]
else:
inputs_tbxy_step2 = [[
outputs_tb_step1[0][0], inputs_transpose_pb[0][0]
], [outputs_tb_step1[1][0], inputs_transpose_pb[1][0]]]
outputs_tbxy_step2 = [[result_name + "@decode_x_step2"],
[result_name + "@decode_y_step2"]]
for input_step2, output_step_2 in zip(inputs_tbxy_step2,
outputs_tbxy_step2):
tmp_node = graph.make_node(
'Mul', inputs=input_step2, outputs=output_step_2)
if axis == 0:
inputs_tbxy_step3 = [[
outputs_tbxy_step2[0][0], outputs_transpose_pb[2][0]
], [outputs_tbxy_step2[1][0], outputs_transpose_pb[3][0]]]
else:
inputs_tbxy_step3 = [[
outputs_tbxy_step2[0][0], inputs_transpose_pb[2][0]
], [outputs_tbxy_step2[1][0], inputs_transpose_pb[3][0]]]
outputs_tbxy_step3 = [[result_name + "@decode_x_step3"],
[result_name + "@decode_y_step3"]]
for input_step3, output_step_3 in zip(inputs_tbxy_step3,
outputs_tbxy_step3):
tmp_node = graph.make_node(
'Add', inputs=input_step3, outputs=output_step_3)
# deal with width & height
inputs_tbwh_step2 = [outputs_tb_step1[2], outputs_tb_step1[3]]
outputs_tbwh_step2 = [[result_name + "@decode_w_step2"],
[result_name + "@decode_h_step2"]]
for input_name, output_name in zip(inputs_tbwh_step2,
outputs_tbwh_step2):
tmp_node = graph.make_node(
'Exp', inputs=input_name, outputs=output_name)
if axis == 0:
inputs_tbwh_step3 = [[
outputs_tbwh_step2[0][0], outputs_transpose_pb[0][0]
], [outputs_tbwh_step2[1][0], outputs_transpose_pb[1][0]]]
else:
inputs_tbwh_step3 = [[
outputs_tbwh_step2[0][0], inputs_transpose_pb[0][0]
], [outputs_tbwh_step2[1][0], inputs_transpose_pb[1][0]]]
outputs_tbwh_step3 = [[result_name + "@decode_w_step3"],
[result_name + "@decode_h_step3"]]
for input_name, output_name in zip(inputs_tbwh_step3,
outputs_tbwh_step3):
tmp_node = graph.make_node(
'Mul', inputs=input_name, outputs=output_name)
# final step to calc the result, and concat the result to output
# return the output box, [(x1, y1), (x2, y2)]
inputs_half_tbwh_step4 = [[
outputs_tbwh_step3[0][0], result_name + "@slice_2"
], [outputs_tbwh_step3[1][0], result_name + "@slice_2"]]
outputs_half_tbwh_step4 = [[result_name + "@decode_half_w_step4"],
[result_name + "@decode_half_h_step4"]]
for inputs_name, outputs_name in zip(inputs_half_tbwh_step4,
outputs_half_tbwh_step4):
tmp_node = graph.make_node(
'Div', inputs=inputs_name, outputs=outputs_name)
inputs_output_point1 = [[
outputs_tbxy_step3[0][0], outputs_half_tbwh_step4[0][0]
], [outputs_tbxy_step3[1][0], outputs_half_tbwh_step4[1][0]]]
outputs_output_point1 = [[result_name + "@ouput_x1"],
[result_name + "@output_y1"]]
for input_name, output_name in zip(inputs_output_point1,
outputs_output_point1):
tmp_node = graph.make_node(
'Sub', inputs=input_name, outputs=output_name)
inputs_output_point2 = [[
outputs_tbxy_step3[0][0], outputs_half_tbwh_step4[0][0]
], [outputs_tbxy_step3[1][0], outputs_half_tbwh_step4[1][0]]]
outputs_output_point2 = [[result_name + "@ouput_x2"],
[result_name + "@output_y2"]]
for input_name, output_name in zip(inputs_output_point2,
outputs_output_point2):
tmp_node = graph.make_node(
'Add', inputs=input_name, outputs=output_name)
if not norm:
inputs_unnorm_point2 = [[
outputs_output_point2[0][0], result_name + "@slice_1"
], [outputs_output_point2[1][0], result_name + "@slice_1"]]
outputs_unnorm_point2 = [[result_name + "@ouput_unnorm_x2"],
[result_name + "@ouput_unnorm_y2"]]
for input_name, output_name in zip(inputs_unnorm_point2,
outputs_unnorm_point2):
tmp_node = graph.make_node(
'Sub', inputs=input_name, outputs=output_name)
outputs_output_point2 = outputs_unnorm_point2
outputs_output_point1.extend(outputs_output_point2)
ouputs_points_unsqueeze = [[result_name + "@points_unsqueeze_x1"],
[result_name + "points_unsqueeze_y1"],
[result_name + "points_unsqueeze_x2"],
[result_name + "points_unsqueeze_y2"]]
for input_name, output_name in zip(outputs_output_point1,
ouputs_points_unsqueeze):
tmp_node = mapper_helper.unsqueeze_helper(
graph, input_name, [len(output_shape_step1)], output_name)
outputs_points_unsqueeze_list = [
output[0] for output in ouputs_points_unsqueeze
]
node_point_final = graph.make_node(
'Concat',
inputs=outputs_points_unsqueeze_list,
outputs=node.output('OutputBox'),
axis=len(output_shape_step1))
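
The decode pipeline that the BoxCoder mapper assembles node-by-node above can be summarized in a hedged numpy sketch (normalized mode, single box, illustrative values):

import numpy as np

def box_coder_decode(prior, delta, variance):
    pw, ph = prior[2] - prior[0], prior[3] - prior[1]
    px, py = prior[0] + 0.5 * pw, prior[1] + 0.5 * ph
    # step1: Mul by variance; step2: Mul by prior size; step3: Add prior center
    cx = delta[0] * variance[0] * pw + px
    cy = delta[1] * variance[1] * ph + py
    # width/height: Exp then Mul by prior size
    w = np.exp(delta[2] * variance[2]) * pw
    h = np.exp(delta[3] * variance[3]) * ph
    return [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]

print(box_coder_decode(np.array([0.1, 0.1, 0.5, 0.5]), np.zeros(4), np.ones(4)))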

View File

@@ -0,0 +1,125 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import math
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.utils import require_fixed_shape
@op_mapper('density_prior_box')
class DensityPriorBox():
"""
In this mapper we rely only on the op attributes to compute the prior
boxes: the image data and feature map values are not needed at runtime,
so the boxes are computed in Python and emitted as constant ONNX tensors.
"""
support_opset_version_range = (1, 12)
@classmethod
def opset_9(cls, graph, node, **kw):
clip = bool(node.attr('clip'))
densities = node.attr('densities')
fixed_ratios = node.attr('fixed_ratios')
fixed_sizes = node.attr('fixed_sizes')
flatten_to_2d = bool(node.attr('flatten_to_2d'))
offset = node.attr('offset')
step_h = node.attr('step_h')
step_w = node.attr('step_w')
variances = node.attr('variances')
input_shape = node.input_shape('Input', 0)
image_shape = node.input_shape('Image', 0)
img_width = image_shape[3]
img_height = image_shape[2]
feature_width = input_shape[3]
feature_height = input_shape[2]
assert img_width > 0 and img_height > 0, require_fixed_shape(
cls.__name__)
if step_w == 0.0 or step_h == 0.0:
step_w = float(img_width / feature_width)
step_h = float(img_height / feature_height)
num_priors = 0
if len(fixed_sizes) > 0 and len(densities) > 0:
for density in densities:
if len(fixed_ratios) > 0:
num_priors += len(fixed_ratios) * (pow(density, 2))
out_dim = (feature_height, feature_width, num_priors, 4)
out_boxes = np.zeros(out_dim).astype('float32')
out_var = np.zeros(out_dim).astype('float32')
step_average = int((step_w + step_h) * 0.5)
for h in range(feature_height):
for w in range(feature_width):
c_x = (w + offset) * step_w
c_y = (h + offset) * step_h
idx = 0
for density, fixed_size in zip(densities, fixed_sizes):
if (len(fixed_ratios) > 0):
for ar in fixed_ratios:
shift = int(step_average / density)
box_width_ratio = fixed_size * math.sqrt(ar)
box_height_ratio = fixed_size / math.sqrt(ar)
for di in range(density):
for dj in range(density):
c_x_temp = c_x - step_average / 2.0 + shift / 2.0 + dj * shift
c_y_temp = c_y - step_average / 2.0 + shift / 2.0 + di * shift
out_boxes[h, w, idx, :] = [
max((c_x_temp - box_width_ratio / 2.0) /
img_width, 0),
max((c_y_temp - box_height_ratio / 2.0)
/ img_height, 0),
min((c_x_temp + box_width_ratio / 2.0) /
img_width, 1),
min((c_y_temp + box_height_ratio / 2.0)
/ img_height, 1)
]
idx += 1
if clip:
out_boxes = np.clip(out_boxes, 0.0, 1.0)
# set the variance.
out_var = np.tile(variances,
(feature_height, feature_width, num_priors, 1))
if flatten_to_2d:
out_boxes = out_boxes.reshape((-1, 4))
out_var = out_var.reshape((-1, 4))
# make constant nodes holding the computed boxes and variances
node_boxes = graph.make_node(
'Constant',
inputs=[],
outputs=node.output('Boxes'),
dtype=dtypes.ONNX.FLOAT,
dims=out_boxes.shape,
value=out_boxes.flatten().tolist())
node_vars = graph.make_node(
'Constant',
inputs=[],
outputs=node.output('Variances'),
dtype=dtypes.ONNX.FLOAT,
dims=out_var.shape,
value=out_var.flatten().tolist())
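
The prior count per feature-map cell computed in opset_9 follows directly from the attributes. A hedged sketch with made-up densities and ratios:

def num_priors(densities, fixed_ratios):
    # len(fixed_ratios) * density**2, summed over densities
    return sum(len(fixed_ratios) * d * d for d in densities)

print(num_priors(densities=[2, 4], fixed_ratios=[1.0]))  # 4 + 16 = 20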

View File

@@ -0,0 +1,343 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle2onnx.utils import logging
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
@op_mapper(
['multiclass_nms', 'multiclass_nms2', 'matrix_nms', 'multiclass_nms3'])
class MultiClassNMS():
"""
Convert the paddle multiclass_nms to an onnx op.
This op selects boxes from the original boxes.
"""
support_opset_version_range = (10, 16)
@classmethod
def opset_10(cls, graph, node, **kw):
if node.input_shape("BBoxes", 0)[0] != 1:
logging.warning(
"Due to the operator:{}, the converted ONNX model will only supports input[batch_size] == 1.".
format(node.type))
scores = node.input('Scores', 0)
bboxes = node.input('BBoxes', 0)
num_class = node.input_shape('Scores', 0)[1]
if len(node.input_shape('Scores', 0)) == 2:
# inputs: scores & bboxes is lod tensor
scores = graph.make_node('Transpose', inputs=[scores], perm=[1, 0])
scores = mapper_helper.unsqueeze_helper(graph, scores, [0])
if graph.opset_version < 13:
scores_list = graph.make_node(
'Split',
inputs=scores,
outputs=num_class,
axis=1,
split=[1] * num_class)
else:
split_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[1] * num_class)
scores_list = graph.make_node(
"Split",
inputs=[scores] + [split_const],
outputs=num_class,
axis=1)
bboxes = graph.make_node('Transpose', inputs=bboxes, perm=[1, 0, 2])
if graph.opset_version < 13:
bboxes_list = graph.make_node(
'Split',
inputs=bboxes,
outputs=num_class,
axis=0,
split=[1] * num_class)
else:
split_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[1] * num_class)
bboxes_list = graph.make_node(
"Split",
inputs=[bboxes] + [split_const],
outputs=num_class,
axis=0)
bbox_ids = []
if not isinstance(scores_list, list):
scores_list = [scores_list]
if not isinstance(bboxes_list, list):
bboxes_list = [bboxes_list]
for i in range(num_class):
bbox_id = cls.nms(graph,
node,
scores_list[i],
bboxes_list[i],
class_id=i)
bbox_ids.append(bbox_id)
bbox_ids = graph.make_node('Concat', inputs=bbox_ids, axis=0)
const_shape = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[1, -1, 4])
bboxes = graph.make_node('Reshape', inputs=[bboxes, const_shape])
cls.keep_top_k(
graph, node, bbox_ids, scores, bboxes, is_lod_input=True)
else:
bbox_ids = cls.nms(graph, node, scores, bboxes)
cls.keep_top_k(graph, node, bbox_ids, scores, bboxes)
@classmethod
def nms(cls, graph, node, scores, bboxes, class_id=None):
normalized = node.attr('normalized')
nms_top_k = node.attr('nms_top_k')
if node.type == 'matrix_nms':
iou_threshold = 0.5
logging.warning(
"Operator:{} is not supported completely, so we use traditional"
" NMS (nms_theshold={}) to instead it, which introduce some difference.".
format(node.type, str(iou_threshold)))
else:
iou_threshold = node.attr('nms_threshold')
if nms_top_k == -1:
nms_top_k = 100000
# convert the paddle attributes to onnx tensors
score_threshold = graph.make_node(
'Constant',
dtype=dtypes.ONNX.FLOAT,
value=[float(node.attr('score_threshold'))])
iou_threshold = graph.make_node(
'Constant', dtype=dtypes.ONNX.FLOAT, value=[float(iou_threshold)])
nms_top_k = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[np.int64(nms_top_k)])
# the paddle data format is x1,y1,x2,y2
kwargs = {'center_point_box': 0}
if normalized:
select_bbox_indices = graph.make_node(
'NonMaxSuppression',
inputs=[
bboxes, scores, nms_top_k, iou_threshold, score_threshold
])
elif not normalized:
value_one = graph.make_node(
'Constant', dims=[1], dtype=dtypes.ONNX.FLOAT, value=1.0)
if graph.opset_version < 13:
new_bboxes = graph.make_node(
'Split',
inputs=[bboxes],
outputs=4,
axis=2,
split=[1, 1, 1, 1])
else:
split_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[1, 1, 1, 1])
new_bboxes = graph.make_node(
"Split", inputs=[bboxes] + [split_const], outputs=4, axis=2)
new_xmax = graph.make_node('Add', inputs=[new_bboxes[2], value_one])
new_ymax = graph.make_node('Add', inputs=[new_bboxes[3], value_one])
new_bboxes = graph.make_node(
'Concat',
inputs=[new_bboxes[0], new_bboxes[1], new_xmax, new_ymax],
axis=2)
select_bbox_indices = graph.make_node(
'NonMaxSuppression',
inputs=[
new_bboxes, scores, nms_top_k, iou_threshold,
score_threshold
])
if class_id is not None and class_id != 0:
class_id = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[0, class_id, 0])
class_id = mapper_helper.unsqueeze_helper(graph, class_id, [0])
select_bbox_indices = graph.make_node(
'Add', inputs=[select_bbox_indices, class_id])
return select_bbox_indices
@classmethod
def keep_top_k(cls,
graph,
node,
select_bbox_indices,
scores,
bboxes,
is_lod_input=False):
# step 1: select the nms class
# create some const values to use
background = node.attr('background_label')
const_values = []
for value in [0, 1, 2, -1]:
const_value = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[value])
const_values.append(const_value)
# In this code block, we will decode the raw score data, reshape N * C * M to 1 * N*C*M,
# and at the same time decode the select indices to 1 * D, then gather the select_indices
class_id = graph.make_node(
'Gather', inputs=[select_bbox_indices, const_values[1]], axis=1)
squeezed_class_id = mapper_helper.squeeze_helper(graph, class_id, [1])
bbox_id = graph.make_node(
'Gather', inputs=[select_bbox_indices, const_values[2]], axis=1)
if background == 0:
nonzero = graph.make_node('NonZero', inputs=[squeezed_class_id])
else:
filter_cls_id = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT32, value=[background])
cast = graph.make_node(
'Cast', inputs=[squeezed_class_id], to=dtypes.ONNX.INT32)
filter_index = graph.make_node('Sub', inputs=[cast, filter_cls_id])
nonzero = graph.make_node('NonZero', inputs=[filter_index])
class_id = graph.make_node('Gather', inputs=[class_id, nonzero], axis=0)
class_id = graph.make_node(
'Cast', inputs=[class_id], to=dtypes.ONNX.INT64)
bbox_id = graph.make_node('Gather', inputs=[bbox_id, nonzero], axis=0)
bbox_id = graph.make_node(
'Cast', inputs=[bbox_id], to=dtypes.ONNX.INT64)
# get the shape of scores
shape_scores = graph.make_node('Shape', inputs=scores)
# gather the index: 2 shape of scores
class_num = graph.make_node(
'Gather', inputs=[shape_scores, const_values[2]], axis=0)
# reshape scores N * C * M to (N*C*M) * 1
scores = graph.make_node('Reshape', inputs=[scores, const_values[-1]])
# mul class * M
mul_classnum_boxnum = graph.make_node(
'Mul', inputs=[class_id, class_num])
# add class * M * index
add_class_indices = graph.make_node(
'Add', inputs=[mul_classnum_boxnum, bbox_id])
# Squeeze the indices to 1 dim
score_indices = mapper_helper.squeeze_helper(graph, add_class_indices,
[0, 2])
# gather the data from flatten scores
scores = graph.make_node(
'Gather', inputs=[scores, score_indices], axis=0)
keep_top_k = node.attr('keep_top_k')
keep_top_k = graph.make_node(
'Constant',
dtype=dtypes.ONNX.INT64,
dims=[1, 1],
value=[node.attr('keep_top_k')])
# get min(topK, num_select)
shape_select_num = graph.make_node('Shape', inputs=[scores])
const_zero = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[0])
gather_select_num = graph.make_node(
'Gather', inputs=[shape_select_num, const_zero], axis=0)
unsqueeze_select_num = mapper_helper.unsqueeze_helper(
graph, gather_select_num, [0])
concat_topK_select_num = graph.make_node(
'Concat', inputs=[unsqueeze_select_num, keep_top_k], axis=0)
        cast_concat_topK_select_num = graph.make_node(
            'Cast', inputs=[concat_topK_select_num], to=dtypes.ONNX.INT32)
keep_top_k = graph.make_node(
'ReduceMin', inputs=[cast_concat_topK_select_num], keepdims=0)
        # unsqueeze k to a 1-D tensor
        keep_top_k = mapper_helper.unsqueeze_helper(graph, keep_top_k, [0])
        # TopK requires k as an INT64 tensor
        keep_top_k = graph.make_node(
            'Cast', inputs=[keep_top_k], to=dtypes.ONNX.INT64)
# select topk scores indices
keep_topk_scores, keep_topk_indices = graph.make_node(
'TopK', inputs=[scores, keep_top_k], outputs=2)
# gather topk label, scores, boxes
gather_topk_scores = graph.make_node(
'Gather', inputs=[scores, keep_topk_indices], axis=0)
gather_topk_class = graph.make_node(
'Gather', inputs=[class_id, keep_topk_indices], axis=1)
# gather the boxes need to gather the boxes id, then get boxes
if is_lod_input:
gather_topk_boxes_id = graph.make_node(
'Gather', [add_class_indices, keep_topk_indices], axis=1)
else:
gather_topk_boxes_id = graph.make_node(
'Gather', [bbox_id, keep_topk_indices], axis=1)
# squeeze the gather_topk_boxes_id to 1 dim
squeeze_topk_boxes_id = mapper_helper.squeeze_helper(
graph, gather_topk_boxes_id, [0, 2])
gather_select_boxes = graph.make_node(
'Gather', inputs=[bboxes, squeeze_topk_boxes_id], axis=1)
# concat the final result
# before concat need to cast the class to float
cast_topk_class = graph.make_node(
'Cast', inputs=[gather_topk_class], to=1)
unsqueeze_topk_scores = mapper_helper.unsqueeze_helper(
graph, gather_topk_scores, [0, 2])
inputs_concat_final_results = [
cast_topk_class, unsqueeze_topk_scores, gather_select_boxes
]
        sort_by_score_results = graph.make_node(
            'Concat', inputs=inputs_concat_final_results, axis=2)
# sort by class_id
squeeze_cast_topk_class = mapper_helper.squeeze_helper(
graph, cast_topk_class, [0, 2])
neg_squeeze_cast_topk_class = graph.make_node(
'Neg', inputs=[squeeze_cast_topk_class])
data, indices = graph.make_node(
'TopK', inputs=[neg_squeeze_cast_topk_class, keep_top_k], outputs=2)
        concat_final_results = graph.make_node(
            'Gather', inputs=[sort_by_score_results, indices], axis=1)
concat_final_results = mapper_helper.squeeze_helper(
graph, concat_final_results, [0], node.output('Out'))
if node.type in ['multiclass_nms2', 'matrix_nms', 'multiclass_nms3']:
final_indices = mapper_helper.squeeze_helper(graph, bbox_id, [0],
node.output('Index'))
if node.type in ['matrix_nms', 'multiclass_nms3']:
select_bboxes_shape = graph.make_node('Shape', inputs=[indices])
select_bboxes_shape1 = graph.make_node(
'Cast', inputs=[select_bboxes_shape], to=dtypes.ONNX.INT32)
indices = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[0])
rois_num = None
if 'NmsRoisNum' in node.outputs:
rois_num = node.output('NmsRoisNum')
elif 'RoisNum' in node.outputs:
rois_num = node.output('RoisNum')
if rois_num is not None:
graph.make_node(
"Gather",
inputs=[select_bboxes_shape1, indices],
outputs=rois_num)

View File

@@ -0,0 +1,176 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import math
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.utils import require_fixed_shape
def expand_aspect_ratios(input_aspect_ratios, flip):
    epsilon = 1e-6
    output_ratios = [1.0]
    for input_ratio in input_aspect_ratios:
        already_exists = False
        for output_ratio in output_ratios:
            if abs(input_ratio - output_ratio) < epsilon:
                already_exists = True
                break
        if not already_exists:
            output_ratios.append(input_ratio)
            if flip:
                output_ratios.append(1.0 / input_ratio)
    return output_ratios
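# e.g. expand_aspect_ratios([2.0], flip=True) -> [1.0, 2.0, 0.5]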
@op_mapper('prior_box')
class PriorBox():
"""
In this function, use the attribute to get the prior box, because we do not use
the image data and feature map, wo could the python code to create the varaible,
and to create the onnx tensor as output.
"""
    support_opset_version_range = (1, 12)
@classmethod
def opset_9(cls, graph, node, **kw):
flip = bool(node.attr('flip'))
clip = bool(node.attr('clip'))
min_max_aspect_ratios_order = bool(
node.attr('min_max_aspect_ratios_order'))
min_sizes = [float(size) for size in node.attr('min_sizes')]
max_sizes = [float(size) for size in node.attr('max_sizes')]
if isinstance(node.attr('aspect_ratios'), list):
aspect_ratios = [
float(ratio) for ratio in node.attr('aspect_ratios')
]
else:
aspect_ratios = [float(node.attr('aspect_ratios'))]
variances = [float(var) for var in node.attr('variances')]
# set min_max_aspect_ratios_order = false
        output_ratios = expand_aspect_ratios(aspect_ratios, flip)
step_w = float(node.attr('step_w'))
step_h = float(node.attr('step_h'))
offset = float(node.attr('offset'))
input_shape = node.input_shape('Input', 0)
image_shape = node.input_shape('Image', 0)
img_width = image_shape[3]
img_height = image_shape[2]
feature_width = input_shape[3]
feature_height = input_shape[2]
assert img_width > 0 and img_height > 0, require_fixed_shape(
cls.__name__)
if step_w == 0.0 or step_h == 0.0:
step_w = float(img_width / feature_width)
step_h = float(img_height / feature_height)
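        # each feature-map cell (h, w) produces num_priors boxes centered at
        # ((w + offset) * step_w, (h + offset) * step_h), normalized by the
        # image size below.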
num_priors = len(output_ratios) * len(min_sizes)
if len(max_sizes) > 0:
num_priors += len(max_sizes)
out_dim = (feature_height, feature_width, num_priors, 4)
out_boxes = np.zeros(out_dim).astype('float32')
out_var = np.zeros(out_dim).astype('float32')
idx = 0
for h in range(feature_height):
for w in range(feature_width):
c_x = (w + offset) * step_w
c_y = (h + offset) * step_h
idx = 0
for s in range(len(min_sizes)):
min_size = min_sizes[s]
if not min_max_aspect_ratios_order:
# rest of priors
for r in range(len(output_ratios)):
ar = output_ratios[r]
c_w = min_size * math.sqrt(ar) / 2
c_h = (min_size / math.sqrt(ar)) / 2
out_boxes[h, w, idx, :] = [(c_x - c_w) / img_width,
(c_y - c_h) / img_height,
(c_x + c_w) / img_width,
(c_y + c_h) / img_height]
idx += 1
if len(max_sizes) > 0:
max_size = max_sizes[s]
# second prior: aspect_ratio = 1,
c_w = c_h = math.sqrt(min_size * max_size) / 2
out_boxes[h, w, idx, :] = [(c_x - c_w) / img_width,
(c_y - c_h) / img_height,
(c_x + c_w) / img_width,
(c_y + c_h) / img_height]
idx += 1
else:
c_w = c_h = min_size / 2.
out_boxes[h, w, idx, :] = [
(c_x - c_w) / img_width, (c_y - c_h) / img_height,
(c_x + c_w) / img_width, (c_y + c_h) / img_height
]
idx += 1
if len(max_sizes) > 0:
max_size = max_sizes[s]
# second prior: aspect_ratio = 1,
c_w = c_h = math.sqrt(min_size * max_size) / 2
out_boxes[h, w, idx, :] = [(c_x - c_w) / img_width,
(c_y - c_h) / img_height,
(c_x + c_w) / img_width,
(c_y + c_h) / img_height]
idx += 1
# rest of priors
for r in range(len(output_ratios)):
ar = output_ratios[r]
if abs(ar - 1.) < 1e-6:
continue
c_w = min_size * math.sqrt(ar) / 2
c_h = (min_size / math.sqrt(ar)) / 2
out_boxes[h, w, idx, :] = [(c_x - c_w) / img_width,
(c_y - c_h) / img_height,
(c_x + c_w) / img_width,
(c_y + c_h) / img_height]
idx += 1
if clip:
out_boxes = np.clip(out_boxes, 0.0, 1.0)
# set the variance.
out_var = np.tile(variances,
(feature_height, feature_width, num_priors, 1))
        # emit the computed boxes and variances as Constant nodes
node_boxes = graph.make_node(
'Constant',
inputs=[],
outputs=node.output('Boxes'),
dtype=dtypes.ONNX.FLOAT,
dims=out_boxes.shape,
value=out_boxes.flatten().tolist())
node_vars = graph.make_node(
'Constant',
inputs=[],
outputs=node.output('Variances'),
dtype=dtypes.ONNX.FLOAT,
dims=out_var.shape,
value=out_var.flatten().tolist())

View File

@@ -0,0 +1,506 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
from onnx import TensorProto
import paddle
MAX_FLOAT32 = 3.402823466E+38
@op_mapper('yolo_box')
class YOLOBox():
    support_opset_version_range = (9, 12)
node_pred_box_x1_decode = None
node_pred_box_y1_decode = None
node_pred_box_x2_decode = None
node_pred_box_y2_decode = None
node_pred_box_x2_sub_w = None
node_pred_box_y2_sub_h = None
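    # yolo_box decoding: box centers come from sigmoid(tx, ty) plus the cell
    # grid offsets, box sizes from exp(tw, th) scaled by the anchors, and
    # low-confidence boxes are zeroed out with the conf_thresh mask below.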
@classmethod
def opset_9(cls, graph, node, **kw):
model_name = node.output('Boxes', 0)
input_shape = node.input_shape('X', 0)
mapper_helper.is_static_shape(input_shape)
image_size = node.input('ImgSize')
input_height = input_shape[2]
input_width = input_shape[3]
class_num = node.attr('class_num')
anchors = node.attr('anchors')
num_anchors = int(len(anchors)) // 2
scale_x_y = node.attr('scale_x_y')
downsample_ratio = node.attr('downsample_ratio')
input_size = input_height * downsample_ratio
conf_thresh = node.attr('conf_thresh')
        conf_thresh_mat = [conf_thresh] * num_anchors * input_height * input_width
cls.score_shape = [
1, input_height * input_width * int(num_anchors), class_num
]
im_outputs = []
x_shape = [1, num_anchors, 5 + class_num, input_height, input_width]
node_x_shape = graph.make_node(
'Constant', attrs={'dtype': dtypes.ONNX.INT64,
'value': x_shape})
node_x_reshape = graph.make_node(
'Reshape', inputs=[node.input('X')[0], node_x_shape])
node_x_transpose = graph.make_node(
'Transpose', inputs=[node_x_reshape], perm=[0, 1, 3, 4, 2])
range_x = []
range_y = []
for i in range(0, input_width):
range_x.append(i)
for j in range(0, input_height):
range_y.append(j)
node_range_x = graph.make_node(
'Constant',
attrs={
'dtype': dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
'value': range_x,
})
node_range_y = graph.make_node(
'Constant',
inputs=[],
attrs={
'dtype': dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
'value': range_y,
})
range_x_new_shape = [1, input_width]
range_y_new_shape = [input_height, 1]
node_range_x_new_shape = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.ONNX.INT64,
value=range_x_new_shape)
node_range_y_new_shape = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.ONNX.INT64,
value=range_y_new_shape)
node_range_x_reshape = graph.make_node(
'Reshape', inputs=[node_range_x, node_range_x_new_shape])
node_range_y_reshape = graph.make_node(
'Reshape', inputs=[node_range_y, node_range_y_new_shape])
node_grid_x = graph.make_node(
"Tile", inputs=[node_range_x_reshape, node_range_y_new_shape])
node_grid_y = graph.make_node(
"Tile", inputs=[node_range_y_reshape, node_range_x_new_shape])
node_box_x = model_name + "@box_x"
node_box_y = model_name + "@box_y"
node_box_w = model_name + "@box_w"
node_box_h = model_name + "@box_h"
node_conf = model_name + "@conf"
node_prob = model_name + "@prob"
output = [
node_box_x, node_box_y, node_box_w, node_box_h, node_conf, node_prob
]
node_split_input = mapper_helper.split_helper(
graph, [node_x_transpose],
output,
-1, [1, 1, 1, 1, 1, class_num],
dtype=node.input_dtype('X', 0))
node_box_x_sigmoid = graph.make_node("Sigmoid", inputs=[node_box_x])
node_box_y_sigmoid = graph.make_node("Sigmoid", inputs=[node_box_y])
if scale_x_y is not None:
bias_x_y = -0.5 * (scale_x_y - 1.0)
scale_x_y_node = graph.make_node(
'Constant',
attrs={
'dtype':
dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
'value': scale_x_y
})
bias_x_y_node = graph.make_node(
'Constant',
attrs={
'dtype':
dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
'value': bias_x_y
})
node_box_x_sigmoid = graph.make_node(
"Mul", inputs=[node_box_x_sigmoid, scale_x_y_node])
node_box_x_sigmoid = graph.make_node(
"Add", inputs=[node_box_x_sigmoid, bias_x_y_node])
node_box_y_sigmoid = graph.make_node(
"Mul", inputs=[node_box_y_sigmoid, scale_x_y_node])
node_box_y_sigmoid = graph.make_node(
"Add", inputs=[node_box_y_sigmoid, bias_x_y_node])
node_box_x_squeeze = mapper_helper.squeeze_helper(
graph, node_box_x_sigmoid, [4])
node_box_y_squeeze = mapper_helper.squeeze_helper(
graph, node_box_y_sigmoid, [4])
node_box_x_add_grid = graph.make_node(
"Add", inputs=[node_grid_x, node_box_x_squeeze])
node_box_y_add_grid = graph.make_node(
"Add", inputs=[node_grid_y, node_box_y_squeeze])
node_input_h = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=[input_height])
node_input_w = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=[input_width])
node_box_x_encode = graph.make_node(
'Div', inputs=[node_box_x_add_grid, node_input_w])
node_box_y_encode = graph.make_node(
'Div', inputs=[node_box_y_add_grid, node_input_h])
node_anchor_tensor = graph.make_node(
"Constant",
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=anchors)
anchor_shape = [int(num_anchors), 2]
node_anchor_shape = graph.make_node(
"Constant", inputs=[], dtype=dtypes.ONNX.INT64, value=anchor_shape)
node_anchor_tensor_reshape = graph.make_node(
"Reshape", inputs=[node_anchor_tensor, node_anchor_shape])
node_input_size = graph.make_node(
"Constant",
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=[input_size])
node_anchors_div_input_size = graph.make_node(
"Div", inputs=[node_anchor_tensor_reshape, node_input_size])
node_anchor_w = model_name + "@anchor_w"
node_anchor_h = model_name + "@anchor_h"
node_anchor_split = mapper_helper.split_helper(
graph,
inputs=node_anchors_div_input_size,
axis=1,
split=[1, 1],
outputs=[node_anchor_w, node_anchor_h],
dtype=node.input_dtype('X', 0))
new_anchor_shape = [1, int(num_anchors), 1, 1]
node_new_anchor_shape = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.ONNX.INT64,
value=new_anchor_shape)
node_anchor_w_reshape = graph.make_node(
'Reshape', inputs=[node_anchor_w, node_new_anchor_shape])
node_anchor_h_reshape = graph.make_node(
'Reshape', inputs=[node_anchor_h, node_new_anchor_shape])
node_box_w_squeeze = mapper_helper.squeeze_helper(graph, node_box_w,
[4])
node_box_h_squeeze = mapper_helper.squeeze_helper(graph, node_box_h,
[4])
node_box_w_exp = graph.make_node("Exp", inputs=[node_box_w_squeeze])
node_box_h_exp = graph.make_node("Exp", inputs=[node_box_h_squeeze])
node_box_w_encode = graph.make_node(
'Mul', inputs=[node_box_w_exp, node_anchor_w_reshape])
node_box_h_encode = graph.make_node(
'Mul', inputs=[node_box_h_exp, node_anchor_h_reshape])
node_conf_sigmoid = graph.make_node('Sigmoid', inputs=[node_conf])
node_conf_thresh = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=conf_thresh_mat)
conf_shape = [1, int(num_anchors), input_height, input_width, 1]
node_conf_shape = graph.make_node(
'Constant', inputs=[], dtype=dtypes.ONNX.INT64, value=conf_shape)
node_conf_thresh_reshape = graph.make_node(
'Reshape', inputs=[node_conf_thresh, node_conf_shape])
node_conf_sub = graph.make_node(
'Sub', inputs=[node_conf_sigmoid, node_conf_thresh_reshape])
node_conf_clip = mapper_helper.clip_helper(graph, node, node_conf_sub,
float(MAX_FLOAT32), 0.0)
node_zeros = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=[0])
node_conf_clip_bool = graph.make_node(
'Greater', inputs=[node_conf_clip, node_zeros])
node_conf_clip_cast = graph.make_node(
'Cast',
inputs=[node_conf_clip_bool],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_conf_set_zero = graph.make_node(
'Mul', inputs=[node_conf_sigmoid, node_conf_clip_cast])
node_prob_sigmoid = graph.make_node('Sigmoid', inputs=[node_prob])
new_shape = [1, int(num_anchors), input_height, input_width, 1]
node_new_shape = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.ONNX.INT64,
dims=[len(new_shape)],
value=new_shape)
node_conf_new_shape = graph.make_node(
'Reshape', inputs=[node_conf_set_zero, node_new_shape])
cls.node_score = graph.make_node(
'Mul', inputs=[node_prob_sigmoid, node_conf_new_shape])
node_conf_bool = graph.make_node(
'Greater', inputs=[node_conf_new_shape, node_zeros])
node_box_x_new_shape = graph.make_node(
'Reshape', inputs=[node_box_x_encode, node_new_shape])
node_box_y_new_shape = graph.make_node(
'Reshape', inputs=[node_box_y_encode, node_new_shape])
node_box_w_new_shape = graph.make_node(
'Reshape', inputs=[node_box_w_encode, node_new_shape])
node_box_h_new_shape = graph.make_node(
'Reshape', inputs=[node_box_h_encode, node_new_shape])
node_pred_box = graph.make_node(
'Concat',
inputs=[node_box_x_new_shape, node_box_y_new_shape, \
node_box_w_new_shape, node_box_h_new_shape],
axis=4)
node_conf_cast = graph.make_node(
'Cast',
inputs=[node_conf_bool],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_pred_box_mul_conf = graph.make_node(
'Mul', inputs=[node_pred_box, node_conf_cast])
box_shape = [1, int(num_anchors) * input_height * input_width, 4]
node_box_shape = graph.make_node(
'Constant', inputs=[], dtype=dtypes.ONNX.INT64, value=box_shape)
node_pred_box_new_shape = graph.make_node(
'Reshape', inputs=[node_pred_box_mul_conf, node_box_shape])
node_pred_box_x = model_name + "@_pred_box_x"
node_pred_box_y = model_name + "@_pred_box_y"
node_pred_box_w = model_name + "@_pred_box_w"
node_pred_box_h = model_name + "@_pred_box_h"
if node.input_dtype('X', 0) == paddle.float64:
node_pred_box_new_shape = graph.make_node(
'Cast', inputs=[node_pred_box_new_shape], to=TensorProto.FLOAT)
node_pred_box_split = mapper_helper.split_helper(
graph,
inputs=node_pred_box_new_shape,
axis=2,
split=[1, 1, 1, 1],
outputs=[
node_pred_box_x, node_pred_box_y, node_pred_box_w,
node_pred_box_h
],
dtype=node.input_dtype('X', 0))
if node.input_dtype('X', 0) == paddle.float64:
node_pred_box_x = graph.make_node(
'Cast',
inputs=[node_pred_box_x],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_pred_box_y = graph.make_node(
'Cast',
inputs=[node_pred_box_y],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_pred_box_w = graph.make_node(
'Cast',
inputs=[node_pred_box_w],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_pred_box_h = graph.make_node(
'Cast',
inputs=[node_pred_box_h],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_number_two = graph.make_node(
"Constant",
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=[2])
node_half_w = graph.make_node(
"Div", inputs=[node_pred_box_w, node_number_two])
node_half_h = graph.make_node(
"Div", inputs=[node_pred_box_h, node_number_two])
node_pred_box_x1 = graph.make_node(
'Sub', inputs=[node_pred_box_x, node_half_w])
node_pred_box_y1 = graph.make_node(
'Sub', inputs=[node_pred_box_y, node_half_h])
node_pred_box_x2 = graph.make_node(
'Add', inputs=[node_pred_box_x, node_half_w])
node_pred_box_y2 = graph.make_node(
'Add', inputs=[node_pred_box_y, node_half_h])
        node_squeeze_image_size = mapper_helper.squeeze_helper(
            graph, image_size[0], [0])
node_img_height = model_name + "@img_height"
node_img_width = model_name + "@img_width"
node_image_size_split = mapper_helper.split_helper(
            graph, [node_squeeze_image_size], [node_img_height, node_img_width],
-1, [1, 1],
dtype=node.input_dtype('X', 0))
node_img_width_cast = graph.make_node(
'Cast',
inputs=[node_img_width],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
node_img_height_cast = graph.make_node(
'Cast',
inputs=[node_img_height],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)])
cls.node_pred_box_x1_decode = graph.make_node(
'Mul',
inputs=[node_pred_box_x1, node_img_width_cast]) #boxes[box_idx]
cls.node_pred_box_y1_decode = graph.make_node(
'Mul', inputs=[node_pred_box_y1,
node_img_height_cast]) #boxes[box_idx + 1]
cls.node_pred_box_x2_decode = graph.make_node(
'Mul',
inputs=[node_pred_box_x2, node_img_width_cast]) #boxes[box_idx + 2]
cls.node_pred_box_y2_decode = graph.make_node(
'Mul', inputs=[node_pred_box_y2,
node_img_height_cast]) #boxes[box_idx + 3]
if node.attr('clip_bbox'):
node_number_one = graph.make_node(
'Constant',
inputs=[],
dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype('X', 0)],
value=[1])
node_new_img_height = graph.make_node(
'Sub', inputs=[node_img_height_cast, node_number_one])
node_new_img_width = graph.make_node(
'Sub', inputs=[node_img_width_cast, node_number_one])
cls.node_pred_box_x2_sub_w = graph.make_node(
'Sub',
inputs=[cls.node_pred_box_x2_decode, node_new_img_width])
cls.node_pred_box_y2_sub_h = graph.make_node(
'Sub',
inputs=[cls.node_pred_box_y2_decode, node_new_img_height])
node_pred_box_x1_clip = mapper_helper.clip_helper(
graph, node, cls.node_pred_box_x1_decode,
float(MAX_FLOAT32), 0.0)
node_pred_box_y1_clip = mapper_helper.clip_helper(
graph, node, cls.node_pred_box_y1_decode,
float(MAX_FLOAT32), 0.0)
node_pred_box_x2_clip = mapper_helper.clip_helper(
graph, node, cls.node_pred_box_x2_sub_w,
float(MAX_FLOAT32), 0.0)
node_pred_box_y2_clip = mapper_helper.clip_helper(
graph, node, cls.node_pred_box_y2_sub_h,
float(MAX_FLOAT32), 0.0)
node_pred_box_x2_res = graph.make_node(
'Sub',
inputs=[cls.node_pred_box_x2_decode, node_pred_box_x2_clip])
node_pred_box_y2_res = graph.make_node(
'Sub',
inputs=[cls.node_pred_box_y2_decode, node_pred_box_y2_clip])
node_pred_box_result = graph.make_node(
'Concat',
inputs=[
node_pred_box_x1_clip, node_pred_box_y1_clip,
node_pred_box_x2_res, node_pred_box_y2_res
],
outputs=node.output('Boxes'),
axis=-1)
else:
node_pred_box_result = graph.make_node(
'Concat',
inputs=[
cls.node_pred_box_x1_decode, cls.node_pred_box_y1_decode,
cls.node_pred_box_x2_decode, cls.node_pred_box_y2_decode
],
outputs=node.output('Boxes'),
axis=-1)
node_score_shape = graph.make_node(
"Constant",
inputs=[],
dtype=dtypes.ONNX.INT64,
value=cls.score_shape)
node_score_new_shape = graph.make_node(
'Reshape',
inputs=[cls.node_score, node_score_shape],
outputs=node.output('Scores'))

View File

@@ -0,0 +1,280 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
import paddle
from paddle2onnx.utils import logging
from paddle2onnx.legacy.op_mapper import mapper_helper
@op_mapper('greater_equal')
class GreaterOrEqual():
support_opset_version_range = (12, 15)
@classmethod
def opset_12(cls, graph, node, **kw):
onnx_node = graph.make_node(
'GreaterOrEqual',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('equal')
class Equal():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
if node.input_dtype('X', 0) in [paddle.float32, paddle.float64]:
warning_info = "Operator 'Equal' only support input with dtype of int/bool, now the dtype of input is {}, this may cause wrong results, it is more recommend converting this model with opset version >= 11.".format(
node.input_dtype('X', 0))
logging.warning(warning_info)
x_node = graph.make_node(
'Cast', inputs=node.input('X'), to=dtypes.ONNX.INT32)
y_node = graph.make_node(
'Cast', inputs=node.input('Y'), to=dtypes.ONNX.INT32)
onnx_node = graph.make_node(
'Equal', inputs=[x_node, y_node], outputs=node.output('Out'))
else:
onnx_node = graph.make_node(
'Equal',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@classmethod
def opset_11(cls, graph, node, **kw):
onnx_node = graph.make_node(
'Equal',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('not_equal')
class NotEqual():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
equal_val = None
if node.input_dtype('X', 0) in [paddle.float32, paddle.float64]:
warning_info = "Operator 'not_equal' only support input with dtype of int/bool, now the dtype of input is {}, this may cause wrong results, it is more recommend converting this model with opset version >= 11.".format(
node.input_dtype('X', 0))
logging.warning(warning_info)
x_node = graph.make_node(
'Cast', inputs=node.input('X'), to=dtypes.ONNX.INT32)
y_node = graph.make_node(
'Cast', inputs=node.input('Y'), to=dtypes.ONNX.INT32)
equal_val = graph.make_node(
'Equal', inputs=[x_node, y_node], outputs=node.output('Out'))
else:
equal_val = graph.make_node(
'Equal',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
k_node = graph.make_node(
'Cast', inputs=[equal_val], to=dtypes.ONNX.INT64)
const = graph.make_node('Constant', dtype=dtypes.ONNX.INT64, value=1)
sub_ = graph.make_node('Sub', inputs=[const, k_node])
graph.make_node(
'Cast',
inputs=[sub_],
outputs=node.output('Out'),
to=dtypes.ONNX.BOOL)
@classmethod
def opset_11(cls, graph, node, **kw):
equal_val = graph.make_node(
'Equal', inputs=[node.input('X', 0), node.input('Y', 0)])
k_node = graph.make_node(
'Cast', inputs=[equal_val], to=dtypes.ONNX.INT64)
const = graph.make_node('Constant', dtype=dtypes.ONNX.INT64, value=1)
sub_ = graph.make_node('Sub', inputs=[const, k_node])
graph.make_node(
'Cast',
inputs=[sub_],
outputs=node.output('Out'),
to=dtypes.ONNX.BOOL)
@op_mapper('greater_than')
class GreaterThan():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
if node.input_dtype('X', 0) in [paddle.int32, paddle.int64]:
warning_info = "Operator 'greater_than' only support input with dtype of float/double, now the dtype of input is {}, this may cause wrong results, it is more recommend converting this model with opset version >= 11.".format(
node.input_dtype('X', 0))
logging.warning(warning_info)
x_node = graph.make_node(
'Cast', inputs=node.input('X'), to=dtypes.ONNX.INT32)
y_node = graph.make_node(
'Cast', inputs=node.input('Y'), to=dtypes.ONNX.INT32)
graph.make_node(
'Greater',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
else:
graph.make_node(
'Greater',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@classmethod
def opset_11(cls, graph, node, **kw):
onnx_node = graph.make_node(
'Greater',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('logical_and')
class LogicalAnd():
support_opset_version_range = (1, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
onnx_node = graph.make_node(
'And',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('logical_not')
class LogicalNot():
support_opset_version_range = (1, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
graph.make_node(
'Not', inputs=node.input('X'), outputs=node.output('Out'))
@op_mapper('logical_or')
class LogicalOr():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
graph.make_node(
'Or',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('logical_xor')
class LogicalXOr():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
graph.make_node(
'Xor',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('less_equal')
class LessOrEqual():
support_opset_version_range = (12, 15)
@classmethod
def opset_12(cls, graph, node, **kw):
onnx_node = graph.make_node(
'LessOrEqual',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@op_mapper('less_than')
class LessThan():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
if node.input_dtype('X', 0) in [paddle.int32, paddle.int64]:
warning_info = "Operator 'less_than' only support input with dtype of float/double, now the dtype of input is {}, this may cause wrong results, it is more recommend converting this model with opset version >= 11.".format(
node.input_dtype('X', 0))
logging.warning(warning_info)
x_node = graph.make_node(
'Cast', inputs=node.input('X'), to=dtypes.ONNX.INT32)
y_node = graph.make_node(
'Cast', inputs=node.input('Y'), to=dtypes.ONNX.INT32)
graph.make_node(
'Less',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
else:
graph.make_node(
'Less',
inputs=[node.input('X', 0), node.input('Y', 0)],
outputs=node.output('Out'))
@classmethod
def opset_9(cls, graph, node, **kw):
graph.make_node(
'Less',
inputs=[node.input('X', 0), node.input('Y', 0)],
            outputs=node.output('Out'))
@op_mapper('isfinite_v2')
class Isfinite():
support_opset_version_range = (10, 15)
@classmethod
def opset_10(cls, graph, node, **kw):
is_inf = graph.make_node('IsInf', inputs=node.input('X', 0))
is_nan = graph.make_node('IsNaN', inputs=node.input('X', 0))
        not_finite = graph.make_node('Or', inputs=[is_inf, is_nan])
        graph.make_node('Not', inputs=[not_finite], outputs=node.output('Out'))
@op_mapper('isinf_v2')
class IsInf():
support_opset_version_range = (10, 15)
@classmethod
def opset_10(cls, graph, node, **kw):
graph.make_node(
'IsInf', inputs=node.input('X'), outputs=node.output('Out'))
@op_mapper('isnan_v2')
class IsNaN():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
graph.make_node(
'IsNaN', inputs=node.input('X'), outputs=node.output('Out'))
@op_mapper('isnan')
class IsNaN():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
isnan = graph.make_node('IsNaN', inputs=node.input('X'))
cast_node = graph.make_node(
'Cast', inputs=isnan, attrs={'to': dtypes.ONNX.FLOAT})
reduce_node = graph.make_node(
'ReduceMax', inputs=[cast_node], keepdims=False)
mapper_helper.unsqueeze_helper(graph, reduce_node, [0],
node.output('Out'))

View File

@@ -0,0 +1,432 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.core as core
import six
import copy
from paddle2onnx.legacy.constant import dtypes
import paddle
from onnx import TensorProto
def is_static_shape(shape):
if len(shape) > 1 and shape[1:].count(-1) > 0:
raise Exception(
"Converting this model to ONNX need with static input shape," \
" please fix input shape of this model, see doc Q2 in" \
" https://github.com/PaddlePaddle/paddle2onnx/blob/develop/docs/en/FAQ.md."
)
def shape_helper(graph, input, dim=None):
if dim is None:
shape_node = graph.make_node('Shape', inputs=[input])
return shape_node
full_shape = graph.make_node('Shape', inputs=[input])
shape_node = slice_helper(graph, full_shape, [0], [dim], [dim + 1])
return shape_node
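# e.g. shape_helper(graph, x, dim=0) emits Shape(x) followed by a Slice, giving
# a 1-D, single-element tensor that holds dimension 0 of x.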
def unsqueeze_helper(graph, input, axes, outputs=None):
inputs = []
if not isinstance(input, list):
input = [input]
inputs.append(input[0])
if not isinstance(axes, list):
axes = [axes]
if outputs is not None and isinstance(outputs, six.string_types):
outputs = [outputs]
if graph.opset_version < 13:
unsqueeze_node = graph.make_node(
"Unsqueeze", inputs=inputs, outputs=outputs, axes=axes)
return unsqueeze_node
else:
axes_node = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=axes)
inputs = inputs + [axes_node]
unsqueeze_node = graph.make_node(
"Unsqueeze", inputs=inputs, outputs=outputs)
return unsqueeze_node
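# From opset 13 on, Squeeze/Unsqueeze/Split take their axes/split sizes as an
# input tensor instead of an attribute; these helpers hide that difference.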
def split_helper(graph, input, axis=0, split=None, outputs=None):
assert outputs is not None, "outputs can not be None in split_helper."
inputs = []
if not isinstance(input, list):
input = [input]
inputs.append(input[0])
if split is not None and not isinstance(split, list):
split = [split]
if split is None:
split_node = graph.make_node(
"Split", inputs=inputs, outputs=outputs, axis=axis)
return split_node
if graph.opset_version < 13:
split_node = graph.make_node(
"Split", inputs=inputs, outputs=outputs, axis=axis, split=split)
return split_node
else:
split = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=split)
inputs = inputs + [split]
split_node = graph.make_node(
"Split", inputs=inputs, axis=axis, outputs=outputs)
return split_node
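# Slice made the same migration at opset 10: starts/ends/axes become input
# tensors rather than attributes, which slice_helper dispatches on below.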
def slice_helper(graph,
input,
axes,
starts,
ends,
outputs=None,
dtype=dtypes.ONNX.INT64):
inputs = []
if not isinstance(input, list):
input = [input]
inputs.append(input[0])
if axes is not None and not isinstance(axes, list):
axes = [axes]
if starts is not None and not isinstance(starts, (list, six.string_types)):
starts = [starts]
if ends is not None and not isinstance(ends, (list, six.string_types)):
ends = [ends]
if graph.opset_version < 10:
attrs = {
'starts': starts,
'ends': ends,
}
if axes not in [None, []]:
attrs['axes'] = axes
slice_node = graph.make_node(
"Slice", inputs=inputs, outputs=outputs, attrs=attrs)
return slice_node
else:
if not isinstance(starts, six.string_types):
starts = graph.make_node('Constant', dtype=dtype, value=starts)
if not isinstance(ends, six.string_types):
ends = graph.make_node('Constant', dtype=dtype, value=ends)
inputs = inputs + [starts, ends]
if axes not in [None, []]:
axes_node = graph.make_node('Constant', dtype=dtype, value=axes)
inputs.append(axes_node)
slice_node = graph.make_node("Slice", inputs=inputs, outputs=outputs)
return slice_node
def squeeze_helper(graph, input, axes=None, outputs=None):
inputs = []
if not isinstance(input, list):
input = [input]
inputs.append(input[0])
if axes is not None and not isinstance(axes, list):
axes = [axes]
if graph.opset_version < 13:
squeeze_node = graph.make_node(
"Squeeze", inputs=inputs, axes=axes, outputs=outputs)
return squeeze_node
else:
if axes is not None:
axes_node = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=axes)
inputs.append(axes_node)
squeeze_node = graph.make_node(
"Squeeze", inputs=inputs, outputs=outputs)
return squeeze_node
def unsqueeze_helper(graph, input, axes, outputs=None):
inputs = []
if isinstance(input, list):
input = input[0]
inputs.append(input)
if not isinstance(axes, list):
axes = [axes]
if graph.opset_version < 13:
unsqueeze_node = graph.make_node(
'Unsqueeze', inputs=inputs, axes=axes, outputs=outputs)
else:
axes_node = graph.make_node(
'Constant', attrs={'dtype': dtypes.ONNX.INT64,
'value': axes})
inputs.append(axes_node)
unsqueeze_node = graph.make_node(
'Unsqueeze', inputs=inputs, outputs=outputs)
return unsqueeze_node
def split_helper(graph, inputs, outputs, axis, split, dtype=paddle.float32):
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not isinstance(outputs, int) and not isinstance(outputs, (list, tuple)):
outputs = [outputs]
if dtype == paddle.float64:
cast_inputs = []
for i in range(len(inputs)):
one = graph.make_node(
'Cast', inputs=[inputs[i]], to=TensorProto.FLOAT)
cast_inputs.append(one)
if graph.opset_version < 13:
split_node = graph.make_node(
"Split",
inputs=cast_inputs,
outputs=outputs,
axis=axis,
split=split)
else:
split_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=split)
split_node = graph.make_node(
"Split",
inputs=cast_inputs + [split_const],
outputs=outputs,
axis=axis)
casted_output = []
for i in range(len(outputs)):
one = graph.make_node(
'Cast',
inputs=[split_node[i]],
outputs=[outputs[i]],
to=TensorProto.DOUBLE)
casted_output.append(one)
return casted_output
else:
if graph.opset_version < 13:
split_node = graph.make_node(
"Split", inputs=inputs, outputs=outputs, axis=axis, split=split)
else:
split_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=split)
split_node = graph.make_node(
"Split",
inputs=inputs + [split_const],
outputs=outputs,
axis=axis)
return split_node
def constant_helper(graph, dtype, value, shape=None, outputs=[]):
constant = graph.make_node(
'Constant',
inputs=[],
outputs=outputs,
attrs={
'dims': shape,
'dtype': dtypes.DTYPE_PADDLE_ONNX_MAP[dtype],
'value': value
})
return constant
def clip_helper(graph, node, input, max, min, output=[]):
x_dtype = node.input_dtype('X', 0)
if (isinstance(min, six.string_types) or
isinstance(max, six.string_types)) and graph.opset_version < 11:
        raise Exception(
            "min or max of Clip is a Tensor; please convert with a higher ONNX opset_version."
        )
if graph.opset_version < 11:
if x_dtype != paddle.float32:
input = graph.make_node(
'Cast', inputs=[input], to=dtypes.ONNX.FLOAT)
clip = graph.make_node('Clip', inputs=input, max=max, min=min)
clip = graph.make_node(
'Cast',
inputs=[clip],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[x_dtype],
outputs=output)
else:
clip = graph.make_node(
'Clip', inputs=input, max=max, min=min, outputs=output)
else:
if x_dtype != paddle.float32:
input = graph.make_node(
'Cast', inputs=[input], to=dtypes.ONNX.FLOAT)
if not isinstance(min, six.string_types):
min = graph.make_node(
'Constant',
attrs={
'dtype': dtypes.DTYPE_PADDLE_ONNX_MAP[paddle.float32],
'value': min
})
else:
if node.input_dtype('Min', 0) != paddle.float32:
min = graph.make_node(
'Cast',
inputs=min,
attrs={'to': dtypes.DTYPE_PADDLE_ONNX_MAP[paddle.float32]})
min = graph.make_node('Squeeze', min)
if not isinstance(max, six.string_types):
max = graph.make_node(
'Constant',
attrs={
'dtype': dtypes.DTYPE_PADDLE_ONNX_MAP[paddle.float32],
'value': max
})
else:
if node.input_dtype('Max', 0) != paddle.float32:
max = graph.make_node(
'Cast',
inputs=max,
attrs={'to': dtypes.DTYPE_PADDLE_ONNX_MAP[paddle.float32]})
max = graph.make_node('Squeeze', max)
if x_dtype != paddle.float32:
clip_pre = graph.make_node('Clip', inputs=[input, min, max])
clip = graph.make_node(
'Cast',
inputs=[clip_pre],
outputs=output,
to=dtypes.DTYPE_PADDLE_ONNX_MAP[x_dtype])
else:
clip = graph.make_node(
'Clip', inputs=[input, min, max], outputs=output)
return clip
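# dtype_alignment casts every input to the widest dtype present, following the
# promotion order bool < int16 < int32 < int64 < fp16 < fp32 < fp64 (or to the
# explicit 'to' dtype); e.g. mixing int32 and fp32 inputs casts the int32 one
# up to fp32.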
def dtype_alignment(graph, nodes, node_dtypes, to=None):
assert len(nodes) == len(
node_dtypes), "Length of nodes and node_dtypes should be equal."
dtype_order = [
core.VarDesc.VarType.BOOL,
core.VarDesc.VarType.INT16,
core.VarDesc.VarType.INT32,
core.VarDesc.VarType.INT64,
core.VarDesc.VarType.FP16,
core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP64,
]
max_index = -1
for dtype in node_dtypes:
index = dtype_order.index(dtype)
if index > max_index:
max_index = index
if max_index < 0:
return nodes
casted_nodes = list()
cast_dtype = dtype_order[max_index]
cast_dtype = dtypes.DTYPE_PADDLE_ONNX_MAP[cast_dtype]
for i, dtype in enumerate(node_dtypes):
index = dtype_order.index(dtype)
if to is not None:
cast_dtype = to
            condition = dtypes.DTYPE_PADDLE_ONNX_MAP[dtype] != cast_dtype
else:
condition = index != max_index
if condition:
cast_node = graph.make_node(
'Cast', inputs=[nodes[i]], to=cast_dtype)
casted_nodes.append(cast_node)
else:
casted_nodes.append(nodes[i])
return casted_nodes
def cast(graph, input, origin_dtype, target_dtype):
if not isinstance(origin_dtype, six.string_types):
origin_dtype = dtypes.DTYPE_PADDLE_STR_MAP[origin_dtype]
if origin_dtype != target_dtype:
cast_node = graph.make_node(
'Cast', inputs=input, to=dtypes.DTYPE_ONNX_STR_MAP[target_dtype])
return cast_node
return input
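# shape_alignment unsqueezes rank-0 (scalar) inputs to rank 1 so that a
# following Concat sees tensors of equal rank.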
def shape_alignment(graph, nodes, node_shapes):
assert len(nodes) == len(
node_shapes), "Length of nodes and node_shapes should be equal."
max_dim = -1
for shape in node_shapes:
dim = len(shape)
if dim > max_dim:
max_dim = dim
if max_dim < 0:
return nodes
    assert max_dim in (0, 1), "shape_alignment only supports tensors of rank 0 or 1."
max_dim = 1 if max_dim == 0 else max_dim
unsqueeze_nodes = list()
for i, shape in enumerate(node_shapes):
dim = len(shape)
if dim != max_dim:
unsqueeze_node = nodes[i]
for j in range(max_dim - dim):
unsqueeze_node = unsqueeze_helper(graph, unsqueeze_node, [0])
unsqueeze_nodes.append(unsqueeze_node)
else:
unsqueeze_nodes.append(nodes[i])
return unsqueeze_nodes
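# get_tensor_list_node concatenates a Paddle tensor-list input into a single
# 1-D tensor after aligning the dtypes and ranks of its elements.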
def get_tensor_list_node(graph, node, name, dtype=None):
node_list = node.input(name)
node_dtypes = [node.input_dtype(name, i) for i in range(len(node_list))]
node_list = dtype_alignment(graph, node_list, node_dtypes, dtype)
node_shapes = [node.input_shape(name, i) for i in range(len(node_list))]
node_list = shape_alignment(graph, node_list, node_shapes)
node = graph.make_node("Concat", inputs=node_list, axis=0)
return node
def get_value_from_parameters(graph, input_node):
assert input_node in graph.parameters, "{} is not in graph.parameters".format(
input_node)
data = graph.parameters[input_node].attribute[0].t.int32_data
if data is None or len(data) < 1:
data = graph.parameters[input_node].attribute[0].t.int64_data
value = [val for _, val in enumerate(data)]
return value
# Returns a pair (value, is_tensor):
#   value     - the attribute value (a python value or an ONNX tensor name)
#   is_tensor - whether value is a tensor
def get_node_attr_value(graph,
node,
attr_name=None,
attr_tensor_name=None,
attr_tensor_list_name=None,
return_list=False,
dtype=None):
attr_tensor = node.input(attr_tensor_name)
attr_tensor_list = node.input(attr_tensor_list_name)
if attr_tensor is not None and len(attr_tensor) > 0:
value = node.input(attr_tensor_name)[0]
if return_list:
try:
value = get_value_from_parameters(graph, value)
return value, False # value, is_tensor
except Exception:
return value, True
else:
input_dtype = dtypes.DTYPE_PADDLE_ONNX_MAP[node.input_dtype(
attr_tensor_name, 0)]
if input_dtype != dtype:
value = graph.make_node('Cast', inputs=[value], to=dtype)
return value, True
elif attr_tensor_list is not None and len(attr_tensor_list) > 0:
value = get_tensor_list_node(graph, node, attr_tensor_list_name, dtype)
return value, True
else:
value = node.attr(attr_name)
return value, False

File diff suppressed because it is too large

View File

@@ -0,0 +1,952 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import math
import collections
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
from paddle2onnx import utils
import paddle
@op_mapper(['conv2d', 'depthwise_conv2d', 'conv3d'])
class Conv():
support_opset_version_range = (1, 12)
@classmethod
def opset_1(cls, graph, node, **kw):
kernel_shape = node.input_shape('Filter', 0)
dilations = node.attr('dilations')
kernel_shape = kernel_shape[2:]
strides = node.attr('strides')
group = node.attr('groups')
pads = node.attr('paddings')
assert node.attrs['data_format'] == 'NCHW' or node.attrs['data_format'] == 'NCDHW' or node.attrs['data_format'] == "AnyLayout", \
"The conv data format should be 'NCHW' or 'NCDHW', but received data format " \
"is %s." % node.attrs['data_format']
# onnx padding is [x1_begin, x2_begin...x1_end, x2_end, ...]
if len(pads) == 2 or len(pads) == 3:
pads = pads + pads
elif len(pads) == 4:
pads = [pads[i] for i in [0, 2, 1, 3]]
elif len(pads) == 6:
pads = [pads[i] for i in [0, 2, 4, 1, 3, 5]]
attrs = {
'dilations': dilations,
'kernel_shape': kernel_shape,
'strides': strides,
'group': group
}
auto_pad = node.attr('padding_algorithm')
if auto_pad == 'SAME':
attrs['auto_pad'] = 'SAME_UPPER'
elif auto_pad == 'VALID':
attrs['auto_pad'] = 'VALID'
else:
attrs['pads'] = pads
graph.make_node(
'Conv',
inputs=node.input('Input') + node.input('Filter'),
outputs=node.output('Output'),
attrs=attrs)
@op_mapper(
['conv2d_transpose', 'depthwise_conv2d_transpose', 'conv3d_transpose'])
class ConvTranspose():
support_opset_version_range = (1, 12)
@classmethod
def opset_1(cls, graph, node, **kw):
output_padding = node.attr('output_padding')
kernel_shape = node.input_shape('Filter', 0)
dilations = node.attr('dilations')
kernel_shape = kernel_shape[2:]
strides = node.attr('strides')
group = node.attr('groups')
pads = node.attr('paddings')
assert node.attrs['data_format'] == 'NCHW' or node.attrs['data_format'] == 'NCDHW', \
"The conv data format should be 'NCHW' or 'NCDHW', but received data format " \
"is %s." % node.attrs['data_format']
if len(pads) == 2 or len(pads) == 3:
pads = pads + pads
elif len(pads) == 4:
pads = [pads[i] for i in [0, 2, 1, 3]]
elif len(pads) == 6:
pads = [pads[i] for i in [0, 2, 4, 1, 3, 5]]
attrs = {
'dilations': dilations,
'kernel_shape': kernel_shape,
'strides': strides,
'group': group
}
auto_pad = node.attr('padding_algorithm')
if auto_pad == 'SAME':
attrs['auto_pad'] = 'SAME_UPPER'
elif auto_pad == 'VALID':
attrs['auto_pad'] = 'VALID'
else:
attrs['pads'] = pads
if output_padding and len(output_padding) > 0:
attrs['output_padding'] = output_padding
graph.make_node(
'ConvTranspose',
inputs=node.input('Input') + node.input('Filter'),
outputs=node.output('Output'),
attrs=attrs)
@op_mapper('pool2d')
class Pool():
support_opset_version_range = (1, 12)
pool_type = {
'max': ('MaxPool', 'GlobalMaxPool'),
'avg': ('AveragePool', 'GlobalAveragePool')
}
@classmethod
def is_same_span(cls, in_size, out_size):
spans = []
for i in range(out_size):
start = math.floor(i * (in_size / out_size))
end = math.ceil((i + 1) * (in_size / out_size))
spans.append(end - start)
if len(set(spans)) == 1:
return True
return False
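    # Adaptive pooling maps to a plain (Max/Average)Pool only when every output
    # cell covers the same number of input elements; is_same_span checks that,
    # e.g. 4 -> 2 is convertible while 5 -> 3 is not.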
@classmethod
def opset_1(cls, graph, node, **kw):
        assert node.attrs['data_format'] == 'NCHW' or node.attrs['data_format'] == "AnyLayout", \
            "The pool2d data format should be 'NCHW', but received data format " \
            "is %s." % node.attrs['data_format']
x_dtype = node.input_dtype('X', 0)
need_dtype_convert = False
input_name = node.input('X', 0)
if x_dtype != paddle.float32:
need_dtype_convert = True
input_name = graph.make_node(
'Cast', inputs=node.input('X'), to=dtypes.ONNX.FLOAT)
if node.attr('global_pooling') or (node.attr('adaptive') and
node.attr('ksize') == [1, 1]):
if need_dtype_convert:
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][1],
inputs=[input_name])
graph.make_node(
'Cast',
inputs=[onnx_node],
outputs=node.output('Out'),
to=dtypes.ONNX.DOUBLE)
else:
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][1],
inputs=[input_name],
outputs=node.output('Out'))
elif node.attr('adaptive'):
# if pool is adaptive, check if input shape of pool is fixed.
if node.input_shape('X', 0)[2:].count(-1) > 0:
raise Exception(
"Converting this model to ONNX need with static input shape," \
" please fix input shape of this model, see doc Q2 in" \
" https://github.com/PaddlePaddle/paddle2onnx/blob/develop/docs/en/FAQ.md."
)
input_h, input_w = node.input_shape('X', 0)[2:]
output_h, output_w = node.output_shape('Out', 0)[2:]
stride_h = int(input_h / output_h)
stride_w = int(input_w / output_w)
kernel_h = input_h - (output_h - 1) * stride_h
kernel_w = input_w - (output_w - 1) * stride_w
            # check if kernel_size is fixed.
if not cls.is_same_span(input_h, output_h) or not cls.is_same_span(
input_w, output_w):
raise Exception(
"Cannot convert adaptive pool with input_size: {}, output_size: {}"
.format(
node.input_shape('X', 0), node.output_shape('Out', 0)))
else:
attrs = {
'kernel_shape': (kernel_h, kernel_w),
'strides': (stride_h, stride_w),
}
                if node.attr('ceil_mode') and graph.opset_version < 10:
                    raise Exception(
                        "Cannot convert pool with ceil_mode == True to ONNX Opset version < 10."
                    )
                elif graph.opset_version >= 10:
                    attrs['ceil_mode'] = node.attr('ceil_mode')
auto_pad = node.attr('padding_algorithm')
if auto_pad == 'SAME':
attrs['auto_pad'] = 'SAME_UPPER'
elif auto_pad == 'VALID':
attrs['auto_pad'] = 'VALID'
if node.attr('pooling_type') == 'avg':
attrs['count_include_pad'] = not node.attr('exclusive')
if need_dtype_convert:
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][0],
inputs=[input_name],
attrs=attrs)
graph.make_node(
'Cast',
inputs=[onnx_node],
outputs=node.output('Out'),
to=dtypes.ONNX.DOUBLE)
else:
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][0],
inputs=[input_name],
outputs=node.output('Out'),
attrs=attrs)
else:
input_shape = node.input_shape('X', 0)
k_size = node.attr('ksize')
pads = node.attr('paddings')
strides = node.attr('strides')
if len(pads) == 2:
pads = pads + pads
elif len(pads) == 4:
pads = [pads[i] for i in [0, 2, 1, 3]]
if input_shape[2] > 0 and input_shape[2] + pads[0] < k_size[0]:
k_size[0] = input_shape[2] + pads[0]
if input_shape[3] > 0 and input_shape[3] + pads[1] < k_size[1]:
k_size[1] = input_shape[3] + pads[1]
input_x = [input_name]
if max(k_size) <= max(pads):
onnx_paddings = [0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]]
attrs_pad = {'mode': 'constant', }
if graph.opset_version >= 11:
pads_node = graph.make_node(
'Constant',
attrs={
'dtype': dtypes.ONNX.INT64,
'value': onnx_paddings
})
value_node = graph.make_node(
'Constant',
attrs={'dtype': dtypes.ONNX.FLOAT,
'value': 0.0})
input_x = input_x + [pads_node, value_node]
else:
attrs_pad['pads'] = onnx_paddings
attrs_pad['value'] = 0.0
input_x = graph.make_node(
'Pad', inputs=input_x, attrs=attrs_pad)
pads = [0, 0, 0, 0]
attrs = {
'kernel_shape': k_size,
'strides': strides,
}
auto_pad = node.attr('padding_algorithm')
if auto_pad == 'SAME':
attrs['auto_pad'] = 'SAME_UPPER'
elif auto_pad == 'VALID':
attrs['auto_pad'] = 'VALID'
else:
attrs['pads'] = pads
            if node.attr('ceil_mode') and graph.opset_version < 10:
                raise Exception(
                    "Cannot convert pool with ceil_mode == True to ONNX Opset version < 10"
                )
elif graph.opset_version >= 10:
attrs['ceil_mode'] = node.attr('ceil_mode')
if node.attr('pooling_type') == 'avg':
attrs['count_include_pad'] = not node.attr('exclusive')
if need_dtype_convert:
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][0],
inputs=input_x,
attrs=attrs)
graph.make_node(
'Cast',
inputs=[onnx_node],
outputs=node.output('Out'),
to=dtypes.ONNX.DOUBLE)
else:
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][0],
inputs=input_x,
outputs=node.output('Out'),
attrs=attrs)
@op_mapper('pool3d')
class Pool3D():
support_opset_version_range = (1, 12)
pool_type = {
'max': ('MaxPool', 'GlobalMaxPool'),
'avg': ('AveragePool', 'GlobalAveragePool')
}
@classmethod
def is_same_span(cls, in_size, out_size):
spans = []
for i in range(out_size):
start = math.floor(i * (in_size / out_size))
end = math.ceil((i + 1) * (in_size / out_size))
spans.append(end - start)
if len(set(spans)) == 1:
return True
return False
@classmethod
def opset_1(cls, graph, node, **kw):
        assert node.attrs['data_format'] == 'NCDHW' or node.attrs['data_format'] == "AnyLayout", \
            "The pool3d data format should be 'NCDHW', but received data format " \
            "is %s." % node.attrs['data_format']
if node.attr('global_pooling') or (node.attr('adaptive') and
node.attr('ksize') == [1, 1, 1]):
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][1],
inputs=node.input('X'),
outputs=node.output('Out'))
elif node.attr('adaptive'):
# if pool is adaptive, check if input shape of pool is fixed.
if node.input_shape('X', 0)[2:].count(-1) > 0:
raise Exception(
"Converting this model to ONNX need with static input shape," \
" please fix input shape of this model, see doc Q2 in" \
" https://github.com/PaddlePaddle/paddle2onnx/blob/develop/docs/en/FAQ.md."
)
input_d, input_h, input_w = node.input_shape('X', 0)[2:]
output_d, output_h, output_w = node.output_shape('Out', 0)[2:]
stride_d = int(input_d / output_d)
stride_h = int(input_h / output_h)
stride_w = int(input_w / output_w)
kernel_d = input_d - (output_d - 1) * stride_d
kernel_h = input_h - (output_h - 1) * stride_h
kernel_w = input_w - (output_w - 1) * stride_w
            # check if kernel_size is fixed.
if not cls.is_same_span(input_h, output_h) or not cls.is_same_span(
input_w, output_w) or not cls.is_same_span(input_d,
output_d):
raise Exception(
"Cannot convert adaptive pool with input_size: {}, output_size: {}"
.format(
node.input_shape('X', 0), node.output_shape('Out', 0)))
else:
attrs = {
'kernel_shape': (kernel_d, kernel_h, kernel_w),
'strides': (stride_d, stride_h, stride_w),
}
                if node.attr('ceil_mode') and graph.opset_version < 10:
                    raise Exception(
                        "Cannot convert pool with ceil_mode == True to ONNX Opset version < 10."
                    )
                elif graph.opset_version >= 10:
                    attrs['ceil_mode'] = node.attr('ceil_mode')
auto_pad = node.attr('padding_algorithm')
if auto_pad == 'SAME':
attrs['auto_pad'] = 'SAME_UPPER'
elif auto_pad == 'VALID':
attrs['auto_pad'] = 'VALID'
if node.attr('pooling_type') == 'avg':
attrs['count_include_pad'] = not node.attr('exclusive')
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][0],
inputs=node.input('X'),
outputs=node.output('Out'),
attrs=attrs)
else:
input_shape = node.input_shape('X', 0)
k_size = node.attr('ksize')
paddings = node.attr('paddings')
if input_shape[2] > 0 and input_shape[2] + paddings[0] < k_size[0]:
k_size[0] = input_shape[2] + paddings[0]
if input_shape[3] > 0 and input_shape[3] + paddings[1] < k_size[1]:
k_size[1] = input_shape[3] + paddings[1]
if input_shape[4] > 0 and input_shape[4] + paddings[2] < k_size[2]:
k_size[2] = input_shape[4] + paddings[2]
attrs = {
'kernel_shape': k_size,
'strides': node.attr('strides'),
'pads': node.attr('paddings') + node.attr('paddings'),
}
            if node.attr('ceil_mode') and graph.opset_version < 10:
                raise Exception(
                    "Cannot convert pool with ceil_mode == True to ONNX Opset version < 10"
                )
elif graph.opset_version >= 10:
attrs['ceil_mode'] = node.attr('ceil_mode')
if node.attr('pooling_type') == 'avg':
attrs['count_include_pad'] = not node.attr('exclusive')
onnx_node = graph.make_node(
cls.pool_type[node.attr('pooling_type')][0],
inputs=node.input('X'),
outputs=node.output('Out'),
attrs=attrs)
@op_mapper('elu')
class ELU():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'Elu',
inputs=node.input('X'),
outputs=node.output('Out'),
alpha=node.attr('alpha'))
@op_mapper('softsign')
class SoftSign():
support_opset_version_range = (7, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
graph.make_node(
'Softsign', inputs=node.input('X'), outputs=node.output('Out'))
@op_mapper('hard_shrink')
class Hardshrink():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
node = graph.make_node(
'Shrink',
inputs=node.input('X'),
outputs=node.output('Out'),
lambd=node.attr('threshold'))
@op_mapper('logsigmoid')
class LogSigmoid():
support_opset_version_range = (1, 12)
@classmethod
def opset_1(cls, graph, node, **kw):
sigmoid_node = graph.make_node('Sigmoid', inputs=node.input('X'))
graph.make_node('Log', inputs=sigmoid_node, outputs=node.output('Out'))
@op_mapper('norm')
class Norm():
support_opset_version_range = (1, 12)
@classmethod
def opset_1(cls, graph, node, **kw):
node = graph.make_node(
'LpNormalization',
inputs=node.input('X'),
outputs=node.output('Out'),
axis=node.attr('axis'))
@op_mapper('softshrink')
class SoftShrink():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
graph.make_node(
'Shrink',
inputs=node.input('X'),
bias=node.attr('lambda'),
lambd=node.attr('lambda'),
outputs=node.output('Out'))
@op_mapper('tanh_shrink')
class TanhShrink():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
tanh_node = graph.make_node(
'Tanh',
inputs=node.input('X', 0), )
graph.make_node(
'Sub',
inputs=[node.input('X', 0), tanh_node],
outputs=node.output('Out'))
@op_mapper('log_softmax')
class LogSoftmax():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
axis = node.attr('axis')
shape = node.output_shape('Out', 0)
if axis is None:
axis = -1
if axis < 0:
axis += len(shape)
if axis == len(shape) - 1:
node = graph.make_node(
'LogSoftmax',
inputs=node.input('X'),
outputs=node.output('Out'),
attrs={'axis': axis})
else:
perm = [i for i in range(len(shape))]
perm[-1] = axis
perm[axis] = len(shape) - 1
transpose_node = graph.make_node(
'Transpose', inputs=node.input('X'), attrs={'perm': perm})
softmax_node = graph.make_node(
'LogSoftmax', inputs=[transpose_node], axis=-1)
transpose_node1 = graph.make_node(
'Transpose',
inputs=[softmax_node],
outputs=node.output('Out'),
attrs={'perm': perm})
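        # Note on the transpose trick above: `perm` only swaps `axis` with
        # the last dimension, so it is its own inverse; applying the same
        # perm to the LogSoftmax result restores the original layout. This
        # works around ONNX LogSoftmax before opset 13, which is only
        # reliable when normalizing over the last axis.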
@classmethod
def opset_13(cls, graph, node, **kw):
graph.make_node(
'LogSoftmax',
inputs=node.input('X'),
axis=node.attr('axis'),
outputs=node.output('Out'))
@op_mapper('layer_norm')
class LayerNorm():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
ipt = node.input('X', 0)
ipt_dims = len(node.input_shape('X', 0))
normalized_shape = node.attr('begin_norm_axis')
axes = None
if isinstance(normalized_shape, collections.Iterable):
axes = [-i for i in range(len(normalized_shape), 0, -1)]
else:
axes = [i for i in range(normalized_shape, ipt_dims)]
dtype = node.block.vars[node.input('X', 0)].dtype
dtype = dtypes.DTYPE_PADDLE_ONNX_MAP[dtype]
epsilon = graph.make_node(
'Constant', dtype=dtype, value=node.attr('epsilon'))
two = graph.make_node('Constant', dtype=dtype, value=2.0)
mean = graph.make_node("ReduceMean", inputs=[ipt], axes=axes)
numerator = graph.make_node("Sub", inputs=[ipt, mean])
pow_num = graph.make_node("Pow", inputs=[numerator, two])
variance = graph.make_node("ReduceMean", inputs=[pow_num], axes=axes)
add_eps = graph.make_node("Add", inputs=[variance, epsilon])
denominator = graph.make_node("Sqrt", inputs=[add_eps])
ipt_shape = graph.make_node("Shape", inputs=[ipt])
weight_shape = mapper_helper.slice_helper(
graph, ipt_shape, [0], [ipt_dims - len(axes)], [ipt_dims])
if 'Bias' in node.inputs and 'Scale' in node.inputs and len(
node.input('Scale')) > 0 and len(node.input('Bias')) > 0:
if normalized_shape == ipt_dims - 1:
shape_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[-1])
scale = graph.make_node(
"Reshape", inputs=[node.input('Scale', 0), shape_const])
bias = graph.make_node(
"Reshape", inputs=[node.input('Bias', 0), shape_const])
else:
scale = graph.make_node(
"Reshape", inputs=[node.input('Scale', 0), weight_shape])
bias = graph.make_node(
"Reshape", inputs=[node.input('Bias', 0), weight_shape])
layer_norm = graph.make_node("Div", inputs=[numerator, denominator])
layer_norm = graph.make_node("Mul", inputs=[layer_norm, scale])
graph.make_node(
"Add", inputs=[layer_norm, bias], outputs=node.output('Y'))
elif 'Bias' in node.inputs and len(node.input('Bias')) > 0:
if normalized_shape == ipt_dims - 1:
shape_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[-1])
bias = graph.make_node(
"Reshape", inputs=[node.input('Bias', 0), shape_const])
else:
bias = graph.make_node(
"Reshape", inputs=[node.input('Bias', 0), weight_shape])
layer_norm = graph.make_node("Div", inputs=[numerator, denominator])
graph.make_node(
"Add", inputs=[layer_norm, bias], outputs=node.output('Y'))
elif 'Scale' in node.inputs and len(node.input('Scale')) > 0:
if normalized_shape == ipt_dims - 1:
shape_const = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[-1])
scale = graph.make_node(
"Reshape", inputs=[node.input('Scale', 0), shape_const])
else:
scale = graph.make_node(
"Reshape", inputs=[node.input('Scale', 0), weight_shape])
layer_norm = graph.make_node("Div", inputs=[numerator, denominator])
graph.make_node(
"Mul", inputs=[layer_norm, scale], outputs=node.output('Y'))
else:
layer_norm = graph.make_node(
"Div",
inputs=[numerator, denominator],
outputs=node.output('Y'))
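# Reference semantics of the LayerNorm lowering above (a hedged numpy
# sketch, not part of the converter; mean and variance are reduced over the
# trailing `axes` with keepdims, matching ONNX ReduceMean defaults):
#
#   import numpy as np
#   def layer_norm_ref(x, scale, bias, axes, eps):
#       mean = x.mean(axis=axes, keepdims=True)
#       var = ((x - mean) ** 2).mean(axis=axes, keepdims=True)
#       y = (x - mean) / np.sqrt(var + eps)
#       return y * scale + bias
#
# The graph builds exactly this chain from ReduceMean/Sub/Pow/Add/Sqrt/Div
# nodes, reshaping Scale and Bias so they broadcast over the normalized
# dimensions.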
@op_mapper('batch_norm')
class BatchNorm():
support_opset_version_range = (7, 15)
@classmethod
def make_attrs_and_inputs(cls, graph, node, **kw):
onnx_attr = {
'epsilon': node.attr('epsilon'),
'momentum': node.attr('momentum')
}
inputs = node.input('X') + node.input('Scale') + node.input(
'Bias') + node.input('Mean') + node.input('Variance')
return onnx_attr, inputs
@classmethod
def opset_9(cls, graph, node, **kw):
onnx_attr, inputs = cls.make_attrs_and_inputs(graph, node, **kw)
onnx_node = graph.make_node(
'BatchNormalization',
inputs=inputs,
outputs=node.output('Y'),
**onnx_attr)
@classmethod
def opset_7(cls, graph, node, **kw):
onnx_attr, inputs = cls.make_attrs_and_inputs(graph, node, **kw)
onnx_attr['spatial'] = 1
onnx_node = graph.make_node(
'BatchNormalization',
inputs=inputs,
outputs=node.output('Y'),
**onnx_attr)
@op_mapper('group_norm')
class GroupNorm():
support_opset_version_range = (6, 15)
@classmethod
def opset_6(cls, graph, node, **kw):
num_groups = node.attr('groups')
epsilon = node.attr('epsilon')
ipt = node.input('X')[0]
ipt_shape = node.input_shape('X', 0)
assert len(
ipt_shape) == 4, "Only support 4D-Tensor as input for GroupNorm"
dtype = node.block.vars[node.input('X', 0)].dtype
dtype = dtypes.DTYPE_PADDLE_ONNX_MAP[dtype]
shape = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[0, num_groups, -1])
reshape_input = graph.make_node('Reshape', inputs=[ipt, shape])
scale_ = graph.make_node(
'Constant', dtype=dtype, value=[1.0] * num_groups)
bias_ = graph.make_node(
'Constant', dtype=dtype, value=[0.0] * num_groups)
reshaped_output = graph.make_node(
'InstanceNormalization',
inputs=[reshape_input, scale_, bias_],
epsilon=epsilon)
origin_shape = graph.make_node('Shape', inputs=[ipt])
if len(node.input('Scale')) > 0 and len(node.input('Bias')) > 0:
output = graph.make_node(
'Reshape', inputs=[reshaped_output, origin_shape])
unsqueezed_scale = mapper_helper.unsqueeze_helper(
graph, node.input('Scale', 0), [1, 2])
unsqueezed_bias = mapper_helper.unsqueeze_helper(
graph, node.input('Bias', 0), [1, 2])
part0 = graph.make_node('Mul', inputs=[output, unsqueezed_scale])
graph.make_node(
'Add',
inputs=[part0, unsqueezed_bias],
outputs=node.output('Y'))
else:
output = graph.make_node(
'Reshape',
inputs=[reshaped_output, origin_shape],
outputs=node.output('Y'))
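# How the GroupNorm lowering above works (sketch): the input is reshaped to
# [0, num_groups, -1] so that each group becomes a single "channel", then
# normalized with InstanceNormalization using unit scale and zero bias,
# reshaped back to the original layout, and only then scaled/shifted by the
# real per-channel Scale/Bias (unsqueezed to [C, 1, 1] so they broadcast
# over H and W).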
@op_mapper('instance_norm')
class InstanceNorm():
support_opset_version_range = (6, 15)
@classmethod
def opset_6(cls, graph, node, **kw):
onnx_attr = {'epsilon': node.attr('epsilon'), }
num_groups = node.block.vars[node.input('X')[0]].shape[1]
dtype = node.block.vars[node.input('X', 0)].dtype
dtype = dtypes.DTYPE_PADDLE_ONNX_MAP[dtype]
if len(node.input('Scale')) == 0:
scale_ = graph.make_node(
'Constant', dtype=dtype, value=[1.0] * num_groups)
else:
scale_ = node.input('Scale')[0]
if len(node.input('Bias')) == 0:
bias_ = graph.make_node(
'Constant', dtype=dtype, value=[0.0] * num_groups)
else:
bias_ = node.input('Bias')[0]
inputs = node.input('X') + [scale_] + [bias_]
onnx_node = graph.make_node(
'InstanceNormalization',
inputs=inputs,
outputs=node.output('Y'),
**onnx_attr)
@op_mapper('dropout')
class Dropout():
support_opset_version_range = (7, 15)
@classmethod
def opset_7(cls, graph, node, **kw):
dropout_mode = node.attr('dropout_implementation')
dropout_prob = node.attr('dropout_prob')
if dropout_mode == 'upscale_in_train':
onnx_node = graph.make_node(
'Identity', inputs=node.input('X'), outputs=node.output('Out'))
elif dropout_mode == 'downgrade_in_infer':
scale_node = graph.make_node(
'Constant',
attrs={'dtype': dtypes.ONNX.FLOAT,
'value': 1 - dropout_prob})
graph.make_node(
"Mul",
inputs=[node.input('X')[0], scale_node],
outputs=node.output('Out'))
else:
raise Exception("Unexpected situation happend")
@op_mapper('roi_align')
class RoiAlign():
support_opset_version_range = (10, 16)
@classmethod
def opset_10(cls, graph, node, **kw):
if node.attr('aligned') and graph.opset_version < 16:
            raise Exception(
                'When aligned is True, the ONNX opset version must be >= 16.')
rois_shape = graph.make_node('Shape', inputs=[node.input('ROIs', 0)])
starts = graph.make_node(
'Constant', attrs={'dtype': dtypes.ONNX.INT64,
'value': [0]})
ends = graph.make_node(
'Constant', attrs={'dtype': dtypes.ONNX.INT64,
'value': [1]})
num_rois = graph.make_node('Slice', inputs=[rois_shape, starts, ends])
zero = graph.make_node(
'Constant', dims=[1], dtype=dtypes.ONNX.INT64, value=[0])
batch_indices = graph.make_node('Expand', inputs=[zero, num_rois])
node = graph.make_node(
'RoiAlign',
inputs=[node.input('X', 0), node.input('ROIs', 0), batch_indices],
outputs=node.output('Out'),
mode='avg',
output_height=node.attr('pooled_height'),
output_width=node.attr('pooled_width'),
sampling_ratio=node.attr('sampling_ratio'),
spatial_scale=node.attr('spatial_scale'))
@op_mapper('rnn')
class RNN():
support_opset_version_range = (7, 15)
@classmethod
def make_param_inputs(cls, graph, node, layer, hidden_size, num_layers):
        # weight assign order:
        # (F_whi, F_whh, B_whi, B_whh) * layer_num + (F_bias_hi, F_bias_hh, B_bias_hi, B_bias_hh) * layer_num
def reform_weights(g, w, n, intervals):
slices = [
mapper_helper.slice_helper(
g, w, axes=[1], starts=[x * n], ends=[y * n])
for x, y in intervals
]
return g.make_node('Concat', slices, axis=1)
def transform_weight_with_bias(g, weights, n, intervals):
return [reform_weights(g, w, n, intervals) for w in weights]
if node.attr('mode') == 'LSTM':
reform_permutation = [(0, 1), (3, 4), (1, 3)]
elif node.attr('mode') == 'GRU':
reform_permutation = [(1, 2), (0, 1), (2, 3)]
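        # The interval lists above re-chunk each weight along axis 1 in
        # hidden_size-sized blocks: LSTM picks chunks (0, 3, 1-2), reordering
        # Paddle's gate layout into ONNX's expected (i, o, f, c) order, and
        # GRU picks chunks (1, 0, 2) to produce ONNX's (z, r, h) order
        # (assuming Paddle's usual gate layout).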
bidirect_len = 4 if node.attr('is_bidirec') else 2
all_layer_param_len = len(node.input('WeightList'))
weight_list = node.input('WeightList')[:all_layer_param_len // 2]
bias_list = node.input('WeightList')[all_layer_param_len // 2:]
single_layer_param_len = all_layer_param_len // num_layers
unsqueeze_weights = []
layer_weight_list = weight_list[layer * bidirect_len:layer *
bidirect_len + bidirect_len]
layer_bias_list = bias_list[layer * bidirect_len:layer * bidirect_len +
bidirect_len]
param_list = layer_weight_list + layer_bias_list
param_list_len = len(param_list)
for i in range(param_list_len):
weight = mapper_helper.unsqueeze_helper(graph, param_list[i], [0])
unsqueeze_weights.append(weight)
input_weights = unsqueeze_weights[0:param_list_len // 2:2]
hidden_weights = unsqueeze_weights[1:param_list_len // 2:2]
input_weight = graph.make_node('Concat', inputs=input_weights, axis=0)
hidden_weight = graph.make_node('Concat', inputs=hidden_weights, axis=0)
input_bias = unsqueeze_weights[param_list_len // 2:param_list_len:2]
hidden_bias = unsqueeze_weights[param_list_len // 2 + 1:param_list_len:
2]
input_bias = graph.make_node('Concat', inputs=input_bias, axis=0)
hidden_bias = graph.make_node('Concat', inputs=hidden_bias, axis=0)
input_weight, hidden_weight, input_bias, hidden_bias = transform_weight_with_bias(
graph, [input_weight, hidden_weight, input_bias, hidden_bias],
hidden_size, reform_permutation)
bias = graph.make_node(
'Concat', inputs=[input_bias, hidden_bias], axis=1)
return [input_weight, hidden_weight, bias, '']
@classmethod
def make_init_param_inputs(cls, graph, node, layer):
if node.attr('mode') == 'LSTM':
all_init_h, all_init_c = node.input('PreState')
bidirect_len = 2 if node.attr('is_bidirec') else 1
init_h = mapper_helper.slice_helper(
graph, all_init_h, [0], [layer * bidirect_len],
[layer * bidirect_len + bidirect_len])
init_c = mapper_helper.slice_helper(
graph, all_init_c, [0], [layer * bidirect_len],
[layer * bidirect_len + bidirect_len])
return [init_h, init_c]
elif node.attr('mode') == 'GRU':
all_init_h = node.input('PreState', 0)
bidirect_len = 2 if node.attr('is_bidirec') else 1
init_h = mapper_helper.slice_helper(
graph, all_init_h, [0], [layer * bidirect_len],
[layer * bidirect_len + bidirect_len])
return [init_h]
@classmethod
def opset_7(cls, graph, node, **kw):
mode = node.attr('mode')
hidden_size = node.attr('hidden_size')
num_layers = node.attr('num_layers')
prev_output = node.input('Input', 0)
if node.attr('mode') == 'LSTM':
for layer in range(num_layers):
param_inputs = cls.make_param_inputs(graph, node, layer,
hidden_size, num_layers)
init_param_inputs = cls.make_init_param_inputs(graph, node,
layer)
if layer + 1 < num_layers:
rnn_outputs = 3
output_y = None
else:
rnn_outputs = [1] + node.output('State')
output_y = node.output('Out')
prev_output, h_out, c_out = graph.make_node(
node.attr('mode'),
inputs=[prev_output] + param_inputs + init_param_inputs,
outputs=rnn_outputs,
direction='bidirectional'
if node.attr('is_bidirec') else 'forward',
hidden_size=node.attr('hidden_size'))
prev_output = graph.make_node(
'Transpose', inputs=[prev_output], perm=[0, 2, 1, 3])
prev_shape = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[0, 0, -1])
prev_output = graph.make_node(
'Reshape',
inputs=[prev_output, prev_shape],
outputs=output_y)
elif node.attr('mode') == 'GRU':
for layer in range(num_layers):
param_inputs = cls.make_param_inputs(graph, node, layer,
hidden_size, num_layers)
init_param_inputs = cls.make_init_param_inputs(graph, node,
layer)
if layer + 1 < num_layers:
rnn_outputs = 2
output_y = None
else:
rnn_outputs = [1] + node.output('State')
output_y = node.output('Out')
attrs = {
'direction': 'bidirectional'
if node.attr('is_bidirec') else 'forward',
'hidden_size': node.attr('hidden_size'),
'linear_before_reset': 1,
}
prev_output, h_out = graph.make_node(
node.attr('mode'),
inputs=[prev_output] + param_inputs + init_param_inputs,
outputs=rnn_outputs,
attrs=attrs)
prev_output = graph.make_node(
'Transpose', inputs=[prev_output], perm=[0, 2, 1, 3])
prev_shape = graph.make_node(
'Constant', dtype=dtypes.ONNX.INT64, value=[0, 0, -1])
prev_output = graph.make_node(
'Reshape',
inputs=[prev_output, prev_shape],
outputs=output_y)
@op_mapper('thresholded_relu')
class ThresholdedRelu():
support_opset_version_range = (10, 15)
@classmethod
def opset_10(cls, graph, node, **kw):
x_dtype = node.input_dtype('X', 0)
if x_dtype != paddle.float32:
x = graph.make_node(
'Cast', inputs=node.input('X'), to=dtypes.ONNX.FLOAT)
            thresholded_relu_node = graph.make_node(
                'ThresholdedRelu', inputs=[x], alpha=node.attr('threshold'))
            graph.make_node(
                'Cast',
                inputs=[thresholded_relu_node],
                outputs=node.output('Out'),
                to=dtypes.DTYPE_PADDLE_ONNX_MAP[x_dtype])
else:
graph.make_node(
'ThresholdedRelu',
inputs=node.input('X'),
alpha=node.attr('threshold'),
outputs=node.output('Out'))

View File

@@ -0,0 +1,305 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import inspect
import six
import numpy as np
import paddle
from paddle import fluid
from paddle.fluid import layers
from paddle2onnx.legacy.graph import graph_helper, PaddleGraph
from paddle2onnx.utils import logging
from paddle2onnx.legacy.constant.op_mapping_status import *
REGISTER_CUSTOM_PADDLE_OP = {}
def get_max_support_version(versions, opset_version):
max_version = -1
for vs in sorted(versions):
if vs <= opset_version:
max_version = vs
return max_version
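# A quick illustration (sketch): get_max_support_version([7, 9, 11], 10)
# returns 9, the highest registered opset_* implementation that does not
# exceed the target opset version; -1 means no registered version is usable.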
def register_op_mapper(paddle_op, mapper_obj):
paddle_op_list = []
if isinstance(paddle_op, six.string_types):
paddle_op_list.append(paddle_op)
elif isinstance(paddle_op, list):
paddle_op_list = paddle_op
else:
raise ValueError('paddle_op must be List or string, but got type {}.'.
format(type(paddle_op)))
if not isinstance(mapper_obj, six.class_types):
raise ValueError('mapper_obj must be Class, but got type {}.'.format(
type(mapper_obj)))
valid_register_func = 0
for k, v in inspect.getmembers(mapper_obj, inspect.ismethod):
if k.startswith("opset_"):
version = int(k.replace("opset_", ""))
            if version > 13 or version < 1:
                raise Exception(
                    'The specific method of an operator mapper must be named opset_[number] (1 <= number <= 13), such as opset_9, but got {}.'.
                    format(k))
valid_register_func += 1
    if valid_register_func == 0:
        raise Exception(
            'The specific method of an operator mapper must be a classmethod named opset_[number] (1 <= number <= 13), such as opset_9, but none was found.'
        )
mapper = OpMapper(paddle_op_list)
mapper(mapper_obj)
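# A minimal registration sketch (hypothetical operator name and class, for
# illustration only):
#
#   class MyOpMapper():
#       @classmethod
#       def opset_9(cls, graph, node, **kw):
#           graph.make_node(
#               'Identity', inputs=node.input('X'), outputs=node.output('Out'))
#
#   register_op_mapper('my_op', MyOpMapper)
#
# The opset_* members must be classmethods named opset_[1-13], otherwise
# registration raises.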
class OpMapper(object):
OPSETS = {}
REGISTER_CUSTOM_PADDLE_OP = {}
def __init__(self, paddle_op, **kwargs):
if not isinstance(paddle_op, list):
paddle_op = [paddle_op]
self.paddle_op = paddle_op
self.kwargs = kwargs
def __call__(self, cls):
for k, v in inspect.getmembers(cls, inspect.ismethod):
if k.startswith("opset_"):
version = int(k.replace("opset_", ""))
for op in self.paddle_op:
if op not in OpMapper.OPSETS:
OpMapper.OPSETS[op] = {}
opset_dict = OpMapper.OPSETS[op]
opset_dict[version] = (v, self.kwargs)
@staticmethod
def mapping(graph, node, operator_export_type="ONNX"):
try:
if node.type in OpMapper.REGISTER_CUSTOM_PADDLE_OP:
if operator_export_type in ["PaddleFallback"]:
opsets = OpMapper.OPSETS[node.type]
versions = list(opsets.keys())
convert_version = get_max_support_version(
versions, graph.opset_version)
mapper_func, kw = opsets[convert_version]
mapper_func(graph, node, **kw)
else:
custom_paddle_op = OpMapper.REGISTER_CUSTOM_PADDLE_OP[
node.type](node)
custom_paddle_graph, output_results = custom_paddle_op.get_paddle_graph(
)
OpMapper.check_support_status(custom_paddle_graph.node_map,
graph.opset_version)
graph.build_op_nodes(custom_paddle_graph.node_map)
node_output_results = dict()
for k in node.output_names:
custom_outs = output_results[k]
node_outs = node.output(k)
                    assert len(custom_outs) == len(
                        node_outs
                    ), "Length of the custom implementation's outputs does not match the length of the original operator's outputs."
for i in range(len(custom_outs)):
graph.make_node(
"Identity",
inputs=[custom_outs[i]],
outputs=[node_outs[i]])
else:
opsets = OpMapper.OPSETS[node.type]
versions = list(opsets.keys())
convert_version = get_max_support_version(versions,
graph.opset_version)
mapper_func, kw = opsets[convert_version]
mapper_func(graph, node, **kw)
except Exception as e:
            raise Exception(
                "Error happened when mapping node ['{}'] to ONNX, whose op_type is '{}' with inputs: {} and outputs: {}, specific error: ".
                format(node.layer_name, node.type, node.inputs,
                       node.outputs) + str(e))
@staticmethod
def get_recommend_opset_version(node_map, opset_version):
recommend_opset_version = OpMapper.check_support_status(
node_map, opset_version, True)
for name, node in list(node_map.items()):
            if node.type in OpMapper.REGISTER_CUSTOM_PADDLE_OP:  # for custom ops, use the recommended opset of the custom implementation
custom_paddle_op = OpMapper.REGISTER_CUSTOM_PADDLE_OP[
node.type](node)
custom_paddle_graph, output_results = custom_paddle_op.get_paddle_graph(
)
custom_recommend_opset_version = OpMapper.check_support_status(
custom_paddle_graph.node_map, opset_version, True)
recommend_opset_version = max(recommend_opset_version,
custom_recommend_opset_version)
if opset_version != recommend_opset_version:
warning_info = "\n======================\n"
warning_info += "\nFor a successful conversion, set the recommended opset version : {}\n".format(
recommend_opset_version)
warning_info += "\n======================\n"
logging.warning(warning_info)
return recommend_opset_version
@staticmethod
def check_support_status(node_map, opset_version, for_check=False):
op_mapping_status = {
OP_MAPPING_NO_REGISTER: [],
OP_MAPPING_NO_VERSION: [],
}
for name, node in list(node_map.items()):
if node.type in OpMapper.REGISTER_CUSTOM_PADDLE_OP:
continue
if node.type not in OpMapper.OPSETS:
op_mapping_status[OP_MAPPING_NO_REGISTER].append(node)
else:
opsets = OpMapper.OPSETS[node.type]
versions = list(opsets.keys())
convert_version = get_max_support_version(versions,
opset_version)
if convert_version == -1:
op_mapping_status[OP_MAPPING_NO_VERSION].append(node)
if len(op_mapping_status[OP_MAPPING_NO_REGISTER]) > 0:
unsupported_op_types = set([
node.type for node in op_mapping_status[OP_MAPPING_NO_REGISTER]
])
error_info = "\nThere's {} ops are not supported yet\n".format(
len(unsupported_op_types))
for op_type in unsupported_op_types:
error_info += "=========== {} ===========\n".format(op_type)
raise NotImplementedError(error_info)
if len(op_mapping_status[OP_MAPPING_NO_VERSION]) > 0:
unsupported_op_types = set([
node.type for node in op_mapping_status[OP_MAPPING_NO_VERSION]
])
recommend_opset_version = -1
for op_type in unsupported_op_types:
opsets = OpMapper.OPSETS[op_type]
if min(opsets.keys()) > recommend_opset_version:
recommend_opset_version = min(opsets.keys())
warning_info = "\nThere are {} ops that are not supported in opset version {}, please set opset version >= {}.\n".format(
len(unsupported_op_types), opset_version,
recommend_opset_version)
for op_type in unsupported_op_types:
warning_info += "=========== {} ===========\n".format(op_type)
if for_check:
logging.warning(warning_info)
return recommend_opset_version
raise NotImplementedError(warning_info)
return opset_version
class CustomPaddleOp(object):
CREATE_TIMES = {}
def __init__(self, node):
self.main_program = paddle.static.Program()
self.startup_program = paddle.static.Program()
self.inputs = self.create_place_holder(node)
self.node = node
def generate_scope_name(self, node):
if node.type in CustomPaddleOp.CREATE_TIMES:
CustomPaddleOp.CREATE_TIMES[node.type] += 1
else:
CustomPaddleOp.CREATE_TIMES[node.type] = 1
scope_prefix = node.type + str(CustomPaddleOp.CREATE_TIMES[node.type] -
1) + '_'
return scope_prefix
def create_place_holder(self, node):
place_holders = {}
with paddle.static.program_guard(self.main_program,
self.startup_program):
for arg_name, idxs in node.inputs.items():
place_holders[arg_name] = []
for idx in range(len(idxs)):
shape = node.input_shape(arg_name, idx)
dtype = node.input_dtype(arg_name, idx)
name = node.input(arg_name, idx)
data = paddle.static.data(
name=name, shape=shape, dtype=dtype)
place_holders[arg_name].append(data)
return place_holders
def input(self, name, idx=None):
if name not in self.inputs:
return None
if idx is None:
return self.inputs[name]
if len(self.inputs[name]) <= idx:
return None
return self.inputs[name][idx]
def get_paddle_graph(self):
scope_prefix = self.generate_scope_name(self.node)
scope = paddle.static.Scope()
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(self.main_program,
self.startup_program):
with paddle.utils.unique_name.guard(scope_prefix):
res = self.forward()
feed_var_names = [
var.name for vars in self.inputs.values()
for var in vars
]
fetch_vars = [var for vars in res.values() for var in vars]
inference_program = graph_helper.get_program(
self.main_program, feed_var_names, fetch_vars)
paddle_graph = PaddleGraph.build_from_program(
inference_program,
feed_var_names,
fetch_vars,
scope=scope)
output_results = dict()
for arg_name, outs in res.items():
output_results[arg_name] = [out.name for out in outs]
return paddle_graph, output_results
def register_custom_paddle_op(paddle_op, custom_op):
paddle_op_list = []
if isinstance(paddle_op, six.string_types):
paddle_op_list.append(paddle_op)
elif isinstance(paddle_op, list):
paddle_op_list = paddle_op
else:
raise ValueError("paddle_op' must be List or string, but got type {}.".
format(type(paddle_op)))
if not isinstance(custom_op, six.class_types):
raise ValueError("'custom_op' must be Class, but got type {}.".format(
type(custom_op)))
forward = getattr(custom_op, "forward", None)
if not callable(forward):
raise Exception(
"Custom paddle operators must be implemented in function named 'forward'."
)
for op in paddle_op_list:
if op not in OpMapper.REGISTER_CUSTOM_PADDLE_OP:
OpMapper.REGISTER_CUSTOM_PADDLE_OP[op] = custom_op
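# A hedged sketch of a custom implementation (hypothetical operator name):
#
#   class MyCustomOp(CustomPaddleOp):
#       def forward(self):
#           x = self.input('X', 0)
#           out = paddle.scale(x, scale=2.0)
#           return {'Out': [out]}
#
#   register_custom_paddle_op('my_custom_op', MyCustomOp)
#
# forward() must return a dict mapping each output argument name to a list
# of variables, mirroring the outputs of the original operator.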

View File

@@ -0,0 +1,233 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
@op_mapper('where_index')
class WhereIndex():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
nonzero_node = graph.make_node(
'NonZero', inputs=node.input('Condition'))
graph.make_node(
'Transpose',
inputs=[nonzero_node],
outputs=node.output('Out'),
perm=[1, 0])
@op_mapper('top_k_v2')
class TopKV2():
support_opset_version_range = (11, 15)
@classmethod
def opset_11(cls, graph, node, **kw):
sorted = node.attr('sorted')
        # For Paddle on GPU devices, TopK always returns sorted values.
# if not sorted:
# sorted = True
if 'K' in node.inputs and len(node.input('K')) > 0:
k_node = node.input('K', 0)
k_node_dtype = node.input_dtype('K', 0)
if dtypes.DTYPE_PADDLE_STR_MAP[k_node_dtype] != 'int64':
k_node = graph.make_node(
'Cast', inputs=[k_node], to=dtypes.ONNX.INT64)
graph.make_node(
'TopK',
inputs=[node.input('X', 0), k_node],
outputs=[node.output('Out', 0), node.output('Indices', 0)],
largest=node.attr('largest'),
sorted=sorted,
axis=node.attr('axis'))
else:
k = node.attr('k')
k_node = graph.make_node(
'Constant', attrs={'dtype': dtypes.ONNX.INT64,
'value': [k]})
graph.make_node(
'TopK',
inputs=[node.input('X', 0), k_node],
outputs=[node.output('Out', 0), node.output('Indices', 0)],
largest=node.attr('largest'),
sorted=sorted,
axis=node.attr('axis'))
@op_mapper('top_k')
class TopK():
support_opset_version_range = (11, 15)
@classmethod
def opset_11(cls, graph, node, **kw):
if 'K' in node.inputs and len(node.input('K')) > 0:
k_node = node.input('K', 0)
k_node_dtype = node.input_dtype('K', 0)
if dtypes.DTYPE_PADDLE_STR_MAP[k_node_dtype] != 'int64':
k_node = graph.make_node(
'Cast', inputs=[k_node], to=dtypes.ONNX.INT64)
graph.make_node(
'TopK',
inputs=[node.input('X', 0), k_node],
outputs=[node.output('Out', 0), node.output('Indices', 0)])
else:
k = node.attr('k')
k_node = graph.make_node(
'Constant', attrs={'dtype': dtypes.ONNX.INT64,
'value': [k]})
graph.make_node(
'TopK',
inputs=[node.input('X', 0), k_node],
outputs=[node.output('Out', 0), node.output('Indices', 0)])
@op_mapper('argsort')
class ArgSort():
support_opset_version_range = (6, 15)
@classmethod
def opset_10(cls, graph, node, **kw):
shape = graph.make_node('Shape', inputs=node.input('X', 0))
from paddle2onnx.legacy.op_mapper import mapper_helper
axis = node.attr('axis')
if axis < 0:
axis = axis + len(node.input_shape('X', 0))
dim_size = mapper_helper.slice_helper(
graph, shape, axes=[0], starts=[axis], ends=[axis + 1])
if graph.opset_version > 10:
if not node.attr('descending'):
graph.make_node(
'TopK',
inputs=[node.input('X', 0), dim_size],
outputs=[node.output('Out', 0), node.output('Indices', 0)],
axis=node.attr('axis'),
largest=0)
else:
graph.make_node(
'TopK',
inputs=[node.input('X', 0), dim_size],
outputs=[node.output('Out', 0), node.output('Indices', 0)],
axis=node.attr('axis'),
largest=1)
else:
            if not node.attr('descending'):
                raise Exception(
                    "descending=False is only supported with opset version >= 11.")
else:
graph.make_node(
'TopK',
inputs=[node.input('X', 0), dim_size],
outputs=[node.output('Out', 0), node.output('Indices', 0)],
axis=node.attr('axis'))
@classmethod
def opset_6(cls, graph, node, **kw):
shape = node.input_shape('X', 0)
k = shape[node.attr('axis')]
        assert k > 0, "When the input shape is dynamic, only opset version >= 10 is supported."
input_dtype = node.input_dtype('X', 0)
dtype = dtypes.DTYPE_PADDLE_STR_MAP[input_dtype]
inputs = node.input('X', 0)
if dtype in ["int32", "int64"]:
inputs = graph.make_node(
'Cast', inputs=inputs, to=dtypes.ONNX.FLOAT)
        if not node.attr('descending'):
            raise Exception(
                "descending=False is only supported with opset version >= 11.")
else:
output_node = node.output('Out', 0)
graph.make_node(
'TopK',
inputs=[inputs],
outputs=[output_node, node.output('Indices', 0)],
axis=node.attr('axis'),
k=k)
if dtype in ["int32", "int64"]:
graph.make_node(
'Cast',
inputs=[output_node],
to=dtypes.DTYPE_PADDLE_ONNX_MAP[input_dtype],
outputs=[output_node])
@op_mapper('index_select')
class IndexSelect():
support_opset_version_range = (1, 15)
@classmethod
def opset_1(cls, graph, node, **kw):
graph.make_node(
'Gather',
inputs=[node.input('X', 0), node.input('Index', 0)],
axis=node.attr('dim'),
outputs=node.output('Out'))
@op_mapper('unique')
class Unique():
support_opset_version_range = (11, 15)
@classmethod
def opset_11(cls, graph, node, **kw):
if node.attr('axis') == []:
graph.make_node(
'Unique',
inputs=node.input('X'),
outputs=[
node.output('Out', 0), node.output('Indices', 0),
node.output('Index', 0), node.output('Counts', 0)
])
else:
graph.make_node(
'Unique',
inputs=node.input('X'),
axis=node.attr('axis')[0],
outputs=[
node.output('Out', 0), node.output('Indices', 0),
node.output('Index', 0), node.output('Counts', 0)
])
@op_mapper('where')
class Where():
support_opset_version_range = (9, 15)
@classmethod
def opset_9(cls, graph, node, **kw):
graph.make_node(
'Where',
inputs=[
node.input('Condition', 0), node.input('X', 0),
node.input('Y', 0)
],
outputs=node.output('Out'))
@op_mapper('masked_select')
class MaskSelect():
support_opset_version_range = (11, 15)
@classmethod
def opset_11(cls, graph, node, **kw):
index = graph.make_node('NonZero', inputs=node.input('Mask', 0))
index = graph.make_node('Transpose', inputs=[index], perm=[1, 0])
graph.make_node(
'GatherND',
inputs=[node.input('X', 0), index],
outputs=node.output('Y'))

View File

@@ -0,0 +1,13 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,78 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
from paddle2onnx.legacy.constant import dtypes
from paddle2onnx.utils import logging
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
@op_mapper('im2sequence')
class Im2Sequence():
    support_opset_version_range = (1, 12)
@classmethod
def opset_1(cls, graph, node, **kw):
n, c, h, w = node.input_shape('X', 0)
        assert h > 0 and w > 0, "Only fixed input shapes are supported for the im2sequence operator."
stride_h, stride_w = node.attr('strides')
paddings = node.attr('paddings')
        assert node.attr(
            'out_stride'
        ) == 1, "Only out_stride==1 is supported for im2sequence operator."
h = h + paddings[0] + paddings[1]
w = w + paddings[1] + paddings[2]
kernel_h, kernel_w = node.attr('kernels')
out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
h_steps = list()
for i in range(out_h):
h_steps.append([i * stride_h, i * stride_h + kernel_h])
w_steps = list()
for i in range(out_w):
w_steps.append([i * stride_w, i * stride_w + kernel_w])
slice_node_blocks = list()
for i in range(out_h):
for j in range(out_w):
starts_node = graph.make_node(
'Constant',
dtype=dtypes.ONNX.INT64,
dims=[4],
value=[0, 0, h_steps[i][0], w_steps[j][0]])
ends_node = graph.make_node(
'Constant',
dtype=dtypes.ONNX.INT64,
dims=[4],
value=[999999, 999999, h_steps[i][1], w_steps[j][1]])
slice_block_node = graph.make_node(
'Slice',
inputs=[node.input('X', 0), starts_node, ends_node])
flatten_block_node = graph.make_node(
"Flatten", inputs=[slice_block_node], axis=0)
                slice_node_blocks.append(flatten_block_node)
concat_block_node = graph.make_node(
"Concat",
inputs=slice_node_blocks,
outputs=node.output('Out'),
axis=0)
logging.info("==========Importance Notice===========")
logging.info(
"Since im2sequence operator is used in your paddlepaddle model, the translated onnx model only support input data with batch_size=1."
)
logging.info("======================================")

File diff suppressed because it is too large

View File

@@ -0,0 +1,17 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pass_manager import PassManager
from .inplace_node_pass import InplaceNodePass
from .dumplicate_names_pass import DumplicateNamesPass

View File

@@ -0,0 +1,91 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle2onnx.legacy.passes import PassManager
from paddle2onnx.utils import logging
@PassManager('dumplicate_names_pass')
class DumplicateNamesPass(object):
name_count = dict()
@classmethod
def generate_new_name(cls, name):
for saved_name in cls.name_count:
if name.startswith(saved_name):
cls.name_count[saved_name] += 1
new_name = saved_name + '.' + str(cls.name_count[saved_name])
return new_name
cls.name_count[name] = 1
new_name = name + '.' + str(cls.name_count[name])
return new_name
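    # Illustration: the first generate_new_name('x') yields 'x.1'; a later
    # call whose name starts with a saved prefix bumps that counter, so the
    # next 'x' yields 'x.2'.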
@classmethod
def run_pass(cls, onnx_graph):
renamer = {}
tensor_names = set()
for name, node in onnx_graph.parameters.items():
output = node.output
for opt in output:
                assert opt not in tensor_names, "There are duplicate names in parameters."
tensor_names.add(opt)
for ipt in onnx_graph.input_nodes:
            assert ipt.name not in tensor_names, "There are duplicate names in exported parameters and inputs."
tensor_names.add(ipt.name)
for name, node in onnx_graph.node_map.items():
inputs = node.inputs
outputs = node.outputs
update_node = False
for idx in range(len(inputs)):
ipt = inputs[idx]
if ipt not in renamer:
continue
updated_name = renamer[ipt]
while updated_name in renamer:
updated_name = renamer[updated_name]
inputs[idx] = updated_name
update_node = True
for idx in range(len(outputs)):
opt = outputs[idx]
if opt not in tensor_names:
tensor_names.add(opt)
continue
renamed_tensor_name = opt
while renamed_tensor_name in renamer:
renamed_tensor_name = renamer[renamed_tensor_name]
new_name = cls.generate_new_name(renamed_tensor_name)
logging.warning("[Renamer Pass] Will rename {}, to {}".format(
renamed_tensor_name, new_name))
outputs[idx] = new_name
update_node = True
renamer[renamed_tensor_name] = new_name
if update_node:
node.set_inputs(inputs)
node.set_outputs(outputs)
onnx_graph.update_node(node)
for opt in onnx_graph.output_nodes:
if opt.name not in renamer:
continue
updated_name = renamer[opt.name]
while updated_name in renamer:
updated_name = renamer[updated_name]
opt.name = updated_name
return onnx_graph

View File

@@ -0,0 +1,62 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle2onnx.legacy.passes import PassManager
def get_repeated_output(inputs, outputs):
repeated_output = {}
for idx in range(len(outputs)):
opt = outputs[idx]
if opt in inputs:
repeated_output[opt] = idx
return repeated_output
@PassManager('inplace_node_pass')
class InplaceNodePass(object):
name_count = dict()
@classmethod
def generate_new_name(cls, name):
if name in cls.name_count:
cls.name_count[name] += 1
else:
cls.name_count[name] = 1
new_name = name + '.' + str(cls.name_count[name])
return new_name
@classmethod
def run_pass(cls, onnx_graph):
node_map = list(onnx_graph.node_map.items())
name_mapping = {}
for idx in range(len(node_map)):
name, node = node_map[idx]
inputs = node.inputs
outputs = node.outputs
for idx in range(len(inputs)):
ipt = inputs[idx]
if ipt in name_mapping:
inputs[idx] = name_mapping[ipt]
repeated_output = get_repeated_output(inputs, outputs)
if len(repeated_output) != 0:
for opt, idx in repeated_output.items():
name_mapping[opt] = cls.generate_new_name(opt)
outputs[idx] = name_mapping[opt]
node.set_inputs(inputs)
node.set_outputs(outputs)
onnx_graph.update_node(node)
return onnx_graph
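# Why this pass exists (sketch): ONNX requires every tensor name to be
# produced by exactly one node, but Paddle ops may write an output in place
# over one of their inputs. Whenever an output name also appears among a
# node's inputs, the pass renames the output (e.g. 'x' -> 'x.1') and
# rewrites all later consumers through name_mapping.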

View File

@@ -0,0 +1,39 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
class PassManager(object):
PASSES = {}
def __init__(self, name, **kwargs):
self.name = name
self.kwargs = kwargs
def __call__(self, cls):
for k, v in inspect.getmembers(cls, inspect.ismethod):
if k == 'run_pass':
self.PASSES[self.name] = (v, self.kwargs)
@staticmethod
def run_pass(graph, custom_pass_list):
for pass_name in custom_pass_list:
try:
pass_func, kw = PassManager.PASSES[pass_name]
pass_func(graph, **kw)
            except Exception:
                raise Exception("Error happened when executing pass: {}".format(
                    pass_name))
return graph

View File

@@ -0,0 +1,453 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/activation.h"
namespace paddle2onnx {
REGISTER_MAPPER(relu, ActivationMapper)
REGISTER_MAPPER(relu6, Relu6Mapper)
REGISTER_MAPPER(tanh, ActivationMapper)
REGISTER_MAPPER(log, ActivationMapper)
REGISTER_MAPPER(sigmoid, ActivationMapper)
REGISTER_MAPPER(sqrt, ActivationMapper)
REGISTER_MAPPER(softplus, ActivationMapper)
REGISTER_MAPPER(exp, ActivationMapper)
REGISTER_MAPPER(floor, ActivationMapper)
REGISTER_MAPPER(cos, ActivationMapper)
REGISTER_MAPPER(sin, ActivationMapper)
REGISTER_MAPPER(round, ActivationMapper)
REGISTER_MAPPER(abs, ActivationMapper)
REGISTER_MAPPER(acos, ActivationMapper)
REGISTER_MAPPER(asin, ActivationMapper)
REGISTER_MAPPER(atan, ActivationMapper)
REGISTER_MAPPER(sinh, ActivationMapper)
REGISTER_MAPPER(tan, ActivationMapper)
REGISTER_MAPPER(ceil, ActivationMapper)
REGISTER_MAPPER(cosh, ActivationMapper)
REGISTER_MAPPER(softsign, ActivationMapper)
REGISTER_MAPPER(sign, ActivationMapper)
REGISTER_MAPPER(erf, ActivationMapper)
REGISTER_MAPPER(reciprocal, ActivationMapper)
REGISTER_MAPPER(leaky_relu, LeakyReluMapper)
REGISTER_MAPPER(gelu, GeluMapper)
REGISTER_MAPPER(selu, SeluMapper)
REGISTER_MAPPER(prelu, PReluMapper)
REGISTER_MAPPER(hard_sigmoid, HardSigmoidMapper)
REGISTER_MAPPER(swish, SwishMapper)
REGISTER_MAPPER(hard_swish, HardSwishMapper)
REGISTER_MAPPER(softmax, SoftMaxMapper)
REGISTER_MAPPER(brelu, BReluMapper)
REGISTER_MAPPER(elu, EluMapper)
REGISTER_MAPPER(hard_shrink, HardShrinkMapper)
REGISTER_MAPPER(softshrink, SoftShrinkMapper)
REGISTER_MAPPER(mish, MishMapper)
REGISTER_MAPPER(square, SquareMapper)
REGISTER_MAPPER(size, SizeMapper)
REGISTER_MAPPER(rsqrt, RsqrtMapper)
REGISTER_MAPPER(logsigmoid, LogSigmoidMapper)
REGISTER_MAPPER(log_softmax, LogSoftmaxMapper)
REGISTER_MAPPER(tanh_shrink, TanhShrinkMapper)
REGISTER_MAPPER(thresholded_relu, ThresholdedReluMapper)
REGISTER_MAPPER(log1p, Log1PMapper)
REGISTER_MAPPER(log2, Log2Mapper)
REGISTER_MAPPER(log10, Log10Mapper)
REGISTER_MAPPER(silu, SiluMapper)
int32_t ActivationMapper::GetMinOpset(bool verbose) {
if (OpType() == "softplus") {
float beta = 0.0;
float threshold = 20.0;
GetAttr("beta", &beta);
GetAttr("threshold", &threshold);
if ((beta - 1.0) > 1e-06 || (beta - 1.0) < -1e-06 ||
(threshold - 20.0) > 1e-06 || (threshold - 20.0) < -1e-06) {
Error() << "Only support softplus with beta == 1.0 and threshold == 20.0."
<< std::endl;
return -1;
}
}
if (OpType() == "round") {
Logger(verbose, 11) << RequireOpset(11) << std::endl;
return 11;
}
if (OpType() == "sinh" || OpType() == "cosh" || OpType() == "sign") {
Logger(verbose, 9) << RequireOpset(9) << std::endl;
return 9;
}
return 7;
}
void ActivationMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto iter = op_mapper_.find(OpType());
Assert(op_mapper_.end() != iter,
"Cannot find " + OpType() + " in activation op_mapper.");
if (OpType() == "erf") {
auto input = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
auto output = helper_->MakeNode(iter->second, {input})->output(0);
helper_->AutoCast(output, output_info[0].name, P2ODataType::FP32,
output_info[0].dtype);
} else {
helper_->MakeNode(iter->second, {input_info[0].name},
{output_info[0].name});
}
}
void Relu6Mapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
float min = 0.0;
helper_->Clip(input_info[0].name, output_info[0].name, min, threshold_,
input_info[0].dtype);
}
int32_t PReluMapper::GetMinOpset(bool verbose) {
auto input_info = GetInput("X");
auto slope_info = GetInput("Alpha");
if (input_info[0].Rank() != slope_info[0].Rank()) {
if (slope_info[0].Rank() > 1) {
Error()
<< "Only support rank of alpha <=1 while Rank(alpha) != Rank(input)."
<< std::endl;
return -1;
}
}
return 7;
}
void PReluMapper::Opset7() {
auto input_info = GetInput("X");
auto slope_info = GetInput("Alpha");
auto output_info = GetOutput("Out");
std::string slope_cast_name = slope_info[0].name;
if (slope_info[0].dtype == P2ODataType::FP64) {
slope_cast_name = helper_->AutoCast({slope_info[0].name}, P2ODataType::FP64,
P2ODataType::FP32);
}
if (slope_info[0].Rank() != input_info[0].Rank()) {
    Assert(slope_info[0].Rank() <= 1,
           "Paddle2ONNX: Only support rank of alpha <= 1 while rank of alpha "
           "is not equal to the rank of input for operator prelu.");
    Assert(
        input_info[0].Rank() > 1,
        "Paddle2ONNX: Rank of input should be greater than 1 for operator prelu.");
std::vector<int64_t> shape_value(input_info[0].Rank() - 1, 1);
shape_value[0] = -1;
slope_cast_name = helper_->Reshape(slope_cast_name, shape_value);
}
if (input_info[0].dtype == P2ODataType::FP64) {
std::string x_cast_name = helper_->AutoCast(
{input_info[0].name}, P2ODataType::FP64, P2ODataType::FP32);
auto node = helper_->MakeNode("PRelu", {x_cast_name, slope_cast_name});
helper_->AutoCast(node->output(0), {output_info[0].name}, P2ODataType::FP32,
P2ODataType::FP64);
} else {
helper_->MakeNode("PRelu", {input_info[0].name, slope_cast_name},
{output_info[0].name});
}
}
void SeluMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto node =
helper_->MakeNode("Selu", {input_info[0].name}, {output_info[0].name});
AddAttribute(node, "alpha", alpha_);
AddAttribute(node, "gamma", scale_);
}
void HardSigmoidMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto node = helper_->MakeNode("HardSigmoid", {input_info[0].name},
{output_info[0].name});
AddAttribute(node, "alpha", alpha_);
AddAttribute(node, "beta", beta_);
}
void SwishMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
std::string beta_node =
helper_->Constant({1}, GetOnnxDtype(input_info[0].dtype), beta_);
// TODO(jiangjiajun) eliminate multiply with a constant of value 1
// TODO(jiangjiajun) eliminate add with a constant of value 0
auto beta_x_node = helper_->MakeNode("Mul", {input_info[0].name, beta_node});
  auto sigmoid_node = helper_->MakeNode("Sigmoid", {beta_x_node->output(0)});
  helper_->MakeNode("Mul", {input_info[0].name, sigmoid_node->output(0)},
                    {output_info[0].name});
}
void HardSwishMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
std::string scale_node =
helper_->Constant({1}, GetOnnxDtype(input_info[0].dtype), scale_);
std::string offset_node =
helper_->Constant({1}, GetOnnxDtype(input_info[0].dtype), offset_);
auto add_node = helper_->MakeNode("Add", {input_info[0].name, offset_node});
auto clip_node =
helper_->Clip(add_node->output(0), 0.0, threshold_, input_info[0].dtype);
auto mul_node = helper_->MakeNode("Mul", {input_info[0].name, clip_node});
helper_->MakeNode("Div", {mul_node->output(0), scale_node},
{output_info[0].name});
}
void HardSwishMapper::Opset14() {
if (fabs(offset_ - 3.0) > 1e-05 || fabs(scale_ - 6.0) > 1e-05 ||
fabs(threshold_ - 6.0) > 1e-05) {
return Opset7();
}
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
helper_->MakeNode("HardSwish", {input_info[0].name}, {output_info[0].name});
}
void LeakyReluMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto node = helper_->MakeNode("LeakyRelu", {input_info[0].name},
{output_info[0].name});
AddAttribute(node, "alpha", alpha_);
}
void GeluMapper::Opset9() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto input_onnx_dtype = GetOnnxDtype(input_info[0].dtype);
double sqrt_2_value = 1.4142135623730951;
double scale_value = 0.5;
double const_1_value = 1.0;
auto sqrt_2 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::FLOAT, sqrt_2_value);
auto scale =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::FLOAT, scale_value);
auto const_1 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::FLOAT, const_1_value);
auto input_name = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
// the computation formula follows
// https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/functional/gelu_cn.html#gelu
auto erf0 = helper_->MakeNode("Div", {input_name, sqrt_2});
auto erf1 = helper_->MakeNode("Erf", {erf0->output(0)});
auto gelu0 = helper_->MakeNode("Add", {erf1->output(0), const_1});
auto gelu1 = helper_->MakeNode("Mul", {input_name, gelu0->output(0)});
if (input_info[0].dtype != P2ODataType::FP32) {
auto out = helper_->MakeNode("Mul", {gelu1->output(0), scale});
auto cast_out =
helper_->MakeNode("Cast", {out->output(0)}, {output_info[0].name});
AddAttribute(cast_out, "to", GetOnnxDtype(input_info[0].dtype));
} else {
helper_->MakeNode("Mul", {gelu1->output(0), scale}, {output_info[0].name});
}
}
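// The graph above implements the exact (erf-based) GELU from the Paddle
// docs referenced in the comment: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
// Inputs are cast to FP32 because the constants and Erf are emitted in
// FP32 here, and the result is cast back when the input dtype differs.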
void SoftMaxMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
if (axis_ < 0) {
axis_ = axis_ + output_info[0].Rank();
}
if (axis_ == output_info[0].Rank() - 1) {
auto node = helper_->MakeNode("Softmax", {input_info[0].name},
{output_info[0].name});
AddAttribute(node, "axis", axis_);
} else {
std::vector<int64_t> perm = Arange(0, output_info[0].Rank());
perm[output_info[0].Rank() - 1] = axis_;
perm[axis_] = output_info[0].Rank() - 1;
auto transpose_node = helper_->MakeNode("Transpose", {input_info[0].name});
AddAttribute(transpose_node, "perm", perm);
auto softmax_node =
helper_->MakeNode("Softmax", {transpose_node->output(0)});
int64_t axis_last = -1;
AddAttribute(softmax_node, "axis", axis_last);
auto transpose_node_last = helper_->MakeNode(
"Transpose", {softmax_node->output(0)}, {output_info[0].name});
AddAttribute(transpose_node_last, "perm", perm);
}
}
void SoftMaxMapper::Opset13() {
int64_t axis;
GetAttr("axis", &axis);
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto node =
helper_->MakeNode("Softmax", {input_info[0].name}, {output_info[0].name});
AddAttribute(node, "axis", axis);
}
void BReluMapper::Opset7() {
auto x_info = GetInput("X");
helper_->Clip(x_info[0].name, GetOutput("Out")[0].name, t_min_, t_max_,
x_info[0].dtype);
}
void EluMapper::Opset7() {
auto node = helper_->MakeNode("Elu", {GetInput("X")[0].name},
{GetOutput("Out")[0].name});
AddAttribute(node, "alpha", alpha_);
}
void HardShrinkMapper::Opset9() {
auto node = helper_->MakeNode("Shrink", {GetInput("X")[0].name},
{GetOutput("Out")[0].name});
AddAttribute(node, "lambd", threshold_);
AddAttribute(node, "bias", float(0.0));
}
int32_t MishMapper::GetMinOpset(bool verbose) {
if (fabs(threshold_ - 20.0) > 1e-05) {
Error() << "Only support threshold = 20.0." << std::endl;
return -1;
}
return 7;
}
void MishMapper::Opset7() {
auto input_info = GetInput("X");
auto out_info = GetOutput("Out");
auto input = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
auto softplus = helper_->MakeNode("Softplus", {input})->output(0);
auto tanh = helper_->MakeNode("Tanh", {softplus})->output(0);
auto output = helper_->MakeNode("Mul", {input, tanh})->output(0);
helper_->AutoCast(output, out_info[0].name, P2ODataType::FP32,
out_info[0].dtype);
}
void SquareMapper::Opset7() {
auto input_info = GetInput("X");
helper_->MakeNode("Mul", {input_info[0].name, input_info[0].name},
{GetOutput("Out")[0].name});
}
void SoftShrinkMapper::Opset9() {
auto node = helper_->MakeNode("Shrink", {GetInput("X")[0].name},
{GetOutput("Out")[0].name});
AddAttribute(node, "lambd", lambda_);
AddAttribute(node, "bias", lambda_);
}
void SizeMapper::Opset7() {
auto out_info = GetOutput("Out");
auto output =
helper_->MakeNode("Size", {GetInput("Input")[0].name})->output(0);
output = helper_->Reshape(output, {-1});
output = helper_->AutoCast(output, out_info[0].name, P2ODataType::INT64,
out_info[0].dtype);
}
void RsqrtMapper::Opset7() {
auto output = helper_->MakeNode("Sqrt", {GetInput("X")[0].name})->output(0);
helper_->MakeNode("Reciprocal", {output}, {GetOutput("Out")[0].name});
}
void TanhShrinkMapper::Opset7() {
auto x_info = GetInput("X");
auto tanh = helper_->MakeNode("Tanh", {x_info[0].name})->output(0);
helper_->MakeNode("Sub", {x_info[0].name, tanh}, {GetOutput("Out")[0].name});
}
void LogSigmoidMapper::Opset7() {
auto output =
helper_->MakeNode("Sigmoid", {GetInput("X")[0].name})->output(0);
helper_->MakeNode("Log", {output}, {GetOutput("Out")[0].name});
}
void LogSoftmaxMapper::Opset7() {
auto input_info = GetInput("X");
auto axis = axis_;
if (axis < 0) {
axis += input_info[0].Rank();
}
if (axis == input_info[0].Rank() - 1) {
auto node = helper_->MakeNode("LogSoftmax", {input_info[0].name},
{GetOutput("Out")[0].name});
AddAttribute(node, "axis", axis);
} else {
auto perm = Arange(0, input_info[0].Rank());
perm[input_info[0].Rank() - 1] = axis;
perm[axis] = input_info[0].Rank() - 1;
auto output = helper_->Transpose(input_info[0].name, perm);
auto node = helper_->MakeNode("LogSoftmax", {output});
AddAttribute(node, "axis", int64_t(-1));
helper_->Transpose(node->output(0), GetOutput("Out")[0].name, perm);
}
}
void ThresholdedReluMapper::Opset10() {
auto x_info = GetInput("X");
auto out_info = GetOutput("Out");
auto input = x_info[0].name;
if (x_info[0].dtype != P2ODataType::FP32) {
input = helper_->AutoCast(input, x_info[0].dtype, P2ODataType::FP32);
auto node = helper_->MakeNode("ThresholdedRelu", {input});
AddAttribute(node, "alpha", threshold_);
helper_->AutoCast(node->output(0), out_info[0].name, P2ODataType::FP32,
out_info[0].dtype);
} else {
auto node =
helper_->MakeNode("ThresholdedRelu", {input}, {out_info[0].name});
AddAttribute(node, "alpha", threshold_);
}
}
void Log1PMapper::Opset7() {
auto x_info = GetInput("X");
auto out_info = GetOutput("Out");
auto one = helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), float(1.0));
auto input = helper_->MakeNode("Add", {x_info[0].name, one})->output(0);
helper_->MakeNode("Log", {input}, {out_info[0].name});
}
void Log2Mapper::Opset7() {
auto x_info = GetInput("X");
auto out_info = GetOutput("Out");
double ln2 = 0.693147180559945309;
auto ln2_tensor = helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), ln2);
auto output = helper_->MakeNode("Log", {x_info[0].name})->output(0);
helper_->MakeNode("Div", {output, ln2_tensor}, {out_info[0].name});
}
void Log10Mapper::Opset7() {
auto x_info = GetInput("X");
auto out_info = GetOutput("Out");
double ln10 = 2.30258509299404568401;
auto ln10_tensor =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), ln10);
auto output = helper_->MakeNode("Log", {x_info[0].name})->output(0);
helper_->MakeNode("Div", {output, ln10_tensor}, {out_info[0].name});
}
void SiluMapper::Opset7() {
auto x_info = GetInput("X");
auto out_info = GetOutput("Out");
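// silu(x) = x * sigmoid(x)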
auto out = helper_->MakeNode("Sigmoid", {x_info[0].name})->output(0);
helper_->MakeNode("Mul", {x_info[0].name, out}, {out_info[0].name});
}
} // namespace paddle2onnx

View File

@@ -0,0 +1,372 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <map>
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class ActivationMapper : public Mapper {
public:
ActivationMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
op_mapper_["relu"] = "Relu";
op_mapper_["tanh"] = "Tanh";
op_mapper_["log"] = "Log";
op_mapper_["sigmoid"] = "Sigmoid";
op_mapper_["sqrt"] = "Sqrt";
op_mapper_["softplus"] = "Softplus";
op_mapper_["exp"] = "Exp";
op_mapper_["floor"] = "Floor";
op_mapper_["cos"] = "Cos";
op_mapper_["sin"] = "Sin";
op_mapper_["round"] = "Round";
op_mapper_["abs"] = "Abs";
op_mapper_["acos"] = "Acos";
op_mapper_["asin"] = "Asin";
op_mapper_["atan"] = "Atan";
op_mapper_["sinh"] = "Sinh";
op_mapper_["tan"] = "Tan";
op_mapper_["ceil"] = "Ceil";
op_mapper_["cosh"] = "Cosh";
op_mapper_["erf"] = "Erf";
op_mapper_["sign"] = "Sign";
op_mapper_["softsign"] = "Softsign";
op_mapper_["reciprocal"] = "Reciprocal";
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::map<std::string, std::string> op_mapper_;
};
class Relu6Mapper : public Mapper {
public:
Relu6Mapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("threshold", &threshold_);
}
void Opset7();
private:
float threshold_;
};
class PReluMapper : public Mapper {
public:
PReluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
};
class SeluMapper : public Mapper {
public:
SeluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("alpha", &alpha_);
GetAttr("scale", &scale_);
}
void Opset7();
private:
float alpha_;
float scale_;
};
class HardSigmoidMapper : public Mapper {
public:
HardSigmoidMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("slope", &alpha_);
GetAttr("offset", &beta_);
}
void Opset7();
private:
float alpha_;
float beta_;
};
class SwishMapper : public Mapper {
public:
SwishMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("beta", &beta_);
}
void Opset7();
private:
float beta_;
};
class HardSwishMapper : public Mapper {
public:
HardSwishMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("scale", &scale_);
GetAttr("offset", &offset_);
GetAttr("threshold", &threshold_);
}
void Opset7();
void Opset14();
private:
float scale_;
float offset_;
float threshold_;
};
class LeakyReluMapper : public Mapper {
public:
LeakyReluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("alpha", &alpha_);
}
void Opset7();
private:
float alpha_;
};
class GeluMapper : public Mapper {
public:
GeluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
int32_t GetMinOpset(bool verbose = false) {
Logger(verbose, 9) << RequireOpset(9) << std::endl;
return 9;
}
void Opset9();
};
class SoftMaxMapper : public Mapper {
public:
SoftMaxMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("axis", &axis_);
}
void Opset7();
void Opset13();
private:
int64_t axis_ = -1;
};
class BReluMapper : public Mapper {
public:
BReluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("t_min", &t_min_);
GetAttr("t_max", &t_max_);
}
void Opset7();
private:
float t_min_;
float t_max_;
};
class EluMapper : public Mapper {
public:
EluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("alpha", &alpha_);
}
void Opset7();
private:
float alpha_;
};
class HardShrinkMapper : public Mapper {
public:
HardShrinkMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("threshold", &threshold_);
}
int32_t GetMinOpset(bool verbose = false) {
Logger(verbose, 9) << RequireOpset(9) << std::endl;
return 9;
}
void Opset9();
private:
float threshold_;
};
class MishMapper : public Mapper {
public:
MishMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("threshold", &threshold_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
float threshold_;
};
class SquareMapper : public Mapper {
public:
SquareMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class SizeMapper : public Mapper {
public:
SizeMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class LogSigmoidMapper : public Mapper {
public:
LogSigmoidMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class RsqrtMapper : public Mapper {
public:
RsqrtMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class LogSoftmaxMapper : public Mapper {
public:
LogSoftmaxMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("axis", &axis_);
}
void Opset7();
private:
int64_t axis_;
};
class SoftShrinkMapper : public Mapper {
public:
SoftShrinkMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("lambda", &lambda_);
}
int32_t GetMinOpset(bool verbose = false) {
Logger(verbose, 9) << RequireOpset(9) << std::endl;
return 9;
}
void Opset9();
private:
float lambda_;
};
class ThresholdedReluMapper : public Mapper {
public:
ThresholdedReluMapper(const PaddleParser& p, OnnxHelper* helper,
int64_t block_id, int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("threshold", &threshold_);
}
int32_t GetMinOpset(bool verbose = false) {
Logger(verbose, 10) << RequireOpset(10) << std::endl;
return 10;
}
void Opset10();
private:
float threshold_;
};
class TanhShrinkMapper : public Mapper {
public:
TanhShrinkMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class Log1PMapper : public Mapper {
public:
Log1PMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class Log2Mapper : public Mapper {
public:
Log2Mapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class Log10Mapper : public Mapper {
public:
Log10Mapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
class SiluMapper : public Mapper {
public:
SiluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
void Opset7();
};
} // namespace paddle2onnx

View File

@@ -0,0 +1,30 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle2onnx/utils/utils.h"
namespace paddle2onnx {
inline std::vector<int64_t> Arange(int64_t start, int64_t end) {
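// e.g. Arange(2, 5) returns {2, 3, 4}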
Assert(end > start, "In Arange(), end must be greater than start.");
std::vector<int64_t> res;
res.resize(end - start);
for (auto i = start; i < end; ++i) {
res[i - start] = i;
}
return res;
}
} // namespace paddle2onnx

View File

@@ -0,0 +1,360 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/detection/multiclass_nms.h"
namespace paddle2onnx {
REGISTER_MAPPER(multiclass_nms3, NMSMapper);
int32_t NMSMapper::GetMinOpset(bool verbose) {
auto boxes_info = GetInput("BBoxes");
auto score_info = GetInput("Scores");
if (score_info[0].Rank() != 3) {
Error() << "Lod Tensor input is not supported, which means the shape of "
"input(scores) is [M, C] now, but Paddle2ONNX only support [N, "
"C, M]."
<< std::endl;
return -1;
}
if (boxes_info[0].Rank() != 3) {
Error() << "Only support input boxes as 3-D Tensor, but now it's rank is "
<< boxes_info[0].Rank() << "." << std::endl;
return -1;
}
if (score_info[0].shape[1] <= 0) {
Error() << "The 2nd-dimension of score should be fixed(means the number of "
"classes), but now it's "
<< score_info[0].shape[1] << "." << std::endl;
return -1;
}
if (export_as_custom_op || this->deploy_backend == "tensorrt") {
return 7;
}
Logger(verbose, 10) << RequireOpset(10) << std::endl;
return 10;
}
void NMSMapper::KeepTopK(const std::string& selected_indices) {
auto boxes_info = GetInput("BBoxes");
auto score_info = GetInput("Scores");
auto out_info = GetOutput("Out");
auto index_info = GetOutput("Index");
auto num_rois_info = GetOutput("NmsRoisNum");
auto value_0 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(0));
auto value_1 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(1));
auto value_2 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(2));
auto value_neg_1 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(-1));
auto class_id = helper_->MakeNode("Gather", {selected_indices, value_1});
AddAttribute(class_id, "axis", int64_t(1));
auto box_id = helper_->MakeNode("Gather", {selected_indices, value_2});
AddAttribute(box_id, "axis", int64_t(1));
auto filtered_class_id = class_id->output(0);
auto filtered_box_id = box_id->output(0);
if (background_label_ >= 0) {
auto filter_indices = MapperHelper::Get()->GenName("nms.filter_background");
auto squeezed_class_id =
helper_->Squeeze(class_id->output(0), std::vector<int64_t>(1, 1));
if (background_label_ > 0) {
auto background = helper_->Constant(
{1}, ONNX_NAMESPACE::TensorProto::INT64, background_label_);
auto diff = helper_->MakeNode("Sub", {squeezed_class_id, background});
helper_->MakeNode("NonZero", {diff->output(0)}, {filter_indices});
} else if (background_label_ == 0) {
helper_->MakeNode("NonZero", {squeezed_class_id}, {filter_indices});
}
auto new_class_id =
helper_->MakeNode("Gather", {filtered_class_id, filter_indices});
AddAttribute(new_class_id, "axis", int64_t(0));
auto new_box_id =
helper_->MakeNode("Gather", {box_id->output(0), filter_indices});
AddAttribute(new_box_id, "axis", int64_t(0));
filtered_class_id = new_class_id->output(0);
filtered_box_id = new_box_id->output(0);
}
// This part is a little complicated: we need to gather the scores of the
// selected boxes so that we can filter the top-k boxes. The inputs are
//  - scores: [N, C, M], where N is the batch size (treated as 1 here),
//    C is the number of classes, and M is the number of boxes per class
//  - selected_indices: [num_selected_indices, 3], each row being
//    [batch, class_id, box_id]; it is used to gather the scores
// We first flatten `scores` to shape [1 * C * M], then gather one score for
// each row of `selected_indices`, with the index computed as
//   `gather_index = class_id * M + box_id`
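// Example: with C = 80 classes and M = 1000 boxes per class, a selected
// index [0, 3, 42] reads flatten_score[3 * 1000 + 42]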
auto flatten_score = helper_->Flatten(score_info[0].name);
auto num_boxes_each_class = helper_->Constant(
{1}, ONNX_NAMESPACE::TensorProto::INT64, score_info[0].shape[2]);
auto gather_indices_0 =
helper_->MakeNode("Mul", {filtered_class_id, num_boxes_each_class});
auto gather_indices_1 =
helper_->MakeNode("Add", {gather_indices_0->output(0), filtered_box_id});
auto gather_indices = helper_->Flatten(gather_indices_1->output(0));
auto gathered_scores =
helper_->MakeNode("Gather", {flatten_score, gather_indices});
AddAttribute(gathered_scores, "axis", int64_t(0));
// Now perform the keep_top_k process.
// First check whether the number of remaining boxes is greater than
// keep_top_k; if not, downgrade keep_top_k to the number of remaining boxes.
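// Example: if keep_top_k_ = 100 but only 37 boxes survive the filtering,
// ReduceMin over [37, 100] yields an effective top_k of 37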
auto final_classes = filtered_class_id;
auto final_boxes_id = filtered_box_id;
auto final_scores = gathered_scores->output(0);
if (keep_top_k_ > 0) {
// get proper topk
auto shape_of_scores = helper_->MakeNode("Shape", {final_scores});
auto num_of_boxes =
helper_->Slice(shape_of_scores->output(0), std::vector<int64_t>(1, 0),
std::vector<int64_t>(1, 0), std::vector<int64_t>(1, 1));
auto top_k =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, keep_top_k_);
auto ensemble_value = helper_->MakeNode("Concat", {num_of_boxes, top_k});
AddAttribute(ensemble_value, "axis", int64_t(0));
auto new_top_k =
helper_->MakeNode("ReduceMin", {ensemble_value->output(0)});
AddAttribute(new_top_k, "axes", std::vector<int64_t>(1, 0));
AddAttribute(new_top_k, "keepdims", int64_t(1));
// the output is topk_scores, topk_score_indices
auto topk_node =
helper_->MakeNode("TopK", {final_scores, new_top_k->output(0)}, 2);
auto topk_scores =
helper_->MakeNode("Gather", {final_scores, topk_node->output(1)});
AddAttribute(topk_scores, "axis", int64_t(0));
filtered_class_id =
helper_->MakeNode("Squeeze", {filtered_class_id})->output(0);
auto topk_classes =
helper_->MakeNode("Gather", {filtered_class_id, topk_node->output(1)});
AddAttribute(topk_classes, "axis", int64_t(0));
filtered_box_id =
helper_->MakeNode("Squeeze", {filtered_box_id})->output(0);
auto topk_boxes_id =
helper_->MakeNode("Gather", {filtered_box_id, topk_node->output(1)});
AddAttribute(topk_boxes_id, "axis", int64_t(0));
final_boxes_id = topk_boxes_id->output(0);
final_scores = topk_scores->output(0);
final_classes = topk_classes->output(0);
}
auto flatten_boxes_id = helper_->Flatten({final_boxes_id});
auto gathered_selected_boxes =
helper_->MakeNode("Gather", {boxes_info[0].name, flatten_boxes_id});
AddAttribute(gathered_selected_boxes, "axis", int64_t(1));
auto float_classes = helper_->MakeNode("Cast", {final_classes});
AddAttribute(float_classes, "to", ONNX_NAMESPACE::TensorProto::FLOAT);
std::vector<int64_t> shape{1, -1, 1};
auto unsqueezed_scores = helper_->Reshape({final_scores}, shape);
auto unsqueezed_class = helper_->Reshape({float_classes->output(0)}, shape);
auto box_result =
helper_->MakeNode("Concat", {unsqueezed_class, unsqueezed_scores,
gathered_selected_boxes->output(0)});
AddAttribute(box_result, "axis", int64_t(2));
helper_->Squeeze({box_result->output(0)}, {out_info[0].name},
std::vector<int64_t>(1, 0));
// The other outputs are not always used.
// There are lots of Cast nodes in the exported graph.
// TODO(jiangjiajun) A pass to eliminate all the useless Cast nodes is needed
auto reshaped_index_result =
helper_->Reshape({flatten_boxes_id}, {int64_t(-1), int64_t(1)});
auto index_result =
helper_->MakeNode("Cast", {reshaped_index_result}, {index_info[0].name});
AddAttribute(index_result, "to", GetOnnxDtype(index_info[0].dtype));
auto out_box_shape = helper_->MakeNode("Shape", {out_info[0].name});
auto num_rois_result =
helper_->Slice({out_box_shape->output(0)}, std::vector<int64_t>(1, 0),
std::vector<int64_t>(1, 0), std::vector<int64_t>(1, 1));
auto int32_num_rois_result =
helper_->AutoCast(num_rois_result, num_rois_info[0].name,
P2ODataType::INT64, num_rois_info[0].dtype);
}
void NMSMapper::Opset10() {
if (this->deploy_backend == "tensorrt") {
return ExportForTensorRT();
}
auto boxes_info = GetInput("BBoxes");
auto score_info = GetInput("Scores");
if (boxes_info[0].shape[0] != 1) {
Warn()
<< "[WARNING] Due to the operator multiclass_nms3, the exported ONNX "
"model will only supports inference with input batch_size == 1."
<< std::endl;
}
int64_t num_classes = score_info[0].shape[1];
auto score_threshold = helper_->Constant(
{1}, ONNX_NAMESPACE::TensorProto::FLOAT, score_threshold_);
auto nms_threshold = helper_->Constant(
{1}, ONNX_NAMESPACE::TensorProto::FLOAT, nms_threshold_);
auto nms_top_k =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, nms_top_k_);
auto selected_box_index = MapperHelper::Get()->GenName("nms.selected_index");
if (normalized_) {
helper_->MakeNode("NonMaxSuppression",
{boxes_info[0].name, score_info[0].name, nms_top_k,
nms_threshold, score_threshold},
{selected_box_index});
} else {
auto value_1 =
helper_->Constant({1}, GetOnnxDtype(boxes_info[0].dtype), float(1.0));
auto split_boxes = helper_->Split(boxes_info[0].name,
std::vector<int64_t>(4, 1), int64_t(2));
auto xmax = helper_->MakeNode("Add", {split_boxes[2], value_1});
auto ymax = helper_->MakeNode("Add", {split_boxes[3], value_1});
auto new_boxes = helper_->MakeNode(
"Concat",
{split_boxes[0], split_boxes[1], xmax->output(0), ymax->output(0)});
AddAttribute(new_boxes, "axis", int64_t(2));
helper_->MakeNode("NonMaxSuppression",
{new_boxes->output(0), score_info[0].name, nms_top_k,
nms_threshold, score_threshold},
{selected_box_index});
}
KeepTopK(selected_box_index);
}
void NMSMapper::ExportAsCustomOp() {
auto boxes_info = GetInput("BBoxes");
auto score_info = GetInput("Scores");
auto out_info = GetOutput("Out");
auto index_info = GetOutput("Index");
auto num_rois_info = GetOutput("NmsRoisNum");
auto node = helper_->MakeNode(
custom_op_name, {boxes_info[0].name, score_info[0].name},
{out_info[0].name, index_info[0].name, num_rois_info[0].name});
node->set_domain("Paddle");
int64_t normalized = normalized_ ? 1 : 0;
AddAttribute(node, "normalized", normalized);
AddAttribute(node, "nms_threshold", nms_threshold_);
AddAttribute(node, "score_threshold", score_threshold_);
AddAttribute(node, "nms_eta", nms_eta_);
AddAttribute(node, "nms_top_k", nms_top_k_);
AddAttribute(node, "background_label", background_label_);
AddAttribute(node, "keep_top_k", keep_top_k_);
helper_->MakeValueInfo(boxes_info[0].name, boxes_info[0].dtype,
boxes_info[0].shape);
helper_->MakeValueInfo(score_info[0].name, score_info[0].dtype,
score_info[0].shape);
helper_->MakeValueInfo(out_info[0].name, out_info[0].dtype,
out_info[0].shape);
helper_->MakeValueInfo(index_info[0].name, index_info[0].dtype,
index_info[0].shape);
helper_->MakeValueInfo(num_rois_info[0].name, num_rois_info[0].dtype,
num_rois_info[0].shape);
}
void NMSMapper::ExportForTensorRT() {
auto boxes_info = GetInput("BBoxes");
auto score_info = GetInput("Scores");
auto out_info = GetOutput("Out");
auto index_info = GetOutput("Index");
auto num_rois_info = GetOutput("NmsRoisNum");
auto scores = helper_->Transpose(score_info[0].name, {0, 2, 1});
auto boxes = helper_->Unsqueeze(boxes_info[0].name, {2});
int64_t num_classes = score_info[0].shape[1];
auto repeats =
helper_->Constant(GetOnnxDtype(P2ODataType::INT64),
std::vector<int64_t>({1, 1, num_classes, 1}));
boxes = helper_->MakeNode("Tile", {boxes, repeats})->output(0);
auto nms_node =
helper_->MakeNode("BatchedNMSDynamic_TRT", {boxes, scores}, 4);
AddAttribute(nms_node, "shareLocation", int64_t(0));
AddAttribute(nms_node, "backgroundLabelId", background_label_);
AddAttribute(nms_node, "numClasses", num_classes);
int64_t nms_top_k = nms_top_k_;
int64_t keep_top_k = keep_top_k_;
if (nms_top_k > 4096) {
Warn()
<< "Paramter nms_top_k:" << nms_top_k
<< " is exceed limit in TensorRT BatchedNMS plugin, will force to 4096."
<< std::endl;
nms_top_k = 4096;
}
if (keep_top_k > 4096) {
Warn()
<< "Parameter keep_top_k:" << keep_top_k
<< " is exceed limit in TensorRT BatchedNMS plugin, will force to 4096."
<< std::endl;
keep_top_k = 4096;
}
AddAttribute(nms_node, "topK", nms_top_k);
AddAttribute(nms_node, "keepTopK", keep_top_k);
AddAttribute(nms_node, "scoreThreshold", score_threshold_);
AddAttribute(nms_node, "iouThreshold", nms_threshold_);
if (normalized_) {
AddAttribute(nms_node, "isNormalized", int64_t(1));
} else {
AddAttribute(nms_node, "isNormalized", int64_t(0));
}
AddAttribute(nms_node, "clipBoxes", int64_t(0));
nms_node->set_domain("Paddle");
auto num_rois = helper_->Reshape(nms_node->output(0), {-1});
helper_->AutoCast(num_rois, num_rois_info[0].name, P2ODataType::INT32,
num_rois_info[0].dtype);
auto out_classes = helper_->Reshape(nms_node->output(3), {-1, 1});
auto out_scores = helper_->Reshape(nms_node->output(2), {-1, 1});
auto out_boxes = helper_->Reshape(nms_node->output(1), {-1, 4});
out_classes =
helper_->AutoCast(out_classes, P2ODataType::INT32, P2ODataType::FP32);
helper_->Concat({out_classes, out_scores, out_boxes}, {out_info[0].name}, 1);
// EfficientNMS_TRT cannot get the same result, so disable now
// auto nms_node = helper_->MakeNode("EfficientNMS_TRT", {boxes_info[0].name,
// score}, 4);
// AddAttribute(nms_node, "plugin_version", "1");
// AddAttribute(nms_node, "background_class", background_label_);
// AddAttribute(nms_node, "max_output_boxes", nms_top_k_);
// AddAttribute(nms_node, "score_threshold", score_threshold_);
// AddAttribute(nms_node, "iou_threshold", nms_threshold_);
// AddAttribute(nms_node, "score_activation", int64_t(0));
// AddAttribute(nms_node, "box_coding", int64_t(0));
// nms_node->set_domain("Paddle");
//
// auto num_rois = helper_->Reshape(nms_node->output(0), {-1});
// helper_->AutoCast(num_rois, num_rois_info[0].name, P2ODataType::INT32,
// num_rois_info[0].dtype);
//
// auto out_classes = helper_->Reshape(nms_node->output(3), {-1, 1});
// auto out_scores = helper_->Reshape(nms_node->output(2), {-1, 1});
// auto out_boxes = helper_->Reshape(nms_node->output(1), {-1, 4});
// out_classes = helper_->AutoCast(out_classes, P2ODataType::INT32,
// P2ODataType::FP32);
// helper_->Concat({out_classes, out_scores, out_boxes}, {out_info[0].name},
// 1);
}
} // namespace paddle2onnx

View File

@@ -0,0 +1,62 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class NMSMapper : public Mapper {
public:
NMSMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
// NMS is a post-processing operator for object detection
// We have found there are differences between `multiclass_nms3` in
// PaddlePaddle and `NonMaxSuppression` in ONNX
MarkAsExperimentalOp();
GetAttr("normalized", &normalized_);
GetAttr("nms_threshold", &nms_threshold_);
GetAttr("score_threshold", &score_threshold_);
GetAttr("nms_eta", &nms_eta_);
// `nms_top_k` in Paddle and `max_output_boxes_per_class` in ONNX share the
// same meaning, but the filtering process may differ. Since NMS is just a
// post-processing step for detection, we are not going to export it with
// exactly the same result; we will benchmark the precision on COCO or
// Pascal VOC data later.
GetAttr("nms_top_k", &nms_top_k_);
GetAttr("background_label", &background_label_);
GetAttr("keep_top_k", &keep_top_k_);
}
int32_t GetMinOpset(bool verbose = false);
void KeepTopK(const std::string& selected_indices);
void Opset10();
void ExportForTensorRT();
void ExportAsCustomOp();
private:
bool normalized_;
float nms_threshold_;
float score_threshold_;
float nms_eta_;
int64_t nms_top_k_;
int64_t background_label_;
int64_t keep_top_k_;
};
} // namespace paddle2onnx

View File

@@ -0,0 +1,43 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/detection/roi_align.h"
namespace paddle2onnx {
REGISTER_MAPPER(roi_align, RoiAlignMapper)
void RoiAlignMapper::Opset10() {
auto x_info = GetInput("X");
auto rois_info = GetInput("ROIs");
auto out_info = GetOutput("Out");
auto roi_shape = helper_->MakeNode("Shape", {rois_info[0].name})->output(0);
auto num_rois =
helper_->Slice(roi_shape, std::vector<int64_t>(1, 0),
std::vector<int64_t>(1, 0), std::vector<int64_t>(1, 1));
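// ONNX RoiAlign requires a per-ROI batch_indices input; since only batch
// size 1 is handled here, a zero scalar is expanded to shape [num_rois]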
auto value_zero = helper_->Constant(ONNX_NAMESPACE::TensorProto::INT64,
std::vector<int64_t>(1, 0));
auto batch_indices =
helper_->MakeNode("Expand", {value_zero, num_rois})->output(0);
auto roi_align_node = helper_->MakeNode(
"RoiAlign", {x_info[0].name, rois_info[0].name, batch_indices},
{out_info[0].name});
AddAttribute(roi_align_node, "output_height", pooled_height_);
AddAttribute(roi_align_node, "output_width", pooled_width_);
AddAttribute(roi_align_node, "sampling_ratio", sampling_ratio_);
AddAttribute(roi_align_node, "spatial_scale", spatial_scale_);
AddAttribute(roi_align_node, "mode", "avg");
}
} // namespace paddle2onnx

View File

@@ -0,0 +1,47 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class RoiAlignMapper : public Mapper {
public:
RoiAlignMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
MarkAsExperimentalOp();
GetAttr("pooled_height", &pooled_height_);
GetAttr("pooled_width", &pooled_width_);
GetAttr("spatial_scale", &spatial_scale_);
GetAttr("sampling_ratio", &sampling_ratio_);
GetAttr("aligned", &aligned_);
}
int32_t GetMinOpset(bool verbose = false) {
Logger(verbose, 10) << RequireOpset(10) << std::endl;
return 10;
}
void Opset10();
private:
int64_t pooled_height_;
int64_t pooled_width_;
float spatial_scale_;
int64_t sampling_ratio_;
bool aligned_;
};
} // namespace paddle2onnx

View File

@@ -0,0 +1,254 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/detection/yolo_box.h"
namespace paddle2onnx {
REGISTER_MAPPER(yolo_box, YoloBoxMapper)
int32_t YoloBoxMapper::GetMinOpset(bool verbose) {
Logger(verbose, 11) << RequireOpset(11) << std::endl;
return 11;
}
void YoloBoxMapper::Opset11() {
auto x_info_ori = GetInput("X");
// handle the float64 input
auto x_info = x_info_ori;
if (x_info_ori[0].dtype != P2ODataType::FP32) {
x_info[0].name = helper_->AutoCast(x_info_ori[0].name, x_info_ori[0].dtype,
P2ODataType::FP32);
x_info[0].dtype = P2ODataType::FP32;
}
auto im_size_info = GetInput("ImgSize");
auto boxes_info = GetOutput("Boxes");
auto scores_info = GetOutput("Scores");
int64_t max_int = 999999;
int64_t anchor_num = anchors_.size() / 2;
auto x_shape = helper_->MakeNode("Shape", {x_info[0].name});
std::vector<std::string> nchw = helper_->Split(
x_shape->output(0), std::vector<int64_t>(4, 1), int64_t(0));
std::string float_h =
helper_->AutoCast(nchw[2], P2ODataType::INT64, x_info[0].dtype);
std::string float_w =
helper_->AutoCast(nchw[3], P2ODataType::INT64, x_info[0].dtype);
auto anchor_num_tensor =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, anchor_num);
auto x_name = x_info[0].name;
if (iou_aware_) {
// Here we rely on the fact that a very large `ends` value is clamped to the
// end of the axis. This is the standard definition in ONNX; however, it is
// not certain that all inference engines implement `Slice` this way. Let's
// handle this issue later.
x_name = helper_->Slice(x_name, {0, 1, 2, 3}, {0, 0, 0, 0},
{max_int, anchor_num, max_int, max_int});
}
auto unknown_dim =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(-1));
auto shape_0 = helper_->MakeNode(
"Concat", {nchw[0], anchor_num_tensor, unknown_dim, nchw[2], nchw[3]});
AddAttribute(shape_0, "axis", int64_t(0));
auto reshaped_x = helper_->MakeNode("Reshape", {x_name, shape_0->output(0)});
auto transposed_x = helper_->MakeNode("Transpose", {reshaped_x->output(0)});
{
std::vector<int64_t> perm({0, 1, 3, 4, 2});
AddAttribute(transposed_x, "perm", perm);
}
// grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1))
// grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w))
auto float_value_0 =
helper_->Constant({}, GetOnnxDtype(x_info[0].dtype), float(0.0));
auto float_value_1 =
helper_->Constant({}, GetOnnxDtype(x_info[0].dtype), float(1.0));
auto scalar_float_w = helper_->Squeeze(float_w, {});
auto scalar_float_h = helper_->Squeeze(float_h, {});
auto grid_x_0 = helper_->MakeNode(
"Range", {float_value_0, scalar_float_w, float_value_1}); // shape is [w]
auto grid_y_0 = helper_->MakeNode(
"Range", {float_value_0, scalar_float_h, float_value_1}); // shape is [h]
auto grid_x_1 = helper_->MakeNode(
"Tile", {grid_x_0->output(0), nchw[2]}); // shape is [w*h]
auto grid_y_1 = helper_->MakeNode(
"Tile", {grid_y_0->output(0), nchw[3]}); // shape is [h*w]
auto int_value_1 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(1));
auto grid_shape_x =
helper_->MakeNode("Concat", {nchw[2], nchw[3], int_value_1});
auto grid_shape_y =
helper_->MakeNode("Concat", {nchw[3], nchw[2], int_value_1});
AddAttribute(grid_shape_x, "axis", int64_t(0));
AddAttribute(grid_shape_y, "axis", int64_t(0));
auto grid_x = helper_->MakeNode(
"Reshape", {grid_x_1->output(0), grid_shape_x->output(0)});
auto grid_y_2 = helper_->MakeNode(
"Reshape", {grid_y_1->output(0), grid_shape_y->output(0)});
auto grid_y = helper_->MakeNode("Transpose", {grid_y_2->output(0)});
{
std::vector<int64_t> perm({1, 0, 2});
AddAttribute(grid_y, "perm", perm);
}
auto grid =
helper_->MakeNode("Concat", {grid_x->output(0), grid_y->output(0)});
AddAttribute(grid, "axis", int64_t(2));
// pred_box[:, :, :, :, 0] = (grid_x + sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y + bias_x_y) / w
// pred_box[:, :, :, :, 1] = (grid_y + sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y + bias_x_y) / h
auto pred_box_xy =
helper_->Slice(transposed_x->output(0), {0, 1, 2, 3, 4}, {0, 0, 0, 0, 0},
{max_int, max_int, max_int, max_int, 2});
auto scale_x_y =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), scale_x_y_);
float bias_x_y_value = (1.0 - scale_x_y_) / 2.0;
auto bias_x_y =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), bias_x_y_value);
auto wh = helper_->MakeNode("Concat", {float_w, float_h});
AddAttribute(wh, "axis", int64_t(0));
pred_box_xy = helper_->MakeNode("Sigmoid", {pred_box_xy})->output(0);
pred_box_xy = helper_->MakeNode("Mul", {pred_box_xy, scale_x_y})->output(0);
pred_box_xy = helper_->MakeNode("Add", {pred_box_xy, bias_x_y})->output(0);
pred_box_xy =
helper_->MakeNode("Add", {pred_box_xy, grid->output(0)})->output(0);
pred_box_xy =
helper_->MakeNode("Div", {pred_box_xy, wh->output(0)})->output(0);
// anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
// anchors_s = np.array(
// [(an_w / input_w, an_h / input_h) for an_w, an_h in anchors])
// anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1))
// anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1))
std::vector<int64_t> valid_anchors(anchor_num * 2);
valid_anchors.assign(anchors_.begin(), anchors_.begin() + anchor_num * 2);
auto anchors =
helper_->Constant(GetOnnxDtype(x_info[0].dtype), valid_anchors);
anchors = helper_->Reshape(anchors, {anchor_num, 2});
auto downsample =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), downsample_ratio_);
auto ori_wh =
helper_->MakeNode("Mul", {wh->output(0), downsample})->output(0);
anchors = helper_->MakeNode("Div", {anchors, ori_wh})->output(0);
// The following divide operation requires unidirectional broadcast.
// It satisfies the ONNX definition, but it is not certain that all inference
// engines (e.g. TensorRT, OpenVINO) support this rule.
//   anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1))
//   anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1))
//   pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w
//   pred_box[:, :, :, :, 3] = np.exp(pred_box[:, :, :, :, 3]) * anchor_h
anchors = helper_->Reshape(anchors, {1, anchor_num, 1, 1, 2});
auto pred_box_wh =
helper_->Slice(transposed_x->output(0), {0, 1, 2, 3, 4}, {0, 0, 0, 0, 2},
{max_int, max_int, max_int, max_int, 4});
pred_box_wh = helper_->MakeNode("Exp", {pred_box_wh})->output(0);
pred_box_wh = helper_->MakeNode("Mul", {pred_box_wh, anchors})->output(0);
// if iou_aware:
// pred_conf = sigmoid(x[:, :, :, :, 4:5])**(
// 1 - iou_aware_factor) * sigmoid(ioup)**iou_aware_factor
// else:
// pred_conf = sigmoid(x[:, :, :, :, 4:5])
auto confidence =
helper_->Slice(transposed_x->output(0), {0, 1, 2, 3, 4}, {0, 0, 0, 0, 4},
{max_int, max_int, max_int, max_int, 5});
std::string pred_conf = helper_->MakeNode("Sigmoid", {confidence})->output(0);
if (iou_aware_) {
auto ioup = helper_->Slice(x_info[0].name, {0, 1, 2, 3}, {0, 0, 0, 0},
{max_int, anchor_num, max_int, max_int});
ioup = helper_->Unsqueeze(ioup, {4});
ioup = helper_->MakeNode("Sigmoid", {ioup})->output(0);
float power_value_0 = 1 - iou_aware_factor_;
auto power_0 =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), power_value_0);
auto power_1 = helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype),
iou_aware_factor_);
ioup = helper_->MakeNode("Pow", {ioup, power_1})->output(0);
pred_conf = helper_->MakeNode("Pow", {pred_conf, power_0})->output(0);
pred_conf = helper_->MakeNode("Mul", {pred_conf, ioup})->output(0);
}
// pred_conf[pred_conf < conf_thresh] = 0.
// pred_score = sigmoid(x[:, :, :, :, 5:]) * pred_conf
// pred_box = pred_box * (pred_conf > 0.).astype('float32')
auto value_2 =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), float(2.0));
auto center = helper_->MakeNode("Div", {pred_box_wh, value_2})->output(0);
auto min_xy = helper_->MakeNode("Sub", {pred_box_xy, center})->output(0);
auto max_xy = helper_->MakeNode("Add", {pred_box_xy, center})->output(0);
auto conf_thresh =
helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), conf_thresh_);
auto filter =
helper_->MakeNode("Greater", {pred_conf, conf_thresh})->output(0);
filter = helper_->AutoCast(filter, P2ODataType::BOOL, x_info[0].dtype);
pred_conf = helper_->MakeNode("Mul", {pred_conf, filter})->output(0);
auto pred_score =
helper_->Slice(transposed_x->output(0), {0, 1, 2, 3, 4}, {0, 0, 0, 0, 5},
{max_int, max_int, max_int, max_int, max_int});
pred_score = helper_->MakeNode("Sigmoid", {pred_score})->output(0);
pred_score = helper_->MakeNode("Mul", {pred_score, pred_conf})->output(0);
auto pred_box = helper_->Concat({min_xy, max_xy}, 4);
pred_box = helper_->MakeNode("Mul", {pred_box, filter})->output(0);
auto value_neg_1 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(-1));
auto value_4 =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(4));
auto new_shape = helper_->Concat({nchw[0], value_neg_1, value_4}, 0);
pred_box = helper_->MakeNode("Reshape", {pred_box, new_shape})->output(0);
auto float_img_size = helper_->AutoCast(
im_size_info[0].name, im_size_info[0].dtype, x_info[0].dtype);
float_img_size = helper_->Unsqueeze(float_img_size, {1});
auto split_im_hw = helper_->Split(float_img_size, {1, 1}, 2);
auto im_whwh = helper_->Concat(
{split_im_hw[1], split_im_hw[0], split_im_hw[1], split_im_hw[0]}, 2);
if (!clip_bbox_) {
auto out = helper_->MakeNode("Mul", {pred_box, im_whwh})->output(0);
helper_->AutoCast(out, boxes_info[0].name, x_info[0].dtype,
boxes_info[0].dtype);
} else {
pred_box = helper_->MakeNode("Mul", {pred_box, im_whwh})->output(0);
auto im_wh = helper_->Concat({split_im_hw[1], split_im_hw[0]}, 2);
im_wh = helper_->MakeNode("Sub", {im_wh, float_value_1})->output(0);
auto pred_box_xymin_xymax = helper_->Split(pred_box, {2, 2}, 2);
pred_box_xymin_xymax[0] =
helper_->MakeNode("Relu", {pred_box_xymin_xymax[0]})->output(0);
pred_box_xymin_xymax[1] =
helper_->MakeNode("Min", {pred_box_xymin_xymax[1], im_wh})->output(0);
auto out = helper_->Concat(pred_box_xymin_xymax, 2);
helper_->AutoCast(out, boxes_info[0].name, x_info[0].dtype,
boxes_info[0].dtype);
}
auto class_num =
helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, class_num_);
auto score_out_shape =
helper_->Concat({nchw[0], value_neg_1, class_num}, int64_t(0));
auto score_out =
helper_->MakeNode("Reshape", {pred_score, score_out_shape})->output(0);
helper_->AutoCast(score_out, scores_info[0].name, x_info[0].dtype,
scores_info[0].dtype);
}
} // namespace paddle2onnx

View File

@@ -0,0 +1,53 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class YoloBoxMapper : public Mapper {
public:
YoloBoxMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
MarkAsExperimentalOp();
GetAttr("clip_bbox", &clip_bbox_);
GetAttr("iou_aware", &iou_aware_);
GetAttr("conf_thresh", &conf_thresh_);
GetAttr("iou_aware_factor", &iou_aware_factor_);
GetAttr("class_num", &class_num_);
GetAttr("downsample_ratio", &downsample_ratio_);
GetAttr("scale_x_y", &scale_x_y_);
GetAttr("anchors", &anchors_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset11();
private:
bool clip_bbox_;
bool iou_aware_;
float conf_thresh_;
float iou_aware_factor_;
float scale_x_y_;
int64_t class_num_;
int64_t downsample_ratio_;
std::vector<int64_t> anchors_;
};
} // namespace paddle2onnx

View File

@@ -0,0 +1,188 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/elementwise.h"
namespace paddle2onnx {
REGISTER_MAPPER(elementwise_add, ElementwiseMapper)
REGISTER_MAPPER(elementwise_sub, ElementwiseMapper)
REGISTER_MAPPER(elementwise_div, ElementwiseMapper)
REGISTER_MAPPER(elementwise_mul, ElementwiseMapper)
REGISTER_MAPPER(elementwise_min, ElementwiseMapper)
REGISTER_MAPPER(elementwise_max, ElementwiseMapper)
REGISTER_MAPPER(elementwise_pow, ElementwiseMapper)
REGISTER_MAPPER(elementwise_mod, ElementWiseModMapper)
REGISTER_MAPPER(elementwise_floordiv, ElementWiseFloordivMapper)
int32_t ElementwiseMapper::GetMinOpset(bool verbose) {
if (OpType() == "elementwise_min" || OpType() == "elementwise_max") {
Logger(verbose, 8) << RequireOpset(8) << std::endl;
return 8;
}
return 7;
}
void ElementwiseMapper::Opset7() {
auto input_x_info = GetInput("X");
auto input_y_info = GetInput("Y");
auto output_info = GetOutput("Out");
auto iter = op_mapper_.find(OpType());
Assert(op_mapper_.end() != iter,
"Cannot find " + OpType() + " in elementwise op_mapper.");
auto x_name = input_x_info[0].name;
auto y_name = input_y_info[0].name;
if (input_x_info[0].dtype == P2ODataType::BOOL &&
input_y_info[0].dtype == P2ODataType::BOOL) {
x_name =
helper_->AutoCast(x_name, input_x_info[0].dtype, P2ODataType::INT32);
y_name =
helper_->AutoCast(y_name, input_y_info[0].dtype, P2ODataType::INT32);
}
std::string output_name;
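// Example: X with rank 4, Y with shape [C] and axis_ = 1 -> Y is reshaped to
// [1, C, 1, 1] below so that the binary op broadcasts along axis 1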
if (axis_ == -1 || axis_ == (input_x_info[0].Rank() - 1) ||
input_x_info[0].Rank() == input_y_info[0].Rank()) {
output_name = helper_->MakeNode(iter->second, {x_name, y_name})->output(0);
} else {
std::vector<int64_t> broadcast_shape(input_x_info[0].Rank(), 1);
for (int i = axis_; i < axis_ + input_y_info[0].Rank(); ++i) {
broadcast_shape[i] = input_y_info[0].shape[i - axis_];
}
std::string broadcast_shape_node =
helper_->Constant(GetOnnxDtype(P2ODataType::INT64), broadcast_shape);
auto y_node = helper_->MakeNode("Reshape", {y_name, broadcast_shape_node});
output_name =
helper_->MakeNode(iter->second, {x_name, y_node->output(0)})->output(0);
}
if (input_x_info[0].dtype == P2ODataType::BOOL &&
input_y_info[0].dtype == P2ODataType::BOOL) {
helper_->AutoCast(output_name, output_info[0].name, P2ODataType::INT32,
P2ODataType::BOOL);
} else {
helper_->MakeNode("Identity", {output_name}, {output_info[0].name});
}
}
void ElementWiseModMapper::Opset10() {
auto input_x_info = GetInput("X");
auto input_y_info = GetInput("Y");
auto output_info = GetOutput("Out");
int64_t fmod = 0;
if (input_y_info[0].dtype == P2ODataType::INT32 ||
input_y_info[0].dtype == P2ODataType::INT64) {
if (this->deploy_backend == "tensorrt") {
auto x = helper_->AutoCast(input_x_info[0].name, input_x_info[0].dtype,
input_y_info[0].dtype);
auto times =
helper_->MakeNode("Div", {input_x_info[0].name, input_y_info[0].name})
->output(0);
auto result =
helper_->MakeNode("Mul", {input_y_info[0].name, times})->output(0);
helper_->MakeNode("Sub", {input_x_info[0].name, result},
{output_info[0].name});
return;
}
auto mod_node =
helper_->MakeNode("Mod", {input_x_info[0].name, input_y_info[0].name},
{output_info[0].name});
AddAttribute(mod_node, "fmod", fmod);
return;
}
fmod = 1;
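// Worked example for the float branch below: x = -7.0, y = 3.0 ->
// fmod(|x|, |y|) = 1.0, negated to -1.0 because x < 0; since (-1.0) * 3.0 < 0
// we add y, giving 2.0, which matches Paddle's Python-style -7.0 % 3.0 == 2.0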
auto abs_x_node = helper_->MakeNode("Abs", {input_x_info[0].name});
auto abs_y_node = helper_->MakeNode("Abs", {input_y_info[0].name});
auto dtype = input_y_info[0].dtype;
std::vector<float> val_0 = {0.0};
std::string zero_node = helper_->Constant(GetOnnxDtype(dtype), val_0);
auto mod_node =
helper_->MakeNode("Mod", {abs_x_node->output(0), abs_y_node->output(0)});
AddAttribute(mod_node, "fmod", fmod);
auto neg_node = helper_->MakeNode("Neg", {mod_node->output(0)});
auto less_node = helper_->MakeNode("Less", {input_x_info[0].name, zero_node});
std::string condition_node =
helper_->AutoCast(less_node->output(0), dtype, P2ODataType::BOOL);
auto mod_res_node = helper_->MakeNode(
"Where", {condition_node, neg_node->output(0), mod_node->output(0)});
auto mod_y_add_node =
helper_->MakeNode("Add", {mod_res_node->output(0), input_y_info[0].name});
auto mod_y_mul_node =
helper_->MakeNode("Mul", {mod_res_node->output(0), input_y_info[0].name});
auto mod_y_mul_less_node =
helper_->MakeNode("Less", {mod_y_mul_node->output(0), zero_node});
std::string mod_y_mul_condition_node = helper_->AutoCast(
mod_y_mul_less_node->output(0), dtype, P2ODataType::BOOL);
helper_->MakeNode("Where",
{mod_y_mul_condition_node, mod_y_add_node->output(0),
mod_res_node->output(0)},
{output_info[0].name});
}
void ElementWiseFloordivMapper::Opset7() {
auto input_x_info = GetInput("X");
auto input_y_info = GetInput("Y");
auto output_info = GetOutput("Out");
bool is_int = false;
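// Note: the dtype codes (<= 3 and 20) are assumed here to denote the integer
// types of P2ODataType; the enum itself is defined elsewhere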
if (input_x_info[0].dtype <= 3 || input_x_info[0].dtype == 20 ||
input_y_info[0].dtype <= 3 || input_y_info[0].dtype == 20) {
is_int = true;
}
if (axis_ == -1 || axis_ == input_x_info[0].Rank() - 1 ||
input_x_info[0].Rank() == input_y_info[0].Rank()) {
if (is_int) {
helper_->MakeNode("Div", {input_x_info[0].name, input_y_info[0].name},
{output_info[0].name});
} else {
auto div_node = helper_->MakeNode(
"Div", {input_x_info[0].name, input_y_info[0].name});
helper_->MakeNode("Floor", {div_node->output(0)}, {output_info[0].name});
}
} else {
std::vector<int64_t> broadcast_shape(input_x_info[0].Rank(), 1);
for (auto i = 0; i < input_y_info[0].Rank(); ++i) {
broadcast_shape[axis_ + i] = input_y_info[0].shape[i];
}
std::string broadcast_shape_node =
helper_->Constant(GetOnnxDtype(P2ODataType::INT64), broadcast_shape);
auto y_node = helper_->MakeNode(
"Reshape", {input_y_info[0].name, broadcast_shape_node});
if (is_int) {
helper_->MakeNode("Div", {input_x_info[0].name, y_node->output(0)},
{output_info[0].name});
} else {
auto div_node =
helper_->MakeNode("Div", {input_x_info[0].name, y_node->output(0)});
helper_->MakeNode("Floor", {div_node->output(0)}, {output_info[0].name});
}
}
}
} // namespace paddle2onnx

View File

@@ -0,0 +1,75 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class ElementwiseMapper : public Mapper {
public:
ElementwiseMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("axis", &axis_);
op_mapper_["elementwise_add"] = "Add";
op_mapper_["elementwise_sub"] = "Sub";
op_mapper_["elementwise_div"] = "Div";
op_mapper_["elementwise_mul"] = "Mul";
op_mapper_["elementwise_min"] = "Min";
op_mapper_["elementwise_max"] = "Max";
op_mapper_["elementwise_pow"] = "Pow";
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::map<std::string, std::string> op_mapper_;
int64_t axis_;
};
class ElementWiseModMapper : public Mapper {
public:
ElementWiseModMapper(const PaddleParser& p, OnnxHelper* helper,
int64_t block_id, int64_t op_id)
: Mapper(p, helper, block_id, op_id) {}
int32_t GetMinOpset(bool verbose = false) {
Logger(verbose, 10) << RequireOpset(10) << std::endl;
return 10;
}
void Opset10();
};
class ElementWiseFloordivMapper : public Mapper {
public:
ElementWiseFloordivMapper(const PaddleParser& p, OnnxHelper* helper,
int64_t block_id, int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("axis", &axis_);
}
void Opset7();
private:
int64_t axis_;
};
} // namespace paddle2onnx

View File

@@ -0,0 +1,548 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/exporter.h"
#include <google/protobuf/message.h>
#include <onnx/checker.h>
#include <array>
#include "onnxoptimizer/optimize.h"
#include "paddle2onnx/optimizer/convert_fp32_to_fp16.h"
#include "paddle2onnx/optimizer/eliminate_non_transpose.h"
#include "paddle2onnx/optimizer/fuse_constant_cast.h"
#include "paddle2onnx/optimizer/fuse_constant_reshape.h"
#include "paddle2onnx/optimizer/fuse_constant_unsqueeze.h"
#include "paddle2onnx/optimizer/fuse_paddle_conv_bias.h"
#include "paddle2onnx/optimizer/fuse_unsqueeze_conv2d_squeeze.h"
namespace paddle2onnx {
MapperHelper* MapperHelper::helper = nullptr;
void ModelExporter::ExportParameters(
const std::map<std::string, Weight>& params, bool use_initializer) {
for (auto& item : params) {
// TODO(jiangjiajun) I'm not handling use_initializer now, but some day I
// will
auto node = MakeConstant(item.first, item.second);
parameters.push_back(std::move(node));
}
}
void ModelExporter::UpdateParameters(
const std::map<std::string, Weight>& params) {
for (auto& item : params) {
auto node = MakeConstant(item.first, item.second);
bool updated = false;
for (int i = 0; i < parameters.size(); ++i) {
auto old_node = parameters[i];
if (old_node->output(0) == item.first) {
parameters.erase(parameters.begin() + i);
parameters.push_back(std::move(node));
updated = true;
break;
}
}
if (!updated) {
parameters.push_back(std::move(node));
}
}
}
void ModelExporter::ExportInputOutputs(
const std::vector<TensorInfo>& input_infos,
const std::vector<TensorInfo>& output_infos) {
for (auto& item : input_infos) {
auto value_info = MakeValueInfo(item);
inputs.push_back(std::move(value_info));
}
for (auto& item : output_infos) {
auto value_info = MakeValueInfo(item);
outputs.push_back(std::move(value_info));
}
}
void ModelExporter::ExportOp(const PaddleParser& parser, OnnxHelper* helper,
int32_t opset_version, int64_t block_id,
int64_t op_id, bool verbose) {
_current_exported_num += 1;
auto op = parser.GetOpDesc(block_id, op_id);
#ifdef PADDLE2ONNX_DEBUG
P2OLogger(true) << "---Converting operator: " << op.type() << " ---"
<< std::endl;
#endif
if (op.type() == "while") {
return ExportLoop(parser, helper, opset_version, block_id, op_id, verbose);
}
auto mapper = MapperHelper::Get()->CreateMapper(op.type(), parser, helper,
block_id, op_id);
mapper->deploy_backend = _deploy_backend;
#ifdef PADDLE2ONNX_DEBUG
P2OLogger(true) << "Mapper Name: " << mapper->Name() << std::endl;
#endif
// Some operators will export as custom operator
auto iter = custom_ops.find(op.type());
if (iter != custom_ops.end()) {
mapper->export_as_custom_op = true;
mapper->custom_op_name = iter->second;
}
mapper->Run();
delete mapper;
#ifdef PADDLE2ONNX_DEBUG
P2OLogger(true) << "---Converting operator: " << op.type() << " done---"
<< std::endl;
#endif
}
void ModelExporter::ProcessGraphDumplicateNames(
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>>* parameters,
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>>* inputs,
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>>* outputs,
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>>* nodes,
std::map<std::string, QuantizeInfo>* quantize_info) {
// process duplicate tensor names
std::map<std::string, std::string> renamer;
std::set<std::string> tensor_names;
for (auto& item : *parameters) {
for (size_t i = 0; i < item->output_size(); ++i) {
if (tensor_names.find(item->output(i)) != tensor_names.end()) {
Assert(false, "There's dumplicate names in exported parameters.");
}
tensor_names.insert(item->output(i));
}
}
for (auto& item : *inputs) {
if (tensor_names.find(item->name()) != tensor_names.end()) {
Assert(false, "There's dumplicate names:" + item->name() +
" in exported parameters and inputs.");
}
tensor_names.insert(item->name());
}
for (auto& item : *nodes) {
// update node inputs
for (size_t i = 0; i < item->input_size(); ++i) {
if (renamer.find(item->input(i)) != renamer.end()) {
auto updated_name = renamer[item->input(i)];
while (renamer.find(updated_name) != renamer.end()) {
updated_name = renamer[updated_name];
}
*(item->mutable_input(i)) = updated_name;
}
}
// If there is a duplicate name, generate a new name and replace it
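// Example (illustrative; the exact generated name depends on GenName): if two
// nodes both produce "conv_out", the second output is renamed to a fresh name
// and every later input that referenced it follows the renamer chain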
for (size_t i = 0; i < item->output_size(); ++i) {
if (tensor_names.find(item->output(i)) != tensor_names.end()) {
std::string renamed_tensor_name = item->output(i);
while (renamer.find(renamed_tensor_name) != renamer.end()) {
renamed_tensor_name = renamer[renamed_tensor_name];
}
auto new_tensor_name =
MapperHelper::Get()->GenName(renamed_tensor_name);
P2OLogger() << "Find dumplicate output name '" << renamed_tensor_name
<< "', it will rename to '" << new_tensor_name << "'."
<< std::endl;
if (quantize_info &&
quantize_info->find(renamed_tensor_name) != quantize_info->end()) {
(*quantize_info)[new_tensor_name] =
(*quantize_info)[renamed_tensor_name];
}
*(item->mutable_output(i)) = new_tensor_name;
renamer[renamed_tensor_name] = new_tensor_name;
}
tensor_names.insert(item->output(i));
}
}
for (auto& item : *outputs) {
if (renamer.find(item->name()) != renamer.end()) {
auto updated_name = renamer[item->name()];
while (renamer.find(updated_name) != renamer.end()) {
updated_name = renamer[updated_name];
}
item->set_name(updated_name);
}
}
}
void ModelExporter::SaveExternalData(::paddle2onnx::GraphProto* graph,
const std::string& external_file_path,
bool* save_external) {
P2OLogger() << "The exported ONNX model is bigger than 2G, external data "
"will save to file: "
<< external_file_path << std::endl;
std::string file_name = GetFilenameFromPath(external_file_path);
if (save_external) {
*save_external = true;
}
std::fstream f(external_file_path, std::ios::out);
Assert(f.is_open(), "Failed to open: " + external_file_path +
" file to save external data");
for (auto index = 0; index < graph->node_size(); index++) {
auto node = graph->mutable_node(index);
if (node->op_type() != "Constant") {
continue;
}
for (auto i = 0; i < node->attribute_size(); i++) {
auto attr = node->mutable_attribute(i);
if (attr->name() != "value") {
continue;
}
auto tensor = attr->mutable_t();
if (tensor->raw_data().size() <= 128) {
continue;
}
tensor->set_data_location(TensorProto::EXTERNAL);
auto external_data = tensor->add_external_data();
external_data->set_key("location");
external_data->set_value(file_name);
external_data = tensor->add_external_data();
external_data->set_key("offset");
      f.seekp(0, std::ios::end);
      int64_t offset = f.tellp();
external_data->set_value(std::to_string(offset));
auto raw_data = tensor->raw_data();
f << raw_data;
external_data = tensor->add_external_data();
external_data->set_key("length");
int64_t raw_datas_size = raw_data.size();
external_data->set_value(std::to_string(raw_datas_size));
tensor->clear_raw_data();
}
}
f.close();
}
void ModelExporter::ONNXChecker(const ONNX_NAMESPACE::ModelProto& model,
const bool& verbose) {
  // TODO(jiangjiajun)
  // If we need to integrate with a framework,
  // this check should return information
  // to let the framework know whether the
  // conversion passed or failed
try {
// ONNX_NAMESPACE::checker::check_model(*(model.get()));
ONNX_NAMESPACE::checker::check_model(model);
} catch (const std::exception& e) {
P2OLogger(verbose) << "The exported ONNX model is invalid." << std::endl;
P2OLogger(verbose) << "Model checker error log: " << e.what() << std::endl;
}
P2OLogger(verbose) << "PaddlePaddle model is exported as ONNX format now."
<< std::endl;
}
std::string ModelExporter::Run(
const PaddleParser& parser, int opset_version, bool auto_upgrade_opset,
bool verbose, bool enable_onnx_checker, bool enable_experimental_op,
bool enable_optimize, const std::string& deploy_backend,
std::string* calibration_cache, const std::string& external_file,
bool* save_external, bool export_fp16_model) {
_deploy_backend = deploy_backend;
_helper.SetOpsetVersion(opset_version);
_total_ops_num = 0;
_current_exported_num = 0;
for (auto i = 0; i < parser.NumOfBlocks(); ++i) {
_total_ops_num += parser.NumOfOps(i);
}
_helper.nodes.reserve(_total_ops_num * 3);
Assert(opset_version <= MAX_ONNX_OPSET_VERSION && opset_version >= 7,
"Paddle2ONNX now only support opset version in range of [7, " +
std::to_string(MAX_ONNX_OPSET_VERSION) + "].");
_helper.Clear();
inputs.clear();
outputs.clear();
parameters.clear();
  // clear name_counter, which is used to
  // generate unique names for intermediate
  // tensors while converting all the ops
MapperHelper::Get()->ClearNameCounter();
std::set<std::string> unsupported_ops;
if (!CheckIfOpSupported(parser, &unsupported_ops, enable_experimental_op)) {
auto logger = P2OLogger();
logger << "Oops, there are some operators not supported yet, including ";
for (auto& item : unsupported_ops) {
logger << item << ",";
}
logger << std::endl;
    Assert(false,
"Due to the unsupported operators, the conversion is aborted.");
}
int32_t min_opset = GetMinOpset(parser, verbose);
if (min_opset < 0) {
Assert(false,
"Model exporting failed, you can report this problem to "
"https://github.com/PaddlePaddle/Paddle2ONNX.git.");
}
if (!auto_upgrade_opset) {
if (min_opset > opset_version) {
P2OLogger() << "This PaddlePaddle model is not able to export to ONNX "
"with opset_version="
<< opset_version << ", please set the opset_version to "
<< min_opset << " or higher for successfully conversion."
<< std::endl;
Assert(false,
"Due to opset version, the model exporting is aborted, please set "
"a higher opset_version or set auto_upgrade_opset=true.");
}
} else {
if (min_opset > opset_version) {
P2OLogger() << "Opset version will change to " << min_opset << " from "
<< opset_version << std::endl;
opset_version = min_opset;
}
}
_helper.SetOpsetVersion(opset_version);
P2OLogger(verbose) << "Use opset_version = " << _helper.GetOpsetVersion()
<< " for ONNX export." << std::endl;
ExportParameters(parser.params);
ExportInputOutputs(parser.inputs, parser.outputs);
// Only convert blocks 0 now
// because control flow is not supported yet
for (auto i = 0; i < parser.NumOfOps(0); ++i) {
auto op = parser.GetOpDesc(0, i);
if (op.type() == "feed") {
continue;
} else if (op.type() == "fetch") {
continue;
}
ExportOp(parser, &_helper, opset_version, 0, i, verbose);
}
// construct a onnx model proto
auto model = std::make_shared<ONNX_NAMESPACE::ModelProto>();
// TODO(jiangjiajun) ir version is related to onnx version
model->set_ir_version(ONNX_NAMESPACE::IR_VERSION);
auto graph = model->mutable_graph();
graph->set_name("Model from PaddlePaddle.");
auto opset_id = model->add_opset_import();
opset_id->set_domain("");
opset_id->set_version(opset_version);
if (custom_ops.size()) {
auto opset_paddle_id = model->add_opset_import();
opset_paddle_id->set_domain("Paddle");
opset_paddle_id->set_version(1);
}
ProcessGraphDumplicateNames(&parameters, &inputs, &outputs, &_helper.nodes,
&_helper.quantize_info);
if (parser.is_quantized_model) {
quantize_model_processer.ProcessQuantizeModel(
&parameters, &inputs, &outputs, &_helper.nodes, &_helper,
deploy_backend, parser, calibration_cache);
// Update int8 weights in quantized OP to float32
UpdateParameters(_helper.updated_params);
}
for (auto& item : parameters) {
*(graph->add_node()) = *(item.get());
}
for (auto& item : inputs) {
*(graph->add_input()) = *(item.get());
}
for (auto& item : _helper.nodes) {
*(graph->add_node()) = (*item.get());
}
for (auto& item : outputs) {
*(graph->add_output()) = (*item.get());
}
for (auto& item : _helper.value_infos) {
*(graph->add_value_info()) = (*item.get());
}
ONNX_NAMESPACE::ModelProto onnx_model;
std::string out;
if (enable_optimize) {
onnx_model = Optimize(*(model.get()));
} else {
onnx_model = *model.get();
}
// convert fp32 model to fp16
if (export_fp16_model) {
P2OLogger(verbose) << "Convert FP32 ONNX model to FP16." << std::endl;
ConvertFp32ToFp16 convert;
convert.SetCustomOps(custom_ops);
convert.Convert(&onnx_model);
}
// save external data file for big model
std::string external_data_file;
if (onnx_model.ByteSizeLong() > INT_MAX) {
if (external_file.empty()) {
external_data_file = "external_data";
} else {
external_data_file = external_file;
}
}
if (external_data_file.size()) {
SaveExternalData(onnx_model.mutable_graph(), external_data_file,
save_external);
}
// check model
if (enable_onnx_checker) {
ONNXChecker(onnx_model, verbose);
}
if (!onnx_model.SerializeToString(&out)) {
P2OLogger(verbose)
<< "Error happenedd while optimizing the exported ONNX model."
<< std::endl;
return "";
}
return out;
}
bool ModelExporter::CheckIfOpSupported(const PaddleParser& parser,
std::set<std::string>* unsupported_ops,
bool enable_experimental_op) {
unsupported_ops->clear();
for (auto i = 0; i < parser.NumOfBlocks(); ++i) {
for (auto j = 0; j < parser.NumOfOps(i); ++j) {
auto op = parser.GetOpDesc(i, j);
if (op.type() == "feed" || op.type() == "fetch") {
continue;
}
if (op.type() == "while" && enable_experimental_op) {
if (!IsLoopSupported(parser, i, j)) {
unsupported_ops->insert("while");
}
continue;
}
if (!MapperHelper::Get()->IsRegistered(op.type())) {
unsupported_ops->insert(op.type());
} else if (!enable_experimental_op) {
auto mapper = MapperHelper::Get()->CreateMapper(op.type(), parser,
&_helper, i, j);
if (mapper->IsExperimentalOp()) {
unsupported_ops->insert(op.type());
}
delete mapper;
}
}
}
return (unsupported_ops->size() == 0);
}
int32_t ModelExporter::GetMinOpset(const PaddleParser& parser, bool verbose) {
int32_t opset_version = _helper.GetOpsetVersion();
int32_t max_opset = 7;
bool exportable = true;
// Record the number of ops that need to be converted
int converted_op_num = 0;
std::set<std::string> verbose_log;
for (auto i = 0; i < parser.NumOfBlocks(); ++i) {
for (auto j = 0; j < parser.NumOfOps(i); ++j) {
auto op = parser.GetOpDesc(i, j);
if (custom_ops.find(op.type()) != custom_ops.end()) {
continue;
}
if (op.type() == "feed" || op.type() == "fetch") {
continue;
}
converted_op_num += 1;
int current_min_opset = 7;
if (op.type() == "while") {
P2OLogger() << "Detected there's control flow 'while' op in your "
"model, this requires the minimal opset version of 13."
<< std::endl;
current_min_opset = 13;
} else {
auto mapper = MapperHelper::Get()->CreateMapper(op.type(), parser,
&_helper, i, j);
current_min_opset = mapper->GetMinOpset(verbose);
delete mapper;
}
if (current_min_opset < 0) {
exportable = false;
P2OLogger(verbose) << "Due to the operator: " << op.type()
<< ", this model cannot be exported to ONNX."
<< std::endl;
} else if (current_min_opset > max_opset) {
max_opset = current_min_opset;
if (verbose && current_min_opset > opset_version) {
verbose_log.insert("Due to the operator: " + op.type() +
", requires opset_version >= " +
std::to_string(current_min_opset) + ".");
}
}
}
}
if (verbose) {
for (auto iter = verbose_log.begin(); iter != verbose_log.end(); ++iter) {
P2OLogger() << *iter << std::endl;
}
}
  // Here we put some checks to make sure
  // paddle2onnx is compatible with
  // other versions of onnx
if (exportable && (max_opset > MAX_ONNX_OPSET_VERSION)) {
exportable = false;
P2OLogger() << "[ERROR] The compiled ONNX version only supports opset 7~"
<< MAX_ONNX_OPSET_VERSION
<< ", but now this model need as least opset " << max_opset
<< ", please compile with higher version of ONNX." << std::endl;
}
if (exportable) {
return max_opset;
}
return -1;
}
ONNX_NAMESPACE::ModelProto ModelExporter::Optimize(
const ONNX_NAMESPACE::ModelProto& model) {
ONNX_NAMESPACE::optimization::Optimizer::passes
.registerPass<ONNX_NAMESPACE::optimization::FuseConstantReshape>();
ONNX_NAMESPACE::optimization::Optimizer::passes
.registerPass<ONNX_NAMESPACE::optimization::FuseConstantUnsqueeze>();
ONNX_NAMESPACE::optimization::Optimizer::passes
.registerPass<ONNX_NAMESPACE::optimization::FusePaddleConvBias>();
ONNX_NAMESPACE::optimization::Optimizer::passes
.registerPass<ONNX_NAMESPACE::optimization::FuseUnsqueezeConv2dSqueeze>();
ONNX_NAMESPACE::optimization::Optimizer::passes
.registerPass<ONNX_NAMESPACE::optimization::EliminateNonTranspose>();
ONNX_NAMESPACE::optimization::Optimizer::passes
.registerPass<ONNX_NAMESPACE::optimization::FuseConstantCast>();
std::vector<std::string> passes = {"eliminate_identity",
"eliminate_deadend",
"eliminate_deadend",
"fuse_constant_reshape",
"fuse_constant_unsqueeze",
"fuse_paddle_conv_bias",
"fuse_consecutive_transposes",
"eliminate_non_transpose",
"fuse_matmul_add_bias_into_gemm",
"eliminate_identity",
"eliminate_deadend",
"eliminate_unused_initializer"};
return ONNX_NAMESPACE::optimization::Optimize(model, passes);
}
} // namespace paddle2onnx
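
A minimal sketch (not part of this diff) of how a consumer could read one tensor back from the side file written by SaveExternalData above: each oversized Constant gets "location"/"offset"/"length" external_data entries, and its raw bytes live at that offset in the external file. The function name is hypothetical.

#include <cstdint>
#include <fstream>
#include <string>

std::string ReadExternalTensor(const std::string& file, int64_t offset,
                               int64_t length) {
  std::ifstream f(file, std::ios::in | std::ios::binary);
  if (!f.is_open()) {
    return "";
  }
  f.seekg(offset, std::ios::beg);  // jump to the recorded offset
  std::string raw(length, '\0');
  f.read(&raw[0], length);         // read exactly `length` raw bytes
  return raw;
}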

paddle2onnx/mapper/exporter.h Executable file
@@ -0,0 +1,122 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <onnx/onnx_pb.h>
#include <algorithm>
#include <set>
#include "paddle2onnx/mapper/mapper.h"
#include "paddle2onnx/mapper/quantize_helper.h"
#include "paddle2onnx/parser/parser.h"
#ifdef _MSC_VER
#define PATH_SEP "\\"
#else
#define PATH_SEP "/"
#endif
inline std::string GetFilenameFromPath(const std::string& path) {
auto pos = path.find_last_of(PATH_SEP);
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
namespace paddle2onnx {
struct ModelExporter {
private:
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>> parameters;
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>> inputs;
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>> outputs;
// The _deploy_backend will pass to Mapper to influence the conversion
std::string _deploy_backend = "onnxruntime";
OnnxHelper _helper;
int32_t _total_ops_num = 0;
int32_t _current_exported_num = 0;
void ExportParameters(const std::map<std::string, Weight>& params,
bool use_initializer = false);
  // Update constant nodes in parameters. When processing a quantized model,
  // the weight dtype may be int8; it should be converted to float32, and this
  // function is used to update the converted params.
void UpdateParameters(const std::map<std::string, Weight>& params);
void ExportInputOutputs(const std::vector<TensorInfo>& input_infos,
const std::vector<TensorInfo>& output_infos);
void ExportOp(const PaddleParser& parser, OnnxHelper* helper,
int32_t opset_version, int64_t block_id, int64_t op_id,
bool verbose);
bool IsLoopSupported(const PaddleParser& parser, const int64_t& block_id,
const int64_t& op_id);
void ExportLoop(const PaddleParser& parser, OnnxHelper* helper,
int32_t opset_version, int64_t block_id, int64_t op_id,
bool verbose);
ONNX_NAMESPACE::ModelProto Optimize(const ONNX_NAMESPACE::ModelProto& model);
public:
// custom operators for export
// <key: op_name, value:[exported_op_name, domain]>
std::map<std::string, std::string> custom_ops;
QuantizeModelProcessor quantize_model_processer;
// Get a proper opset version in range of [7, 16]
  // Also checks whether the model is convertible, which includes 2 parts
  // 1. whether the op's convert function is implemented
  // 2. whether the op is convertible (some cases may not be convertible)
  // If the model is not convertible, return -1
int32_t GetMinOpset(const PaddleParser& parser, bool verbose = false);
// // Remove isolated nodes in onnx model
// void RemoveIsolatedNodes(
// std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>>* parameters,
// std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>>* inputs,
// std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>>* outputs,
// std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>>* nodes);
  // Process duplicate tensor names in the Paddle model
  void ProcessGraphDuplicateNames(
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>>* parameters,
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>>* inputs,
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>>* outputs,
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>>* nodes,
std::map<std::string, QuantizeInfo>* quantize_info = nullptr);
bool CheckIfOpSupported(const PaddleParser& parser,
std::set<std::string>* unsupported_ops,
bool enable_experimental_op);
void SaveExternalData(::paddle2onnx::GraphProto* graph,
const std::string& external_file_path,
bool* save_external = nullptr);
void ONNXChecker(const ONNX_NAMESPACE::ModelProto& model,
const bool& verbose);
std::string Run(const PaddleParser& parser, int opset_version = 9,
bool auto_upgrade_opset = true, bool verbose = false,
bool enable_onnx_checker = true,
bool enable_experimental_op = false,
bool enable_optimize = true,
const std::string& deploy_backend = "onnxruntime",
std::string* calibration_cache = nullptr,
const std::string& external_file = "",
bool* save_external = nullptr,
bool export_fp16_model = false);
};
} // namespace paddle2onnx
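
The duplicate-name handling declared above records every rename in a renamer map and walks the chain on lookup, since a tensor may be renamed more than once. A standalone sketch of that walk, with made-up example data:

#include <iostream>
#include <map>
#include <string>

std::string ResolveName(const std::map<std::string, std::string>& renamer,
                        std::string name) {
  auto iter = renamer.find(name);
  while (iter != renamer.end()) {  // a name may have been renamed repeatedly
    name = iter->second;
    iter = renamer.find(name);
  }
  return name;
}

int main() {
  std::map<std::string, std::string> renamer = {{"x", "x_0"}, {"x_0", "x_1"}};
  std::cout << ResolveName(renamer, "x") << std::endl;  // prints "x_1"
  return 0;
}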

paddle2onnx/mapper/loop.cc Normal file
@@ -0,0 +1,195 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/exporter.h"
namespace paddle2onnx {
bool ModelExporter::IsLoopSupported(const PaddleParser& parser,
const int64_t& block_id,
const int64_t& op_id) {
auto x_info = parser.GetOpInput(block_id, op_id, "X");
auto out_info = parser.GetOpOutput(block_id, op_id, "Out");
auto cond_info = parser.GetOpInput(block_id, op_id, "Condition");
std::set<std::string> input_names;
for (size_t i = 0; i < x_info.size(); ++i) {
input_names.insert(x_info[i].name);
}
input_names.insert(cond_info[0].name);
for (size_t i = 0; i < out_info.size(); ++i) {
auto iter = input_names.find(out_info[i].name);
if (iter == input_names.end()) {
P2OLogger() << "Cannot find output:" << out_info[i].name << " in input tensors while converting operator 'while', Paddle2ONNX doesn't support this situation now." << std::endl;
return false;
}
}
for (size_t i = 0; i < x_info.size(); ++i) {
if (x_info[i].is_tensor_array) {
P2OLogger() << "LodTensorArray is not supported." << std::endl;
return false;
}
}
return true;
}
void ModelExporter::ExportLoop(const PaddleParser& parser, OnnxHelper* helper,
int32_t opset_version, int64_t block_id,
int64_t op_id, bool verbose) {
auto op = parser.GetOpDesc(block_id, op_id);
int32_t sub_block_idx = -1;
for (size_t i = 0; i < op.attrs_size(); ++i) {
if (op.attrs(i).name() == "sub_block") {
sub_block_idx = op.attrs(i).block_idx();
break;
}
}
Assert(sub_block_idx > 0, "Cannot find sub_block in while operator.");
auto x_info = parser.GetOpInput(block_id, op_id, "X");
auto cond_info = parser.GetOpInput(block_id, op_id, "Condition");
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>> inputs;
std::vector<std::shared_ptr<ONNX_NAMESPACE::ValueInfoProto>> outputs;
// make loop iter
auto iter_name = MapperHelper::Get()->GenName("loop.iter");
TensorInfo iter_info(iter_name, std::vector<int64_t>(1, 1),
P2ODataType::INT64);
inputs.push_back(std::move(MakeValueInfo(iter_info)));
std::set<std::string> input_names;
// make cond
inputs.push_back(std::move(MakeValueInfo(cond_info[0])));
input_names.insert(cond_info[0].name);
// other inputs
  outputs.push_back(std::move(MakeValueInfo(cond_info[0])));
for (size_t i = 0; i < x_info.size(); ++i) {
if (x_info[i].is_tensor_array) {
continue;
}
if (input_names.find(x_info[i].name) != input_names.end()) {
continue;
}
input_names.insert(x_info[i].name);
inputs.push_back(std::move(MakeValueInfo(x_info[i])));
outputs.push_back(std::move(MakeValueInfo(x_info[i])));
}
for (size_t i = 0; i < x_info.size(); ++i) {
if (x_info[i].is_tensor_array) {
if (input_names.find(x_info[i].name) != input_names.end()) {
continue;
}
input_names.insert(x_info[i].name);
outputs.push_back(std::move(MakeValueInfo(x_info[i])));
}
}
// make op nodes
OnnxHelper loop_helper;
loop_helper.SetOpsetVersion(opset_version);
for (auto i = 0; i < parser.NumOfOps(sub_block_idx); ++i) {
auto op = parser.GetOpDesc(sub_block_idx, i);
ExportOp(parser, &loop_helper, opset_version, sub_block_idx, i, verbose);
}
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>> parameters;
ProcessGraphDumplicateNames(&parameters, &inputs, &outputs,
&loop_helper.nodes);
std::map<std::string, std::string> renamer;
for (auto& item : inputs) {
auto name = MapperHelper::Get()->GenName("loop.input");
renamer[item->name()] = name;
item->set_name(name);
}
for (auto& item : loop_helper.nodes) {
for (size_t i = 0; i < item->input_size(); ++i) {
if (renamer.find(item->input(i)) != renamer.end()) {
auto updated_name = renamer[item->input(i)];
while (renamer.find(updated_name) != renamer.end()) {
updated_name = renamer[updated_name];
}
*(item->mutable_input(i)) = updated_name;
}
}
}
for (auto& item : outputs) {
if (renamer.find(item->name()) != renamer.end()) {
auto updated_name = renamer[item->name()];
while (renamer.find(updated_name) != renamer.end()) {
updated_name = renamer[updated_name];
}
item->set_name(updated_name);
}
}
// // construct a onnx model proto
// // consider to optimize the subgraph
// auto model = std::make_shared<ONNX_NAMESPACE::ModelProto>();
// model->set_ir_version(ONNX_NAMESPACE::IR_VERSION);
// auto graph = model->mutable_graph();
// auto graph_name = MapperHelper::Get()->GenName("Model from
// PaddlePaddle(Loop).");
// graph->set_name(graph_name);
// auto opset_id = model->add_opset_import();
// opset_id->set_domain("");
// opset_id->set_version(loop_helper->GetOpsetVersion());
auto graph_name = MapperHelper::Get()->GenName("paddle.loop");
auto graph = std::make_shared<ONNX_NAMESPACE::GraphProto>();
graph->set_name(graph_name);
for (auto& item : inputs) {
*(graph->add_input()) = *(item.get());
}
for (auto& item : loop_helper.nodes) {
*(graph->add_node()) = (*item.get());
}
for (auto& item : outputs) {
*(graph->add_output()) = (*item.get());
}
// fake iter
auto fake_iter = helper->Constant(ONNX_NAMESPACE::TensorProto::INT64,
std::vector<int64_t>(1, 1024));
std::vector<std::string> x_names;
x_names.push_back(fake_iter);
x_names.push_back(cond_info[0].name);
std::vector<std::string> out_names;
for (size_t i = 0; i < x_info.size(); ++i) {
if (x_info[i].is_tensor_array) {
continue;
}
if (std::find(x_names.begin(), x_names.end(), x_info[i].name) != x_names.end()) {
continue;
}
x_names.push_back(x_info[i].name);
out_names.push_back(x_info[i].name);
}
for (size_t i = 0; i < x_info.size(); ++i) {
if (x_info[i].is_tensor_array) {
if (std::find(x_names.begin(), x_names.end(), x_info[i].name) != x_names.end()) {
continue;
}
out_names.push_back(x_info[i].name);
}
}
auto loop_node = helper->MakeNode("Loop", x_names, out_names);
auto attr = loop_node->add_attribute();
attr->set_name("body");
attr->set_type(ONNX_NAMESPACE::AttributeProto::GRAPH);
*(attr->mutable_g()) = *(graph.get());
}
} // namespace paddle2onnx

paddle2onnx/mapper/mapper.h Executable file
@@ -0,0 +1,246 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle2onnx/mapper/data_helper.h"
#include "paddle2onnx/mapper/onnx_helper.h"
#include "paddle2onnx/mapper/register_mapper.h"
#include "paddle2onnx/parser/parser.h"
namespace paddle2onnx {
class Mapper {
public:
Mapper() {}
Mapper(const PaddleParser& p, OnnxHelper* helper, int32_t block_id,
int32_t op_id, std::string name = {})
: parser_(&p) {
block_idx_ = block_id;
op_idx_ = op_id;
helper_ = helper;
name_ = name;
}
  // This flag controls whether the op is exported as a custom operator;
  // if export_as_custom_op = true, it will be exported as described in
  // custom_op_info
bool export_as_custom_op = false;
// [exported_op_name, domain]
std::string custom_op_name;
std::string deploy_backend;
P2OLogger Logger(const bool& verbose, const int32_t& opset_version = 100) {
bool v = verbose;
if (opset_version <= helper_->GetOpsetVersion()) {
v = false;
}
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
std::string output_name = "";
if (op.outputs(0).arguments_size() > 0) {
output_name = op.outputs(0).arguments(0);
}
std::string op_type = op.type();
std::string prefix = "[Paddle2ONNX] [" + op_type + ": " + output_name + "]";
return P2OLogger(v, prefix);
}
P2OLogger Error() {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
std::string output_name = "";
if (op.outputs(0).arguments_size() > 0) {
output_name = op.outputs(0).arguments(0);
}
std::string op_type = op.type();
std::string prefix =
"[ERROR][Paddle2ONNX] [" + op_type + ": " + output_name + "]";
return P2OLogger(true, prefix);
}
P2OLogger Warn() {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
std::string output_name = "";
if (op.outputs(0).arguments_size() > 0) {
output_name = op.outputs(0).arguments(0);
}
std::string op_type = op.type();
std::string prefix =
"[WARN][Paddle2ONNX] [" + op_type + ": " + output_name + "]";
return P2OLogger(true, prefix);
}
  // Some operators are not implemented very well, e.g. the output may not be
  // the same. We mark these operators as experimental; they require double
  // checking after the model is exported.
virtual void MarkAsExperimentalOp() { is_experimental_op_ = true; }
virtual bool IsExperimentalOp() const { return is_experimental_op_; }
  // The return value is in [7, MAX_ONNX_OPSET_VERSION], representing the
  // minimum opset_version;
  // if the return value < 0, the op is not supported.
virtual int32_t GetMinOpset(bool verbose = false) { return 7; }
virtual bool IsExportAsCustomOp() { return export_as_custom_op; }
void Run() {
int32_t opset_version = helper_->GetOpsetVersion();
Assert(opset_version >= 7 && opset_version <= MAX_ONNX_OPSET_VERSION,
"[Paddle2ONNX] Only support opset_version in range of [7, " +
std::to_string(MAX_ONNX_OPSET_VERSION) + "].");
if (IsExportAsCustomOp()) {
return ExportAsCustomOp();
}
if (opset_version == 16) {
Opset16();
} else if (opset_version == 15) {
Opset15();
} else if (opset_version == 14) {
Opset14();
} else if (opset_version == 13) {
Opset13();
} else if (opset_version == 12) {
Opset12();
} else if (opset_version == 11) {
Opset11();
} else if (opset_version == 10) {
Opset10();
} else if (opset_version == 9) {
Opset9();
} else if (opset_version == 8) {
Opset8();
} else {
Opset7();
}
}
virtual void ExportAsCustomOp() {
Assert(false,
"Operator " + name_ + "doesn't support export as custom operator.");
}
virtual void Opset16() { Opset15(); }
virtual void Opset15() { Opset14(); }
virtual void Opset14() { Opset13(); }
virtual void Opset13() { Opset12(); }
virtual void Opset12() { Opset11(); }
virtual void Opset11() { Opset10(); }
virtual void Opset10() { Opset9(); }
virtual void Opset9() { Opset8(); }
virtual void Opset8() { Opset7(); }
virtual void Opset7() {
Assert(false,
"This error shouldn't happend, please report to "
"https://github.com/PaddlePaddle/Paddle2ONNX.git.");
}
virtual ~Mapper() = default;
bool is_experimental_op_ = false;
const PaddleParser* parser_;
OnnxHelper* helper_;
int32_t block_idx_;
int32_t op_idx_;
std::string name_; // op transform name
std::string OpType() const {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
return op.type();
}
std::string Name() const { return name_; }
bool HasInput(const std::string& name) const {
return parser_->OpHasInput(block_idx_, op_idx_, name);
}
bool HasOutput(const std::string& name) const {
return parser_->OpHasOutput(block_idx_, op_idx_, name);
}
std::vector<TensorInfo> GetInput(const std::string& name) const {
return parser_->GetOpInput(block_idx_, op_idx_, name);
}
std::vector<TensorInfo> GetOutput(const std::string& name) const {
return parser_->GetOpOutput(block_idx_, op_idx_, name);
}
  // Check whether Attribute(name)'s type is Var or Vars.
bool IsAttrVar(const std::string& name) const {
return parser_->OpIsAttrVar(block_idx_, op_idx_, name);
}
// Get TensorInfo(s) from Attribute Var or Vars.
std::vector<TensorInfo> GetAttrVar(const std::string& name) const {
return parser_->GetOpAttrVar(block_idx_, op_idx_, name);
}
bool HasAttr(const std::string& name) const {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
return parser_->OpHasAttr(op, name);
}
void GetAttr(const std::string& name, int64_t* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
void GetAttr(const std::string& name, float* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
void GetAttr(const std::string& name, bool* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
void GetAttr(const std::string& name, std::string* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
void GetAttr(const std::string& name, std::vector<int64_t>* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
void GetAttr(const std::string& name, std::vector<float>* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
void GetAttr(const std::string& name, std::vector<double>* val) {
auto& op = parser_->GetOpDesc(block_idx_, op_idx_);
parser_->GetOpAttr(op, name, val);
}
bool IsConstantInput(const std::string& input_key) const {
auto input_info = GetInput(input_key);
return parser_->IsConstantTensor(block_idx_, input_info[0].name);
}
bool IsConstant(const TensorInfo& info) const {
return parser_->IsConstantTensor(block_idx_, info.name);
}
template <typename T>
bool TryGetInputValue(const std::string& input_key, std::vector<T>* data) {
auto input_info = GetInput(input_key);
return parser_->TryGetTensorValue(block_idx_, input_info[0].name, data);
}
template <typename T>
bool TryGetValue(const TensorInfo& info, std::vector<T>* data) {
return parser_->TryGetTensorValue(block_idx_, info.name, data);
}
};
} // namespace paddle2onnx
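
Mapper::Run above dispatches on the target opset, and each OpsetN defaults to Opset(N-1), so a concrete mapper only overrides the lowest versions where its conversion actually changes. A self-contained sketch of this fallback cascade (class names are illustrative):

#include <iostream>

struct MiniMapper {
  virtual void Opset9() { Opset8(); }
  virtual void Opset8() { Opset7(); }
  virtual void Opset7() { std::cout << "opset 7 conversion" << std::endl; }
  virtual ~MiniMapper() = default;
};

struct MyOpMapper : MiniMapper {
  // Only opset 9 gets a dedicated conversion; opset 8 falls through to 7.
  void Opset9() override { std::cout << "opset 9 conversion" << std::endl; }
};

int main() {
  MyOpMapper m;
  m.Opset9();  // uses the dedicated opset 9 path
  m.Opset8();  // falls back to the opset 7 path
  return 0;
}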

paddle2onnx/mapper/nn/affine_channel.cc
@@ -0,0 +1,44 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/affine_channel.h"
namespace paddle2onnx {
REGISTER_MAPPER(affine_channel, AffineChannelMapper)
int32_t AffineChannelMapper::GetMinOpset(bool verbose) {
if (data_layout_ == "NHWC") {
Error() << "Data format NHWC is not supported." << std::endl;
    return -1;
}
return 7;
}
void AffineChannelMapper::Opset7() {
auto x_info = GetInput("X");
auto scale_info = GetInput("Scale");
auto bias_info = GetInput("Bias");
auto out_info = GetOutput("Out");
auto scale = scale_info[0].name;
auto bias = bias_info[0].name;
if (scale_info[0].shape.size() <= 1) {
scale = helper_->Reshape(scale, {1, -1, 1, 1});
bias = helper_->Reshape(bias, {1, -1, 1, 1});
}
auto out = helper_->MakeNode("Mul", {x_info[0].name, scale})->output(0);
helper_->MakeNode("Add", {out, bias}, {out_info[0].name});
}
} // namespace paddle2onnx

paddle2onnx/mapper/nn/affine_channel.h
@@ -0,0 +1,38 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class AffineChannelMapper : public Mapper {
public:
AffineChannelMapper(const PaddleParser& p, OnnxHelper* helper,
int64_t block_id, int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("data_layout", &data_layout_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::string data_layout_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/batch_norm.cc
@@ -0,0 +1,45 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/batch_norm.h"
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(batch_norm, BatchNormMapper)
void BatchNormMapper::Opset7() {
auto input_info = GetInput("X");
auto scale_info = GetInput("Scale");
auto bias_info = GetInput("Bias");
auto mean_info = GetInput("Mean");
auto variance_info = GetInput("Variance");
auto output_info = GetOutput("Y");
auto node = helper_->MakeNode(
"BatchNormalization",
{input_info[0].name, scale_info[0].name, bias_info[0].name,
mean_info[0].name, variance_info[0].name},
{output_info[0].name});
if (helper_->GetOpsetVersion() < 9) {
int64_t spatial = 1;
AddAttribute(node, "spatial", spatial);
}
AddAttribute(node, "epsilon", epsilon_);
AddAttribute(node, "momentum", momentum_);
}
} // namespace paddle2onnx

paddle2onnx/mapper/nn/batch_norm.h
@@ -0,0 +1,39 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class BatchNormMapper : public Mapper {
public:
BatchNormMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("epsilon", &epsilon_);
GetAttr("momentum", &momentum_);
}
void Opset7();
private:
float epsilon_;
float momentum_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/conv2d.cc
@@ -0,0 +1,81 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/conv2d.h"
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(conv2d, Conv2dMapper)
REGISTER_MAPPER(depthwise_conv2d, Conv2dMapper)
int32_t Conv2dMapper::GetMinOpset(bool verbose) {
// NHWC is not supported
if (data_format_ == "NHWC") {
Error() << "Cannot support input with NHWC format." << std::endl;
return -1;
}
if (padding_algorithm_ == "EXPLICIT") {
if (paddings_.size() != 2 && paddings_.size() != 4) {
Error() << "While padding_algorithm is EXPLICIT, size of paddings should "
"be 2 or 4."
<< std::endl;
return -1;
}
}
if (dilations_[0] != 1 || dilations_[1] != 1) {
if (padding_algorithm_ == "SAME") {
Error() << "While dilations != 1, cannot support padding = 'SAME'."
<< std::endl;
return -1;
}
}
return 7;
}
void Conv2dMapper::Opset7() {
auto kernel_info = GetInput("Filter");
auto input_info = GetInput("Input");
auto output_info = GetOutput("Output");
auto node = helper_->MakeNode(
"Conv", {input_info[0].name, kernel_info[0].name}, {output_info[0].name});
AddAttribute(node, "dilations", dilations_);
std::vector<int64_t> kernel_shape = {kernel_info[0].shape[2],
kernel_info[0].shape[3]};
AddAttribute(node, "kernel_shape", kernel_shape);
AddAttribute(node, "strides", strides_);
AddAttribute(node, "group", groups_);
if (padding_algorithm_ == "SAME") {
std::string auto_pad = "SAME_UPPER";
AddAttribute(node, "auto_pad", auto_pad);
} else if (padding_algorithm_ == "VALID") {
std::string auto_pad = "VALID";
AddAttribute(node, "auto_pad", auto_pad);
} else {
std::vector<int64_t> paddings;
if (paddings_.size() == 2) {
paddings.insert(paddings.begin(), paddings_.begin(), paddings_.end());
paddings.insert(paddings.begin(), paddings_.begin(), paddings_.end());
} else {
paddings.assign(paddings_.begin(), paddings_.end());
paddings[1] = paddings_[2];
paddings[2] = paddings_[1];
}
AddAttribute(node, "pads", paddings);
}
}
} // namespace paddle2onnx
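
A worked example (illustrative, not part of the diff) of the padding reorder in Conv2dMapper::Opset7 above: assuming Paddle's explicit 4-element paddings are ordered [top, bottom, left, right], swapping positions 1 and 2 yields ONNX's [top, left, bottom, right], while a 2-element [h, w] pair is tiled.

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> ToOnnxPads(const std::vector<int64_t>& p) {
  if (p.size() == 2) {
    return {p[0], p[1], p[0], p[1]};  // [h, w] -> [h, w, h, w]
  }
  return {p[0], p[2], p[1], p[3]};    // swap positions 1 and 2
}

int main() {
  assert((ToOnnxPads({1, 2, 3, 4}) == std::vector<int64_t>{1, 3, 2, 4}));
  assert((ToOnnxPads({1, 2}) == std::vector<int64_t>{1, 2, 1, 2}));
  return 0;
}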

paddle2onnx/mapper/nn/conv2d.h
@@ -0,0 +1,48 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class Conv2dMapper : public Mapper {
public:
Conv2dMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("groups", &groups_);
GetAttr("dilations", &dilations_);
GetAttr("strides", &strides_);
GetAttr("paddings", &paddings_);
GetAttr("padding_algorithm", &padding_algorithm_);
GetAttr("data_format", &data_format_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::vector<int64_t> dilations_;
std::vector<int64_t> strides_;
std::vector<int64_t> paddings_;
std::string padding_algorithm_;
std::string data_format_;
int64_t groups_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/conv2d_transpose.cc
@@ -0,0 +1,66 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/conv2d_transpose.h"
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(conv2d_transpose, Conv2dTransposeMapper)
REGISTER_MAPPER(depthwise_conv2d_transpose, Conv2dTransposeMapper)
int32_t Conv2dTransposeMapper::GetMinOpset(bool verbose) {
// NHWC is not supported
if (data_format_ == "NHWC") {
Error() << "[ERROR] Cannot support NHWC format for operator "
"conv2d_transpose/depthwise_conv2d_transpose."
<< std::endl;
return -1;
}
return 7;
}
void Conv2dTransposeMapper::Opset7() {
auto kernel_info = GetInput("Filter");
auto input_info = GetInput("Input");
auto output_info = GetOutput("Output");
auto input = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
auto kernel = helper_->AutoCast(kernel_info[0].name, kernel_info[0].dtype,
P2ODataType::FP32);
auto node = helper_->MakeNode("ConvTranspose", {input, kernel});
AddAttribute(node, "dilations", dilations_);
std::vector<int64_t> kernel_shape = {kernel_info[0].shape[2],
kernel_info[0].shape[3]};
AddAttribute(node, "kernel_shape", kernel_shape);
AddAttribute(node, "strides", strides_);
AddAttribute(node, "group", groups_);
if (padding_algorithm_ == "SAME") {
std::string auto_pad = "SAME_UPPER";
AddAttribute(node, "auto_pad", auto_pad);
} else if (padding_algorithm_ == "VALID") {
std::string auto_pad = "VALID";
AddAttribute(node, "auto_pad", auto_pad);
} else {
AddAttribute(node, "pads", paddings_);
}
if (output_padding_.size() > 0) {
AddAttribute(node, "output_padding", output_padding_);
}
helper_->AutoCast(node->output(0), output_info[0].name, P2ODataType::FP32,
output_info[0].dtype);
}
} // namespace paddle2onnx

paddle2onnx/mapper/nn/conv2d_transpose.h
@@ -0,0 +1,58 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class Conv2dTransposeMapper : public Mapper {
public:
Conv2dTransposeMapper(const PaddleParser& p, OnnxHelper* helper,
int64_t block_id, int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("groups", &groups_);
GetAttr("dilations", &dilations_);
GetAttr("strides", &strides_);
GetAttr("paddings", &paddings_);
GetAttr("padding_algorithm", &padding_algorithm_);
GetAttr("output_padding", &output_padding_);
GetAttr("data_format", &data_format_);
if (paddings_.size() == 2) {
paddings_.push_back(paddings_[0]);
paddings_.push_back(paddings_[1]);
} else if (paddings_.size() == 4) {
      int64_t tmp = paddings_[1];
paddings_[1] = paddings_[2];
paddings_[2] = tmp;
}
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::vector<int64_t> dilations_;
std::vector<int64_t> strides_;
std::vector<int64_t> paddings_;
std::vector<int64_t> output_padding_;
std::string padding_algorithm_;
std::string data_format_;
int64_t groups_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/conv3d.cc
@@ -0,0 +1,82 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/conv3d.h"
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(conv3d, Conv3dMapper)
int32_t Conv3dMapper::GetMinOpset(bool verbose) {
// NDHWC is not supported
if (data_format_ == "NDHWC") {
Error() << "Cannot support input with NDHWC format." << std::endl;
return -1;
}
if (padding_algorithm_ == "EXPLICIT") {
if (paddings_.size() != 3 && paddings_.size() != 6) {
Error() << "While padding_algorithm is EXPLICIT, size of paddings should "
"be 3 or 6."
<< std::endl;
return -1;
}
}
if (dilations_[0] != 1 || dilations_[1] != 1 || dilations_[2] != 1) {
if (padding_algorithm_ == "SAME") {
Error() << "While dilations != 1, cannot support padding = 'SAME'."
<< std::endl;
return -1;
}
}
return 7;
}
void Conv3dMapper::Opset7() {
auto kernel_info = GetInput("Filter");
auto input_info = GetInput("Input");
auto output_info = GetOutput("Output");
auto node = helper_->MakeNode(
"Conv", {input_info[0].name, kernel_info[0].name}, {output_info[0].name});
AddAttribute(node, "dilations", dilations_);
std::vector<int64_t> kernel_shape = {kernel_info[0].shape[2],
kernel_info[0].shape[3],
kernel_info[0].shape[4]};
AddAttribute(node, "kernel_shape", kernel_shape);
AddAttribute(node, "strides", strides_);
AddAttribute(node, "group", groups_);
if (padding_algorithm_ == "SAME") {
std::string auto_pad = "SAME_UPPER";
AddAttribute(node, "auto_pad", auto_pad);
} else if (padding_algorithm_ == "VALID") {
std::string auto_pad = "VALID";
AddAttribute(node, "auto_pad", auto_pad);
} else {
std::vector<int64_t> paddings;
if (paddings_.size() == 3) {
paddings.insert(paddings.begin(), paddings_.begin(), paddings_.end());
paddings.insert(paddings.begin(), paddings_.begin(), paddings_.end());
} else {
std::vector<int64_t> index = {0, 2, 4, 1, 3, 5};
for (auto &i : index) {
paddings.push_back(paddings_[i]);
}
}
AddAttribute(node, "pads", paddings);
}
}
} // namespace paddle2onnx
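
The 3D case above uses the index vector {0, 2, 4, 1, 3, 5} for the same purpose; assuming Paddle stores begin/end pairs per spatial axis, this regroups them into ONNX's all-begins-then-all-ends order. A small check (illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> ToOnnx3dPads(const std::vector<int64_t>& p) {
  const std::vector<int64_t> index = {0, 2, 4, 1, 3, 5};
  std::vector<int64_t> pads;
  for (auto i : index) {
    pads.push_back(p[i]);
  }
  return pads;
}

int main() {
  // [d_b, d_e, h_b, h_e, w_b, w_e] -> [d_b, h_b, w_b, d_e, h_e, w_e]
  assert((ToOnnx3dPads({1, 2, 3, 4, 5, 6}) ==
          std::vector<int64_t>{1, 3, 5, 2, 4, 6}));
  return 0;
}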

paddle2onnx/mapper/nn/conv3d.h Executable file
@@ -0,0 +1,48 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class Conv3dMapper : public Mapper {
public:
Conv3dMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("groups", &groups_);
GetAttr("dilations", &dilations_);
GetAttr("strides", &strides_);
GetAttr("paddings", &paddings_);
GetAttr("padding_algorithm", &padding_algorithm_);
GetAttr("data_format", &data_format_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::vector<int64_t> dilations_;
std::vector<int64_t> strides_;
std::vector<int64_t> paddings_;
std::string padding_algorithm_;
std::string data_format_;
int64_t groups_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/data_norm.cc
@@ -0,0 +1,43 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/data_norm.h"
namespace paddle2onnx {
REGISTER_MAPPER(data_norm, DataNormMapper)
int32_t DataNormMapper::GetMinOpset(bool verbose) {
if (slot_dim_ > 0) {
Error() << "slot_dim > 0 is not supported." << std::endl;
return -1;
}
return 7;
}
void DataNormMapper::Opset7() {
auto input_info = GetInput("X");
auto batch_size_info = GetInput("BatchSize");
auto batch_sum_info = GetInput("BatchSum");
auto batch_square_sum_info = GetInput("BatchSquareSum");
auto output_info = GetOutput("Y");
Assert(slot_dim_ <= 0, "slot_dim > 0 is not supported.");
auto mean_arr = helper_->MakeNode("Div", {batch_sum_info[0].name, batch_size_info[0].name})->output(0);
auto scale_arr = helper_->MakeNode("Div", {batch_size_info[0].name, batch_square_sum_info[0].name})->output(0);
scale_arr = helper_->MakeNode("Sqrt", {scale_arr})->output(0);
auto out = helper_->MakeNode("Sub", {input_info[0].name, mean_arr})->output(0);
helper_->MakeNode("Mul" ,{out, scale_arr}, {output_info[0].name});
}
} // namespace paddle2onnx
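
A scalar sanity check (illustrative) of the graph built in DataNormMapper::Opset7 above, which computes y = (x - batch_sum / batch_size) * sqrt(batch_size / batch_square_sum):

#include <cmath>
#include <cstdio>

int main() {
  double x = 3.0, batch_size = 4.0, batch_sum = 8.0, batch_square_sum = 20.0;
  double mean = batch_sum / batch_size;                     // the Div node
  double scale = std::sqrt(batch_size / batch_square_sum);  // Div + Sqrt
  double y = (x - mean) * scale;                            // Sub + Mul
  std::printf("y = %f\n", y);  // (3 - 2) * sqrt(0.2)
  return 0;
}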

paddle2onnx/mapper/nn/data_norm.h
@@ -0,0 +1,44 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class DataNormMapper : public Mapper {
public:
DataNormMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("data_layout", &data_layout_);
GetAttr("epsilon", &epsilon_);
if (HasAttr("slot_dim")) {
GetAttr("slot_dim", &slot_dim_);
}
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
std::string data_layout_;
float epsilon_;
int64_t slot_dim_ = -1;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/dropout.cc
@@ -0,0 +1,66 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/dropout.h"
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(dropout, DropoutMapper)
int32_t DropoutMapper::GetMinOpset(bool verbose) {
if (dropout_implementation_ != "downgrade_in_infer" &&
dropout_implementation_ != "upscale_in_train") {
Error() << "Drop out type: " << dropout_implementation_
<< " is not supported yet." << std::endl;
return -1;
}
if (dropout_implementation_ == "downgrade_in_infer") {
if (IsAttrVar("dropout_prob") &&
!IsConstant(GetAttrVar("dropout_prob")[0])) {
Error() << "While Attribute(dropout_prob)'s type is Tensor, it's not "
"supported "
"unless it's a constant tensor when dropout_implementation is "
"downgrade_in_infer."
<< std::endl;
return -1;
}
}
return 7;
}
void DropoutMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
if (dropout_implementation_ == "upscale_in_train") {
helper_->MakeNode("Identity", {input_info[0].name}, {output_info[0].name});
} else {
if (IsAttrVar("dropout_prob")) {
auto prob_info = GetAttrVar("dropout_prob");
std::vector<float> temp;
TryGetValue(prob_info[0], &temp);
dropout_prob_ = temp[0];
} else {
GetAttr("dropout_prob", &dropout_prob_);
}
std::vector<float> value = {1 - dropout_prob_};
std::string scale_node =
helper_->Constant(GetOnnxDtype(input_info[0].dtype), value);
helper_->MakeNode("Mul", {input_info[0].name, scale_node},
{output_info[0].name});
}
}
} // namespace paddle2onnx
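
A tiny numeric illustration (not part of the diff) of the two inference-time behaviors DropoutMapper::Opset7 handles: "upscale_in_train" exports as an Identity node, while "downgrade_in_infer" scales the input by (1 - dropout_prob).

#include <cstdio>
#include <string>

float InferDropout(float x, float prob, const std::string& impl) {
  if (impl == "upscale_in_train") {
    return x;                // the Identity node
  }
  return x * (1.0f - prob);  // Mul with a constant (1 - prob)
}

int main() {
  std::printf("%f\n", InferDropout(2.0f, 0.5f, "upscale_in_train"));    // 2.0
  std::printf("%f\n", InferDropout(2.0f, 0.5f, "downgrade_in_infer"));  // 1.0
  return 0;
}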

paddle2onnx/mapper/nn/dropout.h Executable file
@@ -0,0 +1,37 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class DropoutMapper : public Mapper {
public:
DropoutMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("dropout_implementation", &dropout_implementation_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
float dropout_prob_ = 0.0;
std::string dropout_implementation_ = "upscale_in_train";
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/group_norm.cc
@@ -0,0 +1,75 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/group_norm.h"
#include <cmath>
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(group_norm, GroupNormMapper)
int32_t GroupNormMapper::GetMinOpset(bool verbose) {
auto input_info = GetInput("X");
if (input_info[0].Rank() != 4) {
Error() << "Only support 4D-Tensor as input for GroupNorm" << std::endl;
return -1;
}
return 7;
}
void GroupNormMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Y");
std::vector<int64_t> shape_val = {0, groups_, -1};
std::string shape =
helper_->Constant(GetOnnxDtype(P2ODataType::INT64), shape_val);
auto reshape_input =
helper_->MakeNode("Reshape", {input_info[0].name, shape});
std::string scale_ = helper_->Constant(GetOnnxDtype(input_info[0].dtype),
std::vector<float>(groups_, 1.0));
std::string bias_ = helper_->Constant(GetOnnxDtype(input_info[0].dtype),
std::vector<float>(groups_, 0.0));
auto reshaped_output = helper_->MakeNode(
"InstanceNormalization", {reshape_input->output(0), scale_, bias_});
AddAttribute(reshaped_output, "epsilon", epsilon_);
auto origin_shape = helper_->MakeNode("Shape", {input_info[0].name});
if (HasInput("Scale") && HasInput("Bias")) {
auto scale_info = GetInput("Scale");
auto bias_info = GetInput("Bias");
auto output = helper_->MakeNode(
"Reshape", {reshaped_output->output(0), origin_shape->output(0)});
std::string unsqueezed_scale =
helper_->Unsqueeze(scale_info[0].name, {1, 2});
std::string unsqueezed_bias = helper_->Unsqueeze(bias_info[0].name, {1, 2});
auto scale_output =
helper_->MakeNode("Mul", {output->output(0), unsqueezed_scale});
helper_->MakeNode("Add", {scale_output->output(0), unsqueezed_bias},
{output_info[0].name});
} else {
helper_->MakeNode("Reshape",
{reshaped_output->output(0), origin_shape->output(0)},
{output_info[0].name});
}
}
} // namespace paddle2onnx
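
The conversion above implements GroupNorm with an InstanceNormalization trick: reshape [N, C, H, W] to [N, G, -1], normalize each group with unit scale and zero bias, reshape back, then apply the per-channel affine. A shape walkthrough with made-up sizes:

#include <cstdint>
#include <iostream>

int main() {
  const int64_t N = 2, C = 8, H = 4, W = 4, G = 4;  // made-up sizes
  const int64_t flattened = (C / G) * H * W;        // what the -1 resolves to
  std::cout << "reshape [" << N << ", " << C << ", " << H << ", " << W
            << "] -> [" << N << ", " << G << ", " << flattened << "]"
            << std::endl;
  // InstanceNormalization then normalizes each of the N * G groups
  // independently, which matches group normalization; the later Reshape
  // restores [N, C, H, W] before the per-channel scale/bias is applied.
  return 0;
}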

paddle2onnx/mapper/nn/group_norm.h
@@ -0,0 +1,40 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class GroupNormMapper : public Mapper {
public:
GroupNormMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("groups", &groups_);
GetAttr("epsilon", &epsilon_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
int64_t groups_;
float epsilon_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/instance_norm.cc
@@ -0,0 +1,59 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/instance_norm.h"
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(instance_norm, InstanceNormMapper)
int32_t InstanceNormMapper::GetMinOpset(bool verbose) {
auto input_info = GetInput("X");
int num_groups = input_info[0].shape[1];
if (num_groups < 0) {
Error() << "The dimension in axis=1 of input tensor must be known, but now it's unknown." << std::endl;
return -1;
}
return 7;
}
void InstanceNormMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Y");
  int64_t num_channels = input_info[0].shape[1];
  std::string scale = "";
  if (HasInput("Scale")) {
    scale = GetInput("Scale")[0].name;
  } else {
    scale = helper_->Constant(GetOnnxDtype(input_info[0].dtype),
                              std::vector<float>(num_channels, 1.0));
  }
  std::string bias = "";
  if (HasInput("Bias")) {
    bias = GetInput("Bias")[0].name;
  } else {
    bias = helper_->Constant(GetOnnxDtype(input_info[0].dtype),
                             std::vector<float>(num_channels, 0.0));
}
auto node = helper_->MakeNode(
"InstanceNormalization",
{input_info[0].name, scale, bias},
{output_info[0].name});
AddAttribute(node, "epsilon", epsilon_);
}
} // namespace paddle2onnx


@@ -0,0 +1,38 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class InstanceNormMapper : public Mapper {
public:
InstanceNormMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("epsilon", &epsilon_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
private:
float epsilon_;
};
} // namespace paddle2onnx


@@ -0,0 +1,136 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/interpolate.h"
namespace paddle2onnx {
REGISTER_MAPPER(bilinear_interp, InterpolateMapper)
REGISTER_MAPPER(bilinear_interp_v2, InterpolateMapper)
REGISTER_MAPPER(nearest_interp_v2, InterpolateMapper)
REGISTER_MAPPER(bicubic_interp_v2, InterpolateMapper)
REGISTER_MAPPER(linear_interp_v2, InterpolateMapper)
REGISTER_MAPPER(trilinear_interp_v2, InterpolateMapper)
int32_t InterpolateMapper::GetMinOpset(bool verbose) {
if (data_layout_ == "NHWC") {
Error() << "Data format of NHWC is not supported." << std::endl;
return -1;
}
auto x_info = GetInput("X");
  if (x_info[0].Rank() > 5 || x_info[0].Rank() < 3) {
    Error() << "Only 3D/4D/5D input tensors are supported, but the input rank is "
            << x_info[0].Rank() << std::endl;
return -1;
}
Logger(verbose, 11) << RequireOpset(11) << std::endl;
return 11;
}
std::string InterpolateMapper::ComputeOutSize() {
bool has_out_size = HasInput("OutSize");
bool has_size_tensor = HasInput("SizeTensor");
if (has_out_size) {
auto out_size_info = GetInput("OutSize");
return helper_->AutoCast(out_size_info[0].name, out_size_info[0].dtype,
P2ODataType::INT64);
} else {
auto size_tensor_info = GetInput("SizeTensor");
return helper_->ConcatIndices(size_tensor_info);
}
}
std::string InterpolateMapper::ComputeScale() {
auto scale_info = GetInput("Scale");
auto scale = helper_->AutoCast(scale_info[0].name, scale_info[0].dtype,
P2ODataType::FP32);
auto padding = helper_->Constant(ONNX_NAMESPACE::TensorProto::FLOAT,
std::vector<float>(2, 1.0));
scale = helper_->Concat({padding, scale}, 0);
return scale;
}
void InterpolateMapper::Opset11() {
auto x_info = GetInput("X");
auto out_info = GetOutput("Out");
std::string coordinate_transformation_mode = "half_pixel";
auto resize_type = resize_mapper_[method_];
if (align_corners_) {
coordinate_transformation_mode = "align_corners";
} else if (resize_type == "nearest") {
coordinate_transformation_mode = "asymmetric";
} else if (align_mode_ == 1 && resize_type != "cubic") {
coordinate_transformation_mode = "asymmetric";
}
std::string scale = "";
std::string size = "";
bool has_out_size = HasInput("OutSize");
bool has_size_tensor = HasInput("SizeTensor");
bool has_scale_tensor = HasInput("Scale");
if (has_out_size || has_size_tensor) {
size = ComputeOutSize();
} else if (has_scale_tensor) {
scale = ComputeScale();
} else {
// get size or scale from attribute
if (out_d_ > 0 || out_w_ > 0 || out_h_ > 0) {
std::vector<int64_t> out_size;
if (x_info[0].Rank() == 5) {
out_size.push_back(out_d_);
out_size.push_back(out_h_);
}
if (x_info[0].Rank() == 4) {
out_size.push_back(out_h_);
}
out_size.push_back(out_w_);
size = helper_->Constant(ONNX_NAMESPACE::TensorProto::INT64, out_size);
} else {
std::vector<float> scale_;
GetAttr("scale", &scale_);
float padding = 1.0;
scale_.insert(scale_.begin(), padding);
scale_.insert(scale_.begin(), padding);
scale = helper_->Constant(ONNX_NAMESPACE::TensorProto::FLOAT, scale_);
}
}
std::string roi = helper_->Constant(ONNX_NAMESPACE::TensorProto::FLOAT, std::vector<float>());
if (scale == "") {
    // have to generate an empty tensor for the Resize op
scale = helper_->Constant(ONNX_NAMESPACE::TensorProto::FLOAT,
std::vector<float>());
}
if (size != "") {
auto ipt_shape = helper_->MakeNode("Shape", {x_info[0].name})->output(0);
auto nc = helper_->Slice(ipt_shape, {0}, {0}, {2});
size = helper_->Concat({nc, size}, 0);
}
std::shared_ptr<ONNX_NAMESPACE::NodeProto> node;
if (size != "") {
node = helper_->MakeNode("Resize", {x_info[0].name, roi, scale, size},
{out_info[0].name});
} else {
node = helper_->MakeNode("Resize", {x_info[0].name, roi, scale},
{out_info[0].name});
}
Assert(resize_mapper_.find(OpType()) != resize_mapper_.end(),
"Cannot find " + OpType() + " in resize_mapper.");
AddAttribute(node, "mode", resize_mapper_[OpType()]);
AddAttribute(node, "coordinate_transformation_mode",
coordinate_transformation_mode);
if (resize_mapper_[OpType()] == "nearest" &&
coordinate_transformation_mode == "asymmetric") {
AddAttribute(node, "nearest_mode", "floor");
}
}
} // namespace paddle2onnx
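For reference, the coordinate_transformation_mode selected above tells ONNX Resize (opset 11) how an output index x_out maps back to an input coordinate, following the Resize specification:

\[
\text{half\_pixel: } x_{in} = \frac{x_{out} + 0.5}{s} - 0.5, \qquad
\text{asymmetric: } x_{in} = \frac{x_{out}}{s}, \qquad
\text{align\_corners: } x_{in} = x_{out} \cdot \frac{L_{in} - 1}{L_{out} - 1},
\]

with s the per-axis scale factor and L the axis length. Pairing `asymmetric` with `nearest_mode = "floor"`, as done at the end of Opset11, mirrors Paddle's default nearest-neighbor behavior.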


@@ -0,0 +1,57 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class InterpolateMapper : public Mapper {
public:
InterpolateMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("data_layout", &data_layout_);
GetAttr("align_corners", &align_corners_);
GetAttr("align_mode", &align_mode_);
GetAttr("out_d", &out_d_);
GetAttr("out_h", &out_h_);
GetAttr("out_w", &out_w_);
method_ = OpType();
resize_mapper_["bilinear_interp"] = "linear";
resize_mapper_["bilinear_interp_v2"] = "linear";
resize_mapper_["nearest_interp_v2"] = "nearest";
resize_mapper_["bicubic_interp_v2"] = "cubic";
resize_mapper_["linear_interp_v2"] = "linear";
resize_mapper_["trilinear_interp_v2"] = "linear";
}
int32_t GetMinOpset(bool verbose = false);
void Opset11();
private:
std::string ComputeOutSize();
std::string ComputeScale();
std::map<std::string, std::string> resize_mapper_;
std::string method_;
std::string data_layout_;
int64_t align_mode_;
int64_t out_d_;
int64_t out_h_;
int64_t out_w_;
bool align_corners_;
};
} // namespace paddle2onnx


@@ -0,0 +1,146 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/layer_norm.h"
#include <cmath>
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(layer_norm, LayerNormMapper)
void LayerNormMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Y");
std::string input_name = helper_->AutoCast(
input_info[0].name, input_info[0].dtype, P2ODataType::FP32);
std::vector<int64_t> input_shape = input_info[0].shape;
std::vector<int64_t> axes;
for (auto i = begin_norm_axis_; i < input_shape.size(); i++) {
axes.push_back(i);
}
if (begin_norm_axis_ == input_shape.size() - 1) {
axes[0] = -1;
}
float epsilon = epsilon_;
std::string epsilon_node =
helper_->Constant({}, GetOnnxDtype(P2ODataType::FP32), epsilon);
std::string two_node =
helper_->Constant({}, GetOnnxDtype(P2ODataType::FP32), float(2.0));
auto mean_node = helper_->MakeNode("ReduceMean", {input_name});
AddAttribute(mean_node, "axes", axes);
auto numerator_node =
helper_->MakeNode("Sub", {input_name, mean_node->output(0)});
auto pow_num_node =
helper_->MakeNode("Pow", {numerator_node->output(0), two_node});
auto variance_node =
helper_->MakeNode("ReduceMean", {pow_num_node->output(0)});
AddAttribute(variance_node, "axes", axes);
auto add_eps_node =
helper_->MakeNode("Add", {variance_node->output(0), epsilon_node});
auto denominator_node = helper_->MakeNode("Sqrt", {add_eps_node->output(0)});
auto ipt_shape_node = helper_->MakeNode("Shape", {input_name});
std::vector<int64_t> slice_axes = {0};
std::vector<int64_t> start = {
static_cast<int64_t>(input_shape.size() - axes.size())};
std::vector<int64_t> end = {static_cast<int64_t>(input_shape.size())};
std::string weight_shape_node =
helper_->Slice(ipt_shape_node->output(0), slice_axes, start, end);
bool has_input_Bias = HasInput("Bias");
bool has_input_Scale = HasInput("Scale");
if (has_input_Bias && has_input_Scale) {
auto scale_info = GetInput("Scale");
auto bias_info = GetInput("Bias");
std::string scale_name = helper_->AutoCast(
scale_info[0].name, scale_info[0].dtype, P2ODataType::FP32);
std::string bias_name = helper_->AutoCast(
bias_info[0].name, bias_info[0].dtype, P2ODataType::FP32);
std::string scale_node = "";
std::string bias_node = "";
if (begin_norm_axis_ == input_shape.size() - 1) {
scale_node = helper_->Reshape(scale_name, {-1});
bias_node = helper_->Reshape(bias_name, {-1});
} else {
scale_node = helper_->MakeNode("Reshape", {scale_name, weight_shape_node})
->output(0);
bias_node = helper_->MakeNode("Reshape", {bias_name, weight_shape_node})
->output(0);
}
auto layer_norm_pre_node = helper_->MakeNode(
"Div", {numerator_node->output(0), denominator_node->output(0)});
auto layer_norm_node =
helper_->MakeNode("Mul", {layer_norm_pre_node->output(0), scale_node});
auto pre_cast_node =
helper_->MakeNode("Add", {layer_norm_node->output(0), bias_node});
helper_->AutoCast(pre_cast_node->output(0), output_info[0].name,
P2ODataType::FP32, output_info[0].dtype);
return;
}
if (has_input_Bias) {
auto bias_info = GetInput("Bias");
std::string bias_name = helper_->AutoCast(
bias_info[0].name, bias_info[0].dtype, P2ODataType::FP32);
std::string bias_node = "";
if (begin_norm_axis_ == input_shape.size() - 1) {
bias_node = helper_->Reshape(bias_name, {-1});
} else {
bias_node = helper_->MakeNode("Reshape", {bias_name, weight_shape_node})
->output(0);
}
auto layer_norm_node = helper_->MakeNode(
"Div", {numerator_node->output(0), denominator_node->output(0)});
auto pre_cast_node =
helper_->MakeNode("Add", {layer_norm_node->output(0), bias_node});
helper_->AutoCast(pre_cast_node->output(0), output_info[0].name,
P2ODataType::FP32, output_info[0].dtype);
return;
}
if (has_input_Scale) {
auto scale_info = GetInput("Scale");
std::string scale_name = helper_->AutoCast(
scale_info[0].name, scale_info[0].dtype, P2ODataType::FP32);
std::string scale_node = "";
if (begin_norm_axis_ == input_shape.size() - 1) {
scale_node = helper_->Reshape(scale_name, {-1});
} else {
scale_node = helper_->MakeNode("Reshape", {scale_name, weight_shape_node})
->output(0);
}
auto layer_norm_node = helper_->MakeNode(
"Div", {numerator_node->output(0), denominator_node->output(0)});
auto pre_cast_node =
helper_->MakeNode("Mul", {layer_norm_node->output(0), scale_node});
helper_->AutoCast(pre_cast_node->output(0), output_info[0].name,
P2ODataType::FP32, output_info[0].dtype);
return;
}
auto pre_cast_node = helper_->MakeNode(
"Div", {numerator_node->output(0), denominator_node->output(0)});
helper_->AutoCast(pre_cast_node->output(0), output_info[0].name,
P2ODataType::FP32, output_info[0].dtype);
}
} // namespace paddle2onnx
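The node chain above is the textbook layer-normalization decomposition, evaluated in FP32 and cast back to the output dtype at the end:

\[
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{E}\!\left[(x - \mathrm{E}[x])^{2}\right] + \epsilon}} \cdot \gamma + \beta ,
\]

where the means run over axes [begin_norm_axis, rank). The Slice over the input's Shape extracts exactly those trailing dimensions so that Scale and Bias can be reshaped to broadcast against the normalized tensor.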


@@ -0,0 +1,39 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class LayerNormMapper : public Mapper {
public:
LayerNormMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("begin_norm_axis", &begin_norm_axis_);
GetAttr("epsilon", &epsilon_);
}
void Opset7();
private:
int64_t begin_norm_axis_;
float epsilon_;
};
} // namespace paddle2onnx

paddle2onnx/mapper/nn/norm.cc Executable file

@@ -0,0 +1,28 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/norm.h"
namespace paddle2onnx {
REGISTER_MAPPER(norm, NormMapper)
void NormMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto node = helper_->MakeNode("LpNormalization", {input_info[0].name},
{output_info[0].name});
AddAttribute(node, "axis", axis_);
}
} // namespace paddle2onnx
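Paddle's `norm` op is an L2 normalization along `axis`, which lines up with ONNX LpNormalization and its default p = 2:

\[
y_i = \frac{x_i}{\left( \sum_{j} x_j^{2} \right)^{1/2}} ,
\]

summing over the chosen axis. One caveat worth noting: Paddle's op carries an epsilon attribute that LpNormalization has no counterpart for, so outputs may differ for near-zero vectors.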


@@ -0,0 +1,37 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class NormMapper : public Mapper {
public:
NormMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("axis", &axis_);
}
void Opset7();
private:
int64_t axis_ = -1;
};
} // namespace paddle2onnx


@@ -0,0 +1,114 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/pad3d.h"
namespace paddle2onnx {
REGISTER_MAPPER(pad3d, Pad3DMapper)
int32_t Pad3DMapper::GetMinOpset(bool verbose) {
if (data_format_ == "NDHWC") {
Error() << "NDHWC format is not supported." << std::endl;
return -1;
}
if (mode_ == "circular") {
Error() << "Padding mode `circular` is not supported." << std::endl;
return -1;
}
if (HasInput("Paddings")) {
if (!IsConstantInput("Paddings")) {
Logger(verbose, 11) << "While Paddings is input and it's not a constant tensor, " << RequireOpset(11) << std::endl;
return 11;
}
std::vector<int64_t> paddings;
if (!TryGetInputValue("Paddings", &paddings)) {
Logger(verbose, 11) << "Cannot get constant value from input of Paddings, " << RequireOpset(11) << std::endl;
return 11;
} else {
if (paddings.size() != 6) {
Error() << "Size of paddings should be equal to 6, but now it's " << paddings.size() << std::endl;
return -1;
}
}
} else {
if (paddings_.size() != 6) {
Error() << "Size of paddings should be equal to 6, but now it's " << paddings_.size() << std::endl;
return -1;
}
}
return 7;
}
std::vector<int64_t> Pad3DMapper::ConvertPaddingParameter(const std::vector<int64_t>& paddings) {
std::vector<int64_t> new_paddings(10, 0);
new_paddings[2] = paddings[4];
new_paddings[3] = paddings[2];
new_paddings[4] = paddings[0];
new_paddings[7] = paddings[5];
new_paddings[8] = paddings[3];
new_paddings[9] = paddings[1];
return new_paddings;
}
void Pad3DMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto mode = mode_;
if (mode == "replicate") {
mode = "edge";
}
std::vector<int64_t> paddings;
if (HasInput("Paddings")) {
Assert(TryGetInputValue("Paddings", &paddings), "Cannot get constant value from input of Paddings, " + RequireOpset(11));
} else {
paddings.assign(paddings_.begin(), paddings_.end());
}
std::vector<int64_t> new_paddings = ConvertPaddingParameter(paddings);
auto node = helper_->MakeNode("Pad", {input_info[0].name}, {output_info[0].name});
AddAttribute(node, "mode", mode);
AddAttribute(node, "value", value_);
AddAttribute(node, "pads", new_paddings);
}
void Pad3DMapper::Opset11() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto mode = mode_;
if (mode == "replicate") {
mode = "edge";
}
std::string paddings = "";
if (HasInput("Paddings")) {
std::vector<int64_t> paddings_value;
if (TryGetInputValue("Paddings", &paddings_value)) {
std::vector<int64_t> new_paddings = ConvertPaddingParameter(paddings_value);
paddings = helper_->Constant(ONNX_NAMESPACE::TensorProto::INT64, new_paddings);
} else {
auto pad_info = GetInput("Paddings");
auto cast_pad = helper_->AutoCast(pad_info[0].name, pad_info[0].dtype, P2ODataType::INT64);
auto split_pads = helper_->Split(cast_pad, std::vector<int64_t>(6, 1), 0);
auto zero = helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::INT64, int64_t(0));
paddings = helper_->Concat({zero, zero, split_pads[4], split_pads[2], split_pads[0], zero, zero, split_pads[5], split_pads[3], split_pads[1]}, 0);
}
} else {
std::vector<int64_t> new_paddings = ConvertPaddingParameter(paddings_);
paddings = helper_->Constant(ONNX_NAMESPACE::TensorProto::INT64, new_paddings);
}
auto value = helper_->Constant({}, GetOnnxDtype(input_info[0].dtype), value_);
auto node = helper_->MakeNode("Pad", {input_info[0].name, paddings, value}, {output_info[0].name});
AddAttribute(node, "mode", mode);
}
} // namespace paddle2onnx
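To make ConvertPaddingParameter concrete: Paddle's pad3d stores its six paddings in [left, right, top, bottom, front, back] order (W, H, D axes), while ONNX Pad expects all begin values followed by all end values in NCDHW axis order. A hypothetical worked example (values chosen only for illustration):

// Paddle pad3d paddings, {left, right, top, bottom, front, back}:
std::vector<int64_t> paddle_pads = {1, 2, 3, 4, 5, 6};
// ConvertPaddingParameter(paddle_pads) yields the ONNX NCDHW "pads":
// {N_begin, C_begin, D_begin, H_begin, W_begin,
//  N_end,   C_end,   D_end,   H_end,   W_end}
// => {0, 0, 5, 3, 1, 0, 0, 6, 4, 2}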


@@ -0,0 +1,45 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class Pad3DMapper : public Mapper {
public:
Pad3DMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
GetAttr("data_format", &data_format_);
GetAttr("mode", &mode_);
GetAttr("value", &value_);
GetAttr("paddings", &paddings_);
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
void Opset11();
private:
std::vector<int64_t> ConvertPaddingParameter(const std::vector<int64_t>& paddings);
std::string data_format_;
std::string mode_;
std::vector<int64_t> paddings_;
float value_;
};
} // namespace paddle2onnx


@@ -0,0 +1,332 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle2onnx/mapper/nn/pool2d.h"
#include <algorithm>
#include <cmath>
#include <string>
#include <vector>
namespace paddle2onnx {
REGISTER_MAPPER(pool2d, Pool2dMapper)
REGISTER_MAPPER(max_pool2d_with_index, Pool2dMapper)
bool Pool2dMapper::IsSameSpan(const int64_t& in_size, const int64_t& out_size) {
std::vector<int64_t> spans;
spans.reserve(out_size);
  const double ratio = static_cast<double>(in_size) / static_cast<double>(out_size);
  // Floating-point division matters here: with integer division every span
  // would collapse to the same value and the uniformity check would always pass.
  for (int64_t i = 0; i < out_size; ++i) {
    int64_t start = static_cast<int64_t>(std::floor(i * ratio));
    int64_t end = static_cast<int64_t>(std::ceil((i + 1) * ratio));
    spans.push_back(end - start);
  }
std::sort(spans.begin(), spans.end());
return spans[0] == spans[spans.size() - 1];
}
bool Pool2dMapper::IsExportAsCustomOp() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
GetAttr("ksize", &k_size_);
if (global_pooling_ || (k_size_[0] == 1 && k_size_[1] == 1)) {
return false;
}
if (export_as_custom_op && adaptive_) {
bool is_1x1_kernel = true;
for (auto i : k_size_) {
if (i != 1) {
is_1x1_kernel = false;
}
}
if (is_1x1_kernel) {
return false;
}
for (auto one_input : input_info) {
for (auto i = 2; i < one_input.shape.size(); ++i) {
if (one_input.shape[i] == -1) {
return true;
}
}
}
int64_t input_h = input_info[0].shape[2];
int64_t input_w = input_info[0].shape[3];
int64_t output_h = output_info[0].shape[2];
int64_t output_w = output_info[0].shape[3];
if (output_h == -1 || output_w == -1 || !IsSameSpan(input_h, output_h) ||
!IsSameSpan(input_w, output_w)) {
return true;
}
}
return false;
}
void Pool2dMapper::ExportAsCustomOp() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
auto node = helper_->MakeNode(custom_op_name, {input_info[0].name},
{output_info[0].name});
node->set_domain("Paddle");
AddAttribute(node, "pooling_type", pooling_type_);
for (auto i = 1; i < output_info[0].shape.size(); i++) {
if (output_info[0].shape[i] == -1) {
if (input_info[0].shape[i] == -1) {
Assert(false,
"Can not convert to AdaptivePool custom OP, because the shapes "
"of the input and output are unknown.");
} else {
output_info[0].shape[i] = input_info[0].shape[i];
}
}
}
AddAttribute(node, "output_size", output_info[0].shape);
Warn() << "Pool2d is exported as custom operator: " << custom_op_name
<< std::endl;
helper_->MakeValueInfo(input_info[0].name, input_info[0].dtype,
input_info[0].shape);
helper_->MakeValueInfo(output_info[0].name, output_info[0].dtype,
output_info[0].shape);
}
void Pool2dMapper::AdaptivePool(const std::vector<TensorInfo>& input_info,
const std::vector<TensorInfo>& output_info) {
int64_t input_h = input_info[0].shape[2];
int64_t input_w = input_info[0].shape[3];
int64_t output_h = output_info[0].shape[2];
int64_t output_w = output_info[0].shape[3];
int64_t stride_h = std::floor(input_h / output_h);
int64_t stride_w = std::floor(input_w / output_w);
int64_t kernel_h = input_h - (output_h - 1) * stride_h;
int64_t kernel_w = input_w - (output_w - 1) * stride_w;
std::string onnx_pool_type;
if (OpType() == "max_pool2d_with_index") {
onnx_pool_type = "MaxPool";
} else {
auto iter = op_mapper_.find(pooling_type_);
onnx_pool_type = iter->second[0];
}
auto input = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
auto node = helper_->MakeNode(onnx_pool_type, {input});
helper_->AutoCast(node->output(0), output_info[0].name, P2ODataType::FP32,
output_info[0].dtype);
std::vector<int64_t> kernel_size = {kernel_h, kernel_w};
AddAttribute(node, "kernel_shape", kernel_size);
std::vector<int64_t> strides = {stride_h, stride_w};
AddAttribute(node, "strides", strides);
  // The ceil_mode attribute is only available since opset 10.
  if (helper_->GetOpsetVersion() >= 10) {
AddAttribute(node, "ceil_mode", static_cast<int64_t>(ceil_mode_));
}
std::string auto_pad = "NOTSET";
if (padding_algorithm_ == "SAME") {
auto_pad = "SAME_UPPER";
} else if (padding_algorithm_ == "VALID") {
auto_pad = "VALID";
}
AddAttribute(node, "auto_pad", auto_pad);
if (pooling_type_ == "avg") {
AddAttribute(node, "count_include_pad", static_cast<int64_t>(exclusive_));
}
}
void Pool2dMapper::NoAdaptivePool(const std::vector<TensorInfo>& input_info,
const std::vector<TensorInfo>& output_info) {
std::vector<int64_t> input_shape = input_info[0].shape;
if (pads_.size() == 2) {
pads_.push_back(pads_[0]);
pads_.push_back(pads_[1]);
} else if (pads_.size() == 4) {
std::vector<int64_t> index = {0, 2, 1, 3};
std::vector<int64_t> copy = pads_;
for (auto i = 0; i < index.size(); ++i) {
pads_[i] = copy[index[i]];
}
}
if (input_shape[2] > 0 && input_shape[2] + pads_[0] < k_size_[0]) {
k_size_[0] = input_shape[2] + pads_[0];
}
if (input_shape[3] > 0 && input_shape[3] + pads_[1] < k_size_[1]) {
k_size_[1] = input_shape[3] + pads_[1];
}
int64_t max_ksize = *std::max_element(std::begin(k_size_), std::end(k_size_));
int64_t max_pads = *std::max_element(std::begin(pads_), std::end(pads_));
auto input_x = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
if (max_ksize <= max_pads) {
std::vector<int64_t> onnx_paddings = {0, 0, pads_[0], pads_[1],
0, 0, pads_[2], pads_[3]};
std::vector<std::string> inputs_names = {input_x};
if (helper_->GetOpsetVersion() >= 11) {
std::string paddings_node =
helper_->Constant(GetOnnxDtype(P2ODataType::INT64), onnx_paddings);
inputs_names.push_back(paddings_node);
std::vector<float> val = {0.0};
std::string val_node =
helper_->Constant(GetOnnxDtype(P2ODataType::FP32), val);
inputs_names.push_back(val_node);
}
auto node = helper_->MakeNode("Pad", inputs_names);
std::string mode = "constant";
AddAttribute(node, "mode", mode);
if (helper_->GetOpsetVersion() < 11) {
AddAttribute(node, "pads", onnx_paddings);
float val = 0.0;
AddAttribute(node, "value", val);
}
input_x = node->output(0);
pads_.clear();
pads_.resize(4, 0);
}
std::string onnx_pool_type;
if (OpType() == "max_pool2d_with_index") {
onnx_pool_type = "MaxPool";
} else {
auto iter = op_mapper_.find(pooling_type_);
onnx_pool_type = iter->second[0];
}
auto node = helper_->MakeNode(onnx_pool_type, {input_x});
helper_->AutoCast(node->output(0), output_info[0].name, P2ODataType::FP32,
output_info[0].dtype);
AddAttribute(node, "kernel_shape", k_size_);
AddAttribute(node, "strides", strides_);
std::string auto_pad = "NOTSET";
if (padding_algorithm_ == "SAME") {
auto_pad = "SAME_UPPER";
AddAttribute(node, "auto_pad", auto_pad);
} else if (padding_algorithm_ == "VALID") {
auto_pad = "VALID";
AddAttribute(node, "auto_pad", auto_pad);
} else {
AddAttribute(node, "pads", pads_);
}
if (OpType() != "max_pool2d_with_index" && helper_->GetOpsetVersion() >= 10) {
AddAttribute(node, "ceil_mode", static_cast<int64_t>(ceil_mode_));
}
if (OpType() != "max_pool2d_with_index" && pooling_type_ == "avg") {
AddAttribute(node, "count_include_pad", static_cast<int64_t>(exclusive_));
}
}
int32_t Pool2dMapper::GetMinOpset(bool verbose) {
// NHWC is not supported
if (data_format_ == "NHWC") {
Error() << "NHWC format is not supported." << std::endl;
return -1;
}
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
if (IsAttrVar("ksize")) {
Error() << "While Attribute(ksize)'s type is Tensor, it's not "
"supported."
<< std::endl;
return -1;
} else {
GetAttr("ksize", &k_size_);
}
if (global_pooling_ || (k_size_[0] == 1 && k_size_[1] == 1)) {
if (ceil_mode_) {
Logger(verbose, 10) << "While ceil_model is True, " << RequireOpset(10)
<< std::endl;
return 10;
}
return 7;
}
if (adaptive_) {
for (auto one_input : input_info) {
for (auto i = 2; i < one_input.shape.size(); ++i) {
if (one_input.shape[i] == -1) {
if (export_as_custom_op) {
return 7;
} else {
Error() << "Adaptive only support static input shape." << std::endl;
return -1;
}
}
}
}
int64_t input_h = input_info[0].shape[2];
int64_t input_w = input_info[0].shape[3];
int64_t output_h = output_info[0].shape[2];
int64_t output_w = output_info[0].shape[3];
if (output_h == -1 || output_w == -1 || !IsSameSpan(input_h, output_h) ||
!IsSameSpan(input_w, output_w)) {
if (export_as_custom_op) {
return 7;
} else {
Error() << "Cannot convert adaptive pool with input_size: " << input_h
<< " " << input_h << " output_size: " << output_h << " "
<< output_w << std::endl;
return -1;
}
}
}
if (OpType() == "max_pool2d_with_index") {
return 9;
}
auto iter = op_mapper_.find(pooling_type_);
if (op_mapper_.end() == iter) {
Error() << "Cannot find " << pooling_type_ << " in pool op_mapper."
<< std::endl;
return -1;
}
if (ceil_mode_) {
Logger(verbose, 10) << "While ceil_model is True, " << RequireOpset(10)
<< std::endl;
return 10;
}
return 7;
}
void Pool2dMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
GetAttr("ksize", &k_size_);
bool is_1x1_kernel = true;
for (auto i : k_size_) {
if (i != 1) {
is_1x1_kernel = false;
}
}
if (global_pooling_ || (adaptive_ && is_1x1_kernel)) {
std::string onnx_pool_type;
if (OpType() == "max_pool2d_with_index") {
onnx_pool_type = "GlobalMaxPool";
} else {
auto iter = op_mapper_.find(pooling_type_);
onnx_pool_type = iter->second[1];
}
auto input = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
P2ODataType::FP32);
auto output = helper_->MakeNode(onnx_pool_type, {input})->output(0);
helper_->AutoCast(output, output_info[0].name, P2ODataType::FP32,
output_info[0].dtype);
} else if (adaptive_) {
AdaptivePool(input_info, output_info);
} else {
NoAdaptivePool(input_info, output_info);
}
}
} // namespace paddle2onnx
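AdaptivePool only reduces cleanly to a plain ONNX pooling op when every output window covers the same span, which is what IsSameSpan checks. In that case the equivalent fixed parameters are

\[
\text{stride} = \left\lfloor \frac{in}{out} \right\rfloor, \qquad
\text{kernel} = in - (out - 1) \cdot \text{stride}
\]

(an exact match when in is divisible by out). For example, in = 12, out = 4 gives stride 3 and kernel 3, while in = 5, out = 3 yields spans {2, 3, 2}, so export falls back to the custom AdaptivePool operator, or fails when export_as_custom_op is disabled.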


@@ -0,0 +1,67 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle2onnx/mapper/mapper.h"
namespace paddle2onnx {
class Pool2dMapper : public Mapper {
public:
Pool2dMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
int64_t op_id)
: Mapper(p, helper, block_id, op_id) {
op_mapper_["max"] = {"MaxPool", "GlobalMaxPool"};
op_mapper_["avg"] = {"AveragePool", "GlobalAveragePool"};
GetAttr("global_pooling", &global_pooling_);
GetAttr("adaptive", &adaptive_);
GetAttr("strides", &strides_);
GetAttr("paddings", &pads_);
if (OpType() != "max_pool2d_with_index") {
GetAttr("pooling_type", &pooling_type_);
GetAttr("data_format", &data_format_);
GetAttr("ceil_mode", &ceil_mode_);
GetAttr("padding_algorithm", &padding_algorithm_);
GetAttr("exclusive", &exclusive_);
exclusive_ = !exclusive_;
}
}
int32_t GetMinOpset(bool verbose = false);
void Opset7();
void ExportAsCustomOp();
bool IsExportAsCustomOp();
private:
bool IsSameSpan(const int64_t& in_size, const int64_t& out_size);
void AdaptivePool(const std::vector<TensorInfo>& input_info,
const std::vector<TensorInfo>& output_info);
void NoAdaptivePool(const std::vector<TensorInfo>& input_info,
const std::vector<TensorInfo>& output_info);
bool ceil_mode_;
bool global_pooling_;
bool adaptive_;
bool exclusive_;
std::string data_format_;
std::string pooling_type_;
std::string padding_algorithm_;
std::vector<int64_t> k_size_;
std::vector<int64_t> pads_;
std::vector<int64_t> strides_;
std::map<std::string, std::vector<std::string>> op_mapper_;
};
} // namespace paddle2onnx

Some files were not shown because too many files have changed in this diff