[Doc] Change all PaddleLite or Paddle-Lite to Paddle Lite (#929)

* [FlyCV] Bump up FlyCV -> official release 1.0.0

* change PaddleLite or Paddle-Lite to Paddle Lite

* fix docs

* fix doc

Co-authored-by: DefTruth <qiustudent_r@163.com>
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Author: yeliang2258
Date: 2022-12-21 14:15:50 +08:00
Committed by: GitHub
Parent: 725fe52df3
Commit: b42ec302e6
21 changed files with 104 additions and 86 deletions

examples/application/js/converter/DEVELOPMENT.md Normal file → Executable file

@@ -64,7 +64,7 @@ Parameter | description
--modelPath | The model file path; used when the weight file is merged.
--paramPath | The weight file path; used when the weight file is merged.
--outputDir | `Required`; the output directory for the converted model.
--disableOptimize | Whether to disable model optimization; `1` disables it, `0` enables it (requires PaddleLite); default 0.
--disableOptimize | Whether to disable model optimization; `1` disables it, `0` enables it (requires Paddle Lite); default 0.
--logModelInfo | Whether to print model structure information; `0` means do not print, `1` means print; default 0.
--sliceDataSize | Shard size (in KB) of each weight file; default 4096.
--useGPUOpt | Whether to use GPU opt; default is False.
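A minimal invocation consistent with this table (a sketch; the paths are placeholders, using the `convertToPaddleJSModel.py` entry script whose diff appears later in this commit):

```python
import subprocess
import sys

# Placeholder paths; point these at a real fluid model and an output directory.
subprocess.run(
    [
        sys.executable, "convertToPaddleJSModel.py",
        "--inputDir=./fluid_model",      # model file must be named __model__
        "--outputDir=./paddlejs_model",  # required
        "--disableOptimize=0",           # keep optimization on (needs Paddle Lite)
        "--sliceDataSize=4096",          # shard size in KB
    ],
    check=True)
```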

examples/application/js/converter/DEVELOPMENT_cn.md Normal file → Executable file

@@ -63,11 +63,11 @@ python convertToPaddleJSModel.py --inputDir=<fluid_model_directory> --outputDir=
--modelPath | Path to the fluid model file; used when the weights are in a merged parameter file
--paramPath | Path to the fluid parameter file; used when the weights are in a merged parameter file
--outputDir | `Required`; output directory for the Paddle.js model
--disableOptimize | Whether to disable model optimization; `1` disables it, `0` enables it (requires PaddleLite); optimization runs by default
--disableOptimize | Whether to disable model optimization; `1` disables it, `0` enables it (requires Paddle Lite); optimization runs by default
--logModelInfo | Whether to print model structure information; `0` means do not print, `1` means print; off by default
--sliceDataSize | Size (in KB) of each shard when outputting sharded Paddle.js parameter files; default 4096
--useGPUOpt | Whether to enable GPU optimization for the model; off by default (set it to True when the model will run on the webgl/webgpu backends; it is not needed for the wasm/plainjs backends)
## 3. Other information
If the model to be converted is in `TensorFlow/Caffe/ONNX` format, you can use the `X2Paddle` tool from the PaddlePaddle project to convert it into a fluid model first, and then use this tool to convert that into a Paddle.js model.
See the [X2Paddle project](https://github.com/PaddlePaddle/X2Paddle) for details.
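As a sketch of that X2Paddle step (the `--framework/--model/--save_dir` flags follow the X2Paddle README; treat the exact flags and paths as assumptions, not part of this commit):

```python
import subprocess

# Assumed X2Paddle CLI usage; the model path and output directory are placeholders.
subprocess.run(
    ["x2paddle", "--framework=onnx", "--model=model.onnx",
     "--save_dir=./fluid_model"],
    check=True)
# ./fluid_model can then be converted with convertToPaddleJSModel.py.
```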

examples/application/js/converter/convertToPaddleJSModel.py

@@ -9,19 +9,20 @@ import stat
import traceback
import copy


def cleanTempModel(optimizedModelTempDir):
    """ Clean up the temporary model files left over from opt optimization """
    if os.path.exists(optimizedModelTempDir):
        print("Cleaning optimized temporary model...")
        shutil.rmtree(optimizedModelTempDir, onerror=grantWritePermission)


def grantWritePermission(func, path, execinfo):
    """ Make the path writable and retry the os call that failed (rmtree onerror handler) """
    os.chmod(path, stat.S_IWRITE)
    func(path)


def main():
    """
    Example:
@@ -29,20 +30,41 @@ def main():
"""
try:
p = argparse.ArgumentParser(description='转化为PaddleJS模型参数解析')
p.add_argument('--inputDir', help='fluid模型所在目录。当且仅当使用分片参数文件时使用该参数。将过滤modelPath和paramsPath参数且模型文件名必须为`__model__`', required=False)
p.add_argument('--modelPath', help='fluid模型文件所在路径使用合并参数文件时使用该参数', required=False)
p.add_argument('--paramPath', help='fluid参数文件所在路径使用合并参数文件时使用该参数', required=False)
p.add_argument("--outputDir", help='paddleJS模型输出路径必要参数', required=True)
p.add_argument("--disableOptimize", type=int, default=0, help='是否关闭模型优化非必要参数1为关闭优化0为开启优化默认开启优化', required=False)
p.add_argument("--logModelInfo", type=int, default=0, help='是否输出模型结构信息非必要参数0为不输出1为输出默认不输出', required=False)
p.add_argument("--sliceDataSize", type=int, default=4096, help='分片输出参数文件时每片文件的大小单位KB非必要参数默认4096KB', required=False)
p.add_argument(
'--inputDir',
help='fluid模型所在目录。当且仅当使用分片参数文件时使用该参数。将过滤modelPath和paramsPath参数且模型文件名必须为`__model__`',
required=False)
p.add_argument(
'--modelPath', help='fluid模型文件所在路径使用合并参数文件时使用该参数', required=False)
p.add_argument(
'--paramPath', help='fluid参数文件所在路径使用合并参数文件时使用该参数', required=False)
p.add_argument(
"--outputDir", help='paddleJS模型输出路径必要参数', required=True)
p.add_argument(
"--disableOptimize",
type=int,
default=0,
help='是否关闭模型优化非必要参数1为关闭优化0为开启优化默认开启优化',
required=False)
p.add_argument(
"--logModelInfo",
type=int,
default=0,
help='是否输出模型结构信息非必要参数0为不输出1为输出默认不输出',
required=False)
p.add_argument(
"--sliceDataSize",
type=int,
default=4096,
help='分片输出参数文件时每片文件的大小单位KB非必要参数默认4096KB',
required=False)
p.add_argument('--useGPUOpt', help='转换模型是否执行GPU优化方法', required=False)
args = p.parse_args()
# 获取当前用户使用的 python 解释器 bin 位置
pythonCmd = sys.executable
# TODO: 由于PaddleLite和PaddlePaddle存在包冲突因此将整个模型转换工具拆成两个python文件由一个入口python文件通过命令行调用
# TODO: 由于Paddle Lite和PaddlePaddle存在包冲突因此将整个模型转换工具拆成两个python文件由一个入口python文件通过命令行调用
# 区分本地执行和命令行执行
if os.path.exists("optimizeModel.py"):
optimizeCmd = pythonCmd + " optimizeModel.py"
@@ -76,7 +98,6 @@ def main():
            args.modelPath = os.path.join(optimizedModelTempDir, "model")
            args.paramPath = os.path.join(optimizedModelTempDir, "params")

        print("============Convert Model Args=============")
        if inputDir:
            print("inputDir: " + inputDir)
@@ -88,14 +109,14 @@ def main():
print("enableLogModelInfo: " + str(enableLogModelInfo))
print("sliceDataSize:" + str(sliceDataSize))
print("Starting...")
if enableOptimization:
print("Optimizing model...")
for param in ["inputDir", "modelPath", "paramPath", "outputDir"]:
if optArgs.__dict__[param]:
# 用""框起命令参数值,解决路径中的空格问题
optimizeCmd += " --" + param + "="+ '"' + str(optArgs.__dict__[param]) + '"'
optimizeCmd += " --" + param + "=" + '"' + str(
optArgs.__dict__[param]) + '"'
os.system(optimizeCmd)
try:
os.listdir(optimizedModelTempDir)
@@ -110,13 +131,16 @@ def main():
            else:
                print("\n\033[32mOptimizing model successfully.\033[0m")
        else:
            print("\033[33mYou chose not to optimize the model, so optimization is skipped.\033[0m")
            print(
                "\033[33mYou chose not to optimize the model, so optimization is skipped.\033[0m"
            )

        print("\nConverting model...")
        for param in args.__dict__:
            if args.__dict__[param]:
                # Wrap the value in "" so that spaces in paths survive
                convertCmd += " --" + param + "=" + '"' + str(args.__dict__[param]) + '"'
                convertCmd += " --" + param + "=" + '"' + str(args.__dict__[
                    param]) + '"'
        os.system(convertCmd)
        try:
            file = os.listdir(outputDir)

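The quoting above (wrapping each value in `""`) protects paths with spaces when the command is handed to `os.system`. A sketch of an alternative, not part of this commit: build the command as an argument list for `subprocess.run`, which needs no manual quoting:

```python
import subprocess
import sys

def run_tool(script, arg_dict):
    # Hypothetical helper: passing arguments as a list avoids the shell
    # entirely, so spaces in paths need no quoting.
    cmd = [sys.executable, script]
    for name, value in arg_dict.items():
        if value:
            cmd.append("--" + name + "=" + str(value))
    subprocess.run(cmd, check=True)

# e.g. run_tool("optimizeModel.py", {"outputDir": "/path with spaces/out"})
```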
examples/application/js/converter/fuseOps.py Normal file → Executable file

@@ -1,20 +1,12 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
def opListFuse(ops):
    """ Operator fusion """
    fuseOpList = [
        'relu',
        'relu6',
        'leaky_relu',
        'scale',
        'sigmoid',
        'hard_sigmoid',
        'pow',
        'sqrt',
        'tanh',
        'hard_swish',
        'dropout'
        'relu', 'relu6', 'leaky_relu', 'scale', 'sigmoid', 'hard_sigmoid',
        'pow', 'sqrt', 'tanh', 'hard_swish', 'dropout'
    ]
    # Check whether the op is a single node
@@ -37,39 +29,41 @@ def opListFuse(ops):
        else:
            return False

    for index in reversed(range(len(ops))):
        if index > 0:
            op = ops[index]

            # Compatible with the paddlelite operator-fusion fields
            # Compatible with the Paddle Lite operator-fusion fields
            if 'act_type' in op['attrs']:
                name = op['attrs']['act_type']
                op['attrs']['fuse_opt'] = {}
                op['attrs']['fuse_opt'][name] = {}

                if name == 'hard_swish':
                    op['attrs']['fuse_opt'][name]['offset'] = op['attrs']['hard_swish_offset']
                    op['attrs']['fuse_opt'][name]['scale'] = op['attrs']['hard_swish_scale']
                    op['attrs']['fuse_opt'][name]['threshold'] = op['attrs']['hard_swish_threshold']
                    op['attrs']['fuse_opt'][name]['offset'] = op['attrs'][
                        'hard_swish_offset']
                    op['attrs']['fuse_opt'][name]['scale'] = op['attrs'][
                        'hard_swish_scale']
                    op['attrs']['fuse_opt'][name]['threshold'] = op['attrs'][
                        'hard_swish_threshold']

                if name == 'relu6':
                    op['attrs']['fuse_opt'][name]['threshold'] = op['attrs']['fuse_brelu_threshold']
                    op['attrs']['fuse_opt'][name]['threshold'] = op['attrs'][
                        'fuse_brelu_threshold']

            for fuse in fuseOpList:
                if op['type'] == fuse:
                    prevOp = ops[index - 1]

                    if opExistSingleNode(prevOp['outputs']['Out'][0]) and len(prevOp['outputs']['Out']) == 1 :
                    if opExistSingleNode(prevOp['outputs']['Out'][0]) and len(
                            prevOp['outputs']['Out']) == 1:
                        prevOp['attrs']['fuse_opt'] = {}

                        if 'fuse_opt' in op['attrs']:
                            prevOp['attrs']['fuse_opt'] = op['attrs']['fuse_opt']
                            prevOp['attrs']['fuse_opt'] = op['attrs'][
                                'fuse_opt']
                            del op['attrs']['fuse_opt']

                        prevOp['attrs']['fuse_opt'][fuse] = op['attrs']
                        prevOp['outputs']['Out'] = op['outputs']['Out']

                        del ops[index]
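For orientation, a hedged sketch of the data this pass rewrites: op dicts with `type`, `attrs`, and `outputs` keys, simplified from the real Paddle.js graph format. Fusing a `relu` into the `conv2d` that feeds it folds the activation into `attrs['fuse_opt']` and rewires the output:

```python
# Simplified stand-in ops (not the full Paddle.js op schema): a conv2d whose
# single output feeds exactly one relu, the case opListFuse collapses.
ops = [
    {
        "type": "conv2d",
        "attrs": {"strides": [1, 1]},
        "outputs": {"Out": ["conv_out"]},
    },
    {
        "type": "relu",
        "attrs": {},
        "outputs": {"Out": ["relu_out"]},
    },
]

# After opListFuse(ops), the relu node is gone and conv2d carries it:
#   ops[0]["attrs"]["fuse_opt"] == {"relu": {}}
#   ops[0]["outputs"]["Out"] == ["relu_out"]   # rewired to the relu's output
```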