[Quantization] Update auto compression config files. (#846)

* Fix links in readme

* Fix links in readme

* Update PPOCRv2/v3 examples

* Update auto compression configs
yunyaoXYY, 2022-12-11 14:16:13 +08:00 (committed by GitHub)
parent e877f0fd07, commit 29f034cf93
14 changed files with 23 additions and 28 deletions


@@ -6,15 +6,14 @@ FastDeploy, based on PaddleSlim's Auto Compression Toolkit (ACT), provides users with
### Environment Dependencies
-1. Refer to the PaddlePaddle official website and install the develop version
+1. Refer to the PaddlePaddle official website and install Paddle 2.4
```
https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html
```
-2. Install the PaddleSlim develop version
+2. Install PaddleSlim 2.4
```bash
-git clone https://github.com/PaddlePaddle/PaddleSlim.git & cd PaddleSlim
-python setup.py install
+pip install paddleslim==2.4.0
```
### Installation of the One-Command Model Auto Compression Tool


@@ -7,17 +7,14 @@ We take the Yolov5 series as an example to demonstrate how to install and execute
### Environment Dependencies
-1. Install the develop version downloaded from the PaddlePaddle official website.
+1. Install PaddlePaddle 2.4
```
https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html
```
-2. Install PaddleSlim-develop
+2. Install PaddleSlim 2.4
```bash
-git clone https://github.com/PaddlePaddle/PaddleSlim.git & cd PaddleSlim
-python setup.py install
+pip install paddleslim==2.4.0
```
### Install FastDeploy Auto Compression Toolkit


@@ -24,7 +24,7 @@ Distillation:
  alpha: 1.0 # weight of the distillation loss
  loss: soft_label # distillation loss algorithm
-Quantization:
+QuantAware:
  onnx_format: true # whether to use the ONNX quantization standard format; must be true to deploy on FastDeploy
  use_pact: true # whether quantization-aware training uses the PACT method
  activation_quantize_type: 'moving_average_abs_max' # activation quantization method


@@ -26,7 +26,7 @@ Distillation:
  alpha: 1.0 # Distillation loss weight
  loss: soft_label # Distillation loss algorithm
-Quantization:
+QuantAware:
  onnx_format: true # Whether to use the ONNX quantization standard format; must be true to deploy on FastDeploy
  use_pact: true # Whether to use the PACT method for training
  activation_quantize_type: 'moving_average_abs_max' # Activation quantization method
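
Putting the fragments above back together, a config under the renamed schema would look roughly like the sketch below. This is a minimal reconstruction assembled only from keys visible in these hunks; any fields the diff does not show are omitted, and the comments are editorial glosses rather than text from the original files.

```yaml
# Minimal sketch of a quant-aware ACT config after the rename,
# assembled only from keys shown in these hunks.
Distillation:
  alpha: 1.0                # distillation loss weight
  loss: soft_label          # distillation loss algorithm
QuantAware:                 # formerly `Quantization:` before this update
  onnx_format: true         # must stay true for FastDeploy deployment
  use_pact: true            # enable PACT during quantization-aware training
  activation_quantize_type: 'moving_average_abs_max'
```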


@@ -17,7 +17,7 @@ Distillation:
  - softmax_0.tmp_0
-Quantization:
+QuantAware:
  use_pact: true
  activation_bits: 8
  is_full_quantize: false
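
In the classification configs, the same rename happens around the bit-width and full-quantize switches. A sketch of the renamed block, again using only the keys visible here, with editorial comments that are my reading of the usual PaddleSlim semantics rather than text from the files:

```yaml
# Sketch of the renamed block in the classification configs.
QuantAware:
  use_pact: true            # PACT clipping during quantization-aware training
  activation_bits: 8        # quantize activations to 8 bits
  is_full_quantize: false   # quantize only the configured op types, not every supported op
```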


@@ -16,7 +16,7 @@ Distillation:
  node:
  - softmax_0.tmp_0
-Quantization:
+QuantAware:
  use_pact: true
  activation_bits: 8
  is_full_quantize: false


@@ -14,7 +14,7 @@ Distillation:
  alpha: 1.0
  loss: soft_label
-Quantization:
+QuantAware:
  onnx_format: true
  use_pact: true
  activation_quantize_type: 'moving_average_abs_max'


@@ -14,7 +14,7 @@ Distillation:
  alpha: 1.0
  loss: soft_label
-Quantization:
+QuantAware:
  onnx_format: true
  use_pact: true
  activation_quantize_type: 'moving_average_abs_max'


@@ -14,7 +14,7 @@ Distillation:
  alpha: 1.0
  loss: soft_label
-Quantization:
+QuantAware:
  onnx_format: true
  use_pact: true
  activation_quantize_type: 'moving_average_abs_max'


@@ -14,12 +14,13 @@ Distillation:
  alpha: 1.0
  loss: soft_label
-Quantization:
+QuantAware:
  onnx_format: true
  activation_quantize_type: 'moving_average_abs_max'
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - conv2d_transpose
+PTQ:
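
This hunk also adds a `PTQ:` section after the quantize op list, so a quant-aware strategy and a post-training quantization strategy now sit side by side in one file. The sketch below shows that shape; the PTQ body is truncated in this diff, so it is left empty rather than guessed at.

```yaml
# Shape of the config after this hunk: QuantAware and PTQ side by side.
# The PTQ body is not visible in the diff, so no fields are filled in.
Distillation:
  alpha: 1.0
  loss: soft_label
QuantAware:
  onnx_format: true
  activation_quantize_type: 'moving_average_abs_max'
  quantize_op_types:        # operator types to quantize
  - conv2d
  - depthwise_conv2d
  - conv2d_transpose
PTQ:                        # post-training quantization settings (truncated in this diff)
```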


@@ -14,7 +14,7 @@ Distillation:
  alpha: 1.0
  loss: soft_label
-Quantization:
+QuantAware:
  onnx_format: true
  activation_quantize_type: 'moving_average_abs_max'
  quantize_op_types:


@@ -17,7 +17,7 @@ Distillation:
  node:
  - conv2d_94.tmp_0
-Quantization:
+QuantAware:
  onnx_format: True
  quantize_op_types:
  - conv2d
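
The final hunk targets a detection model: distillation is pinned to a specific intermediate tensor and quantization is restricted to conv2d operators. Reassembled from only what this hunk shows, with comments as editorial glosses:

```yaml
# Sketch from the final hunk: node-targeted distillation plus
# conv2d-only quantization; fields outside the hunk are omitted.
Distillation:
  node:
  - conv2d_94.tmp_0         # intermediate tensor used as the distillation target
QuantAware:
  onnx_format: True         # ONNX-standard quant format, required by FastDeploy
  quantize_op_types:
  - conv2d                  # restrict quantization to conv2d operators
```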