[Quantization] Add new PaddleClas models quantization support. (#864)

* Fix links in readme

* Fix links in readme

* Update PPOCRv2/v3 examples

* Update auto compression configs

* Add new quantization support for PaddleClas models

* Update quantized Yolov6s model download link
yunyaoXYY
2022-12-13 10:21:44 +08:00
committed by GitHub
parent 7415552cb2
commit 5fc6cf30df
8 changed files with 207 additions and 2 deletions


@@ -23,7 +23,7 @@ cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
 make -j
 # Download the quantized yolov6s model and test image provided by FastDeploy
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model.tar
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model_new.tar
 tar -xvf yolov6s_qat_model.tar
 wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg


@@ -17,7 +17,7 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git
 cd examples/slim/yolov6/python
 # Download the quantized yolov6s model and test image provided by FastDeploy
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model.tar
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model_new.tar
 tar -xvf yolov6s_qat_model.tar
 wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
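
Once the archive is extracted, the quantized model can be loaded through FastDeploy's Python API. The snippet below is only a minimal, hedged sketch: it assumes the tarball unpacks into a `yolov6s_qat_model/` directory containing `model.pdmodel`/`model.pdiparams`, which is not spelled out in this diff, so adjust the paths to whatever the archive actually contains.

```python
# Hedged sketch: running the quantized YOLOv6s model with FastDeploy.
# The directory and file names are assumptions about the tarball layout.
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu()
option.use_trt_backend()  # an INT8-capable backend is needed to benefit from quantization

model = fd.vision.detection.YOLOv6(
    "yolov6s_qat_model/model.pdmodel",
    "yolov6s_qat_model/model.pdiparams",
    runtime_option=option,
    model_format=fd.ModelFormat.PADDLE)

im = cv2.imread("000000014439.jpg")  # the test image downloaded above
result = model.predict(im)
print(result)
```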


@@ -99,6 +99,11 @@ FastDeploy currently provides users with compression [config](./configs/) files for multiple models
| -------------------- | ------------------------------------------------------------ |----------------------------------------- |
| [mobilenetv1_ssld_quant](./configs/classification/mobilenetv1_ssld_quant.yaml) | [mobilenetv1_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_ssld_infer.tgz) | |
| [resnet50_vd_quant](./configs/classification/resnet50_vd_quant.yaml) | [resnet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz) | |
| [efficientnetb0_quant](./configs/classification/efficientnetb0_quant.yaml) | [efficientnetb0](https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB0_small_infer.tgz) | |
| [mobilenetv3_large_x1_0_quant](./configs/classification/mobilenetv3_large_x1_0_quant.yaml) | [mobilenetv3_large_x1_0](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_large_x1_0_ssld_infer.tgz) | |
| [pphgnet_tiny_quant](./configs/classification/pphgnet_tiny_quant.yaml) | [pphgnet_tiny](https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_tiny_ssld_infer.tgz) | |
| [pplcnetv2_base_quant](./configs/classification/pplcnetv2_base_quant.yaml) | [pplcnetv2_base](https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNetV2_base_infer.tgz) | |
| [yolov5s_quant](./configs/detection/yolov5s_quant.yaml) | [yolov5s](https://paddle-slim-models.bj.bcebos.com/act/yolov5s.onnx) | |
| [yolov6s_quant](./configs/detection/yolov6s_quant.yaml) | [yolov6s](https://paddle-slim-models.bj.bcebos.com/act/yolov6s.onnx) | |
| [yolov7_quant](./configs/detection/yolov7_quant.yaml) | [yolov7](https://paddle-slim-models.bj.bcebos.com/act/yolov7.onnx) | |


@@ -107,6 +107,10 @@ FastDeploy currently provides users with compression [config](./configs/) files
| -------------------- | ------------------------------------------------------------ |----------------------------------------- |
| [mobilenetv1_ssld_quant](./configs/classification/mobilenetv1_ssld_quant.yaml) | [mobilenetv1_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_ssld_infer.tgz) | |
| [resnet50_vd_quant](./configs/classification/resnet50_vd_quant.yaml) | [resnet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz) | |
| [efficientnetb0_quant](./configs/classification/efficientnetb0_quant.yaml) | [efficientnetb0](https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB0_small_infer.tgz) | |
| [mobilenetv3_large_x1_0_quant](./configs/classification/mobilenetv3_large_x1_0_quant.yaml) | [mobilenetv3_large_x1_0](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_large_x1_0_ssld_infer.tgz) | |
| [pphgnet_tiny_quant](./configs/classification/pphgnet_tiny_quant.yaml) | [pphgnet_tiny](https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_tiny_ssld_infer.tgz) | |
| [pplcnetv2_base_quant](./configs/classification/pplcnetv2_base_quant.yaml) | [pplcnetv2_base](https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNetV2_base_infer.tgz) | |
| [yolov5s_quant](./configs/detection/yolov5s_quant.yaml) | [yolov5s](https://paddle-slim-models.bj.bcebos.com/act/yolov5s.onnx) | |
| [yolov6s_quant](./configs/detection/yolov6s_quant.yaml) | [yolov6s](https://paddle-slim-models.bj.bcebos.com/act/yolov6s.onnx) | |
| [yolov7_quant](./configs/detection/yolov7_quant.yaml) | [yolov7](https://paddle-slim-models.bj.bcebos.com/act/yolov7.onnx) | |
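
After one of the classification configs listed above has been used to produce a quantized PaddleClas model, it can be deployed like any other PaddleClas model. A minimal, hedged sketch follows; the quantized-model directory name and the test image are illustrative placeholders, not artifacts documented in this commit.

```python
# Hedged sketch: deploying a quantized PaddleClas model with FastDeploy.
# "mobilenetv3_large_x1_0_qat/" and the image path are placeholders.
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu()
option.use_trt_backend()  # INT8-capable backend

model = fd.vision.classification.PaddleClasModel(
    "mobilenetv3_large_x1_0_qat/inference.pdmodel",
    "mobilenetv3_large_x1_0_qat/inference.pdiparams",
    "mobilenetv3_large_x1_0_qat/inference_cls.yaml",
    runtime_option=option)

im = cv2.imread("ILSVRC2012_val_00000010.jpeg")
result = model.predict(im)
print(result)
```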


@@ -0,0 +1,50 @@
Global:
  model_dir: ./EfficientNetB0_small_infer/
  format: 'paddle'
  model_filename: inference.pdmodel
  params_filename: inference.pdiparams
  qat_image_path: ./ImageNet_val_640
  ptq_image_path: ./ImageNet_val_640
  input_list: ['inputs']
  qat_preprocess: cls_image_preprocess
  ptq_preprocess: cls_image_preprocess
  qat_batch_size: 32

Distillation:
  alpha: 1.0
  loss: l2
  node:
  - softmax_0.tmp_0

QuantAware:
  use_pact: true
  activation_bits: 8
  is_full_quantize: false
  onnx_format: True
  activation_quantize_type: moving_average_abs_max
  weight_quantize_type: channel_wise_abs_max
  not_quant_pattern:
  - skip_quant
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - matmul
  - matmul_v2
  weight_bits: 8

TrainConfig:
  epochs: 1
  eval_iter: 500
  learning_rate:
    type: CosineAnnealingDecay
    learning_rate: 0.015
    T_max: 8000
  optimizer_builder:
    optimizer:
      type: Momentum
    weight_decay: 0.00002
  origin_metric: 0.7738

PTQ:
  calibration_method: 'avg'    # option: avg, abs_max, hist, KL, mse
  skip_tensor_list: None
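
A config like the one above bundles everything the compression tooling needs: `Global` describes the FP32 model and the calibration/training images, `Distillation`/`QuantAware`/`TrainConfig` drive quantization-aware training, and `PTQ` covers offline post-training quantization. The sketch below only shows how such a file can be read and split by method with PyYAML; it is an illustration, not the FastDeploy tool's actual parsing code.

```python
# Hedged sketch: inspecting an auto-compression config. Assumes PyYAML is installed;
# how the FastDeploy tool actually consumes these files may differ.
import yaml

with open("configs/classification/efficientnetb0_quant.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["Global"]["model_dir"], cfg["Global"]["input_list"])

method = "QAT"  # or "PTQ"
if method == "QAT":
    # Quantization-aware training uses the distillation, quantization and training sections.
    active = {k: cfg[k] for k in ("Distillation", "QuantAware", "TrainConfig") if k in cfg}
else:
    # Post-training quantization only needs the PTQ section.
    active = {"PTQ": cfg["PTQ"]}
print(list(active))
```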


@@ -0,0 +1,46 @@
Global:
  model_dir: ./MobileNetV3_large_x1_0_ssld_infer/
  format: 'paddle'
  model_filename: inference.pdmodel
  params_filename: inference.pdiparams
  qat_image_path: ./ImageNet_val_640
  ptq_image_path: ./ImageNet_val_640
  input_list: ['inputs']
  qat_preprocess: cls_image_preprocess
  ptq_preprocess: cls_image_preprocess
  qat_batch_size: 128

Distillation:
  alpha: 1.0
  loss: soft_label

QuantAware:
  use_pact: true
  activation_bits: 8
  is_full_quantize: false
  onnx_format: True
  activation_quantize_type: moving_average_abs_max
  weight_quantize_type: channel_wise_abs_max
  not_quant_pattern:
  - skip_quant
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - matmul
  - matmul_v2
  weight_bits: 8

TrainConfig:
  epochs: 2
  eval_iter: 5000
  learning_rate: 0.001
  optimizer_builder:
    optimizer:
      type: Momentum
    weight_decay: 0.00002
  origin_metric: 0.7896

PTQ:
  calibration_method: 'avg'    # option: avg, abs_max, hist, KL, mse
  skip_tensor_list: None


@@ -0,0 +1,50 @@
Global:
  model_dir: ./PPHGNet_tiny_ssld_infer/
  format: 'paddle'
  model_filename: inference.pdmodel
  params_filename: inference.pdiparams
  qat_image_path: ./ImageNet_val_640
  ptq_image_path: ./ImageNet_val_640
  input_list: ['x']
  qat_preprocess: cls_image_preprocess
  ptq_preprocess: cls_image_preprocess
  qat_batch_size: 32

Distillation:
  alpha: 1.0
  loss: l2
  node:
  - softmax_1.tmp_0

QuantAware:
  use_pact: true
  activation_bits: 8
  is_full_quantize: false
  onnx_format: True
  activation_quantize_type: moving_average_abs_max
  weight_quantize_type: channel_wise_abs_max
  not_quant_pattern:
  - skip_quant
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - matmul
  - matmul_v2
  weight_bits: 8

TrainConfig:
  epochs: 1
  eval_iter: 500
  learning_rate:
    type: CosineAnnealingDecay
    learning_rate: 0.015
    T_max: 8000
  optimizer_builder:
    optimizer:
      type: Momentum
    weight_decay: 0.00002
  origin_metric: 0.7959

PTQ:
  calibration_method: 'avg'    # option: avg, abs_max, hist, KL, mse
  skip_tensor_list: None


@@ -0,0 +1,50 @@
Global:
  model_dir: ./PPLCNetV2_base_infer/
  format: 'paddle'
  model_filename: inference.pdmodel
  params_filename: inference.pdiparams
  qat_image_path: ./ImageNet_val_640
  ptq_image_path: ./ImageNet_val_640
  input_list: ['x']
  qat_preprocess: cls_image_preprocess
  ptq_preprocess: cls_image_preprocess
  qat_batch_size: 32

Distillation:
  alpha: 1.0
  loss: l2
  node:
  - softmax_1.tmp_0

QuantAware:
  use_pact: true
  activation_bits: 8
  is_full_quantize: false
  onnx_format: True
  activation_quantize_type: moving_average_abs_max
  weight_quantize_type: channel_wise_abs_max
  not_quant_pattern:
  - skip_quant
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - matmul
  - matmul_v2
  weight_bits: 8

TrainConfig:
  epochs: 1
  eval_iter: 500
  learning_rate:
    type: CosineAnnealingDecay
    learning_rate: 0.015
    T_max: 8000
  optimizer_builder:
    optimizer:
      type: Momentum
    weight_decay: 0.00002
  origin_metric: 0.7704

PTQ:
  calibration_method: 'avg'    # option: avg, abs_max, hist, KL, mse
  skip_tensor_list: None
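
For reference, the `TrainConfig` settings used by these classification configs map onto Paddle's standard schedule and optimizer objects roughly as sketched below. This is only an illustration of what the values mean, not the auto-compression tool's actual construction code; the `Linear` layer is a stand-in that merely supplies parameters.

```python
# Hedged sketch: what TrainConfig's CosineAnnealingDecay + Momentum settings
# correspond to in plain Paddle. The Linear layer is a placeholder model.
import paddle

model = paddle.nn.Linear(8, 8)  # stands in for the network being fine-tuned
lr = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.015, T_max=8000)
opt = paddle.optimizer.Momentum(
    learning_rate=lr,
    weight_decay=0.00002,
    parameters=model.parameters())
print(opt.get_lr())
```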