Mirror of https://github.com/MarcA711/rknn-models.git (synced 2025-10-05 07:27:08 +08:00)
build multiple suffixes and socs at once
conv.py
@@ -1,35 +1,38 @@
 from rknn.api import RKNN
+import os
 
-INPUT_MODEL = "yolov8x.onnx"
-WIDTH = 320
-HEIGHT = 320
-OUTPUT_MODEL_BASENAME = 'yolov8x'
-QUANTIZATION = False
-DATASET = './dataset_coco10.txt'
+for suffix in ["n", "s", "m", "l", "x"]:
+    for soc in ["rk3562","rk3566", "rk3568", "rk3588"]:
+        INPUT_MODEL = 'yolov8{}.onnx'.format(suffix)
+        WIDTH = 320
+        HEIGHT = 320
+        OUTPUT_MODEL_BASENAME = 'yolov8{}'.format(suffix)
+        QUANTIZATION = False
+        DATASET = './dataset_coco10.txt'
 
 # Config
 MEAN_VALUES = [[0, 0, 0]]
 STD_VALUES = [[255, 255, 255]]
 QUANT_IMG_RGB2BGR = True
 QUANTIZED_DTYPE = "asymmetric_quantized-8"
 QUANTIZED_ALGORITHM = "normal"
 QUANTIZED_METHOD = "channel"
 FLOAT_DTYPE = "float16"
 OPTIMIZATION_LEVEL = 2
-TARGET_PLATFORM = "rk3588"
+        TARGET_PLATFORM = soc
 CUSTOM_STRING = None
 REMOVE_WEIGHT = None
 COMPRESS_WEIGHT = False
 SINGLE_CORE_MODE = False
 MODEL_PRUNNING = False
 OP_TARGET = None
 DYNAMIC_INPUT = None
 
-OUTPUT_MODEL = OUTPUT_MODEL_BASENAME + '-' + str(WIDTH) + 'x' + str(HEIGHT) + ".rknn"
+        OUTPUT_MODEL_FILE = "./output/{}/{}-{}x{}-{}.rknn".format(soc, OUTPUT_MODEL_BASENAME, WIDTH, HEIGHT, soc)
+        os.makedirs("./output/{}".format(soc), exist_ok=True)
 
 rknn = RKNN()
 rknn.config(mean_values=MEAN_VALUES,
             std_values=STD_VALUES,
             quant_img_RGB2BGR=QUANT_IMG_RGB2BGR,
             quantized_dtype=QUANTIZED_DTYPE,
@@ -46,15 +49,15 @@ rknn.config(mean_values=MEAN_VALUES,
             op_target=OP_TARGET,
             dynamic_input=DYNAMIC_INPUT)
 
 # if rknn.load_pytorch("./input/" + INPUT_MODEL, [[HEIGHT, WIDTH, 3]]) != 0:
 if rknn.load_onnx("./input/" + INPUT_MODEL) != 0:
     print('Error loading model.')
     exit()
 
 if rknn.build(do_quantization=QUANTIZATION, dataset=DATASET) != 0:
     print('Error building model.')
     exit()
 
-if rknn.export_rknn("./output/" + OUTPUT_MODEL) != 0:
+        if rknn.export_rknn(OUTPUT_MODEL_FILE) != 0:
     print('Error exporting rknn model.')
     exit()
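
With the nested loops above, a single run of conv.py now emits 5 x 4 = 20 models, one per (suffix, soc) pair, each written under a per-SoC subdirectory of ./output. A minimal sketch (not part of the commit) that only prints the resulting file layout, assuming the OUTPUT_MODEL_FILE pattern and the fixed 320x320 resolution from the diff:

# Sketch only: enumerate the .rknn paths the updated conv.py writes.
for suffix in ["n", "s", "m", "l", "x"]:
    for soc in ["rk3562", "rk3566", "rk3568", "rk3588"]:
        print("./output/{soc}/yolov8{suffix}-320x320-{soc}.rknn".format(soc=soc, suffix=suffix))

For example, the rk3588 build of the nano model lands at ./output/rk3588/yolov8n-320x320-rk3588.rknn.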