From 3017ec487c73d31a71fca591c2ccb9c5598dac4c Mon Sep 17 00:00:00 2001 From: zhoushunjie Date: Fri, 4 Nov 2022 02:46:39 +0000 Subject: [PATCH 1/6] Add uie benchmark --- benchmark/benchmark_uie.py | 192 +++++++++++++++++++++++++++++++++ benchmark/run_benchmark_uie.sh | 24 +++++ 2 files changed, 216 insertions(+) create mode 100644 benchmark/benchmark_uie.py create mode 100644 benchmark/run_benchmark_uie.sh diff --git a/benchmark/benchmark_uie.py b/benchmark/benchmark_uie.py new file mode 100644 index 000000000..d7f74a048 --- /dev/null +++ b/benchmark/benchmark_uie.py @@ -0,0 +1,192 @@ +import numpy as np +import os +import time +import distutils.util +import sys +import json + +from paddlenlp.utils.log import logger +import fastdeploy as fd +from fastdeploy.text import UIEModel, SchemaLanguage +import pynvml +import psutil +import GPUtil +import multiprocessing + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_dir", + required=True, + help="The directory of model and tokenizer.") + parser.add_argument( + "--data_path", required=True, help="The path of uie data.") + parser.add_argument( + "--device", + type=str, + default='cpu', + choices=['gpu', 'cpu'], + help="Type of inference device, support 'cpu' or 'gpu'.") + parser.add_argument( + "--backend", + type=str, + default='pp', + choices=['ort', 'pp', 'trt', 'pp-trt', 'openvino'], + help="The inference runtime backend.") + parser.add_argument( + "--device_id", type=int, default=0, help="device(gpu) id") + parser.add_argument( + "--batch_size", type=int, default=1, help="The batch size of data.") + parser.add_argument( + "--max_length", + type=int, + default=128, + help="The max length of sequence.") + parser.add_argument( + "--log_interval", + type=int, + default=10, + help="The interval of logging.") + parser.add_argument( + "--cpu_num_threads", + type=int, + default=1, + help="The number of threads when inferring on cpu.") + parser.add_argument( + "--use_fp16", + type=distutils.util.strtobool, + default=False, + help="Use FP16 mode") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + if args.device == 'cpu': + option.use_cpu() + option.set_cpu_thread_num(args.cpu_num_threads) + else: + option.use_gpu(args.device_id) + if args.backend == 'pp': + option.use_paddle_backend() + elif args.backend == 'ort': + option.use_ort_backend() + elif args.backend == 'openvino': + option.use_openvino_backend() + else: + option.use_trt_backend() + if args.backend == 'pp-trt': + option.enable_paddle_to_trt() + option.enable_paddle_trt_collect_shape() + trt_file = os.path.join(args.model_dir, "infer.trt") + option.set_trt_input_shape( + 'input_ids', + min_shape=[1, args.max_length], + opt_shape=[args.batch_size, args.max_length], + max_shape=[args.batch_size, args.max_length]) + option.set_trt_input_shape( + 'token_type_ids', + min_shape=[1, args.max_length], + opt_shape=[args.batch_size, args.max_length], + max_shape=[args.batch_size, args.max_length]) + option.set_trt_input_shape( + 'pos_ids', + min_shape=[1, args.max_length], + opt_shape=[args.batch_size, args.max_length], + max_shape=[args.batch_size, args.max_length]) + option.set_trt_input_shape( + 'att_mask', + min_shape=[1, args.max_length], + opt_shape=[args.batch_size, args.max_length], + max_shape=[args.batch_size, args.max_length]) + if args.use_fp16: + option.enable_trt_fp16() + trt_file = trt_file + ".fp16" + option.set_trt_cache_file(trt_file) + return option + + +def 
get_current_memory_mb(gpu_id=None):
+    pid = os.getpid()
+    p = psutil.Process(pid)
+    info = p.memory_full_info()
+    cpu_mem = info.uss / 1024. / 1024.
+    gpu_mem = 0
+    if gpu_id is not None:
+        pynvml.nvmlInit()
+        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
+        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
+        gpu_mem = meminfo.used / 1024. / 1024.
+    return cpu_mem, gpu_mem
+
+
+def get_current_gputil(gpu_id):
+    GPUs = GPUtil.getGPUs()
+    gpu_load = GPUs[gpu_id].load
+    return gpu_load
+
+
+def sample_gpuutil(gpu_id, gpu_utilization=[]):
+    while True:
+        gpu_utilization.append(get_current_gputil(gpu_id))
+        time.sleep(0.01)
+
+
+def get_dataset(data_path, max_seq_len=512):
+    json_lines = []
+    with open(data_path, 'r', encoding='utf-8') as f:
+        for line in f:
+            json_line = json.loads(line)
+            content = json_line['content'].strip()
+            prompt = json_line['prompt']
+            # The model input looks like: [CLS] Prompt [SEP] Content [SEP]
+            # It includes three special tokens.
+            if max_seq_len <= len(prompt) + 3:
+                raise ValueError(
+                    "The value of max_seq_len is too small, please set a larger value"
+                )
+            json_lines.append(json_line)
+
+    return json_lines
+
+
+def run_inference(ds, uie):
+    for i, sample in enumerate(ds):
+        uie.set_schema([sample['prompt']])
+        result = uie.predict([sample['content']])
+        if (i + 1) % args.log_interval == 0:
+            runtime_statis = uie.print_statis_info_of_runtime()
+            print(f"Step {i + 1}:")
+            print(runtime_statis)
+            print()
+
+    runtime_statis = uie.print_statis_info_of_runtime()
+    print(f"Final:")
+    print(runtime_statis)
+    print()
+
+
+if __name__ == '__main__':
+    args = parse_arguments()
+    runtime_option = build_option(args)
+    model_path = os.path.join(args.model_dir, "inference.pdmodel")
+    param_path = os.path.join(args.model_dir, "inference.pdiparams")
+    vocab_path = os.path.join(args.model_dir, "vocab.txt")
+
+    ds = get_dataset(args.data_path)
+    schema = ["时间"]
+    uie = UIEModel(
+        model_path,
+        param_path,
+        vocab_path,
+        position_prob=0.5,
+        max_length=args.max_length,
+        schema=schema,
+        runtime_option=runtime_option,
+        schema_language=SchemaLanguage.ZH)
+
+    uie.enable_record_time_of_runtime()
+    run_inference(ds, uie)
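Note: get_dataset() above expects --data_path to point at a JSON Lines file, one record per line, each carrying a "content" and a "prompt" key; the prompt is prepended to the content when the model input is built. A minimal sketch of a compatible file follows. The real reimbursement_form_data.txt is not shown here, so the values below are invented for illustration only:

    {"content": "报销单提交时间为2022年11月4日", "prompt": "时间"}
    {"content": "出差交通费共计500元", "prompt": "金额"}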
diff --git a/benchmark/run_benchmark_uie.sh b/benchmark/run_benchmark_uie.sh
new file mode 100644
index 000000000..8f0c03ee3
--- /dev/null
+++ b/benchmark/run_benchmark_uie.sh
@@ -0,0 +1,24 @@
+# wget https://bj.bcebos.com/fastdeploy/benchmark/uie/reimbursement_form_data.txt
+# wget https://bj.bcebos.com/fastdeploy/models/uie/uie-base.tgz
+
+# GPU
+## FP32 Model
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device gpu
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device gpu
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device gpu
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device gpu
+
+## INT8 Model
+python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend pp-trt --device gpu
+python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend trt --device gpu
+
+# CPU
+## FP32 Model
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device cpu
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device cpu
+python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend openvino --device cpu
+
+## INT8 Model
+
+python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend pp --device cpu
+python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend ort --device cpu

From 2ac94e91beca5ed974c9a41189dffb619267a7ba Mon Sep 17 00:00:00 2001
From: zhoushunjie
Date: Fri, 4 Nov 2022 03:47:47 +0000
Subject: [PATCH 2/6] fix trt dy shape

---
 benchmark/benchmark_uie.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/benchmark/benchmark_uie.py b/benchmark/benchmark_uie.py
index d7f74a048..a97bb026f 100644
--- a/benchmark/benchmark_uie.py
+++ b/benchmark/benchmark_uie.py
@@ -84,23 +84,23 @@ def build_option(args):
         trt_file = os.path.join(args.model_dir, "infer.trt")
         option.set_trt_input_shape(
             'input_ids',
-            min_shape=[1, args.max_length],
-            opt_shape=[args.batch_size, args.max_length],
+            min_shape=[1, 1],
+            opt_shape=[args.batch_size, args.max_length // 2],
             max_shape=[args.batch_size, args.max_length])
         option.set_trt_input_shape(
             'token_type_ids',
-            min_shape=[1, args.max_length],
-            opt_shape=[args.batch_size, args.max_length],
+            min_shape=[1, 1],
+            opt_shape=[args.batch_size, args.max_length // 2],
             max_shape=[args.batch_size, args.max_length])
         option.set_trt_input_shape(
             'pos_ids',
-            min_shape=[1, args.max_length],
-            opt_shape=[args.batch_size, args.max_length],
+            min_shape=[1, 1],
+            opt_shape=[args.batch_size, args.max_length // 2],
             max_shape=[args.batch_size, args.max_length])
         option.set_trt_input_shape(
             'att_mask',
-            min_shape=[1, args.max_length],
-            opt_shape=[args.batch_size, args.max_length],
+            min_shape=[1, 1],
+            opt_shape=[args.batch_size, args.max_length // 2],
             max_shape=[args.batch_size, args.max_length])
         if args.use_fp16:
             option.enable_trt_fp16()
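Patch 2 loosens the TensorRT dynamic-shape profile: instead of pinning every input to exactly [batch_size, max_length], the engine now accepts any sequence length from a single token up to --max_length, with kernels tuned for a half-length sequence. A standalone sketch of the same idea, using the fastdeploy calls from the patch itself; the shape values assume batch_size=1 and max_length=128, and the loop over input names is just shorthand:

    import fastdeploy as fd

    option = fd.RuntimeOption()
    option.use_gpu(0)
    option.use_trt_backend()
    # min covers a single token, opt tunes kernels for a typical
    # half-length sequence, max bounds the engine at max_length.
    for name in ('input_ids', 'token_type_ids', 'pos_ids', 'att_mask'):
        option.set_trt_input_shape(
            name, min_shape=[1, 1], opt_shape=[1, 64], max_shape=[1, 128])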
From 4f7233c11fb9ccca7d1544fcf209b5e3cd500c3a Mon Sep 17 00:00:00 2001
From: zhoushunjie
Date: Tue, 27 Dec 2022 04:55:46 +0000
Subject: [PATCH 3/6] update uie benchmark

---
 benchmark/benchmark_uie.py     | 31 +++++++++++++++++++----------
 benchmark/run_benchmark_uie.sh | 36 ++++++++++++++++------------------
 2 files changed, 38 insertions(+), 29 deletions(-)

diff --git a/benchmark/benchmark_uie.py b/benchmark/benchmark_uie.py
index a97bb026f..8e73c34f1 100644
--- a/benchmark/benchmark_uie.py
+++ b/benchmark/benchmark_uie.py
@@ -5,7 +5,6 @@ import distutils.util
 import sys
 import json
 
-from paddlenlp.utils.log import logger
 import fastdeploy as fd
 from fastdeploy.text import UIEModel, SchemaLanguage
 import pynvml
@@ -60,6 +59,8 @@ def parse_arguments():
         type=distutils.util.strtobool,
         default=False,
         help="Use FP16 mode")
+    parser.add_argument(
+        "--epoch", type=int, default=1, help="The number of test epochs.")
     return parser.parse_args()
 
 
@@ -153,19 +154,29 @@ def get_dataset(data_path, max_seq_len=512):
     return json_lines
 
 
-def run_inference(ds, uie):
-    for i, sample in enumerate(ds):
+def run_inference(ds, uie, epoch=1, warmup_steps=10):
+    for j, sample in enumerate(ds):
+        if j > warmup_steps:
+            break
         uie.set_schema([sample['prompt']])
         result = uie.predict([sample['content']])
-        if (i + 1) % args.log_interval == 0:
-            runtime_statis = uie.print_statis_info_of_runtime()
-            print(f"Step {i + 1}:")
-            print(runtime_statis)
-            print()
-
+    print(f"Run {warmup_steps} steps to warm up")
+    start = time.time()
+    for ep in range(epoch):
+        curr_start = time.time()
+        for i, sample in enumerate(ds):
+            uie.set_schema([sample['prompt']])
+            result = uie.predict([sample['content']])
+        print(
+            f"Epoch {ep} average time = {(time.time() - curr_start) * 1000.0 / (len(ds)):.4f} ms"
+        )
+    end = time.time()
     runtime_statis = uie.print_statis_info_of_runtime()
     print(f"Final:")
     print(runtime_statis)
+    print(
+        f"Total average time = {(end - start) * 1000.0 / (len(ds) * epoch):.4f} ms"
+    )
     print()
 
 
@@ -189,4 +200,4 @@ if __name__ == '__main__':
         schema_language=SchemaLanguage.ZH)
 
     uie.enable_record_time_of_runtime()
-    run_inference(ds, uie)
+    run_inference(ds, uie, args.epoch)
diff --git a/benchmark/run_benchmark_uie.sh b/benchmark/run_benchmark_uie.sh
index 8f0c03ee3..58030d5f6 100644
--- a/benchmark/run_benchmark_uie.sh
+++ b/benchmark/run_benchmark_uie.sh
@@ -1,24 +1,22 @@
 # wget https://bj.bcebos.com/fastdeploy/benchmark/uie/reimbursement_form_data.txt
 # wget https://bj.bcebos.com/fastdeploy/models/uie/uie-base.tgz
-
+# tar xvfz uie-base.tgz
 # GPU
-## FP32 Model
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device gpu
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device gpu
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device gpu
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device gpu
-
-## INT8 Model
-python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend pp-trt --device gpu
-python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend trt --device gpu
+echo "-------------------------------GPU Benchmark---------------------------------------"
+python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device gpu
+python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device gpu
+python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device gpu --use_fp16 False
+python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device gpu --use_fp16 False
+python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device gpu --use_fp16 True
+python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device gpu --use_fp16 True
+echo "-----------------------------------------------------------------------------------"
 
 # CPU
-## FP32 Model
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device cpu
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device cpu
-python benchmark_uie.py --model_dir uie-base --data_path reimbursement_form_data.txt --backend openvino --device cpu
-
-## INT8 Model
-
-python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend pp --device cpu
-python benchmark_uie.py --model_dir uie_bs1_lr1e-5_qat_final_format_4inputs --data_path reimbursement_form_data.txt --backend ort --device cpu
+echo "-------------------------------CPU Benchmark---------------------------------------"
+for cpu_num_threads in 1 2 4 8 16;
+do
+    python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device cpu --cpu_num_threads ${cpu_num_threads}
+    python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device cpu --cpu_num_threads ${cpu_num_threads}
+    python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend openvino --device cpu --cpu_num_threads ${cpu_num_threads}
+done
+echo "-----------------------------------------------------------------------------------"
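Patch 4 below replaces the pynvml/GPUtil/psutil samplers from patch 1 with a Monitor class that polls nvidia-smi in a subprocess every 50 ms (and a psutil child process for CPU stats), keeping the per-key maxima as the reported peaks. For reference, a one-shot version of the query it runs, with the same keys as the patch's StatBase.gpu_keys; this is a simplified sketch, not the patch's code:

    import subprocess

    keys = ('index', 'uuid', 'name', 'timestamp', 'memory.total',
            'memory.free', 'memory.used', 'utilization.gpu',
            'utilization.memory')
    out = subprocess.check_output(
        ['nvidia-smi', '--id=0', '--query-gpu=' + ','.join(keys),
         '--format=csv,noheader,nounits']).decode('utf-8')
    # csv output is comma-plus-space separated, one line per GPU queried
    sample = dict(zip(keys, out.strip().split(', ')))
    print(sample['memory.used'])  # MiB, since nounits strips the suffix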
From 51e346ea0945978d6367ea178cd7794d9c47163d Mon Sep 17 00:00:00 2001
From: zhoushunjie
Date: Thu, 29 Dec 2022 10:29:32 +0000
Subject: [PATCH 4/6] Update uie benchmark output

---
 benchmark/benchmark_uie.py     | 240 ++++++++++++++++++++++++---------
 benchmark/run_benchmark_uie.sh |  23 ++--
 2 files changed, 193 insertions(+), 70 deletions(-)

diff --git a/benchmark/benchmark_uie.py b/benchmark/benchmark_uie.py
index 8e73c34f1..40c3ac4c4 100644
--- a/benchmark/benchmark_uie.py
+++ b/benchmark/benchmark_uie.py
@@ -7,10 +7,6 @@ import json
 
 import fastdeploy as fd
 from fastdeploy.text import UIEModel, SchemaLanguage
-import pynvml
-import psutil
-import GPUtil
-import multiprocessing
 
 
 def parse_arguments():
@@ -44,23 +40,23 @@ def parse_arguments():
         type=int,
         default=128,
         help="The max length of sequence.")
-    parser.add_argument(
-        "--log_interval",
-        type=int,
-        default=10,
-        help="The interval of logging.")
     parser.add_argument(
         "--cpu_num_threads",
         type=int,
-        default=1,
+        default=8,
         help="The number of threads when inferring on cpu.")
     parser.add_argument(
-        "--use_fp16",
+        "--enable_trt_fp16",
         type=distutils.util.strtobool,
         default=False,
-        help="Use FP16 mode")
+        help="Whether to enable FP16 in the TensorRT backend.")
     parser.add_argument(
         "--epoch", type=int, default=1, help="The number of test epochs.")
+    parser.add_argument(
+        "--enable_collect_memory_info",
+        type=ast.literal_eval,
+        default=False,
+        help="Whether to collect memory usage info.")
     return parser.parse_args()
 
 
@@ -99,37 +95,116 @@ def build_option(args):
             min_shape=[1, 1],
             opt_shape=[args.batch_size, args.max_length // 2],
             max_shape=[args.batch_size, args.max_length])
-        if args.use_fp16:
+        if args.enable_trt_fp16:
             option.enable_trt_fp16()
             trt_file = trt_file + ".fp16"
         option.set_trt_cache_file(trt_file)
     return option
 
 
-def get_current_memory_mb(gpu_id=None):
-    pid = os.getpid()
-    p = psutil.Process(pid)
-    info = p.memory_full_info()
-    cpu_mem = info.uss / 1024. / 1024.
-    gpu_mem = 0
-    if gpu_id is not None:
-        pynvml.nvmlInit()
-        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
-        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
-        gpu_mem = meminfo.used / 1024. / 1024.
- return cpu_mem, gpu_mem +class StatBase(object): + """StatBase""" + nvidia_smi_path = "nvidia-smi" + gpu_keys = ('index', 'uuid', 'name', 'timestamp', 'memory.total', + 'memory.free', 'memory.used', 'utilization.gpu', + 'utilization.memory') + nu_opt = ',nounits' + cpu_keys = ('cpu.util', 'memory.util', 'memory.used') -def get_current_gputil(gpu_id): - GPUs = GPUtil.getGPUs() - gpu_load = GPUs[gpu_id].load - return gpu_load +class Monitor(StatBase): + """Monitor""" + def __init__(self, use_gpu=False, gpu_id=0, interval=0.1): + self.result = {} + self.gpu_id = gpu_id + self.use_gpu = use_gpu + self.interval = interval + self.cpu_stat_q = multiprocessing.Queue() -def sample_gpuutil(gpu_id, gpu_utilization=[]): - while True: - gpu_utilization.append(get_current_gputil(gpu_id)) - time.sleep(0.01) + def start(self): + cmd = '%s --id=%s --query-gpu=%s --format=csv,noheader%s -lms 50' % ( + StatBase.nvidia_smi_path, self.gpu_id, ','.join(StatBase.gpu_keys), + StatBase.nu_opt) + if self.use_gpu: + self.gpu_stat_worker = subprocess.Popen( + cmd, + stderr=subprocess.STDOUT, + stdout=subprocess.PIPE, + shell=True, + close_fds=True, + preexec_fn=os.setsid) + # cpu stat + pid = os.getpid() + self.cpu_stat_worker = multiprocessing.Process( + target=self.cpu_stat_func, + args=(self.cpu_stat_q, pid, self.interval)) + self.cpu_stat_worker.start() + + def stop(self): + try: + if self.use_gpu: + os.killpg(self.gpu_stat_worker.pid, signal.SIGUSR1) + # os.killpg(p.pid, signal.SIGTERM) + self.cpu_stat_worker.terminate() + self.cpu_stat_worker.join(timeout=0.01) + except Exception as e: + print(e) + return + + # gpu + if self.use_gpu: + lines = self.gpu_stat_worker.stdout.readlines() + lines = [ + line.strip().decode("utf-8") for line in lines + if line.strip() != '' + ] + gpu_info_list = [{ + k: v + for k, v in zip(StatBase.gpu_keys, line.split(', ')) + } for line in lines] + if len(gpu_info_list) == 0: + return + result = gpu_info_list[0] + for item in gpu_info_list: + for k in item.keys(): + if k not in ["name", "uuid", "timestamp"]: + result[k] = max(int(result[k]), int(item[k])) + else: + result[k] = max(result[k], item[k]) + self.result['gpu'] = result + + # cpu + cpu_result = {} + if self.cpu_stat_q.qsize() > 0: + cpu_result = { + k: v + for k, v in zip(StatBase.cpu_keys, self.cpu_stat_q.get()) + } + while not self.cpu_stat_q.empty(): + item = { + k: v + for k, v in zip(StatBase.cpu_keys, self.cpu_stat_q.get()) + } + for k in StatBase.cpu_keys: + cpu_result[k] = max(cpu_result[k], item[k]) + cpu_result['name'] = cpuinfo.get_cpu_info()['brand_raw'] + self.result['cpu'] = cpu_result + + def output(self): + return self.result + + def cpu_stat_func(self, q, pid, interval=0.0): + """cpu stat function""" + stat_info = psutil.Process(pid) + while True: + # pid = os.getpid() + cpu_util, mem_util, mem_use = stat_info.cpu_percent( + ), stat_info.memory_percent(), round(stat_info.memory_info().rss / + 1024.0 / 1024.0, 4) + q.put([cpu_util, mem_util, mem_use]) + time.sleep(interval) + return def get_dataset(data_path, max_seq_len=512): @@ -154,32 +229,6 @@ def get_dataset(data_path, max_seq_len=512): return json_lines -def run_inference(ds, uie, epoch=1, warmup_steps=10): - for j, sample in enumerate(ds): - if j > warmup_steps: - break - uie.set_schema([sample['prompt']]) - result = uie.predict([sample['content']]) - print(f"Run {warmup_steps} steps to warm up") - start = time.time() - for ep in range(epoch): - curr_start = time.time() - for i, sample in enumerate(ds): - uie.set_schema([sample['prompt']]) - result 
= uie.predict([sample['content']]) - print( - f"Epoch {ep} average time = {(time.time() - curr_start) * 1000.0 / (len(ds)):.4f} ms" - ) - end = time.time() - runtime_statis = uie.print_statis_info_of_runtime() - print(f"Final:") - print(runtime_statis) - print( - f"Total average time = {(end - start) * 1000.0 / (len(ds) * epoch):.4f} ms" - ) - print() - - if __name__ == '__main__': args = parse_arguments() runtime_option = build_option(args) @@ -187,6 +236,25 @@ if __name__ == '__main__': param_path = os.path.join(args.model_dir, "inference.pdiparams") vocab_path = os.path.join(args.model_dir, "vocab.txt") + gpu_id = args.device_id + enable_collect_memory_info = args.enable_collect_memory_info + dump_result = dict() + end2end_statis = list() + cpu_mem = list() + gpu_mem = list() + gpu_util = list() + if args.device == "cpu": + file_path = args.model_dir + "_model_" + args.backend + "_" + \ + args.device + "_" + str(args.cpu_num_thread) + ".txt" + else: + if args.enable_trt_fp16: + file_path = args.model_dir + "_model_" + \ + args.backend + "_fp16_" + args.device + ".txt" + else: + file_path = args.model_dir + "_model_" + args.backend + "_" + args.device + ".txt" + f = open(file_path, "w") + f.writelines("===={}====: \n".format(os.path.split(file_path)[-1][:-4])) + ds = get_dataset(args.data_path) schema = ["时间"] uie = UIEModel( @@ -195,9 +263,59 @@ if __name__ == '__main__': vocab_path, position_prob=0.5, max_length=args.max_length, + batch_size=args.batch_size, schema=schema, runtime_option=runtime_option, schema_language=SchemaLanguage.ZH) - uie.enable_record_time_of_runtime() - run_inference(ds, uie, args.epoch) + try: + if enable_collect_memory_info: + import multiprocessing + import subprocess + import psutil + import signal + import cpuinfo + enable_gpu = args.device == "gpu" + monitor = Monitor(enable_gpu, gpu_id) + monitor.start() + uie.enable_record_time_of_runtime() + + for ep in range(args.epoch): + for i, sample in enumerate(ds): + curr_start = time.time() + uie.set_schema([sample['prompt']]) + result = uie.predict([sample['content']]) + end2end_statis.append(time.time() - curr_start) + runtime_statis = uie.print_statis_info_of_runtime() + + warmup_iter = args.epoch * len(ds) // 5 + + end2end_statis_repeat = end2end_statis[warmup_iter:] + if enable_collect_memory_info: + monitor.stop() + mem_info = monitor.output() + dump_result["cpu_rss_mb"] = mem_info['cpu'][ + 'memory.used'] if 'cpu' in mem_info else 0 + dump_result["gpu_rss_mb"] = mem_info['gpu'][ + 'memory.used'] if 'gpu' in mem_info else 0 + dump_result["gpu_util"] = mem_info['gpu'][ + 'utilization.gpu'] if 'gpu' in mem_info else 0 + + dump_result["runtime"] = runtime_statis["avg_time"] * 1000 + dump_result["end2end"] = np.mean(end2end_statis_repeat) * 1000 + + time_cost_str = f"Runtime(ms): {dump_result['runtime']}\n" \ + f"End2End(ms): {dump_result['end2end']}\n" + f.writelines(time_cost_str) + print(time_cost_str) + + if enable_collect_memory_info: + mem_info_str = f"cpu_rss_mb: {dump_result['cpu_rss_mb']}\n" \ + f"gpu_rss_mb: {dump_result['gpu_rss_mb']}\n" \ + f"gpu_util: {dump_result['gpu_util']}\n" + f.writelines(mem_info_str) + print(mem_info_str) + except: + f.writelines("!!!!!Infer Failed\n") + + f.close() diff --git a/benchmark/run_benchmark_uie.sh b/benchmark/run_benchmark_uie.sh index 58030d5f6..5ba9e88db 100644 --- a/benchmark/run_benchmark_uie.sh +++ b/benchmark/run_benchmark_uie.sh @@ -1,22 +1,27 @@ # wget https://bj.bcebos.com/fastdeploy/benchmark/uie/reimbursement_form_data.txt # wget 
https://bj.bcebos.com/fastdeploy/models/uie/uie-base.tgz # tar xvfz uie-base.tgz + +DEVICE_ID=0 + +echo "[FastDeploy] Running UIE benchmark..." + # GPU echo "-------------------------------GPU Benchmark---------------------------------------" -python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device gpu -python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device gpu -python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device gpu --use_fp16 False -python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device gpu --use_fp16 False -python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device gpu --use_fp16 True -python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device gpu --use_fp16 True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device_id $DEVICE_ID --device gpu --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device_id $DEVICE_ID --device gpu --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 False --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 False --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 True --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 True --enable_collect_memory_info True echo "-----------------------------------------------------------------------------------" # CPU echo "-------------------------------CPU Benchmark---------------------------------------" for cpu_num_threads in 1 2 4 8 16; do - python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device cpu --cpu_num_threads ${cpu_num_threads} - python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device cpu --cpu_num_threads ${cpu_num_threads} - python benchmark_uie.py --log_interval 100 --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend openvino --device cpu --cpu_num_threads ${cpu_num_threads} + python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True + python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True + python benchmark_uie.py --epoch 5 --model_dir 
uie-base --data_path reimbursement_form_data.txt --backend openvino --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True done echo "-----------------------------------------------------------------------------------" From 34aebb1de34c692df26c76617f066c4196fb2c03 Mon Sep 17 00:00:00 2001 From: zhoushunjie Date: Thu, 29 Dec 2022 10:53:25 +0000 Subject: [PATCH 5/6] Fix cpu_num_thread->cpu_num_threads --- benchmark/benchmark_uie.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/benchmark_uie.py b/benchmark/benchmark_uie.py index 40c3ac4c4..18e7566fe 100644 --- a/benchmark/benchmark_uie.py +++ b/benchmark/benchmark_uie.py @@ -245,7 +245,7 @@ if __name__ == '__main__': gpu_util = list() if args.device == "cpu": file_path = args.model_dir + "_model_" + args.backend + "_" + \ - args.device + "_" + str(args.cpu_num_thread) + ".txt" + args.device + "_" + str(args.cpu_num_threads) + ".txt" else: if args.enable_trt_fp16: file_path = args.model_dir + "_model_" + \ From cefdadf5e2141c11247b089d04f544efbd039d06 Mon Sep 17 00:00:00 2001 From: zhoushunjie Date: Thu, 29 Dec 2022 11:09:26 +0000 Subject: [PATCH 6/6] Update backend name --- benchmark/benchmark_uie.py | 10 +++++----- benchmark/run_benchmark_uie.sh | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/benchmark/benchmark_uie.py b/benchmark/benchmark_uie.py index 18e7566fe..44c562d7e 100644 --- a/benchmark/benchmark_uie.py +++ b/benchmark/benchmark_uie.py @@ -28,8 +28,8 @@ def parse_arguments(): parser.add_argument( "--backend", type=str, - default='pp', - choices=['ort', 'pp', 'trt', 'pp-trt', 'openvino'], + default='paddle', + choices=['ort', 'paddle', 'trt', 'paddle_trt', 'ov'], help="The inference runtime backend.") parser.add_argument( "--device_id", type=int, default=0, help="device(gpu) id") @@ -67,15 +67,15 @@ def build_option(args): option.set_cpu_thread_num(args.cpu_num_threads) else: option.use_gpu(args.device_id) - if args.backend == 'pp': + if args.backend == 'paddle': option.use_paddle_backend() elif args.backend == 'ort': option.use_ort_backend() - elif args.backend == 'openvino': + elif args.backend == 'ov': option.use_openvino_backend() else: option.use_trt_backend() - if args.backend == 'pp-trt': + if args.backend == 'paddle_trt': option.enable_paddle_to_trt() option.enable_paddle_trt_collect_shape() trt_file = os.path.join(args.model_dir, "infer.trt") diff --git a/benchmark/run_benchmark_uie.sh b/benchmark/run_benchmark_uie.sh index 5ba9e88db..51eb5d973 100644 --- a/benchmark/run_benchmark_uie.sh +++ b/benchmark/run_benchmark_uie.sh @@ -8,20 +8,20 @@ echo "[FastDeploy] Running UIE benchmark..." 
# GPU echo "-------------------------------GPU Benchmark---------------------------------------" -python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device_id $DEVICE_ID --device gpu --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend paddle --device_id $DEVICE_ID --device gpu --enable_collect_memory_info True python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device_id $DEVICE_ID --device gpu --enable_collect_memory_info True -python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 False --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend paddle_trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 False --enable_collect_memory_info True python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 False --enable_collect_memory_info True -python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp-trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 True --enable_collect_memory_info True +python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend paddle_trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 True --enable_collect_memory_info True python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend trt --device_id $DEVICE_ID --device gpu --enable_trt_fp16 True --enable_collect_memory_info True echo "-----------------------------------------------------------------------------------" # CPU echo "-------------------------------CPU Benchmark---------------------------------------" -for cpu_num_threads in 1 2 4 8 16; +for cpu_num_threads in 1 8; do - python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend pp --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True + python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend paddle --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ort --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True - python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend openvino --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True + python benchmark_uie.py --epoch 5 --model_dir uie-base --data_path reimbursement_form_data.txt --backend ov --device cpu --cpu_num_threads ${cpu_num_threads} --enable_collect_memory_info True done echo "-----------------------------------------------------------------------------------"
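After patches 4-6 are applied, each benchmark run writes its numbers to a per-configuration text file named from the model directory, backend, and device (see the file_path logic in patch 4, e.g. uie-base_model_paddle_gpu.txt, or uie-base_model_ov_cpu_8.txt for a CPU run with 8 threads). A small sketch for collecting those files into one summary after a run; the glob pattern and key names follow the patch's output format, but treat it as illustrative:

    import glob

    for path in sorted(glob.glob('uie-base_model_*.txt')):
        stats = {}
        with open(path) as f:
            for line in f:
                # skip the "====name====:" header; keep "key: value" lines
                if ':' in line and not line.startswith('===='):
                    key, _, value = line.partition(':')
                    stats[key.strip()] = value.strip()
        print(path, stats.get('Runtime(ms)'), stats.get('End2End(ms)'),
              stats.get('cpu_rss_mb'), stats.get('gpu_rss_mb'))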