Update evaluation function to support calculating average inference time (#106)

* Update README.md

* Update README.md

* Update README.md

* Create README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Add inference time calculation to evaluation and fix some bugs

* Update classification __init__

* Move to ppseg

Co-authored-by: Jason <jiangjiajun@baidu.com>
Author: huangjianhui
Date: 2022-08-12 17:42:09 +08:00
Committed by: GitHub
Parent: 724d3dfc85
Commit: 32047016d6
12 changed files with 124 additions and 62 deletions

View File

@@ -23,14 +23,14 @@ class PaddleClasModel(FastDeployModel):
                  model_file,
                  params_file,
                  config_file,
-                 backend_option=None,
+                 runtime_option=None,
                  model_format=Frontend.PADDLE):
-        super(PaddleClasModel, self).__init__(backend_option)
+        super(PaddleClasModel, self).__init__(runtime_option)
         assert model_format == Frontend.PADDLE, "PaddleClasModel only support model format of Frontend.Paddle now."
-        self._model = C.vision.classification.PaddleClasModel(model_file, params_file,
-                                                               config_file, self._runtime_option,
-                                                               model_format)
+        self._model = C.vision.classification.PaddleClasModel(
+            model_file, params_file, config_file, self._runtime_option,
+            model_format)
         assert self.initialized, "PaddleClas model initialize failed."

     def predict(self, input_image, topk=1):
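For callers, the visible change in this file is that the constructor keyword backend_option is renamed to runtime_option (the C binding call is also reflowed, with no behavioural change). A minimal usage sketch after this commit; the fastdeploy import path, the RuntimeOption helper, and the model/params/config file names are assumptions for illustration rather than things shown in this diff:

import cv2
import fastdeploy as fd  # assumed top-level package name

option = fd.RuntimeOption()  # assumed runtime/backend configuration object
model = fd.vision.classification.PaddleClasModel(
    "inference.pdmodel",       # hypothetical PaddleClas export files
    "inference.pdiparams",
    "inference_cls.yaml",
    runtime_option=option)     # keyword renamed from backend_option in this commit

result = model.predict(cv2.imread("test.jpg"), topk=5)  # predict signature as in the diff
print(result.label_ids)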

View File

@@ -14,6 +14,8 @@
 import numpy as np
 import os
 import re
+import time
+import collections


 def topk_accuracy(topk_list, label_list):

@@ -25,6 +27,7 @@ def topk_accuracy(topk_list, label_list):
 def eval_classify(model, image_file_path, label_file_path, topk=5):
     from tqdm import trange
     import cv2
+    import math
     result_list = []
     label_list = []

@@ -36,6 +39,7 @@ def eval_classify(model, image_file_path, label_file_path, topk=5):
         label_file_path), "The label_file_path:{} is not a file.".format(
             label_file_path)
     assert isinstance(topk, int), "The tok:{} is not int type".format(topk)
+
     with open(label_file_path, 'r') as file:
         lines = file.readlines()
         for line in lines:

@@ -44,14 +48,30 @@ def eval_classify(model, image_file_path, label_file_path, topk=5):
             label = items[1]
             image_label_dict[image_name] = int(label)
     images_num = len(image_label_dict)
+    twenty_percent_images_num = math.ceil(images_num * 0.2)
+    start_time = 0
+    end_time = 0
+    average_inference_time = 0
+    scores = collections.OrderedDict()
     for (image, label), i in zip(image_label_dict.items(),
                                  trange(
                                      images_num, desc='Inference Progress')):
+        if i == twenty_percent_images_num:
+            start_time = time.time()
         label_list.append([label])
         image_path = os.path.join(image_file_path, image)
         im = cv2.imread(image_path)
         result = model.predict(im, topk)
         result_list.append(result.label_ids)
+        if i == images_num - 1:
+            end_time = time.time()
+    average_inference_time = round(
+        (end_time - start_time) / (images_num - twenty_percent_images_num), 4)
     topk_acc_score = topk_accuracy(np.array(result_list), np.array(label_list))
-    return topk_acc_score
+    if topk == 1:
+        scores.update({'topk1': topk_acc_score})
+    elif topk == 5:
+        scores.update({'topk5': topk_acc_score})
+    scores.update({'average_inference_time': average_inference_time})
+    return scores
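The timing added to eval_classify treats the first 20% of images as warm-up: the clock starts once twenty_percent_images_num images have been processed, stops after the last image, and the average is taken over the remaining 80%. The return value also changes from a bare top-k accuracy to an OrderedDict holding 'topk1' or 'topk5' plus 'average_inference_time'. Below is a standalone sketch of the same warm-up-skipping average; run_inference and samples are placeholder names, and it assumes the dataset is large enough that at least one sample falls outside the warm-up window:

import math
import time

def average_inference_time(run_inference, samples):
    # Mirrors the logic added above: skip the first 20% of samples as warm-up,
    # then average the wall-clock time over the remaining 80%.
    total = len(samples)
    warmup = math.ceil(total * 0.2)      # samples excluded from timing
    start_time = end_time = 0
    for i, sample in enumerate(samples):
        if i == warmup:                  # clock starts after the warm-up window
            start_time = time.time()
        run_inference(sample)
        if i == total - 1:               # clock stops on the last sample
            end_time = time.time()
    # Assumes total > warmup, as the evaluation loops above also do.
    return round((end_time - start_time) / (total - warmup), 4)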

View File

@@ -15,6 +15,7 @@
 import numpy as np
 import copy
 import collections
+import math


 def eval_detection(model,

@@ -26,7 +27,7 @@ def eval_detection(model,
     from .utils import CocoDetection
     from .utils import COCOMetric
     import cv2
-    from tqdm import trange
+    from tqdm import trange

     if conf_threshold is not None or nms_iou_threshold is not None:
         assert conf_threshold is not None and nms_iou_threshold is not None, "The conf_threshold and nms_iou_threshold should be setted at the same time"

@@ -48,9 +49,15 @@ def eval_detection(model,
     eval_metric = COCOMetric(
         coco_gt=copy.deepcopy(eval_dataset.coco_gt), classwise=False)
     scores = collections.OrderedDict()
+    twenty_percent_image_num = math.ceil(image_num * 0.2)
+    start_time = 0
+    end_time = 0
+    average_inference_time = 0
     for image_info, i in zip(all_image_info,
                              trange(
                                  image_num, desc="Inference Progress")):
+        if i == twenty_percent_image_num:
+            start_time = time.time()
         im = cv2.imread(image_info["image"])
         im_id = image_info["im_id"]
         if conf_threshold is None and nms_iou_threshold is None:

@@ -66,8 +73,13 @@ def eval_detection(model,
             'im_id': im_id
         }
         eval_metric.update(im_id, pred)
+        if i == image_num - 1:
+            end_time = time.time()
+    average_inference_time = round(
+        (end_time - start_time) / (image_num - twenty_percent_image_num), 4)
     eval_metric.accumulate()
     eval_details = eval_metric.details
     scores.update(eval_metric.get())
+    scores.update({'average_inference_time': average_inference_time})
     eval_metric.reset()
     return scores
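eval_detection applies the same warm-up-skipping measurement and adds 'average_inference_time' to the OrderedDict of COCO metrics it already returned. Note that the hunks above add only import math to this file while the new code calls time.time(), so time must be importable here (it may already be imported elsewhere in the file). A hedged sketch of a caller reading the new return shape; the evaluation import path and the dataset arguments are assumptions, only the 'average_inference_time' key comes from the diff:

from fastdeploy.vision.evaluation import eval_detection  # assumed import path

def report_detection_eval(model, data_dir, ann_file):
    # model: any FastDeploy detection model; data_dir/ann_file: hypothetical
    # COCO-style dataset arguments passed through to eval_detection.
    scores = eval_detection(model, data_dir, ann_file)
    # scores holds the COCO metrics from eval_metric.get() plus the timing entry:
    print("average inference time: {} s/image (first 20% skipped as warm-up)".format(
        scores["average_inference_time"]))
    return scores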