# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import numpy as np
from .map_utils import draw_pr_curve
from .json_results import get_det_res, get_det_poly_res, get_seg_res, get_solov2_segm_res
from . import fd_logging as logging
import copy


def loadRes(coco_obj, anns):
    """
    Load evaluation results and return a result api object.
    :param coco_obj (COCO) : COCO api object holding the ground-truth dataset
    :param anns (list)     : list of result dicts in COCO annotation format
    :return: res (COCO)    : result api object
    """

    # This function has the same functionality as pycocotools.COCO.loadRes,
    # except that the input anns is a list of results rather than a json file.
    # Refer to
    # https://github.com/cocodataset/cocoapi/blob/8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9/PythonAPI/pycocotools/coco.py#L305

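    # Illustrative usage (a minimal sketch, not taken from the upstream docs):
    # `coco_gt` is assumed to be an already-loaded pycocotools COCO object and
    # `dets` a list of COCO-format result dicts, e.g. as produced by
    # get_infer_results() below.
    #
    #     coco_dt = loadRes(coco_gt, dets)
    #     print(len(coco_dt.getAnnIds()))  # number of loaded result annotations
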
    # matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
    # or matplotlib.backends is imported for the first time,
    # and pycocotools imports matplotlib internally.
    import matplotlib
    matplotlib.use('Agg')
    from pycocotools.coco import COCO
    import pycocotools.mask as maskUtils
    import time
    res = COCO()
    res.dataset['images'] = [img for img in coco_obj.dataset['images']]

    tic = time.time()
    assert type(anns) == list, 'results is not an array of objects'
    annsImgIds = [ann['image_id'] for ann in anns]
    assert set(annsImgIds) == (set(annsImgIds) & set(coco_obj.getImgIds())), \
        'Results do not correspond to current coco set'
    if 'caption' in anns[0]:
        imgIds = set([img['id'] for img in res.dataset['images']]) & set(
            [ann['image_id'] for ann in anns])
        res.dataset['images'] = [
            img for img in res.dataset['images'] if img['id'] in imgIds
        ]
        for id, ann in enumerate(anns):
            ann['id'] = id + 1
    elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
            'categories'])
        for id, ann in enumerate(anns):
            bb = ann['bbox']
            x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
            if 'segmentation' not in ann:
                ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
            ann['area'] = bb[2] * bb[3]
            ann['id'] = id + 1
            ann['iscrowd'] = 0
    elif 'segmentation' in anns[0]:
        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
            'categories'])
        for id, ann in enumerate(anns):
            # now only support compressed RLE format as segmentation results
            ann['area'] = maskUtils.area(ann['segmentation'])
            if 'bbox' not in ann:
                ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
            ann['id'] = id + 1
            ann['iscrowd'] = 0
    elif 'keypoints' in anns[0]:
        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
            'categories'])
        for id, ann in enumerate(anns):
            s = ann['keypoints']
            x = s[0::3]
            y = s[1::3]
            x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
            ann['area'] = (x1 - x0) * (y1 - y0)
            ann['id'] = id + 1
            ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]

    res.dataset['annotations'] = anns
    res.createIndex()
    return res


def get_infer_results(outs, catid, bias=0):
    """
    Get results at the inference stage.
    The output format is a dictionary containing bbox or mask results.

    For example, the bbox result is a list whose elements each contain
    image_id, category_id, bbox and score.
    """
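    # Expected structure (illustrative sketch; exact shapes depend on the
    # detection model): `outs` is a dict such as
    #     {'im_id': np.array([[0]]),
    #      'bbox': np.array([[cls_id, score, x1, y1, x2, y2], ...]),
    #      'bbox_num': np.array([num_boxes_of_image_0, ...])}
    # and the returned dict maps 'bbox'/'mask'/'segm' to lists of COCO-format
    # result dicts, e.g. {'image_id': ..., 'category_id': ...,
    # 'bbox': [x, y, w, h], 'score': ...}.
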
    if outs is None or len(outs) == 0:
        raise ValueError(
            'The number of valid detection results is zero. Please check the model and the input data.'
        )

    im_id = outs['im_id']

    infer_res = {}
    if 'bbox' in outs:
        if len(outs['bbox']) > 0 and len(outs['bbox'][0]) > 6:
            infer_res['bbox'] = get_det_poly_res(
                outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
        else:
            infer_res['bbox'] = get_det_res(
                outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)

    if 'mask' in outs:
        # mask post process
        infer_res['mask'] = get_seg_res(outs['mask'], outs['bbox'],
                                        outs['bbox_num'], im_id, catid)

    if 'segm' in outs:
        infer_res['segm'] = get_solov2_segm_res(outs, im_id, catid)

    return infer_res


def cocoapi_eval(anns,
                 style,
                 coco_gt=None,
                 anno_file=None,
                 max_dets=(100, 300, 1000),
                 classwise=False):
    """
    Args:
        anns: Evaluation results.
        style (str): COCOeval style, can be `bbox`, `segm` and `proposal`.
        coco_gt (COCO): Loaded COCO ground-truth api object. If None, it is
            built from anno_file, i.e. coco_gt = COCO(anno_file).
        anno_file (str): COCO annotations file.
        max_dets (tuple): COCO evaluation maxDets.
        classwise (bool): Whether to compute per-category AP and draw the
            P-R curve.
    """
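    # Illustrative usage (a minimal sketch; the annotation path is hypothetical):
    #     infer_res = get_infer_results(outs, catid)
    #     stats = cocoapi_eval(infer_res['bbox'], 'bbox',
    #                          anno_file='annotations/instances_val2017.json')
    #     # stats[0] is the COCO mAP (AP@[0.50:0.95]).
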
    assert coco_gt is not None or anno_file is not None
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    if coco_gt is None:
        coco_gt = COCO(anno_file)
    logging.info("Start evaluating...")
    coco_dt = loadRes(coco_gt, anns)
    if style == 'proposal':
        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.params.useCats = 0
        coco_eval.params.maxDets = list(max_dets)
    else:
        coco_eval = COCOeval(coco_gt, coco_dt, style)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    if classwise:
        # Compute per-category AP and draw the P-R curve
        try:
            from terminaltables import AsciiTable
        except Exception as e:
            logging.error(
                'terminaltables not found, please install terminaltables, '
                'for example: `pip install terminaltables`.')
            raise e
        precisions = coco_eval.eval['precision']
        cat_ids = coco_gt.getCatIds()
        # precision has shape (iou, recall, cls, area range, max dets),
        # i.e. (10 IoU thresholds, 101 recall thresholds, num categories,
        # 4 area ranges, len(maxDets))
        assert len(cat_ids) == precisions.shape[2]
        results_per_category = []
        for idx, catId in enumerate(cat_ids):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            nm = coco_gt.loadCats(catId)[0]
            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            if precision.size:
                ap = np.mean(precision)
            else:
                ap = float('nan')
            results_per_category.append(
                (str(nm["name"]), '{:0.3f}'.format(float(ap))))
            pr_array = precisions[0, :, idx, 0, 2]
            recall_array = np.arange(0.0, 1.01, 0.01)
            draw_pr_curve(
                pr_array,
                recall_array,
                out_dir=style + '_pr_curve',
                file_name='{}_precision_recall_curve.jpg'.format(nm["name"]))

        num_columns = min(6, len(results_per_category) * 2)

        import itertools
        results_flatten = list(itertools.chain(*results_per_category))
        headers = ['category', 'AP'] * (num_columns // 2)
        results_2d = itertools.zip_longest(
            *[results_flatten[i::num_columns] for i in range(num_columns)])
        table_data = [headers]
        table_data += [result for result in results_2d]
        table = AsciiTable(table_data)
        logging.info('Per-category AP of {}: \n{}'.format(style, table.table))
        logging.info("Per-category PR curves have been saved to the {} folder.".
                     format(style + '_pr_curve'))
    # flush coco evaluation result
    sys.stdout.flush()
    return coco_eval.stats