Add PPSeg evaluation function (#137)

* Add PPSeg evaluation function

* Add average_inference_time function
Author: huangjianhui
Date: 2022-08-22 10:53:54 +08:00
Committed by: GitHub
Parent: beddcba900
Commit: e35d6dcfd4
5 changed files with 315 additions and 1 deletion


@@ -0,0 +1,95 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import trange
import numpy as np
import collections
import os
import math
import time


def eval_segmentation(model, data_dir):
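    """
    Evaluate a FastDeploy PaddleSeg model on the Cityscapes validation set.
    Args:
        model: Loaded segmentation model, e.g. fd.vision.segmentation.PaddleSegModel.
        data_dir (str): Root directory of the Cityscapes dataset.
    Returns:
        collections.OrderedDict: mIoU, per-class IoU, accuracy, kappa, F1 score and average inference time.
    """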
import cv2
from utils import Cityscapes
from utils import f1_score, calculate_area, mean_iou, accuracy, kappa
    assert os.path.isdir(
        data_dir), "The data_dir:{} is not a directory.".format(data_dir)
eval_dataset = Cityscapes(dataset_root=data_dir, mode="val")
file_list = eval_dataset.file_list
image_num = eval_dataset.num_samples
num_classes = eval_dataset.num_classes
intersect_area_all = 0
pred_area_all = 0
label_area_all = 0
conf_mat_all = []
twenty_percent_image_num = math.ceil(image_num * 0.2)
start_time = 0
end_time = 0
average_inference_time = 0
for image_label_path, i in zip(file_list,
trange(
image_num, desc="Inference Progress")):
if i == twenty_percent_image_num:
start_time = time.time()
im = cv2.imread(image_label_path[0])
label = cv2.imread(image_label_path[1], cv2.IMREAD_GRAYSCALE)
result = model.predict(im)
if i == image_num - 1:
end_time = time.time()
average_inference_time = round(
(end_time - start_time) / (image_num - twenty_percent_image_num),
4)
pred = np.array(result.label_map).reshape(result.shape[0],
result.shape[1])
intersect_area, pred_area, label_area = calculate_area(pred, label,
num_classes)
intersect_area_all = intersect_area_all + intersect_area
pred_area_all = pred_area_all + pred_area
label_area_all = label_area_all + label_area
class_iou, miou = mean_iou(intersect_area_all, pred_area_all,
label_area_all)
class_acc, oacc = accuracy(intersect_area_all, pred_area_all)
kappa_res = kappa(intersect_area_all, pred_area_all, label_area_all)
category_f1score = f1_score(intersect_area_all, pred_area_all,
label_area_all)
eval_metrics = collections.OrderedDict(
zip([
'miou', 'category_iou', 'oacc', 'category_acc', 'kappa',
'category_F1-score', 'average_inference_time(s)'
], [
miou, class_iou, oacc, class_acc, kappa_res, category_f1score,
average_inference_time
]))
    return eval_metrics


import fastdeploy as fd
#model = fd.vision.segmentation.PaddleSegModel("/huangjianhui/temp/FastDeploy/model_zoo/vision/ppseg/unet/unet_Cityscapes/model.pdmodel",
# "/huangjianhui/temp/FastDeploy/model_zoo/vision/ppseg/unet/unet_Cityscapes//model.pdiparams",
# "/huangjianhui/temp/FastDeploy/model_zoo/vision/ppseg/unet/unet_Cityscapes/deploy.yaml")
#
option = fd.RuntimeOption()
option.use_paddle_backend()
option.use_gpu(3)
model = fd.vision.segmentation.PaddleSegModel(
"/huangjianhui/temp/FastDeploy/model_zoo/vision/ppseg/PP-LiteSeg/output_no_static_size/model.pdmodel",
"/huangjianhui/temp/FastDeploy/model_zoo/vision/ppseg/PP-LiteSeg/output_no_static_size/model.pdiparams",
"/huangjianhui/temp/FastDeploy/model_zoo/vision/ppseg/PP-LiteSeg/output_no_static_size/deploy.yaml",
option)
result = eval_segmentation(model, "/huangjianhui/PaddleSeg/data/cityscapes/")
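For reference, a minimal follow-up sketch (not part of this commit) that prints the OrderedDict returned by eval_segmentation above; the keys are the ones built at the end of that function:

# Reporting sketch: "result" is the OrderedDict returned by eval_segmentation above.
print("mIoU: {:.4f}".format(result["miou"]))
print("Overall accuracy: {:.4f}".format(result["oacc"]))
print("Kappa: {:.4f}".format(result["kappa"]))
print("Average inference time: {}s".format(result["average_inference_time(s)"]))
for class_id, iou in enumerate(result["category_iou"]):
    print("class {:2d}: IoU={:.4f}".format(class_id, iou))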


@@ -14,8 +14,10 @@
from . import fd_logging
from .util import *
from .coco_metrics import *
from .seg_metrics import *
from .json_results import *
from .map_utils import *
from .coco_utils import *
from .coco import *
from .cityscapes import *


@@ -0,0 +1,74 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from . import fd_logging as logging
#import fd_logging as logging
class Cityscapes(object):
"""
Cityscapes dataset `https://www.cityscapes-dataset.com/`.
    The folder structure is as follows:
cityscapes
|
|--leftImg8bit
| |--train
| |--val
| |--test
|
|--gtFine
| |--train
| |--val
| |--test
Args:
dataset_root (str): Cityscapes dataset directory.
"""
NUM_CLASSES = 19
def __init__(self, dataset_root, mode):
self.dataset_root = dataset_root
self.file_list = list()
mode = mode.lower()
self.mode = mode
self.num_classes = self.NUM_CLASSES
self.ignore_index = 255
img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
label_dir = os.path.join(self.dataset_root, 'gtFine')
if self.dataset_root is None or not os.path.isdir(
self.dataset_root) or not os.path.isdir(
img_dir) or not os.path.isdir(label_dir):
            raise ValueError(
                "The dataset is not found or the folder structure does not conform to the Cityscapes layout."
            )
label_files = sorted(
glob.glob(
os.path.join(label_dir, mode, '*',
'*_gtFine_labelTrainIds.png')))
img_files = sorted(
glob.glob(os.path.join(img_dir, mode, '*', '*_leftImg8bit.png')))
self.file_list = [
[img_path, label_path]
for img_path, label_path in zip(img_files, label_files)
]
self.num_samples = len(self.file_list)
        logging.info("{} samples found in {}".format(self.num_samples, img_dir))
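For illustration (not part of this commit), a minimal usage sketch of the Cityscapes helper; the dataset path below is a placeholder:

# Hypothetical usage; "/path/to/cityscapes" is a placeholder, not a path from this PR.
dataset = Cityscapes(dataset_root="/path/to/cityscapes", mode="val")
print("{} samples, {} classes".format(dataset.num_samples, dataset.num_classes))
image_path, label_path = dataset.file_list[0]  # first (image, label) pair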


@@ -0,0 +1,143 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def f1_score(intersect_area, pred_area, label_area):
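    """
    Calculate F1 score for each class.
    Args:
        intersect_area (np.ndarray): The intersection area of prediction and ground truth on all classes.
        pred_area (np.ndarray): The prediction area on all classes.
        label_area (np.ndarray): The ground truth area on all classes.
    Returns:
        np.ndarray: F1 score on all classes.
    """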
class_f1_sco = []
for i in range(len(intersect_area)):
if pred_area[i] + label_area[i] == 0:
f1_sco = 0
elif pred_area[i] == 0:
f1_sco = 0
else:
prec = intersect_area[i] / pred_area[i]
rec = intersect_area[i] / label_area[i]
f1_sco = 2 * prec * rec / (prec + rec)
class_f1_sco.append(f1_sco)
return np.array(class_f1_sco)
def calculate_area(pred, label, num_classes, ignore_index=255):
"""
Calculate intersect, prediction and label area
Args:
pred (np.ndarray): The prediction by model.
label (np.ndarray): The ground truth of image.
num_classes (int): The unique number of target classes.
ignore_index (int): Specifies a target value that is ignored. Default: 255.
Returns:
        Numpy Array: The intersection area of prediction and ground truth on all classes.
        Numpy Array: The prediction area on all classes.
        Numpy Array: The ground truth area on all classes.
"""
if not pred.shape == label.shape:
        raise ValueError('Shape of `pred` and `label` should be equal, '
                         'but they are {} and {}.'.format(pred.shape, label.shape))
mask = label != ignore_index
pred = pred + 1
label = label + 1
pred = pred * mask
label = label * mask
    # One-hot encode along a new class axis: channel 0 is the placeholder created by the
    # +1 shift for ignored pixels, channels 1..num_classes correspond to classes 0..num_classes-1.
    pred = np.eye(num_classes + 1)[pred]
    label = np.eye(num_classes + 1)[label]
    # Drop the placeholder channel along the last (class) axis.
    pred = pred[..., 1:]
    label = label[..., 1:]
pred_area = []
label_area = []
intersect_area = []
for i in range(num_classes):
pred_i = pred[:, :, i]
label_i = label[:, :, i]
pred_area_i = np.sum(pred_i)
label_area_i = np.sum(label_i)
intersect_area_i = np.sum(pred_i * label_i)
pred_area.append(pred_area_i)
label_area.append(label_area_i)
intersect_area.append(intersect_area_i)
return np.array(intersect_area), np.array(pred_area), np.array(label_area)
def mean_iou(intersect_area, pred_area, label_area):
"""
Calculate iou.
Args:
intersect_area (np.ndarray): The intersection area of prediction and ground truth on all classes.
pred_area (np.ndarray): The prediction area on all classes.
label_area (np.ndarray): The ground truth area on all classes.
Returns:
np.ndarray: iou on all classes.
float: mean iou of all classes.
"""
union = pred_area + label_area - intersect_area
class_iou = []
for i in range(len(intersect_area)):
if union[i] == 0:
iou = 0
else:
iou = intersect_area[i] / union[i]
class_iou.append(iou)
miou = np.mean(class_iou)
return np.array(class_iou), miou
def accuracy(intersect_area, pred_area):
"""
Calculate accuracy
Args:
        intersect_area (np.ndarray): The intersection area of prediction and ground truth on all classes.
pred_area (np.ndarray): The prediction area on all classes.
Returns:
np.ndarray: accuracy on all classes.
float: mean accuracy.
"""
class_acc = []
for i in range(len(intersect_area)):
if pred_area[i] == 0:
acc = 0
else:
acc = intersect_area[i] / pred_area[i]
class_acc.append(acc)
macc = np.sum(intersect_area) / np.sum(pred_area)
return np.array(class_acc), macc
def kappa(intersect_area, pred_area, label_area):
"""
Calculate kappa coefficient
Args:
        intersect_area (np.ndarray): The intersection area of prediction and ground truth on all classes.
pred_area (np.ndarray): The prediction area on all classes.
label_area (np.ndarray): The ground truth area on all classes.
Returns:
float: kappa coefficient.
"""
total_area = np.sum(label_area)
po = np.sum(intersect_area) / total_area
pe = np.sum(pred_area * label_area) / (total_area * total_area)
kappa = (po - pe) / (1 - pe)
return kappa
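
As a quick sanity check (not part of this commit), the metrics above can be exercised on a toy 2x2 prediction; the values below are chosen only for illustration and assume the functions in this file are in scope:

import numpy as np

# Toy example: 2 classes, a single 2x2 prediction/label pair.
pred = np.array([[0, 1], [1, 1]])
label = np.array([[0, 1], [0, 1]])

intersect, pred_area, label_area = calculate_area(pred, label, num_classes=2)
class_iou, miou = mean_iou(intersect, pred_area, label_area)
class_acc, oacc = accuracy(intersect, pred_area)
print(class_iou, miou)                              # per-class IoU and mean IoU
print(class_acc, oacc)                              # per-class accuracy and overall accuracy
print(kappa(intersect, pred_area, label_area))      # kappa coefficient
print(f1_score(intersect, pred_area, label_area))   # per-class F1 score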