# Source: https://github.com/we0091234/yolov7_plate.git
import argparse
import os
import time

import cv2
import numpy as np
import torch
from tqdm import tqdm

from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, check_requirements, non_max_suppression, \
    scale_coords, set_logging
from utils.plots import colors, plot_one_box  # used by the commented-out visualization below
from utils.torch_utils import select_device, time_synchronized
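
# NOTE: models.* and utils.* are this repository's local YOLOv5/YOLOv7-style
# modules; the script assumes it is run from the repo root so they resolve.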

def detect(opt):
    weights, imgsz, kpt_label = opt.weights, opt.img_size, opt.kpt_label

    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    if isinstance(imgsz, (list, tuple)):
        assert len(imgsz) == 2, 'height and width of image have to be specified'
        imgsz[0] = check_img_size(imgsz[0], s=stride)
        imgsz[1] = check_img_size(imgsz[1], s=stride)
    else:
        imgsz = check_img_size(imgsz, s=stride)  # check img_size
    names = model.module.names if hasattr(model, 'module') else model.names  # get class names
    if half:
        model.half()  # to FP16

    # Run inference
    if device.type != 'cpu':
        h, w = imgsz if isinstance(imgsz, (list, tuple)) else (imgsz, imgsz)
        model(torch.zeros(1, 3, h, w).to(device).type_as(next(model.parameters())))  # run once to warm up
    t0 = time.time()

    # Testing dataset
    testset_folder = opt.dataset_folder
    # wider_val.txt is expected alongside the dataset: strip the trailing
    # 'images/' (7 characters) from the folder path to locate it
    testset_list = opt.dataset_folder[:-7] + "wider_val.txt"

    with open(testset_list, 'r') as fr:
        test_dataset = fr.read().split()
    num_images = len(test_dataset)
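
    # Each image gets a txt file in the WIDER-FACE evaluation layout:
    # line 1 is the image name (no extension), line 2 the number of boxes,
    # then one 'x y w h score' line per detection.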
    for img_name in tqdm(test_dataset):
        image_path = testset_folder + img_name
        img0 = cv2.imread(image_path)  # BGR
        img = letterbox(img0, imgsz)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes,
                                   agnostic=opt.agnostic_nms, kpt_label=kpt_label)
        t2 = time_synchronized()
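
        # Each det row is [x1, y1, x2, y2, conf, cls, kpt1_x, kpt1_y, kpt1_conf, ...]:
        # box coordinates in columns 0-3, confidence/class in 4-5, then kpt_label
        # keypoints stored as (x, y, conf) triplets from column 6 onward (step=3 below).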

        save_name = opt.save_folder + img_name[:-4] + ".txt"
        dirname = os.path.dirname(save_name)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(save_name, "w") as fd:
            file_name = os.path.basename(save_name)[:-4] + "\n"
            bboxs_num = str(len(pred[0])) + "\n"
            fd.write(file_name)
            fd.write(bboxs_num)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if len(det):
                    # Rescale boxes and keypoints from img_size to img0 size
                    scale_coords(img.shape[2:], det[:, :4], img0.shape, kpt_label=False)
                    scale_coords(img.shape[2:], det[:, 6:], img0.shape, kpt_label=kpt_label, step=3)

                    # Write results
                    for det_index, (*xyxy, conf, cls) in enumerate(det[:, :6]):
                        c = int(cls)  # integer class
                        label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}')
                        kpts = det[det_index, 6:]
                        x1 = int(xyxy[0] + 0.5)
                        y1 = int(xyxy[1] + 0.5)
                        x2 = int(xyxy[2] + 0.5)
                        y2 = int(xyxy[3] + 0.5)
                        # WIDER-FACE format: x y w h score (score clamped to 1)
                        fd.write('%d %d %d %d %.3f' % (x1, y1, x2 - x1, y2 - y1, conf if conf <= 1 else 1) + '\n')
                        # label and kpts feed the optional visualization:
                        # plot_one_box(xyxy, img0, label=label, color=colors(c, True), line_thickness=opt.line_thickness, kpt_label=kpt_label, kpts=kpts, steps=3, orig_shape=img0.shape[:2])
        # cv2.imwrite('result.jpg', img0)

    print(f'Done. ({time.time() - t0:.3f}s)')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--img-size', nargs='+', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.01, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--kpt-label', type=int, default=5, help='number of keypoints')
    parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='dir to save txt results')
    parser.add_argument('--dataset_folder', default='data/widerface/widerface/val/images/', type=str, help='dataset path')
    opt = parser.parse_args()
    print(opt)
    check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))

    with torch.no_grad():
        detect(opt=opt)
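
# Example invocation (script name, weight file, and paths are illustrative;
# adjust them to your checkout and trained weights):
#   python detect_widerface.py --weights weights/best.pt --img-size 640 \
#       --dataset_folder data/widerface/widerface/val/images/ \
#       --save_folder ./widerface_evaluate/widerface_txt/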