mirror of https://github.com/kerberos-io/heatmap.git
synced 2025-09-26 20:31:14 +08:00
cleaned up the file
@@ -1,9 +1,11 @@
 import cv2
 import datetime
-import imutils
 import numpy as np
 from collections import defaultdict
 from centroidtracker import CentroidTracker
+import pandas as pd
+import imutils


 protopath = "MobileNetSSD_deploy.prototxt"
 modelpath = "MobileNetSSD_deploy.caffemodel"
@@ -15,141 +17,170 @@ detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)


 CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
            "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
            "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
            "sofa", "train", "tvmonitor"]

 # maxDisappeared: how long to wait when an object moves out of frame
-tracker = CentroidTracker(maxDisappeared=700, maxDistance=220)
+tracker = CentroidTracker(maxDisappeared=500, maxDistance=220)
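Editor's note: maxDisappeared is the number of consecutive frames a vanished object keeps its ID before the tracker drops it (lowered here from 700 to 500), and maxDistance caps how far, in pixels, a centroid may move between frames and still be matched to the same ID. These semantics follow the common CentroidTracker convention; the local centroidtracker module may differ.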

 def non_max_suppression_fast(boxes, overlapThresh):
+    """Combine bounding boxes that overlap into one bbox."""
     try:
         if len(boxes) == 0:
             return []

         if boxes.dtype.kind == "i":
             boxes = boxes.astype("float")

         pick = []

         x1 = boxes[:, 0]
         y1 = boxes[:, 1]
         x2 = boxes[:, 2]
         y2 = boxes[:, 3]

         area = (x2 - x1 + 1) * (y2 - y1 + 1)
         idxs = np.argsort(y2)

         while len(idxs) > 0:
             last = len(idxs) - 1
             i = idxs[last]
             pick.append(i)

             xx1 = np.maximum(x1[i], x1[idxs[:last]])
             yy1 = np.maximum(y1[i], y1[idxs[:last]])
             xx2 = np.minimum(x2[i], x2[idxs[:last]])
             yy2 = np.minimum(y2[i], y2[idxs[:last]])

             w = np.maximum(0, xx2 - xx1 + 1)
             h = np.maximum(0, yy2 - yy1 + 1)

             overlap = (w * h) / area[idxs[:last]]

             idxs = np.delete(idxs, np.concatenate(([last],
                                                    np.where(overlap > overlapThresh)[0])))

         return boxes[pick].astype("int")
     except Exception as e:
         print("Exception occurred in non_max_suppression : {}".format(e))
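As a quick sanity check of the function above (editor's addition, not part of the commit): two heavily overlapping boxes should collapse to a single pick while a distant box survives.

    import numpy as np

    boxes = np.array([[10, 10, 50, 50],    # overlaps heavily with the next box
                      [12, 12, 52, 52],
                      [100, 100, 140, 140]])  # well separated
    kept = non_max_suppression_fast(boxes, overlapThresh=0.4)
    print(kept)  # two of the three boxes survive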

-def main():
-    cap = cv2.VideoCapture('1639943552_6-967003_camera1_200-200-400-400_24_769.mp4')
+def convert_to_2d(Xcenter, y2):
+    """Convert the coordinates from a 3d playing field to a 2d playing field."""
+    pts_src = np.array([[257, 262], [370, 225], [492, 190], [294, 324], [474, 272], [620, 213], [727, 383], [799, 259]])
+    # Take points from the frame as reference and give the same point coordinates on the picture for a transformation
+    pts_dst = np.array([[110, 145], [349, 145], [588, 145], [110, 500], [349, 500], [588, 500], [349, 855], [588, 855]])
+
+    # calculate matrix H
+    h, status = cv2.findHomography(pts_src, pts_dst)
+
+    # provide a point you wish to map from image 1 to image 2
+    a = np.array([[Xcenter, y2]], dtype='float32')
+    a = np.array([a])
+
+    # finally, get the mapping
+    pointsOut = cv2.perspectiveTransform(a, h)
+    pointsOut = pointsOut.astype(int)
+    return pointsOut
+
+
+def main(video="1639943552_6-967003_camera1_200-200-400-400_24_769.mp4"):
+    """Read the frames, recognise humans and track them. The coordinates of
+    the bottom of the bbox are saved for transformation and plotting."""
+    cap = cv2.VideoCapture(video)
+
+    paddel_2d = cv2.imread('media/paddelfield.jpeg')
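One thing worth noting about convert_to_2d: the reference points never change, yet cv2.findHomography is re-run for every tracked point on every frame. A minimal sketch of the same mapping with the homography hoisted to module scope (an editor's suggestion, not in the commit):

    import cv2
    import numpy as np

    pts_src = np.array([[257, 262], [370, 225], [492, 190], [294, 324],
                        [474, 272], [620, 213], [727, 383], [799, 259]])
    pts_dst = np.array([[110, 145], [349, 145], [588, 145], [110, 500],
                        [349, 500], [588, 500], [349, 855], [588, 855]])
    H, _ = cv2.findHomography(pts_src, pts_dst)  # computed once, reused below

    def convert_to_2d(x, y):
        # perspectiveTransform expects an (N, 1, 2) float32 array
        pt = np.array([[[x, y]]], dtype='float32')
        return cv2.perspectiveTransform(pt, H).astype(int)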

     fps_start_time = datetime.datetime.now()
     fps = 0
     total_frames = 0
     centroid_dict = defaultdict(list)
     object_id_list = []

     while True:
         ret, frame = cap.read()
-        frame = imutils.resize(frame, width=600)
+        if not ret:
+            # stop when the video ends; resize would otherwise fail on a None frame
+            break
+        frame = imutils.resize(frame, width=800)
+        # frame = frame[540:1080, 700:1920]
         total_frames = total_frames + 1
         (height, width) = frame.shape[:2]

         blob = cv2.dnn.blobFromImage(frame, 0.007843, (width, height), 127.5)
         detector.setInput(blob)
         person_detections = detector.forward()
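Editor's note: in the usual Caffe-SSD layout, person_detections has shape (1, 1, N, 7), where each of the N candidates carries (image_id, class_index, confidence, x1, y1, x2, y2) with box corners normalised to [0, 1]. That is why the loop below reads [0, 0, i, 2] for confidence and scales [0, 0, i, 3:7] by the frame size.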

         rects = []
         for i in np.arange(0, person_detections.shape[2]):
             confidence = person_detections[0, 0, i, 2]
             if confidence > 0.5:
                 idx = int(person_detections[0, 0, i, 1])

                 if CLASSES[idx] != "person":
                     continue

                 person_box = person_detections[0, 0, i, 3:7] * np.array([width, height, width, height])
                 (startX, startY, endX, endY) = person_box.astype("int")
                 rects.append(person_box)

         boundingboxes = np.array(rects)
         boundingboxes = boundingboxes.astype(int)
         rects = non_max_suppression_fast(boundingboxes, 0.4)
         objects = tracker.update(rects)

         for (objectId, bbox) in objects.items():
             x1, y1, x2, y2 = bbox
             x1 = int(x1)
             y1 = int(y1)
             x2 = int(x2)
             y2 = int(y2)

             xCenter = int((x1 + x2) / 2)
             yCenter = int((y1 + y2) / 2)

             cv2.circle(frame, (xCenter, y2), 5, (0, 255, 0), -1)
text = "ID: {}".format(objectId)
|
|
||||||
cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
|
|
||||||
|
|
||||||
fps_end_time = datetime.datetime.now()
|
# saving the converted cords
|
||||||
time_diff = fps_end_time - fps_start_time
|
pointsout = convert_to_2d(xCenter, y2)
|
||||||
if time_diff.seconds == 0:
|
for tuple in pointsout:
|
||||||
fps = 0.0
|
for points in tuple:
|
||||||
else:
|
pd.DataFrame({'x': [points[0]], 'y': [points[1]]}, index=[objectId]).to_csv('cords.csv', mode='a',
|
||||||
fps = (total_frames / time_diff.seconds)
|
header=False)
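Since logging cords.csv exists to feed the heatmap, here is a minimal sketch of rendering the accumulated 2D points. The column order (id, x, y) follows the appends above; the bin count and output filename are illustrative assumptions, not part of the repo.

    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt

    # rows were appended as: objectId, x, y (no header)
    df = pd.read_csv('cords.csv', names=['id', 'x', 'y'])
    heat, xedges, yedges = np.histogram2d(df['x'], df['y'], bins=50)
    plt.imshow(heat.T, origin='lower', cmap='hot',
               extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
    plt.colorbar(label='visits')
    plt.savefig('heatmap.png')  # assumed output name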

             centroid_dict[objectId].append((xCenter, y2))

             if objectId not in object_id_list:
                 object_id_list.append(objectId)
                 start_pt = (xCenter, y2)
                 end_pt = (xCenter, y2)
                 cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
             else:
                 L = len(centroid_dict[objectId])
                 for pt in range(len(centroid_dict[objectId])):
                     if not pt + 1 == L:
                         start_pt = (centroid_dict[objectId][pt][0], centroid_dict[objectId][pt][1])
                         end_pt = (centroid_dict[objectId][pt + 1][0], centroid_dict[objectId][pt + 1][1])
                         cv2.line(frame, start_pt, end_pt, (0, 255, 0), 1)

             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
             text = "ID: {}".format(objectId)
             cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)

         fps_end_time = datetime.datetime.now()
         time_diff = fps_end_time - fps_start_time
         if time_diff.seconds == 0:
             fps = 0.0
         else:
             fps = (total_frames / time_diff.seconds)
         fps_text = "FPS: {:.2f}".format(fps)
         cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)

-        cv2.imshow("Application", frame)
-        # cv2.VideoWriter_fourcc("new_vid.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 20, (width, height))
+        # cv2.imshow("Application", frame)
         key = cv2.waitKey(1)
         if key == ord('q'):
             break
     cap.release()
     cv2.destroyAllWindows()


 main()
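One further editor suggestion, not in the commit: calling main() at import time makes the module awkward to reuse elsewhere; the conventional guard keeps the script behaviour while allowing imports.

    if __name__ == "__main__":
        main()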