Update python code

esimov
2019-10-30 17:42:06 +02:00
parent 5ff666cb81
commit 793a8f3232


@@ -15,6 +15,30 @@ ARRAY_DIM = 6
 MOUTH_AR_THRESH = 0.2
+
+def verify_alpha_channel(frame):
+    try:
+        frame.shape[3]  # 4th position
+    except IndexError:
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
+    return frame
+
+def alpha_blend(frame_1, frame_2, mask):
+    alpha = mask/255.0
+    blended = cv2.convertScaleAbs(frame_1*(1-alpha) + frame_2*alpha)
+    return blended
+
+def apply_circle_focus_blur(frame, x, y):
+    frame = verify_alpha_channel(frame)
+    height, width, _ = frame.shape
+    mask = np.zeros((height, width, 4), dtype='uint8')
+    cv2.circle(mask, (int(x), int(y)), int(x/2),
+               (255, 255, 255), -1, cv2.LINE_AA)
+    mask = cv2.GaussianBlur(mask, (41, 41), cv2.BORDER_DEFAULT)
+    blured = cv2.GaussianBlur(frame, (41, 41), cv2.BORDER_DEFAULT)
+    blended = alpha_blend(frame, blured, 255-mask)
+    frame = cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)
+    return frame
 # define class GoPixelSlice to map to:
 # C type struct { void *data; GoInt len; GoInt cap; }
 class GoPixelSlice(Structure):
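As a usage reference for the helpers added in this hunk, a minimal standalone sketch that keeps a circular region of a still image sharp and blurs everything else. It assumes apply_circle_focus_blur (with its cv2/numpy imports) is in scope; the file names and the centre point are illustrative only, not part of the commit:

    import cv2

    img = cv2.imread("face.jpg")    # any BGR test image (placeholder name)
    h, w = img.shape[:2]
    # keep the middle of the picture in focus, blur the surroundings
    out = apply_circle_focus_blur(img, w / 2, h / 2)
    cv2.imwrite("face_blur.jpg", out)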
@@ -22,7 +46,7 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong), ("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
] ]
-# Obtain the camera pixels and transfer them to Go through Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes
 def process_frame(pixs):
     dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
     pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
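For context, this is roughly how a flat pixel buffer gets wrapped in GoPixelSlice before the FindFaces call in the next hunk. Only the struct layout and the cast come from the code above; the shared-library file name, the to_go_slice helper, and the way the slice is constructed are assumptions for illustration:

    from ctypes import CDLL, POINTER, Structure, c_longlong, c_ubyte, cast

    pigo = CDLL("./talkdet.so")    # hypothetical name of the Go shared library

    class GoPixelSlice(Structure):
        # mirrors Go's slice header: struct { void *data; GoInt len; GoInt cap; }
        _fields_ = [
            ("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
        ]

    def to_go_slice(pixs):
        # copy the bytes into a ctypes array and hand Go a pointer/len/cap triple
        pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
        return GoPixelSlice(pixels, len(pixs), len(pixs))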
@@ -36,9 +60,11 @@ def process_frame(pixs):
     ndets = pigo.FindFaces(faces)
     data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))
-    if data_pointer :
-        buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
-        res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
+    if data_pointer:
+        buffarr = ((c_longlong * ARRAY_DIM) *
+                   MAX_NDETS).from_address(addressof(data_pointer.contents))
+        res = np.ndarray(buffer=buffarr, dtype=c_longlong,
+                         shape=(MAX_NDETS, ARRAY_DIM,))
         # The first value of the buffer array represents the buffer length.
         dets_len = res[0][0]
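The from_address/np.ndarray pairing above views the C buffer in place rather than copying it. A self-contained sketch of the same technique on a plain ctypes array (the sizes and the stored value here are arbitrary):

    from ctypes import c_longlong
    import numpy as np

    ROWS, COLS = 4, 6
    buf = ((c_longlong * COLS) * ROWS)()    # zero-initialised 2-D C array
    buf[0][0] = 3                           # e.g. "3 detections" in the first slot

    # NumPy view over the same memory: no copy is made, so writes on either
    # side are immediately visible to the other
    res = np.ndarray(buffer=buf, dtype=c_longlong, shape=(ROWS, COLS))
    assert res[0][0] == 3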
@@ -58,13 +84,14 @@ cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
 # For this reason we should delay the object detection process by a few milliseconds.
 time.sleep(0.4)
-showFaceDet = True
+showFaceDet = False
 showPupil = True
 showLandmarkPoints = True
 while(True):
     ret, frame = cap.read()
-    pixs = np.ascontiguousarray(frame[:, :, 1].reshape((frame.shape[0], frame.shape[1])))
+    pixs = np.ascontiguousarray(
+        frame[:, :, 1].reshape((frame.shape[0], frame.shape[1])))
     pixs = pixs.flatten()
     # Verify if the camera is initialized by checking if the pixel array is not empty.
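The pixs preparation above keeps a single channel of the BGR frame and flattens it into the byte buffer the detector expects. A quick sketch with a synthetic frame shows the resulting shape (no camera required; the frame size is arbitrary):

    import numpy as np

    frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # stand-in for cap.read()
    pixs = np.ascontiguousarray(
        frame[:, :, 1].reshape((frame.shape[0], frame.shape[1])))
    pixs = pixs.flatten()
    assert pixs.shape == (480 * 640,) and pixs.dtype == np.uint8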
@@ -77,15 +104,19 @@ while(True):
         if q > 50:
             if det_type == 0: # 0 == face;
                 if showFaceDet:
-                    cv2.rectangle(frame, (col-scale/2, row-scale/2), (col+scale/2, row+scale/2), (0, 0, 255), 2)
+                    cv2.rectangle(
+                        frame, (col-scale/2, row-scale/2), (col+scale/2, row+scale/2), (0, 0, 255), 2)
             elif det_type == 1: # 1 == pupil;
                 if showPupil:
-                    cv2.circle(frame, (int(col), int(row)), 4, (0, 0, 255), -1, 8, 0)
+                    cv2.circle(frame, (int(col), int(row)),
+                               4, (0, 0, 255), -1, 8, 0)
             elif det_type == 2: # 2 == facial landmark;
                 if showLandmarkPoints:
-                    cv2.circle(frame, (int(col), int(row)), 4, (0, 255, 0), -1, 8, 0)
+                    cv2.circle(frame, (int(col), int(row)),
+                               4, (0, 255, 0), -1, 8, 0)
             elif det_type == 3:
                 if mouth_ar < MOUTH_AR_THRESH: # mouth is open
+                    frame = apply_circle_focus_blur(frame, col, row)
                     cv2.putText(frame, "TALKING!", (10, 30),
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
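For reference, the OpenCV drawing calls used in this hunk, exercised on a blank frame with made-up detection values. The // division keeps the rectangle corners integral, since cv2.rectangle expects integer points; the row/col/scale values and the output file name are fabricated for the sketch:

    import cv2
    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    row, col, scale = 240, 320, 200          # fabricated face detection

    # face bounding box (red), pupil-style dot (red) and the "TALKING!" banner
    cv2.rectangle(frame, (col - scale // 2, row - scale // 2),
                  (col + scale // 2, row + scale // 2), (0, 0, 255), 2)
    cv2.circle(frame, (col, row), 4, (0, 0, 255), -1, 8, 0)
    cv2.putText(frame, "TALKING!", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imwrite("overlay_demo.png", frame)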