Mirror of https://github.com/esimov/pigo.git, synced 2025-10-05 16:16:55 +08:00
fixed typo across the project
@@ -153,7 +153,7 @@ func TestFlploc_LandmarkPointsDetectorShouldReturnCorrectDetectionPoints(t *test

}
}
-expectedLandmarkPoints := 2*len(eyeCascades) + len(mouthCascades) + 1
+expectedLandmarkPoints := 2*len(eyeCascades) + len(mouthCascades) + 1 // lendmark points of the left/right eyes, mouth + nose
if expectedLandmarkPoints != detectedLandmarkPoints {
t.Fatalf("expected facial landmark points to be detected: %d, got: %d", expectedLandmarkPoints, detectedLandmarkPoints)
}
@@ -59,7 +59,7 @@ func FindFaces(pixels []uint8) uintptr {

coords := make([]int, 0, len(dets))
go func() {
-// Since in Go we cannot transfer a 2d array trough an array pointer
+// Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
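The comment being corrected here also documents the FFI design: the Go wrapper flattens its 2-D detection slice into a 1-D array before returning a pointer, because a Go 2-D slice cannot be handed across the C boundary directly. The Python callers in this same commit reverse that step by viewing the flat buffer as fixed-width rows. A minimal sketch of that inverse step, assuming the ARRAY_DIM/MAX_NDETS layout the Python examples use (names and sizes here are illustrative, not taken from this diff):

```python
import numpy as np
from ctypes import POINTER, addressof, c_longlong, cast

ARRAY_DIM = 5     # assumed row layout: row, col, scale, quality, type
MAX_NDETS = 2024  # assumed fixed capacity of the result buffer

def unpack_detections(ndets_ptr):
    """Rebuild per-detection rows from the flat int64 buffer whose address is
    returned by the exported Go FindFaces function."""
    buf_type = (c_longlong * ARRAY_DIM) * MAX_NDETS
    data_pointer = cast(ndets_ptr, POINTER(buf_type))
    if not data_pointer:
        return []
    buffarr = buf_type.from_address(addressof(data_pointer.contents))
    res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM))
    dets_len = int(res[0][0])   # first slot carries the detection count
    res = np.delete(res, 0, 0)  # drop the count row, keep the detection rows
    # Some of the examples scale dets_len by the number of rows stored per face;
    # here it is treated as a plain row count for illustration.
    return list(res.reshape(-1, ARRAY_DIM))[:dets_len]
```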
@@ -22,20 +22,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]

-# Obtain the camera pixels and transfer them to Go trough C types.
+# Obtain the camera pixels and transfer them to Go through C types.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))

# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p

# Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))

if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, 5,))
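For context on how this example is wired up: `pigo` here is the Go wrapper loaded as a shared library through ctypes, and `process_frame` receives the flattened grayscale pixels of one webcam frame. A minimal sketch of that call path, assuming the library was built with `go build -buildmode=c-shared -o pigo.so` (the file name and build command are assumptions, not shown in this diff):

```python
import cv2
from ctypes import CDLL

pigo = CDLL('./pigo.so')  # assumed output of the c-shared Go build

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # The Go side expects a flat []uint8 buffer, so the 2-D grayscale image is
    # flattened before process_frame wraps it into a GoPixelSlice (see above).
    dets = process_frame(gray.flatten())
cap.release()
```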
@@ -54,7 +54,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Changing the camera resolution introduce a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
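The comment above is the rationale for the fixed `time.sleep(0.4)`: right after the capture resolution is changed, the first reads may fail or return stale frames. A hedged alternative to a hard-coded delay is to poll until the camera actually delivers a frame; this is a sketch of that variation, not what the example itself does:

```python
import time
import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Poll for the first valid frame instead of sleeping for a fixed 0.4 s.
deadline = time.time() + 2.0
while time.time() < deadline:
    ret, frame = cap.read()
    if ret and frame is not None:
        break
    time.sleep(0.05)
```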
@@ -77,9 +77,9 @@ while(True):
for det in dets:
if det[4] == 1: # 1 == face; 0 == pupil
face_posy = det[1]
cv2.rectangle(frame,
(int(det[1])-int(det[2]/2), int(det[0])-int(det[2]/2)),
(int(det[1])+int(det[2]/2), int(det[0])+int(det[2]/2)),
(0, 0, 255), 2
)
else:
@@ -89,22 +89,22 @@ while(True):

x1, x2 = int(det[0])-int(det[2]*1.2), int(det[0])+int(det[2]*1.2)
y1, y2 = int(det[1])-int(det[2]*1.2), int(det[1])+int(det[2]*1.2)
subimg = frame[x1:x2, y1:y2]

if subimg is not None:
gray = cv2.cvtColor(subimg, cv2.COLOR_BGR2GRAY)
img_blur = cv2.medianBlur(gray, 1)

if img_blur is not None:
max_radius = int(det[2]*0.45)
circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, int(det[2]*0.45),
param1=60, param2=21, minRadius=4, maxRadius=max_radius)

if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
if i[2] < max_radius and i[2] > 0:
# Draw outer circle
cv2.circle(frame, (int(det[1]), int(det[0])), i[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(frame, (int(det[1]), int(det[0])), 2, (255, 0, 255), 3)
@@ -112,21 +112,21 @@ while(True):
if face_posy < y1:
count_left = 0
else:
count_right = 0

if count_left < EYE_CLOSED_CONSEC_FRAMES:
cv2.putText(frame, "Left blink!", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
elif count_right < EYE_CLOSED_CONSEC_FRAMES:
cv2.putText(frame, "Right blink!", (frame.shape[1]-150, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

cv2.circle(frame, (int(det[1]), int(det[0])), 4, (0, 0, 255), -1, 8, 0)

if show_eyes:
cv2.rectangle(frame,
(int(det[1])-int(det[2]), int(det[0])-int(det[2])),
(int(det[1])+int(det[2]), int(det[0])+int(det[2])),
(0, 255, 0), 2
)
@@ -19,20 +19,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]

-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(3 * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))

# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p

# Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * 3) * MAX_NDETS))

if data_pointer :
buffarr = ((c_longlong * 3) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, 3,))
@@ -49,7 +49,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Changing the camera resolution introduce a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -19,20 +19,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]

-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(3 * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))

# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p

# Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * 3) * MAX_NDETS))

if data_pointer :
buffarr = ((c_longlong * 3) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, 3,))
@@ -50,7 +50,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

# Changing the camera resolution introduce a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -34,7 +34,7 @@ func FindFaces(pixels []uint8) uintptr {

det := make([]int, 0, len(result))
go func() {
-// Since in Go we cannot transfer a 2d array trough an array pointer
+// Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range result {
det = append(det, v...)
@@ -95,7 +95,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))

go func() {
-// Since in Go we cannot transfer a 2d array trough an array pointer
+// Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
@@ -20,20 +20,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]

-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))

# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p

# Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))

if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
@@ -42,7 +42,7 @@ def process_frame(pixs):
dets_len = res[0][0]
res = np.delete(res, 0, 0) # delete the first element from the array

# We have to multiply the detection length with the total
# detection points(face, pupils and facial lendmark points), in total 18
dets = list(res.reshape(-1, ARRAY_DIM))[0:dets_len*18]
return dets
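The comment touched here documents the unpacking convention in this example: `dets_len`, read from the first slot of the result buffer, appears to count detected faces, and each face contributes a fixed block of 18 rows covering the face box, the pupils and the facial landmark points, so only the first `dets_len * 18` rows of the fixed-size buffer carry data. A small illustration of that bookkeeping (the helper name is ours):

```python
ROWS_PER_FACE = 18  # face + pupils + facial landmark points, per the comment above

def usable_rows(dets_len):
    """Number of rows of the fixed-size result buffer that hold real detections."""
    return dets_len * ROWS_PER_FACE

# With two faces in the frame, the first 36 rows are detections and the rest of
# the MAX_NDETS-row buffer is padding left at zero.
assert usable_rows(2) == 36
```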
@@ -52,7 +52,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Changing the camera resolution introduce a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -74,18 +74,18 @@ while(True):
for det in dets:
if det[3] > 50:
if det[4] == 0: # 0 == face;
cv2.rectangle(frame,
(int(det[1])-int(det[2]/2), int(det[0])-int(det[2]/2)),
(int(det[1])+int(det[2]/2), int(det[0])+int(det[2]/2)),
(0, 0, 255), 2
)
elif det[4] == 1: # 1 == pupil;
if showPupil:
cv2.circle(frame, (int(det[1]), int(det[0])), 4, (0, 0, 255), -1, 8, 0)
if showEyes:
cv2.rectangle(frame,
(int(det[1])-int(det[2]), int(det[0])-int(det[2])),
(int(det[1])+int(det[2]), int(det[0])+int(det[2])),
(0, 255, 255), 2
)
elif det[4] == 2: # 2 == facial landmark;
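The branches above encode the row format this example relies on: each detection row is (row, col, scale, quality, type), where type 0 marks the face box, 1 a pupil and 2 a facial landmark point, and rows with quality at or below 50 are skipped. A small helper sketch that groups rows by that tag, using the same conventions as the loop above (the function name is ours):

```python
def split_detections(dets, min_quality=50):
    """Group detection rows by the type tag in column 4 (0=face, 1=pupil, 2=landmark)."""
    faces, pupils, landmarks = [], [], []
    for det in dets:
        row, col, scale, q, kind = (int(v) for v in det[:5])
        if q <= min_quality:
            continue  # same quality cutoff as the `det[3] > 50` check above
        if kind == 0:
            faces.append((row, col, scale))
        elif kind == 1:
            pupils.append((row, col, scale))
        elif kind == 2:
            landmarks.append((row, col, scale))
    return faces, pupils, landmarks
```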
@@ -59,7 +59,7 @@ func FindFaces(pixels []uint8) uintptr {

coords := make([]int, 0, len(dets))
go func() {
-// Since in Go we cannot transfer a 2d array trough an array pointer
+// Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
@@ -20,20 +20,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]

-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))

# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p

# Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))

if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
@@ -52,7 +52,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Changing the camera resolution introduce a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -78,9 +78,9 @@ while(True):
if showPupil:
cv2.circle(frame, (int(det[1]), int(det[0])), 4, (0, 0, 255), -1, 8, 0)
if showEyes:
cv2.rectangle(frame,
(int(det[1])-int(det[2]), int(det[0])-int(det[2])),
(int(det[1])+int(det[2]), int(det[0])+int(det[2])),
(0, 255, 0), 2
)
@@ -70,7 +70,7 @@ func FindFaces(pixels []uint8) uintptr {

coords := make([]int, 0, len(dets))
go func() {
-// Since in Go we cannot transfer a 2d array trough an array pointer
+// Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
@@ -34,20 +34,20 @@ def rotateImage(image, angle):

return result

-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))

# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p

# Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))

if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
@@ -66,7 +66,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Changing the camera resolution introduce a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -81,7 +81,7 @@ while(True):

if dets is not None:
# We know that the detected faces are taking place in the first positions of the multidimensional array.
for row, col, scale, q, angle in dets:
if q > 50:
if angle == 0:
px, py = col, row
@@ -105,8 +105,8 @@ while(True):
# Convert the image to BGR
source_img = source_img[:,:,:3]

if scale < img_height or scale < img_width:
if img_height > img_width:
img_scale = float(scale)/float(img_height)
else:
img_scale = float(scale)/float(img_width)
@@ -115,15 +115,15 @@ while(True):
img = cv2.resize(source_img, (width, height), cv2.INTER_AREA)
mask = cv2.resize(orig_mask, (width, height), cv2.INTER_AREA)
mask_inv = cv2.resize(orig_mask_inv, (width, height), cv2.INTER_AREA)

if px == None or py == None:
continue

y1 = row-scale/2+(row-scale/2-(py-height))
y2 = row-scale/2+height+(row-scale/2-(py-height))
x1 = col-scale/2
x2 = col-scale/2+width

if y1 < 0 or y2 < 0:
continue
roi = frame[y1:y2, x1:x2]
@@ -132,15 +132,15 @@ while(True):

# join the roi_bg and roi_fg
dst = cv2.add(roi_bg, roi_fg)
frame[y1:y2, x1:x2] = dst

cv2.imshow('', frame)

key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
elif key & 0xFF == ord('w'):
show_face = not show_face
elif key & 0xFF == ord('e'):
img_idx += 1
if img_idx > len(source_imgs)-1:
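This hunk only shows the last step of the overlay: `roi_bg` and `roi_fg` are joined and written back into the face region of the frame. The usual OpenCV pattern behind those two names masks the background ROI with the inverted overlay mask and the overlay image with the mask itself before adding them; the sketch below assumes that pattern, since the diff does not show how roi_bg and roi_fg are produced:

```python
import cv2

def composite_overlay(roi, overlay, mask, mask_inv):
    """Blend an overlay image into a same-sized frame region using a binary mask
    (mask selects overlay pixels, mask_inv selects the original frame pixels)."""
    roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)      # keep frame where the overlay is transparent
    roi_fg = cv2.bitwise_and(overlay, overlay, mask=mask)  # keep overlay where it is opaque
    return cv2.add(roi_bg, roi_fg)                         # join the two, as in the hunk above

# Usage, matching the variables in the diff:
# frame[y1:y2, x1:x2] = composite_overlay(frame[y1:y2, x1:x2], img, mask, mask_inv)
```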
@@ -119,7 +119,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))

go func() {
-// Since in Go we cannot transfer a 2d array trough an array pointer
+// Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
@@ -47,7 +47,7 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]

-# Obtain the camera pixels and transfer them to Go trough Ctypes
+# Obtain the camera pixels and transfer them to Go through Ctypes
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
@@ -1,7 +1,7 @@
## Webcam demo (slow)

This demo shows how we can transfer the webcam frames from Python to Go by sending the captured frames as byte array into the standard output.
-We will run the face detector over the byte arrays received from the standard output and send the result into a web browser trough a webserver.
+We will run the face detector over the byte arrays received from the standard output and send the result into a web browser through a webserver.

### Run
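The README paragraph above describes the pipeline: the Python capture script writes each frame as a raw byte array to its standard output, the Go process reads those bytes, runs the detector and serves the result to the browser through a webserver. A minimal sketch of the Python half under that description (frame size and grayscale conversion are assumptions; the actual scripts in the repo may differ):

```python
import sys
import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # One fixed-size (640x480) grayscale frame per write; the consumer on the
    # other end of the pipe reads exactly width*height bytes per frame.
    sys.stdout.buffer.write(gray.tobytes())
    sys.stdout.buffer.flush()
```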
@@ -17,12 +17,12 @@ $ GOOS=js GOARCH=wasm go build -o lib.wasm wasm.go
```
### Supported keys:
<kbd>s</kbd> - Show/hide pupils<br/>
-<kbd>c</kbd> - Circle trough the detection shape types (`rectangle`|`circle`|`ellipse`)<br/>
+<kbd>c</kbd> - Circle through the detection shape types (`rectangle`|`circle`|`ellipse`)<br/>
<kbd>f</kbd> - Show/hide facial landmark points (hidden by default)

## Demos

For **Webassembly** related demos check this separate repo:

https://github.com/esimov/pigo-wasm-demos
@@ -23,7 +23,7 @@ func NewDetector() *Detector {
return &d
}

-// FetchCascade retrive the cascade file trough a JS http connection.
+// FetchCascade retrive the cascade file through a JS http connection.
// It should return the binary data as uint8 integers or err in case of an error.
func (d *Detector) FetchCascade(url string) ([]byte, error) {
d.respChan = make(chan []uint8)