diff --git a/core/flploc_test.go b/core/flploc_test.go
index a7de551..fa743e1 100644
--- a/core/flploc_test.go
+++ b/core/flploc_test.go
@@ -153,7 +153,7 @@ func TestFlploc_LandmarkPointsDetectorShouldReturnCorrectDetectionPoints(t *test
}
}
- expectedLandmarkPoints := 2*len(eyeCascades) + len(mouthCascades) + 1
+ expectedLandmarkPoints := 2*len(eyeCascades) + len(mouthCascades) + 1 // landmark points of the left/right eyes, mouth + nose
if expectedLandmarkPoints != detectedLandmarkPoints {
t.Fatalf("expected facial landmark points to be detected: %d, got: %d", expectedLandmarkPoints, detectedLandmarkPoints)
}
diff --git a/examples/blinkdet/blinkdet.go b/examples/blinkdet/blinkdet.go
index 2cc67c8..fd40a3e 100644
--- a/examples/blinkdet/blinkdet.go
+++ b/examples/blinkdet/blinkdet.go
@@ -59,7 +59,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))
go func() {
- // Since in Go we cannot transfer a 2d array trough an array pointer
+ // Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
diff --git a/examples/blinkdet/blinkdet.py b/examples/blinkdet/blinkdet.py
index 37a35d1..d574b44 100644
--- a/examples/blinkdet/blinkdet.py
+++ b/examples/blinkdet/blinkdet.py
@@ -22,20 +22,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]
-# Obtain the camera pixels and transfer them to Go trough C types.
+# Obtain the camera pixels and transfer them to Go through C types.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
-
+
# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p
- # Call the exported FindFaces function from Go.
+ # Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))
-
+
if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, 5,))
@@ -54,7 +54,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
-# Changing the camera resolution introduce a short delay in the camera initialization.
+# Changing the camera resolution introduces a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -77,9 +77,9 @@ while(True):
for det in dets:
if det[4] == 1: # 1 == face; 0 == pupil
face_posy = det[1]
- cv2.rectangle(frame,
- (int(det[1])-int(det[2]/2), int(det[0])-int(det[2]/2)),
- (int(det[1])+int(det[2]/2), int(det[0])+int(det[2]/2)),
+ cv2.rectangle(frame,
+ (int(det[1])-int(det[2]/2), int(det[0])-int(det[2]/2)),
+ (int(det[1])+int(det[2]/2), int(det[0])+int(det[2]/2)),
(0, 0, 255), 2
)
else:
@@ -89,22 +89,22 @@ while(True):
x1, x2 = int(det[0])-int(det[2]*1.2), int(det[0])+int(det[2]*1.2)
y1, y2 = int(det[1])-int(det[2]*1.2), int(det[1])+int(det[2]*1.2)
- subimg = frame[x1:x2, y1:y2]
-
+ subimg = frame[x1:x2, y1:y2]
+
if subimg is not None:
gray = cv2.cvtColor(subimg, cv2.COLOR_BGR2GRAY)
img_blur = cv2.medianBlur(gray, 1)
if img_blur is not None:
max_radius = int(det[2]*0.45)
- circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, int(det[2]*0.45),
+ circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, int(det[2]*0.45),
param1=60, param2=21, minRadius=4, maxRadius=max_radius)
-
+
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
- if i[2] < max_radius and i[2] > 0:
- # Draw outer circle
+ if i[2] < max_radius and i[2] > 0:
+ # Draw outer circle
cv2.circle(frame, (int(det[1]), int(det[0])), i[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(frame, (int(det[1]), int(det[0])), 2, (255, 0, 255), 3)
@@ -112,21 +112,21 @@ while(True):
if face_posy < y1:
count_left = 0
else:
- count_right = 0
-
- if count_left < EYE_CLOSED_CONSEC_FRAMES:
+ count_right = 0
+
+ if count_left < EYE_CLOSED_CONSEC_FRAMES:
cv2.putText(frame, "Left blink!", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
elif count_right < EYE_CLOSED_CONSEC_FRAMES:
cv2.putText(frame, "Right blink!", (frame.shape[1]-150, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
-
+
cv2.circle(frame, (int(det[1]), int(det[0])), 4, (0, 0, 255), -1, 8, 0)
if show_eyes:
- cv2.rectangle(frame,
- (int(det[1])-int(det[2]), int(det[0])-int(det[2])),
- (int(det[1])+int(det[2]), int(det[0])+int(det[2])),
+ cv2.rectangle(frame,
+ (int(det[1])-int(det[2]), int(det[0])-int(det[2])),
+ (int(det[1])+int(det[2]), int(det[0])+int(det[2])),
(0, 255, 0), 2
)
diff --git a/examples/facedet/demo.py b/examples/facedet/demo.py
index c140c4e..438e72f 100644
--- a/examples/facedet/demo.py
+++ b/examples/facedet/demo.py
@@ -19,20 +19,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]
-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(3 * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
-
+
# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p
- # Call the exported FindFaces function from Go.
+ # Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * 3) * MAX_NDETS))
-
+
if data_pointer :
buffarr = ((c_longlong * 3) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, 3,))
@@ -49,7 +49,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
-# Changing the camera resolution introduce a short delay in the camera initialization.
+# Changing the camera resolution introduces a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
diff --git a/examples/facedet/faceblur.py b/examples/facedet/faceblur.py
index d025ae2..96a5514 100644
--- a/examples/facedet/faceblur.py
+++ b/examples/facedet/faceblur.py
@@ -19,20 +19,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]
-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(3 * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
-
+
# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p
- # Call the exported FindFaces function from Go.
+ # Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * 3) * MAX_NDETS))
-
+
if data_pointer :
buffarr = ((c_longlong * 3) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, 3,))
@@ -50,7 +50,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
-# Changing the camera resolution introduce a short delay in the camera initialization.
+# Changing the camera resolution introduces a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
diff --git a/examples/facedet/pigo.go b/examples/facedet/pigo.go
index 87eaa66..ec32d17 100644
--- a/examples/facedet/pigo.go
+++ b/examples/facedet/pigo.go
@@ -34,7 +34,7 @@ func FindFaces(pixels []uint8) uintptr {
det := make([]int, 0, len(result))
go func() {
- // Since in Go we cannot transfer a 2d array trough an array pointer
+ // Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range result {
det = append(det, v...)
diff --git a/examples/facial_landmark/flploc.go b/examples/facial_landmark/flploc.go
index de37c45..9ef576d 100644
--- a/examples/facial_landmark/flploc.go
+++ b/examples/facial_landmark/flploc.go
@@ -95,7 +95,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))
go func() {
- // Since in Go we cannot transfer a 2d array trough an array pointer
+ // Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
diff --git a/examples/facial_landmark/flploc.py b/examples/facial_landmark/flploc.py
index 7483d42..f3ca3b9 100644
--- a/examples/facial_landmark/flploc.py
+++ b/examples/facial_landmark/flploc.py
@@ -20,20 +20,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]
-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
-
+
# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p
- # Call the exported FindFaces function from Go.
+ # Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))
-
+
if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
@@ -42,7 +42,7 @@ def process_frame(pixs):
dets_len = res[0][0]
res = np.delete(res, 0, 0) # delete the first element from the array
- # We have to multiply the detection length with the total
+ # We have to multiply the detection length with the total
# detection points(face, pupils and facial lendmark points), in total 18
dets = list(res.reshape(-1, ARRAY_DIM))[0:dets_len*18]
return dets
@@ -52,7 +52,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
-# Changing the camera resolution introduce a short delay in the camera initialization.
+# Changing the camera resolution introduces a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -74,18 +74,18 @@ while(True):
for det in dets:
if det[3] > 50:
if det[4] == 0: # 0 == face;
- cv2.rectangle(frame,
- (int(det[1])-int(det[2]/2), int(det[0])-int(det[2]/2)),
- (int(det[1])+int(det[2]/2), int(det[0])+int(det[2]/2)),
+ cv2.rectangle(frame,
+ (int(det[1])-int(det[2]/2), int(det[0])-int(det[2]/2)),
+ (int(det[1])+int(det[2]/2), int(det[0])+int(det[2]/2)),
(0, 0, 255), 2
)
elif det[4] == 1: # 1 == pupil;
if showPupil:
cv2.circle(frame, (int(det[1]), int(det[0])), 4, (0, 0, 255), -1, 8, 0)
if showEyes:
- cv2.rectangle(frame,
- (int(det[1])-int(det[2]), int(det[0])-int(det[2])),
- (int(det[1])+int(det[2]), int(det[0])+int(det[2])),
+ cv2.rectangle(frame,
+ (int(det[1])-int(det[2]), int(det[0])-int(det[2])),
+ (int(det[1])+int(det[2]), int(det[0])+int(det[2])),
(0, 255, 255), 2
)
elif det[4] == 2: # 2 == facial landmark;
diff --git a/examples/puploc/puploc.go b/examples/puploc/puploc.go
index e2f1c72..bbb643d 100644
--- a/examples/puploc/puploc.go
+++ b/examples/puploc/puploc.go
@@ -59,7 +59,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))
go func() {
- // Since in Go we cannot transfer a 2d array trough an array pointer
+ // Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
diff --git a/examples/puploc/puploc.py b/examples/puploc/puploc.py
index 7a2b038..464507a 100644
--- a/examples/puploc/puploc.py
+++ b/examples/puploc/puploc.py
@@ -20,20 +20,20 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]
-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
-
+
# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p
- # Call the exported FindFaces function from Go.
+ # Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))
-
+
if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
@@ -52,7 +52,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
-# Changing the camera resolution introduce a short delay in the camera initialization.
+# Changing the camera resolution introduces a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -78,9 +78,9 @@ while(True):
if showPupil:
cv2.circle(frame, (int(det[1]), int(det[0])), 4, (0, 0, 255), -1, 8, 0)
if showEyes:
- cv2.rectangle(frame,
- (int(det[1])-int(det[2]), int(det[0])-int(det[2])),
- (int(det[1])+int(det[2]), int(det[0])+int(det[2])),
+ cv2.rectangle(frame,
+ (int(det[1])-int(det[2]), int(det[0])-int(det[2])),
+ (int(det[1])+int(det[2]), int(det[0])+int(det[2])),
(0, 255, 0), 2
)
diff --git a/examples/puploc_masquerade/puploc.go b/examples/puploc_masquerade/puploc.go
index 0ded221..a04d3d6 100644
--- a/examples/puploc_masquerade/puploc.go
+++ b/examples/puploc_masquerade/puploc.go
@@ -70,7 +70,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))
go func() {
- // Since in Go we cannot transfer a 2d array trough an array pointer
+ // Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
diff --git a/examples/puploc_masquerade/puploc.py b/examples/puploc_masquerade/puploc.py
index d0ced55..50eaa3a 100644
--- a/examples/puploc_masquerade/puploc.py
+++ b/examples/puploc_masquerade/puploc.py
@@ -34,20 +34,20 @@ def rotateImage(image, angle):
return result
-# Obtain the camera pixels and transfer them to Go trough Ctypes.
+# Obtain the camera pixels and transfer them to Go through Ctypes.
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
-
+
# call FindFaces
faces = GoPixelSlice(pixels, len(pixs), len(pixs))
pigo.FindFaces.argtypes = [GoPixelSlice]
pigo.FindFaces.restype = c_void_p
- # Call the exported FindFaces function from Go.
+ # Call the exported FindFaces function from Go.
ndets = pigo.FindFaces(faces)
data_pointer = cast(ndets, POINTER((c_longlong * ARRAY_DIM) * MAX_NDETS))
-
+
if data_pointer :
buffarr = ((c_longlong * ARRAY_DIM) * MAX_NDETS).from_address(addressof(data_pointer.contents))
res = np.ndarray(buffer=buffarr, dtype=c_longlong, shape=(MAX_NDETS, ARRAY_DIM,))
@@ -66,7 +66,7 @@ cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
-# Changing the camera resolution introduce a short delay in the camera initialization.
+# Changing the camera resolution introduces a short delay in the camera initialization.
# For this reason we should delay the object detection process with a few milliseconds.
time.sleep(0.4)
@@ -81,7 +81,7 @@ while(True):
if dets is not None:
# We know that the detected faces are taking place in the first positions of the multidimensional array.
- for row, col, scale, q, angle in dets:
+ for row, col, scale, q, angle in dets:
if q > 50:
if angle == 0:
px, py = col, row
@@ -105,8 +105,8 @@ while(True):
# Convert the image to BGR
source_img = source_img[:,:,:3]
- if scale < img_height or scale < img_width:
- if img_height > img_width:
+ if scale < img_height or scale < img_width:
+ if img_height > img_width:
img_scale = float(scale)/float(img_height)
else:
img_scale = float(scale)/float(img_width)
@@ -115,15 +115,15 @@ while(True):
img = cv2.resize(source_img, (width, height), cv2.INTER_AREA)
mask = cv2.resize(orig_mask, (width, height), cv2.INTER_AREA)
mask_inv = cv2.resize(orig_mask_inv, (width, height), cv2.INTER_AREA)
-
+
if px == None or py == None:
continue
-
+
y1 = row-scale/2+(row-scale/2-(py-height))
y2 = row-scale/2+height+(row-scale/2-(py-height))
x1 = col-scale/2
x2 = col-scale/2+width
-
+
if y1 < 0 or y2 < 0:
continue
roi = frame[y1:y2, x1:x2]
@@ -132,15 +132,15 @@ while(True):
# join the roi_bg and roi_fg
dst = cv2.add(roi_bg, roi_fg)
- frame[y1:y2, x1:x2] = dst
-
+ frame[y1:y2, x1:x2] = dst
+
cv2.imshow('', frame)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
elif key & 0xFF == ord('w'):
- show_face = not show_face
+ show_face = not show_face
elif key & 0xFF == ord('e'):
img_idx += 1
if img_idx > len(source_imgs)-1:
diff --git a/examples/talk_detector/talkdet.go b/examples/talk_detector/talkdet.go
index f5e6756..72e24b2 100644
--- a/examples/talk_detector/talkdet.go
+++ b/examples/talk_detector/talkdet.go
@@ -119,7 +119,7 @@ func FindFaces(pixels []uint8) uintptr {
coords := make([]int, 0, len(dets))
go func() {
- // Since in Go we cannot transfer a 2d array trough an array pointer
+ // Since in Go we cannot transfer a 2d array through an array pointer
// we have to transform it into 1d array.
for _, v := range dets {
coords = append(coords, v...)
diff --git a/examples/talk_detector/talkdet.py b/examples/talk_detector/talkdet.py
index 50bbb6e..d552dac 100644
--- a/examples/talk_detector/talkdet.py
+++ b/examples/talk_detector/talkdet.py
@@ -47,7 +47,7 @@ class GoPixelSlice(Structure):
("pixels", POINTER(c_ubyte)), ("len", c_longlong), ("cap", c_longlong),
]
-# Obtain the camera pixels and transfer them to Go trough Ctypes
+# Obtain the camera pixels and transfer them to Go through Ctypes
def process_frame(pixs):
dets = np.zeros(ARRAY_DIM * MAX_NDETS, dtype=np.float32)
pixels = cast((c_ubyte * len(pixs))(*pixs), POINTER(c_ubyte))
diff --git a/examples/web/README.md b/examples/web/README.md
index 361641b..1a0c1ac 100644
--- a/examples/web/README.md
+++ b/examples/web/README.md
@@ -1,7 +1,7 @@
## Webcam demo (slow)
-This demo shows how we can transfer the webcam frames from Python to Go by sending the captured frames as byte array into the standard output.
-We will run the face detector over the byte arrays received from the standard output and send the result into a web browser trough a webserver.
+This demo shows how we can transfer the webcam frames from Python to Go by sending the captured frames as byte array into the standard output.
+We will run the face detector over the byte arrays received from the standard output and send the result into a web browser through a webserver.
### Run
diff --git a/wasm/README.md b/wasm/README.md
index 7c64744..3ac4e7c 100644
--- a/wasm/README.md
+++ b/wasm/README.md
@@ -17,12 +17,12 @@ $ GOOS=js GOARCH=wasm go build -o lib.wasm wasm.go
```
### Supported keys:
s - Show/hide pupils
-c - Circle trough the detection shape types (`rectangle`|`circle`|`ellipse`)
+c - Cycle through the detection shape types (`rectangle`|`circle`|`ellipse`)
f - Show/hide facial landmark points (hidden by default)
## Demos
-For **Webassembly** related demos check this separate repo:
+For **Webassembly** related demos check this separate repo:
-https://github.com/esimov/pigo-wasm-demos
+https://github.com/esimov/pigo-wasm-demos
diff --git a/wasm/detector/fetch.go b/wasm/detector/fetch.go
index 113d020..4c8963f 100644
--- a/wasm/detector/fetch.go
+++ b/wasm/detector/fetch.go
@@ -23,7 +23,7 @@ func NewDetector() *Detector {
return &d
}
-// FetchCascade retrive the cascade file trough a JS http connection.
+// FetchCascade retrieves the cascade file through a JS http connection.
// It should return the binary data as uint8 integers or err in case of an error.
func (d *Detector) FetchCascade(url string) ([]byte, error) {
d.respChan = make(chan []uint8)