mirror of https://github.com/hacksider/Deep-Live-Cam.git
synced 2025-09-26 20:31:28 +08:00

Compare commits: ab8a1c82c1...privacy (12 commits)

Commits:
48c83151a4
bb3502d9bd
a101a1f3f1
01ef955372
ab3b73631b
d8fc1ffa04
5dfd1c0ced
59cd3be0f9
ccb676ac17
f0c66732e7
8055d79daf
3c7dd1a574
1  .python-version (new file)
@@ -0,0 +1 @@
3.10.0
@@ -20,6 +20,7 @@ import modules.metadata
import modules.ui as ui
from modules.processors.frame.core import get_frame_processors_modules
from modules.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
from modules.fake_face_handler import cleanup_fake_face

if 'ROCMExecutionProvider' in modules.globals.execution_providers:
    del torch
@@ -35,9 +36,7 @@ def parse_args() -> None:
    program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
    program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
    program.add_argument('--frame-processor', help='pipeline of frame processors', dest='frame_processor', default=['face_swapper'], choices=['face_swapper', 'face_enhancer'], nargs='+')
    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
    program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
    program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
    program.add_argument('--nsfw-filter', help='filter the NSFW image or video', dest='nsfw_filter', action='store_true', default=False)
    program.add_argument('--map-faces', help='map source target faces', dest='map_faces', action='store_true', default=False)
@@ -65,9 +64,9 @@ def parse_args() -> None:
    modules.globals.output_path = normalize_output_path(modules.globals.source_path, modules.globals.target_path, args.output_path)
    modules.globals.frame_processors = args.frame_processor
    modules.globals.headless = args.source_path or args.target_path or args.output_path
    modules.globals.keep_fps = args.keep_fps
    modules.globals.keep_fps = True
    modules.globals.keep_frames = True
    modules.globals.keep_audio = args.keep_audio
    modules.globals.keep_frames = args.keep_frames
    modules.globals.many_faces = args.many_faces
    modules.globals.mouth_mask = args.mouth_mask
    modules.globals.nsfw_filter = args.nsfw_filter
@@ -241,6 +240,7 @@ def start() -> None:
def destroy(to_quit=True) -> None:
    if modules.globals.target_path:
        clean_temp(modules.globals.target_path)
    cleanup_fake_face()
    if to_quit: quit()
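
Illustration (not part of this commit): the --frame-processor option above uses nargs='+' with a list default, so several processors can be chained behind one flag. A minimal standalone sketch using only the standard library:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--frame-processor', dest='frame_processor', default=['face_swapper'],
               choices=['face_swapper', 'face_enhancer'], nargs='+')

print(p.parse_args([]).frame_processor)  # ['face_swapper']
print(p.parse_args(['--frame-processor', 'face_swapper', 'face_enhancer']).frame_processor)
# ['face_swapper', 'face_enhancer']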
BIN  modules/deeplivecam.ico (new file)
Binary file not shown.
After Width: | Height: | Size: 264 KiB
120  modules/fake_face_handler.py (new file)
@@ -0,0 +1,120 @@
import os
import requests
import tempfile
from pathlib import Path
import cv2
import numpy as np
import modules.globals

def add_padding_to_face(image, padding_ratio=0.3):
    """Add padding around the face image

    Args:
        image: The input face image
        padding_ratio: Amount of padding to add as a ratio of image dimensions

    Returns:
        Padded image with background padding added
    """
    if image is None:
        return None

    height, width = image.shape[:2]
    pad_x = int(width * padding_ratio)
    pad_y = int(height * padding_ratio)

    # Create larger image with padding
    padded_height = height + 2 * pad_y
    padded_width = width + 2 * pad_x
    padded_image = np.zeros((padded_height, padded_width, 3), dtype=np.uint8)

    # Fill padded area with blurred and darkened edge pixels
    edge_color = cv2.blur(image, (15, 15))
    edge_color = (edge_color * 0.6).astype(np.uint8)  # Darken the padding

    # Fill the padded image with original face
    padded_image[pad_y:pad_y+height, pad_x:pad_x+width] = image

    # Fill padding areas with edge color
    # Top padding - repeat first row
    top_edge = edge_color[0, :, :]
    for i in range(pad_y):
        padded_image[i, pad_x:pad_x+width] = top_edge

    # Bottom padding - repeat last row
    bottom_edge = edge_color[-1, :, :]
    for i in range(pad_y):
        padded_image[pad_y+height+i, pad_x:pad_x+width] = bottom_edge

    # Left padding - repeat first column
    left_edge = edge_color[:, 0, :]
    for i in range(pad_x):
        padded_image[pad_y:pad_y+height, i] = left_edge

    # Right padding - repeat last column
    right_edge = edge_color[:, -1, :]
    for i in range(pad_x):
        padded_image[pad_y:pad_y+height, pad_x+width+i] = right_edge

    # Fill corners with nearest edge colors
    # Top-left corner
    padded_image[:pad_y, :pad_x] = edge_color[0, 0, :]
    # Top-right corner
    padded_image[:pad_y, pad_x+width:] = edge_color[0, -1, :]
    # Bottom-left corner
    padded_image[pad_y+height:, :pad_x] = edge_color[-1, 0, :]
    # Bottom-right corner
    padded_image[pad_y+height:, pad_x+width:] = edge_color[-1, -1, :]

    return padded_image

def get_fake_face() -> str:
    """Fetch a face from thispersondoesnotexist.com and save it temporarily"""
    try:
        # Create temp directory if it doesn't exist
        temp_dir = Path(tempfile.gettempdir()) / "deep-live-cam"
        temp_dir.mkdir(parents=True, exist_ok=True)

        # Generate temp file path
        temp_file = temp_dir / "fake_face.jpg"

        # Basic headers to mimic a browser request
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Fetch the image
        response = requests.get('https://thispersondoesnotexist.com', headers=headers)

        if response.status_code == 200:
            # Read image from response
            image_array = np.asarray(bytearray(response.content), dtype=np.uint8)
            image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)

            # Add padding around the face
            padded_image = add_padding_to_face(image)

            # Save the padded image
            cv2.imwrite(str(temp_file), padded_image)
            return str(temp_file)
        else:
            print(f"Failed to fetch fake face: {response.status_code}")
            return None
    except Exception as e:
        print(f"Error fetching fake face: {str(e)}")
        return None

def cleanup_fake_face():
    """Clean up the temporary fake face image"""
    try:
        if modules.globals.fake_face_path and os.path.exists(modules.globals.fake_face_path):
            os.remove(modules.globals.fake_face_path)
            modules.globals.fake_face_path = None
    except Exception as e:
        print(f"Error cleaning up fake face: {str(e)}")

def refresh_fake_face():
    """Refresh the fake face image"""
    cleanup_fake_face()
    modules.globals.fake_face_path = get_fake_face()
    return modules.globals.fake_face_path is not None
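
Usage sketch (not part of this commit) for the handler above; assigning the downloaded face to modules.globals.source_path is an assumption about how the privacy switch consumes it:

import modules.globals
from modules.fake_face_handler import refresh_fake_face, cleanup_fake_face

if refresh_fake_face():
    # a freshly fetched, padded face image now exists on disk
    modules.globals.source_path = modules.globals.fake_face_path
# ... run the swap ...
cleanup_fake_face()  # delete the temporary image afterwards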
@@ -21,7 +21,7 @@ keep_audio = True
keep_frames = False
many_faces = False
map_faces = False
color_correction = False  # New global variable for color correction toggle
color_correction = False
nsfw_filter = False
video_encoder = None
video_quality = None
@@ -41,3 +41,12 @@ show_mouth_mask_box = False
mask_feather_ratio = 8
mask_down_size = 0.50
mask_size = 1
mouth_mask_size = 1.0
eyes_mask = False
show_eyes_mask_box = False
eyebrows_mask = False
show_eyebrows_mask_box = False
eyes_mask_size = 1.0
eyebrows_mask_size = 1.0
use_fake_face = False
fake_face_path = None
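
Worked example (not part of this commit) of how these defaults feed the masking code added in face_masking.py, where the mouth expansion factor is 1 + mask_down_size * mouth_mask_size:

mask_down_size = 0.50
mouth_mask_size = 1.0
expansion_factor = 1 + mask_down_size * mouth_mask_size  # 1.5: landmarks pushed 50% away from their centroid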
634  modules/processors/frame/face_masking.py (new file)
@@ -0,0 +1,634 @@
import cv2
import numpy as np
from modules.typing import Face, Frame
import modules.globals

def apply_color_transfer(source, target):
    """
    Apply color transfer from target to source image
    """
    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")

    source_mean, source_std = cv2.meanStdDev(source)
    target_mean, target_std = cv2.meanStdDev(target)

    # Reshape mean and std to be broadcastable
    source_mean = source_mean.reshape(1, 1, 3)
    source_std = source_std.reshape(1, 1, 3)
    target_mean = target_mean.reshape(1, 1, 3)
    target_std = target_std.reshape(1, 1, 3)

    # Perform the color transfer
    source = (source - source_mean) * (target_std / source_std) + target_mean

    return cv2.cvtColor(np.clip(source, 0, 255).astype("uint8"), cv2.COLOR_LAB2BGR)
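
Illustration (not part of this commit) of what apply_color_transfer does, assuming the function above is in scope; two random BGR patches stand in for a mask cutout and the surrounding ROI:

import numpy as np

rng = np.random.default_rng(0)
cutout = rng.integers(0, 256, (32, 32, 3), dtype=np.uint8)   # source patch to be recolored
roi = rng.integers(0, 256, (32, 32, 3), dtype=np.uint8)      # region whose lighting we want to match
matched = apply_color_transfer(cutout, roi)                   # cutout re-lit with roi's LAB mean/std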
def create_face_mask(face: Face, frame: Frame) -> np.ndarray:
|
||||
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None:
|
||||
# Convert landmarks to int32
|
||||
landmarks = landmarks.astype(np.int32)
|
||||
|
||||
# Extract facial features
|
||||
right_side_face = landmarks[0:16]
|
||||
left_side_face = landmarks[17:32]
|
||||
right_eye = landmarks[33:42]
|
||||
right_eye_brow = landmarks[43:51]
|
||||
left_eye = landmarks[87:96]
|
||||
left_eye_brow = landmarks[97:105]
|
||||
|
||||
# Calculate forehead extension
|
||||
right_eyebrow_top = np.min(right_eye_brow[:, 1])
|
||||
left_eyebrow_top = np.min(left_eye_brow[:, 1])
|
||||
eyebrow_top = min(right_eyebrow_top, left_eyebrow_top)
|
||||
|
||||
face_top = np.min([right_side_face[0, 1], left_side_face[-1, 1]])
|
||||
forehead_height = face_top - eyebrow_top
|
||||
extended_forehead_height = int(forehead_height * 5.0) # Extend by 50%
|
||||
|
||||
# Create forehead points
|
||||
forehead_left = right_side_face[0].copy()
|
||||
forehead_right = left_side_face[-1].copy()
|
||||
forehead_left[1] -= extended_forehead_height
|
||||
forehead_right[1] -= extended_forehead_height
|
||||
|
||||
# Combine all points to create the face outline
|
||||
face_outline = np.vstack(
|
||||
[
|
||||
[forehead_left],
|
||||
right_side_face,
|
||||
left_side_face[::-1], # Reverse left side to create a continuous outline
|
||||
[forehead_right],
|
||||
]
|
||||
)
|
||||
|
||||
# Calculate padding
|
||||
padding = int(
|
||||
np.linalg.norm(right_side_face[0] - left_side_face[-1]) * 0.05
|
||||
) # 5% of face width
|
||||
|
||||
# Create a slightly larger convex hull for padding
|
||||
hull = cv2.convexHull(face_outline)
|
||||
hull_padded = []
|
||||
for point in hull:
|
||||
x, y = point[0]
|
||||
center = np.mean(face_outline, axis=0)
|
||||
direction = np.array([x, y]) - center
|
||||
direction = direction / np.linalg.norm(direction)
|
||||
padded_point = np.array([x, y]) + direction * padding
|
||||
hull_padded.append(padded_point)
|
||||
|
||||
hull_padded = np.array(hull_padded, dtype=np.int32)
|
||||
|
||||
# Fill the padded convex hull
|
||||
cv2.fillConvexPoly(mask, hull_padded, 255)
|
||||
|
||||
# Smooth the mask edges
|
||||
mask = cv2.GaussianBlur(mask, (5, 5), 3)
|
||||
|
||||
return mask
|
||||
|
||||
def create_lower_mouth_mask(
|
||||
face: Face, frame: Frame
|
||||
) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
|
||||
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
|
||||
mouth_cutout = None
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None:
|
||||
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
|
||||
lower_lip_order = [
|
||||
65,
|
||||
66,
|
||||
62,
|
||||
70,
|
||||
69,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24,
|
||||
0,
|
||||
8,
|
||||
7,
|
||||
6,
|
||||
5,
|
||||
4,
|
||||
3,
|
||||
2,
|
||||
65,
|
||||
]
|
||||
lower_lip_landmarks = landmarks[lower_lip_order].astype(
|
||||
np.float32
|
||||
) # Use float for precise calculations
|
||||
|
||||
# Calculate the center of the landmarks
|
||||
center = np.mean(lower_lip_landmarks, axis=0)
|
||||
|
||||
# Expand the landmarks outward using the mouth_mask_size
|
||||
expansion_factor = (
|
||||
1 + modules.globals.mask_down_size * modules.globals.mouth_mask_size
|
||||
) # Adjust expansion based on slider
|
||||
expanded_landmarks = (lower_lip_landmarks - center) * expansion_factor + center
|
||||
|
||||
# Extend the top lip part
|
||||
toplip_indices = [
|
||||
20,
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
] # Indices for landmarks 2, 65, 66, 62, 70, 69, 18
|
||||
toplip_extension = (
|
||||
modules.globals.mask_size * modules.globals.mouth_mask_size * 0.5
|
||||
) # Adjust extension based on slider
|
||||
for idx in toplip_indices:
|
||||
direction = expanded_landmarks[idx] - center
|
||||
direction = direction / np.linalg.norm(direction)
|
||||
expanded_landmarks[idx] += direction * toplip_extension
|
||||
|
||||
# Extend the bottom part (chin area)
|
||||
chin_indices = [
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
] # Indices for landmarks 21, 22, 23, 24, 0, 8
|
||||
chin_extension = 2 * 0.2 # Adjust this factor to control the extension
|
||||
for idx in chin_indices:
|
||||
expanded_landmarks[idx][1] += (
|
||||
expanded_landmarks[idx][1] - center[1]
|
||||
) * chin_extension
|
||||
|
||||
# Convert back to integer coordinates
|
||||
expanded_landmarks = expanded_landmarks.astype(np.int32)
|
||||
|
||||
# Calculate bounding box for the expanded lower mouth
|
||||
min_x, min_y = np.min(expanded_landmarks, axis=0)
|
||||
max_x, max_y = np.max(expanded_landmarks, axis=0)
|
||||
|
||||
# Add some padding to the bounding box
|
||||
padding = int((max_x - min_x) * 0.1) # 10% padding
|
||||
min_x = max(0, min_x - padding)
|
||||
min_y = max(0, min_y - padding)
|
||||
max_x = min(frame.shape[1], max_x + padding)
|
||||
max_y = min(frame.shape[0], max_y + padding)
|
||||
|
||||
# Ensure the bounding box dimensions are valid
|
||||
if max_x <= min_x or max_y <= min_y:
|
||||
if (max_x - min_x) <= 1:
|
||||
max_x = min_x + 1
|
||||
if (max_y - min_y) <= 1:
|
||||
max_y = min_y + 1
|
||||
|
||||
# Create the mask
|
||||
mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
|
||||
cv2.fillPoly(mask_roi, [expanded_landmarks - [min_x, min_y]], 255)
|
||||
|
||||
# Apply Gaussian blur to soften the mask edges
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (15, 15), 5)
|
||||
|
||||
# Place the mask ROI in the full-sized mask
|
||||
mask[min_y:max_y, min_x:max_x] = mask_roi
|
||||
|
||||
# Extract the masked area from the frame
|
||||
mouth_cutout = frame[min_y:max_y, min_x:max_x].copy()
|
||||
|
||||
# Return the expanded lower lip polygon in original frame coordinates
|
||||
lower_lip_polygon = expanded_landmarks
|
||||
|
||||
return mask, mouth_cutout, (min_x, min_y, max_x, max_y), lower_lip_polygon
|
||||
|
||||
def create_eyes_mask(face: Face, frame: Frame) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
|
||||
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
|
||||
eyes_cutout = None
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None:
|
||||
# Left eye landmarks (87-96) and right eye landmarks (33-42)
|
||||
left_eye = landmarks[87:96]
|
||||
right_eye = landmarks[33:42]
|
||||
|
||||
# Calculate centers and dimensions for each eye
|
||||
left_eye_center = np.mean(left_eye, axis=0).astype(np.int32)
|
||||
right_eye_center = np.mean(right_eye, axis=0).astype(np.int32)
|
||||
|
||||
# Calculate eye dimensions with size adjustment
|
||||
def get_eye_dimensions(eye_points):
|
||||
x_coords = eye_points[:, 0]
|
||||
y_coords = eye_points[:, 1]
|
||||
width = int((np.max(x_coords) - np.min(x_coords)) * (1 + modules.globals.mask_down_size * modules.globals.eyes_mask_size))
|
||||
height = int((np.max(y_coords) - np.min(y_coords)) * (1 + modules.globals.mask_down_size * modules.globals.eyes_mask_size))
|
||||
return width, height
|
||||
|
||||
left_width, left_height = get_eye_dimensions(left_eye)
|
||||
right_width, right_height = get_eye_dimensions(right_eye)
|
||||
|
||||
# Add extra padding
|
||||
padding = int(max(left_width, right_width) * 0.2)
|
||||
|
||||
# Calculate bounding box for both eyes
|
||||
min_x = min(left_eye_center[0] - left_width//2, right_eye_center[0] - right_width//2) - padding
|
||||
max_x = max(left_eye_center[0] + left_width//2, right_eye_center[0] + right_width//2) + padding
|
||||
min_y = min(left_eye_center[1] - left_height//2, right_eye_center[1] - right_height//2) - padding
|
||||
max_y = max(left_eye_center[1] + left_height//2, right_eye_center[1] + right_height//2) + padding
|
||||
|
||||
# Ensure coordinates are within frame bounds
|
||||
min_x = max(0, min_x)
|
||||
min_y = max(0, min_y)
|
||||
max_x = min(frame.shape[1], max_x)
|
||||
max_y = min(frame.shape[0], max_y)
|
||||
|
||||
# Create mask for the eyes region
|
||||
mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
|
||||
|
||||
# Draw ellipses for both eyes
|
||||
left_center = (left_eye_center[0] - min_x, left_eye_center[1] - min_y)
|
||||
right_center = (right_eye_center[0] - min_x, right_eye_center[1] - min_y)
|
||||
|
||||
# Calculate axes lengths (half of width and height)
|
||||
left_axes = (left_width//2, left_height//2)
|
||||
right_axes = (right_width//2, right_height//2)
|
||||
|
||||
# Draw filled ellipses
|
||||
cv2.ellipse(mask_roi, left_center, left_axes, 0, 0, 360, 255, -1)
|
||||
cv2.ellipse(mask_roi, right_center, right_axes, 0, 0, 360, 255, -1)
|
||||
|
||||
# Apply Gaussian blur to soften mask edges
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (15, 15), 5)
|
||||
|
||||
# Place the mask ROI in the full-sized mask
|
||||
mask[min_y:max_y, min_x:max_x] = mask_roi
|
||||
|
||||
# Extract the masked area from the frame
|
||||
eyes_cutout = frame[min_y:max_y, min_x:max_x].copy()
|
||||
|
||||
# Create polygon points for visualization
|
||||
def create_ellipse_points(center, axes):
|
||||
t = np.linspace(0, 2*np.pi, 32)
|
||||
x = center[0] + axes[0] * np.cos(t)
|
||||
y = center[1] + axes[1] * np.sin(t)
|
||||
return np.column_stack((x, y)).astype(np.int32)
|
||||
|
||||
# Generate points for both ellipses
|
||||
left_points = create_ellipse_points((left_eye_center[0], left_eye_center[1]), (left_width//2, left_height//2))
|
||||
right_points = create_ellipse_points((right_eye_center[0], right_eye_center[1]), (right_width//2, right_height//2))
|
||||
|
||||
# Combine points for both eyes
|
||||
eyes_polygon = np.vstack([left_points, right_points])
|
||||
|
||||
return mask, eyes_cutout, (min_x, min_y, max_x, max_y), eyes_polygon
|
||||
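
Illustration (not part of this commit) of the parametric ellipse sampling used by create_ellipse_points above: 32 points around an ellipse centred at (100, 80) with half-axes 20 and 10, in a form cv2.polylines can draw:

import numpy as np

t = np.linspace(0, 2 * np.pi, 32)
x = 100 + 20 * np.cos(t)
y = 80 + 10 * np.sin(t)
ellipse_points = np.column_stack((x, y)).astype(np.int32)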
|
||||
def create_curved_eyebrow(points):
|
||||
if len(points) >= 5:
|
||||
# Sort points by x-coordinate
|
||||
sorted_idx = np.argsort(points[:, 0])
|
||||
sorted_points = points[sorted_idx]
|
||||
|
||||
# Calculate dimensions
|
||||
x_min, y_min = np.min(sorted_points, axis=0)
|
||||
x_max, y_max = np.max(sorted_points, axis=0)
|
||||
width = x_max - x_min
|
||||
height = y_max - y_min
|
||||
|
||||
# Create more points for smoother curve
|
||||
num_points = 50
|
||||
x = np.linspace(x_min, x_max, num_points)
|
||||
|
||||
# Fit quadratic curve through points for more natural arch
|
||||
coeffs = np.polyfit(sorted_points[:, 0], sorted_points[:, 1], 2)
|
||||
y = np.polyval(coeffs, x)
|
||||
|
||||
# Increased offsets to create more separation
|
||||
top_offset = height * 0.5 # Increased from 0.3 to shift up more
|
||||
bottom_offset = height * 0.2 # Increased from 0.1 to shift down more
|
||||
|
||||
# Create smooth curves
|
||||
top_curve = y - top_offset
|
||||
bottom_curve = y + bottom_offset
|
||||
|
||||
# Create curved endpoints with more pronounced taper
|
||||
end_points = 5
|
||||
start_x = np.linspace(x[0] - width * 0.15, x[0], end_points) # Increased taper
|
||||
end_x = np.linspace(x[-1], x[-1] + width * 0.15, end_points) # Increased taper
|
||||
|
||||
# Create tapered ends
|
||||
start_curve = np.column_stack((
|
||||
start_x,
|
||||
np.linspace(bottom_curve[0], top_curve[0], end_points)
|
||||
))
|
||||
end_curve = np.column_stack((
|
||||
end_x,
|
||||
np.linspace(bottom_curve[-1], top_curve[-1], end_points)
|
||||
))
|
||||
|
||||
# Combine all points to form a smooth contour
|
||||
contour_points = np.vstack([
|
||||
start_curve,
|
||||
np.column_stack((x, top_curve)),
|
||||
end_curve,
|
||||
np.column_stack((x[::-1], bottom_curve[::-1]))
|
||||
])
|
||||
|
||||
# Add slight padding for better coverage
|
||||
center = np.mean(contour_points, axis=0)
|
||||
vectors = contour_points - center
|
||||
padded_points = center + vectors * 1.2 # Increased padding slightly
|
||||
|
||||
return padded_points
|
||||
return points
|
||||
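
Illustration (not part of this commit) of the quadratic fit that create_curved_eyebrow relies on: np.polyfit/np.polyval recover a smooth arch from a handful of eyebrow landmarks (the coordinates below are made up for the example):

import numpy as np

pts = np.array([[10, 52], [18, 47], [26, 45], [34, 47], [42, 53]], dtype=np.float32)
coeffs = np.polyfit(pts[:, 0], pts[:, 1], 2)   # y = a*x**2 + b*x + c
x = np.linspace(pts[0, 0], pts[-1, 0], 50)
y = np.polyval(coeffs, x)                      # 50 points along the fitted arch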
|
||||
def create_eyebrows_mask(face: Face, frame: Frame) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
|
||||
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
|
||||
eyebrows_cutout = None
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None:
|
||||
# Left eyebrow landmarks (97-105) and right eyebrow landmarks (43-51)
|
||||
left_eyebrow = landmarks[97:105].astype(np.float32)
|
||||
right_eyebrow = landmarks[43:51].astype(np.float32)
|
||||
|
||||
# Calculate centers and dimensions for each eyebrow
|
||||
left_center = np.mean(left_eyebrow, axis=0)
|
||||
right_center = np.mean(right_eyebrow, axis=0)
|
||||
|
||||
# Calculate bounding box with padding adjusted by size
|
||||
all_points = np.vstack([left_eyebrow, right_eyebrow])
|
||||
padding_factor = modules.globals.eyebrows_mask_size
|
||||
min_x = np.min(all_points[:, 0]) - 25 * padding_factor
|
||||
max_x = np.max(all_points[:, 0]) + 25 * padding_factor
|
||||
min_y = np.min(all_points[:, 1]) - 20 * padding_factor
|
||||
max_y = np.max(all_points[:, 1]) + 15 * padding_factor
|
||||
|
||||
# Ensure coordinates are within frame bounds
|
||||
min_x = max(0, int(min_x))
|
||||
min_y = max(0, int(min_y))
|
||||
max_x = min(frame.shape[1], int(max_x))
|
||||
max_y = min(frame.shape[0], int(max_y))
|
||||
|
||||
# Create mask for the eyebrows region
|
||||
mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
|
||||
|
||||
try:
|
||||
# Convert points to local coordinates
|
||||
left_local = left_eyebrow - [min_x, min_y]
|
||||
right_local = right_eyebrow - [min_x, min_y]
|
||||
|
||||
def create_curved_eyebrow(points):
|
||||
if len(points) >= 5:
|
||||
# Sort points by x-coordinate
|
||||
sorted_idx = np.argsort(points[:, 0])
|
||||
sorted_points = points[sorted_idx]
|
||||
|
||||
# Calculate dimensions
|
||||
x_min, y_min = np.min(sorted_points, axis=0)
|
||||
x_max, y_max = np.max(sorted_points, axis=0)
|
||||
width = x_max - x_min
|
||||
height = y_max - y_min
|
||||
|
||||
# Create more points for smoother curve
|
||||
num_points = 50
|
||||
x = np.linspace(x_min, x_max, num_points)
|
||||
|
||||
# Fit quadratic curve through points for more natural arch
|
||||
coeffs = np.polyfit(sorted_points[:, 0], sorted_points[:, 1], 2)
|
||||
y = np.polyval(coeffs, x)
|
||||
|
||||
# Increased offsets to create more separation
|
||||
top_offset = height * 0.5 # Increased from 0.3 to shift up more
|
||||
bottom_offset = height * 0.2 # Increased from 0.1 to shift down more
|
||||
|
||||
# Create smooth curves
|
||||
top_curve = y - top_offset
|
||||
bottom_curve = y + bottom_offset
|
||||
|
||||
# Create curved endpoints with more pronounced taper
|
||||
end_points = 5
|
||||
start_x = np.linspace(x[0] - width * 0.15, x[0], end_points) # Increased taper
|
||||
end_x = np.linspace(x[-1], x[-1] + width * 0.15, end_points) # Increased taper
|
||||
|
||||
# Create tapered ends
|
||||
start_curve = np.column_stack((
|
||||
start_x,
|
||||
np.linspace(bottom_curve[0], top_curve[0], end_points)
|
||||
))
|
||||
end_curve = np.column_stack((
|
||||
end_x,
|
||||
np.linspace(bottom_curve[-1], top_curve[-1], end_points)
|
||||
))
|
||||
|
||||
# Combine all points to form a smooth contour
|
||||
contour_points = np.vstack([
|
||||
start_curve,
|
||||
np.column_stack((x, top_curve)),
|
||||
end_curve,
|
||||
np.column_stack((x[::-1], bottom_curve[::-1]))
|
||||
])
|
||||
|
||||
# Add slight padding for better coverage
|
||||
center = np.mean(contour_points, axis=0)
|
||||
vectors = contour_points - center
|
||||
padded_points = center + vectors * 1.2 # Increased padding slightly
|
||||
|
||||
return padded_points
|
||||
return points
|
||||
|
||||
# Generate and draw eyebrow shapes
|
||||
left_shape = create_curved_eyebrow(left_local)
|
||||
right_shape = create_curved_eyebrow(right_local)
|
||||
|
||||
# Apply multi-stage blurring for natural feathering
|
||||
# First, strong Gaussian blur for initial softening
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (21, 21), 7)
|
||||
|
||||
# Second, medium blur for transition areas
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (11, 11), 3)
|
||||
|
||||
# Finally, light blur for fine details
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (5, 5), 1)
|
||||
|
||||
# Normalize mask values
|
||||
mask_roi = cv2.normalize(mask_roi, None, 0, 255, cv2.NORM_MINMAX)
|
||||
|
||||
# Place the mask ROI in the full-sized mask
|
||||
mask[min_y:max_y, min_x:max_x] = mask_roi
|
||||
|
||||
# Extract the masked area from the frame
|
||||
eyebrows_cutout = frame[min_y:max_y, min_x:max_x].copy()
|
||||
|
||||
# Combine points for visualization
|
||||
eyebrows_polygon = np.vstack([
|
||||
left_shape + [min_x, min_y],
|
||||
right_shape + [min_x, min_y]
|
||||
]).astype(np.int32)
|
||||
|
||||
except Exception as e:
|
||||
# Fallback to simple polygons if curve fitting fails
|
||||
left_local = left_eyebrow - [min_x, min_y]
|
||||
right_local = right_eyebrow - [min_x, min_y]
|
||||
cv2.fillPoly(mask_roi, [left_local.astype(np.int32)], 255)
|
||||
cv2.fillPoly(mask_roi, [right_local.astype(np.int32)], 255)
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (21, 21), 7)
|
||||
mask[min_y:max_y, min_x:max_x] = mask_roi
|
||||
eyebrows_cutout = frame[min_y:max_y, min_x:max_x].copy()
|
||||
eyebrows_polygon = np.vstack([left_eyebrow, right_eyebrow]).astype(np.int32)
|
||||
|
||||
return mask, eyebrows_cutout, (min_x, min_y, max_x, max_y), eyebrows_polygon
|
||||
|
||||
def apply_mask_area(
|
||||
frame: np.ndarray,
|
||||
cutout: np.ndarray,
|
||||
box: tuple,
|
||||
face_mask: np.ndarray,
|
||||
polygon: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
min_x, min_y, max_x, max_y = box
|
||||
box_width = max_x - min_x
|
||||
box_height = max_y - min_y
|
||||
|
||||
if (
|
||||
cutout is None
|
||||
or box_width is None
|
||||
or box_height is None
|
||||
or face_mask is None
|
||||
or polygon is None
|
||||
):
|
||||
return frame
|
||||
|
||||
try:
|
||||
resized_cutout = cv2.resize(cutout, (box_width, box_height))
|
||||
roi = frame[min_y:max_y, min_x:max_x]
|
||||
|
||||
if roi.shape != resized_cutout.shape:
|
||||
resized_cutout = cv2.resize(
|
||||
resized_cutout, (roi.shape[1], roi.shape[0])
|
||||
)
|
||||
|
||||
color_corrected_area = apply_color_transfer(resized_cutout, roi)
|
||||
|
||||
# Create mask for the area
|
||||
polygon_mask = np.zeros(roi.shape[:2], dtype=np.uint8)
|
||||
|
||||
# Split points for left and right parts if needed
|
||||
if len(polygon) > 50: # Arbitrary threshold to detect if we have multiple parts
|
||||
mid_point = len(polygon) // 2
|
||||
left_points = polygon[:mid_point] - [min_x, min_y]
|
||||
right_points = polygon[mid_point:] - [min_x, min_y]
|
||||
cv2.fillPoly(polygon_mask, [left_points], 255)
|
||||
cv2.fillPoly(polygon_mask, [right_points], 255)
|
||||
else:
|
||||
adjusted_polygon = polygon - [min_x, min_y]
|
||||
cv2.fillPoly(polygon_mask, [adjusted_polygon], 255)
|
||||
|
||||
# Apply strong initial feathering
|
||||
polygon_mask = cv2.GaussianBlur(polygon_mask, (21, 21), 7)
|
||||
|
||||
# Apply additional feathering
|
||||
feather_amount = min(
|
||||
30,
|
||||
box_width // modules.globals.mask_feather_ratio,
|
||||
box_height // modules.globals.mask_feather_ratio,
|
||||
)
|
||||
feathered_mask = cv2.GaussianBlur(
|
||||
polygon_mask.astype(float), (0, 0), feather_amount
|
||||
)
|
||||
feathered_mask = feathered_mask / feathered_mask.max()
|
||||
|
||||
# Apply additional smoothing to the mask edges
|
||||
feathered_mask = cv2.GaussianBlur(feathered_mask, (5, 5), 1)
|
||||
|
||||
face_mask_roi = face_mask[min_y:max_y, min_x:max_x]
|
||||
combined_mask = feathered_mask * (face_mask_roi / 255.0)
|
||||
|
||||
combined_mask = combined_mask[:, :, np.newaxis]
|
||||
blended = (
|
||||
color_corrected_area * combined_mask + roi * (1 - combined_mask)
|
||||
).astype(np.uint8)
|
||||
|
||||
# Apply face mask to blended result
|
||||
face_mask_3channel = (
|
||||
np.repeat(face_mask_roi[:, :, np.newaxis], 3, axis=2) / 255.0
|
||||
)
|
||||
final_blend = blended * face_mask_3channel + roi * (1 - face_mask_3channel)
|
||||
|
||||
frame[min_y:max_y, min_x:max_x] = final_blend.astype(np.uint8)
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
return frame
|
||||
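
Worked example (not part of this commit) of the feathering amount computed in apply_mask_area above, using the default mask_feather_ratio of 8 from modules/globals.py and an assumed 160x96 bounding box:

box_width, box_height, mask_feather_ratio = 160, 96, 8
feather_amount = min(30, box_width // mask_feather_ratio, box_height // mask_feather_ratio)
assert feather_amount == 12  # Gaussian sigma used to soften the polygon mask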
|
||||
def draw_mask_visualization(
|
||||
frame: Frame,
|
||||
mask_data: tuple,
|
||||
label: str,
|
||||
draw_method: str = "polygon"
|
||||
) -> Frame:
|
||||
mask, cutout, (min_x, min_y, max_x, max_y), polygon = mask_data
|
||||
|
||||
vis_frame = frame.copy()
|
||||
|
||||
# Ensure coordinates are within frame bounds
|
||||
height, width = vis_frame.shape[:2]
|
||||
min_x, min_y = max(0, min_x), max(0, min_y)
|
||||
max_x, max_y = min(width, max_x), min(height, max_y)
|
||||
|
||||
if draw_method == "ellipse" and len(polygon) > 50: # For eyes
|
||||
# Split points for left and right parts
|
||||
mid_point = len(polygon) // 2
|
||||
left_points = polygon[:mid_point]
|
||||
right_points = polygon[mid_point:]
|
||||
|
||||
try:
|
||||
# Fit ellipses to points - need at least 5 points
|
||||
if len(left_points) >= 5 and len(right_points) >= 5:
|
||||
# Convert points to the correct format for ellipse fitting
|
||||
left_points = left_points.astype(np.float32)
|
||||
right_points = right_points.astype(np.float32)
|
||||
|
||||
# Fit ellipses
|
||||
left_ellipse = cv2.fitEllipse(left_points)
|
||||
right_ellipse = cv2.fitEllipse(right_points)
|
||||
|
||||
# Draw the ellipses
|
||||
cv2.ellipse(vis_frame, left_ellipse, (0, 255, 0), 2)
|
||||
cv2.ellipse(vis_frame, right_ellipse, (0, 255, 0), 2)
|
||||
except Exception as e:
|
||||
# If ellipse fitting fails, draw simple rectangles as fallback
|
||||
left_rect = cv2.boundingRect(left_points)
|
||||
right_rect = cv2.boundingRect(right_points)
|
||||
cv2.rectangle(vis_frame,
|
||||
(left_rect[0], left_rect[1]),
|
||||
(left_rect[0] + left_rect[2], left_rect[1] + left_rect[3]),
|
||||
(0, 255, 0), 2)
|
||||
cv2.rectangle(vis_frame,
|
||||
(right_rect[0], right_rect[1]),
|
||||
(right_rect[0] + right_rect[2], right_rect[1] + right_rect[3]),
|
||||
(0, 255, 0), 2)
|
||||
else: # For mouth and eyebrows
|
||||
# Draw the polygon
|
||||
if len(polygon) > 50: # If we have multiple parts
|
||||
mid_point = len(polygon) // 2
|
||||
left_points = polygon[:mid_point]
|
||||
right_points = polygon[mid_point:]
|
||||
cv2.polylines(vis_frame, [left_points], True, (0, 255, 0), 2, cv2.LINE_AA)
|
||||
cv2.polylines(vis_frame, [right_points], True, (0, 255, 0), 2, cv2.LINE_AA)
|
||||
else:
|
||||
cv2.polylines(vis_frame, [polygon], True, (0, 255, 0), 2, cv2.LINE_AA)
|
||||
|
||||
# Add label
|
||||
cv2.putText(
|
||||
vis_frame,
|
||||
label,
|
||||
(min_x, min_y - 10),
|
||||
cv2.FONT_HERSHEY_SIMPLEX,
|
||||
0.5,
|
||||
(255, 255, 255),
|
||||
1,
|
||||
)
|
||||
|
||||
return vis_frame
|
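
End-to-end sketch (not part of this commit) of how these helpers compose, mirroring the calls made from face_swapper.py below; detected_face and frame are assumed to come from the usual insightface detection step:

import modules.globals
from modules.processors.frame.face_masking import (
    create_face_mask, create_lower_mouth_mask, apply_mask_area, draw_mask_visualization,
)

face_mask = create_face_mask(detected_face, frame)
mouth_data = create_lower_mouth_mask(detected_face, frame)  # (mask, cutout, box, polygon)
frame = apply_mask_area(frame, mouth_data[1], mouth_data[2], face_mask, mouth_data[3])
if modules.globals.show_mouth_mask_box:
    frame = draw_mask_visualization(frame, mouth_data, "Lower Mouth Mask")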
@@ -14,6 +14,14 @@ from modules.utilities import (
|
||||
is_video,
|
||||
)
|
||||
from modules.cluster_analysis import find_closest_centroid
|
||||
from modules.processors.frame.face_masking import (
|
||||
create_face_mask,
|
||||
create_lower_mouth_mask,
|
||||
create_eyes_mask,
|
||||
create_eyebrows_mask,
|
||||
apply_mask_area,
|
||||
draw_mask_visualization
|
||||
)
|
||||
import os
|
||||
|
||||
FACE_SWAPPER = None
|
||||
@@ -74,24 +82,62 @@ def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
|
||||
temp_frame, target_face, source_face, paste_back=True
|
||||
)
|
||||
|
||||
# Create face mask for both mouth and eyes masking
|
||||
face_mask = create_face_mask(target_face, temp_frame)
|
||||
|
||||
if modules.globals.mouth_mask:
|
||||
# Create a mask for the target face
|
||||
face_mask = create_face_mask(target_face, temp_frame)
|
||||
|
||||
# Create the mouth mask
|
||||
mouth_mask, mouth_cutout, mouth_box, lower_lip_polygon = (
|
||||
create_lower_mouth_mask(target_face, temp_frame)
|
||||
)
|
||||
|
||||
# Apply the mouth area
|
||||
swapped_frame = apply_mouth_area(
|
||||
swapped_frame, mouth_cutout, mouth_box, face_mask, lower_lip_polygon
|
||||
# Create and apply mouth mask
|
||||
mouth_mask_data = create_lower_mouth_mask(target_face, temp_frame)
|
||||
swapped_frame = apply_mask_area(
|
||||
swapped_frame,
|
||||
mouth_mask_data[1], # mouth_cutout
|
||||
mouth_mask_data[2], # mouth_box
|
||||
face_mask,
|
||||
mouth_mask_data[3] # mouth_polygon
|
||||
)
|
||||
|
||||
if modules.globals.show_mouth_mask_box:
|
||||
mouth_mask_data = (mouth_mask, mouth_cutout, mouth_box, lower_lip_polygon)
|
||||
swapped_frame = draw_mouth_mask_visualization(
|
||||
swapped_frame, target_face, mouth_mask_data
|
||||
swapped_frame = draw_mask_visualization(
|
||||
swapped_frame,
|
||||
mouth_mask_data,
|
||||
"Lower Mouth Mask"
|
||||
)
|
||||
|
||||
if modules.globals.eyes_mask:
|
||||
# Create and apply eyes mask
|
||||
eyes_mask_data = create_eyes_mask(target_face, temp_frame)
|
||||
swapped_frame = apply_mask_area(
|
||||
swapped_frame,
|
||||
eyes_mask_data[1], # eyes_cutout
|
||||
eyes_mask_data[2], # eyes_box
|
||||
face_mask,
|
||||
eyes_mask_data[3] # eyes_polygon
|
||||
)
|
||||
|
||||
if modules.globals.show_eyes_mask_box:
|
||||
swapped_frame = draw_mask_visualization(
|
||||
swapped_frame,
|
||||
eyes_mask_data,
|
||||
"Eyes Mask",
|
||||
draw_method="ellipse"
|
||||
)
|
||||
|
||||
if modules.globals.eyebrows_mask:
|
||||
# Create and apply eyebrows mask
|
||||
eyebrows_mask_data = create_eyebrows_mask(target_face, temp_frame)
|
||||
swapped_frame = apply_mask_area(
|
||||
swapped_frame,
|
||||
eyebrows_mask_data[1], # eyebrows_cutout
|
||||
eyebrows_mask_data[2], # eyebrows_box
|
||||
face_mask,
|
||||
eyebrows_mask_data[3] # eyebrows_polygon
|
||||
)
|
||||
|
||||
if modules.globals.show_eyebrows_mask_box:
|
||||
swapped_frame = draw_mask_visualization(
|
||||
swapped_frame,
|
||||
eyebrows_mask_data,
|
||||
"Eyebrows Mask"
|
||||
)
|
||||
|
||||
return swapped_frame
|
||||
@@ -255,361 +301,3 @@ def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
|
||||
modules.processors.frame.core.process_video(
|
||||
source_path, temp_frame_paths, process_frames
|
||||
)
|
||||
|
||||
|
||||
def create_lower_mouth_mask(
|
||||
face: Face, frame: Frame
|
||||
) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
|
||||
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
|
||||
mouth_cutout = None
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None:
|
||||
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
|
||||
lower_lip_order = [
|
||||
65,
|
||||
66,
|
||||
62,
|
||||
70,
|
||||
69,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24,
|
||||
0,
|
||||
8,
|
||||
7,
|
||||
6,
|
||||
5,
|
||||
4,
|
||||
3,
|
||||
2,
|
||||
65,
|
||||
]
|
||||
lower_lip_landmarks = landmarks[lower_lip_order].astype(
|
||||
np.float32
|
||||
) # Use float for precise calculations
|
||||
|
||||
# Calculate the center of the landmarks
|
||||
center = np.mean(lower_lip_landmarks, axis=0)
|
||||
|
||||
# Expand the landmarks outward
|
||||
expansion_factor = (
|
||||
1 + modules.globals.mask_down_size
|
||||
) # Adjust this for more or less expansion
|
||||
expanded_landmarks = (lower_lip_landmarks - center) * expansion_factor + center
|
||||
|
||||
# Extend the top lip part
|
||||
toplip_indices = [
|
||||
20,
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
] # Indices for landmarks 2, 65, 66, 62, 70, 69, 18
|
||||
toplip_extension = (
|
||||
modules.globals.mask_size * 0.5
|
||||
) # Adjust this factor to control the extension
|
||||
for idx in toplip_indices:
|
||||
direction = expanded_landmarks[idx] - center
|
||||
direction = direction / np.linalg.norm(direction)
|
||||
expanded_landmarks[idx] += direction * toplip_extension
|
||||
|
||||
# Extend the bottom part (chin area)
|
||||
chin_indices = [
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
] # Indices for landmarks 21, 22, 23, 24, 0, 8
|
||||
chin_extension = 2 * 0.2 # Adjust this factor to control the extension
|
||||
for idx in chin_indices:
|
||||
expanded_landmarks[idx][1] += (
|
||||
expanded_landmarks[idx][1] - center[1]
|
||||
) * chin_extension
|
||||
|
||||
# Convert back to integer coordinates
|
||||
expanded_landmarks = expanded_landmarks.astype(np.int32)
|
||||
|
||||
# Calculate bounding box for the expanded lower mouth
|
||||
min_x, min_y = np.min(expanded_landmarks, axis=0)
|
||||
max_x, max_y = np.max(expanded_landmarks, axis=0)
|
||||
|
||||
# Add some padding to the bounding box
|
||||
padding = int((max_x - min_x) * 0.1) # 10% padding
|
||||
min_x = max(0, min_x - padding)
|
||||
min_y = max(0, min_y - padding)
|
||||
max_x = min(frame.shape[1], max_x + padding)
|
||||
max_y = min(frame.shape[0], max_y + padding)
|
||||
|
||||
# Ensure the bounding box dimensions are valid
|
||||
if max_x <= min_x or max_y <= min_y:
|
||||
if (max_x - min_x) <= 1:
|
||||
max_x = min_x + 1
|
||||
if (max_y - min_y) <= 1:
|
||||
max_y = min_y + 1
|
||||
|
||||
# Create the mask
|
||||
mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
|
||||
cv2.fillPoly(mask_roi, [expanded_landmarks - [min_x, min_y]], 255)
|
||||
|
||||
# Apply Gaussian blur to soften the mask edges
|
||||
mask_roi = cv2.GaussianBlur(mask_roi, (15, 15), 5)
|
||||
|
||||
# Place the mask ROI in the full-sized mask
|
||||
mask[min_y:max_y, min_x:max_x] = mask_roi
|
||||
|
||||
# Extract the masked area from the frame
|
||||
mouth_cutout = frame[min_y:max_y, min_x:max_x].copy()
|
||||
|
||||
# Return the expanded lower lip polygon in original frame coordinates
|
||||
lower_lip_polygon = expanded_landmarks
|
||||
|
||||
return mask, mouth_cutout, (min_x, min_y, max_x, max_y), lower_lip_polygon
|
||||
|
||||
|
||||
def draw_mouth_mask_visualization(
|
||||
frame: Frame, face: Face, mouth_mask_data: tuple
|
||||
) -> Frame:
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None and mouth_mask_data is not None:
|
||||
mask, mouth_cutout, (min_x, min_y, max_x, max_y), lower_lip_polygon = (
|
||||
mouth_mask_data
|
||||
)
|
||||
|
||||
vis_frame = frame.copy()
|
||||
|
||||
# Ensure coordinates are within frame bounds
|
||||
height, width = vis_frame.shape[:2]
|
||||
min_x, min_y = max(0, min_x), max(0, min_y)
|
||||
max_x, max_y = min(width, max_x), min(height, max_y)
|
||||
|
||||
# Adjust mask to match the region size
|
||||
mask_region = mask[0 : max_y - min_y, 0 : max_x - min_x]
|
||||
|
||||
# Remove the color mask overlay
|
||||
# color_mask = cv2.applyColorMap((mask_region * 255).astype(np.uint8), cv2.COLORMAP_JET)
|
||||
|
||||
# Ensure shapes match before blending
|
||||
vis_region = vis_frame[min_y:max_y, min_x:max_x]
|
||||
# Remove blending with color_mask
|
||||
# if vis_region.shape[:2] == color_mask.shape[:2]:
|
||||
# blended = cv2.addWeighted(vis_region, 0.7, color_mask, 0.3, 0)
|
||||
# vis_frame[min_y:max_y, min_x:max_x] = blended
|
||||
|
||||
# Draw the lower lip polygon
|
||||
cv2.polylines(vis_frame, [lower_lip_polygon], True, (0, 255, 0), 2)
|
||||
|
||||
# Remove the red box
|
||||
# cv2.rectangle(vis_frame, (min_x, min_y), (max_x, max_y), (0, 0, 255), 2)
|
||||
|
||||
# Visualize the feathered mask
|
||||
feather_amount = max(
|
||||
1,
|
||||
min(
|
||||
30,
|
||||
(max_x - min_x) // modules.globals.mask_feather_ratio,
|
||||
(max_y - min_y) // modules.globals.mask_feather_ratio,
|
||||
),
|
||||
)
|
||||
# Ensure kernel size is odd
|
||||
kernel_size = 2 * feather_amount + 1
|
||||
feathered_mask = cv2.GaussianBlur(
|
||||
mask_region.astype(float), (kernel_size, kernel_size), 0
|
||||
)
|
||||
feathered_mask = (feathered_mask / feathered_mask.max() * 255).astype(np.uint8)
|
||||
# Remove the feathered mask color overlay
|
||||
# color_feathered_mask = cv2.applyColorMap(feathered_mask, cv2.COLORMAP_VIRIDIS)
|
||||
|
||||
# Ensure shapes match before blending feathered mask
|
||||
# if vis_region.shape == color_feathered_mask.shape:
|
||||
# blended_feathered = cv2.addWeighted(vis_region, 0.7, color_feathered_mask, 0.3, 0)
|
||||
# vis_frame[min_y:max_y, min_x:max_x] = blended_feathered
|
||||
|
||||
# Add labels
|
||||
cv2.putText(
|
||||
vis_frame,
|
||||
"Lower Mouth Mask",
|
||||
(min_x, min_y - 10),
|
||||
cv2.FONT_HERSHEY_SIMPLEX,
|
||||
0.5,
|
||||
(255, 255, 255),
|
||||
1,
|
||||
)
|
||||
cv2.putText(
|
||||
vis_frame,
|
||||
"Feathered Mask",
|
||||
(min_x, max_y + 20),
|
||||
cv2.FONT_HERSHEY_SIMPLEX,
|
||||
0.5,
|
||||
(255, 255, 255),
|
||||
1,
|
||||
)
|
||||
|
||||
return vis_frame
|
||||
return frame
|
||||
|
||||
|
||||
def apply_mouth_area(
|
||||
frame: np.ndarray,
|
||||
mouth_cutout: np.ndarray,
|
||||
mouth_box: tuple,
|
||||
face_mask: np.ndarray,
|
||||
mouth_polygon: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
min_x, min_y, max_x, max_y = mouth_box
|
||||
box_width = max_x - min_x
|
||||
box_height = max_y - min_y
|
||||
|
||||
if (
|
||||
mouth_cutout is None
|
||||
or box_width is None
|
||||
or box_height is None
|
||||
or face_mask is None
|
||||
or mouth_polygon is None
|
||||
):
|
||||
return frame
|
||||
|
||||
try:
|
||||
resized_mouth_cutout = cv2.resize(mouth_cutout, (box_width, box_height))
|
||||
roi = frame[min_y:max_y, min_x:max_x]
|
||||
|
||||
if roi.shape != resized_mouth_cutout.shape:
|
||||
resized_mouth_cutout = cv2.resize(
|
||||
resized_mouth_cutout, (roi.shape[1], roi.shape[0])
|
||||
)
|
||||
|
||||
color_corrected_mouth = apply_color_transfer(resized_mouth_cutout, roi)
|
||||
|
||||
# Use the provided mouth polygon to create the mask
|
||||
polygon_mask = np.zeros(roi.shape[:2], dtype=np.uint8)
|
||||
adjusted_polygon = mouth_polygon - [min_x, min_y]
|
||||
cv2.fillPoly(polygon_mask, [adjusted_polygon], 255)
|
||||
|
||||
# Apply feathering to the polygon mask
|
||||
feather_amount = min(
|
||||
30,
|
||||
box_width // modules.globals.mask_feather_ratio,
|
||||
box_height // modules.globals.mask_feather_ratio,
|
||||
)
|
||||
feathered_mask = cv2.GaussianBlur(
|
||||
polygon_mask.astype(float), (0, 0), feather_amount
|
||||
)
|
||||
feathered_mask = feathered_mask / feathered_mask.max()
|
||||
|
||||
face_mask_roi = face_mask[min_y:max_y, min_x:max_x]
|
||||
combined_mask = feathered_mask * (face_mask_roi / 255.0)
|
||||
|
||||
combined_mask = combined_mask[:, :, np.newaxis]
|
||||
blended = (
|
||||
color_corrected_mouth * combined_mask + roi * (1 - combined_mask)
|
||||
).astype(np.uint8)
|
||||
|
||||
# Apply face mask to blended result
|
||||
face_mask_3channel = (
|
||||
np.repeat(face_mask_roi[:, :, np.newaxis], 3, axis=2) / 255.0
|
||||
)
|
||||
final_blend = blended * face_mask_3channel + roi * (1 - face_mask_3channel)
|
||||
|
||||
frame[min_y:max_y, min_x:max_x] = final_blend.astype(np.uint8)
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
return frame
|
||||
|
||||
|
||||
def create_face_mask(face: Face, frame: Frame) -> np.ndarray:
|
||||
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
|
||||
landmarks = face.landmark_2d_106
|
||||
if landmarks is not None:
|
||||
# Convert landmarks to int32
|
||||
landmarks = landmarks.astype(np.int32)
|
||||
|
||||
# Extract facial features
|
||||
right_side_face = landmarks[0:16]
|
||||
left_side_face = landmarks[17:32]
|
||||
right_eye = landmarks[33:42]
|
||||
right_eye_brow = landmarks[43:51]
|
||||
left_eye = landmarks[87:96]
|
||||
left_eye_brow = landmarks[97:105]
|
||||
|
||||
# Calculate forehead extension
|
||||
right_eyebrow_top = np.min(right_eye_brow[:, 1])
|
||||
left_eyebrow_top = np.min(left_eye_brow[:, 1])
|
||||
eyebrow_top = min(right_eyebrow_top, left_eyebrow_top)
|
||||
|
||||
face_top = np.min([right_side_face[0, 1], left_side_face[-1, 1]])
|
||||
forehead_height = face_top - eyebrow_top
|
||||
extended_forehead_height = int(forehead_height * 5.0) # Extend by 50%
|
||||
|
||||
# Create forehead points
|
||||
forehead_left = right_side_face[0].copy()
|
||||
forehead_right = left_side_face[-1].copy()
|
||||
forehead_left[1] -= extended_forehead_height
|
||||
forehead_right[1] -= extended_forehead_height
|
||||
|
||||
# Combine all points to create the face outline
|
||||
face_outline = np.vstack(
|
||||
[
|
||||
[forehead_left],
|
||||
right_side_face,
|
||||
left_side_face[
|
||||
::-1
|
||||
], # Reverse left side to create a continuous outline
|
||||
[forehead_right],
|
||||
]
|
||||
)
|
||||
|
||||
# Calculate padding
|
||||
padding = int(
|
||||
np.linalg.norm(right_side_face[0] - left_side_face[-1]) * 0.05
|
||||
) # 5% of face width
|
||||
|
||||
# Create a slightly larger convex hull for padding
|
||||
hull = cv2.convexHull(face_outline)
|
||||
hull_padded = []
|
||||
for point in hull:
|
||||
x, y = point[0]
|
||||
center = np.mean(face_outline, axis=0)
|
||||
direction = np.array([x, y]) - center
|
||||
direction = direction / np.linalg.norm(direction)
|
||||
padded_point = np.array([x, y]) + direction * padding
|
||||
hull_padded.append(padded_point)
|
||||
|
||||
hull_padded = np.array(hull_padded, dtype=np.int32)
|
||||
|
||||
# Fill the padded convex hull
|
||||
cv2.fillConvexPoly(mask, hull_padded, 255)
|
||||
|
||||
# Smooth the mask edges
|
||||
mask = cv2.GaussianBlur(mask, (5, 5), 3)
|
||||
|
||||
return mask
|
||||
|
||||
|
||||
def apply_color_transfer(source, target):
|
||||
"""
|
||||
Apply color transfer from target to source image
|
||||
"""
|
||||
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
|
||||
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
|
||||
|
||||
source_mean, source_std = cv2.meanStdDev(source)
|
||||
target_mean, target_std = cv2.meanStdDev(target)
|
||||
|
||||
# Reshape mean and std to be broadcastable
|
||||
source_mean = source_mean.reshape(1, 1, 3)
|
||||
source_std = source_std.reshape(1, 1, 3)
|
||||
target_mean = target_mean.reshape(1, 1, 3)
|
||||
target_std = target_std.reshape(1, 1, 3)
|
||||
|
||||
# Perform the color transfer
|
||||
source = (source - source_mean) * (target_std / source_std) + target_mean
|
||||
|
||||
return cv2.cvtColor(np.clip(source, 0, 255).astype("uint8"), cv2.COLOR_LAB2BGR)
|
||||
|
382  modules/ui.py
@@ -3,7 +3,7 @@ import webbrowser
import customtkinter as ctk
from typing import Callable, Tuple
import cv2
from cv2_enumerate_cameras import enumerate_cameras  # Add this import
from cv2_enumerate_cameras import enumerate_cameras
from PIL import Image, ImageOps
import time
import json
@@ -28,6 +28,7 @@ from modules.utilities import (
|
||||
from modules.video_capture import VideoCapturer
|
||||
from modules.gettext import LanguageManager
|
||||
import platform
|
||||
from modules.fake_face_handler import cleanup_fake_face, refresh_fake_face
|
||||
|
||||
if platform.system() == "Windows":
|
||||
from pygrabber.dshow_graph import FilterGraph
|
||||
@@ -35,7 +36,7 @@ if platform.system() == "Windows":
ROOT = None
POPUP = None
POPUP_LIVE = None
ROOT_HEIGHT = 700
ROOT_HEIGHT = 730
ROOT_WIDTH = 600

PREVIEW = None
@@ -78,9 +79,12 @@ target_label_dict_live = {}

img_ft, vid_ft = modules.globals.file_types

fake_face_switch = None
fake_face_value = None


def init(start: Callable[[], None], destroy: Callable[[], None], lang: str) -> ctk.CTk:
    global ROOT, PREVIEW, _
    global ROOT, PREVIEW, _, fake_face_switch, fake_face_value

    lang_manager = LanguageManager(lang)
    _ = lang_manager._
@@ -91,51 +95,56 @@ def init(start: Callable[[], None], destroy: Callable[[], None], lang: str) -> c
|
||||
|
||||
|
||||
def save_switch_states():
|
||||
switch_states = {
|
||||
"keep_fps": modules.globals.keep_fps,
|
||||
"keep_audio": modules.globals.keep_audio,
|
||||
"keep_frames": modules.globals.keep_frames,
|
||||
"many_faces": modules.globals.many_faces,
|
||||
"map_faces": modules.globals.map_faces,
|
||||
"color_correction": modules.globals.color_correction,
|
||||
"nsfw_filter": modules.globals.nsfw_filter,
|
||||
"live_mirror": modules.globals.live_mirror,
|
||||
"live_resizable": modules.globals.live_resizable,
|
||||
"fp_ui": modules.globals.fp_ui,
|
||||
"show_fps": modules.globals.show_fps,
|
||||
"mouth_mask": modules.globals.mouth_mask,
|
||||
"show_mouth_mask_box": modules.globals.show_mouth_mask_box,
|
||||
}
|
||||
with open("switch_states.json", "w") as f:
|
||||
json.dump(switch_states, f)
|
||||
try:
|
||||
states = {
|
||||
"keep_fps": modules.globals.keep_fps,
|
||||
"keep_audio": modules.globals.keep_audio,
|
||||
"keep_frames": modules.globals.keep_frames,
|
||||
"many_faces": modules.globals.many_faces,
|
||||
"map_faces": modules.globals.map_faces,
|
||||
"color_correction": modules.globals.color_correction,
|
||||
"nsfw_filter": modules.globals.nsfw_filter,
|
||||
"live_mirror": modules.globals.live_mirror,
|
||||
"live_resizable": modules.globals.live_resizable,
|
||||
"fp_ui": modules.globals.fp_ui,
|
||||
"show_fps": modules.globals.show_fps,
|
||||
"mouth_mask": modules.globals.mouth_mask,
|
||||
"show_mouth_mask_box": modules.globals.show_mouth_mask_box,
|
||||
"use_fake_face": modules.globals.use_fake_face
|
||||
}
|
||||
with open(get_config_path(), 'w') as f:
|
||||
json.dump(states, f)
|
||||
except Exception as e:
|
||||
print(f"Error saving switch states: {str(e)}")
|
||||
|
||||
|
||||
def load_switch_states():
|
||||
try:
|
||||
with open("switch_states.json", "r") as f:
|
||||
switch_states = json.load(f)
|
||||
modules.globals.keep_fps = switch_states.get("keep_fps", True)
|
||||
modules.globals.keep_audio = switch_states.get("keep_audio", True)
|
||||
modules.globals.keep_frames = switch_states.get("keep_frames", False)
|
||||
modules.globals.many_faces = switch_states.get("many_faces", False)
|
||||
modules.globals.map_faces = switch_states.get("map_faces", False)
|
||||
modules.globals.color_correction = switch_states.get("color_correction", False)
|
||||
modules.globals.nsfw_filter = switch_states.get("nsfw_filter", False)
|
||||
modules.globals.live_mirror = switch_states.get("live_mirror", False)
|
||||
modules.globals.live_resizable = switch_states.get("live_resizable", False)
|
||||
modules.globals.fp_ui = switch_states.get("fp_ui", {"face_enhancer": False})
|
||||
modules.globals.show_fps = switch_states.get("show_fps", False)
|
||||
modules.globals.mouth_mask = switch_states.get("mouth_mask", False)
|
||||
modules.globals.show_mouth_mask_box = switch_states.get(
|
||||
"show_mouth_mask_box", False
|
||||
)
|
||||
except FileNotFoundError:
|
||||
# If the file doesn't exist, use default values
|
||||
pass
|
||||
if os.path.exists(get_config_path()):
|
||||
with open(get_config_path(), 'r') as f:
|
||||
states = json.load(f)
|
||||
modules.globals.keep_fps = states.get("keep_fps", True)
|
||||
modules.globals.keep_audio = states.get("keep_audio", True)
|
||||
modules.globals.keep_frames = states.get("keep_frames", False)
|
||||
modules.globals.many_faces = states.get("many_faces", False)
|
||||
modules.globals.map_faces = states.get("map_faces", False)
|
||||
modules.globals.color_correction = states.get("color_correction", False)
|
||||
modules.globals.nsfw_filter = states.get("nsfw_filter", False)
|
||||
modules.globals.live_mirror = states.get("live_mirror", False)
|
||||
modules.globals.live_resizable = states.get("live_resizable", False)
|
||||
modules.globals.fp_ui = states.get("fp_ui", {"face_enhancer": False})
|
||||
modules.globals.show_fps = states.get("show_fps", False)
|
||||
modules.globals.mouth_mask = states.get("mouth_mask", False)
|
||||
modules.globals.show_mouth_mask_box = states.get(
|
||||
"show_mouth_mask_box", False
|
||||
)
|
||||
modules.globals.use_fake_face = False
|
||||
except Exception as e:
|
||||
print(f"Error loading switch states: {str(e)}")
|
||||
|
||||
|
||||
def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
|
||||
global source_label, target_label, status_label, show_fps_switch
|
||||
global source_label, target_label, status_label, show_fps_switch, fake_face_switch, fake_face_value
|
||||
|
||||
load_switch_states()
|
||||
|
||||
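
get_config_path() is called above but its definition is not included in these hunks; a hypothetical sketch of such a helper (path layout assumed, not taken from the repository):

import os

def get_config_path() -> str:
    # hypothetical: keep switch_states.json in a per-user config directory
    config_dir = os.path.join(os.path.expanduser("~"), ".deep-live-cam")
    os.makedirs(config_dir, exist_ok=True)
    return os.path.join(config_dir, "switch_states.json")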
@@ -150,22 +159,28 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
|
||||
)
|
||||
root.configure()
|
||||
root.protocol("WM_DELETE_WINDOW", lambda: destroy())
|
||||
|
||||
# Add icon to the main window
|
||||
icon_path = resolve_relative_path("deeplivecam.ico")
|
||||
if os.path.exists(icon_path):
|
||||
root.iconbitmap(icon_path)
|
||||
|
||||
# Image Selection Area (Top)
|
||||
source_label = ctk.CTkLabel(root, text=None)
|
||||
source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
|
||||
source_label.place(relx=0.1, rely=0.05, relwidth=0.3, relheight=0.25)
|
||||
|
||||
target_label = ctk.CTkLabel(root, text=None)
|
||||
target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
|
||||
target_label.place(relx=0.6, rely=0.05, relwidth=0.3, relheight=0.25)
|
||||
|
||||
select_face_button = ctk.CTkButton(
|
||||
root, text=_("Select a face"), cursor="hand2", command=lambda: select_source_path()
|
||||
)
|
||||
select_face_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
|
||||
select_face_button.place(relx=0.1, rely=0.35, relwidth=0.3, relheight=0.1)
|
||||
|
||||
swap_faces_button = ctk.CTkButton(
|
||||
root, text="↔", cursor="hand2", command=lambda: swap_faces_paths()
|
||||
)
|
||||
swap_faces_button.place(relx=0.45, rely=0.4, relwidth=0.1, relheight=0.1)
|
||||
swap_faces_button.place(relx=0.45, rely=0.35, relwidth=0.1, relheight=0.1)
|
||||
|
||||
select_target_button = ctk.CTkButton(
|
||||
root,
|
||||
@@ -173,60 +188,30 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
cursor="hand2",
command=lambda: select_target_path(),
)
select_target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
select_target_button.place(relx=0.6, rely=0.35, relwidth=0.3, relheight=0.1)

keep_fps_value = ctk.BooleanVar(value=modules.globals.keep_fps)
keep_fps_checkbox = ctk.CTkSwitch(
# AI Generated Face controls
fake_face_value = ctk.BooleanVar(value=modules.globals.use_fake_face)
fake_face_switch = ctk.CTkSwitch(
root,
text=_("Keep fps"),
variable=keep_fps_value,
text=_("Privacy Mode"),
variable=fake_face_value,
cursor="hand2",
command=lambda: (
setattr(modules.globals, "keep_fps", keep_fps_value.get()),
save_switch_states(),
),
command=lambda: toggle_fake_face(fake_face_value)
)
keep_fps_checkbox.place(relx=0.1, rely=0.6)
fake_face_switch.place(relx=0.1, rely=0.50)

keep_frames_value = ctk.BooleanVar(value=modules.globals.keep_frames)
keep_frames_switch = ctk.CTkSwitch(
# Add refresh button next to the switch
refresh_face_button = ctk.CTkButton(
root,
text=_("Keep frames"),
variable=keep_frames_value,
text="↻",
width=30,
cursor="hand2",
command=lambda: (
setattr(modules.globals, "keep_frames", keep_frames_value.get()),
save_switch_states(),
),
command=lambda: refresh_fake_face_clicked()
)
keep_frames_switch.place(relx=0.1, rely=0.65)

enhancer_value = ctk.BooleanVar(value=modules.globals.fp_ui["face_enhancer"])
enhancer_switch = ctk.CTkSwitch(
root,
text=_("Face Enhancer"),
variable=enhancer_value,
cursor="hand2",
command=lambda: (
update_tumbler("face_enhancer", enhancer_value.get()),
save_switch_states(),
),
)
enhancer_switch.place(relx=0.1, rely=0.7)

keep_audio_value = ctk.BooleanVar(value=modules.globals.keep_audio)
keep_audio_switch = ctk.CTkSwitch(
root,
text=_("Keep audio"),
variable=keep_audio_value,
cursor="hand2",
command=lambda: (
setattr(modules.globals, "keep_audio", keep_audio_value.get()),
save_switch_states(),
),
)
keep_audio_switch.place(relx=0.6, rely=0.6)
refresh_face_button.place(relx=0.35, rely=0.50)

# Face Processing Options (Middle Left)
many_faces_value = ctk.BooleanVar(value=modules.globals.many_faces)
many_faces_switch = ctk.CTkSwitch(
root,
@@ -238,24 +223,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
many_faces_switch.place(relx=0.6, rely=0.65)

color_correction_value = ctk.BooleanVar(value=modules.globals.color_correction)
color_correction_switch = ctk.CTkSwitch(
root,
text=_("Fix Blueish Cam"),
variable=color_correction_value,
cursor="hand2",
command=lambda: (
setattr(modules.globals, "color_correction", color_correction_value.get()),
save_switch_states(),
),
)
color_correction_switch.place(relx=0.6, rely=0.70)

# nsfw_value = ctk.BooleanVar(value=modules.globals.nsfw_filter)
# nsfw_switch = ctk.CTkSwitch(root, text='NSFW filter', variable=nsfw_value, cursor='hand2', command=lambda: setattr(modules.globals, 'nsfw_filter', nsfw_value.get()))
# nsfw_switch.place(relx=0.6, rely=0.7)
many_faces_switch.place(relx=0.1, rely=0.55)

map_faces = ctk.BooleanVar(value=modules.globals.map_faces)
map_faces_switch = ctk.CTkSwitch(
@@ -269,8 +237,35 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
close_mapper_window() if not map_faces.get() else None
),
)
map_faces_switch.place(relx=0.1, rely=0.75)
map_faces_switch.place(relx=0.1, rely=0.60)

enhancer_value = ctk.BooleanVar(value=modules.globals.fp_ui["face_enhancer"])
enhancer_switch = ctk.CTkSwitch(
root,
text=_("Face Enhancer"),
variable=enhancer_value,
cursor="hand2",
command=lambda: (
update_tumbler("face_enhancer", enhancer_value.get()),
save_switch_states(),
),
)
enhancer_switch.place(relx=0.1, rely=0.65)

keep_audio_value = ctk.BooleanVar(value=modules.globals.keep_audio)
keep_audio_switch = ctk.CTkSwitch(
root,
text=_("Keep audio"),
variable=keep_audio_value,
cursor="hand2",
command=lambda: (
setattr(modules.globals, "keep_audio", keep_audio_value.get()),
save_switch_states(),
),
)
keep_audio_switch.place(relx=0.1, rely=0.70)

# Add show FPS switch right after keep_audio_switch
show_fps_value = ctk.BooleanVar(value=modules.globals.show_fps)
show_fps_switch = ctk.CTkSwitch(
root,
@@ -282,8 +277,9 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
show_fps_switch.place(relx=0.6, rely=0.75)
show_fps_switch.place(relx=0.1, rely=0.75)

# Mask Switches (Middle Right - Top Section)
mouth_mask_var = ctk.BooleanVar(value=modules.globals.mouth_mask)
mouth_mask_switch = ctk.CTkSwitch(
root,
@@ -292,38 +288,117 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
cursor="hand2",
command=lambda: setattr(modules.globals, "mouth_mask", mouth_mask_var.get()),
)
mouth_mask_switch.place(relx=0.1, rely=0.55)
mouth_mask_switch.place(relx=0.6, rely=0.50)

# Add mouth mask size slider
mouth_mask_size_slider = ctk.CTkSlider(
root,
from_=0.5,
to=2.0,
number_of_steps=30,
command=lambda value: setattr(modules.globals, "mouth_mask_size", value)
)
mouth_mask_size_slider.set(modules.globals.mouth_mask_size)
mouth_mask_size_slider.place(relx=0.8, rely=0.50, relwidth=0.1)

eyes_mask_var = ctk.BooleanVar(value=modules.globals.eyes_mask)
eyes_mask_switch = ctk.CTkSwitch(
root,
text=_("Eyes Mask"),
variable=eyes_mask_var,
cursor="hand2",
command=lambda: setattr(modules.globals, "eyes_mask", eyes_mask_var.get()),
)
eyes_mask_switch.place(relx=0.6, rely=0.55)

# Add eyes mask size slider
eyes_mask_size_slider = ctk.CTkSlider(
root,
from_=0.5,
to=2.0,
number_of_steps=30,
command=lambda value: setattr(modules.globals, "eyes_mask_size", value)
)
eyes_mask_size_slider.set(modules.globals.eyes_mask_size)
eyes_mask_size_slider.place(relx=0.8, rely=0.55, relwidth=0.1)

eyebrows_mask_var = ctk.BooleanVar(value=modules.globals.eyebrows_mask)
eyebrows_mask_switch = ctk.CTkSwitch(
root,
text=_("Eyebrows Mask"),
variable=eyebrows_mask_var,
cursor="hand2",
command=lambda: setattr(modules.globals, "eyebrows_mask", eyebrows_mask_var.get()),
)
eyebrows_mask_switch.place(relx=0.6, rely=0.60)

# Add eyebrows mask size slider
eyebrows_mask_size_slider = ctk.CTkSlider(
root,
from_=0.5,
to=2.0,
number_of_steps=30,
command=lambda value: setattr(modules.globals, "eyebrows_mask_size", value)
)
eyebrows_mask_size_slider.set(modules.globals.eyebrows_mask_size)
eyebrows_mask_size_slider.place(relx=0.8, rely=0.60, relwidth=0.1)
# Box Visualization Switches (Middle Right - Bottom Section)
show_mouth_mask_box_var = ctk.BooleanVar(value=modules.globals.show_mouth_mask_box)
show_mouth_mask_box_switch = ctk.CTkSwitch(
root,
text=_("Show Mouth Mask Box"),
text=_("Show Mouth Box"),
variable=show_mouth_mask_box_var,
cursor="hand2",
command=lambda: setattr(
modules.globals, "show_mouth_mask_box", show_mouth_mask_box_var.get()
),
)
show_mouth_mask_box_switch.place(relx=0.6, rely=0.55)
show_mouth_mask_box_switch.place(relx=0.6, rely=0.65)

show_eyes_mask_box_var = ctk.BooleanVar(value=modules.globals.show_eyes_mask_box)
show_eyes_mask_box_switch = ctk.CTkSwitch(
root,
text=_("Show Eyes Box"),
variable=show_eyes_mask_box_var,
cursor="hand2",
command=lambda: setattr(
modules.globals, "show_eyes_mask_box", show_eyes_mask_box_var.get()
),
)
show_eyes_mask_box_switch.place(relx=0.6, rely=0.70)

show_eyebrows_mask_box_var = ctk.BooleanVar(value=modules.globals.show_eyebrows_mask_box)
show_eyebrows_mask_box_switch = ctk.CTkSwitch(
root,
text=_("Show Eyebrows Box"),
variable=show_eyebrows_mask_box_var,
cursor="hand2",
command=lambda: setattr(
modules.globals, "show_eyebrows_mask_box", show_eyebrows_mask_box_var.get()
),
)
show_eyebrows_mask_box_switch.place(relx=0.6, rely=0.75)

# Main Control Buttons (Bottom)
start_button = ctk.CTkButton(
root, text=_("Start"), cursor="hand2", command=lambda: analyze_target(start, root)
)
start_button.place(relx=0.15, rely=0.80, relwidth=0.2, relheight=0.05)

stop_button = ctk.CTkButton(
root, text=_("Destroy"), cursor="hand2", command=lambda: destroy()
)
stop_button.place(relx=0.4, rely=0.80, relwidth=0.2, relheight=0.05)

preview_button = ctk.CTkButton(
root, text=_("Preview"), cursor="hand2", command=lambda: toggle_preview()
)
preview_button.place(relx=0.65, rely=0.80, relwidth=0.2, relheight=0.05)
preview_button.place(relx=0.4, rely=0.80, relwidth=0.2, relheight=0.05)

# --- Camera Selection ---
stop_button = ctk.CTkButton(
root, text=_("Destroy"), cursor="hand2", command=lambda: destroy()
)
stop_button.place(relx=0.65, rely=0.80, relwidth=0.2, relheight=0.05)

# Camera Section (Bottom)
camera_label = ctk.CTkLabel(root, text=_("Select Camera:"))
camera_label.place(relx=0.1, rely=0.86, relwidth=0.2, relheight=0.05)
camera_label.place(relx=0.1, rely=0.87, relwidth=0.2, relheight=0.05)

available_cameras = get_available_cameras()
camera_indices, camera_names = available_cameras
@@ -342,7 +417,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
root, variable=camera_variable, values=camera_names
)

camera_optionmenu.place(relx=0.35, rely=0.86, relwidth=0.25, relheight=0.05)
camera_optionmenu.place(relx=0.35, rely=0.87, relwidth=0.25, relheight=0.05)

live_button = ctk.CTkButton(
root,
@@ -362,16 +437,16 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
else "disabled"
),
)
live_button.place(relx=0.65, rely=0.86, relwidth=0.2, relheight=0.05)
# --- End Camera Selection ---
live_button.place(relx=0.65, rely=0.87, relwidth=0.2, relheight=0.05)

# Status and Links (Bottom)
status_label = ctk.CTkLabel(root, text=None, justify="center")
status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
status_label.place(relx=0.1, rely=0.92, relwidth=0.8)

donate_label = ctk.CTkLabel(
root, text="Deep Live Cam", justify="center", cursor="hand2"
)
donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
donate_label.place(relx=0.1, rely=0.94, relwidth=0.8)
donate_label.configure(
text_color=ctk.ThemeManager.theme.get("URL").get("text_color")
)
@@ -544,6 +619,11 @@ def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
preview.configure()
preview.protocol("WM_DELETE_WINDOW", lambda: toggle_preview())
preview.resizable(width=True, height=True)

# Add icon to the preview window
icon_path = resolve_relative_path("deeplivecam.ico")
if os.path.exists(icon_path):
preview.iconbitmap(icon_path)

preview_label = ctk.CTkLabel(preview, text=None)
preview_label.pack(fill="both", expand=True)
@@ -580,7 +660,7 @@ def update_tumbler(var: str, value: bool) -> None:


def select_source_path() -> None:
global RECENT_DIRECTORY_SOURCE, img_ft, vid_ft
global RECENT_DIRECTORY_SOURCE, img_ft, vid_ft, fake_face_switch, fake_face_value

PREVIEW.withdraw()
source_path = ctk.filedialog.askopenfilename(
@@ -589,6 +669,10 @@ def select_source_path() -> None:
filetypes=[img_ft],
)
if is_image(source_path):
modules.globals.use_fake_face = False
fake_face_value.set(False)
cleanup_fake_face()

modules.globals.source_path = source_path
RECENT_DIRECTORY_SOURCE = os.path.dirname(modules.globals.source_path)
image = render_image_preview(modules.globals.source_path, (200, 200))
@@ -761,8 +845,7 @@ def update_preview(frame_number: int = 0) -> None:
modules.globals.frame_processors
):
temp_frame = frame_processor.process_frame(
get_one_face(cv2.imread(modules.globals.source_path)), temp_frame
)
get_one_face(cv2.imread(modules.globals.source_path)), temp_frame)
image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
image = ImageOps.contain(
image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS
@@ -1199,4 +1282,43 @@ def update_webcam_target(
target_label_dict_live[button_num] = target_image
else:
update_pop_live_status("Face could not be detected in last upload!")
return map
return map

def toggle_fake_face(switch_var: ctk.BooleanVar) -> None:
modules.globals.use_fake_face = switch_var.get()
if modules.globals.use_fake_face:
if not modules.globals.fake_face_path:
if refresh_fake_face():
modules.globals.source_path = modules.globals.fake_face_path
# Update the source image preview
image = render_image_preview(modules.globals.source_path, (200, 200))
source_label.configure(image=image)
else:
cleanup_fake_face()
# Clear the source image preview
source_label.configure(image=None)
modules.globals.source_path = None

def refresh_fake_face_clicked() -> None:
"""Handle refresh button click to update fake face during live preview"""
if not modules.globals.use_fake_face:
# If privacy mode is off, turn it on first
modules.globals.use_fake_face = True
fake_face_value.set(True)

if refresh_fake_face():
modules.globals.source_path = modules.globals.fake_face_path
# Update the source image preview
image = render_image_preview(modules.globals.source_path, (200, 200))
source_label.configure(image=image)

# Force reload of frame processors to use new source face
global FRAME_PROCESSORS_MODULES
FRAME_PROCESSORS_MODULES = []
frame_processors = get_frame_processors_modules(modules.globals.frame_processors)

def get_config_path() -> str:
"""Get the path to the config file"""
config_dir = os.path.join(os.path.expanduser("~"), ".deep-live-cam")
os.makedirs(config_dir, exist_ok=True)
return os.path.join(config_dir, "switch_states.json")
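Note on the new dependencies: the `toggle_fake_face` and `refresh_fake_face_clicked` handlers above, plus the `cleanup_fake_face()` call wired into `destroy()` in `modules/core.py`, all rely on the new `modules/fake_face_handler.py` added by this branch, whose body is not shown in these hunks. The sketch below is only an illustration of the interface the UI code appears to assume: a `refresh_fake_face()` that fetches an AI-generated face and records its path in `modules.globals.fake_face_path`, and a `cleanup_fake_face()` that deletes it. The download URL and temp-file layout are guesses, not the branch's actual implementation.

# Hypothetical sketch of modules/fake_face_handler.py (not shown in this diff).
# Assumption: faces come from a public AI-face endpoint and are cached in a temp file.
import os
import tempfile

import requests

import modules.globals

FAKE_FACE_URL = "https://thispersondoesnotexist.com"  # assumed source, not confirmed by the branch


def refresh_fake_face() -> bool:
    """Download a new AI-generated face and record it in modules.globals.fake_face_path."""
    try:
        response = requests.get(FAKE_FACE_URL, timeout=10)
        response.raise_for_status()
        fd, path = tempfile.mkstemp(prefix="fake_face_", suffix=".jpg")
        with os.fdopen(fd, "wb") as handle:
            handle.write(response.content)
        cleanup_fake_face()  # drop the previously downloaded face, if any
        modules.globals.fake_face_path = path
        return True
    except Exception as exc:
        print(f"Error refreshing fake face: {exc}")
        return False


def cleanup_fake_face() -> None:
    """Delete the cached fake face and clear modules.globals.fake_face_path."""
    path = getattr(modules.globals, "fake_face_path", None)
    if path and os.path.exists(path):
        try:
            os.remove(path)
        except OSError:
            pass
    modules.globals.fake_face_path = None

Similarly, `get_config_path()` above is presumably consumed by the `save_switch_states()` / `load_switch_states()` calls scattered through the switch callbacks (the error message at the top of this section matches the loader's except block). A plausible shape for that persistence, assuming a flat JSON file of booleans; the exact key set is illustrative:

import json

import modules.globals


def save_switch_states() -> None:
    # Persist the current toggle values; the key list here is illustrative only.
    states = {
        "keep_audio": modules.globals.keep_audio,
        "many_faces": modules.globals.many_faces,
        "map_faces": modules.globals.map_faces,
        "color_correction": modules.globals.color_correction,
        "show_fps": modules.globals.show_fps,
        "mouth_mask": modules.globals.mouth_mask,
    }
    with open(get_config_path(), "w") as config_file:
        json.dump(states, config_file, indent=2)


def load_switch_states() -> None:
    # Restore persisted toggles, ignoring a missing or unreadable file.
    try:
        with open(get_config_path(), "r") as config_file:
            states = json.load(config_file)
        for key, value in states.items():
            if hasattr(modules.globals, key):
                setattr(modules.globals, key, value)
    except FileNotFoundError:
        pass
    except Exception as e:
        print(f"Error loading switch states: {str(e)}")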