Mouse movement via eyes
import cv2
import numpy as np
import pyautogui

## Install CMake on the system, then:
## `pip install cmake dlib opencv-python pyautogui numpy`

# Constants for sensitivity and smoothing
SENSITIVITY = 20  # Reserved for tuning; not used in this version
SMOOTHING = 0.5   # Fraction of the remaining distance covered per update

pyautogui.FAILSAFE = False


# Detect eyes using OpenCV's Haar cascade classifier and return their centers
def detect_eyes(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
    eyes = eye_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    eye_centers = []
    for (ex, ey, ew, eh) in eyes:
        eye_centers.append((ex + ew // 2, ey + eh // 2))
    return eye_centers


# Move the mouse based on the position of the first detected eye
def move_mouse(eye_positions, frame_width, frame_height):
    if len(eye_positions) > 0:
        x, y = eye_positions[0]  # Use the first detected eye
        # Normalize coordinates to [-1, 1]
        nx = 2 * (x / frame_width) - 1
        ny = 1 - 2 * (y / frame_height)
        # Get screen size
        screen_width, screen_height = pyautogui.size()
        # Map the normalized coordinates onto the full screen
        target_x = int((nx + 1) * screen_width / 2)
        target_y = int((1 - ny) * screen_height / 2)
        # Current mouse position
        current_x, current_y = pyautogui.position()
        # Smooth the movement: cover only part of the remaining distance per frame
        smooth_x = current_x + (target_x - current_x) * SMOOTHING
        smooth_y = current_y + (target_y - current_y) * SMOOTHING
        # Move the mouse toward the target coordinates
        pyautogui.moveTo(smooth_x, smooth_y)


# Main loop: read webcam frames and perform eye tracking
def main():
    cap = cv2.VideoCapture(0)  # Open the default webcam
    if not cap.isOpened():
        print("Error: Could not open webcam.")
        return
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_height, frame_width = frame.shape[:2]
        # Detect eyes
        eye_positions = detect_eyes(frame)
        # Mark detected eye centers
        for (ex, ey) in eye_positions:
            cv2.circle(frame, (ex, ey), 2, (0, 255, 0), 2)
        # Move the mouse based on eye position
        move_mouse(eye_positions, frame_width, frame_height)
        # Display the frame with detected eyes
        cv2.imshow('Eye Tracking', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
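For intuition, here is a minimal, standalone sketch (not part of the gist itself) of the exponential smoothing used in move_mouse: each update moves the cursor a fixed fraction (SMOOTHING) of the remaining distance toward the target, so repeated updates converge geometrically instead of jumping.

# Standalone illustration of the smoothing step in move_mouse (illustration only).
# With SMOOTHING = 0.5, each iteration halves the remaining distance to the target.
SMOOTHING = 0.5

current = 0.0
target = 100.0
for step in range(5):
    current = current + (target - current) * SMOOTHING
    print(f"step {step + 1}: {current:.1f}")  # 50.0, 75.0, 87.5, 93.8, 96.9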