-
-
Save edfungus/67c14af0d5afaae5b18c to your computer and use it in GitHub Desktop.
#Identify pupils. Based on beta 1 | |
import numpy as np | |
import cv2 | |
import time | |
cap = cv2.VideoCapture(0) #640,480 | |
w = 640 | |
h = 480 | |
while(cap.isOpened()): | |
ret, frame = cap.read() | |
if ret==True: | |
#downsample | |
#frameD = cv2.pyrDown(cv2.pyrDown(frame)) | |
#frameDBW = cv2.cvtColor(frameD,cv2.COLOR_RGB2GRAY) | |
#detect face | |
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY) | |
faces = cv2.CascadeClassifier('haarcascade_eye.xml') | |
detected = faces.detectMultiScale(frame, 1.3, 5) | |
#faces = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') | |
#detected2 = faces.detectMultiScale(frameDBW, 1.3, 5) | |
pupilFrame = frame | |
pupilO = frame | |
windowClose = np.ones((5,5),np.uint8) | |
windowOpen = np.ones((2,2),np.uint8) | |
windowErode = np.ones((2,2),np.uint8) | |
#draw square | |
for (x,y,w,h) in detected: | |
cv2.rectangle(frame, (x,y), ((x+w),(y+h)), (0,0,255),1) | |
cv2.line(frame, (x,y), ((x+w,y+h)), (0,0,255),1) | |
cv2.line(frame, (x+w,y), ((x,y+h)), (0,0,255),1) | |
pupilFrame = cv2.equalizeHist(frame[y+(h*.25):(y+h), x:(x+w)]) | |
pupilO = pupilFrame | |
ret, pupilFrame = cv2.threshold(pupilFrame,55,255,cv2.THRESH_BINARY) #50 ..nothin 70 is better | |
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_CLOSE, windowClose) | |
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_ERODE, windowErode) | |
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_OPEN, windowOpen) | |
#so above we do image processing to get the pupil.. | |
#now we find the biggest blob and get the centriod | |
threshold = cv2.inRange(pupilFrame,250,255) #get the blobs | |
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) | |
#if there are 3 or more blobs, delete the biggest and delete the left most for the right eye | |
#if there are 2 blob, take the second largest | |
#if there are 1 or less blobs, do nothing | |
if len(contours) >= 2: | |
#find biggest blob | |
maxArea = 0 | |
MAindex = 0 #to get the unwanted frame | |
distanceX = [] #delete the left most (for right eye) | |
currentIndex = 0 | |
for cnt in contours: | |
area = cv2.contourArea(cnt) | |
center = cv2.moments(cnt) | |
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00']) | |
distanceX.append(cx) | |
if area > maxArea: | |
maxArea = area | |
MAindex = currentIndex | |
currentIndex = currentIndex + 1 | |
del contours[MAindex] #remove the picture frame contour | |
del distanceX[MAindex] | |
eye = 'right' | |
if len(contours) >= 2: #delete the left most blob for right eye | |
if eye == 'right': | |
edgeOfEye = distanceX.index(min(distanceX)) | |
else: | |
edgeOfEye = distanceX.index(max(distanceX)) | |
del contours[edgeOfEye] | |
del distanceX[edgeOfEye] | |
if len(contours) >= 1: #get largest blob | |
maxArea = 0 | |
for cnt in contours: | |
area = cv2.contourArea(cnt) | |
if area > maxArea: | |
maxArea = area | |
largeBlob = cnt | |
if len(largeBlob) > 0: | |
center = cv2.moments(largeBlob) | |
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00']) | |
cv2.circle(pupilO,(cx,cy),5,255,-1) | |
#show picture | |
cv2.imshow('frame',pupilO) | |
cv2.imshow('frame2',pupilFrame) | |
if cv2.waitKey(1) & 0xFF == ord('q'): | |
break | |
#else: | |
#break | |
# Release everything if job is finished | |
cap.release() | |
cv2.destroyAllWindows() |
I have this problem:
Traceback (most recent call last):
File "pupil.py", line 54, in <module>
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
ValueError: too many values to unpack
_,contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
what if i want to use the coordinates cx, cy to define what left is and what right is? any suggestions
I always get this warning and error after I move my eye in left and right directions
Warning (from warnings module):
pupilFrame = cv2.equalizeHist(frame[y+(h*0.25):(y+h), x:(x+w)])
VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
Traceback (most recent call last):
File "J:\Tharwat\pupil-2.py", line 64, in <module>
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
ZeroDivisionError: float division by zero
Using OpenCV 3.1.0-dev on Windows
Above errors are come while run the program .How I resolve it ?
pupilFrame = cv2.equalizeHist(frame[y+(h*.25):(y+h), x:(x+w)])
Traceback (most recent call last):
File "", line 1, in
runfile('C:/Users/akshay/Documents/TRDDC/opencv and python/new_exp_My_code/detect_yellow.py', wdir='C:/Users/akshay/Documents/TRDDC/opencv and python/new_exp_My_code')
File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 866, in runfile
execfile(filename, namespace)
File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/Users/akshay/Documents/TRDDC/opencv and python/new_exp_My_code/detect_yellow.py", line 47, in
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
ValueError: too many values to unpack
not working
Traceback (most recent call last):
File "C:\Python27\Pupil Detect\pupil.py", line 38, in
pupilFrame = cv2.equalizeHist(frame[y+(h*.25):(y+h), x:(x+w)])
TypeError: slice indices must be integers or None or have an index method
DimkaSF:
You need to cast the first argument to an int, like this:
pupilFrame = cv2.equalizeHist(frame[int(y+(h*.25)):(y+h), x:(x+w)])
Hi,
Is there a way to change a bool to true, when the algorithm detects an eye-blink? I don't understand how the window changes in size when it detects a blink.
Can anyone tell me what this is about? The tabs look consistent to me
File "pupil.py", line 20
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
TabError: inconsistent use of tabs and spaces in indentation
Traceback (most recent call last):
File "pupil.py", line 47, in
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
ZeroDivisionError: float division by zero
how can i solve this?
I need to track the direction of the eye, but the coordinates of the square are changing constantly, if I get the exact coordinates of the square i can find the centre of the square that helps in tracking the direction of the blob.....any suggestion?
File "pupil.py", line 47, in
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
ZeroDivisionError: float division by zero
Use below code for above error.
if center['m00'] != 0:
cx = int(center["m10"] / center["m00"])
cy = int(center["m01"] / center["m00"])
else:
cx,cy = 0, 0
Can someone help me by providing this code with the proper indentation> I am finding it difficult to follow the code.
Thanks
The 2 windows that are displayed keep changing from displaying the full face and just the eye. This is not how it is supposed to work is it?
File "", line 29
pupilFrame = cv2.equalizeHist(frame(y+(h*.25):(y+h), x:(x+w))
^
SyntaxError: invalid syntax
The following code should work in Raspberry Pi
import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0) #640,480
w = 640
h = 480
while(cap.isOpened()):
ret, frame = cap.read()
if ret==True:
#downsample
#frameD = cv2.pyrDown(cv2.pyrDown(frame))
#frameDBW = cv2.cvtColor(frameD,cv2.COLOR_RGB2GRAY)
#detect face
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
detected = faces.detectMultiScale(frame, 1.3, 5)
#faces = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#detected2 = faces.detectMultiScale(frameDBW, 1.3, 5)
pupilFrame = frame
pupilO = frame
windowClose = np.ones((5,5),np.uint8)
windowOpen = np.ones((2,2),np.uint8)
windowErode = np.ones((2,2),np.uint8)
#draw square
for (x,y,w,h) in detected:
cv2.rectangle(frame, (x,y), ((x+w),(y+h)), (0,0,255),1)
cv2.line(frame, (x,y), ((x+w,y+h)), (0,0,255),1)
cv2.line(frame, (x+w,y), ((x,y+h)), (0,0,255),1)
pupilFrame = cv2.equalizeHist(frame[int(y+(h*.25)):(y+h), x:(x+w)])
pupilO = pupilFrame
ret, pupilFrame = cv2.threshold(pupilFrame,55,255,cv2.THRESH_BINARY) #50 ..nothin 70 is better
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_CLOSE, windowClose)
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_ERODE, windowErode)
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_OPEN, windowOpen)
#so above we do image processing to get the pupil..
#now we find the biggest blob and get the centriod
threshold = cv2.inRange(pupilFrame,250,255) #get the blobs
_, contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#if there are 3 or more blobs, delete the biggest and delete the left most for the right eye
#if there are 2 blob, take the second largest
#if there are 1 or less blobs, do nothing
if len(contours) >= 2:
#find biggest blob
maxArea = 0
MAindex = 0 #to get the unwanted frame
distanceX = [] #delete the left most (for right eye)
currentIndex = 0
for cnt in contours:
area = cv2.contourArea(cnt)
center = cv2.moments(cnt)
#cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
if center['m00'] != 0:
cx = int(center["m10"] / center["m00"])
cy = int(center["m01"] / center["m00"])
else:
cx,cy = 0, 0
distanceX.append(cx)
if area > maxArea:
maxArea = area
MAindex = currentIndex
currentIndex = currentIndex + 1
del contours[MAindex] #remove the picture frame contour
del distanceX[MAindex]
eye = 'right'
if len(contours) >= 2: #delete the left most blob for right eye
if eye == 'right':
edgeOfEye = distanceX.index(min(distanceX))
else:
edgeOfEye = distanceX.index(max(distanceX))
del contours[edgeOfEye]
del distanceX[edgeOfEye]
if len(contours) >= 1: #get largest blob
maxArea = 0
for cnt in contours:
area = cv2.contourArea(cnt)
if area > maxArea:
maxArea = area
largeBlob = cnt
if len(largeBlob) > 0:
center = cv2.moments(largeBlob)
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
cv2.circle(pupilO,(cx,cy),5,255,-1)
#show picture
cv2.imshow('frame',pupilO)
cv2.imshow('frame2',pupilFrame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#else:
#break
Release everything if job is finished
cap.release()
cv2.destroyAllWindows()
This codes works after resolving ZeroDivisionError: float division by zero error.
Solution was posted by davepatel29.
Great work...................................................
However. i have got the following small issue. It works the way it is designed (i guess).
Issue - Showing pupil/iris, if show any blank paper, id card or some other object which has no face/eye.
It just blink or show once and disappears.
If this can be fixed. This code works fine for me....
Python 3.7 windows 7 with SP1.
I will test it on RPI 4 and post the resuilt.
To -->- bma131 commented on Mar 21
Your codes did not work for me. It was giving the following error.
After changing the following line, it worked...... (removed "_,")
_,contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
changed to
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
However the issue with blank paper or id cards(with or without picture) shows the pupil on the screen.
It worked for me correctly thanks for the effort!
I have one question though, what does maxArea contains in each iteration ? Blob size for right eye ?
-
It worked for me correctly thanks for the effort!
Which one worked for you ?.
The program itself or the spoofing(id card, blank paper etc......) -
Are you referring this maxArea = 0
I think it is sort of flag to find the biggest area.
Please see the following line in the code.
if area > maxArea:
maxArea = area
From the above code, area moving to maxarea, if it is bigger than the previous. This way can find the biggest one.
- It worked for me correctly thanks for the effort!
Which one worked for you ?. -- pupil.py file itself worked for me!
The program itself or the spoofing(id card, blank paper etc......) -- the program itself in this case- Are you referring this maxArea = 0
I think it is sort of flag to find the biggest area.
Please see the following line in the code.
if area > maxArea:
maxArea = area
From the above code, area moving to maxarea, if it is bigger than the previous. This way can find the biggest one.
What I wonder here is, sorry for the late answer I've been offline due to self-isolation and I had no access to my laptop anyways. What does maxArea corresponds in here ? As far as I understood, it is the area of the biggest contour in eye and it actually indicates the pupil. But I'm not sure about it.
hi friends, thank for your source code. But I have a trouble like
", line 70, in
del contours[MAindex] # remove the picture frame contour
~~~~~~~~^^^^^^^^^
TypeError: 'tuple' object doesn't support item deletion
if I set cap is a image in 5th line, it's work. But not working with videocapture.
Please, help me if you can.
With the help of github copilot I am working through the issues. My setup is win 11 py 3.12.0. I have added comment with my name. I hopes this helps others.
I'm at troubleshooting this error
File "D:\py\pupil\measure.py", line 100, in
del contours[MAindex]
~~~~~~~~^^^^^^^^^
IndexError: list assignment index out of range
#Identify pupils. Based on beta 1
import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0) #640,480
w = 640
h = 480
while(cap.isOpened()):
ret, frame = cap.read()
if ret==True:
#downsample
#frameD = cv2.pyrDown(cv2.pyrDown(frame))
#frameDBW = cv2.cvtColor(frameD,cv2.COLOR_RGB2GRAY)
#detect face
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
#warprdiv added to resolve file location issue
#faces = cv2.CascadeClassifier('haarcascade_eye.xml')
faces = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
detected = faces.detectMultiScale(frame, 1.3, 5)
#faces = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#detected2 = faces.detectMultiScale(frameDBW, 1.3, 5)
pupilFrame = frame
pupilO = frame
windowClose = np.ones((5,5),np.uint8)
windowOpen = np.ones((2,2),np.uint8)
windowErode = np.ones((2,2),np.uint8)
#draw square
for (x,y,w,h) in detected:
cv2.rectangle(frame, (x,y), ((x+w),(y+h)), (0,0,255),1)
cv2.line(frame, (x,y), ((x+w,y+h)), (0,0,255),1)
cv2.line(frame, (x+w,y), ((x,y+h)), (0,0,255),1)
#pupilFrame = cv2.equalizeHist(frame[y+(h*.25):(y+h), x:(x+w)])
pupilFrame = cv2.equalizeHist(frame[int(y+(h*.25)):int(y+h), int(x):int(x+w)])
pupilO = pupilFrame
ret, pupilFrame = cv2.threshold(pupilFrame,55,255,cv2.THRESH_BINARY) #50 ..nothin 70 is better
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_CLOSE, windowClose)
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_ERODE, windowErode)
pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_OPEN, windowOpen)
#so above we do image processing to get the pupil..
#now we find the biggest blob and get the centriod
threshold = cv2.inRange(pupilFrame,250,255) #get the blobs
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#if there are 3 or more blobs, delete the biggest and delete the left most for the right eye
#if there are 2 blob, take the second largest
#if there are 1 or less blobs, do nothing
if len(contours) >= 2:
#find biggest blob
maxArea = 0
MAindex = 0 #to get the unwanted frame
distanceX = [] #delete the left most (for right eye)
currentIndex = 0
for cnt in contours:
area = cv2.contourArea(cnt)
center = cv2.moments(cnt)
#warpdriv added take care of divide by zero error
if center['m00'] != 0:
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
distanceX.append(cx)
if area > maxArea:
maxArea = area
MAindex = currentIndex
currentIndex = currentIndex + 1
#del contours[MAindex] #remove the picture frame contour
#warpdriv added
contours = list(contours)
del contours[MAindex]
contours = tuple(contours)
#del distanceX[MAindex]
#warpdriv added
distanceX = list(distanceX)
del distanceX[MAindex]
distanceX = tuple(distanceX)
eye = 'right'
if len(contours) >= 2: #delete the left most blob for right eye
if eye == 'right':
edgeOfEye = distanceX.index(min(distanceX))
else:
edgeOfEye = distanceX.index(max(distanceX))
#warpdriv added
#del contours[MAindex] #remove the picture frame contour
contours = list(contours)
del contours[MAindex]
contours = tuple(contours)
#warpdriv added
#del distanceX[MAindex]
distanceX = list(distanceX)
del distanceX[MAindex]
distanceX = tuple(distanceX)
if len(contours) >= 1: #get largest blob
maxArea = 0
for cnt in contours:
area = cv2.contourArea(cnt)
if area > maxArea:
maxArea = area
largeBlob = cnt
if len(largeBlob) > 0:
center = cv2.moments(largeBlob)
cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])
cv2.circle(pupilO,(cx,cy),5,255,-1)
#show picture
cv2.imshow('frame',pupilO)
cv2.imshow('frame2',pupilFrame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#else:
#break
Release everything if job is finished
cap.release()
cv2.destroyAllWindows()
It works, you just need https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml