cv2smile-detect.py (forked from thearn/webcam-pulse-detector)
#!/usr/bin/env python
"""Detect smiles in a webcam stream with OpenCV Haar cascades (legacy cv API)
and log each smile score with a timestamp to timedata.txt."""
import cv2.cv as cv
import cv2
import time
import threading
###########################
import matplotlib
import datetime
import matplotlib.dates as md
##########################

f = open('timedata.txt', 'w')
class Camera(object):
    """Thin wrapper around cv2.VideoCapture (opened below but otherwise unused)."""

    def __init__(self, camera=0):
        print("initializing camera")
        self.cam = cv2.VideoCapture(camera)
        time.sleep(3)
        print("Camera ready!")
        if not self.cam.isOpened():
            raise Exception("Camera not accessible")
        self.shape = self.get_frame().shape
        print(self.shape)

    def get_frame(self):
        _, frame = self.cam.read()
        return frame

    def release(self):
        self.cam.release()
cv.NamedWindow("camera", 1)
camera = Camera(camera=0)            # cv2-based capture; not used again below
capture = cv.CreateCameraCapture(0)  # legacy cv capture driving the main loop
font = cv.InitFont(1, 1, 1, 1, 1, 1)

# Desired capture resolution; set either value to None to keep the camera default.
width = 320
height = 240
smileness = 0   # neighbour count of the latest smile detection (rough confidence)
smilecount = 0  # consecutive iterations with a strong smile
if width is None:
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
else:
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
if height is None:
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
else:
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)
result = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)
mqLoop = 0
# OpenCV helper functions
def Load():
    return (faceCascade, smileCascade)


def Display(image):
    cv.NamedWindow("Smile Test")
    cv.ShowImage("Smile Test", image)
    cv.WaitKey(0)
    cv.DestroyWindow("Smile Test")
def DetectRedEyes(image, faceCascade, smileCascade):
    global smileness
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            #cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            #cv.PutText(image, "face", pt1, font, cv.RGB(255, 0, 0))
            face_region = cv.GetSubRect(image, (x, int(y + (h / 4)), w, int(h / 2)))  # unused; these are small-image coordinates

            # Restrict the smile search to the lower half of the face
            #cv.Rectangle(image, (pt1[0], (pt1[1] + (abs(pt1[1] - pt2[1]) / 2))), pt2, cv.RGB(0, 255, 0), 1, 8, 0)
            #cv.PutText(image, "lower", (pt1[0], (pt1[1] + (abs(pt1[1] - pt2[1]) / 2))), font, cv.RGB(0, 255, 0))
            cv.SetImageROI(image, (pt1[0],
                                   (pt1[1] + (abs(pt1[1] - pt2[1]) / 2)),
                                   pt2[0] - pt1[0],
                                   int((pt2[1] - (pt1[1] + (abs(pt1[1] - pt2[1]) / 2))))))

            smiles = cv.HaarDetectObjects(image, smileCascade, cv.CreateMemStorage(0),
                                          1.1, 5, 0, (15, 15))
            if smiles:
                for smile in smiles:
                    cv.Rectangle(image,
                                 (smile[0][0], smile[0][1]),
                                 (smile[0][0] + smile[0][2], smile[0][1] + smile[0][3]),
                                 cv.RGB(0, 0, 255), 1, 8, 0)
                    cv.PutText(image, "smile", (smile[0][0], smile[0][1]),
                               font, cv.RGB(0, 0, 255))
                    cv.PutText(image, str(smile[1]),
                               (smile[0][0], smile[0][1] + smile[0][3]),
                               font, cv.RGB(0, 0, 255))
                    # smile[1] is the neighbour count, used as a rough confidence score
                    smileness = smile[1]
            cv.ResetImageROI(image)
            #if smile[1] > 90:
            #    mqttc.publish("smiles", "got smile", 1)
            #    time.sleep(5)

            # Eye detection (disabled)
            #eyes = cv.HaarDetectObjects(image, eyeCascade, cv.CreateMemStorage(0),
            #                            haar_scale, min_neighbors, haar_flags, (15, 15))
            #if eyes:
            #    for eye in eyes:
            #        # Draw a rectangle around each eye
            #        cv.Rectangle(image,
            #                     (eye[0][0], eye[0][1]),
            #                     (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
            #                     cv.RGB(255, 0, 0), 1, 8, 0)

    cv.ResetImageROI(image)
    return image
faceCascade = cv.Load("haarcascade_frontalface_alt.xml")
#eyeCascade = cv.Load("haarcascade_eye.xml")
#smileCascade = cv.Load("smileD/smiled_04.xml")
smileCascade = cv.Load("haarcascade_smile.xml")
while True:
    # Count consecutive frames with a strong smile score
    if smileness > 70:
        smilecount += 1
    else:
        smilecount = 0
    if smilecount >= 40:
        smilecount = 0
        #mqttc.publish("smiles", "smile", 0)
        #mT.publish()
        time.sleep(0)

    # Log the current smile score with a matplotlib date number roughly every other frame
    if mqLoop >= 1:
        #mqttc.loop()
        x = str(datetime.datetime.now())
        f.write(str(md.datestr2num(x)) + " " + str(smileness) + "\n")
        mqLoop = 0
    else:
        mqLoop += 0.9

    img = cv.QueryFrame(capture)
    smileness = 0
    if img:
        image = DetectRedEyes(img, faceCascade, smileCascade)
        cv.ShowImage("camera", image)
    #print smileness
    # cv.WaitKey returns an int key code; press 'f' to quit
    k = cv.WaitKey(10)
    if (k & 0xFF) == ord('f'):
        break

f.close()
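
# --- Optional: equivalent pipeline with the modern cv2 API -------------------
# The cv2.cv module used above was removed in OpenCV 3.x. The function below is
# a minimal sketch of the same face-then-smile Haar-cascade detection using
# cv2.CascadeClassifier. It is not called anywhere, and it assumes the two
# cascade XML files sit in the current working directory.
def modern_smile_detect(cam_index=0,
                        face_xml="haarcascade_frontalface_alt.xml",
                        smile_xml="haarcascade_smile.xml"):
    face_cascade = cv2.CascadeClassifier(face_xml)
    smile_cascade = cv2.CascadeClassifier(smile_xml)
    cap = cv2.VideoCapture(cam_index)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.2, 2, minSize=(20, 20)):
            # Search for smiles only in the lower half of each detected face
            roi = gray[y + h // 2:y + h, x:x + w]
            smiles = smile_cascade.detectMultiScale(roi, 1.1, 5, minSize=(15, 15))
            for (sx, sy, sw, sh) in smiles:
                cv2.rectangle(frame,
                              (x + sx, y + h // 2 + sy),
                              (x + sx + sw, y + h // 2 + sy + sh),
                              (255, 0, 0), 1)
        cv2.imshow("camera", frame)
        if cv2.waitKey(10) & 0xFF == ord('f'):
            break
    cap.release()
    cv2.destroyAllWindows()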