kcf_independent.py
# coding=utf-8
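# Multi-face tracking from the webcam: faces are detected with a Haar cascade,
# one KCF tracker is created per detected face, and the trackers are
# periodically re-initialised from a fresh detection (refind) to correct drift.
# Note: cv2.TrackerKCF_create needs an OpenCV build with the contrib tracking
# module; depending on the OpenCV version the factory may instead be exposed
# as cv2.TrackerKCF.create() or under cv2.legacy.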
import numpy as np
import cv2
import sys
import time
# Haar cascades for detection (paths are machine-specific; adjust as needed)
face_cascade = cv2.CascadeClassifier('C:\\code_env\\haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('C:\\code_env\\haarcascade_eye.xml')  # loaded but not used below
# Open the default webcam
cap = cv2.VideoCapture(0)
# FPS bookkeeping and global tracker state
fps = 0              # last measured frames per second
fps_counter = 0      # frames counted in the current one-second window
timer = time.time()  # start of the current one-second window
frames = 0           # frame counter for the alternative refind trigger
refind_timer = 0     # seconds elapsed since the last re-detection
okm = []             # per-tracker "update succeeded" flags
trackerm = []        # one KCF tracker per detected face
def refind():
    """Re-detect faces on a fresh frame and re-initialise the KCF trackers."""
    global okm
    global trackerm
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = np.array(face_cascade.detectMultiScale(gray, 1.3, 5))
    # cv2.imshow('old_frame1', frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        cap.release()
        cv2.destroyAllWindows()
        sys.exit()
    facem = []
    if len(faces) != 0:
        faces_x = faces[:, 0]
        faces_y = faces[:, 1]
        faces_w = faces[:, 2]
        faces_h = faces[:, 3]
        # face1 is the image of the extracted face
        for x, y, w, h in zip(faces_x, faces_y, faces_w, faces_h):
            face1 = frame[y:y + h, x:x + w]
            facem.append(face1)
            cv2.imshow('face', face1)
        print("facem len:", len(facem))
        # rebuild one KCF tracker per freshly detected face
        trackerm = []
        okm = []
        for x, y, w, h in zip(faces_x, faces_y, faces_w, faces_h):
            tracker = cv2.TrackerKCF_create()
            bbox = (int(x), int(y), int(w), int(h))  # plain ints for tracker.init
            ok = tracker.init(frame, bbox)
            trackerm.append(tracker)
            okm.append(ok)
    else:
        print("no faces found in refind")
def init():
    """Block until at least one face is detected, then create the initial KCF trackers."""
    print('init start')
    global trackerm
    global okm
    # initial detection of faces
    while 1:
        ret, frame = cap.read()
        frame = np.array(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = np.array(face_cascade.detectMultiScale(gray, 1.3, 5))
        cv2.imshow('output', frame)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC quits
            cap.release()
            cv2.destroyAllWindows()
            sys.exit()
        # facem stores the cropped face images, one per detection
        facem = []
        if len(faces) != 0:
            faces_x = faces[:, 0]
            faces_y = faces[:, 1]
            faces_w = faces[:, 2]
            faces_h = faces[:, 3]
            print("faces_x", faces_x)
            print("faces_w", faces_w)
            print("faces_x+faces_w", faces_x + faces_w)
            # face1 is the image of the extracted face
            for x, y, w, h in zip(faces_x, faces_y, faces_w, faces_h):
                face1 = frame[y:y + h, x:x + w]
                facem.append(face1)
                cv2.imshow('face', face1)
            print("facem len:", len(facem))
            cv2.imshow('face1', facem[0])
            print("face1 dtype:", type(facem[0]))
            # print(facem[0])  # dumps the raw pixel array; very noisy
            break
    # KCF tracker initialisation: one tracker per detected face
    trackerm = []
    okm = []
    for x, y, w, h in zip(faces_x, faces_y, faces_w, faces_h):
        tracker = cv2.TrackerKCF_create()
        bbox = (int(x), int(y), int(w), int(h))  # plain ints for tracker.init
        ok = tracker.init(frame, bbox)
        trackerm.append(tracker)
        okm.append(ok)
    print('init done')
if __name__ == '__main__':
    init()
    while True:
        # Read a new frame
        ok, frame = cap.read()
        if not ok:
            break
        # Update every tracker on the new frame
        okm = []
        bboxm = []
        for tracker in trackerm:
            ok, bbox = tracker.update(frame)
            okm.append(ok)
            bboxm.append(bbox)
        # Draw a bounding box for each tracker whose update succeeded
        okm = np.array(okm)
        bboxm = np.array(bboxm)
        for box_x, box_y, box_w, box_h in bboxm[okm]:
            p1 = (int(box_x), int(box_y))
            p2 = (int(box_x + box_w), int(box_y + box_h))
            cv2.rectangle(frame, p1, p2, (0, 255, 0), 2, 1)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            cv2.destroyAllWindows()
            cap.release()
            break
        # Timer / fps bookkeeping; refind() re-detects the faces periodically
        fps_counter = fps_counter + 1
        if time.time() - timer > 1:
            print(timer, ":", fps_counter)
            fps = fps_counter
            fps_counter = 0
            timer = time.time()
            refind_timer = refind_timer + 1
            # re-detecting only when ok == 0 gives a tighter bounding box but
            # reduces fps, noticeably so when the box is large
            # if ok == 0:
            if refind_timer == 2:  # re-detect roughly every two seconds
                print("before refind bbox:", bboxm)
                refind()
                print("after refind bbox:", bboxm)
                refind_timer = 0
                # tracker = cv2.TrackerKCF_create()
                # ok = tracker.init(frame, bbox)
                # print("refined ok", ok)
        # Frame counter; alternative refind trigger, once per second of frames
        frames = frames + 1
        if frames > fps:
            frames = 0
            # refind()
        cv2.putText(frame, "Fps:" + str(fps), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)
        # output
        cv2.imshow('output', frame)
    cap.release()