forked from sonialoussaief/HumanDetection
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcorrelation_tracker.py
More file actions
executable file
·83 lines (76 loc) · 3.21 KB
/
correlation_tracker.py
File metadata and controls
executable file
·83 lines (76 loc) · 3.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to use the correlation_tracker from the dlib Python
# library. This object lets you track the position of an object as it moves
# from frame to frame in a video sequence. To use it, you give the
# correlation_tracker the bounding box of the object you want to track in the
# current video frame. Then it will identify the location of the object in
# subsequent frames.
#
# In this particular example, we are going to run on the
# video sequence that comes with dlib, which can be found in the
# examples/video_frames folder. This video shows a juice box sitting on a table
# and someone is waving the camera around. The task is to track the position of
# the juice box as the camera moves around.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install -U scikit-image
# Or downloaded from http://scikit-image.org/download.html.
import os
import glob
import dlib
from skimage import io
import cv2
import imutils
# Path to the input video file.
VIDEO_PATH = '/Users/Alex/Desktop/HumanDetection/video/Video1.mov'

cap = cv2.VideoCapture(VIDEO_PATH)

# Create the dlib correlation tracker - it must be primed with a bounding
# box on a first frame (start_track) before update() can be called.
tracker = dlib.correlation_tracker()
win = dlib.image_window()

# Read and preprocess the first frame; fail loudly if the video cannot
# be opened instead of crashing later inside cv2.cvtColor on None.
(grabbed, img) = cap.read()
if not grabbed:
    raise IOError('could not read first frame from %s' % VIDEO_PATH)

# BUG FIX: the original started the track on the grayscale of the
# *unresized* frame but updated on resized frames (and left the resized
# `frame1` unused). Use the resized frame consistently so the tracker
# always sees the same coordinate space.
frame = imutils.resize(img, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)  # blur to suppress sensor noise

# Initial bounding box of the target (left, top, right, bottom).
# NOTE(review): presumably chosen for the 500px-wide frame - confirm.
tracker.start_track(gray, dlib.rectangle(342, 136, 356, 177))

# Track the target frame by frame until the video ends or the user
# interrupts with Ctrl-C.
try:
    while True:
        (grabbed, img) = cap.read()
        if not grabbed:
            # End of the video stream - exit cleanly instead of crashing
            # on a None frame.
            break
        frame = imutils.resize(img, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        # Track from the previous frame's position.
        tracker.update(gray)

        # dlib's image_window expects RGB; OpenCV delivers BGR.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        win.clear_overlay()
        win.set_image(rgb)
        win.add_overlay(tracker.get_position())

        # BUG FIX: top/bottom/left/right are *methods* on dlib.drectangle;
        # the original printed the bound-method objects, not the values.
        p = tracker.get_position()
        print(p)
        print(p.top())
        print(p.bottom())
        print(p.left())
        print(p.right())
except KeyboardInterrupt:
    print('end')
finally:
    # Always release the capture device, even on interrupt.
    cap.release()