Skip to content

Commit 981c652

Browse files
committed
update ch40 other
1 parent cb86df8 commit 981c652

File tree

7 files changed

+287
-0
lines changed

7 files changed

+287
-0
lines changed
Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 下午5:13
# @Author  : play4fun
# @File    : findHomography.py
# @Software: PyCharm

"""
findHomography.py: combine feature matching with calib3d's findHomography
to locate a known object inside a cluttered scene image.
"""

import numpy as np
import cv2
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 10  # need more than this many good matches to estimate a homography

img1 = cv2.imread('box.png', 0)           # queryImage, loaded as grayscale
img2 = cv2.imread('box_in_scene.png', 0)  # trainImage, loaded as grayscale

# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# FLANN matcher with a KD-tree index over the SIFT descriptors
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

'''
Only attempt to locate the object when more than MIN_MATCH_COUNT good matches
exist; otherwise print a warning.  With enough matches, collect the matched
keypoint coordinates from both images, estimate the 3x3 perspective transform,
then project the query image's four corners into the scene image and draw them.
'''

if len(good) > MIN_MATCH_COUNT:
    # coordinates of the matched keypoints in each image
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # 3rd arg: estimation method — 0 (use all points), cv2.RANSAC, or cv2.LMEDS.
    # 4th arg (range 1..10): RANSAC reprojection-error threshold in pixels;
    # a point pair whose reprojection error exceeds it is treated as an outlier.
    # Returns M, the 3x3 homography, and a mask flagging the inliers.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    # query image size.  img1 was read with flag 0 (grayscale), so .shape is
    # (h, w) with no channel count — the original `h, w, d = img1.shape`
    # raised ValueError here.
    h, w = img1.shape[:2]
    # project the query image's four corners into the scene image
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    # scene image is grayscale, so draw the outline with intensity 255
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

# finally draw the inliers (if the object was found) or all good matches (if not)
draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)

img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)

plt.imshow(img3, 'gray'), plt.show()
# the located object is outlined in white inside the scene image
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 下午5:56
# @Author  : play4fun
# @File    : Camshift.py
# @Software: PyCharm

"""
Camshift.py: track an object through a video with the CAMShift algorithm,
which adapts the search window's size and orientation as the target moves.
"""

import numpy as np
import cv2

cap = cv2.VideoCapture('../data/slow.flv')
# take first frame of the video
ret, frame = cap.read()

# setup initial location of window
r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
track_window = (c, r, w, h)
# set up the ROI for tracking
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# ignore low-saturation / low-value pixels when building the histogram
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    # apply CamShift to get the new location (returned as a rotated rectangle)
    ret, track_window = cv2.CamShift(dst, track_window, term_crit)
    # Draw it on image
    pts = cv2.boxPoints(ret)
    pts = np.int0(pts)
    img2 = cv2.polylines(frame, [pts], True, 255, 2)
    cv2.imshow('img2', img2)
    k = cv2.waitKey(60) & 0xff
    if k == 27:  # ESC quits
        break
    elif k != 255:
        # save a snapshot only when a key was actually pressed.  The original
        # ran this branch on every frame: waitKey returns 255 when no key is
        # pressed, so it wrote chr(255)+'.jpg' continuously.
        cv2.imwrite(chr(k) + ".jpg", img2)

cv2.destroyAllWindows()
cap.release()
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 下午5:27
# @Author  : play4fun
# @File    : Meanshift.py
# @Software: PyCharm

"""
Meanshift.py: track an object through a video with the meanshift algorithm.

Limitation: the search window has a fixed size, while a car approaching the
camera grows on screen, so a fixed window is not appropriate — the window's
size and angle should adapt to the target (which is what CAMShift does).
"""

import numpy as np
import cv2

cap = cv2.VideoCapture('slow.flv')

# take first frame of the video
ret, frame = cap.read()
# setup initial location of window
r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
track_window = (c, r, w, h)
# set up the ROI for tracking
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# ignore low-brightness pixels when building the histogram
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])

cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    # apply meanshift to get the new location
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    # Draw it on image
    x, y, w, h = track_window
    img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('img2', img2)
    k = cv2.waitKey(60) & 0xff
    if k == 27:  # ESC quits
        break
    elif k != 255:
        # save a snapshot only when a key was actually pressed.  The original
        # ran this branch on every frame: waitKey returns 255 when no key is
        # pressed, so it wrote chr(255)+'.jpg' continuously.
        cv2.imwrite(chr(k) + ".jpg", img2)

cv2.destroyAllWindows()
cap.release()
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 下午6:08
# @Author  : play4fun
# @File    : 40.4-OpenCV中的稠密光流.py
# @Software: PyCharm

"""
40.4-OpenCV中的稠密光流.py: dense optical flow (Farneback) computed for every
pixel, visualised as an HSV image — hue encodes flow direction, value encodes
flow magnitude.
"""

import cv2
import numpy as np

cap = cv2.VideoCapture("vtest.avi")
ret, frame1 = cap.read()

prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255  # full saturation everywhere so only hue/value vary

while True:
    ret, frame2 = cap.read()
    if not ret:
        # the original kept looping past the end of the video and crashed
        # inside cvtColor when frame2 was None
        break
    next_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prvs, next_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # convert the flow vectors to polar form: angle -> hue, magnitude -> value
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imshow('frame2', bgr)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break
    elif k == ord('s'):  # 's' saves the current frame and its flow image
        cv2.imwrite('opticalfb.png', frame2)
        cv2.imwrite('opticalhsv.png', bgr)
    prvs = next_gray

cap.release()
cv2.destroyAllWindows()
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 下午5:59
# @Author  : play4fun
# @File    : calcOpticalFlowPyrLK.py
# @Software: PyCharm

"""
calcOpticalFlowPyrLK.py:
Optical flow is the apparent motion of image objects between two consecutive
frames, caused by movement of the object or of the camera.  It is a 2D vector
field giving each point's displacement from the first frame to the second.
Optical flow is useful in many areas:
  - structure from motion
  - video compression
  - video stabilization, etc.
"""

import numpy as np
import cv2

cap = cv2.VideoCapture('slow.flv')

# params for ShiTomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Parameters for lucas kanade optical flow
# maxLevel is the number of image-pyramid levels used
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors, one per tracked point
color = np.random.randint(0, 255, (100, 3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while True:
    ret, frame = cap.read()
    if not ret:
        # the original kept looping past the end of the video and crashed
        # inside cvtColor when frame was None
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # calculate optical flow: new positions of the tracked points
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    if p1 is None:
        # every point was lost; nothing left to track
        break
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        # cast coordinates to int: modern OpenCV drawing functions reject
        # the numpy float values that ravel() yields
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('frame', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cv2.destroyAllWindows()
cap.release()

data/ml.png

100755100644
45.8 KB
Loading

data/slow.flv

1.18 MB
Binary file not shown.

0 commit comments

Comments
 (0)