Commit 5632a67

initial commit (0 parents)

20 files changed: 36,230 additions, 0 deletions

.gitignore

Lines changed: 2 additions & 0 deletions
venv
.idea

01_load_media.py

Lines changed: 30 additions & 0 deletions
import cv2

# Load and display an image for 5 seconds
img = cv2.imread('resources/shenzhen-subway.jpg')
cv2.imshow("Output", img)
cv2.waitKey(5000)

###############################################################

# Capture a video file and display it until the `q` key is pressed
cap = cv2.VideoCapture('resources/sz-office.mp4')
while True:
    success, vid = cap.read()
    if not success:  # cap.read() returns False once the file is exhausted
        print("Reached end of video file")
        break
    cv2.imshow('Video', vid)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Capture video from your webcam
cap = cv2.VideoCapture(0)  # Capture video source zero `/dev/video0`
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)   # Request a 1920x1080 capture resolution
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

while True:
    success, vid = cap.read()
    cv2.imshow('Video', vid)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
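A note on the capture loops above: the driver is free to ignore the requested resolution, and neither loop releases the camera or closes its windows when it ends. A minimal cleanup sketch, not part of the file, assuming the cap variable from the webcam block:

print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # resolution actually in use
cap.release()            # free the camera / file handle
cv2.destroyAllWindows()  # close every window opened by cv2.imshow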
Lines changed: 21 additions & 0 deletions
import cv2
import numpy as np

# Image grayscale, blur and edge detection
img = cv2.imread('resources/shenzhen-subway.jpg')
kernel = np.ones((5, 5), np.uint8)

imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (9, 9), 0)
imgLowEdge = cv2.Canny(img, 100, 100)
imgHighEdge = cv2.Canny(img, 200, 300)
imgDilation = cv2.dilate(imgLowEdge, kernel, iterations=5)
imgEroded = cv2.erode(imgDilation, kernel, iterations=3)

cv2.imshow("Grayscale Image", imgGray)
cv2.imshow("Blurred Image", imgBlur)
cv2.imshow("Low Edge Threshold Image", imgLowEdge)
cv2.imshow("High Edge Threshold Image", imgHighEdge)
cv2.imshow("Increased Edge Thickness", imgDilation)
cv2.imshow("Decreased Edge Thickness", imgEroded)
cv2.waitKey(5000)
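The dilate-then-erode pair above is close in spirit to a morphological closing, which OpenCV exposes as a single call. A small sketch of that alternative, not in the file, reusing imgLowEdge and kernel from the script above:

imgClosed = cv2.morphologyEx(imgLowEdge, cv2.MORPH_CLOSE, kernel, iterations=3)  # dilation followed by erosion in one call
cv2.imshow("Closed Edges", imgClosed)
cv2.waitKey(5000)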

03_joining_images.py

Lines changed: 20 additions & 0 deletions
import cv2
import numpy as np

# Joining Images in Vertical or Horizontal Stacks
img = cv2.imread('resources/shenzhen-subway.jpg')
kernel = np.ones((5, 5), np.uint8)

imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (9, 9), 0)
imgLowEdge = cv2.Canny(img, 100, 100)
imgHighEdge = cv2.Canny(img, 200, 300)
imgDilation = cv2.dilate(imgLowEdge, kernel, iterations=5)
imgEroded = cv2.erode(imgDilation, kernel, iterations=3)

imgVer = np.vstack((imgGray, imgBlur))
imgHor = np.hstack((imgLowEdge, imgHighEdge, imgDilation, imgEroded))

cv2.imshow("Horizontal Stack", imgHor)
cv2.imshow("Vertical Stack", imgVer)
cv2.waitKey(5000)
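np.hstack and np.vstack only work when the images share the same height (respectively width) and the same number of channels, so the single-channel results above cannot be stacked next to the original BGR image directly. A minimal sketch of the usual workaround, not in the file, reusing img and imgGray from the script above:

imgGrayBGR = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)  # give the grayscale image three channels
imgMixed = np.hstack((img, imgGrayBGR))                 # shapes now match, so stacking works
cv2.imshow("Colour next to Grayscale", imgMixed)
cv2.waitKey(5000)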

04_cropping_resizing.py

Lines changed: 13 additions & 0 deletions
import cv2

# Image cropping and resizing
img = cv2.imread('resources/shenzhen-subway.jpg')
print(img.shape)  # (height, width, channels)
imgResize = cv2.resize(img, (300, 200))  # cv2.resize takes (width, height)
print(imgResize.shape)

imgCropped = img[0:250, 624:824]  # Slice as [y1:y2, x1:x2] - rows (height) first, then columns (width)

cv2.imshow("Resized Image", imgResize)
cv2.imshow("Image Crop", imgCropped)
cv2.waitKey(5000)
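cv2.resize takes its target size as (width, height), the reverse of img.shape, which makes it easy to distort an image by accident. A small sketch of a scale-factor resize that preserves the aspect ratio, not in the file, reusing img from the script above:

scale = 0.25
h, w = img.shape[:2]  # shape is (rows, cols) = (height, width)
imgSmall = cv2.resize(img, (int(w * scale), int(h * scale)))
print(imgSmall.shape)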

05_adding_shapes_text.py

Lines changed: 26 additions & 0 deletions
import cv2
import numpy as np

# Adding Shapes and Text
img = np.zeros((512, 512, 3), np.uint8)  # 512x512 black image background with 3 colour channels BGR
imageDim = str(img.shape[0]) + ' : ' + str(img.shape[1])

img[:] = 255, 0, 0  # Colour entire image blue
img[112:400, 112:400] = 0, 255, 0  # Add a green square in the middle

# cv2.line(img, (0, 0), (512, 512), (0, 0, 255), 3)  # Draw a line from the top left to bottom right
# cv2.line(img, (512, 0), (0, 512), (0, 0, 255), 3)  # Draw a line from the top right to bottom left
# Do the same thing, but for an image of unknown dimensions,
# with img.shape[1]=width and img.shape[0]=height
cv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), 3)
cv2.line(img, (img.shape[1], 0), (0, img.shape[0]), (0, 0, 255), 3)

cv2.rectangle(img, (3, 3), (509, 509), (0, 0, 255), 5)  # Draw a rectangle around the image
cv2.rectangle(img, (212, 212), (300, 300), (0, 0, 255), cv2.FILLED)  # Draw a filled square in the middle

cv2.circle(img, (256, 256), 30, (0, 255, 255), cv2.FILLED)  # Add a circle in the middle

cv2.putText(img, imageDim, (180, 70), cv2.QT_FONT_NORMAL, 1, (255, 255, 255), 1)  # Print image dimensions

cv2.imshow("Image", img)
cv2.waitKey(5000)
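The (180, 70) text origin above was placed by eye. cv2.putText anchors text at its bottom-left corner, so centring it needs the rendered text size first. A small sketch, not in the file, reusing img and imageDim from the script above:

(textW, textH), baseline = cv2.getTextSize(imageDim, cv2.QT_FONT_NORMAL, 1, 1)
origin = ((img.shape[1] - textW) // 2, (img.shape[0] + textH) // 2)  # bottom-left corner for centred text
cv2.putText(img, imageDim, origin, cv2.QT_FONT_NORMAL, 1, (255, 255, 255), 1)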

06_perspective_transformation.py

Lines changed: 18 additions & 0 deletions
import cv2
import numpy as np

# Dewarp selections
img = cv2.imread('resources/sign.jpg')

width, height = 250, 350

# Source corners of the sign (top-left, top-right, bottom-left, bottom-right)
pts1 = np.float32([[920, 227], [1216, 244], [873, 780], [1182, 809]])
# Destination corners in the same order
pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

matrix = cv2.getPerspectiveTransform(pts1, pts2)

imgOutput = cv2.warpPerspective(img, matrix, (width, height))

cv2.imshow('Image', imgOutput)
cv2.waitKey(5000)
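The same matrix can map individual coordinates as well as whole images, which is a quick way to confirm the corner points were listed in the right order. A minimal sketch, not in the file, reusing pts1 and matrix from the script above:

corners = pts1.reshape(-1, 1, 2)                 # cv2.perspectiveTransform expects shape (N, 1, 2)
mapped = cv2.perspectiveTransform(corners, matrix)
print(mapped.reshape(-1, 2))                     # should land on (0,0), (250,0), (0,350), (250,350)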

07_colour_selection.py

Lines changed: 41 additions & 0 deletions
import cv2
import numpy as np

# Colour Detection
path = 'resources/songhua-river.jpg'

# Create HSV sliders that help us
# find the correct colour range to select
def empty(a):
    pass

cv2.namedWindow('TrackBars')  # Create the window
cv2.resizeWindow('TrackBars', 640, 240)  # Give it a size
cv2.createTrackbar('Hue Min', 'TrackBars', 0, 179, empty)  # Add a slider for min Hue 0-179
cv2.createTrackbar('Hue Max', 'TrackBars', 179, 179, empty)  # Add a slider for max Hue 179
cv2.createTrackbar('Sat Min', 'TrackBars', 107, 255, empty)  # Add a slider for min Saturation 0-255
cv2.createTrackbar('Sat Max', 'TrackBars', 255, 255, empty)  # Add a slider for max Saturation 255
cv2.createTrackbar('Val Min', 'TrackBars', 180, 255, empty)  # Add a slider for min Value 0-255
cv2.createTrackbar('Val Max', 'TrackBars', 255, 255, empty)  # Add a slider for max Value 255

while True:  # Run loop to continuously update from trackbars
    img = cv2.imread(path)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h_min = cv2.getTrackbarPos('Hue Min', 'TrackBars')
    h_max = cv2.getTrackbarPos('Hue Max', 'TrackBars')
    s_min = cv2.getTrackbarPos('Sat Min', 'TrackBars')
    s_max = cv2.getTrackbarPos('Sat Max', 'TrackBars')
    v_min = cv2.getTrackbarPos('Val Min', 'TrackBars')
    v_max = cv2.getTrackbarPos('Val Max', 'TrackBars')

    print(h_min, h_max, s_min, s_max, v_min, v_max)

    lower = np.array([h_min, s_min, v_min])
    upper = np.array([h_max, s_max, v_max])
    mask = cv2.inRange(imgHSV, lower, upper)  # Create a selection mask based on thresholds
    imgSelection = cv2.bitwise_and(img, img, mask=mask)  # Apply layer mask to image

    imgHor = np.hstack((img, imgSelection))

    cv2.imshow('Original & Selection', imgHor)
    cv2.imshow('Mask', mask)
    cv2.waitKey(1)  # Short delay so the sliders stay responsive
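Once the sliders give a clean selection, the usual next step is to hard-code the chosen bounds and drop the trackbar window. A small sketch under that assumption, not in the file; the numbers below are just the slider defaults from above, not tuned values:

img = cv2.imread(path)
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([0, 107, 180])    # h_min, s_min, v_min
upper = np.array([179, 255, 255])  # h_max, s_max, v_max
mask = cv2.inRange(imgHSV, lower, upper)
imgSelection = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('Selection', imgSelection)
cv2.waitKey(5000)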

08_contours_shape_detection.py

Lines changed: 59 additions & 0 deletions
import cv2
import numpy as np

def getContours(img):  # Retrieve contours from detected shapes
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)  # Get areas for all contours
        # print(area)  # Print calculated areas
        if area > 400:  # Set threshold to exclude noise
            cv2.drawContours(imgBlack, cnt, -1, (255, 0, 0), 1)  # Draw those contours onto the image
            peri = cv2.arcLength(cnt, True)  # Get contour perimeter
            approx = cv2.approxPolyDP(cnt, 0.02*peri, True)  # Approximate polygonal curve
            # print(len(approx))  # Print the number of corner points of each contour
            objCorners = len(approx)
            x, y, w, h = cv2.boundingRect(approx)  # Get bounding-box coordinates from curve

            if objCorners == 3:
                objectType = "Triangle"  # Classify object based on corner count

            elif objCorners == 4:
                aspectRatio = w/float(h)  # Check if w/h is about 1 => square
                if 0.95 < aspectRatio < 1.05: objectType = "Square"
                else: objectType = "Rectangle"

            elif objCorners == 5:
                objectType = "Pentagon"

            elif objCorners == 6:
                objectType = "Hexagon"

            elif objCorners == 7:
                objectType = "Heptagon"

            elif objCorners > 7:
                objectType = "Circle?"

            else: objectType = "Unknown"

            cv2.rectangle(imgBlack, (x, y), (x+w, y+h), (0, 0, 255), 1)  # Draw bounding box
            cv2.putText(imgBlack, objectType,
                        (x+(w//2)-10, y+(h//2)-10),  # Put objectType near the centre
                        cv2.QT_FONT_NORMAL, 0.5, (255, 255, 0), 1)

# Contours and Shape detection
path = 'resources/objects_dark.png'
# path = 'resources/objects_light.png'
img = cv2.imread(path)
imgBlack = np.zeros_like(img)
# imgWhite = np.ones_like(img)

imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 1)
imgEdge = cv2.Canny(imgBlur, 50, 50)

getContours(imgEdge)

cv2.imshow("Shape", imgBlack)
cv2.imshow("Original", img)
cv2.waitKey(15000)
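The "more than seven corners, probably a circle" rule above is only a rough heuristic. A common alternative is a circularity test on area and perimeter (4*pi*area/perimeter^2 equals 1 for a perfect circle). A small sketch that could sit inside the area check in getContours, not in the file, reusing area and peri; the 0.85 threshold is a guess to be tuned per image:

import math
circularity = 4 * math.pi * area / (peri * peri)  # 1.0 for a perfect circle, lower for polygons
if circularity > 0.85:
    objectType = "Circle"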

09_face_detection.py

Lines changed: 15 additions & 0 deletions
import cv2

# Face Detection
faceCascade = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
imagePath = 'resources/hongkong-metro.png'
img = cv2.imread(imagePath)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Create grayscale image

faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)  # Detect all faces in image

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)

cv2.imshow("Face Detection", img)
cv2.waitKey(5000)
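If the cascade XML is not kept under resources/, the opencv-python wheel ships the same files and exposes their location through cv2.data.haarcascades. A small sketch of that variant, not in the file, with an explicit check because CascadeClassifier fails silently on a bad path:

cascadePath = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascadePath)
if faceCascade.empty():
    raise IOError('Failed to load Haar cascade from ' + cascadePath)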
