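"""Drone tracking and streaming script.

Each frame of the input video is processed with OpenCV to locate the drone
(the thresholding step keeps dark pixels, so the target is assumed to be
darker than its background), its bounding box is drawn, and its direction
relative to the image center is computed so that servos can re-center it.
The annotated frames are streamed to a client over ZMQ as base64-encoded JPEGs.
"""
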
import cv2
import base64
import zmq

# Constant definitions.
MAJOR_VERSION = cv2.getVersionMajor()
DESIRED_HEIGHT = 480        # The input image is resized to this height, preserving its aspect ratio.
BLUE_THRESHOLD = 150        # If the blue channel is bigger than this, the pixel is considered background and removed.
BINARY_THRESHOLD = 30       # Pixels brighter than this are removed before detection (inverse binary threshold).
CANNY_LOW_THRES = 150       # Low threshold for the Canny edge detector.
CANNY_HIGH_THRES = 350      # High threshold for the Canny edge detector.
LINE_THICKNESS = 2          # Thickness of the drawn lines.
MIN_CONTOUR_AREA = 100      # Minimum area of a contour to be considered valid.
MAX_CONTOUR_AREA = 2100     # Maximum area of a contour to be considered valid.
BLUR_KERNEL_SIZE = 3        # Size of the Gaussian blur kernel.
DILATION_KERNEL_SIZE = 5    # Size of the dilation kernel.
DILATION_ITERATIONS = 5     # Number of dilation iterations.
MIN_DISTANCE_FOR_MOVE = 10  # Minimum distance (in pixels) of the drone from the image center before the servos move.

# Colors (assuming the default BGR channel order).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
YELLOW = (0, 255, 255)

# -------------- Function definitions -----------------------------

def resizeImage(img):
    """Resize the input image to DESIRED_HEIGHT pixels tall, preserving its aspect ratio."""
    height, width = img.shape[:2]
    newWidth = int(DESIRED_HEIGHT * width / height)
    # cv2.resize expects the target size as (width, height).
    return cv2.resize(img, (newWidth, DESIRED_HEIGHT))

def removeColors(img):
    """Crop the image to the bounding box of the largest region whose blue channel exceeds BLUE_THRESHOLD.

    Returns None if no such region is found. Currently unused: processImage only
    references it in a commented-out line.
    """
    out = None

    # Binary mask of the pixels considered background (high blue channel).
    blueChannel = img[:, :, 0].copy()
    _, mask = cv2.threshold(blueChannel, BLUE_THRESHOLD, 255, cv2.THRESH_BINARY)

    if MAJOR_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) > 0:
        # Crop the input image to the bounding box of the largest contour.
        maxContour = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(maxContour)
        out = img[y:y+h, x:x+w, :]

    return out

def findMatchingContour(img, objX, objY):
    """Find an edge contour of valid area that contains the point (objX, objY).

    Returns (True, contour) on success and (False, None) if no contour matches.
    """
    dilated = img.copy()
    #dilated = cv2.dilate(img, (5,5), iterations=1)

    # Detect edges and extract their contours.
    canny = cv2.Canny(dilated, CANNY_LOW_THRES, CANNY_HIGH_THRES)
    if MAJOR_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    else:
        contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    #print('len:' + str(len(contours)))
    # Check the largest contours first (findContours may return a tuple, so build a sorted list).
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    #cv2.imshow('hey', canny)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        area = w * h

        # pointPolygonTest returns a non-negative value when the point lies inside or on the contour.
        dist = cv2.pointPolygonTest(contour, (objX, objY), False)
        #print('dist: ' + str(dist))
        if MIN_CONTOUR_AREA <= area <= MAX_CONTOUR_AREA and dist >= 0:
            return (True, contour)

    return (False, None)

def processImage(img):
    """Detect the tracked object in a frame.

    Returns a tuple (annotated image, xDir, yDir), where xDir and yDir are -1, 0 or 1.
    """
    # Resize the image to the desired height.
    resized = resizeImage(img)
    #return removeColors(resized)
    dim = resized.shape

    # Convert to grayscale.
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)

    # Blur the image.
    blur = cv2.GaussianBlur(gray, (BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), 0)

    # Threshold the image (inverse: dark pixels are kept).
    _, imgThres = cv2.threshold(blur, BINARY_THRESHOLD, 255, cv2.THRESH_BINARY_INV)

    # Dilate the thresholded image with a square structuring element.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (DILATION_KERNEL_SIZE, DILATION_KERNEL_SIZE))
    dilated = cv2.dilate(imgThres, kernel, iterations=DILATION_ITERATIONS)

    # Find the largest contour of the dilated image.
    if MAJOR_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) > 0:
        maxContour = max(contours, key=cv2.contourArea)
    else:
        print('No contours found.')
        return (resized, 0, 0)

    # Get the bounding rectangle of the contour and its centroid.
    x, y, w, h = cv2.boundingRect(maxContour)
    objCenterX = int(x + w / 2)
    objCenterY = int(y + h / 2)
    imgCenterX = int(dim[1] / 2)
    imgCenterY = int(dim[0] / 2)
    #cv2.circle(resized, (objCenterX, objCenterY), 5, BLUE, LINE_THICKNESS)

    # Look for an edge contour of valid size that contains the rough centroid.
    ret, finalContour = findMatchingContour(blur, objCenterX, objCenterY)
    if not ret:
        return (resized, 0, 0)

    #cv2.fillPoly(resized, finalContour, BLUE, cv2.LINE_4)
    x, y, w, h = cv2.boundingRect(finalContour)
    objCenterX = int(x + w / 2)
    objCenterY = int(y + h / 2)

    # Draw the bounding rectangle and its centroid on the image.
    #cv2.circle(resized, (objCenterX, objCenterY), 5, YELLOW, LINE_THICKNESS)
    cv2.rectangle(resized, (x, y), (x + w, y + h), RED, LINE_THICKNESS)

    # Determine the direction of the object relative to the center of the camera.
    xDir, yDir = determinateDir(imgCenterX, imgCenterY, objCenterX, objCenterY)

    return (resized, xDir, yDir)

def determinateDir(cenX, cenY, objX, objY):
    """Return the direction (-1, 0 or 1 per axis) the servos should move to re-center the object."""
    xDir = 0
    yDir = 0

    if abs(cenX - objX) >= MIN_DISTANCE_FOR_MOVE:
        if objX > cenX:
            xDir = 1
        else:
            xDir = -1

    if abs(cenY - objY) >= MIN_DISTANCE_FOR_MOVE:
        # Image y coordinates grow downwards, so a larger objY means the object is below the center.
        if objY > cenY:
            yDir = -1
        else:
            yDir = 1

    return (xDir, yDir)

#####################################################################################

# Wait for the client to report its IP address, then publish frames to it.
context = zmq.Context()

socket = context.socket(zmq.REP)
socket.bind("tcp://*:4444")
client_ip = socket.recv()

footage_socket = context.socket(zmq.PUB)
footage_socket.connect('tcp://' + client_ip.decode() + ':5555')

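# For reference, a receiving client is assumed to do roughly the following
# (a sketch based on the protocol above, not part of this script): send its own
# IP to port 4444 with a REQ socket, then receive frames on port 5555 and
# decode the base64 JPEG data, e.g.:
#
#   import cv2, zmq, base64, numpy as np
#   ctx = zmq.Context()
#   req = ctx.socket(zmq.REQ)
#   req.connect('tcp://<server-ip>:4444')    # <server-ip> is a placeholder
#   req.send(b'<client-ip>')                 # this script only reads the IP, it sends no reply
#   sub = ctx.socket(zmq.SUB)
#   sub.bind('tcp://*:5555')                 # the PUB socket above connects to the client
#   sub.setsockopt_string(zmq.SUBSCRIBE, '')
#   while True:
#       frame = cv2.imdecode(np.frombuffer(base64.b64decode(sub.recv()), np.uint8), cv2.IMREAD_COLOR)
#       cv2.imshow('Stream', frame)
#       cv2.waitKey(1)
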
#camera = cv2.VideoCapture(0) # init the camera

# Open the test video file.
cap = cv2.VideoCapture('C:/Users/Giorgos Ger/Desktop/drone_test.mp4')
if not cap.isOpened():
    print('Error opening stream.')
    quit()

#cap.set(1, 30*6)  # cv2.CAP_PROP_POS_FRAMES == 1: optionally skip ahead to frame 30*6.

while cap.isOpened():
    try:
        ret, frame = cap.read()
        if not ret:
            break

        # Process the frame, then stream it as a base64-encoded JPEG.
        img, xDir, yDir = processImage(frame)
        #cv2.imshow('Frame', img)
        encoded, buffer = cv2.imencode('.jpg', img)
        jpg_as_text = base64.b64encode(buffer)
        footage_socket.send(jpg_as_text)
        cv2.waitKey(33)
    except KeyboardInterrupt:
        break

cap.release()
cv2.destroyAllWindows()