import base64

import cv2
import numpy as np
import zmq

import servo

# -------------- Constant definitions ------------------------------
MAJOR_VERSION = cv2.getVersionMajor()

SERVO_INITIAL_X_ANGLE = 0    # The initial horizontal angle of the camera.
SERVO_INITIAL_Y_ANGLE = 0    # The initial vertical angle of the camera.
SERVO_STEP_ANGLE = 5         # The angle by which the servo motors move each frame.

DESIRED_HEIGHT = 480         # The input image is resized to this height, preserving its aspect ratio.
BLUE_THRESHOLD = 150         # If the blue channel is above this, the pixel is considered background and removed.
BINARY_THRESHOLD = 30        # Pixels not brighter than this are removed before detection.
CANNY_LOW_THRES = 150        # Low threshold for the Canny edge detector.
CANNY_HIGH_THRES = 350       # High threshold for the Canny edge detector.
LINE_THICKNESS = 2           # Thickness of the drawn lines.
MIN_CONTOUR_AREA = 100       # Minimum area of a contour to be considered valid.
MAX_CONTOUR_AREA = 2100      # Maximum area of a contour to be considered valid.
BLUR_KERNEL_SIZE = 3         # The size of the Gaussian blur kernel.
DILATION_KERNEL_SIZE = 5     # The size of the dilation kernel.
DILATION_ITERATIONS = 5      # The number of dilation iterations.
MIN_DISTANCE_FOR_MOVE = 10   # Minimum distance of the drone from the center for the servos to move.

# Colors (OpenCV's default BGR order).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
YELLOW = (0, 255, 255)


# -------------- Function definitions ------------------------------
def resizeImage(img):
    """Resize the input image to DESIRED_HEIGHT, preserving its aspect ratio."""
    height, width = img.shape[:2]
    newWidth = int(DESIRED_HEIGHT * width / height)
    return cv2.resize(img, (newWidth, DESIRED_HEIGHT))


def findMatchingContour(img, objX, objY):
    """Find a Canny-edge contour of valid area that contains the point (objX, objY)."""
    canny = cv2.Canny(img, CANNY_LOW_THRES, CANNY_HIGH_THRES)

    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns (contours, hierarchy).
    if MAJOR_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    else:
        contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    # Check the largest contours first.
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        area = w * h
        # A non-negative distance means the point lies inside or on the contour.
        dist = cv2.pointPolygonTest(contour, (float(objX), float(objY)), False)
        if MIN_CONTOUR_AREA <= area <= MAX_CONTOUR_AREA and dist >= 0:
            return (True, contour)

    return (False, None)


def processImage(img):
    """Detect the drone in the frame and return the annotated frame plus the servo directions."""
    # Resize the image to the desired height.
    resized = resizeImage(img)
    dim = resized.shape

    # Convert to grayscale.
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)

    # Blur the image.
    blur = cv2.GaussianBlur(gray, (BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), 0)

    # Threshold the image: dark pixels become foreground (inverse binary).
    _, imgThres = cv2.threshold(blur, BINARY_THRESHOLD, 255, cv2.THRESH_BINARY_INV)

    # Dilate the thresholded image.
    kernel = np.ones((DILATION_KERNEL_SIZE, DILATION_KERNEL_SIZE), np.uint8)
    dilated = cv2.dilate(imgThres, kernel, iterations=DILATION_ITERATIONS)

    # Find the largest contour of the thresholded image.
    if MAJOR_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) > 0:
        maxContour = max(contours, key=cv2.contourArea)
    else:
        print('No contours found.')
        return (resized, 0, 0)

    # Get the bounding rectangle of the contour and the centroid of the rectangle.
    x, y, w, h = cv2.boundingRect(maxContour)
    objCenterX = int(x + w / 2)
    objCenterY = int(y + h / 2)
    imgCenterX = int(dim[1] / 2)
    imgCenterY = int(dim[0] / 2)

    # Refine the detection: keep only an edge contour that contains the centroid.
    ret, finalContour = findMatchingContour(blur, objCenterX, objCenterY)
    if not ret:
        return (resized, 0, 0)

    x, y, w, h = cv2.boundingRect(finalContour)
    objCenterX = int(x + w / 2)
    objCenterY = int(y + h / 2)

    # Draw the bounding rectangle on the image.
    cv2.rectangle(resized, (x, y), (x + w, y + h), RED, LINE_THICKNESS)

    # Determine the direction of the object relative to the center of the camera.
    xDir, yDir = determinateDir(imgCenterX, imgCenterY, objCenterX, objCenterY)

    return (resized, xDir, yDir)


def determinateDir(cenX, cenY, objX, objY):
    """Return the horizontal and vertical direction (-1, 0 or 1) of the object relative to the image center."""
    xDir = 0
    yDir = 0

    if abs(cenX - objX) >= MIN_DISTANCE_FOR_MOVE:
        xDir = 1 if objX > cenX else -1

    if abs(cenY - objY) >= MIN_DISTANCE_FOR_MOVE:
        yDir = -1 if objY > cenY else 1

    return (xDir, yDir)


#####################################################################################
# Wait for the client to announce its IP, then publish the processed frames to it.
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://*:4444')
client_ip = socket.recv()

footage_socket = context.socket(zmq.PUB)
footage_socket.connect('tcp://' + client_ip.decode() + ':5555')

servoX = SERVO_INITIAL_X_ANGLE
servoY = SERVO_INITIAL_Y_ANGLE

# Use cv2.VideoCapture(0) instead to read from a live camera.
cap = cv2.VideoCapture('C:/Users/steyi/Desktop/drone_test.mp4')
if not cap.isOpened():
    print('Error opening stream.')
    quit()

while cap.isOpened():
    try:
        ret, frame = cap.read()
        if ret:
            img, xDir, yDir = processImage(frame)

            # Encode the frame as JPEG and send it to the client as base64 text.
            encoded, buffer = cv2.imencode('.jpg', img)
            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)
            cv2.waitKey(33)  # Throttle to roughly 30 fps.

            # Move the servo motors, clamping the angles to [0, 180].
            if yDir == 1:
                servoY = min(servoY + SERVO_STEP_ANGLE, 180)
                servo.SetAngleUp(servoY)
            elif yDir == -1:
                servoY = max(servoY - SERVO_STEP_ANGLE, 0)
                servo.SetAngleUp(servoY)

            if xDir == 1:
                servoX = min(servoX + SERVO_STEP_ANGLE, 180)
                servo.SetAngleDown(servoX)
            elif xDir == -1:
                servoX = max(servoX - SERVO_STEP_ANGLE, 0)
                servo.SetAngleDown(servoX)
        else:
            break
    except KeyboardInterrupt:
        break

cap.release()
cv2.destroyAllWindows()
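

# --------------------------------------------------------------------
# Reference only: a minimal sketch of the receiving side, assuming the
# client binds a zmq SUB socket on port 5555 and displays the frames
# sent above. The function name and window title are placeholders and
# the function is never called from this script; the real client (which
# must also announce its IP to port 4444 first) may differ.
def exampleReceiver():
    recv_context = zmq.Context()
    recv_socket = recv_context.socket(zmq.SUB)
    recv_socket.bind('tcp://*:5555')
    recv_socket.setsockopt_string(zmq.SUBSCRIBE, '')  # Subscribe to every message.

    while True:
        jpg_as_text = recv_socket.recv()        # Base64-encoded JPEG bytes.
        buffer = base64.b64decode(jpg_as_text)  # Raw JPEG bytes.
        frame = cv2.imdecode(np.frombuffer(buffer, np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('Drone footage', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break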