|
@@ -38,25 +38,24 @@ def resizeImage(img):


def findMatchingContour(img, objX, objY):
    dilated = img.copy()

    # Apply the canny detector on the image and find the contours.
    #dilated = cv2.dilate(img, (5,5), iterations=1)
-    canny = cv2.Canny(img, CANNY_LOW_THRES, CANNY_HIGH_THRES)
+    canny = cv2.Canny(dilated, CANNY_LOW_THRES, CANNY_HIGH_THRES)

    if MAJOR_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    else:
        contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    #print('len:' + str(len(contours)))
    # Sort the contours based on their area.
    contours.sort(key = cv2.contourArea, reverse = True)

    #cv2.imshow('hey', canny)
    # Determine which of the detected contours is the drone.
    for i in range(len(contours)):
        contour = contours[i]
        x,y,w,h = cv2.boundingRect(contour)

        area = w*h
        dist = cv2.pointPolygonTest(contour, (objX,objY), False)
        #print('dist: ' + str(dist))

        if area >= MIN_CONTOUR_AREA and area <= MAX_CONTOUR_AREA and dist >= 0:
            return (True, contour)
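
A note on the version check above: in OpenCV 3.x, cv2.findContours returns three values (image, contours, hierarchy), while OpenCV 2.x and 4.x return only (contours, hierarchy), which is why both call variants are kept. MAJOR_VERSION itself is defined outside this hunk; a common way to derive such a constant, shown here only as an assumed sketch, is:

    MAJOR_VERSION = int(cv2.__version__.split('.')[0])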
|
@@ -66,7 +65,6 @@ def processImage(img):

    # Resize image to the desired height.
    resized = resizeImage(img)
    #return removeColors(resized)

    dim = resized.shape

    # Convert to grayscale.
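
For orientation: resized.shape on a BGR frame is (height, width, channels), so dim[0] is the frame height and dim[1] its width; the image-centre coordinates computed in the next hunk read the tuple in that order.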
|
@@ -100,18 +98,16 @@ def processImage(img):

    objCenterY = int( (y + y + h) / 2)
    imgCenterX = int(dim[1]/2)
    imgCenterY = int(dim[0]/2)
    #cv2.circle(resized, (objCenterX, objCenterY), 5, BLUE, LINE_THICKNESS)

    ret, finalContour = findMatchingContour(blur, objCenterX, objCenterY)
    if (ret == False):
        return (resized, 0, 0)

    #cv2.fillPoly(resized, finalContour, BLUE, cv2.LINE_4)

    x,y,w,h = cv2.boundingRect(finalContour)
    objCenterX = int( (x + x + w) / 2)
    objCenterY = int( (y + y + h) / 2)

    # Draw the bounding rectangle and its centroid to the image.
    #cv2.circle(resized, (objCenterX, objCenterY), 5, YELLOW, LINE_THICKNESS)
    cv2.rectangle(resized, (x,y), (x+w,y+h), RED, LINE_THICKNESS)
    cv2.line(resized, (objCenterX, objCenterY), (imgCenterX, imgCenterY), YELLOW, LINE_THICKNESS)
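
objCenterX and objCenterY above are simply the bounding-box midpoints, since (x + x + w) / 2 equals x + w/2. The remainder of processImage, which falls outside this diff, returns the xDir and yDir values unpacked in the main loop below; one hypothetical way such a coarse steering direction could be derived from the two centres (an illustrative sketch, not the author's code) is:

    # Hypothetical sketch: take the sign of the offset between the object centre
    # and the image centre (-1, 0 or +1 on each axis).
    xDir = (objCenterX > imgCenterX) - (objCenterX < imgCenterX)
    yDir = (objCenterY > imgCenterY) - (objCenterY < imgCenterY)
    return (resized, xDir, yDir)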
|
@@ -160,14 +156,13 @@ if (cap.isOpened() == False):

    print('Error opening stream.')
    quit()

#cap.set(1, 30*6)

while (cap.isOpened()):
    try:
        ret, frame = cap.read()
        if (ret == True):
            img, xDir, yDir = processImage(frame)
            #cv2.imshow('Frame', img)

            # Encode and send the processed frame.
            encoded, buffer = cv2.imencode('.jpg', img)
            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)
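
footage_socket is presumably a ZeroMQ socket created earlier in the script, outside this diff. Assuming a PUB/SUB pair, a minimal receiver sketch for the base64-encoded JPEG stream sent above could look like this (address, port and socket type are assumptions):

    import base64
    import cv2
    import numpy as np
    import zmq

    context = zmq.Context()
    receiver = context.socket(zmq.SUB)            # assumes the sender side uses PUB
    receiver.connect('tcp://127.0.0.1:5555')      # hypothetical address and port
    receiver.setsockopt_string(zmq.SUBSCRIBE, '')

    while True:
        jpg_as_text = receiver.recv()             # base64-encoded JPEG bytes
        jpg_bytes = base64.b64decode(jpg_as_text)
        frame = cv2.imdecode(np.frombuffer(jpg_bytes, np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('Stream', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break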
|
|