# saveeeeeeeee copy.py
import cv2
import numpy as np
import math
from typing import List, Tuple
# global variables go here:
FIELD_OF_VIEW = (63.3, 49.7)  # (x degrees, y degrees)
# For the trigonometry below
CAM_HEIGHT = 61.5  # in cm
CAM_PITCH_ANGLE = 48  # in degrees
CAM_YAW_ANGLE = 24  # in degrees
X_OFFSET = 30  # in cm
Y_OFFSET = 31.3  # in cm
# Define lower and upper bounds for HSV color range
HSV_LOW_BOUND = np.array([0, 100, 100], np.uint8)
HSV_HIGH_BOUND = np.array([30, 255, 255], np.uint8)
# Define hierarchy indices
NEXT = 0
PREVIOUS = 1
FIRST_CHILD = 2
PARENT = 3
# Define kernels for smoothing and separating donuts
SMOOTHING_KERNEL = np.ones((5,5),np.float32)/25
# ERODE_KERNEL = np.array([[0,1,0],[1,1,1],[0,1,0]], np.uint8)
ERODE_KERNEL = np.ones((7, 7), np.uint8)
MARKER1_POS = (69, 190)
MARKER2_POS = (73, 178)
# Reference marker expected colors (OpenCV BGR order)
MARKER1_COLOR = (50, 205, 50) # #32cd32 Bright green
MARKER2_COLOR = (25, 255, 255) # #ffff19 Vivid Yellow
# Find the x and y angles of the note
def calculate_angle(fov: Tuple[float, float], center: Tuple[int, int], frame: np.ndarray, camera_pitch_angle: float, cam_yaw: float):
    """Calculate the relative angle from the camera to the note in both axes.

    Args:
    - `fov (tuple[float, float]):` the field of view of the camera (angle_x, angle_y).
    - `center (tuple[int, int]):` the coordinates of the center of the note in the frame (x, y).
    - `frame (np.ndarray):` the frame the note is in.
    - `camera_pitch_angle (float):` the pitch angle of the camera relative to the ground.
    - `cam_yaw (float):` the yaw angle of the camera relative to the robot's forward axis.

    Returns:
    - `(angle_x, angle_y)`: the angles in both axes from the camera to the note.
    """
    angle_x = (fov[0] / frame.shape[1]) * ((frame.shape[1] / 2) - center[0]) - cam_yaw
    angle_y = (fov[1] / frame.shape[0]) * ((frame.shape[0] / 2) - center[1]) + camera_pitch_angle
    return angle_x, angle_y
def calculate_distance(angle_y: float, cam_height: float):
    """Calculate the forward (X-axis) distance along the ground from the camera to the note.

    Args:
    - `angle_y (float):` the y-axis angle from the camera to the note (including the pitch offset).
    - `cam_height (float):` the height of the camera above the ground.

    Returns:
    The distance along the ground from the camera to the note.
    """
    return cam_height * np.tan(np.radians(angle_y))
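# Continuing the sanity check above (hypothetical numbers):
# calculate_distance(48, 61.5) = 61.5 * tan(48 deg) ~= 68.3 cm along the ground.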
def convert_to_mid_of_robot(llpython: list, x_offset: float, y_offset: float):
    """Convert the camera-relative distance and angle into values relative to the center of the robot.

    Args:
    llpython (list): the output limelight array [distance, angle]
    x_offset (float): the forward offset of the camera from the center of the robot
    y_offset (float): the sideways offset of the camera from the center of the robot

    Returns:
    `llpython`: The output array for the limelight
    """
distance = llpython[0]
angle = llpython[1]
angle_rad = math.radians(angle)
y = distance * math.sin(angle_rad) + y_offset
x = distance * math.cos(angle_rad) + x_offset
    angle = math.degrees(math.atan2(y, x))
llpython = [math.hypot(x,y), angle]
return llpython
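# Quick check with made-up numbers: distance = 100 cm and angle = 0 deg give
# x = 100 + 30 = 130 and y = 0 + 31.3, so the robot-center result is
# hypot(130, 31.3) ~= 133.7 cm at atan2(31.3, 130) ~= 13.5 deg.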
def find_largest_contour_and_child(contours: List[np.ndarray], hierarchy: List[np.ndarray]) -> Tuple[int, int]:
    """Find the index of the largest contour and the index of its largest child.

    Args:
    contours (list[np.ndarray]): The contours list
    hierarchy (list[np.ndarray]): The respective hierarchy list

    Returns:
    (int, int): the indices of the largest contour and its largest child (-1 if it has no child)
    """
largest_contour_index = max(range(len(contours)), key=lambda i: cv2.contourArea(contours[i]))
child_index = hierarchy[largest_contour_index][FIRST_CHILD]
biggest_child_contour_index = -1
biggest_child_contour_area = 0
while child_index != -1:
child_contour = contours[child_index]
child_contour_area = cv2.contourArea(child_contour)
if child_contour_area > biggest_child_contour_area:
biggest_child_contour_area = child_contour_area
biggest_child_contour_index = child_index
child_index = hierarchy[child_index][NEXT]
return (largest_contour_index ,biggest_child_contour_index)
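# Minimal usage sketch (assuming a binary mask already exists):
#   contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#   if contours:
#       outer_i, inner_i = find_largest_contour_and_child(contours, hierarchy[0])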
def get_exposure_increase(image, ref_pos, ref_color):
    """Estimate the exposure bump (in ms, rounded to the nearest 10) needed for the
    sampled patch at ref_pos to match the expected brightness of ref_color."""
    # Unpack the reference color (stored in OpenCV's BGR order)
    ref_b, ref_g, ref_r = ref_color
    # Sample a 3x3 region around the pos
    x, y = ref_pos
    x1 = max(x - 1, 0)
    y1 = max(y - 1, 0)
    x2 = min(x + 1, image.shape[1] - 1)
    y2 = min(y + 1, image.shape[0] - 1)
    image_patch = image[y1:y2 + 1, x1:x2 + 1]
    b, g, r = cv2.mean(image_patch)[:3]
# Calculate luminances
ref_lum = 0.299*ref_r + 0.587*ref_g + 0.114*ref_b
img_lum = 0.299*r + 0.587*g + 0.114*b
# Check for divide by 0
if img_lum == 0:
return 0
ratio = ref_lum / img_lum
# Keep ratio within limits
ratio = max(ratio, 0.01)
ratio = min(ratio, 100)
# Log scale
log_ratio = np.log2(ratio)
# Round to nearest 10 ms
ms = round(log_ratio * 100 / 10) * 10
return ms
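# Example with made-up numbers: if the sampled patch is half as bright as the
# reference, ratio = 2, log2(2) = 1, and the function returns 100 (i.e. +100 ms).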
def expand_hsv_bounds(img, contour, hsv_low_bound, hsv_high_bound, neighborhood_size=5, tolerance=5):
    """
    Expand the HSV bounds based on the pixels along the contour and their neighborhood.

    Args:
    img (np.ndarray): The input image.
    contour (np.ndarray): A contour representing the region of interest.
    hsv_low_bound (np.ndarray): The current lower HSV bound.
    hsv_high_bound (np.ndarray): The current upper HSV bound.
    neighborhood_size (int): The size of the neighborhood around each contour pixel.
    tolerance (int): The tolerance value for including neighboring pixels in the bounds.

    Returns:
    tuple: A tuple containing the updated HSV low and high bounds.
    """
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
updated_low_bound = hsv_low_bound.copy()
updated_high_bound = hsv_high_bound.copy()
for pixel in contour:
x, y = pixel[0]
for neighbor_x in range(max(0, x - neighborhood_size // 2), min(img.shape[1], x + neighborhood_size // 2 + 1)):
for neighbor_y in range(max(0, y - neighborhood_size // 2), min(img.shape[0], y + neighborhood_size // 2 + 1)):
                # Cast to int so the arithmetic below cannot wrap around uint8
                neighbor_hsv = hsv_img[neighbor_y, neighbor_x].astype(int)
                if np.all(np.abs(neighbor_hsv - hsv_low_bound.astype(int)) <= tolerance) or np.all(np.abs(neighbor_hsv - hsv_high_bound.astype(int)) <= tolerance):
                    updated_low_bound = np.minimum(updated_low_bound, np.clip(neighbor_hsv - tolerance, 0, 255)).astype(np.uint8)
                    updated_high_bound = np.maximum(updated_high_bound, np.clip(neighbor_hsv + tolerance, 0, 255)).astype(np.uint8)
return updated_low_bound, updated_high_bound
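# Illustration with made-up values: a neighbor pixel of HSV (3, 102, 97) is within
# tolerance 5 of the low bound (0, 100, 100) in every channel, so the low bound
# widens to (0, 97, 92) (clipped at 0) and later inRange calls will accept it.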
# runPipeline() is called every frame by Limelight's backend.
def runPipeline(image, llrobot):
global HSV_LOW_BOUND, HSV_HIGH_BOUND
exposure_increase = (get_exposure_increase(image, MARKER1_POS, MARKER1_COLOR) + get_exposure_increase(image, MARKER2_POS, MARKER2_COLOR))/2
dist = 0
Angle = 0
    # Blur the image to smooth it
image = cv2.filter2D(image,-1,SMOOTHING_KERNEL)
# Convert image to HSV color space
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Create a mask based on the specified HSV color range
mask = cv2.inRange(hsv, HSV_LOW_BOUND, HSV_HIGH_BOUND)
mask = cv2.erode(mask,ERODE_KERNEL)
mask = cv2.dilate(mask,ERODE_KERNEL)
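    # (Erode followed by dilate is a morphological opening: it removes small
    # specks of noise and helps split touching notes before contour detection.)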
# Find contours in the mask
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw all contours on the original image
#cv2.drawContours(image, contours, -1, (255, 0, 255), 3)
# Process only if there are contours detected
if len(contours) != 0:
hierarchy = hierarchy[0]
largest_contour_index, biggest_child_contour_index = find_largest_contour_and_child(contours, hierarchy)
        HSV_LOW_BOUND, HSV_HIGH_BOUND = expand_hsv_bounds(image, contours[largest_contour_index], HSV_LOW_BOUND, HSV_HIGH_BOUND)
print(HSV_LOW_BOUND)
print(HSV_HIGH_BOUND)
cv2.drawContours(image, contours, largest_contour_index, (255, 0, 0), 5)
# Draw the largest child contour
if biggest_child_contour_index != -1:
biggest_child_contour = contours[biggest_child_contour_index]
cv2.drawContours(image, [biggest_child_contour], 0, (0, 255, 0), 2)
outer_contour = contours[largest_contour_index]
inner_contour = contours[biggest_child_contour_index]
            # Check if both outer and inner contours have areas greater than 100
if (cv2.contourArea(outer_contour) > 100) and (cv2.contourArea(inner_contour) > 100):
x, y, w, h = cv2.boundingRect(outer_contour)
outer_aspect_ratio = float(w) / h
outer_center = (int(x+(w/2)), int(y+(h/2)))
cv2.circle(image, outer_center, 10, (255,255,255),-3)
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 5)
x1, y1, w1, h1 = cv2.boundingRect(inner_contour)
inner_aspect_ratio = float(w1) / h1
inner_center = (int(x1+(w1/2)), int(y1+(h1/2)))
cv2.circle(image, inner_center, 5, (0,0,0),-3)
cv2.rectangle(image, (x1, y1), (x1+w1, y1+h1), (255, 0, 0), 5)
# Check if the aspect ratios and distance between centers meet the criteria
if (abs(outer_aspect_ratio - inner_aspect_ratio) < 10) and (math.dist(outer_center, inner_center) < 20):
image = cv2.putText(image, 'probably donut?☺☻♥', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2, cv2.LINE_AA)
Angle, Angle_y = calculate_angle(FIELD_OF_VIEW, inner_center ,image, CAM_PITCH_ANGLE, CAM_YAW_ANGLE)
dist = calculate_distance(Angle_y,CAM_HEIGHT)
else:
                    cv2.putText(image, 'connected notes, pls bump to separate', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
else:
print("There is no child contour :(")
    llpython = [dist, Angle, exposure_increase]
    if llpython[0] > 0:
        # convert_to_mid_of_robot returns [distance, angle], so re-append the exposure value
        llpython = convert_to_mid_of_robot(llpython, X_OFFSET, Y_OFFSET) + [exposure_increase]
cv2.circle(image, MARKER1_POS, 5, MARKER1_COLOR,-1)
cv2.circle(image, MARKER2_POS, 5, MARKER2_COLOR,-1)
cv2.imshow("image",image)
return contours, image, llpython
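# Local test harness: on an actual Limelight the backend calls runPipeline()
# itself, so this webcam loop (and the imshow call above) is for desktop testing.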
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    if not ret:
        break
    runPipeline(img, 5)
    # Check for key press to exit
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
# Release the video capture and destroy all windows
cap.release()
cv2.destroyAllWindows()