foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +100 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/behaviors/common.py +686 -0
  9. ate/behaviors/tree.py +454 -0
  10. ate/cli.py +855 -3995
  11. ate/client.py +90 -0
  12. ate/commands/__init__.py +168 -0
  13. ate/commands/auth.py +389 -0
  14. ate/commands/bridge.py +448 -0
  15. ate/commands/data.py +185 -0
  16. ate/commands/deps.py +111 -0
  17. ate/commands/generate.py +384 -0
  18. ate/commands/memory.py +907 -0
  19. ate/commands/parts.py +166 -0
  20. ate/commands/primitive.py +399 -0
  21. ate/commands/protocol.py +288 -0
  22. ate/commands/recording.py +524 -0
  23. ate/commands/repo.py +154 -0
  24. ate/commands/simulation.py +291 -0
  25. ate/commands/skill.py +303 -0
  26. ate/commands/skills.py +487 -0
  27. ate/commands/team.py +147 -0
  28. ate/commands/workflow.py +271 -0
  29. ate/detection/__init__.py +38 -0
  30. ate/detection/base.py +142 -0
  31. ate/detection/color_detector.py +399 -0
  32. ate/detection/trash_detector.py +322 -0
  33. ate/drivers/__init__.py +39 -0
  34. ate/drivers/ble_transport.py +405 -0
  35. ate/drivers/mechdog.py +942 -0
  36. ate/drivers/wifi_camera.py +477 -0
  37. ate/interfaces/__init__.py +187 -0
  38. ate/interfaces/base.py +273 -0
  39. ate/interfaces/body.py +267 -0
  40. ate/interfaces/detection.py +282 -0
  41. ate/interfaces/locomotion.py +422 -0
  42. ate/interfaces/manipulation.py +408 -0
  43. ate/interfaces/navigation.py +389 -0
  44. ate/interfaces/perception.py +362 -0
  45. ate/interfaces/sensors.py +247 -0
  46. ate/interfaces/types.py +371 -0
  47. ate/llm_proxy.py +239 -0
  48. ate/mcp_server.py +387 -0
  49. ate/memory/__init__.py +35 -0
  50. ate/memory/cloud.py +244 -0
  51. ate/memory/context.py +269 -0
  52. ate/memory/embeddings.py +184 -0
  53. ate/memory/export.py +26 -0
  54. ate/memory/merge.py +146 -0
  55. ate/memory/migrate/__init__.py +34 -0
  56. ate/memory/migrate/base.py +89 -0
  57. ate/memory/migrate/pipeline.py +189 -0
  58. ate/memory/migrate/sources/__init__.py +13 -0
  59. ate/memory/migrate/sources/chroma.py +170 -0
  60. ate/memory/migrate/sources/pinecone.py +120 -0
  61. ate/memory/migrate/sources/qdrant.py +110 -0
  62. ate/memory/migrate/sources/weaviate.py +160 -0
  63. ate/memory/reranker.py +353 -0
  64. ate/memory/search.py +26 -0
  65. ate/memory/store.py +548 -0
  66. ate/recording/__init__.py +83 -0
  67. ate/recording/demonstration.py +378 -0
  68. ate/recording/session.py +415 -0
  69. ate/recording/upload.py +304 -0
  70. ate/recording/visual.py +416 -0
  71. ate/recording/wrapper.py +95 -0
  72. ate/robot/__init__.py +221 -0
  73. ate/robot/agentic_servo.py +856 -0
  74. ate/robot/behaviors.py +493 -0
  75. ate/robot/ble_capture.py +1000 -0
  76. ate/robot/ble_enumerate.py +506 -0
  77. ate/robot/calibration.py +668 -0
  78. ate/robot/calibration_state.py +388 -0
  79. ate/robot/commands.py +3735 -0
  80. ate/robot/direction_calibration.py +554 -0
  81. ate/robot/discovery.py +441 -0
  82. ate/robot/introspection.py +330 -0
  83. ate/robot/llm_system_id.py +654 -0
  84. ate/robot/locomotion_calibration.py +508 -0
  85. ate/robot/manager.py +270 -0
  86. ate/robot/marker_generator.py +611 -0
  87. ate/robot/perception.py +502 -0
  88. ate/robot/primitives.py +614 -0
  89. ate/robot/profiles.py +281 -0
  90. ate/robot/registry.py +322 -0
  91. ate/robot/servo_mapper.py +1153 -0
  92. ate/robot/skill_upload.py +675 -0
  93. ate/robot/target_calibration.py +500 -0
  94. ate/robot/teach.py +515 -0
  95. ate/robot/types.py +242 -0
  96. ate/robot/visual_labeler.py +1048 -0
  97. ate/robot/visual_servo_loop.py +494 -0
  98. ate/robot/visual_servoing.py +570 -0
  99. ate/robot/visual_system_id.py +906 -0
  100. ate/transports/__init__.py +121 -0
  101. ate/transports/base.py +394 -0
  102. ate/transports/ble.py +405 -0
  103. ate/transports/hybrid.py +444 -0
  104. ate/transports/serial.py +345 -0
  105. ate/urdf/__init__.py +30 -0
  106. ate/urdf/capture.py +582 -0
  107. ate/urdf/cloud.py +491 -0
  108. ate/urdf/collision.py +271 -0
  109. ate/urdf/commands.py +708 -0
  110. ate/urdf/depth.py +360 -0
  111. ate/urdf/inertial.py +312 -0
  112. ate/urdf/kinematics.py +330 -0
  113. ate/urdf/lifting.py +415 -0
  114. ate/urdf/meshing.py +300 -0
  115. ate/urdf/models/__init__.py +110 -0
  116. ate/urdf/models/depth_anything.py +253 -0
  117. ate/urdf/models/sam2.py +324 -0
  118. ate/urdf/motion_analysis.py +396 -0
  119. ate/urdf/pipeline.py +468 -0
  120. ate/urdf/scale.py +256 -0
  121. ate/urdf/scan_session.py +411 -0
  122. ate/urdf/segmentation.py +299 -0
  123. ate/urdf/synthesis.py +319 -0
  124. ate/urdf/topology.py +336 -0
  125. ate/urdf/validation.py +371 -0
  126. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
  127. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  128. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  129. foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
  130. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  131. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/detection/color_detector.py (new file)
@@ -0,0 +1,399 @@
+ """
+ Color-based object detection.
+
+ Simple but effective detection using color segmentation.
+ Works without ML dependencies - just needs PIL/Pillow.
+
+ Great for:
+ - Detecting colored objects (red balls, blue bins, etc.)
+ - Prototyping before using ML models
+ - Real-time detection on limited hardware
+ """
+
+ from dataclasses import dataclass
+ from typing import List, Tuple, Optional, Any, Dict
+ import io
+
+ from .base import DetectorBase, Detection, BoundingBox
+
+ try:
+     from PIL import Image as PILImage
+     HAS_PIL = True
+ except ImportError:
+     HAS_PIL = False
+
+
+ @dataclass
+ class ColorRange:
+     """
+     HSV color range for detection.
+
+     HSV is better than RGB for color detection because
+     it separates color (hue) from brightness.
+     """
+     name: str
+     h_min: int  # Hue min (0-179)
+     h_max: int  # Hue max (0-179)
+     s_min: int  # Saturation min (0-255)
+     s_max: int  # Saturation max (0-255)
+     v_min: int  # Value min (0-255)
+     v_max: int  # Value max (0-255)
+
+
+ # Predefined color ranges (HSV)
+ PREDEFINED_COLORS: Dict[str, ColorRange] = {
+     "red": ColorRange("red", 0, 10, 100, 255, 100, 255),
+     "red2": ColorRange("red2", 160, 179, 100, 255, 100, 255),  # Red wraps around
+     "orange": ColorRange("orange", 11, 25, 100, 255, 100, 255),
+     "yellow": ColorRange("yellow", 26, 35, 100, 255, 100, 255),
+     "green": ColorRange("green", 36, 85, 50, 255, 50, 255),
+     "blue": ColorRange("blue", 86, 125, 50, 255, 50, 255),
+     "purple": ColorRange("purple", 126, 155, 50, 255, 50, 255),
+     "pink": ColorRange("pink", 156, 165, 50, 255, 100, 255),
+     "white": ColorRange("white", 0, 179, 0, 30, 200, 255),
+     "black": ColorRange("black", 0, 179, 0, 255, 0, 50),
+     "gray": ColorRange("gray", 0, 179, 0, 30, 50, 200),
+ }
+
+
+ class ColorDetector(DetectorBase):
+     """
+     Detect objects by color using HSV thresholding.
+
+     Example:
+         detector = ColorDetector()
+
+         # Detect red objects
+         detections = detector.detect(image, target_colors=["red"])
+
+         # Or use custom color range
+         custom = ColorRange("bright_green", 40, 80, 150, 255, 150, 255)
+         detections = detector.detect(image, color_ranges=[custom])
+     """
+
+     def __init__(
+         self,
+         min_area: int = 500,
+         max_area: int = 100000,
+         merge_distance: int = 20,
+     ):
+         """
+         Initialize color detector.
+
+         Args:
+             min_area: Minimum detection area in pixels
+             max_area: Maximum detection area in pixels
+             merge_distance: Distance to merge nearby detections
+         """
+         if not HAS_PIL:
+             raise ImportError("ColorDetector requires Pillow. Install with: pip install Pillow")
+
+         self.min_area = min_area
+         self.max_area = max_area
+         self.merge_distance = merge_distance
+
+     def detect(
+         self,
+         image: Any,
+         target_colors: Optional[List[str]] = None,
+         color_ranges: Optional[List[ColorRange]] = None,
+         **kwargs
+     ) -> List[Detection]:
+         """
+         Detect colored objects.
+
+         Args:
+             image: Input image (Image dataclass, PIL Image, or bytes)
+             target_colors: List of color names ["red", "blue", ...]
+             color_ranges: Custom ColorRange objects
+
+         Returns:
+             List of Detection objects
+         """
+         # Convert image to PIL
+         pil_image = self._to_pil(image)
+         if pil_image is None:
+             return []
+
+         # Build list of color ranges to detect
+         ranges = []
+         if target_colors:
+             for color in target_colors:
+                 if color in PREDEFINED_COLORS:
+                     ranges.append(PREDEFINED_COLORS[color])
+                     # Handle red wraparound
+                     if color == "red":
+                         ranges.append(PREDEFINED_COLORS["red2"])
+
+         if color_ranges:
+             ranges.extend(color_ranges)
+
+         if not ranges:
+             # Default: detect common trash colors
+             ranges = [
+                 PREDEFINED_COLORS["red"],
+                 PREDEFINED_COLORS["red2"],
+                 PREDEFINED_COLORS["blue"],
+                 PREDEFINED_COLORS["green"],
+                 PREDEFINED_COLORS["yellow"],
+             ]
+
+         # Detect for each color range
+         detections = []
+         for color_range in ranges:
+             dets = self._detect_color(pil_image, color_range)
+             detections.extend(dets)
+
+         # Merge overlapping detections
+         detections = self._merge_detections(detections)
+
+         return detections
+
+     def detect_class(self, image: Any, class_name: str, **kwargs) -> List[Detection]:
+         """Detect objects of a specific color class."""
+         return self.detect(image, target_colors=[class_name], **kwargs)
+
+     def _to_pil(self, image: Any) -> Optional["PILImage.Image"]:
+         """Convert various image formats to PIL Image."""
+         if isinstance(image, PILImage.Image):
+             return image
+
+         # Handle our Image dataclass
+         if hasattr(image, 'data') and hasattr(image, 'width'):
+             if image.width == 0 or image.height == 0:
+                 return None
+
+             if image.encoding == "jpeg":
+                 return PILImage.open(io.BytesIO(image.data))
+             elif image.encoding == "rgb8":
+                 return PILImage.frombytes("RGB", (image.width, image.height), image.data)
+             else:
+                 # Try to decode as JPEG
+                 try:
+                     return PILImage.open(io.BytesIO(image.data))
+                 except Exception:
+                     return None
+
+         # Handle raw bytes (assume JPEG)
+         if isinstance(image, bytes):
+             try:
+                 return PILImage.open(io.BytesIO(image))
+             except Exception:
+                 return None
+
+         return None
+
+     def _detect_color(self, image: "PILImage.Image", color_range: ColorRange) -> List[Detection]:
+         """
+         Detect regions matching a color range.
+
+         Uses a simple but effective approach:
+         1. Convert to HSV
+         2. Threshold by color range
+         3. Find connected components
+         4. Filter by size
+         """
+         # Convert to RGB if needed
+         if image.mode != "RGB":
+             image = image.convert("RGB")
+
+         width, height = image.size
+         pixels = image.load()
+
+         # Create binary mask
+         mask = [[False] * width for _ in range(height)]
+
+         for y in range(height):
+             for x in range(width):
+                 r, g, b = pixels[x, y]
+                 h, s, v = self._rgb_to_hsv(r, g, b)
+
+                 # Check if in range
+                 in_range = (
+                     color_range.s_min <= s <= color_range.s_max and
+                     color_range.v_min <= v <= color_range.v_max
+                 )
+
+                 # Handle hue wraparound for red
+                 if color_range.h_min <= color_range.h_max:
+                     in_range = in_range and color_range.h_min <= h <= color_range.h_max
+                 else:
+                     in_range = in_range and (h >= color_range.h_min or h <= color_range.h_max)
+
+                 mask[y][x] = in_range
+
+         # Find connected components (simple flood fill)
+         visited = [[False] * width for _ in range(height)]
+         components = []
+
+         for y in range(height):
+             for x in range(width):
+                 if mask[y][x] and not visited[y][x]:
+                     # Flood fill to find component
+                     component = self._flood_fill(mask, visited, x, y, width, height)
+                     if component:
+                         components.append(component)
+
+         # Convert components to detections
+         detections = []
+         for points in components:
+             area = len(points)
+             if area < self.min_area or area > self.max_area:
+                 continue
+
+             # Calculate bounding box
+             min_x = min(p[0] for p in points)
+             max_x = max(p[0] for p in points)
+             min_y = min(p[1] for p in points)
+             max_y = max(p[1] for p in points)
+
+             bbox = BoundingBox(
+                 x=min_x,
+                 y=min_y,
+                 width=max_x - min_x + 1,
+                 height=max_y - min_y + 1,
+             )
+
+             # Confidence based on area and aspect ratio
+             aspect = bbox.width / max(bbox.height, 1)
+             aspect_score = 1.0 - min(abs(aspect - 1.0), 1.0)  # Prefer square-ish
+             area_score = min(area / self.max_area, 1.0)
+             confidence = 0.5 + 0.3 * aspect_score + 0.2 * area_score
+
+             detections.append(Detection(
+                 label=color_range.name.replace("2", ""),  # Remove "red2" -> "red"
+                 confidence=confidence,
+                 bbox=bbox,
+                 metadata={"area": area, "color": color_range.name},
+             ))
+
+         return detections
+
+     def _flood_fill(
+         self,
+         mask: List[List[bool]],
+         visited: List[List[bool]],
+         start_x: int,
+         start_y: int,
+         width: int,
+         height: int,
+         max_points: int = 50000,
+     ) -> List[Tuple[int, int]]:
+         """Flood fill to find connected component."""
+         stack = [(start_x, start_y)]
+         points = []
+
+         while stack and len(points) < max_points:
+             x, y = stack.pop()
+
+             if x < 0 or x >= width or y < 0 or y >= height:
+                 continue
+             if visited[y][x] or not mask[y][x]:
+                 continue
+
+             visited[y][x] = True
+             points.append((x, y))
+
+             # Add neighbors (4-connected)
+             stack.append((x + 1, y))
+             stack.append((x - 1, y))
+             stack.append((x, y + 1))
+             stack.append((x, y - 1))
+
+         return points
+
+     def _merge_detections(self, detections: List[Detection]) -> List[Detection]:
+         """Merge overlapping detections of the same color."""
+         if len(detections) <= 1:
+             return detections
+
+         merged = []
+         used = [False] * len(detections)
+
+         for i, det1 in enumerate(detections):
+             if used[i]:
+                 continue
+
+             # Find all overlapping detections of same label
+             group = [det1]
+             used[i] = True
+
+             for j, det2 in enumerate(detections[i + 1:], i + 1):
+                 if used[j]:
+                     continue
+                 if det1.label != det2.label:
+                     continue
+
+                 # Check if close enough to merge
+                 dist = self._box_distance(det1.bbox, det2.bbox)
+                 if dist < self.merge_distance:
+                     group.append(det2)
+                     used[j] = True
+
+             # Merge group into single detection
+             if len(group) == 1:
+                 merged.append(det1)
+             else:
+                 merged.append(self._merge_group(group))
+
+         return merged
+
+     def _box_distance(self, b1: BoundingBox, b2: BoundingBox) -> float:
+         """Calculate distance between bounding boxes."""
+         c1 = b1.center
+         c2 = b2.center
+         return ((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2) ** 0.5
+
+     def _merge_group(self, group: List[Detection]) -> Detection:
+         """Merge multiple detections into one."""
+         # Calculate combined bounding box
+         min_x = min(d.bbox.x for d in group)
+         min_y = min(d.bbox.y for d in group)
+         max_x = max(d.bbox.x + d.bbox.width for d in group)
+         max_y = max(d.bbox.y + d.bbox.height for d in group)
+
+         bbox = BoundingBox(
+             x=min_x,
+             y=min_y,
+             width=max_x - min_x,
+             height=max_y - min_y,
+         )
+
+         # Average confidence
+         confidence = sum(d.confidence for d in group) / len(group)
+
+         # Sum area
+         total_area = sum(d.metadata.get("area", 0) for d in group)
+
+         return Detection(
+             label=group[0].label,
+             confidence=confidence,
+             bbox=bbox,
+             metadata={"area": total_area, "merged_count": len(group)},
+         )
+
+     def _rgb_to_hsv(self, r: int, g: int, b: int) -> Tuple[int, int, int]:
+         """Convert RGB (0-255) to HSV (H: 0-179, S: 0-255, V: 0-255)."""
+         r, g, b = r / 255.0, g / 255.0, b / 255.0
+         mx = max(r, g, b)
+         mn = min(r, g, b)
+         diff = mx - mn
+
+         # Hue
+         if diff == 0:
+             h = 0
+         elif mx == r:
+             h = (60 * ((g - b) / diff) + 360) % 360
+         elif mx == g:
+             h = (60 * ((b - r) / diff) + 120) % 360
+         else:
+             h = (60 * ((r - g) / diff) + 240) % 360
+
+         # Saturation
+         s = 0 if mx == 0 else (diff / mx)
+
+         # Value
+         v = mx
+
+         # Convert to OpenCV-style ranges
+         return int(h / 2), int(s * 255), int(v * 255)
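For reviewers who want to exercise the new detector, here is a minimal usage sketch based only on the API visible in this diff. The import path mirrors the file location above (whether ate.detection re-exports these names is not shown here), and frame.jpg is an arbitrary illustrative input; only Pillow is required.

    # Sketch: run ColorDetector on a local JPEG (assumes Pillow is installed).
    from PIL import Image
    from ate.detection.color_detector import ColorDetector, ColorRange

    detector = ColorDetector(min_area=500, merge_distance=20)
    frame = Image.open("frame.jpg")  # any RGB image; path is illustrative

    # Predefined colors; "red" automatically pulls in the wrapped "red2" range.
    for det in detector.detect(frame, target_colors=["red", "blue"]):
        box = det.bbox
        print(f"{det.label} {det.confidence:.2f} at ({box.x}, {box.y}) size {box.width}x{box.height}")

    # Custom HSV range (H 0-179, S/V 0-255, same convention as PREDEFINED_COLORS).
    # Sanity check of that convention: pure green (0, 255, 0) maps to H=60, inside "green" (36-85).
    neon = ColorRange("neon_green", 40, 80, 150, 255, 150, 255)
    detections = detector.detect(frame, color_ranges=[neon])

Because the mask and flood fill run in pure Python over every pixel, downscaling frames before calling detect() is probably worthwhile on the limited hardware the module docstring targets.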