foodforthought-cli 0.2.8-py3-none-any.whl → 0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +12 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/cli.py +855 -4551
  9. ate/client.py +90 -0
  10. ate/commands/__init__.py +168 -0
  11. ate/commands/auth.py +389 -0
  12. ate/commands/bridge.py +448 -0
  13. ate/commands/data.py +185 -0
  14. ate/commands/deps.py +111 -0
  15. ate/commands/generate.py +384 -0
  16. ate/commands/memory.py +907 -0
  17. ate/commands/parts.py +166 -0
  18. ate/commands/primitive.py +399 -0
  19. ate/commands/protocol.py +288 -0
  20. ate/commands/recording.py +524 -0
  21. ate/commands/repo.py +154 -0
  22. ate/commands/simulation.py +291 -0
  23. ate/commands/skill.py +303 -0
  24. ate/commands/skills.py +487 -0
  25. ate/commands/team.py +147 -0
  26. ate/commands/workflow.py +271 -0
  27. ate/detection/__init__.py +38 -0
  28. ate/detection/base.py +142 -0
  29. ate/detection/color_detector.py +402 -0
  30. ate/detection/trash_detector.py +322 -0
  31. ate/drivers/__init__.py +18 -6
  32. ate/drivers/ble_transport.py +405 -0
  33. ate/drivers/mechdog.py +360 -24
  34. ate/drivers/wifi_camera.py +477 -0
  35. ate/interfaces/__init__.py +16 -0
  36. ate/interfaces/base.py +2 -0
  37. ate/interfaces/sensors.py +247 -0
  38. ate/llm_proxy.py +239 -0
  39. ate/memory/__init__.py +35 -0
  40. ate/memory/cloud.py +244 -0
  41. ate/memory/context.py +269 -0
  42. ate/memory/embeddings.py +184 -0
  43. ate/memory/export.py +26 -0
  44. ate/memory/merge.py +146 -0
  45. ate/memory/migrate/__init__.py +34 -0
  46. ate/memory/migrate/base.py +89 -0
  47. ate/memory/migrate/pipeline.py +189 -0
  48. ate/memory/migrate/sources/__init__.py +13 -0
  49. ate/memory/migrate/sources/chroma.py +170 -0
  50. ate/memory/migrate/sources/pinecone.py +120 -0
  51. ate/memory/migrate/sources/qdrant.py +110 -0
  52. ate/memory/migrate/sources/weaviate.py +160 -0
  53. ate/memory/reranker.py +353 -0
  54. ate/memory/search.py +26 -0
  55. ate/memory/store.py +548 -0
  56. ate/recording/__init__.py +42 -3
  57. ate/recording/session.py +12 -2
  58. ate/recording/visual.py +416 -0
  59. ate/robot/__init__.py +142 -0
  60. ate/robot/agentic_servo.py +856 -0
  61. ate/robot/behaviors.py +493 -0
  62. ate/robot/ble_capture.py +1000 -0
  63. ate/robot/ble_enumerate.py +506 -0
  64. ate/robot/calibration.py +88 -3
  65. ate/robot/calibration_state.py +388 -0
  66. ate/robot/commands.py +143 -11
  67. ate/robot/direction_calibration.py +554 -0
  68. ate/robot/discovery.py +104 -2
  69. ate/robot/llm_system_id.py +654 -0
  70. ate/robot/locomotion_calibration.py +508 -0
  71. ate/robot/marker_generator.py +611 -0
  72. ate/robot/perception.py +502 -0
  73. ate/robot/primitives.py +614 -0
  74. ate/robot/profiles.py +6 -0
  75. ate/robot/registry.py +5 -2
  76. ate/robot/servo_mapper.py +1153 -0
  77. ate/robot/skill_upload.py +285 -3
  78. ate/robot/target_calibration.py +500 -0
  79. ate/robot/teach.py +515 -0
  80. ate/robot/types.py +242 -0
  81. ate/robot/visual_labeler.py +9 -0
  82. ate/robot/visual_servo_loop.py +494 -0
  83. ate/robot/visual_servoing.py +570 -0
  84. ate/robot/visual_system_id.py +906 -0
  85. ate/transports/__init__.py +121 -0
  86. ate/transports/base.py +394 -0
  87. ate/transports/ble.py +405 -0
  88. ate/transports/hybrid.py +444 -0
  89. ate/transports/serial.py +345 -0
  90. ate/urdf/__init__.py +30 -0
  91. ate/urdf/capture.py +582 -0
  92. ate/urdf/cloud.py +491 -0
  93. ate/urdf/collision.py +271 -0
  94. ate/urdf/commands.py +708 -0
  95. ate/urdf/depth.py +360 -0
  96. ate/urdf/inertial.py +312 -0
  97. ate/urdf/kinematics.py +330 -0
  98. ate/urdf/lifting.py +415 -0
  99. ate/urdf/meshing.py +300 -0
  100. ate/urdf/models/__init__.py +110 -0
  101. ate/urdf/models/depth_anything.py +253 -0
  102. ate/urdf/models/sam2.py +324 -0
  103. ate/urdf/motion_analysis.py +396 -0
  104. ate/urdf/pipeline.py +468 -0
  105. ate/urdf/scale.py +256 -0
  106. ate/urdf/scan_session.py +411 -0
  107. ate/urdf/segmentation.py +299 -0
  108. ate/urdf/synthesis.py +319 -0
  109. ate/urdf/topology.py +336 -0
  110. ate/urdf/validation.py +371 -0
  111. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/METADATA +1 -1
  112. foodforthought_cli-0.3.1.dist-info/RECORD +166 -0
  113. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/WHEEL +1 -1
  114. foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
  115. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/entry_points.txt +0 -0
  116. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/top_level.txt +0 -0
ate/detection/color_detector.py
@@ -0,0 +1,402 @@
+ """
+ Color-based object detection.
+
+ Simple but effective detection using color segmentation.
+ Works without ML dependencies - just needs PIL/Pillow.
+
+ Great for:
+ - Detecting colored objects (red balls, blue bins, etc.)
+ - Prototyping before using ML models
+ - Real-time detection on limited hardware
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import List, Tuple, Optional, Any, Dict
+ import io
+
+ from .base import DetectorBase, Detection, BoundingBox
+
+ try:
+     from PIL import Image as PILImage
+     HAS_PIL = True
+ except ImportError:
+     PILImage = None  # type: ignore[assignment]
+     HAS_PIL = False
+
+
+ @dataclass
+ class ColorRange:
+     """
+     HSV color range for detection.
+
+     HSV is better than RGB for color detection because
+     it separates color (hue) from brightness.
+     """
+     name: str
+     h_min: int  # Hue min (0-179)
+     h_max: int  # Hue max (0-179)
+     s_min: int  # Saturation min (0-255)
+     s_max: int  # Saturation max (0-255)
+     v_min: int  # Value min (0-255)
+     v_max: int  # Value max (0-255)
+
+
+ # Predefined color ranges (HSV)
+ PREDEFINED_COLORS: Dict[str, ColorRange] = {
+     "red": ColorRange("red", 0, 10, 100, 255, 100, 255),
+     "red2": ColorRange("red2", 160, 179, 100, 255, 100, 255),  # Red wraps around
+     "orange": ColorRange("orange", 11, 25, 100, 255, 100, 255),
+     "yellow": ColorRange("yellow", 26, 35, 100, 255, 100, 255),
+     "green": ColorRange("green", 36, 85, 50, 255, 50, 255),
+     "blue": ColorRange("blue", 86, 125, 50, 255, 50, 255),
+     "purple": ColorRange("purple", 126, 155, 50, 255, 50, 255),
+     "pink": ColorRange("pink", 156, 165, 50, 255, 100, 255),
+     "white": ColorRange("white", 0, 179, 0, 30, 200, 255),
+     "black": ColorRange("black", 0, 179, 0, 255, 0, 50),
+     "gray": ColorRange("gray", 0, 179, 0, 30, 50, 200),
+ }
+
+
+ class ColorDetector(DetectorBase):
+     """
+     Detect objects by color using HSV thresholding.
+
+     Example:
+         detector = ColorDetector()
+
+         # Detect red objects
+         detections = detector.detect(image, target_colors=["red"])
+
+         # Or use custom color range
+         custom = ColorRange("bright_green", 40, 80, 150, 255, 150, 255)
+         detections = detector.detect(image, color_ranges=[custom])
+     """
+
+     def __init__(
+         self,
+         min_area: int = 500,
+         max_area: int = 100000,
+         merge_distance: int = 20,
+     ):
+         """
+         Initialize color detector.
+
+         Args:
+             min_area: Minimum detection area in pixels
+             max_area: Maximum detection area in pixels
+             merge_distance: Distance to merge nearby detections
+         """
+         if not HAS_PIL:
+             raise ImportError("ColorDetector requires Pillow. Install with: pip install Pillow")
+
+         self.min_area = min_area
+         self.max_area = max_area
+         self.merge_distance = merge_distance
+
+     def detect(
+         self,
+         image: Any,
+         target_colors: List[str] = None,
+         color_ranges: List[ColorRange] = None,
+         **kwargs
+     ) -> List[Detection]:
+         """
+         Detect colored objects.
+
+         Args:
+             image: Input image (Image dataclass, PIL Image, or bytes)
+             target_colors: List of color names ["red", "blue", ...]
+             color_ranges: Custom ColorRange objects
+
+         Returns:
+             List of Detection objects
+         """
+         # Convert image to PIL
+         pil_image = self._to_pil(image)
+         if pil_image is None:
+             return []
+
+         # Build list of color ranges to detect
+         ranges = []
+         if target_colors:
+             for color in target_colors:
+                 if color in PREDEFINED_COLORS:
+                     ranges.append(PREDEFINED_COLORS[color])
+                     # Handle red wraparound
+                     if color == "red":
+                         ranges.append(PREDEFINED_COLORS["red2"])
+
+         if color_ranges:
+             ranges.extend(color_ranges)
+
+         if not ranges:
+             # Default: detect common trash colors
+             ranges = [
+                 PREDEFINED_COLORS["red"],
+                 PREDEFINED_COLORS["red2"],
+                 PREDEFINED_COLORS["blue"],
+                 PREDEFINED_COLORS["green"],
+                 PREDEFINED_COLORS["yellow"],
+             ]
+
+         # Detect for each color range
+         detections = []
+         for color_range in ranges:
+             dets = self._detect_color(pil_image, color_range)
+             detections.extend(dets)
+
+         # Merge overlapping detections
+         detections = self._merge_detections(detections)
+
+         return detections
+
+     def detect_class(self, image: Any, class_name: str, **kwargs) -> List[Detection]:
+         """Detect objects of a specific color class."""
+         return self.detect(image, target_colors=[class_name], **kwargs)
+
+     def _to_pil(self, image: Any) -> Optional[PILImage.Image]:
+         """Convert various image formats to PIL Image."""
+         if isinstance(image, PILImage.Image):
+             return image
+
+         # Handle our Image dataclass
+         if hasattr(image, 'data') and hasattr(image, 'width'):
+             if image.width == 0 or image.height == 0:
+                 return None
+
+             if image.encoding == "jpeg":
+                 return PILImage.open(io.BytesIO(image.data))
+             elif image.encoding == "rgb8":
+                 return PILImage.frombytes("RGB", (image.width, image.height), image.data)
+             else:
+                 # Try to decode as JPEG
+                 try:
+                     return PILImage.open(io.BytesIO(image.data))
+                 except Exception:
+                     return None
+
+         # Handle raw bytes (assume JPEG)
+         if isinstance(image, bytes):
+             try:
+                 return PILImage.open(io.BytesIO(image))
+             except Exception:
+                 return None
+
+         return None
+
+     def _detect_color(self, image: PILImage.Image, color_range: ColorRange) -> List[Detection]:
+         """
+         Detect regions matching a color range.
+
+         Uses a simple but effective approach:
+         1. Convert to HSV
+         2. Threshold by color range
+         3. Find connected components
+         4. Filter by size
+         """
+         # Convert to RGB if needed
+         if image.mode != "RGB":
+             image = image.convert("RGB")
+
+         width, height = image.size
+         pixels = image.load()
+
+         # Create binary mask
+         mask = [[False] * width for _ in range(height)]
+
+         for y in range(height):
+             for x in range(width):
+                 r, g, b = pixels[x, y]
+                 h, s, v = self._rgb_to_hsv(r, g, b)
+
+                 # Check if in range
+                 in_range = (
+                     color_range.s_min <= s <= color_range.s_max and
+                     color_range.v_min <= v <= color_range.v_max
+                 )
+
+                 # Handle hue wraparound for red
+                 if color_range.h_min <= color_range.h_max:
+                     in_range = in_range and color_range.h_min <= h <= color_range.h_max
+                 else:
+                     in_range = in_range and (h >= color_range.h_min or h <= color_range.h_max)
+
+                 mask[y][x] = in_range
+
+         # Find connected components (simple flood fill)
+         visited = [[False] * width for _ in range(height)]
+         components = []
+
+         for y in range(height):
+             for x in range(width):
+                 if mask[y][x] and not visited[y][x]:
+                     # Flood fill to find component
+                     component = self._flood_fill(mask, visited, x, y, width, height)
+                     if component:
+                         components.append(component)
+
+         # Convert components to detections
+         detections = []
+         for points in components:
+             area = len(points)
+             if area < self.min_area or area > self.max_area:
+                 continue
+
+             # Calculate bounding box
+             min_x = min(p[0] for p in points)
+             max_x = max(p[0] for p in points)
+             min_y = min(p[1] for p in points)
+             max_y = max(p[1] for p in points)
+
+             bbox = BoundingBox(
+                 x=min_x,
+                 y=min_y,
+                 width=max_x - min_x + 1,
+                 height=max_y - min_y + 1,
+             )
+
+             # Confidence based on area and aspect ratio
+             aspect = bbox.width / max(bbox.height, 1)
+             aspect_score = 1.0 - min(abs(aspect - 1.0), 1.0)  # Prefer square-ish
+             area_score = min(area / self.max_area, 1.0)
+             confidence = 0.5 + 0.3 * aspect_score + 0.2 * area_score
+
+             detections.append(Detection(
+                 label=color_range.name.replace("2", ""),  # Remove "red2" -> "red"
+                 confidence=confidence,
+                 bbox=bbox,
+                 metadata={"area": area, "color": color_range.name},
+             ))
+
+         return detections
+
+     def _flood_fill(
+         self,
+         mask: List[List[bool]],
+         visited: List[List[bool]],
+         start_x: int,
+         start_y: int,
+         width: int,
+         height: int,
+         max_points: int = 50000,
+     ) -> List[Tuple[int, int]]:
+         """Flood fill to find connected component."""
+         stack = [(start_x, start_y)]
+         points = []
+
+         while stack and len(points) < max_points:
+             x, y = stack.pop()
+
+             if x < 0 or x >= width or y < 0 or y >= height:
+                 continue
+             if visited[y][x] or not mask[y][x]:
+                 continue
+
+             visited[y][x] = True
+             points.append((x, y))
+
+             # Add neighbors (4-connected)
+             stack.append((x + 1, y))
+             stack.append((x - 1, y))
+             stack.append((x, y + 1))
+             stack.append((x, y - 1))
+
+         return points
+
+     def _merge_detections(self, detections: List[Detection]) -> List[Detection]:
+         """Merge overlapping detections of the same color."""
+         if len(detections) <= 1:
+             return detections
+
+         merged = []
+         used = [False] * len(detections)
+
+         for i, det1 in enumerate(detections):
+             if used[i]:
+                 continue
+
+             # Find all overlapping detections of same label
+             group = [det1]
+             used[i] = True
+
+             for j, det2 in enumerate(detections[i + 1:], i + 1):
+                 if used[j]:
+                     continue
+                 if det1.label != det2.label:
+                     continue
+
+                 # Check if close enough to merge
+                 dist = self._box_distance(det1.bbox, det2.bbox)
+                 if dist < self.merge_distance:
+                     group.append(det2)
+                     used[j] = True
+
+             # Merge group into single detection
+             if len(group) == 1:
+                 merged.append(det1)
+             else:
+                 merged.append(self._merge_group(group))
+
+         return merged
+
+     def _box_distance(self, b1: BoundingBox, b2: BoundingBox) -> float:
+         """Calculate distance between bounding boxes."""
+         c1 = b1.center
+         c2 = b2.center
+         return ((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2) ** 0.5
+
+     def _merge_group(self, group: List[Detection]) -> Detection:
+         """Merge multiple detections into one."""
+         # Calculate combined bounding box
+         min_x = min(d.bbox.x for d in group)
+         min_y = min(d.bbox.y for d in group)
+         max_x = max(d.bbox.x + d.bbox.width for d in group)
+         max_y = max(d.bbox.y + d.bbox.height for d in group)
+
+         bbox = BoundingBox(
+             x=min_x,
+             y=min_y,
+             width=max_x - min_x,
+             height=max_y - min_y,
+         )
+
+         # Average confidence
+         confidence = sum(d.confidence for d in group) / len(group)
+
+         # Sum area
+         total_area = sum(d.metadata.get("area", 0) for d in group)
+
+         return Detection(
+             label=group[0].label,
+             confidence=confidence,
+             bbox=bbox,
+             metadata={"area": total_area, "merged_count": len(group)},
+         )
+
+     def _rgb_to_hsv(self, r: int, g: int, b: int) -> Tuple[int, int, int]:
+         """Convert RGB (0-255) to HSV (H: 0-179, S: 0-255, V: 0-255)."""
+         r, g, b = r / 255.0, g / 255.0, b / 255.0
+         mx = max(r, g, b)
+         mn = min(r, g, b)
+         diff = mx - mn
+
+         # Hue
+         if diff == 0:
+             h = 0
+         elif mx == r:
+             h = (60 * ((g - b) / diff) + 360) % 360
+         elif mx == g:
+             h = (60 * ((b - r) / diff) + 120) % 360
+         else:
+             h = (60 * ((r - g) / diff) + 240) % 360
+
+         # Saturation
+         s = 0 if mx == 0 else (diff / mx)
+
+         # Value
+         v = mx
+
+         # Convert to OpenCV-style ranges
+         return int(h / 2), int(s * 255), int(v * 255)
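
The _rgb_to_hsv helper at the end of the file uses OpenCV-style output ranges (H 0-179, S 0-255, V 0-255), which is why the predefined bands above top out at hue 179. As a quick cross-check of that scaling, the same numbers can be reproduced with the standard library's colorsys; this is an illustrative sketch, not code from the package, and rgb_to_opencv_hsv is a hypothetical helper name:

import colorsys

def rgb_to_opencv_hsv(r: int, g: int, b: int):
    # Mirror the ColorDetector._rgb_to_hsv output ranges: H 0-179, S 0-255, V 0-255.
    h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
    return int(h * 360 / 2), int(s * 255), int(v * 255)

print(rgb_to_opencv_hsv(255, 0, 0))   # (0, 255, 255)   -> inside the predefined "red" band (h 0-10)
print(rgb_to_opencv_hsv(0, 0, 255))   # (120, 255, 255) -> inside the predefined "blue" band (h 86-125)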
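
Putting the new module together, a minimal usage sketch, assuming the wheel installs it as ate.detection.color_detector with the ColorDetector and ColorRange names shown above and that Pillow is available; the synthetic frame and the "lime" range are illustrative only:

from PIL import Image

from ate.detection.color_detector import ColorDetector, ColorRange

# Synthetic 64x64 white frame with a 32x32 red patch (area 1024, above the default min_area of 500).
frame = Image.new("RGB", (64, 64), (255, 255, 255))
for x in range(16, 48):
    for y in range(16, 48):
        frame.putpixel((x, y), (220, 30, 30))

detector = ColorDetector()
for det in detector.detect(frame, target_colors=["red"]):
    print(det.label, round(det.confidence, 2), det.bbox)

# Custom ranges work the same way; "lime" is an arbitrary HSV band for illustration.
lime = ColorRange("lime", 40, 80, 150, 255, 150, 255)
print(detector.detect(frame, color_ranges=[lime]))   # [] - nothing green in this frame

Note that the mask and flood fill run in pure Python, so detection cost scales with pixel count; small frames are what keep it responsive on the limited hardware the module docstring targets.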