foodforthought-cli 0.2.8-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +12 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/cli.py +855 -4551
  9. ate/client.py +90 -0
  10. ate/commands/__init__.py +168 -0
  11. ate/commands/auth.py +389 -0
  12. ate/commands/bridge.py +448 -0
  13. ate/commands/data.py +185 -0
  14. ate/commands/deps.py +111 -0
  15. ate/commands/generate.py +384 -0
  16. ate/commands/memory.py +907 -0
  17. ate/commands/parts.py +166 -0
  18. ate/commands/primitive.py +399 -0
  19. ate/commands/protocol.py +288 -0
  20. ate/commands/recording.py +524 -0
  21. ate/commands/repo.py +154 -0
  22. ate/commands/simulation.py +291 -0
  23. ate/commands/skill.py +303 -0
  24. ate/commands/skills.py +487 -0
  25. ate/commands/team.py +147 -0
  26. ate/commands/workflow.py +271 -0
  27. ate/detection/__init__.py +38 -0
  28. ate/detection/base.py +142 -0
  29. ate/detection/color_detector.py +399 -0
  30. ate/detection/trash_detector.py +322 -0
  31. ate/drivers/__init__.py +18 -6
  32. ate/drivers/ble_transport.py +405 -0
  33. ate/drivers/mechdog.py +360 -24
  34. ate/drivers/wifi_camera.py +477 -0
  35. ate/interfaces/__init__.py +16 -0
  36. ate/interfaces/base.py +2 -0
  37. ate/interfaces/sensors.py +247 -0
  38. ate/llm_proxy.py +239 -0
  39. ate/memory/__init__.py +35 -0
  40. ate/memory/cloud.py +244 -0
  41. ate/memory/context.py +269 -0
  42. ate/memory/embeddings.py +184 -0
  43. ate/memory/export.py +26 -0
  44. ate/memory/merge.py +146 -0
  45. ate/memory/migrate/__init__.py +34 -0
  46. ate/memory/migrate/base.py +89 -0
  47. ate/memory/migrate/pipeline.py +189 -0
  48. ate/memory/migrate/sources/__init__.py +13 -0
  49. ate/memory/migrate/sources/chroma.py +170 -0
  50. ate/memory/migrate/sources/pinecone.py +120 -0
  51. ate/memory/migrate/sources/qdrant.py +110 -0
  52. ate/memory/migrate/sources/weaviate.py +160 -0
  53. ate/memory/reranker.py +353 -0
  54. ate/memory/search.py +26 -0
  55. ate/memory/store.py +548 -0
  56. ate/recording/__init__.py +42 -3
  57. ate/recording/session.py +12 -2
  58. ate/recording/visual.py +416 -0
  59. ate/robot/__init__.py +142 -0
  60. ate/robot/agentic_servo.py +856 -0
  61. ate/robot/behaviors.py +493 -0
  62. ate/robot/ble_capture.py +1000 -0
  63. ate/robot/ble_enumerate.py +506 -0
  64. ate/robot/calibration.py +88 -3
  65. ate/robot/calibration_state.py +388 -0
  66. ate/robot/commands.py +143 -11
  67. ate/robot/direction_calibration.py +554 -0
  68. ate/robot/discovery.py +104 -2
  69. ate/robot/llm_system_id.py +654 -0
  70. ate/robot/locomotion_calibration.py +508 -0
  71. ate/robot/marker_generator.py +611 -0
  72. ate/robot/perception.py +502 -0
  73. ate/robot/primitives.py +614 -0
  74. ate/robot/profiles.py +6 -0
  75. ate/robot/registry.py +5 -2
  76. ate/robot/servo_mapper.py +1153 -0
  77. ate/robot/skill_upload.py +285 -3
  78. ate/robot/target_calibration.py +500 -0
  79. ate/robot/teach.py +515 -0
  80. ate/robot/types.py +242 -0
  81. ate/robot/visual_labeler.py +9 -0
  82. ate/robot/visual_servo_loop.py +494 -0
  83. ate/robot/visual_servoing.py +570 -0
  84. ate/robot/visual_system_id.py +906 -0
  85. ate/transports/__init__.py +121 -0
  86. ate/transports/base.py +394 -0
  87. ate/transports/ble.py +405 -0
  88. ate/transports/hybrid.py +444 -0
  89. ate/transports/serial.py +345 -0
  90. ate/urdf/__init__.py +30 -0
  91. ate/urdf/capture.py +582 -0
  92. ate/urdf/cloud.py +491 -0
  93. ate/urdf/collision.py +271 -0
  94. ate/urdf/commands.py +708 -0
  95. ate/urdf/depth.py +360 -0
  96. ate/urdf/inertial.py +312 -0
  97. ate/urdf/kinematics.py +330 -0
  98. ate/urdf/lifting.py +415 -0
  99. ate/urdf/meshing.py +300 -0
  100. ate/urdf/models/__init__.py +110 -0
  101. ate/urdf/models/depth_anything.py +253 -0
  102. ate/urdf/models/sam2.py +324 -0
  103. ate/urdf/motion_analysis.py +396 -0
  104. ate/urdf/pipeline.py +468 -0
  105. ate/urdf/scale.py +256 -0
  106. ate/urdf/scan_session.py +411 -0
  107. ate/urdf/segmentation.py +299 -0
  108. ate/urdf/synthesis.py +319 -0
  109. ate/urdf/topology.py +336 -0
  110. ate/urdf/validation.py +371 -0
  111. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +1 -1
  112. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  113. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  114. foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
  115. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  116. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/robot/perception.py
@@ -0,0 +1,502 @@
+"""
+Perception integration for robot behaviors.
+
+Connects camera drivers and detectors to the behavior executor,
+enabling closed-loop control with real vision.
+"""
+
+import time
+from dataclasses import dataclass
+from typing import Optional, List, Tuple, Dict, Any
+
+from ..drivers.wifi_camera import WiFiCamera, WiFiCameraConfig
+from ..detection.color_detector import ColorDetector
+from ..detection.base import Detection, BoundingBox
+
+
+@dataclass
+class PerceptionResult:
+    """Result from perception system."""
+    found: bool
+    label: str = ""
+    confidence: float = 0.0
+    x: float = 0.0  # Normalized x position (0-1, center is 0.5)
+    y: float = 0.0  # Normalized y position (0-1)
+    width: float = 0.0  # Normalized width
+    height: float = 0.0  # Normalized height
+    distance: Optional[float] = None  # Estimated distance in meters
+    raw_detection: Optional[Detection] = None  # Original detection
+
+    @property
+    def center_offset(self) -> float:
+        """How far from center (negative=left, positive=right)."""
+        return self.x - 0.5
+
+    @property
+    def is_centered(self) -> bool:
+        """Is target roughly centered?"""
+        return abs(self.center_offset) < 0.15
+
+    @property
+    def is_close(self) -> bool:
+        """Is target within pickup range?"""
+        return self.distance is not None and self.distance < 0.2
+
+
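The derived properties above turn threshold checks into one-liners for callers. A minimal sketch of reading them, with illustrative values and assuming only the dataclass just defined:

```python
from ate.robot.perception import PerceptionResult

# Illustrative values only: a detection at x=0.70 sits ~0.20 right of center.
result = PerceptionResult(found=True, label="green ball", x=0.70, distance=0.18)
print(result.center_offset)  # ~0.20 (positive = right of center)
print(result.is_centered)    # False: |0.20| is not < 0.15
print(result.is_close)       # True: 0.18 m < 0.2 m
```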
+class PerceptionSystem:
+    """
+    Unified perception system for robot behaviors.
+
+    Coordinates:
+    - Camera capture
+    - Object detection
+    - Distance estimation
+    - Target tracking
+    """
+
+    def __init__(
+        self,
+        camera_ip: str = "192.168.4.1",  # MechDog default AP
+        camera_port: int = 80,
+    ):
+        """
+        Initialize perception system.
+
+        Args:
+            camera_ip: WiFi camera IP address
+            camera_port: HTTP port
+        """
+        self.camera_config = WiFiCameraConfig(
+            ip=camera_ip,
+            port=camera_port,
+        )
+        self.camera: Optional[WiFiCamera] = None
+        self.detector = ColorDetector(
+            min_area=300,  # Smaller objects
+            max_area=150000,  # Allow large close objects
+            merge_distance=30,
+        )
+
+        # Tracking state
+        self.last_detection: Optional[PerceptionResult] = None
+        self.detection_history: List[PerceptionResult] = []
+        self.frame_size: Tuple[int, int] = (640, 480)
+
+        # Distance estimation calibration
+        # Based on object appearing X% of frame at Y meters
+        self.reference_size = 0.15  # Reference: object at 15% of frame width
+        self.reference_distance = 0.30  # is 30cm away
+
+    def connect(self) -> bool:
+        """Connect to camera."""
+        try:
+            self.camera = WiFiCamera(self.camera_config)
+            if self.camera.is_connected():
+                print(f"Camera connected: {self.camera_config.ip}")
+                return True
+            else:
+                print(f"Camera not responding: {self.camera_config.ip}")
+                return False
+        except Exception as e:
+            print(f"Camera connection failed: {e}")
+            return False
+
+    def disconnect(self):
+        """Disconnect from camera."""
+        if self.camera:
+            self.camera.stop_streaming()
+            self.camera = None
+
+    def detect(
+        self,
+        target_type: str = "any",
+        color: Optional[str] = None,
+    ) -> PerceptionResult:
+        """
+        Detect objects in current camera view.
+
+        Args:
+            target_type: Object type to find ("any", "ball", "cube", etc.)
+            color: Color filter ("green", "red", "blue", etc.)
+
+        Returns:
+            PerceptionResult with detection info
+        """
+        if not self.camera:
+            # Simulation mode
+            return self._simulate_detection(target_type, color)
+
+        # Capture frame
+        image = self.camera.get_image()
+        if image.width == 0:
+            return PerceptionResult(found=False)
+
+        self.frame_size = (image.width, image.height)
+
+        # Run detection
+        target_colors = [color] if color else None
+        detections = self.detector.detect(image, target_colors=target_colors)
+
+        if not detections:
+            return PerceptionResult(found=False)
+
+        # Find best detection (largest + most centered)
+        best = self._select_best_detection(detections)
+
+        # Convert to normalized coordinates
+        result = self._detection_to_result(best)
+
+        # Track history
+        self.last_detection = result
+        self.detection_history.append(result)
+        if len(self.detection_history) > 10:
+            self.detection_history.pop(0)
+
+        return result
+
+    def _select_best_detection(self, detections: List[Detection]) -> Detection:
+        """
+        Select the best detection from candidates.
+
+        Prefers: larger area + more centered
+        """
+        if len(detections) == 1:
+            return detections[0]
+
+        def score(det: Detection) -> float:
+            # Normalize center position
+            cx = det.bbox.center[0] / self.frame_size[0]
+            center_offset = abs(cx - 0.5)
+
+            # Score: large area, centered position
+            area_score = det.bbox.area / (self.frame_size[0] * self.frame_size[1])
+            center_score = 1.0 - (center_offset * 2)  # 1.0 when centered, 0 at edges
+
+            return area_score * 0.7 + center_score * 0.3 + det.confidence * 0.2
+
+        return max(detections, key=score)
+
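A quick sanity check on the weighting above: the weights sum to 1.2, so score() produces relative ranks rather than probabilities. Worked through with illustrative numbers:

```python
# Worked example of score() above, illustrative numbers only:
# 640x480 frame; detection covering 30,000 px, perfectly centered, confidence 0.9.
area_score = 30_000 / (640 * 480)              # ~0.098
center_score = 1.0 - abs(320 / 640 - 0.5) * 2  # 1.0 (perfectly centered)
print(area_score * 0.7 + center_score * 0.3 + 0.9 * 0.2)  # ~0.548
```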
+    def _detection_to_result(self, detection: Detection) -> PerceptionResult:
+        """Convert Detection to PerceptionResult with normalized coords."""
+        w, h = self.frame_size
+
+        # Normalize coordinates
+        norm_x = (detection.bbox.x + detection.bbox.width / 2) / w
+        norm_y = (detection.bbox.y + detection.bbox.height / 2) / h
+        norm_w = detection.bbox.width / w
+        norm_h = detection.bbox.height / h
+
+        # Estimate distance from apparent size
+        apparent_size = max(norm_w, norm_h)
+        distance = self._estimate_distance(apparent_size)
+
+        return PerceptionResult(
+            found=True,
+            label=detection.label,
+            confidence=detection.confidence,
+            x=norm_x,
+            y=norm_y,
+            width=norm_w,
+            height=norm_h,
+            distance=distance,
+            raw_detection=detection,
+        )
+
+    def _estimate_distance(self, apparent_size: float) -> float:
+        """
+        Estimate distance from apparent object size.
+
+        Uses inverse relationship: distance = k / size
+        Calibrated from reference measurement.
+        """
+        if apparent_size < 0.01:
+            return 2.0  # Very far (or detection error)
+
+        # k = reference_distance * reference_size
+        k = self.reference_distance * self.reference_size
+
+        distance = k / apparent_size
+
+        # Clamp to reasonable range
+        return max(0.05, min(2.0, distance))
+
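With the default calibration (an object spanning 15% of the frame width is 30 cm away), k = 0.30 × 0.15 = 0.045, and the formula works out as follows:

```python
# Worked example of the default calibration above.
k = 0.30 * 0.15             # reference_distance * reference_size = 0.045
print(round(k / 0.15, 2))   # 0.30 m at the reference size (by construction)
print(round(k / 0.30, 2))   # 0.15 m when the object spans 30% of the frame
print(round(k / 0.045, 2))  # 1.0 m when it spans only 4.5%
```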
+    def _simulate_detection(
+        self,
+        target_type: str,
+        color: Optional[str],
+    ) -> PerceptionResult:
+        """Simulate detection for testing without camera."""
+        # Simulate a green ball slightly right of center
+        return PerceptionResult(
+            found=True,
+            label=f"{color or ''} {target_type}".strip(),
+            confidence=0.85,
+            x=0.55,  # Slightly right
+            y=0.6,  # Lower half
+            width=0.12,
+            height=0.12,
+            distance=0.25,  # 25cm away
+        )
+
+    def get_alignment_correction(self, target: PerceptionResult) -> str:
+        """
+        Get alignment correction direction.
+
+        Returns:
+            "left", "right", "centered", or "lost"
+        """
+        if not target.found:
+            return "lost"
+
+        offset = target.center_offset
+        threshold = 0.1
+
+        if abs(offset) < threshold:
+            return "centered"
+        elif offset > 0:
+            return "right"  # Target is right, need to turn right
+        else:
+            return "left"  # Target is left, need to turn left
+
+    def is_target_stable(self, min_frames: int = 3) -> bool:
+        """
+        Check if target detection is stable across frames.
+
+        Helps filter out noise and false positives.
+        """
+        if len(self.detection_history) < min_frames:
+            return False
+
+        recent = self.detection_history[-min_frames:]
+
+        # All must be found
+        if not all(r.found for r in recent):
+            return False
+
+        # Position should be consistent (within 20% of frame)
+        x_coords = [r.x for r in recent]
+        y_coords = [r.y for r in recent]
+
+        x_range = max(x_coords) - min(x_coords)
+        y_range = max(y_coords) - min(y_coords)
+
+        return x_range < 0.2 and y_range < 0.2
+
+
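Taken together, PerceptionSystem supports a simple poll-and-gate pattern. A minimal sketch in simulation mode; note that detect() only records history on the camera path, so the stability filter is seeded by hand here with illustrative positions:

```python
from ate.robot.perception import PerceptionResult, PerceptionSystem

# No connect() call, so detect() returns the canned green-ball detection.
perception = PerceptionSystem()
result = perception.detect("ball", "green")
print(perception.get_alignment_correction(result))  # "centered" (offset 0.05 < 0.1)

# Seed the history by hand to exercise the stability filter.
for x in (0.54, 0.55, 0.56):
    perception.detection_history.append(PerceptionResult(found=True, x=x, y=0.6))
print(perception.is_target_stable(min_frames=3))  # True: x/y drift well under 0.2
```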
+class LivePerceptionExecutor:
+    """
+    Executes behaviors with live perception feedback.
+
+    Replaces simulated detection in behaviors.py with real camera input.
+    """
+
+    def __init__(
+        self,
+        robot_interface,
+        camera_ip: str = "192.168.4.1",
+    ):
+        """
+        Initialize live perception executor.
+
+        Args:
+            robot_interface: Robot driver (MechDogDriver)
+            camera_ip: WiFi camera IP
+        """
+        self.robot = robot_interface
+        self.perception = PerceptionSystem(camera_ip=camera_ip)
+
+        # Import primitives for skill execution
+        from .primitives import PrimitiveLibrary
+        self.primitives = PrimitiveLibrary(robot_interface)
+
+        # Configuration
+        self.approach_distance = 0.15  # meters
+        self.alignment_threshold = 0.12
+        self.max_approach_steps = 20
+        self.step_delay = 0.4
+
+    def connect(self) -> bool:
+        """Connect to camera."""
+        return self.perception.connect()
+
+    def disconnect(self):
+        """Disconnect from camera."""
+        self.perception.disconnect()
+
+    def pickup_green_ball(self) -> bool:
+        """
+        Find and pick up a green ball.
+
+        Full behavior with real perception:
+        1. Detect green ball
+        2. Align to center it in view
+        3. Approach until close
+        4. Execute pickup sequence
+        5. Verify success
+
+        Returns:
+            True if successful
+        """
+        print("\n" + "=" * 50)
+        print("BEHAVIOR: Pickup Green Ball (Live Perception)")
+        print("=" * 50)
+
+        # Step 1: Initial detection
+        print("\n[1] Scanning for green ball...")
+        target = self.perception.detect("ball", "green")
+
+        if not target.found:
+            print(" No green ball found!")
+            return False
+
+        print(f" Found! x={target.x:.2f}, dist={target.distance:.2f}m")
+
+        # Step 2: Align
+        print("\n[2] Aligning to target...")
+        for i in range(10):
+            target = self.perception.detect("ball", "green")
+            if not target.found:
+                print(" Lost target during alignment!")
+                return False
+
+            direction = self.perception.get_alignment_correction(target)
+
+            if direction == "centered":
+                print(" Aligned!")
+                break
+            elif direction == "left":
+                print(f" Turning left (offset: {target.center_offset:.2f})")
+                self._turn_left()
+            elif direction == "right":
+                print(f" Turning right (offset: {target.center_offset:.2f})")
+                self._turn_right()
+
+            time.sleep(0.3)
+        else:
+            print(" Failed to align after 10 attempts")
+            return False
+
+        # Step 3: Approach
+        print("\n[3] Approaching target...")
+        for step in range(self.max_approach_steps):
+            target = self.perception.detect("ball", "green")
+
+            if not target.found:
+                print(" Lost target during approach!")
+                return False
+
+            if target.distance <= self.approach_distance:
+                print(f" Close enough! ({target.distance:.2f}m)")
+                break
+
+            print(f" Step {step+1}: dist={target.distance:.2f}m")
+            self._step_forward()
+
+            # Re-align if needed
+            if abs(target.center_offset) > self.alignment_threshold:
+                if target.center_offset > 0:
+                    self._turn_right(angle=5)
+                else:
+                    self._turn_left(angle=5)
+
+            time.sleep(self.step_delay)
+        else:
+            print(" Too many steps, giving up")
+            return False
+
+        # Step 4: Execute pickup sequence
+        print("\n[4] Executing pickup sequence...")
+
+        print(" Tilting forward...")
+        self.primitives.execute("tilt_forward")
+        time.sleep(0.3)
+
+        print(" Ready to grab...")
+        self.primitives.execute("ready_to_grab")
+        time.sleep(0.3)
+
+        print(" Closing gripper...")
+        self.primitives.execute("gripper_close")
+        time.sleep(0.3)
+
+        print(" Lifting...")
+        self.primitives.execute("arm_carry")
+        time.sleep(0.2)
+
+        print(" Standing...")
+        self.primitives.execute("stand")
+        time.sleep(0.3)
+
+        # Step 5: Verify (visual check - ball should not be visible on ground)
+        print("\n[5] Verifying pickup...")
+        time.sleep(0.5)
+
+        verify_result = self.perception.detect("ball", "green")
+
+        if not verify_result.found or verify_result.distance > 0.5:
+            print(" SUCCESS! Ball no longer visible on ground.")
+            return True
+        else:
+            print(" Ball still visible - pickup may have failed")
+            return False
+
+    def _step_forward(self):
+        """Take one step forward."""
+        if hasattr(self.robot, 'walk'):
+            self.robot.walk('forward', 1)
+        else:
+            print(" [Simulated: step forward]")
+
+    def _turn_left(self, angle: int = 15):
+        """Turn left."""
+        if hasattr(self.robot, 'turn'):
+            self.robot.turn('left', angle)
+        else:
+            print(f" [Simulated: turn left {angle}deg]")
+
+    def _turn_right(self, angle: int = 15):
+        """Turn right."""
+        if hasattr(self.robot, 'turn'):
+            self.robot.turn('right', angle)
+        else:
+            print(f" [Simulated: turn right {angle}deg]")
+
+
+def run_perception_demo():
+    """Demo perception system."""
+    print("=" * 50)
+    print("PERCEPTION SYSTEM DEMO")
+    print("=" * 50)
+
+    # Create perception system in simulation mode (no camera)
+    perception = PerceptionSystem()
+
+    print("\nTesting simulated detection...")
+    result = perception.detect("ball", "green")
+
+    print(f"\nDetection result:")
+    print(f" Found: {result.found}")
+    print(f" Label: {result.label}")
+    print(f" Position: ({result.x:.2f}, {result.y:.2f})")
+    print(f" Distance: {result.distance:.2f}m")
+    print(f" Centered: {result.is_centered}")
+    print(f" Alignment: {perception.get_alignment_correction(result)}")
+
+    # Test live executor in simulation
+    print("\n" + "-" * 50)
+    print("Testing LivePerceptionExecutor (simulation)...")
+
+    executor = LivePerceptionExecutor(
+        robot_interface=None,  # No real robot
+        camera_ip="192.168.4.1",
+    )
+
+    # Run pickup behavior (will use simulated perception)
+    success = executor.pickup_green_ball()
+
+    print("\n" + "-" * 50)
+    print(f"Pickup result: {'SUCCESS' if success else 'FAILED'}")
+
+
+if __name__ == "__main__":
+    run_perception_demo()
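For live hardware the same entry points apply. A hedged sketch: MechDogDriver is named in the __init__ docstring and ate/drivers/mechdog.py appears in this release's file list, but its constructor arguments are an assumption this diff does not confirm:

```python
# Hedged sketch of live usage; the no-argument MechDogDriver() call is assumed.
from ate.drivers.mechdog import MechDogDriver
from ate.robot.perception import LivePerceptionExecutor

robot = MechDogDriver()
executor = LivePerceptionExecutor(robot_interface=robot, camera_ip="192.168.4.1")
if executor.connect():  # verify the camera responds before acting
    try:
        executor.pickup_green_ball()
    finally:
        executor.disconnect()
```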