foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +100 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/behaviors/common.py +686 -0
  9. ate/behaviors/tree.py +454 -0
  10. ate/cli.py +855 -3995
  11. ate/client.py +90 -0
  12. ate/commands/__init__.py +168 -0
  13. ate/commands/auth.py +389 -0
  14. ate/commands/bridge.py +448 -0
  15. ate/commands/data.py +185 -0
  16. ate/commands/deps.py +111 -0
  17. ate/commands/generate.py +384 -0
  18. ate/commands/memory.py +907 -0
  19. ate/commands/parts.py +166 -0
  20. ate/commands/primitive.py +399 -0
  21. ate/commands/protocol.py +288 -0
  22. ate/commands/recording.py +524 -0
  23. ate/commands/repo.py +154 -0
  24. ate/commands/simulation.py +291 -0
  25. ate/commands/skill.py +303 -0
  26. ate/commands/skills.py +487 -0
  27. ate/commands/team.py +147 -0
  28. ate/commands/workflow.py +271 -0
  29. ate/detection/__init__.py +38 -0
  30. ate/detection/base.py +142 -0
  31. ate/detection/color_detector.py +399 -0
  32. ate/detection/trash_detector.py +322 -0
  33. ate/drivers/__init__.py +39 -0
  34. ate/drivers/ble_transport.py +405 -0
  35. ate/drivers/mechdog.py +942 -0
  36. ate/drivers/wifi_camera.py +477 -0
  37. ate/interfaces/__init__.py +187 -0
  38. ate/interfaces/base.py +273 -0
  39. ate/interfaces/body.py +267 -0
  40. ate/interfaces/detection.py +282 -0
  41. ate/interfaces/locomotion.py +422 -0
  42. ate/interfaces/manipulation.py +408 -0
  43. ate/interfaces/navigation.py +389 -0
  44. ate/interfaces/perception.py +362 -0
  45. ate/interfaces/sensors.py +247 -0
  46. ate/interfaces/types.py +371 -0
  47. ate/llm_proxy.py +239 -0
  48. ate/mcp_server.py +387 -0
  49. ate/memory/__init__.py +35 -0
  50. ate/memory/cloud.py +244 -0
  51. ate/memory/context.py +269 -0
  52. ate/memory/embeddings.py +184 -0
  53. ate/memory/export.py +26 -0
  54. ate/memory/merge.py +146 -0
  55. ate/memory/migrate/__init__.py +34 -0
  56. ate/memory/migrate/base.py +89 -0
  57. ate/memory/migrate/pipeline.py +189 -0
  58. ate/memory/migrate/sources/__init__.py +13 -0
  59. ate/memory/migrate/sources/chroma.py +170 -0
  60. ate/memory/migrate/sources/pinecone.py +120 -0
  61. ate/memory/migrate/sources/qdrant.py +110 -0
  62. ate/memory/migrate/sources/weaviate.py +160 -0
  63. ate/memory/reranker.py +353 -0
  64. ate/memory/search.py +26 -0
  65. ate/memory/store.py +548 -0
  66. ate/recording/__init__.py +83 -0
  67. ate/recording/demonstration.py +378 -0
  68. ate/recording/session.py +415 -0
  69. ate/recording/upload.py +304 -0
  70. ate/recording/visual.py +416 -0
  71. ate/recording/wrapper.py +95 -0
  72. ate/robot/__init__.py +221 -0
  73. ate/robot/agentic_servo.py +856 -0
  74. ate/robot/behaviors.py +493 -0
  75. ate/robot/ble_capture.py +1000 -0
  76. ate/robot/ble_enumerate.py +506 -0
  77. ate/robot/calibration.py +668 -0
  78. ate/robot/calibration_state.py +388 -0
  79. ate/robot/commands.py +3735 -0
  80. ate/robot/direction_calibration.py +554 -0
  81. ate/robot/discovery.py +441 -0
  82. ate/robot/introspection.py +330 -0
  83. ate/robot/llm_system_id.py +654 -0
  84. ate/robot/locomotion_calibration.py +508 -0
  85. ate/robot/manager.py +270 -0
  86. ate/robot/marker_generator.py +611 -0
  87. ate/robot/perception.py +502 -0
  88. ate/robot/primitives.py +614 -0
  89. ate/robot/profiles.py +281 -0
  90. ate/robot/registry.py +322 -0
  91. ate/robot/servo_mapper.py +1153 -0
  92. ate/robot/skill_upload.py +675 -0
  93. ate/robot/target_calibration.py +500 -0
  94. ate/robot/teach.py +515 -0
  95. ate/robot/types.py +242 -0
  96. ate/robot/visual_labeler.py +1048 -0
  97. ate/robot/visual_servo_loop.py +494 -0
  98. ate/robot/visual_servoing.py +570 -0
  99. ate/robot/visual_system_id.py +906 -0
  100. ate/transports/__init__.py +121 -0
  101. ate/transports/base.py +394 -0
  102. ate/transports/ble.py +405 -0
  103. ate/transports/hybrid.py +444 -0
  104. ate/transports/serial.py +345 -0
  105. ate/urdf/__init__.py +30 -0
  106. ate/urdf/capture.py +582 -0
  107. ate/urdf/cloud.py +491 -0
  108. ate/urdf/collision.py +271 -0
  109. ate/urdf/commands.py +708 -0
  110. ate/urdf/depth.py +360 -0
  111. ate/urdf/inertial.py +312 -0
  112. ate/urdf/kinematics.py +330 -0
  113. ate/urdf/lifting.py +415 -0
  114. ate/urdf/meshing.py +300 -0
  115. ate/urdf/models/__init__.py +110 -0
  116. ate/urdf/models/depth_anything.py +253 -0
  117. ate/urdf/models/sam2.py +324 -0
  118. ate/urdf/motion_analysis.py +396 -0
  119. ate/urdf/pipeline.py +468 -0
  120. ate/urdf/scale.py +256 -0
  121. ate/urdf/scan_session.py +411 -0
  122. ate/urdf/segmentation.py +299 -0
  123. ate/urdf/synthesis.py +319 -0
  124. ate/urdf/topology.py +336 -0
  125. ate/urdf/validation.py +371 -0
  126. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
  127. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  128. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  129. foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
  130. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  131. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/robot/behaviors.py ADDED
@@ -0,0 +1,493 @@
1
+ """
2
+ Behavior Executor - Runs complex skills with perception integration.
3
+
4
+ Behaviors are the highest level of skill:
5
+ - Integrate camera/detection
6
+ - Have loops and conditions
7
+ - Verify success/failure
8
+ - Handle recovery
9
+
10
+ Example flow:
11
+ 1. detect_green_ball() → returns target location
12
+ 2. while distance > threshold: approach()
13
+ 3. tilt_pickup()
14
+ 4. verify_holding() → success/retry
15
+ """
16
+
17
+ import time
18
+ import json
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Callable, Any, Tuple
21
+ from pathlib import Path
22
+ from enum import Enum
23
+
24
+ from .primitives import PrimitiveLibrary, Behavior, HardwareRequirement
25
+
26
+
27
class BehaviorResult(Enum):
    """Terminal status of a behavior run.

    Values are plain strings so results serialize cleanly into logs
    and JSON payloads.
    """
    SUCCESS = "success"      # behavior completed all steps
    FAILED = "failed"        # a step errored or verification failed
    NO_TARGET = "no_target"  # detection could not find/kept losing the target
    TIMEOUT = "timeout"      # time or step budget exhausted
    ABORTED = "aborted"      # operator interrupted (Ctrl-C)
34
+
35
+
36
@dataclass
class DetectionResult:
    """Outcome of a single object-detection query.

    Positions and sizes are normalized to the camera frame (0-1 range),
    so 0.5 on the x axis marks the horizontal center of the image.
    """
    found: bool
    label: str = ""
    confidence: float = 0.0
    x: float = 0.0  # Normalized x position (0-1, center is 0.5)
    y: float = 0.0  # Normalized y position
    width: float = 0.0  # Normalized width
    height: float = 0.0  # Normalized height
    distance: Optional[float] = None  # Estimated distance in meters, if known

    @property
    def center_offset(self) -> float:
        """Signed horizontal offset from frame center (negative=left, positive=right)."""
        return self.x - 0.5

    @property
    def is_centered(self) -> bool:
        """Whether the target sits within 0.15 of the horizontal center."""
        offset = self.center_offset
        return -0.15 < offset < 0.15
57
+
58
+
59
+ class BehaviorExecutor:
60
+ """
61
+ Executes complex behaviors with perception.
62
+
63
+ Coordinates:
64
+ - Detection (finding targets)
65
+ - Movement (primitives and locomotion)
66
+ - Verification (checking success)
67
+ """
68
+
69
+ def __init__(
70
+ self,
71
+ robot_interface,
72
+ camera_interface=None,
73
+ detector=None,
74
+ ):
75
+ self.robot = robot_interface
76
+ self.camera = camera_interface
77
+ self.detector = detector
78
+ self.primitives = PrimitiveLibrary(robot_interface)
79
+
80
+ # State
81
+ self.current_target: Optional[DetectionResult] = None
82
+ self.is_holding_object: bool = False
83
+
84
+ # Configuration
85
+ self.approach_distance = 0.15 # meters
86
+ self.alignment_threshold = 0.1 # normalized x offset
87
+ self.max_approach_steps = 20
88
+ self.step_delay = 0.5 # seconds between movement steps
89
+
90
+ def detect(self, target_type: str = "any", color: Optional[str] = None) -> DetectionResult:
91
+ """
92
+ Detect objects using the camera.
93
+
94
+ Args:
95
+ target_type: Type of object to find ("any", "ball", "cube", etc.)
96
+ color: Optional color filter ("green", "red", "blue")
97
+
98
+ Returns:
99
+ DetectionResult with target info
100
+ """
101
+ if not self.detector:
102
+ print("No detector configured - simulating detection")
103
+ # Return simulated result for testing
104
+ return DetectionResult(
105
+ found=True,
106
+ label=f"{color or ''} {target_type}".strip(),
107
+ confidence=0.85,
108
+ x=0.5, # Centered
109
+ y=0.6, # Lower half of frame
110
+ width=0.1,
111
+ height=0.1,
112
+ distance=0.3, # 30cm away
113
+ )
114
+
115
+ # Real detection
116
+ frame = self.camera.capture() if self.camera else None
117
+ if frame is None:
118
+ return DetectionResult(found=False)
119
+
120
+ # Use detector to find objects
121
+ detections = self.detector.detect(frame)
122
+
123
+ # Filter by target type and color
124
+ for det in detections:
125
+ if target_type != "any" and det.label != target_type:
126
+ continue
127
+ if color and not det.label.lower().startswith(color.lower()):
128
+ continue
129
+
130
+ # Estimate distance based on bounding box size
131
+ # Larger box = closer object
132
+ estimated_distance = self._estimate_distance(det.width, det.height)
133
+
134
+ return DetectionResult(
135
+ found=True,
136
+ label=det.label,
137
+ confidence=det.confidence,
138
+ x=det.x,
139
+ y=det.y,
140
+ width=det.width,
141
+ height=det.height,
142
+ distance=estimated_distance,
143
+ )
144
+
145
+ return DetectionResult(found=False)
146
+
147
+ def _estimate_distance(self, width: float, height: float) -> float:
148
+ """
149
+ Estimate distance to object based on apparent size.
150
+
151
+ This is a rough approximation. For accurate distance,
152
+ use stereo vision, depth camera, or LIDAR.
153
+ """
154
+ # Assume a ball ~5cm diameter
155
+ # At 15cm distance, it would appear ~30% of frame width
156
+ # At 30cm distance, it would appear ~15% of frame width
157
+ apparent_size = max(width, height)
158
+ if apparent_size < 0.05:
159
+ return 1.0 # Far away
160
+ elif apparent_size > 0.4:
161
+ return 0.1 # Very close
162
+ else:
163
+ # Inverse relationship: distance ≈ k / size
164
+ return 0.15 / apparent_size
165
+
166
+ def align_to_target(self, target: DetectionResult) -> bool:
167
+ """
168
+ Rotate robot to center target in frame.
169
+
170
+ Returns True when aligned.
171
+ """
172
+ if target.is_centered:
173
+ return True
174
+
175
+ offset = target.center_offset
176
+ print(f"Aligning: offset = {offset:.2f}")
177
+
178
+ # Turn toward target
179
+ # Positive offset = target is on right = turn right
180
+ if offset > self.alignment_threshold:
181
+ self._turn_right()
182
+ elif offset < -self.alignment_threshold:
183
+ self._turn_left()
184
+
185
+ time.sleep(0.3)
186
+ return False
187
+
188
+ def approach_target(self, target: DetectionResult) -> bool:
189
+ """
190
+ Move toward target until within approach_distance.
191
+
192
+ Returns True when close enough.
193
+ """
194
+ if target.distance and target.distance <= self.approach_distance:
195
+ return True
196
+
197
+ print(f"Approaching: distance = {target.distance:.2f}m")
198
+ self._step_forward()
199
+ time.sleep(self.step_delay)
200
+ return False
201
+
202
+ def _step_forward(self):
203
+ """Take one step forward (placeholder for locomotion)."""
204
+ print(" [Step forward]")
205
+ # TODO: Integrate with locomotion primitives
206
+ # For now, we assume the robot has a walk command
207
+ if hasattr(self.robot, 'walk'):
208
+ self.robot.walk('forward', 1)
209
+
210
+ def _turn_left(self):
211
+ """Turn left (placeholder)."""
212
+ print(" [Turn left]")
213
+ if hasattr(self.robot, 'turn'):
214
+ self.robot.turn('left', 15)
215
+
216
+ def _turn_right(self):
217
+ """Turn right (placeholder)."""
218
+ print(" [Turn right]")
219
+ if hasattr(self.robot, 'turn'):
220
+ self.robot.turn('right', 15)
221
+
222
+ def verify_holding_object(self) -> bool:
223
+ """
224
+ Verify that gripper is holding an object.
225
+
226
+ Could use:
227
+ - Gripper position feedback
228
+ - Force sensor
229
+ - Visual detection of object in gripper
230
+ """
231
+ # For now, assume success if gripper closed
232
+ # TODO: Add actual verification
233
+ return self.is_holding_object
234
+
235
+ def execute_behavior(
236
+ self,
237
+ behavior_name: str,
238
+ timeout: float = 30.0,
239
+ **kwargs
240
+ ) -> BehaviorResult:
241
+ """
242
+ Execute a named behavior.
243
+
244
+ This is the main entry point for running complex skills.
245
+ """
246
+ behavior = self.primitives.behaviors.get(behavior_name)
247
+ if not behavior:
248
+ print(f"Unknown behavior: {behavior_name}")
249
+ return BehaviorResult.FAILED
250
+
251
+ print(f"\n=== Executing: {behavior_name} ===")
252
+ print(f"Description: {behavior.description}")
253
+ print(f"Steps: {len(behavior.steps)}")
254
+
255
+ start_time = time.time()
256
+
257
+ try:
258
+ for step in behavior.steps:
259
+ # Check timeout
260
+ if time.time() - start_time > timeout:
261
+ print("Timeout!")
262
+ return BehaviorResult.TIMEOUT
263
+
264
+ result = self._execute_step(step, **kwargs)
265
+ if result == BehaviorResult.FAILED:
266
+ return result
267
+ if result == BehaviorResult.NO_TARGET:
268
+ return result
269
+
270
+ return BehaviorResult.SUCCESS
271
+
272
+ except KeyboardInterrupt:
273
+ print("\nAborted by user")
274
+ return BehaviorResult.ABORTED
275
+
276
+ except Exception as e:
277
+ print(f"Error: {e}")
278
+ return BehaviorResult.FAILED
279
+
280
+ def _execute_step(self, step: Dict, **kwargs) -> BehaviorResult:
281
+ """Execute a single step in a behavior."""
282
+ action = step.get("action") if isinstance(step, dict) else step
283
+
284
+ if action == "detect":
285
+ target_type = step.get("target", "any")
286
+ color = step.get("color")
287
+ print(f"Detecting: {color or ''} {target_type}")
288
+
289
+ result = self.detect(target_type, color)
290
+ if not result.found:
291
+ print(" No target found")
292
+ return BehaviorResult.NO_TARGET
293
+
294
+ self.current_target = result
295
+ print(f" Found: {result.label} at distance {result.distance:.2f}m")
296
+ return BehaviorResult.SUCCESS
297
+
298
+ elif action == "align":
299
+ if not self.current_target:
300
+ return BehaviorResult.NO_TARGET
301
+
302
+ print("Aligning to target...")
303
+ for _ in range(10): # Max 10 alignment attempts
304
+ # Re-detect to get current position
305
+ self.current_target = self.detect(
306
+ step.get("target", "any"),
307
+ step.get("color")
308
+ )
309
+ if not self.current_target.found:
310
+ return BehaviorResult.NO_TARGET
311
+ if self.align_to_target(self.current_target):
312
+ print(" Aligned!")
313
+ return BehaviorResult.SUCCESS
314
+
315
+ print(" Failed to align")
316
+ return BehaviorResult.FAILED
317
+
318
+ elif action == "while":
319
+ condition = step.get("condition", "")
320
+ do_action = step.get("do", "")
321
+ print(f"Loop: while {condition} do {do_action}")
322
+
323
+ for _ in range(self.max_approach_steps):
324
+ # Re-detect
325
+ self.current_target = self.detect()
326
+ if not self.current_target.found:
327
+ return BehaviorResult.NO_TARGET
328
+
329
+ # Check condition
330
+ if "distance" in condition:
331
+ threshold = float(condition.split(">")[-1].strip())
332
+ if self.current_target.distance <= threshold:
333
+ print(" Condition met")
334
+ return BehaviorResult.SUCCESS
335
+
336
+ # Execute action
337
+ if do_action == "step_forward":
338
+ self._step_forward()
339
+ elif do_action == "approach":
340
+ self.approach_target(self.current_target)
341
+
342
+ time.sleep(0.3)
343
+
344
+ return BehaviorResult.TIMEOUT
345
+
346
+ elif action == "skill":
347
+ skill_name = step.get("name", "")
348
+ print(f"Executing skill: {skill_name}")
349
+ if self.primitives.execute(skill_name):
350
+ if "gripper_close" in skill_name:
351
+ self.is_holding_object = True
352
+ elif "gripper_open" in skill_name:
353
+ self.is_holding_object = False
354
+ return BehaviorResult.SUCCESS
355
+ return BehaviorResult.FAILED
356
+
357
+ elif action == "wait_ms":
358
+ duration = step.get("duration", 0)
359
+ print(f"Waiting {duration}ms")
360
+ time.sleep(duration / 1000)
361
+ return BehaviorResult.SUCCESS
362
+
363
+ elif action == "verify":
364
+ check = step.get("check", "")
365
+ print(f"Verifying: {check}")
366
+ if check == "gripper_closed" or check == "holding_object":
367
+ if self.verify_holding_object():
368
+ print(" Verified!")
369
+ return BehaviorResult.SUCCESS
370
+ print(" Verification failed")
371
+ return BehaviorResult.FAILED
372
+
373
+ return BehaviorResult.SUCCESS
374
+
375
+ elif action == "return":
376
+ # End of behavior with return data
377
+ return BehaviorResult.SUCCESS
378
+
379
+ else:
380
+ print(f"Unknown action: {action}")
381
+ return BehaviorResult.FAILED
382
+
383
+ def pickup_green_ball(self) -> BehaviorResult:
384
+ """
385
+ High-level behavior: Find and pick up a green ball.
386
+
387
+ This is the composed behavior the user requested.
388
+ """
389
+ print("\n" + "=" * 60)
390
+ print("BEHAVIOR: Pickup Green Ball")
391
+ print("=" * 60)
392
+
393
+ # Step 1: Detect green ball
394
+ print("\n[1] Detecting green ball...")
395
+ target = self.detect("ball", "green")
396
+ if not target.found:
397
+ print(" No green ball found!")
398
+ return BehaviorResult.NO_TARGET
399
+
400
+ print(f" Found! Distance: {target.distance:.2f}m, Offset: {target.center_offset:.2f}")
401
+ self.current_target = target
402
+
403
+ # Step 2: Align to target
404
+ print("\n[2] Aligning to target...")
405
+ for _ in range(5):
406
+ target = self.detect("ball", "green")
407
+ if not target.found:
408
+ print(" Lost target!")
409
+ return BehaviorResult.NO_TARGET
410
+ if target.is_centered:
411
+ print(" Aligned!")
412
+ break
413
+ self.align_to_target(target)
414
+ time.sleep(0.3)
415
+
416
+ # Step 3: Approach until close enough
417
+ print("\n[3] Approaching target...")
418
+ for i in range(self.max_approach_steps):
419
+ target = self.detect("ball", "green")
420
+ if not target.found:
421
+ print(" Lost target!")
422
+ return BehaviorResult.NO_TARGET
423
+
424
+ if target.distance <= self.approach_distance:
425
+ print(f" Close enough! ({target.distance:.2f}m)")
426
+ break
427
+
428
+ print(f" Step {i+1}: distance = {target.distance:.2f}m")
429
+ self._step_forward()
430
+ time.sleep(self.step_delay)
431
+ else:
432
+ print(" Too many steps, giving up")
433
+ return BehaviorResult.TIMEOUT
434
+
435
+ # Step 4: Tilt forward and prepare
436
+ print("\n[4] Tilting forward...")
437
+ self.primitives.execute("tilt_forward")
438
+ time.sleep(0.3)
439
+
440
+ # Step 5: Ready to grab
441
+ print("\n[5] Preparing gripper...")
442
+ self.primitives.execute("ready_to_grab")
443
+ time.sleep(0.3)
444
+
445
+ # Step 6: Close gripper
446
+ print("\n[6] Grasping...")
447
+ self.primitives.execute("gripper_close")
448
+ self.is_holding_object = True
449
+ time.sleep(0.3)
450
+
451
+ # Step 7: Lift and stand
452
+ print("\n[7] Lifting and standing...")
453
+ self.primitives.execute("arm_carry")
454
+ time.sleep(0.2)
455
+ self.primitives.execute("stand")
456
+
457
+ # Step 8: Verify
458
+ print("\n[8] Verifying...")
459
+ if self.verify_holding_object():
460
+ print(" SUCCESS! Holding object.")
461
+ return BehaviorResult.SUCCESS
462
+ else:
463
+ print(" Gripper may be empty")
464
+ return BehaviorResult.FAILED
465
+
466
+
467
def run_behavior_demo():
    """Demo the behavior executor.

    Builds an executor with no hardware attached (detection is simulated),
    lists the registered behaviors, then runs the green-ball pickup.
    """
    banner = "=" * 60
    print(banner)
    print("BEHAVIOR EXECUTOR DEMO")
    print(banner)

    # No robot, camera, or detector: runs entirely in simulation mode
    executor = BehaviorExecutor(
        robot_interface=None,
        camera_interface=None,
        detector=None,  # Will use simulated detection
    )

    # List everything the primitive library knows how to run
    print("\nAvailable behaviors:")
    for behavior_name, behavior in executor.primitives.behaviors.items():
        print(f" - {behavior_name}: {behavior.description}")

    # Run the pickup_green_ball behavior
    divider = "\n" + "-" * 60
    print(divider)
    outcome = executor.pickup_green_ball()
    print(divider)
    print(f"Result: {outcome.value}")
490
+
491
+
492
# Allow running this module directly as a standalone demo script.
if __name__ == "__main__":
    run_behavior_demo()