foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +100 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/behaviors/common.py +686 -0
  9. ate/behaviors/tree.py +454 -0
  10. ate/cli.py +855 -3995
  11. ate/client.py +90 -0
  12. ate/commands/__init__.py +168 -0
  13. ate/commands/auth.py +389 -0
  14. ate/commands/bridge.py +448 -0
  15. ate/commands/data.py +185 -0
  16. ate/commands/deps.py +111 -0
  17. ate/commands/generate.py +384 -0
  18. ate/commands/memory.py +907 -0
  19. ate/commands/parts.py +166 -0
  20. ate/commands/primitive.py +399 -0
  21. ate/commands/protocol.py +288 -0
  22. ate/commands/recording.py +524 -0
  23. ate/commands/repo.py +154 -0
  24. ate/commands/simulation.py +291 -0
  25. ate/commands/skill.py +303 -0
  26. ate/commands/skills.py +487 -0
  27. ate/commands/team.py +147 -0
  28. ate/commands/workflow.py +271 -0
  29. ate/detection/__init__.py +38 -0
  30. ate/detection/base.py +142 -0
  31. ate/detection/color_detector.py +399 -0
  32. ate/detection/trash_detector.py +322 -0
  33. ate/drivers/__init__.py +39 -0
  34. ate/drivers/ble_transport.py +405 -0
  35. ate/drivers/mechdog.py +942 -0
  36. ate/drivers/wifi_camera.py +477 -0
  37. ate/interfaces/__init__.py +187 -0
  38. ate/interfaces/base.py +273 -0
  39. ate/interfaces/body.py +267 -0
  40. ate/interfaces/detection.py +282 -0
  41. ate/interfaces/locomotion.py +422 -0
  42. ate/interfaces/manipulation.py +408 -0
  43. ate/interfaces/navigation.py +389 -0
  44. ate/interfaces/perception.py +362 -0
  45. ate/interfaces/sensors.py +247 -0
  46. ate/interfaces/types.py +371 -0
  47. ate/llm_proxy.py +239 -0
  48. ate/mcp_server.py +387 -0
  49. ate/memory/__init__.py +35 -0
  50. ate/memory/cloud.py +244 -0
  51. ate/memory/context.py +269 -0
  52. ate/memory/embeddings.py +184 -0
  53. ate/memory/export.py +26 -0
  54. ate/memory/merge.py +146 -0
  55. ate/memory/migrate/__init__.py +34 -0
  56. ate/memory/migrate/base.py +89 -0
  57. ate/memory/migrate/pipeline.py +189 -0
  58. ate/memory/migrate/sources/__init__.py +13 -0
  59. ate/memory/migrate/sources/chroma.py +170 -0
  60. ate/memory/migrate/sources/pinecone.py +120 -0
  61. ate/memory/migrate/sources/qdrant.py +110 -0
  62. ate/memory/migrate/sources/weaviate.py +160 -0
  63. ate/memory/reranker.py +353 -0
  64. ate/memory/search.py +26 -0
  65. ate/memory/store.py +548 -0
  66. ate/recording/__init__.py +83 -0
  67. ate/recording/demonstration.py +378 -0
  68. ate/recording/session.py +415 -0
  69. ate/recording/upload.py +304 -0
  70. ate/recording/visual.py +416 -0
  71. ate/recording/wrapper.py +95 -0
  72. ate/robot/__init__.py +221 -0
  73. ate/robot/agentic_servo.py +856 -0
  74. ate/robot/behaviors.py +493 -0
  75. ate/robot/ble_capture.py +1000 -0
  76. ate/robot/ble_enumerate.py +506 -0
  77. ate/robot/calibration.py +668 -0
  78. ate/robot/calibration_state.py +388 -0
  79. ate/robot/commands.py +3735 -0
  80. ate/robot/direction_calibration.py +554 -0
  81. ate/robot/discovery.py +441 -0
  82. ate/robot/introspection.py +330 -0
  83. ate/robot/llm_system_id.py +654 -0
  84. ate/robot/locomotion_calibration.py +508 -0
  85. ate/robot/manager.py +270 -0
  86. ate/robot/marker_generator.py +611 -0
  87. ate/robot/perception.py +502 -0
  88. ate/robot/primitives.py +614 -0
  89. ate/robot/profiles.py +281 -0
  90. ate/robot/registry.py +322 -0
  91. ate/robot/servo_mapper.py +1153 -0
  92. ate/robot/skill_upload.py +675 -0
  93. ate/robot/target_calibration.py +500 -0
  94. ate/robot/teach.py +515 -0
  95. ate/robot/types.py +242 -0
  96. ate/robot/visual_labeler.py +1048 -0
  97. ate/robot/visual_servo_loop.py +494 -0
  98. ate/robot/visual_servoing.py +570 -0
  99. ate/robot/visual_system_id.py +906 -0
  100. ate/transports/__init__.py +121 -0
  101. ate/transports/base.py +394 -0
  102. ate/transports/ble.py +405 -0
  103. ate/transports/hybrid.py +444 -0
  104. ate/transports/serial.py +345 -0
  105. ate/urdf/__init__.py +30 -0
  106. ate/urdf/capture.py +582 -0
  107. ate/urdf/cloud.py +491 -0
  108. ate/urdf/collision.py +271 -0
  109. ate/urdf/commands.py +708 -0
  110. ate/urdf/depth.py +360 -0
  111. ate/urdf/inertial.py +312 -0
  112. ate/urdf/kinematics.py +330 -0
  113. ate/urdf/lifting.py +415 -0
  114. ate/urdf/meshing.py +300 -0
  115. ate/urdf/models/__init__.py +110 -0
  116. ate/urdf/models/depth_anything.py +253 -0
  117. ate/urdf/models/sam2.py +324 -0
  118. ate/urdf/motion_analysis.py +396 -0
  119. ate/urdf/pipeline.py +468 -0
  120. ate/urdf/scale.py +256 -0
  121. ate/urdf/scan_session.py +411 -0
  122. ate/urdf/segmentation.py +299 -0
  123. ate/urdf/synthesis.py +319 -0
  124. ate/urdf/topology.py +336 -0
  125. ate/urdf/validation.py +371 -0
  126. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
  127. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  128. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  129. foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
  130. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  131. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/behaviors/approach.py (new file)
@@ -0,0 +1,399 @@
+ """
+ Approach behaviors for targeting detected objects.
+
+ These behaviors work with any locomotion interface (quadruped, wheeled, etc.)
+ rather than requiring NavigationInterface. They use:
+ - Visual servoing (centering target in camera view)
+ - Distance sensing (ultrasonic, visual estimation, depth camera)
+
+ This is how FoodforThought makes skills portable across different robots.
+ """
+
+ import time
+ from dataclasses import dataclass
+ from typing import Optional, Protocol, Union, Tuple
+ from enum import Enum, auto
+
+ from .tree import BehaviorNode, BehaviorStatus
+ from ..interfaces import (
+     QuadrupedLocomotion,
+     WheeledLocomotion,
+     BipedLocomotion,
+     LocomotionInterface,
+     CameraInterface,
+     Vector3,
+     ActionResult,
+ )
+ from ..interfaces.sensors import (
+     DistanceSensorInterface,
+     DistanceReading,
+     VisualDistanceEstimator,
+ )
+ from ..interfaces.detection import Detection, BoundingBox
+
+
+ class ApproachState(Enum):
+     """State machine for approach behavior."""
+     SEARCHING = auto()    # Looking for target
+     CENTERING = auto()    # Turning to center target
+     APPROACHING = auto()  # Moving toward target
+     REACHED = auto()      # Close enough to target
+     LOST = auto()         # Lost sight of target
+
+
+ @dataclass
+ class ApproachConfig:
+     """Configuration for approach behavior."""
+     # Target distance to stop at (meters)
+     target_distance: float = 0.15
+
+     # Image centering tolerance (fraction of image width from center)
+     center_tolerance: float = 0.15
+
+     # Speed settings
+     approach_speed: float = 0.3
+     turn_speed: float = 0.4
+     slow_distance: float = 0.3  # Slow down when this close
+
+     # Timeout settings
+     max_approach_time: float = 30.0
+     lost_target_timeout: float = 3.0
+
+     # Visual estimation (when no hardware distance sensor)
+     use_visual_distance: bool = True
+     default_object_size: float = 0.10  # meters
+
+
+ class ApproachTarget(BehaviorNode):
+     """
+     Approach a detected target using visual servoing.
+
+     Works with ANY locomotion interface by using:
+     1. Camera to track target position in image
+     2. Distance sensor OR visual estimation for range
+     3. Locomotion commands (walk/drive forward, turn)
+
+     Blackboard:
+     - Reads: "target_detection" or "target_bbox" or "target_position"
+     - Writes: "approach_state", "target_distance", "approach_complete"
+
+     Example:
+         # Works with quadruped
+         approach = ApproachTarget(
+             locomotion=mechdog,
+             camera=mechdog,           # Same driver implements both
+             distance_sensor=mechdog,  # Has ultrasonic
+         )
+
+         # Works with wheeled robot
+         approach = ApproachTarget(
+             locomotion=diff_drive,
+             camera=usb_camera,
+             distance_sensor=None,  # Will use visual estimation
+         )
+     """
+
+     def __init__(
+         self,
+         locomotion: LocomotionInterface,
+         camera: Optional[CameraInterface] = None,
+         distance_sensor: Optional[DistanceSensorInterface] = None,
+         config: Optional[ApproachConfig] = None,
+         name: str = "",
+     ):
+         super().__init__(name or "ApproachTarget")
+         self.locomotion = locomotion
+         self.camera = camera
+         self.distance_sensor = distance_sensor
+         self.config = config or ApproachConfig()
+
+         # Visual distance estimator (fallback when no hardware sensor)
+         self._visual_estimator: Optional[VisualDistanceEstimator] = None
+         if camera and self.config.use_visual_distance:
+             res = camera.get_resolution()
+             if res[0] > 0:
+                 self._visual_estimator = VisualDistanceEstimator(
+                     image_width=res[0],
+                     image_height=res[1],
+                 )
+
+         # State
+         self._state = ApproachState.SEARCHING
+         self._start_time: Optional[float] = None
+         self._last_detection_time: Optional[float] = None
+         self._current_distance: float = float('inf')
+
+     def tick(self) -> BehaviorStatus:
+         """Execute one tick of the approach behavior."""
+         now = time.time()
+
+         # Initialize on first tick
+         if self._start_time is None:
+             self._start_time = now
+             self._state = ApproachState.SEARCHING
+
+         # Check timeout
+         if now - self._start_time > self.config.max_approach_time:
+             self._stop_locomotion()
+             return BehaviorStatus.FAILURE
+
+         # Get current target info from blackboard
+         target_info = self._get_target_info()
+
+         if target_info is None:
+             # No target info available
+             if self._last_detection_time is None:
+                 return BehaviorStatus.FAILURE  # Never saw target
+
+             # Check if we lost target recently
+             if now - self._last_detection_time > self.config.lost_target_timeout:
+                 self._state = ApproachState.LOST
+                 self._stop_locomotion()
+                 return BehaviorStatus.FAILURE
+
+             # Keep approaching based on last known info
+             return BehaviorStatus.RUNNING
+
+         # Update last detection time
+         self._last_detection_time = now
+
+         bbox, object_type = target_info
+
+         # Get distance to target
+         distance = self._get_distance(bbox, object_type)
+         self._current_distance = distance
+
+         if self.blackboard:
+             self.blackboard.set("target_distance", distance)
+             self.blackboard.set("approach_state", self._state.name)
+
+         # Check if we've reached the target
+         if distance <= self.config.target_distance:
+             self._state = ApproachState.REACHED
+             self._stop_locomotion()
+             if self.blackboard:
+                 self.blackboard.set("approach_complete", True)
+             return BehaviorStatus.SUCCESS
+
+         # Get target position in image (for centering)
+         center_offset = self._get_center_offset(bbox)
+
+         # State machine
+         if abs(center_offset) > self.config.center_tolerance:
+             # Need to turn to center target
+             self._state = ApproachState.CENTERING
+             self._turn_toward_target(center_offset)
+             return BehaviorStatus.RUNNING
+         else:
+             # Target is centered, approach
+             self._state = ApproachState.APPROACHING
+             self._move_toward_target(distance)
+             return BehaviorStatus.RUNNING
+
+     def _get_target_info(self) -> Optional[Tuple[BoundingBox, str]]:
+         """Get target bounding box and type from blackboard."""
+         if not self.blackboard:
+             return None
+
+         # Try detection first
+         detection = self.blackboard.get("target_detection")
+         if detection and isinstance(detection, Detection):
+             return (detection.bbox, detection.class_name)
+
+         # Try raw bbox
+         bbox = self.blackboard.get("target_bbox")
+         if bbox and isinstance(bbox, BoundingBox):
+             return (bbox, "unknown")
+
+         # Try detection list (get first)
+         detections = self.blackboard.get("detections")
+         if detections and len(detections) > 0:
+             det = detections[0]
+             if isinstance(det, Detection):
+                 return (det.bbox, det.class_name)
+
+         return None
+
+     def _get_center_offset(self, bbox: BoundingBox) -> float:
+         """
+         Get offset from image center as fraction of image width.
+
+         Returns:
+             Negative = target is left of center
+             Positive = target is right of center
+             0 = centered
+         """
+         if not self.camera:
+             return 0.0
+
+         res = self.camera.get_resolution()
+         if res[0] == 0:
+             return 0.0
+
+         image_width = res[0]
+         image_center_x = image_width / 2
+
+         # Get bbox center
+         bbox_center_x = bbox.x + bbox.width / 2
+
+         # Calculate offset as fraction of image width
+         offset = (bbox_center_x - image_center_x) / image_width
+
+         return offset
+
+     def _get_distance(self, bbox: BoundingBox, object_type: str) -> float:
+         """
+         Get distance to target using best available method.
+
+         Priority:
+         1. Hardware distance sensor (ultrasonic, lidar)
+         2. Visual estimation from bbox size
+         3. Default large distance
+         """
+         # Try hardware sensor first
+         if self.distance_sensor:
+             reading = self.distance_sensor.get_distance()
+             if reading.valid:
+                 return reading.distance
+
+         # Fall back to visual estimation
+         if self._visual_estimator and bbox.width > 0:
+             reading = self._visual_estimator.estimate_from_detection(
+                 bbox_width=int(bbox.width),
+                 object_type=object_type,
+                 known_size=self.config.default_object_size,
+             )
+             if reading.valid:
+                 return reading.distance
+
+         # Default: assume we need to approach
+         return 1.0
+
+     def _turn_toward_target(self, offset: float) -> None:
+         """Turn to center the target in view."""
+         # Positive offset = target is right, turn right (negative angular)
+         # Negative offset = target is left, turn left (positive angular)
+
+         # Scale turn based on offset magnitude
+         turn_rate = -offset * self.config.turn_speed * 2
+
+         # Use appropriate locomotion method
+         if hasattr(self.locomotion, 'turn_continuous'):
+             self.locomotion.turn_continuous(turn_rate)
+         elif hasattr(self.locomotion, 'walk'):
+             # For quadrupeds, walk in place while turning
+             direction = Vector3(0, -offset, 0)  # y component controls turn
+             self.locomotion.walk(direction, speed=0.1)
+         elif hasattr(self.locomotion, 'drive'):
+             # For wheeled, differential drive
+             self.locomotion.drive(0, turn_rate)
+
+     def _move_toward_target(self, distance: float) -> None:
+         """Move toward the target."""
+         # Calculate speed based on distance
+         speed = self.config.approach_speed
+         if distance < self.config.slow_distance:
+             # Slow down as we get close
+             speed *= distance / self.config.slow_distance
+             speed = max(speed, 0.1)  # Minimum speed
+
+         # Use appropriate locomotion method
+         if hasattr(self.locomotion, 'walk'):
+             self.locomotion.walk(Vector3.forward(), speed=speed)
+         elif hasattr(self.locomotion, 'drive'):
+             self.locomotion.drive(speed, 0)
+
+     def _stop_locomotion(self) -> None:
+         """Stop all locomotion."""
+         if hasattr(self.locomotion, 'stop'):
+             self.locomotion.stop()
+
+     def reset(self) -> None:
+         """Reset behavior state."""
+         super().reset()
+         self._state = ApproachState.SEARCHING
+         self._start_time = None
+         self._last_detection_time = None
+         self._current_distance = float('inf')
+
+
+ class VisualServoApproach(BehaviorNode):
+     """
+     Simpler approach using only visual feedback.
+
+     Approaches until the bounding box reaches a target size in the image.
+     No distance sensor required - uses bbox size as proxy for distance.
+
+     Blackboard:
+     - Reads: "target_detection" or "target_bbox"
+     - Writes: "approach_complete"
+     """
+
+     def __init__(
+         self,
+         locomotion: LocomotionInterface,
+         target_bbox_height_fraction: float = 0.6,  # Target height as fraction of image
+         center_tolerance: float = 0.1,
+         approach_speed: float = 0.3,
+         image_height: int = 480,
+         name: str = "",
+     ):
+         super().__init__(name or "VisualServoApproach")
+         self.locomotion = locomotion
+         self.target_fraction = target_bbox_height_fraction
+         self.center_tolerance = center_tolerance
+         self.approach_speed = approach_speed
+         self.image_height = image_height
+
+     def tick(self) -> BehaviorStatus:
+         """Execute visual servo approach."""
+         if not self.blackboard:
+             return BehaviorStatus.FAILURE
+
+         # Get target bbox
+         detection = self.blackboard.get("target_detection")
+         bbox = None
+         if detection and isinstance(detection, Detection):
+             bbox = detection.bbox
+         else:
+             bbox = self.blackboard.get("target_bbox")
+
+         if not bbox:
+             # No target visible
+             if hasattr(self.locomotion, 'stop'):
+                 self.locomotion.stop()
+             return BehaviorStatus.FAILURE
+
+         # Check if bbox is large enough (close enough)
+         height_fraction = bbox.height / self.image_height
+         if height_fraction >= self.target_fraction:
+             if hasattr(self.locomotion, 'stop'):
+                 self.locomotion.stop()
+             if self.blackboard:
+                 self.blackboard.set("approach_complete", True)
+             return BehaviorStatus.SUCCESS
+
+         # Calculate center offset
+         image_center_x = self.image_height * (4/3) / 2  # Assume 4:3 aspect
+         bbox_center_x = bbox.x + bbox.width / 2
+         offset = (bbox_center_x - image_center_x) / image_center_x
+
+         # Turn or approach
+         if abs(offset) > self.center_tolerance:
+             # Turn toward target
+             turn_rate = -offset * 0.5
+             if hasattr(self.locomotion, 'turn_continuous'):
+                 self.locomotion.turn_continuous(turn_rate)
+             elif hasattr(self.locomotion, 'walk'):
+                 self.locomotion.walk(Vector3(0, -offset, 0), speed=0.1)
+         else:
+             # Approach
+             speed = self.approach_speed * (1 - height_fraction / self.target_fraction)
+             speed = max(speed, 0.1)
+             if hasattr(self.locomotion, 'walk'):
+                 self.locomotion.walk(Vector3.forward(), speed=speed)
+             elif hasattr(self.locomotion, 'drive'):
+                 self.locomotion.drive(speed, 0)
+
+         return BehaviorStatus.RUNNING
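
For orientation, here is a minimal sketch of driving ApproachTarget from a plain tick loop. Only the ApproachTarget/ApproachConfig constructors and the tick()/BehaviorStatus contract appear in the diff above; the MechDog class name, the Blackboard class, and the direct approach.blackboard attachment are assumptions made for illustration.

    import time

    from ate.behaviors.approach import ApproachTarget, ApproachConfig
    from ate.behaviors.tree import BehaviorStatus, Blackboard  # Blackboard export is assumed
    from ate.drivers.mechdog import MechDog  # module exists in this release; class name assumed

    # One driver object can satisfy the locomotion, camera, and
    # distance-sensor roles, as the class docstring's example suggests.
    robot = MechDog()

    approach = ApproachTarget(
        locomotion=robot,
        camera=robot,
        distance_sensor=robot,
        config=ApproachConfig(target_distance=0.20, approach_speed=0.25),
    )
    approach.blackboard = Blackboard()  # normally attached by the behavior tree (assumed)

    # Tick at roughly 10 Hz until the behavior resolves. Upstream perception
    # is expected to write "target_detection" into the blackboard each cycle.
    status = BehaviorStatus.RUNNING
    while status == BehaviorStatus.RUNNING:
        status = approach.tick()
        time.sleep(0.1)

    print("reached target" if status == BehaviorStatus.SUCCESS else "approach failed")

Because _turn_toward_target and _move_toward_target dispatch on hasattr checks for turn_continuous, walk, and drive, the same loop should work unchanged with a wheeled driver, which is the portability claim the module docstring makes.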