foodforthought-cli 0.2.8-py3-none-any.whl → 0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +12 -0
- ate/behaviors/approach.py +399 -0
- ate/cli.py +855 -4551
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +18 -6
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +360 -24
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +16 -0
- ate/interfaces/base.py +2 -0
- ate/interfaces/sensors.py +247 -0
- ate/llm_proxy.py +239 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +42 -3
- ate/recording/session.py +12 -2
- ate/recording/visual.py +416 -0
- ate/robot/__init__.py +142 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +88 -3
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +143 -11
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +104 -2
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +6 -0
- ate/robot/registry.py +5 -2
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +285 -3
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +9 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +1 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/recording/visual.py
ADDED

@@ -0,0 +1,416 @@

```python
"""
Visual recording for demonstrations with image capture and detection.

Extends the standard recording session to capture camera images at regular
intervals, optionally running object detection on each frame.

This enables:
- Recording demonstrations with synchronized video
- Automatic object detection during recording
- Training data collection for perception models
"""

import time
import json
import base64
import threading
from pathlib import Path
from dataclasses import dataclass, field
from typing import List, Optional, Dict, Any, Callable

from ate.interfaces import RobotInterface, CameraInterface
from ate.interfaces.perception import Image
from .session import RecordingSession, RecordedCall, RecordingMetadata


@dataclass
class DetectionResult:
    """A single object detection result."""
    label: str
    confidence: float
    bbox: Dict[str, int]  # x, y, width, height
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict:
        return {
            "label": self.label,
            "confidence": self.confidence,
            "bbox": self.bbox,
            "metadata": self.metadata,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "DetectionResult":
        return cls(**data)


@dataclass
class RecordedFrame:
    """A recorded camera frame with optional detections."""

    timestamp: float  # Unix timestamp
    relative_time: float  # Time since recording started
    width: int
    height: int
    format: str  # jpeg, png, rgb8
    data: bytes  # Raw image data
    detections: List[DetectionResult] = field(default_factory=list)

    def to_dict(self, include_data: bool = True) -> dict:
        result = {
            "timestamp": self.timestamp,
            "relative_time": self.relative_time,
            "width": self.width,
            "height": self.height,
            "format": self.format,
            "detections": [d.to_dict() for d in self.detections],
        }
        if include_data:
            result["data"] = base64.b64encode(self.data).decode('ascii')
        return result

    @classmethod
    def from_dict(cls, data: dict) -> "RecordedFrame":
        frame_data = base64.b64decode(data.get("data", ""))
        detections = [DetectionResult.from_dict(d) for d in data.get("detections", [])]
        return cls(
            timestamp=data["timestamp"],
            relative_time=data["relative_time"],
            width=data["width"],
            height=data["height"],
            format=data["format"],
            data=frame_data,
            detections=detections,
        )


class VisualRecordingSession(RecordingSession):
    """
    Recording session with visual capture and detection.

    Extends RecordingSession to capture camera images at regular intervals
    and optionally run object detection on each frame.

    Usage:
        from ate.detection import TrashDetector

        detector = TrashDetector()

        with VisualRecordingSession(
            driver,
            name="pickup_demo",
            capture_fps=5.0,
            detector=detector,
        ) as session:
            # Drive robot - images captured automatically
            driver.walk(Vector3.forward(), speed=0.2)
            time.sleep(5.0)

            # Save with visual data
            session.save("pickup_demo.demonstration")
            session.save_frames("pickup_demo_frames.json")
    """

    def __init__(
        self,
        driver: RobotInterface,
        name: str = "recording",
        description: Optional[str] = None,
        tags: Optional[List[str]] = None,
        auto_save: bool = False,
        save_path: Optional[str] = None,
        # Visual capture settings
        capture_fps: float = 5.0,  # Frames per second to capture
        detector: Any = None,  # Object detector (TrashDetector, ColorDetector, etc.)
        max_frames: int = 1000,  # Maximum frames to store
        store_frames: bool = True,  # Whether to store image data
    ):
        """
        Initialize visual recording session.

        Args:
            driver: Robot driver (must implement CameraInterface for visual capture)
            name: Human-readable name
            description: Optional description
            tags: Optional tags
            auto_save: Auto-save on exit
            save_path: Path for auto-save
            capture_fps: Frames per second to capture (default: 5.0)
            detector: Optional object detector for automatic detection
            max_frames: Maximum frames to store in memory
            store_frames: Whether to store raw image data
        """
        super().__init__(driver, name, description, tags, auto_save, save_path)

        self._capture_fps = capture_fps
        self._detector = detector
        self._max_frames = max_frames
        self._store_frames = store_frames

        self._frames: List[RecordedFrame] = []
        self._capture_thread: Optional[threading.Thread] = None
        self._stop_capture = threading.Event()

        # Check if driver has camera
        self._has_camera = isinstance(driver, CameraInterface)

    @property
    def frames(self) -> List[RecordedFrame]:
        """Get all recorded frames."""
        return self._frames.copy()

    @property
    def frame_count(self) -> int:
        """Get number of captured frames."""
        return len(self._frames)

    @property
    def detection_count(self) -> int:
        """Get total number of detections across all frames."""
        return sum(len(f.detections) for f in self._frames)

    def start(self) -> None:
        """Start recording with visual capture."""
        super().start()

        if self._has_camera:
            self._stop_capture.clear()
            self._capture_thread = threading.Thread(
                target=self._capture_loop,
                daemon=True,
            )
            self._capture_thread.start()

    def stop(self) -> None:
        """Stop recording and visual capture."""
        self._stop_capture.set()

        if self._capture_thread and self._capture_thread.is_alive():
            self._capture_thread.join(timeout=2.0)

        super().stop()

    def _capture_loop(self) -> None:
        """Background thread for capturing frames."""
        interval = 1.0 / self._capture_fps

        while not self._stop_capture.is_set():
            if len(self._frames) >= self._max_frames:
                break

            try:
                self._capture_frame()
            except Exception as e:
                # Log but don't crash on capture errors
                pass

            self._stop_capture.wait(interval)

    def _capture_frame(self) -> None:
        """Capture a single frame and run detection."""
        if not self._has_camera:
            return

        timestamp = time.time()
        relative_time = timestamp - self._start_time

        # Get image from camera
        image = self._driver.get_image()
        if image.width == 0 or image.height == 0:
            return  # Skip empty frames

        # Run detection if detector available
        detections = []
        if self._detector is not None:
            try:
                # Try detect_trash first (for TrashDetector)
                if hasattr(self._detector, 'detect_trash'):
                    items = self._detector.detect_trash(image)
                    for item in items:
                        det = item.detection
                        detections.append(DetectionResult(
                            label=item.trash_type,
                            confidence=item.confidence,
                            bbox={
                                "x": det.bbox.x,
                                "y": det.bbox.y,
                                "width": det.bbox.width,
                                "height": det.bbox.height,
                            },
                            metadata={
                                "is_on_ground": item.is_on_ground,
                                "size_category": item.size_category,
                                "priority": item.priority,
                            },
                        ))
                else:
                    # Generic detector with detect() method
                    raw_detections = self._detector.detect(image)
                    for det in raw_detections:
                        detections.append(DetectionResult(
                            label=det.label,
                            confidence=det.confidence,
                            bbox={
                                "x": det.bbox.x,
                                "y": det.bbox.y,
                                "width": det.bbox.width,
                                "height": det.bbox.height,
                            },
                            metadata=det.metadata,
                        ))
            except Exception as e:
                # Detection failed - still record frame without detections
                pass

        # Create recorded frame
        frame = RecordedFrame(
            timestamp=timestamp,
            relative_time=relative_time,
            width=image.width,
            height=image.height,
            format=image.encoding,
            data=image.data if self._store_frames else b"",
            detections=detections,
        )

        self._frames.append(frame)

    def capture_now(self) -> Optional[RecordedFrame]:
        """
        Manually capture a frame right now.

        Useful for capturing at specific moments (e.g., before/after actions).

        Returns:
            Captured frame or None if capture failed
        """
        if not self._has_camera or not self._recording:
            return None

        prev_count = len(self._frames)
        self._capture_frame()

        if len(self._frames) > prev_count:
            return self._frames[-1]
        return None

    def get_frames_in_range(
        self,
        start_time: float,
        end_time: float,
    ) -> List[RecordedFrame]:
        """Get frames within a time range."""
        return [
            f for f in self._frames
            if start_time <= f.relative_time <= end_time
        ]

    def get_frames_with_detections(
        self,
        label: Optional[str] = None,
        min_confidence: float = 0.0,
    ) -> List[RecordedFrame]:
        """Get frames that have detections matching criteria."""
        result = []
        for frame in self._frames:
            matching = [
                d for d in frame.detections
                if d.confidence >= min_confidence
                and (label is None or d.label == label)
            ]
            if matching:
                result.append(frame)
        return result

    def get_metadata(self) -> RecordingMetadata:
        """Get recording metadata including visual stats."""
        metadata = super().get_metadata()
        # Add visual metadata to tags
        if self._has_camera:
            metadata.tags = list(metadata.tags) + [
                f"frames:{len(self._frames)}",
                f"detections:{self.detection_count}",
            ]
        return metadata

    def save_frames(self, path: str, include_data: bool = True) -> None:
        """
        Save frames to a separate file.

        Args:
            path: File path for frames
            include_data: Whether to include raw image data (base64)
        """
        data = {
            "version": "1.0",
            "recording_id": self.get_metadata().id,
            "frame_count": len(self._frames),
            "detection_count": self.detection_count,
            "frames": [f.to_dict(include_data=include_data) for f in self._frames],
        }

        with open(path, 'w') as f:
            json.dump(data, f, indent=2 if not include_data else None)

    def to_dict(self) -> dict:
        """Convert to dictionary including frame summary."""
        base = super().to_dict()

        # Add visual summary (not full frames - too large for API)
        base["visual"] = {
            "has_camera": self._has_camera,
            "frame_count": len(self._frames),
            "detection_count": self.detection_count,
            "capture_fps": self._capture_fps,
            "detection_summary": self._get_detection_summary(),
        }

        return base

    def _get_detection_summary(self) -> Dict[str, int]:
        """Get count of each detection type."""
        counts: Dict[str, int] = {}
        for frame in self._frames:
            for det in frame.detections:
                counts[det.label] = counts.get(det.label, 0) + 1
        return counts

    def summary(self) -> str:
        """Get human-readable summary including visual data."""
        base = super().summary()

        if not self._has_camera:
            return base + "\n\nVisual capture: Not available (no camera)"

        lines = [
            base,
            "",
            "Visual capture:",
            f"  Frames captured: {len(self._frames)}",
            f"  Total detections: {self.detection_count}",
        ]

        # Detection breakdown
        summary = self._get_detection_summary()
        if summary:
            lines.append("  Detection breakdown:")
            for label, count in sorted(summary.items(), key=lambda x: -x[1]):
                lines.append(f"    - {label}: {count}")

        return "\n".join(lines)


def load_frames(path: str) -> List[RecordedFrame]:
    """
    Load recorded frames from a file.

    Args:
        path: Path to frames file

    Returns:
        List of RecordedFrame objects
    """
    with open(path, 'r') as f:
        data = json.load(f)

    return [RecordedFrame.from_dict(frame) for frame in data.get("frames", [])]
```
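For readers skimming the new module: `_capture_frame()` duck-types the detector, preferring `detect_trash(image)` and otherwise falling back to a generic `detect(image)` whose results must expose `.label`, `.confidence`, `.bbox` (with `.x`/`.y`/`.width`/`.height`), and `.metadata`. A minimal sketch of that generic contract plus post-hoc loading of saved frames follows; `BBox`, `Detection`, `NullDetector`, the `"bottle"` label, and the file path are illustrative names, not part of the package:

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List

from ate.recording.visual import load_frames


@dataclass
class BBox:  # hypothetical: the attribute shape _capture_frame() reads
    x: int = 0
    y: int = 0
    width: int = 0
    height: int = 0


@dataclass
class Detection:  # hypothetical: one element of detect()'s return value
    label: str
    confidence: float
    bbox: BBox = field(default_factory=BBox)
    metadata: Dict[str, Any] = field(default_factory=dict)


class NullDetector:
    """Stub satisfying the generic detect() contract the session expects."""

    def detect(self, image) -> List[Detection]:
        return []  # a real detector would inspect image.data here


# Post-hoc analysis of frames written by session.save_frames(...):
frames = load_frames("pickup_demo_frames.json")
hits = [
    f for f in frames
    if any(d.confidence >= 0.8 and d.label == "bottle" for d in f.detections)
]
for f in hits:
    print(f"t={f.relative_time:.2f}s {f.width}x{f.height} ({f.format}), "
          f"{len(f.detections)} detections")
```

Note that saving with `save_frames(path, include_data=False)` keeps the JSON small, but `RecordedFrame.from_dict` then decodes an empty string, so reloaded frames carry empty `data`.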
ate/robot/__init__.py
CHANGED

```diff
@@ -54,6 +54,87 @@ from .manager import (
     RobotManager,
 )
 
+from .primitives import (
+    PrimitiveLibrary,
+    Primitive,
+    CompoundSkill,
+    Behavior,
+    HardwareRequirement,
+    create_mechdog_primitives,
+)
+
+from .perception import (
+    PerceptionSystem,
+    PerceptionResult,
+    LivePerceptionExecutor,
+)
+
+from .behaviors import (
+    BehaviorExecutor,
+    BehaviorResult,
+    DetectionResult,
+)
+
+from .visual_servoing import (
+    VisualServoController,
+    GreenBallDetector,
+    TargetDetection,
+    ServoState,
+    create_mechdog_servo_controller,
+)
+
+from .agentic_servo import (
+    AgenticServoController,
+    HaikuServoAgent,
+    AgentDecision,
+    run_agentic_pickup,
+    run_agentic_pickup_wifi,
+)
+
+from .servo_mapper import (
+    ServoProbe,
+    ServoMapperAgent,
+    ServoMapping,
+    ServoRange,
+    ServoType,
+    PrimitiveGenerator,
+    run_servo_mapping,
+)
+
+from .calibration_state import (
+    CalibrationState,
+    DirectionMapping,
+    check_prerequisite,
+    require_direction_calibration,
+)
+
+from .direction_calibration import (
+    DirectionCalibrator,
+    TwitchResult,
+    run_direction_calibration,
+    load_direction_calibration,
+    get_toward_direction,
+)
+
+from .visual_servo_loop import (
+    VisualServoLoop,
+    ServoConfig,
+    ServoRole,
+    ServoState,
+    ServoResult,
+    ServoIteration,
+    load_direction_calibration_for_servos,
+)
+
+from .target_calibration import (
+    TargetProfile,
+    TargetCalibrator,
+    HSVRange,
+    run_target_calibration,
+    list_target_profiles,
+    detect_with_profile,
+)
+
 __all__ = [
     # Discovery
     "DiscoveredRobot",
@@ -76,4 +157,65 @@ __all__ = [
     "test_capability",
     # Manager
     "RobotManager",
+    # Primitives
+    "PrimitiveLibrary",
+    "Primitive",
+    "CompoundSkill",
+    "Behavior",
+    "HardwareRequirement",
+    "create_mechdog_primitives",
+    # Perception
+    "PerceptionSystem",
+    "PerceptionResult",
+    "LivePerceptionExecutor",
+    # Behaviors
+    "BehaviorExecutor",
+    "BehaviorResult",
+    "DetectionResult",
+    # Visual Servoing
+    "VisualServoController",
+    "GreenBallDetector",
+    "TargetDetection",
+    "ServoState",
+    "create_mechdog_servo_controller",
+    # Agentic Servoing
+    "AgenticServoController",
+    "HaikuServoAgent",
+    "AgentDecision",
+    "run_agentic_pickup",
+    "run_agentic_pickup_wifi",
+    # Servo Mapping
+    "ServoProbe",
+    "ServoMapperAgent",
+    "ServoMapping",
+    "ServoRange",
+    "ServoType",
+    "PrimitiveGenerator",
+    "run_servo_mapping",
+    # Calibration State Machine
+    "CalibrationState",
+    "DirectionMapping",
+    "check_prerequisite",
+    "require_direction_calibration",
+    # Direction Calibration (Twitch Test)
+    "DirectionCalibrator",
+    "TwitchResult",
+    "run_direction_calibration",
+    "load_direction_calibration",
+    "get_toward_direction",
+    # Visual Servo Loop
+    "VisualServoLoop",
+    "ServoConfig",
+    "ServoRole",
+    "ServoState",
+    "ServoResult",
+    "ServoIteration",
+    "load_direction_calibration_for_servos",
+    # Target Detection Calibration
+    "TargetProfile",
+    "TargetCalibrator",
+    "HSVRange",
+    "run_target_calibration",
+    "list_target_profiles",
+    "detect_with_profile",
 ]
```
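One detail worth noting in the hunk above: `ServoState` is imported from both `visual_servoing` and `visual_servo_loop`, and the name appears twice in `__all__`. Because the `visual_servo_loop` import runs last, it shadows the earlier binding. A minimal sketch of the resulting re-exported surface; the import list comes straight from the diff, the usage around it is illustrative:

```python
from ate.robot import (
    ServoState,                  # resolves to visual_servo_loop.ServoState
    run_direction_calibration,
    run_target_calibration,
    load_direction_calibration,
)

# The duplicate export means visual_servoing's ServoState is shadowed at the
# package level; import it explicitly if that variant is the one you need.
from ate.robot.visual_servoing import ServoState as VisualServoingServoState
```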