foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/recording/visual.py
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Visual recording for demonstrations with image capture and detection.
|
|
3
|
+
|
|
4
|
+
Extends the standard recording session to capture camera images at regular
|
|
5
|
+
intervals, optionally running object detection on each frame.
|
|
6
|
+
|
|
7
|
+
This enables:
|
|
8
|
+
- Recording demonstrations with synchronized video
|
|
9
|
+
- Automatic object detection during recording
|
|
10
|
+
- Training data collection for perception models
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import base64
import json
import logging
import threading
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional

from ate.interfaces import RobotInterface, CameraInterface
from ate.interfaces.perception import Image

from .session import RecordingSession, RecordedCall, RecordingMetadata
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class DetectionResult:
    """One object detection produced by a detector on a single frame."""

    # Detector-assigned class name (e.g. "bottle", "can").
    label: str
    # Detector confidence score for this detection.
    confidence: float
    # Pixel-space bounding box with keys: x, y, width, height.
    bbox: Dict[str, int]
    # Detector-specific extra information.
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict:
        """Serialize this detection into a JSON-friendly dict."""
        return dict(
            label=self.label,
            confidence=self.confidence,
            bbox=self.bbox,
            metadata=self.metadata,
        )

    @classmethod
    def from_dict(cls, data: dict) -> "DetectionResult":
        """Rebuild a DetectionResult from the output of to_dict()."""
        return cls(**data)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class RecordedFrame:
    """A camera frame captured during a recording, plus any detections on it."""

    # Absolute capture time (Unix epoch seconds).
    timestamp: float
    # Seconds elapsed since the recording started.
    relative_time: float
    width: int
    height: int
    # Image encoding, e.g. jpeg, png, rgb8.
    format: str
    # Raw image bytes (may be empty when frame storage is disabled).
    data: bytes
    detections: List[DetectionResult] = field(default_factory=list)

    def to_dict(self, include_data: bool = True) -> dict:
        """Serialize the frame; image bytes are base64-encoded when included."""
        payload = {
            "timestamp": self.timestamp,
            "relative_time": self.relative_time,
            "width": self.width,
            "height": self.height,
            "format": self.format,
            "detections": [d.to_dict() for d in self.detections],
        }
        if include_data:
            payload["data"] = base64.b64encode(self.data).decode('ascii')
        return payload

    @classmethod
    def from_dict(cls, data: dict) -> "RecordedFrame":
        """Inverse of to_dict(); a missing "data" key yields empty bytes."""
        return cls(
            timestamp=data["timestamp"],
            relative_time=data["relative_time"],
            width=data["width"],
            height=data["height"],
            format=data["format"],
            data=base64.b64decode(data.get("data", "")),
            detections=[DetectionResult.from_dict(d) for d in data.get("detections", [])],
        )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class VisualRecordingSession(RecordingSession):
    """
    Recording session with visual capture and detection.

    Extends RecordingSession to capture camera images at regular intervals
    and optionally run object detection on each frame.

    Usage:
        from ate.detection import TrashDetector

        detector = TrashDetector()

        with VisualRecordingSession(
            driver,
            name="pickup_demo",
            capture_fps=5.0,
            detector=detector,
        ) as session:
            # Drive robot - images captured automatically
            driver.walk(Vector3.forward(), speed=0.2)
            time.sleep(5.0)

        # Save with visual data
        session.save("pickup_demo.demonstration")
        session.save_frames("pickup_demo_frames.json")
    """

    def __init__(
        self,
        driver: RobotInterface,
        name: str = "recording",
        description: Optional[str] = None,
        tags: Optional[List[str]] = None,
        auto_save: bool = False,
        save_path: Optional[str] = None,
        # Visual capture settings
        capture_fps: float = 5.0,  # Frames per second to capture
        detector: Any = None,  # Object detector (TrashDetector, ColorDetector, etc.)
        max_frames: int = 1000,  # Maximum frames to store
        store_frames: bool = True,  # Whether to store image data
    ):
        """
        Initialize visual recording session.

        Args:
            driver: Robot driver (must implement CameraInterface for visual capture)
            name: Human-readable name
            description: Optional description
            tags: Optional tags
            auto_save: Auto-save on exit
            save_path: Path for auto-save
            capture_fps: Frames per second to capture (default: 5.0, must be > 0)
            detector: Optional object detector for automatic detection
            max_frames: Maximum frames to store in memory
            store_frames: Whether to store raw image data

        Raises:
            ValueError: If capture_fps is not positive. (Previously a
                non-positive rate killed the capture thread with an unlogged
                ZeroDivisionError in _capture_loop.)
        """
        super().__init__(driver, name, description, tags, auto_save, save_path)

        if capture_fps <= 0:
            raise ValueError(f"capture_fps must be positive, got {capture_fps}")

        self._capture_fps = capture_fps
        self._detector = detector
        self._max_frames = max_frames
        self._store_frames = store_frames

        self._frames: List[RecordedFrame] = []
        self._capture_thread: Optional[threading.Thread] = None
        self._stop_capture = threading.Event()

        # Only drivers implementing CameraInterface can supply frames;
        # without a camera this class degrades to a plain RecordingSession.
        self._has_camera = isinstance(driver, CameraInterface)

    @property
    def frames(self) -> List[RecordedFrame]:
        """Get all recorded frames (shallow copy; safe to iterate)."""
        return self._frames.copy()

    @property
    def frame_count(self) -> int:
        """Get number of captured frames."""
        return len(self._frames)

    @property
    def detection_count(self) -> int:
        """Get total number of detections across all frames."""
        return sum(len(f.detections) for f in self._frames)

    def start(self) -> None:
        """Start recording and, if a camera is available, background capture."""
        super().start()

        if self._has_camera:
            self._stop_capture.clear()
            self._capture_thread = threading.Thread(
                target=self._capture_loop,
                daemon=True,
            )
            self._capture_thread.start()

    def stop(self) -> None:
        """Stop visual capture first, then the underlying recording."""
        self._stop_capture.set()

        if self._capture_thread and self._capture_thread.is_alive():
            # Bounded join so a wedged camera read cannot hang shutdown.
            self._capture_thread.join(timeout=2.0)

        super().stop()

    def _capture_loop(self) -> None:
        """Background thread body: capture frames at ~capture_fps until stopped."""
        interval = 1.0 / self._capture_fps

        while not self._stop_capture.is_set():
            if len(self._frames) >= self._max_frames:
                break

            try:
                self._capture_frame()
            except Exception:
                # Capture is best-effort: log (instead of silently swallowing)
                # and keep the loop alive.
                logging.getLogger(__name__).debug(
                    "Frame capture failed", exc_info=True
                )

            # Event.wait doubles as an interruptible sleep.
            self._stop_capture.wait(interval)

    def _capture_frame(self) -> None:
        """Capture a single frame, run detection, and append a RecordedFrame."""
        if not self._has_camera:
            return

        timestamp = time.time()
        # NOTE(review): relies on self._start_time being set by
        # RecordingSession.start() — confirm against the parent class.
        relative_time = timestamp - self._start_time

        # Get image from camera
        image = self._driver.get_image()
        if image.width == 0 or image.height == 0:
            return  # Skip empty frames

        detections = self._run_detection(image)

        frame = RecordedFrame(
            timestamp=timestamp,
            relative_time=relative_time,
            width=image.width,
            height=image.height,
            format=image.encoding,
            data=image.data if self._store_frames else b"",
            detections=detections,
        )

        self._frames.append(frame)

    def _run_detection(self, image: Image) -> List[DetectionResult]:
        """Run the configured detector on image; failures yield no detections."""
        detections: List[DetectionResult] = []
        if self._detector is None:
            return detections

        try:
            # Try detect_trash first (for TrashDetector): richer metadata.
            if hasattr(self._detector, 'detect_trash'):
                for item in self._detector.detect_trash(image):
                    det = item.detection
                    detections.append(DetectionResult(
                        label=item.trash_type,
                        confidence=item.confidence,
                        bbox={
                            "x": det.bbox.x,
                            "y": det.bbox.y,
                            "width": det.bbox.width,
                            "height": det.bbox.height,
                        },
                        metadata={
                            "is_on_ground": item.is_on_ground,
                            "size_category": item.size_category,
                            "priority": item.priority,
                        },
                    ))
            else:
                # Generic detector with detect() method
                for det in self._detector.detect(image):
                    detections.append(DetectionResult(
                        label=det.label,
                        confidence=det.confidence,
                        bbox={
                            "x": det.bbox.x,
                            "y": det.bbox.y,
                            "width": det.bbox.width,
                            "height": det.bbox.height,
                        },
                        metadata=det.metadata,
                    ))
        except Exception:
            # Detection is optional: log (instead of silently swallowing)
            # and still record the frame without detections.
            logging.getLogger(__name__).debug("Detection failed", exc_info=True)

        return detections

    def capture_now(self) -> Optional[RecordedFrame]:
        """
        Manually capture a frame right now.

        Useful for capturing at specific moments (e.g., before/after actions).

        Returns:
            Captured frame or None if capture failed
        """
        if not self._has_camera or not self._recording:
            return None

        prev_count = len(self._frames)
        self._capture_frame()

        # _capture_frame may skip empty frames, so verify one was appended.
        if len(self._frames) > prev_count:
            return self._frames[-1]
        return None

    def get_frames_in_range(
        self,
        start_time: float,
        end_time: float,
    ) -> List[RecordedFrame]:
        """Get frames whose relative_time lies within [start_time, end_time]."""
        return [
            f for f in self._frames
            if start_time <= f.relative_time <= end_time
        ]

    def get_frames_with_detections(
        self,
        label: Optional[str] = None,
        min_confidence: float = 0.0,
    ) -> List[RecordedFrame]:
        """
        Get frames that have detections matching criteria.

        Args:
            label: If given, only detections with this exact label count
            min_confidence: Minimum detection confidence to count
        """
        result = []
        for frame in self._frames:
            matching = [
                d for d in frame.detections
                if d.confidence >= min_confidence
                and (label is None or d.label == label)
            ]
            if matching:
                result.append(frame)
        return result

    def get_metadata(self) -> RecordingMetadata:
        """Get recording metadata including visual stats (encoded as tags)."""
        metadata = super().get_metadata()
        # Add visual metadata to tags
        if self._has_camera:
            metadata.tags = list(metadata.tags) + [
                f"frames:{len(self._frames)}",
                f"detections:{self.detection_count}",
            ]
        return metadata

    def save_frames(self, path: str, include_data: bool = True) -> None:
        """
        Save frames to a separate file.

        Args:
            path: File path for frames
            include_data: Whether to include raw image data (base64)
        """
        data = {
            "version": "1.0",
            "recording_id": self.get_metadata().id,
            "frame_count": len(self._frames),
            "detection_count": self.detection_count,
            "frames": [f.to_dict(include_data=include_data) for f in self._frames],
        }

        # Pretty-print only when image payloads are excluded (keeps big files small).
        with open(path, 'w') as f:
            json.dump(data, f, indent=2 if not include_data else None)

    def to_dict(self) -> dict:
        """Convert to dictionary including frame summary."""
        base = super().to_dict()

        # Add visual summary (not full frames - too large for API)
        base["visual"] = {
            "has_camera": self._has_camera,
            "frame_count": len(self._frames),
            "detection_count": self.detection_count,
            "capture_fps": self._capture_fps,
            "detection_summary": self._get_detection_summary(),
        }

        return base

    def _get_detection_summary(self) -> Dict[str, int]:
        """Get count of each detection label across all frames."""
        counts: Dict[str, int] = {}
        for frame in self._frames:
            for det in frame.detections:
                counts[det.label] = counts.get(det.label, 0) + 1
        return counts

    def summary(self) -> str:
        """Get human-readable summary including visual data."""
        base = super().summary()

        if not self._has_camera:
            return base + "\n\nVisual capture: Not available (no camera)"

        lines = [
            base,
            "",
            "Visual capture:",
            f"  Frames captured: {len(self._frames)}",
            f"  Total detections: {self.detection_count}",
        ]

        # Detection breakdown, most frequent label first
        summary = self._get_detection_summary()
        if summary:
            lines.append("  Detection breakdown:")
            for label, count in sorted(summary.items(), key=lambda x: -x[1]):
                lines.append(f"    - {label}: {count}")

        return "\n".join(lines)
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def load_frames(path: str) -> List[RecordedFrame]:
    """
    Load recorded frames from a file previously written by save_frames().

    Args:
        path: Path to frames file

    Returns:
        List of RecordedFrame objects (empty when the file has no "frames" key)
    """
    payload = json.loads(Path(path).read_text())
    return [RecordedFrame.from_dict(entry) for entry in payload.get("frames", [])]
|
ate/recording/wrapper.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Recording wrapper for automatic telemetry capture.
|
|
3
|
+
|
|
4
|
+
Provides a simpler API than RecordingSession for cases where you
|
|
5
|
+
want to wrap a driver and record everything automatically.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Optional
|
|
9
|
+
from ate.interfaces import RobotInterface
|
|
10
|
+
from .session import RecordingSession
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class RecordingWrapper:
    """
    Transparent driver proxy that records every interaction.

    Where RecordingSession is a scoped context manager, RecordingWrapper
    keeps a persistent recording running until explicitly stopped, while
    forwarding every unknown attribute to the wrapped driver.

    Usage:
        # Wrap a driver
        raw_driver = MechDogDriver(port="/dev/...")
        driver = RecordingWrapper(raw_driver, name="session_01")

        # Use driver normally - all calls are recorded
        driver.connect()
        driver.stand()
        driver.walk(Vector3.forward(), speed=0.3)
        driver.stop()
        driver.disconnect()

        # Save the recording
        driver.save_recording("session_01.demonstration")
    """

    def __init__(
        self,
        driver: RobotInterface,
        name: str = "recording",
        description: Optional[str] = None,
        auto_start: bool = True,
    ):
        """
        Wrap a driver in a recording proxy.

        Args:
            driver: Robot driver to wrap
            name: Recording name
            description: Optional description
            auto_start: Start recording immediately
        """
        self._driver = driver
        self._session = RecordingSession(
            driver,
            name=name,
            description=description,
        )
        if auto_start:
            self._session.start()

    def __getattr__(self, name: str):
        """Delegate anything not defined on the wrapper to the wrapped driver."""
        return getattr(self._driver, name)

    @property
    def is_recording(self) -> bool:
        """True while the underlying session is recording."""
        return self._session.is_recording

    @property
    def recording_duration(self) -> Optional[float]:
        """Recording duration in seconds."""
        return self._session.duration

    @property
    def call_count(self) -> int:
        """Number of driver calls recorded so far."""
        return len(self._session.calls)

    def start_recording(self) -> None:
        """Start the recording if it is not already running."""
        self._session.start()

    def stop_recording(self) -> None:
        """Stop the recording."""
        self._session.stop()

    def save_recording(self, path: str) -> None:
        """Persist the recording to the given file path."""
        self._session.save(path)

    def get_recording_summary(self) -> str:
        """Human-readable summary of the recording."""
        return self._session.summary()