foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,378 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Demonstration file format for cross-robot skill transfer.
|
|
3
|
+
|
|
4
|
+
A demonstration is a recorded sequence of interface calls that can be:
|
|
5
|
+
- Loaded and inspected
|
|
6
|
+
- Replayed on compatible robots
|
|
7
|
+
- Filtered by interface
|
|
8
|
+
- Labeled with task segments
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import time
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from typing import List, Optional, Dict, Any, Type
|
|
16
|
+
|
|
17
|
+
from ate.interfaces import RobotInterface, ActionResult
|
|
18
|
+
from .session import RecordedCall, RecordingMetadata
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
class TaskSegment:
    """A labeled segment within a demonstration.

    Segments mark a time range of a recording with a semantic task label
    (e.g. "approaching", "grasping") so a demonstration can be sliced,
    inspected, and replayed per task.
    """

    start_time: float  # Segment start, relative time (seconds)
    end_time: float  # Segment end, relative time (seconds)
    label: str  # Task label (e.g., "approaching", "grasping")
    description: Optional[str] = None  # Optional free-form description
    confidence: float = 1.0  # Labeler confidence (0-1)

    def to_dict(self) -> dict:
        """Serialize to a JSON-compatible dictionary."""
        return {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "label": self.label,
            "description": self.description,
            "confidence": self.confidence,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "TaskSegment":
        """Create a TaskSegment from a dictionary.

        Unknown keys are ignored so that files written by newer versions
        of the demonstration format (with extra per-segment fields) still
        load instead of raising TypeError.
        """
        from dataclasses import fields

        known = {f.name for f in fields(cls)}
        return cls(**{k: v for k, v in data.items() if k in known})
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class Demonstration:
    """
    A recorded demonstration that can be replayed on compatible robots.

    Demonstrations are the transferable unit of robot intelligence.
    They capture what the robot did (interface calls) without how it did it
    (hardware-specific commands).
    """

    def __init__(
        self,
        metadata: RecordingMetadata,
        calls: List[RecordedCall],
        segments: Optional[List[TaskSegment]] = None,
    ):
        """
        Initialize a demonstration.

        Args:
            metadata: Recording metadata
            calls: List of recorded calls
            segments: Optional labeled task segments
        """
        self.metadata = metadata
        self.calls = calls
        self.segments = segments or []

    @property
    def duration(self) -> float:
        """Get total duration in seconds (0.0 for an empty demonstration)."""
        if not self.calls:
            return 0.0
        # max() instead of calls[-1] so the duration is correct even if the
        # call list is not strictly time-ordered; identical for sorted input.
        return max(c.relative_time for c in self.calls)

    @property
    def robot_archetype(self) -> str:
        """Get the robot archetype (quadruped, biped, etc.)."""
        return self.metadata.robot_archetype

    @property
    def capabilities(self) -> List[str]:
        """Get required capabilities."""
        return self.metadata.capabilities

    def filter(
        self,
        interface: Optional[str] = None,
        method: Optional[str] = None,
        start_time: Optional[float] = None,
        end_time: Optional[float] = None,
    ) -> List[RecordedCall]:
        """
        Filter calls by interface, method, or time range.

        Args:
            interface: Filter by interface name
            method: Filter by method name
            start_time: Filter by minimum relative time (inclusive)
            end_time: Filter by maximum relative time (inclusive)

        Returns:
            Filtered list of calls
        """
        result = self.calls

        if interface:
            result = [c for c in result if c.interface == interface]
        if method:
            result = [c for c in result if c.method == method]
        if start_time is not None:
            result = [c for c in result if c.relative_time >= start_time]
        if end_time is not None:
            result = [c for c in result if c.relative_time <= end_time]

        return result

    def get_calls_in_segment(self, segment: TaskSegment) -> List[RecordedCall]:
        """Get calls within a labeled segment."""
        return self.filter(
            start_time=segment.start_time,
            end_time=segment.end_time,
        )

    def get_interfaces_used(self) -> List[str]:
        """Get list of interfaces used in this demonstration.

        Sorted for deterministic output (a raw set order varies per run).
        """
        return sorted({c.interface for c in self.calls})

    def get_methods_used(self) -> Dict[str, List[str]]:
        """Get methods used per interface (method lists sorted for determinism)."""
        result: Dict[str, set] = {}
        for call in self.calls:
            if call.interface not in result:
                result[call.interface] = set()
            result[call.interface].add(call.method)
        return {k: sorted(v) for k, v in result.items()}

    def is_compatible(self, driver: RobotInterface) -> bool:
        """
        Check if this demonstration is compatible with a driver.

        Compatibility means the driver implements all interfaces
        used in the demonstration.
        """
        driver_interfaces = self._get_driver_interfaces(driver)
        required = set(self.get_interfaces_used())
        return required.issubset(driver_interfaces)

    def _get_driver_interfaces(self, driver: RobotInterface) -> set:
        """Get interface names implemented by a driver.

        Import is local to avoid a circular import at module load time --
        TODO confirm; mirrors the deserialization helpers below.
        """
        from ate.interfaces import (
            RobotInterface, SafetyInterface,
            QuadrupedLocomotion, BipedLocomotion, WheeledLocomotion, AerialLocomotion,
            ArmInterface, GripperInterface, DualArmInterface,
            CameraInterface, DepthCameraInterface, LidarInterface, IMUInterface, ForceTorqueInterface,
            BodyPoseInterface,
        )

        all_interfaces = {
            "RobotInterface": RobotInterface,
            "SafetyInterface": SafetyInterface,
            "QuadrupedLocomotion": QuadrupedLocomotion,
            "BipedLocomotion": BipedLocomotion,
            "WheeledLocomotion": WheeledLocomotion,
            "AerialLocomotion": AerialLocomotion,
            "ArmInterface": ArmInterface,
            "GripperInterface": GripperInterface,
            "DualArmInterface": DualArmInterface,
            "CameraInterface": CameraInterface,
            "DepthCameraInterface": DepthCameraInterface,
            "LidarInterface": LidarInterface,
            "IMUInterface": IMUInterface,
            "ForceTorqueInterface": ForceTorqueInterface,
            "BodyPoseInterface": BodyPoseInterface,
        }

        return {
            name for name, cls in all_interfaces.items()
            if isinstance(driver, cls)
        }

    def replay(
        self,
        driver: RobotInterface,
        speed: float = 1.0,
        skip_perception: bool = True,
        dry_run: bool = False,
    ) -> List[ActionResult]:
        """
        Replay this demonstration on a compatible driver.

        Args:
            driver: Robot driver to replay on
            speed: Playback speed (1.0 = realtime, 2.0 = 2x speed)
            skip_perception: Skip perception calls (get_image, etc.)
            dry_run: If True, don't actually execute commands (also skips
                inter-call sleeps)

        Returns:
            List of ActionResults from each call

        Raises:
            ValueError: If the driver does not implement all interfaces
                used by this demonstration.
        """
        if not self.is_compatible(driver):
            missing = set(self.get_interfaces_used()) - self._get_driver_interfaces(driver)
            raise ValueError(f"Driver missing required interfaces: {missing}")

        # Filter perception calls if requested: replays normally re-sense
        # live rather than re-issuing recorded sensor reads.
        calls = self.calls
        if skip_perception:
            perception_methods = {
                "get_image", "get_depth_image", "get_point_cloud",
                "get_scan", "get_reading", "get_orientation",
                "get_force", "get_torque",
            }
            calls = [c for c in calls if c.method not in perception_methods]

        results = []
        prev_time = 0.0

        for call in calls:
            # Wait for timing (unless first call)
            if call.relative_time > prev_time:
                wait_time = (call.relative_time - prev_time) / speed
                if not dry_run:
                    time.sleep(wait_time)

            prev_time = call.relative_time

            if dry_run:
                results.append(ActionResult.ok(f"[DRY RUN] {call.method}"))
                continue

            # Execute the call; a missing method is recorded as an error
            # result rather than aborting the whole replay.
            method = getattr(driver, call.method, None)
            if method is None:
                results.append(ActionResult.error(f"Method not found: {call.method}"))
                continue

            try:
                # Deserialize arguments back into interface types
                args = self._deserialize_args(call.args)
                kwargs = self._deserialize_kwargs(call.kwargs)
                result = method(*args, **kwargs)
                results.append(result if isinstance(result, ActionResult) else ActionResult.ok())
            except Exception as e:
                # Keep replaying remaining calls; the failure is reported
                # in the returned results list.
                results.append(ActionResult.error(str(e)))

        return results

    def _deserialize_args(self, args: tuple) -> tuple:
        """Deserialize positional arguments back to interface types."""
        # (Removed a dead local import of Vector3/Quaternion/Pose -- only
        # _deserialize_value actually needs those names.)
        return tuple(self._deserialize_value(arg) for arg in args)

    def _deserialize_kwargs(self, kwargs: dict) -> dict:
        """Deserialize keyword arguments."""
        return {k: self._deserialize_value(v) for k, v in kwargs.items()}

    def _deserialize_value(self, value: Any) -> Any:
        """Deserialize a single value back to interface type.

        Recognizes dicts tagged with "__type__" (Vector3, Quaternion, Pose),
        recurses into lists, and returns everything else unchanged.
        """
        from ate.interfaces import Vector3, Quaternion, Pose

        if isinstance(value, dict) and "__type__" in value:
            type_name = value["__type__"]
            if type_name == "Vector3":
                return Vector3(value["x"], value["y"], value["z"])
            if type_name == "Quaternion":
                return Quaternion(value["x"], value["y"], value["z"], value["w"])
            if type_name == "Pose":
                pos = self._deserialize_value(value["position"])
                ori = self._deserialize_value(value["orientation"])
                return Pose(pos, ori)
            # Return dict as-is for unknown types
            return value

        if isinstance(value, list):
            return [self._deserialize_value(v) for v in value]

        return value

    def add_segment(self, segment: TaskSegment) -> None:
        """Add a labeled task segment."""
        self.segments.append(segment)
        # Keep segments sorted by start time
        self.segments.sort(key=lambda s: s.start_time)

    def label_range(
        self,
        start_time: float,
        end_time: float,
        label: str,
        description: Optional[str] = None,
    ) -> TaskSegment:
        """
        Label a time range with a task.

        Args:
            start_time: Start of segment (relative time)
            end_time: End of segment (relative time)
            label: Task label
            description: Optional description

        Returns:
            Created TaskSegment
        """
        segment = TaskSegment(
            start_time=start_time,
            end_time=end_time,
            label=label,
            description=description,
        )
        self.add_segment(segment)
        return segment

    def to_dict(self) -> dict:
        """Convert to dictionary for serialization."""
        return {
            "version": "1.0",
            "metadata": self.metadata.to_dict(),
            "calls": [c.to_dict() for c in self.calls],
            "segments": [s.to_dict() for s in self.segments],
        }

    def save(self, path: str) -> None:
        """Save to file as indented JSON."""
        with open(path, 'w') as f:
            json.dump(self.to_dict(), f, indent=2)

    @classmethod
    def from_dict(cls, data: dict) -> "Demonstration":
        """Create from dictionary (inverse of to_dict; "segments" optional)."""
        metadata = RecordingMetadata.from_dict(data["metadata"])
        calls = [RecordedCall.from_dict(c) for c in data["calls"]]
        segments = [TaskSegment.from_dict(s) for s in data.get("segments", [])]
        return cls(metadata, calls, segments)

    def summary(self) -> str:
        """Get human-readable summary."""
        lines = [
            f"Demonstration: {self.metadata.name}",
            f"Robot: {self.metadata.robot_name} ({self.metadata.robot_archetype})",
            f"Duration: {self.duration:.2f}s",
            f"Total calls: {len(self.calls)}",
            f"Labeled segments: {len(self.segments)}",
            "",
        ]

        if self.segments:
            lines.append("Segments:")
            for seg in self.segments:
                lines.append(f"  [{seg.start_time:.2f}s - {seg.end_time:.2f}s] {seg.label}")

        lines.append("")
        lines.append("Interfaces used:")
        for interface, methods in self.get_methods_used().items():
            lines.append(f"  {interface}: {', '.join(methods)}")

        return "\n".join(lines)
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
def load_demonstration(path: str) -> Demonstration:
    """
    Load a demonstration from file.

    Args:
        path: Path to .demonstration file

    Returns:
        Demonstration object
    """
    raw = Path(path).read_text()
    return Demonstration.from_dict(json.loads(raw))
|