foodforthought-cli 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +1 -1
- ate/bridge_server.py +622 -0
- ate/cli.py +2625 -242
- ate/compatibility.py +580 -0
- ate/generators/__init__.py +19 -0
- ate/generators/docker_generator.py +461 -0
- ate/generators/hardware_config.py +469 -0
- ate/generators/ros2_generator.py +617 -0
- ate/generators/skill_generator.py +783 -0
- ate/marketplace.py +524 -0
- ate/mcp_server.py +1341 -107
- ate/primitives.py +1016 -0
- ate/robot_setup.py +2222 -0
- ate/skill_schema.py +537 -0
- ate/telemetry/__init__.py +33 -0
- ate/telemetry/cli.py +455 -0
- ate/telemetry/collector.py +444 -0
- ate/telemetry/context.py +318 -0
- ate/telemetry/fleet_agent.py +419 -0
- ate/telemetry/formats/__init__.py +18 -0
- ate/telemetry/formats/hdf5_serializer.py +503 -0
- ate/telemetry/formats/mcap_serializer.py +457 -0
- ate/telemetry/types.py +334 -0
- foodforthought_cli-0.2.3.dist-info/METADATA +300 -0
- foodforthought_cli-0.2.3.dist-info/RECORD +44 -0
- foodforthought_cli-0.2.3.dist-info/top_level.txt +6 -0
- mechdog_labeled/__init__.py +3 -0
- mechdog_labeled/primitives.py +113 -0
- mechdog_labeled/servo_map.py +209 -0
- mechdog_output/__init__.py +3 -0
- mechdog_output/primitives.py +59 -0
- mechdog_output/servo_map.py +203 -0
- test_autodetect/__init__.py +3 -0
- test_autodetect/primitives.py +113 -0
- test_autodetect/servo_map.py +209 -0
- test_full_auto/__init__.py +3 -0
- test_full_auto/primitives.py +113 -0
- test_full_auto/servo_map.py +209 -0
- test_smart_detect/__init__.py +3 -0
- test_smart_detect/primitives.py +113 -0
- test_smart_detect/servo_map.py +209 -0
- foodforthought_cli-0.2.1.dist-info/METADATA +0 -151
- foodforthought_cli-0.2.1.dist-info/RECORD +0 -9
- foodforthought_cli-0.2.1.dist-info/top_level.txt +0 -1
- {foodforthought_cli-0.2.1.dist-info → foodforthought_cli-0.2.3.dist-info}/WHEEL +0 -0
- {foodforthought_cli-0.2.1.dist-info → foodforthought_cli-0.2.3.dist-info}/entry_points.txt +0 -0
ate/telemetry/collector.py
@@ -0,0 +1,444 @@
"""
Telemetry Collector for FoodforThought

Core class for collecting robot telemetry data from simulations and hardware,
with automatic buffering and upload to FoodforThought.
"""

import asyncio
import json
import os
import sys
import time
import threading
from datetime import datetime
from typing import Dict, List, Optional, Any
from uuid import uuid4

import requests

from .types import (
    TelemetrySource,
    TelemetryBuffer,
    TrajectoryFrame,
    TrajectoryMetadata,
    TrajectoryRecording,
    ExecutionEvent,
    EventType,
    Pose,
    Contact,
)


class TelemetryCollector:
    """
    Collector for robot telemetry data.

    Handles buffering, batching, and uploading telemetry to FoodforThought.
    Supports both synchronous recording and automatic background uploads.

    Example usage:
        collector = TelemetryCollector("robot-123")
        collector.start_recording(skill_id="pick_and_place")

        while executing:
            collector.record_frame(
                joint_positions={"joint1": 0.5, "joint2": 1.2},
                joint_velocities={"joint1": 0.1, "joint2": -0.2},
            )

        recording = collector.stop_recording(success=True)
    """

    def __init__(
        self,
        robot_id: str,
        api_url: str = None,
        api_key: str = None,
        buffer_size: int = 36000,  # 10 minutes at 60Hz
        auto_upload: bool = True,
        upload_interval: float = 60.0,  # Upload every 60 seconds
        project_id: str = None,
    ):
        """
        Initialize the telemetry collector.

        Args:
            robot_id: Unique identifier for the robot
            api_url: FoodforThought API URL (defaults to env var or production)
            api_key: API key for authentication (defaults to env var)
            buffer_size: Maximum number of frames to buffer
            auto_upload: Whether to automatically upload completed recordings
            upload_interval: Interval for background uploads (seconds)
            project_id: Default project ID for artifact creation
        """
        self.robot_id = robot_id
        self.api_url = api_url or os.getenv("FFT_API_URL", "https://kindly.fyi/api")
        self.api_key = api_key or os.getenv("FFT_API_KEY") or os.getenv("ATE_API_KEY")
        self.project_id = project_id or os.getenv("FFT_PROJECT_ID")

        self.buffer = TelemetryBuffer(max_size=buffer_size)
        self.auto_upload = auto_upload
        self.upload_interval = upload_interval

        self._recording = False
        self._current_recording: Optional[TrajectoryRecording] = None
        self._upload_thread: Optional[threading.Thread] = None
        self._stop_upload_thread = threading.Event()

        # Validate API key
        if not self.api_key and auto_upload:
            print("Warning: No API key found. Set FFT_API_KEY or ATE_API_KEY environment variable.",
                  file=sys.stderr)

    def start_recording(
        self,
        skill_id: Optional[str] = None,
        skill_params: Optional[Dict[str, Any]] = None,
        source: str = "hardware",
        metadata: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Start a new trajectory recording.

        Args:
            skill_id: ID of the skill being executed (optional)
            skill_params: Parameters passed to the skill
            source: Source of telemetry ('simulation', 'hardware', 'fleet')
            metadata: Additional metadata for the recording

        Returns:
            recording_id: Unique ID for this recording

        Raises:
            RuntimeError: If already recording
        """
        if self._recording:
            raise RuntimeError("Already recording. Call stop_recording() first.")

        recording_id = str(uuid4())

        # Parse source
        try:
            telemetry_source = TelemetrySource(source)
        except ValueError:
            telemetry_source = TelemetrySource.HARDWARE

        # Build metadata
        recording_metadata = TrajectoryMetadata(
            skill_params=skill_params,
        )

        # Merge additional metadata
        if metadata:
            if "environmentId" in metadata:
                recording_metadata.environment_id = metadata["environmentId"]
            if "robotVersion" in metadata:
                recording_metadata.robot_version = metadata["robotVersion"]
            if "urdfHash" in metadata:
                recording_metadata.urdf_hash = metadata["urdfHash"]
            if "tags" in metadata:
                recording_metadata.tags = metadata["tags"]
            if "jointNames" in metadata:
                recording_metadata.joint_names = metadata["jointNames"]

        self._current_recording = TrajectoryRecording(
            id=recording_id,
            robot_id=self.robot_id,
            skill_id=skill_id,
            source=telemetry_source,
            start_time=datetime.utcnow(),
            frames=[],
            events=[],
            metadata=recording_metadata,
        )

        self._recording = True
        self.buffer.clear()  # Clear any previous data

        # Log start event
        self.log_event(EventType.SKILL_START, {
            "skillId": skill_id,
            "skillParams": skill_params,
        })

        return recording_id

    def record_frame(
        self,
        joint_positions: Dict[str, float],
        joint_velocities: Optional[Dict[str, float]] = None,
        joint_torques: Optional[Dict[str, float]] = None,
        joint_accelerations: Optional[Dict[str, float]] = None,
        end_effector_pose: Optional[Pose] = None,
        contacts: Optional[List[Contact]] = None,
        sensor_readings: Optional[Dict[str, float]] = None,
        control_inputs: Optional[Dict[str, float]] = None,
        timestamp: Optional[float] = None,
    ) -> None:
        """
        Record a single frame of trajectory data.

        Args:
            joint_positions: Joint name -> position (radians or meters)
            joint_velocities: Joint name -> velocity
            joint_torques: Joint name -> torque
            joint_accelerations: Joint name -> acceleration
            end_effector_pose: Pose of the end effector
            contacts: List of contacts detected this frame
            sensor_readings: Sensor name -> value
            control_inputs: Control signal name -> value
            timestamp: Override timestamp (defaults to time since recording start)

        Raises:
            RuntimeError: If not currently recording
        """
        if not self._recording or not self._current_recording:
            raise RuntimeError("Not recording. Call start_recording() first.")

        # Calculate timestamp from recording start
        if timestamp is None:
            elapsed = (datetime.utcnow() - self._current_recording.start_time).total_seconds()
        else:
            elapsed = timestamp

        # Auto-detect joint names on first frame
        if not self._current_recording.metadata.joint_names:
            self._current_recording.metadata.joint_names = list(joint_positions.keys())

        frame = TrajectoryFrame(
            timestamp=elapsed,
            joint_positions=joint_positions,
            joint_velocities=joint_velocities or {},
            joint_torques=joint_torques or {},
            joint_accelerations=joint_accelerations or {},
            end_effector_pose=end_effector_pose,
            contacts=contacts or [],
            sensor_readings=sensor_readings or {},
            control_inputs=control_inputs or {},
        )

        self._current_recording.frames.append(frame)
        self.buffer.add(frame)

    def log_event(
        self,
        event_type: EventType,
        data: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Log an execution event.

        Args:
            event_type: Type of event (EventType enum or string)
            data: Additional event data
        """
        if not self._recording or not self._current_recording:
            # Allow logging events even when not recording for debugging
            return

        # Calculate timestamp
        elapsed = (datetime.utcnow() - self._current_recording.start_time).total_seconds()

        # Parse event type
        if isinstance(event_type, str):
            try:
                event_type = EventType(event_type)
            except ValueError:
                pass  # Keep as string if not in enum

        event = ExecutionEvent(
            timestamp=elapsed,
            event_type=event_type,
            data=data or {},
        )

        self._current_recording.events.append(event)
        self.buffer.add_event(event)

    def stop_recording(self, success: bool = True) -> TrajectoryRecording:
        """
        Stop recording and finalize the trajectory.

        Args:
            success: Whether the execution was successful

        Returns:
            The completed TrajectoryRecording

        Raises:
            RuntimeError: If not currently recording
        """
        if not self._recording or not self._current_recording:
            raise RuntimeError("Not recording.")

        # Finalize recording
        self._current_recording.end_time = datetime.utcnow()
        self._current_recording.success = success

        # Calculate metadata
        duration = (
            self._current_recording.end_time - self._current_recording.start_time
        ).total_seconds()

        self._current_recording.metadata.duration = duration
        self._current_recording.metadata.total_frames = len(self._current_recording.frames)

        if duration > 0 and self._current_recording.metadata.total_frames > 0:
            self._current_recording.metadata.frame_rate = (
                self._current_recording.metadata.total_frames / duration
            )

        # Log end event
        self.log_event(EventType.SKILL_END, {
            "success": success,
            "duration": duration,
            "frameCount": self._current_recording.metadata.total_frames,
        })

        recording = self._current_recording
        self._current_recording = None
        self._recording = False

        # Upload if auto_upload is enabled
        if self.auto_upload and self.api_key:
            self._upload_recording_sync(recording)

        return recording

    def _upload_recording_sync(self, recording: TrajectoryRecording) -> Optional[Dict]:
        """Synchronously upload a recording to FoodforThought."""
        try:
            # Serialize to JSON for upload
            data = self._serialize_recording(recording)

            headers = {
                "Content-Type": "application/json",
            }
            if self.api_key:
                headers["Authorization"] = f"Bearer {self.api_key}"

            # Upload telemetry data
            response = requests.post(
                f"{self.api_url}/telemetry/ingest",
                headers=headers,
                json={
                    "recording": data,
                    "projectId": self.project_id,
                    "robotId": recording.robot_id,
                    "skillId": recording.skill_id,
                },
                timeout=30,
            )
            response.raise_for_status()

            result = response.json()
            print(f"Uploaded recording {recording.id}: {result.get('artifactId', 'unknown')}")
            return result

        except requests.exceptions.RequestException as e:
            print(f"Failed to upload recording: {e}", file=sys.stderr)
            return None

    def _serialize_recording(self, recording: TrajectoryRecording) -> Dict[str, Any]:
        """Serialize recording to JSON-compatible dict."""
        return {
            "id": recording.id,
            "robotId": recording.robot_id,
            "skillId": recording.skill_id,
            "source": recording.source.value if isinstance(recording.source, TelemetrySource) else recording.source,
            "startTime": recording.start_time.isoformat() if recording.start_time else None,
            "endTime": recording.end_time.isoformat() if recording.end_time else None,
            "success": recording.success,
            "metadata": recording.metadata.to_dict(),
            "frames": [f.to_dict() for f in recording.frames],
            "events": [e.to_dict() for e in recording.events],
        }

    def export_to_json(self, recording: TrajectoryRecording) -> str:
        """Export recording to JSON string."""
        return json.dumps(self._serialize_recording(recording), indent=2)

    def export_to_file(self, recording: TrajectoryRecording, filepath: str, format: str = "json") -> None:
        """
        Export recording to file.

        Args:
            recording: The recording to export
            filepath: Output file path
            format: Export format ('json', 'mcap', 'hdf5')
        """
        if format == "json":
            with open(filepath, "w") as f:
                f.write(self.export_to_json(recording))
        elif format == "mcap":
            from .formats.mcap_serializer import serialize_to_mcap
            data = serialize_to_mcap(recording)
            with open(filepath, "wb") as f:
                f.write(data)
        elif format == "hdf5":
            from .formats.hdf5_serializer import serialize_to_hdf5
            data = serialize_to_hdf5(recording)
            with open(filepath, "wb") as f:
                f.write(data)
        else:
            raise ValueError(f"Unsupported format: {format}")

        print(f"Exported recording to {filepath}")

    @property
    def is_recording(self) -> bool:
        """Whether currently recording."""
        return self._recording

    @property
    def current_recording_id(self) -> Optional[str]:
        """ID of current recording, if any."""
        return self._current_recording.id if self._current_recording else None

    @property
    def frame_count(self) -> int:
        """Number of frames in current recording."""
        return len(self._current_recording.frames) if self._current_recording else 0

    def start_background_upload(self) -> None:
        """Start background thread for periodic uploads."""
        if self._upload_thread and self._upload_thread.is_alive():
            return

        self._stop_upload_thread.clear()
        self._upload_thread = threading.Thread(target=self._upload_loop, daemon=True)
        self._upload_thread.start()

    def stop_background_upload(self) -> None:
        """Stop background upload thread."""
        self._stop_upload_thread.set()
        if self._upload_thread:
            self._upload_thread.join(timeout=5)

    def _upload_loop(self) -> None:
        """Background upload loop."""
        while not self._stop_upload_thread.is_set():
            # Wait for interval or stop signal
            self._stop_upload_thread.wait(timeout=self.upload_interval)

            if self._stop_upload_thread.is_set():
                break

            # Upload buffered data if not currently recording
            # (during recording, data is uploaded on stop_recording)
            if not self._recording and not self.buffer.is_empty():
                frames, events = self.buffer.flush()
                if frames:
                    # Create a partial recording for upload
                    partial_recording = TrajectoryRecording(
                        id=str(uuid4()),
                        robot_id=self.robot_id,
                        source=TelemetrySource.FLEET,
                        start_time=datetime.utcnow(),
                        end_time=datetime.utcnow(),
                        success=True,
                        frames=frames,
                        events=events,
                    )
                    self._upload_recording_sync(partial_recording)