foodforthought-cli 0.2.1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. ate/__init__.py +1 -1
  2. ate/bridge_server.py +622 -0
  3. ate/cli.py +2625 -242
  4. ate/compatibility.py +580 -0
  5. ate/generators/__init__.py +19 -0
  6. ate/generators/docker_generator.py +461 -0
  7. ate/generators/hardware_config.py +469 -0
  8. ate/generators/ros2_generator.py +617 -0
  9. ate/generators/skill_generator.py +783 -0
  10. ate/marketplace.py +524 -0
  11. ate/mcp_server.py +1341 -107
  12. ate/primitives.py +1016 -0
  13. ate/robot_setup.py +2222 -0
  14. ate/skill_schema.py +537 -0
  15. ate/telemetry/__init__.py +33 -0
  16. ate/telemetry/cli.py +455 -0
  17. ate/telemetry/collector.py +444 -0
  18. ate/telemetry/context.py +318 -0
  19. ate/telemetry/fleet_agent.py +419 -0
  20. ate/telemetry/formats/__init__.py +18 -0
  21. ate/telemetry/formats/hdf5_serializer.py +503 -0
  22. ate/telemetry/formats/mcap_serializer.py +457 -0
  23. ate/telemetry/types.py +334 -0
  24. foodforthought_cli-0.2.4.dist-info/METADATA +300 -0
  25. foodforthought_cli-0.2.4.dist-info/RECORD +44 -0
  26. foodforthought_cli-0.2.4.dist-info/top_level.txt +6 -0
  27. mechdog_labeled/__init__.py +3 -0
  28. mechdog_labeled/primitives.py +113 -0
  29. mechdog_labeled/servo_map.py +209 -0
  30. mechdog_output/__init__.py +3 -0
  31. mechdog_output/primitives.py +59 -0
  32. mechdog_output/servo_map.py +203 -0
  33. test_autodetect/__init__.py +3 -0
  34. test_autodetect/primitives.py +113 -0
  35. test_autodetect/servo_map.py +209 -0
  36. test_full_auto/__init__.py +3 -0
  37. test_full_auto/primitives.py +113 -0
  38. test_full_auto/servo_map.py +209 -0
  39. test_smart_detect/__init__.py +3 -0
  40. test_smart_detect/primitives.py +113 -0
  41. test_smart_detect/servo_map.py +209 -0
  42. foodforthought_cli-0.2.1.dist-info/METADATA +0 -151
  43. foodforthought_cli-0.2.1.dist-info/RECORD +0 -9
  44. foodforthought_cli-0.2.1.dist-info/top_level.txt +0 -1
  45. {foodforthought_cli-0.2.1.dist-info → foodforthought_cli-0.2.4.dist-info}/WHEEL +0 -0
  46. {foodforthought_cli-0.2.1.dist-info → foodforthought_cli-0.2.4.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,503 @@
1
+ """
2
+ HDF5 Serializer for Telemetry Data
3
+
4
+ Serializes trajectory recordings to HDF5 format, which is optimized
5
+ for machine learning training pipelines. Stores data as NumPy arrays
6
+ for efficient loading and processing.
7
+
8
+ Requires: h5py, numpy
9
+ """
10
+
11
+ import io
12
+ import json
13
+ from typing import Dict, Any, List, Optional
14
+
15
+ from ..types import TrajectoryRecording, TrajectoryFrame, TrajectoryMetadata, TelemetrySource
16
+
17
+
18
+ def _check_dependencies():
19
+ """Check if required dependencies are available."""
20
+ try:
21
+ import h5py
22
+ import numpy as np
23
+ return True
24
+ except ImportError:
25
+ return False
26
+
27
+
28
def serialize_to_hdf5(recording: TrajectoryRecording) -> bytes:
    """
    Serialize trajectory recording to HDF5 format.

    Creates an HDF5 file with:
    - /timestamps: Time array (N,)
    - /joint_positions: Position array (N, num_joints)
    - /joint_velocities: Velocity array (N, num_joints)
    - /joint_torques: Torque array (N, num_joints)
    - /joint_accelerations: Acceleration array (N, num_joints)
    - /joint_names: Joint name strings
    - /end_effector_poses: EE poses (N, 7) [x,y,z,qx,qy,qz,qw] (if available)
    - /control_names, /control_inputs: Control signals (if available)
    - /events/*: Event data groups
    - /contacts/*: Contact data (if any frame reports contacts)
    - Metadata stored as attributes on root group

    An empty recording (no frames) produces a file containing metadata
    attributes only.

    Args:
        recording: The trajectory recording to serialize

    Returns:
        HDF5 file as bytes

    Raises:
        ImportError: If h5py or numpy is not installed
    """
    try:
        import h5py
        import numpy as np
    except ImportError:
        raise ImportError(
            "HDF5 serialization requires h5py and numpy. "
            "Install with: pip install h5py numpy"
        )

    buffer = io.BytesIO()

    with h5py.File(buffer, "w") as f:
        # Store metadata as root attributes.
        f.attrs["recording_id"] = recording.id
        f.attrs["robot_id"] = recording.robot_id
        f.attrs["skill_id"] = recording.skill_id or ""
        f.attrs["source"] = (
            recording.source.value
            if isinstance(recording.source, TelemetrySource)
            else str(recording.source)
        )
        f.attrs["success"] = recording.success
        f.attrs["duration"] = recording.metadata.duration
        f.attrs["frame_rate"] = recording.metadata.frame_rate
        f.attrs["total_frames"] = recording.metadata.total_frames

        if recording.start_time:
            f.attrs["start_time"] = recording.start_time.isoformat()
        if recording.end_time:
            f.attrs["end_time"] = recording.end_time.isoformat()

        # Nested structures are stored as JSON strings in attributes.
        if recording.metadata.skill_params:
            f.attrs["skill_params"] = json.dumps(recording.metadata.skill_params)
        if recording.metadata.tags:
            f.attrs["tags"] = json.dumps(recording.metadata.tags)

        if recording.frames:
            # Joint layout is taken from the first frame; later frames are
            # assumed to report the same joints (missing values become 0.0).
            joint_names = list(recording.frames[0].joint_positions.keys())
            n_joints = len(joint_names)
            n_frames = len(recording.frames)

            # Variable-length string dtype for all text datasets.
            dt = h5py.special_dtype(vlen=str)
            joint_names_ds = f.create_dataset("joint_names", (n_joints,), dtype=dt)
            for i, name in enumerate(joint_names):
                joint_names_ds[i] = name

            timestamps = np.zeros(n_frames, dtype=np.float64)
            positions = np.zeros((n_frames, n_joints), dtype=np.float64)
            velocities = np.zeros((n_frames, n_joints), dtype=np.float64)
            torques = np.zeros((n_frames, n_joints), dtype=np.float64)
            accelerations = np.zeros((n_frames, n_joints), dtype=np.float64)

            # EE poses / control inputs are recorded only when present on the
            # first frame (matches original layout-detection behavior).
            has_ee_poses = recording.frames[0].end_effector_pose is not None
            if has_ee_poses:
                ee_poses = np.zeros((n_frames, 7), dtype=np.float64)  # x,y,z,qx,qy,qz,qw

            ctrl_names = list(recording.frames[0].control_inputs.keys()) if recording.frames[0].control_inputs else []
            if ctrl_names:
                n_ctrl = len(ctrl_names)
                control_inputs = np.zeros((n_frames, n_ctrl), dtype=np.float64)
                ctrl_names_ds = f.create_dataset("control_names", (n_ctrl,), dtype=dt)
                for i, name in enumerate(ctrl_names):
                    ctrl_names_ds[i] = name

            # Fill the dense arrays frame by frame.
            for i, frame in enumerate(recording.frames):
                timestamps[i] = frame.timestamp

                for j, name in enumerate(joint_names):
                    positions[i, j] = frame.joint_positions.get(name, 0.0)
                    velocities[i, j] = frame.joint_velocities.get(name, 0.0)
                    torques[i, j] = frame.joint_torques.get(name, 0.0)
                    accelerations[i, j] = frame.joint_accelerations.get(name, 0.0)

                if has_ee_poses and frame.end_effector_pose:
                    pose = frame.end_effector_pose
                    ee_poses[i] = [
                        pose.position.x, pose.position.y, pose.position.z,
                        pose.orientation.x, pose.orientation.y,
                        pose.orientation.z, pose.orientation.w,
                    ]

                if ctrl_names:
                    for j, name in enumerate(ctrl_names):
                        control_inputs[i, j] = frame.control_inputs.get(name, 0.0)

            # Store datasets with moderate gzip compression.
            f.create_dataset("timestamps", data=timestamps, compression="gzip", compression_opts=4)
            f.create_dataset("joint_positions", data=positions, compression="gzip", compression_opts=4)
            f.create_dataset("joint_velocities", data=velocities, compression="gzip", compression_opts=4)
            f.create_dataset("joint_torques", data=torques, compression="gzip", compression_opts=4)
            f.create_dataset("joint_accelerations", data=accelerations, compression="gzip", compression_opts=4)

            if has_ee_poses:
                f.create_dataset("end_effector_poses", data=ee_poses, compression="gzip", compression_opts=4)

            if ctrl_names:
                f.create_dataset("control_inputs", data=control_inputs, compression="gzip", compression_opts=4)

            # Store discrete execution events as parallel arrays.
            if recording.events:
                events_grp = f.create_group("events")
                events_grp.attrs["count"] = len(recording.events)

                event_timestamps = np.zeros(len(recording.events), dtype=np.float64)
                event_types = []
                event_data = []

                for i, event in enumerate(recording.events):
                    event_timestamps[i] = event.timestamp
                    event_type = event.event_type.value if hasattr(event.event_type, "value") else str(event.event_type)
                    event_types.append(event_type)
                    event_data.append(json.dumps(event.data))

                events_grp.create_dataset("timestamps", data=event_timestamps)

                event_types_ds = events_grp.create_dataset("types", (len(event_types),), dtype=dt)
                for i, et in enumerate(event_types):
                    event_types_ds[i] = et

                event_data_ds = events_grp.create_dataset("data", (len(event_data),), dtype=dt)
                for i, ed in enumerate(event_data):
                    event_data_ds[i] = ed

            # Store contacts as JSON blobs (variable count per frame).
            has_contacts = any(frame.contacts for frame in recording.frames)
            if has_contacts:
                contacts_grp = f.create_group("contacts")

                contact_counts = np.zeros(n_frames, dtype=np.int32)
                all_contact_data = []

                for i, frame in enumerate(recording.frames):
                    contact_counts[i] = len(frame.contacts)
                    for contact in frame.contacts:
                        all_contact_data.append({
                            "frame": i,
                            "body1": contact.body1,
                            "body2": contact.body2,
                            "force": contact.force,
                            "position": contact.position.to_list(),
                            "normal": contact.normal.to_list(),
                        })

                contacts_grp.create_dataset("counts_per_frame", data=contact_counts)
                if all_contact_data:
                    contact_json_ds = contacts_grp.create_dataset(
                        "data", (len(all_contact_data),), dtype=dt
                    )
                    for i, cd in enumerate(all_contact_data):
                        contact_json_ds[i] = json.dumps(cd)

    # BUG FIX: the original returned buffer.getvalue() from *inside* the
    # `with` block for empty recordings, i.e. before h5py flushed and closed
    # the file, which can yield a truncated/invalid HDF5 byte stream.
    # Returning after the context manager exits guarantees the bytes form a
    # complete file in both the empty and non-empty cases.
    return buffer.getvalue()
209
+
210
+
211
def deserialize_from_hdf5(data: bytes) -> TrajectoryRecording:
    """
    Deserialize HDF5 data to TrajectoryRecording.

    Reads the layout written by ``serialize_to_hdf5``: root attributes for
    metadata, dense per-frame datasets for joint state, optional
    ``end_effector_poses`` / ``control_inputs`` datasets, and optional
    ``events`` / ``contacts`` groups. Missing datasets are tolerated and
    default to empty, so partially-written or older files still load.

    Args:
        data: HDF5 file as bytes

    Returns:
        Parsed TrajectoryRecording

    Raises:
        ImportError: If h5py or numpy is not installed
    """
    try:
        import h5py
        import numpy as np
    except ImportError:
        raise ImportError(
            "HDF5 deserialization requires h5py and numpy. "
            "Install with: pip install h5py numpy"
        )

    # Imported lazily here (in addition to the module-level import) to pull
    # in the extra value types only needed for reconstruction.
    from ..types import (
        TrajectoryRecording,
        TrajectoryFrame,
        TrajectoryMetadata,
        ExecutionEvent,
        EventType,
        TelemetrySource,
        Pose,
        Vector3,
        Quaternion,
        Contact,
    )

    buffer = io.BytesIO(data)

    frames: List[TrajectoryFrame] = []
    events: List[ExecutionEvent] = []

    with h5py.File(buffer, "r") as f:
        # Read metadata from root attributes; every field has a fallback so
        # a file with missing attributes still deserializes.
        recording_id = f.attrs.get("recording_id", "")
        robot_id = f.attrs.get("robot_id", "")
        skill_id = f.attrs.get("skill_id", "") or None  # empty string -> None
        source_str = f.attrs.get("source", "hardware")
        success = bool(f.attrs.get("success", True))
        duration = float(f.attrs.get("duration", 0))
        frame_rate = float(f.attrs.get("frame_rate", 0))
        total_frames = int(f.attrs.get("total_frames", 0))

        # Unknown source strings fall back to HARDWARE rather than raising.
        try:
            source = TelemetrySource(source_str)
        except ValueError:
            source = TelemetrySource.HARDWARE

        # skill_params / tags were stored as JSON strings; ignore bad JSON.
        skill_params = None
        if "skill_params" in f.attrs:
            try:
                skill_params = json.loads(f.attrs["skill_params"])
            except json.JSONDecodeError:
                pass

        tags = []
        if "tags" in f.attrs:
            try:
                tags = json.loads(f.attrs["tags"])
            except json.JSONDecodeError:
                pass

        # Read joint names (h5py may return bytes for vlen strings).
        joint_names = []
        if "joint_names" in f:
            joint_names = [name.decode("utf-8") if isinstance(name, bytes) else name
                           for name in f["joint_names"][:]]

        # Read control names
        ctrl_names = []
        if "control_names" in f:
            ctrl_names = [name.decode("utf-8") if isinstance(name, bytes) else name
                          for name in f["control_names"][:]]

        # Read data arrays; missing datasets become empty lists so the
        # frame-building loop below degrades gracefully.
        timestamps = f["timestamps"][:] if "timestamps" in f else []
        positions = f["joint_positions"][:] if "joint_positions" in f else []
        velocities = f["joint_velocities"][:] if "joint_velocities" in f else []
        torques = f["joint_torques"][:] if "joint_torques" in f else []
        accelerations = f["joint_accelerations"][:] if "joint_accelerations" in f else []

        ee_poses = f["end_effector_poses"][:] if "end_effector_poses" in f else None
        control_inputs = f["control_inputs"][:] if "control_inputs" in f else None

        # Read contacts: JSON blobs keyed back to their frame index.
        contacts_by_frame = {}
        if "contacts" in f:
            contacts_grp = f["contacts"]
            if "data" in contacts_grp:
                for contact_json in contacts_grp["data"][:]:
                    if isinstance(contact_json, bytes):
                        contact_json = contact_json.decode("utf-8")
                    try:
                        cd = json.loads(contact_json)
                        frame_idx = cd["frame"]
                        if frame_idx not in contacts_by_frame:
                            contacts_by_frame[frame_idx] = []
                        contacts_by_frame[frame_idx].append(Contact(
                            body1=cd["body1"],
                            body2=cd["body2"],
                            force=cd["force"],
                            position=Vector3.from_list(cd.get("position", [0, 0, 0])),
                            normal=Vector3.from_list(cd.get("normal", [0, 0, 1])),
                        ))
                    except (json.JSONDecodeError, KeyError):
                        # Skip malformed contact entries rather than failing
                        # the whole deserialization.
                        pass

        # Build frames: one TrajectoryFrame per timestamp, with per-array
        # bounds checks so ragged/missing data yields partial dicts.
        n_frames = len(timestamps)
        for i in range(n_frames):
            joint_pos = {}
            joint_vel = {}
            joint_tor = {}
            joint_acc = {}
            ctrl_inp = {}

            for j, name in enumerate(joint_names):
                if len(positions) > i and len(positions[i]) > j:
                    joint_pos[name] = float(positions[i][j])
                if len(velocities) > i and len(velocities[i]) > j:
                    joint_vel[name] = float(velocities[i][j])
                if len(torques) > i and len(torques[i]) > j:
                    joint_tor[name] = float(torques[i][j])
                if len(accelerations) > i and len(accelerations[i]) > j:
                    joint_acc[name] = float(accelerations[i][j])

            if control_inputs is not None:
                for j, name in enumerate(ctrl_names):
                    if len(control_inputs[i]) > j:
                        ctrl_inp[name] = float(control_inputs[i][j])

            # Pose rows are stored as [x, y, z, qx, qy, qz, qw].
            ee_pose = None
            if ee_poses is not None and len(ee_poses) > i:
                p = ee_poses[i]
                ee_pose = Pose(
                    position=Vector3(x=p[0], y=p[1], z=p[2]),
                    orientation=Quaternion(x=p[3], y=p[4], z=p[5], w=p[6]),
                )

            frame = TrajectoryFrame(
                timestamp=float(timestamps[i]),
                joint_positions=joint_pos,
                joint_velocities=joint_vel,
                joint_torques=joint_tor,
                joint_accelerations=joint_acc,
                end_effector_pose=ee_pose,
                contacts=contacts_by_frame.get(i, []),
                control_inputs=ctrl_inp,
            )
            frames.append(frame)

        # Read events from the parallel timestamp/type/data arrays.
        if "events" in f:
            events_grp = f["events"]
            event_timestamps = events_grp["timestamps"][:] if "timestamps" in events_grp else []
            event_types = events_grp["types"][:] if "types" in events_grp else []
            event_data = events_grp["data"][:] if "data" in events_grp else []

            for i in range(len(event_timestamps)):
                event_type_str = event_types[i]
                if isinstance(event_type_str, bytes):
                    event_type_str = event_type_str.decode("utf-8")

                # Unknown event types are kept as raw strings instead of
                # being dropped or coerced.
                try:
                    event_type = EventType(event_type_str)
                except ValueError:
                    event_type = event_type_str

                event_data_dict = {}
                if i < len(event_data):
                    ed = event_data[i]
                    if isinstance(ed, bytes):
                        ed = ed.decode("utf-8")
                    try:
                        event_data_dict = json.loads(ed)
                    except json.JSONDecodeError:
                        pass

                events.append(ExecutionEvent(
                    timestamp=float(event_timestamps[i]),
                    event_type=event_type,
                    data=event_data_dict,
                ))

    # Build recording from everything gathered above.
    recording = TrajectoryRecording(
        id=recording_id,
        robot_id=robot_id,
        skill_id=skill_id,
        source=source,
        success=success,
        frames=frames,
        events=events,
        metadata=TrajectoryMetadata(
            duration=duration,
            frame_rate=frame_rate,
            total_frames=total_frames,
            skill_params=skill_params,
            tags=tags,
            joint_names=joint_names,
        ),
    )

    return recording
423
+
424
+
425
def load_hdf5_for_training(
    filepath: str,
    normalize: bool = True,
    include_velocities: bool = True,
    include_torques: bool = False,
) -> Dict[str, Any]:
    """
    Load HDF5 file and prepare data for ML training.

    Returns NumPy arrays ready for use with PyTorch/TensorFlow.

    Args:
        filepath: Path to HDF5 file
        normalize: Whether to standardize observations to zero mean and unit
            variance (z-score, per feature). NOTE: an earlier docstring
            claimed [-1, 1] scaling, but the implementation standardizes.
        include_velocities: Include velocity data
        include_torques: Include torque data

    Returns:
        Dictionary with:
        - observations: Combined state array (N, obs_dim)
        - actions: Control inputs (N, action_dim) if available, else None
        - timestamps: Time array (N,)
        - success: Whether execution succeeded
        - metadata: Recording metadata
        - joint_names: Joint name strings ([] if absent from the file)

    Raises:
        ImportError: If h5py or numpy is not installed
        KeyError: If "timestamps" or "joint_positions" is missing
    """
    try:
        import h5py
        import numpy as np
    except ImportError:
        raise ImportError("Requires h5py and numpy")

    with h5py.File(filepath, "r") as f:
        timestamps = f["timestamps"][:]
        positions = f["joint_positions"][:]

        # Observation vector = positions [+ velocities] [+ torques]
        # [+ end-effector poses], concatenated along the feature axis.
        obs_components = [positions]

        if include_velocities and "joint_velocities" in f:
            obs_components.append(f["joint_velocities"][:])

        if include_torques and "joint_torques" in f:
            obs_components.append(f["joint_torques"][:])

        if "end_effector_poses" in f:
            obs_components.append(f["end_effector_poses"][:])

        observations = np.concatenate(obs_components, axis=1)

        # Z-score standardization per feature; the epsilon guards against
        # division by zero for constant features.
        if normalize:
            obs_mean = observations.mean(axis=0, keepdims=True)
            obs_std = observations.std(axis=0, keepdims=True) + 1e-8
            observations = (observations - obs_mean) / obs_std

        # Actions (control inputs) are optional.
        actions = None
        if "control_inputs" in f:
            actions = f["control_inputs"][:]

        metadata = {
            "recording_id": f.attrs.get("recording_id", ""),
            "robot_id": f.attrs.get("robot_id", ""),
            "skill_id": f.attrs.get("skill_id", ""),
            "duration": float(f.attrs.get("duration", 0)),
            "frame_rate": float(f.attrs.get("frame_rate", 0)),
        }

        return {
            "observations": observations,
            "actions": actions,
            "timestamps": timestamps,
            "success": bool(f.attrs.get("success", True)),
            "metadata": metadata,
            # vlen strings may come back as bytes; normalize to str.
            "joint_names": [name.decode("utf-8") if isinstance(name, bytes) else name
                            for name in f["joint_names"][:]] if "joint_names" in f else [],
        }