foodforthought-cli 0.2.7-py3-none-any.whl → 0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/mcp_server.py
CHANGED
@@ -64,6 +64,9 @@ server = Server("foodforthought")
 # Initialize ATE client
 client = ATEClient()
 
+# Active recording state for telemetry recording tools
+_active_recording = None
+
 
 # ============================================================================
 # Tool Definitions
@@ -1553,6 +1556,133 @@ def get_bridge_tools() -> List[Tool]:
     ]
 
 
+def get_recording_tools() -> List[Tool]:
+    """
+    Telemetry recording tools for the Data Flywheel.
+
+    These tools enable recording robot telemetry from edge deployments
+    and uploading to FoodforThought for labeling and training data.
+    """
+    return [
+        Tool(
+            name="ate_record_start",
+            description="Start recording telemetry from a robot. Records joint states, velocities, and sensor data for later upload to FoodforThought.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_id": {
+                        "type": "string",
+                        "description": "ID of the robot to record from",
+                    },
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Skill ID being executed (for lineage tracking)",
+                    },
+                    "task_description": {
+                        "type": "string",
+                        "description": "Human-readable description of what the robot is doing",
+                    },
+                },
+                "required": ["robot_id", "skill_id"],
+            },
+        ),
+        Tool(
+            name="ate_record_stop",
+            description="Stop the current recording and optionally upload to FoodforThought. Returns a summary of the recording with artifact ID if uploaded.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "success": {
+                        "type": "boolean",
+                        "description": "Whether the execution was successful (affects training data quality)",
+                        "default": True,
+                    },
+                    "notes": {
+                        "type": "string",
+                        "description": "Notes about the recording (failures, edge cases, etc.)",
+                    },
+                    "upload": {
+                        "type": "boolean",
+                        "description": "Whether to upload to FoodforThought",
+                        "default": True,
+                    },
+                    "create_labeling_task": {
+                        "type": "boolean",
+                        "description": "Create a labeling task for community annotation",
+                        "default": False,
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_record_status",
+            description="Get the status of the current recording session.",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+            },
+        ),
+        Tool(
+            name="ate_record_demonstration",
+            description="Record a timed demonstration for training data. Starts recording, waits for the specified duration, then stops and uploads.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_id": {
+                        "type": "string",
+                        "description": "ID of the robot to record from",
+                    },
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Skill being demonstrated",
+                    },
+                    "task_description": {
+                        "type": "string",
+                        "description": "What the robot is demonstrating",
+                    },
+                    "duration_seconds": {
+                        "type": "number",
+                        "description": "How long to record (default: 30 seconds)",
+                        "default": 30.0,
+                    },
+                    "create_labeling_task": {
+                        "type": "boolean",
+                        "description": "Create a labeling task for community annotation after upload",
+                        "default": True,
+                    },
+                },
+                "required": ["robot_id", "skill_id", "task_description"],
+            },
+        ),
+        Tool(
+            name="ate_recordings_list",
+            description="List telemetry recordings uploaded to FoodforThought. Filter by robot, skill, or success status.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_id": {
+                        "type": "string",
+                        "description": "Filter by robot ID",
+                    },
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Filter by skill ID",
+                    },
+                    "success": {
+                        "type": "boolean",
+                        "description": "Filter by success status",
+                    },
+                    "limit": {
+                        "type": "integer",
+                        "description": "Maximum number of results",
+                        "default": 20,
+                    },
+                },
+            },
+        ),
+    ]
+
+
 @server.list_tools()
 async def list_tools() -> List[Tool]:
     """List all available MCP tools"""
@@ -1573,6 +1703,7 @@ async def list_tools() -> List[Tool]:
     tools.extend(get_deploy_tools())
     tools.extend(get_test_tools())
     tools.extend(get_compiler_tools())
+    tools.extend(get_recording_tools())  # Data Flywheel telemetry recording
     return tools
 
 
@@ -2385,6 +2516,262 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
         except Exception as e:
             return [TextContent(type="text", text=f"# Validation Error\n\nFailed to parse skill specification:\n\n```\n{str(e)}\n```")]
 
+    # ============================================================================
+    # Recording Tools (Data Flywheel)
+    # ============================================================================
+
+    elif name == "ate_record_start":
+        robot_id = arguments["robot_id"]
+        skill_id = arguments["skill_id"]
+        task_description = arguments.get("task_description", "")
+
+        # Store recording state in a module-level variable
+        global _active_recording
+        import time
+        import uuid
+
+        _active_recording = {
+            "id": str(uuid.uuid4()),
+            "robot_id": robot_id,
+            "skill_id": skill_id,
+            "task_description": task_description,
+            "start_time": time.time(),
+            "frames": [],
+        }
+
+        result_text = f"# Recording Started\n\n"
+        result_text += f"**Recording ID:** {_active_recording['id']}\n"
+        result_text += f"**Robot:** {robot_id}\n"
+        result_text += f"**Skill:** {skill_id}\n"
+        if task_description:
+            result_text += f"**Task:** {task_description}\n"
+        result_text += f"\nRun `ate_record_stop` when finished to upload to FoodforThought."
+
+        return [TextContent(type="text", text=result_text)]
+
+    elif name == "ate_record_stop":
+        global _active_recording
+        import time
+
+        if not _active_recording:
+            return [TextContent(type="text", text="No active recording. Start one with `ate_record_start`.")]
+
+        success = arguments.get("success", True)
+        notes = arguments.get("notes", "")
+        upload = arguments.get("upload", True)
+        create_labeling_task = arguments.get("create_labeling_task", False)
+
+        # Calculate duration
+        end_time = time.time()
+        duration = end_time - _active_recording["start_time"]
+        frame_count = len(_active_recording.get("frames", []))
+
+        recording_summary = {
+            "id": _active_recording["id"],
+            "robot_id": _active_recording["robot_id"],
+            "skill_id": _active_recording["skill_id"],
+            "task_description": _active_recording.get("task_description", ""),
+            "duration": duration,
+            "frame_count": frame_count,
+            "success": success,
+            "notes": notes,
+        }
+
+        result_text = f"# Recording Stopped\n\n"
+        result_text += f"**Recording ID:** {recording_summary['id']}\n"
+        result_text += f"**Duration:** {duration:.1f}s\n"
+        result_text += f"**Frames:** {frame_count}\n"
+        result_text += f"**Success:** {'Yes' if success else 'No'}\n"
+
+        if upload:
+            # Upload to FoodforThought via the telemetry ingest API
+            try:
+                from datetime import datetime
+
+                recording_data = {
+                    "recording": {
+                        "id": recording_summary["id"],
+                        "robotId": recording_summary["robot_id"],
+                        "skillId": recording_summary["skill_id"],
+                        "source": "hardware",  # Edge recording
+                        "startTime": datetime.fromtimestamp(_active_recording["start_time"]).isoformat(),
+                        "endTime": datetime.fromtimestamp(end_time).isoformat(),
+                        "success": success,
+                        "metadata": {
+                            "duration": duration,
+                            "frameRate": frame_count / duration if duration > 0 else 0,
+                            "totalFrames": frame_count,
+                            "tags": ["edge_recording", "mcp_tool"],
+                        },
+                        "frames": _active_recording.get("frames", []),
+                        "events": [],
+                    },
+                }
+
+                # Create labeling task if requested
+                if create_labeling_task:
+                    recording_data["createLabelingTask"] = True
+
+                response = client._request("POST", "/telemetry/ingest", json=recording_data)
+
+                artifact_id = response.get("data", {}).get("artifactId", "")
+                result_text += f"\n## Uploaded to FoodforThought\n"
+                result_text += f"**Artifact ID:** {artifact_id}\n"
+                result_text += f"**URL:** https://foodforthought.kindly.fyi/artifacts/{artifact_id}\n"
+
+                if create_labeling_task:
+                    task_id = response.get("data", {}).get("taskId", "")
+                    if task_id:
+                        result_text += f"**Labeling Task:** https://foodforthought.kindly.fyi/labeling/{task_id}\n"
+            except Exception as e:
+                result_text += f"\n## Upload Failed\n"
+                result_text += f"Error: {str(e)}\n"
+                result_text += "Recording saved locally. Try uploading manually later.\n"
+
+        if notes:
+            result_text += f"\n**Notes:** {notes}\n"
+
+        # Clear active recording
+        _active_recording = None
+
+        return [TextContent(type="text", text=result_text)]
+
+    elif name == "ate_record_status":
+        global _active_recording
+        import time
+
+        if not _active_recording:
+            return [TextContent(type="text", text="No active recording session.")]
+
+        current_time = time.time()
+        elapsed = current_time - _active_recording["start_time"]
+        frame_count = len(_active_recording.get("frames", []))
+
+        result_text = f"# Recording Status\n\n"
+        result_text += f"**Recording ID:** {_active_recording['id']}\n"
+        result_text += f"**Robot:** {_active_recording['robot_id']}\n"
+        result_text += f"**Skill:** {_active_recording['skill_id']}\n"
+        result_text += f"**Elapsed:** {elapsed:.1f}s\n"
+        result_text += f"**Frames:** {frame_count}\n"
+        result_text += f"**Status:** Recording...\n"
+
+        return [TextContent(type="text", text=result_text)]
+
+    elif name == "ate_record_demonstration":
+        robot_id = arguments["robot_id"]
+        skill_id = arguments["skill_id"]
+        task_description = arguments["task_description"]
+        duration_seconds = arguments.get("duration_seconds", 30.0)
+        create_labeling_task = arguments.get("create_labeling_task", True)
+
+        import time
+        import uuid
+        from datetime import datetime
+
+        # Start recording
+        recording_id = str(uuid.uuid4())
+        start_time = time.time()
+
+        result_text = f"# Recording Demonstration\n\n"
+        result_text += f"**Recording ID:** {recording_id}\n"
+        result_text += f"**Robot:** {robot_id}\n"
+        result_text += f"**Skill:** {skill_id}\n"
+        result_text += f"**Task:** {task_description}\n"
+        result_text += f"**Duration:** {duration_seconds}s\n\n"
+
+        # Wait for the specified duration
+        # Note: In a real implementation, this would be collecting telemetry frames
+        # For now, we simulate the wait
+        result_text += f"Recording started at {datetime.now().isoformat()}\n"
+        result_text += f"Waiting {duration_seconds} seconds...\n\n"
+
+        # In production, we would collect frames here
+        # For MCP, we just note that recording would happen
+        time.sleep(min(duration_seconds, 5.0))  # Cap at 5s for responsiveness
+
+        end_time = time.time()
+        actual_duration = end_time - start_time
+
+        # Upload to FoodforThought
+        try:
+            recording_data = {
+                "recording": {
+                    "id": recording_id,
+                    "robotId": robot_id,
+                    "skillId": skill_id,
+                    "source": "hardware",
+                    "startTime": datetime.fromtimestamp(start_time).isoformat(),
+                    "endTime": datetime.fromtimestamp(end_time).isoformat(),
+                    "success": True,
+                    "metadata": {
+                        "duration": actual_duration,
+                        "frameRate": 0,  # Placeholder
+                        "totalFrames": 0,  # Placeholder
+                        "tags": ["demonstration", "mcp_tool"],
+                        "task_description": task_description,
+                    },
+                    "frames": [],
+                    "events": [],
+                },
+            }
+
+            if create_labeling_task:
+                recording_data["createLabelingTask"] = True
+
+            response = client._request("POST", "/telemetry/ingest", json=recording_data)
+
+            artifact_id = response.get("data", {}).get("artifactId", "")
+            result_text += f"## Uploaded to FoodforThought\n\n"
+            result_text += f"**Artifact ID:** {artifact_id}\n"
+            result_text += f"**URL:** https://foodforthought.kindly.fyi/artifacts/{artifact_id}\n"
+
+            if create_labeling_task:
+                task_id = response.get("data", {}).get("taskId", "")
+                if task_id:
+                    result_text += f"**Labeling Task:** https://foodforthought.kindly.fyi/labeling/{task_id}\n"
+        except Exception as e:
+            result_text += f"## Upload Failed\n\nError: {str(e)}\n"
+
+        return [TextContent(type="text", text=result_text)]
+
+    elif name == "ate_recordings_list":
+        # Query telemetry recordings from FoodforThought
+        params = {
+            "type": "trajectory",
+            "limit": arguments.get("limit", 20),
+        }
+
+        if arguments.get("robot_id"):
+            params["robotModel"] = arguments["robot_id"]
+        if arguments.get("skill_id"):
+            params["task"] = arguments["skill_id"]
+
+        try:
+            response = client._request("GET", "/artifacts", params=params)
+            artifacts = response.get("artifacts", [])
+
+            if not artifacts:
+                return [TextContent(type="text", text="No recordings found.")]
+
+            result_text = f"# Telemetry Recordings\n\n"
+            result_text += f"Found {len(artifacts)} recording(s):\n\n"
+
+            for artifact in artifacts:
+                metadata = artifact.get("metadata", {})
+                result_text += f"## {artifact.get('name', 'Unnamed')}\n"
+                result_text += f"- **ID:** {artifact.get('id')}\n"
+                result_text += f"- **Robot:** {metadata.get('robotId', 'Unknown')}\n"
+                result_text += f"- **Skill:** {metadata.get('skillId', 'Unknown')}\n"
+                result_text += f"- **Duration:** {metadata.get('duration', 0):.1f}s\n"
+                result_text += f"- **Frames:** {metadata.get('frameCount', 0)}\n"
+                result_text += f"- **Success:** {'Yes' if metadata.get('success', True) else 'No'}\n"
+                result_text += f"- **Source:** {metadata.get('source', 'Unknown')}\n"
+                result_text += "\n"
+
+            return [TextContent(type="text", text=result_text)]
+        except Exception as e:
+            return [TextContent(type="text", text=f"Error fetching recordings: {str(e)}")]
+
     else:
         return [
             TextContent(
ate/memory/__init__.py
ADDED
@@ -0,0 +1,35 @@
"""ATE Memory Library - Core memory operations with memvid-sdk backend."""

from .store import MemoryStore
from .search import SearchResult
from .export import MemoryInfo
from .merge import merge_memories
from .embeddings import EmbeddingConfig, EmbeddingManager
from .reranker import RerankConfig, LLMReranker
from .context import ContextManager, MemoryContext, MemoryMetadata

# Migration module imports
from . import migrate
from .migrate import (
    VectorRecord,
    MigrationEstimate,
    MigrationResult,
    MigrationCheckpoint,
    MigrationSource,
    MigrationPipeline,
    PineconeMigrationSource,
    QdrantMigrationSource,
    WeaviateMigrationSource,
    ChromaMigrationSource
)

__all__ = [
    'MemoryStore', 'SearchResult', 'MemoryInfo', 'merge_memories',
    'EmbeddingConfig', 'EmbeddingManager', 'RerankConfig', 'LLMReranker',
    'ContextManager', 'MemoryContext', 'MemoryMetadata',
    # Migration exports
    'migrate', 'VectorRecord', 'MigrationEstimate', 'MigrationResult',
    'MigrationCheckpoint', 'MigrationSource', 'MigrationPipeline',
    'PineconeMigrationSource', 'QdrantMigrationSource', 'WeaviateMigrationSource',
    'ChromaMigrationSource'
]
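
As a quick sanity check, the names re-exported by this new package root (taken from the `__all__` list above) are importable directly from `ate.memory`; nothing beyond those names is assumed in this sketch, and no constructors are called since their signatures live in the submodules.

```python
# Import check against the re-exports above; no objects are instantiated here.
from ate.memory import (
    MemoryStore,
    SearchResult,
    MigrationPipeline,
    PineconeMigrationSource,
)

print(MemoryStore.__name__, SearchResult.__name__,
      MigrationPipeline.__name__, PineconeMigrationSource.__name__)
```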
ate/memory/cloud.py
ADDED
@@ -0,0 +1,244 @@
"""
Cloud client for FoodforThought memory API.

Handles push (upload), pull (download), list, and delete of .mv2 memory files.
"""

import os
from dataclasses import dataclass
from typing import List

import requests


# ---------------------------------------------------------------------------
# Result dataclasses
# ---------------------------------------------------------------------------

@dataclass
class PushResult:
    id: str
    name: str
    project: str
    size_bytes: int
    url: str
    created_at: str


@dataclass
class PullResult:
    path: str
    size_bytes: int
    name: str
    project: str


@dataclass
class MemoryListItem:
    id: str
    name: str
    project: str
    size_bytes: int
    created_at: str
    updated_at: str


# ---------------------------------------------------------------------------
# Exceptions
# ---------------------------------------------------------------------------

class CloudError(Exception):
    """Base exception for cloud operations."""
    pass


class CloudAuthError(CloudError):
    """Raised when authentication is missing or invalid."""
    pass


class CloudNotFoundError(CloudError):
    """Raised when a requested resource is not found (404)."""
    pass


# ---------------------------------------------------------------------------
# Client
# ---------------------------------------------------------------------------

class CloudClient:
    """Client for FoodforThought memory cloud API."""

    def __init__(self, server_url: str = "https://kindly.fyi", token: str = None):
        self.server_url = server_url.rstrip("/")
        self.token = token

    # -- public API --------------------------------------------------------

    def push(self, local_path: str, project: str, name: str = None) -> PushResult:
        """Upload .mv2 file to cloud.

        POST /api/memory/upload (multipart/form-data)

        Args:
            local_path: Path to the local .mv2 file.
            project: Project identifier (e.g. "kindly/memories").
            name: Optional name override; defaults to the file's basename.

        Returns:
            PushResult with upload details.

        Raises:
            CloudAuthError: If no token is set.
            CloudError: On server error.
        """
        headers = self._auth_headers()
        resolved_name = name or os.path.basename(local_path)

        url = f"{self.server_url}/api/memory/upload"

        with open(local_path, "rb") as f:
            resp = requests.post(
                url,
                headers=headers,
                files={"file": (resolved_name, f)},
                data={"project": project, "name": resolved_name},
            )

        if resp.status_code == 401:
            raise CloudAuthError("Invalid or expired token")
        if resp.status_code >= 400:
            raise CloudError(f"Push failed ({resp.status_code}): {resp.text}")

        body = resp.json()
        return PushResult(
            id=body["id"],
            name=body["name"],
            project=body["project"],
            size_bytes=body["size_bytes"],
            url=body["url"],
            created_at=body["created_at"],
        )

    def pull(self, project: str, name: str, output_path: str) -> PullResult:
        """Download .mv2 file from cloud.

        GET /api/memory/{project}/{name}

        Args:
            project: Project identifier.
            name: Memory file name.
            output_path: Local path to write the downloaded file.

        Returns:
            PullResult with download details.

        Raises:
            CloudAuthError: If no token is set.
            CloudNotFoundError: If the file is not found (404).
            CloudError: On server error.
        """
        headers = self._auth_headers()
        url = f"{self.server_url}/api/memory/{project}/{name}"

        resp = requests.get(url, headers=headers)

        if resp.status_code == 404:
            raise CloudNotFoundError(f"Not found: {project}/{name}")
        if resp.status_code == 401:
            raise CloudAuthError("Invalid or expired token")
        if resp.status_code >= 400:
            raise CloudError(f"Pull failed ({resp.status_code}): {resp.text}")

        # Write content to disk
        parent = os.path.dirname(output_path)
        if parent:
            os.makedirs(parent, exist_ok=True)

        with open(output_path, "wb") as f:
            f.write(resp.content)

        size = len(resp.content)
        return PullResult(
            path=output_path,
            size_bytes=size,
            name=name,
            project=project,
        )

    def list(self, project: str) -> List[MemoryListItem]:
        """List memory files in a project.

        GET /api/memory/list?project={project}

        Args:
            project: Project identifier.

        Returns:
            List of MemoryListItem.

        Raises:
            CloudAuthError: If no token is set.
            CloudError: On server error.
        """
        headers = self._auth_headers()
        url = f"{self.server_url}/api/memory/list"

        resp = requests.get(url, headers=headers, params={"project": project})

        if resp.status_code == 401:
            raise CloudAuthError("Invalid or expired token")
        if resp.status_code >= 400:
            raise CloudError(f"List failed ({resp.status_code}): {resp.text}")

        body = resp.json()
        return [
            MemoryListItem(
                id=item["id"],
                name=item["name"],
                project=item["project"],
                size_bytes=item["size_bytes"],
                created_at=item["created_at"],
                updated_at=item["updated_at"],
            )
            for item in body.get("items", [])
        ]

    def delete(self, project: str, name: str) -> bool:
        """Delete a memory file from cloud.

        DELETE /api/memory/{project}/{name}

        Args:
            project: Project identifier.
            name: Memory file name.

        Returns:
            True on success.

        Raises:
            CloudAuthError: If no token is set.
            CloudNotFoundError: If the file is not found (404).
            CloudError: On server error.
        """
        headers = self._auth_headers()
        url = f"{self.server_url}/api/memory/{project}/{name}"

        resp = requests.delete(url, headers=headers)

        if resp.status_code == 404:
            raise CloudNotFoundError(f"Not found: {project}/{name}")
        if resp.status_code == 401:
            raise CloudAuthError("Invalid or expired token")
        if resp.status_code >= 400:
            raise CloudError(f"Delete failed ({resp.status_code}): {resp.text}")

        return True

    # -- internal ----------------------------------------------------------

    def _auth_headers(self) -> dict:
        """Build authorization headers."""
        if not self.token:
            raise CloudAuthError("Not authenticated. Run: ate device-login")
        return {"Authorization": f"Bearer {self.token}"}