foodforthought-cli 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +1 -1
- ate/bridge_server.py +622 -0
- ate/cli.py +2625 -242
- ate/compatibility.py +580 -0
- ate/generators/__init__.py +19 -0
- ate/generators/docker_generator.py +461 -0
- ate/generators/hardware_config.py +469 -0
- ate/generators/ros2_generator.py +617 -0
- ate/generators/skill_generator.py +783 -0
- ate/marketplace.py +524 -0
- ate/mcp_server.py +2424 -148
- ate/primitives.py +1016 -0
- ate/robot_setup.py +2222 -0
- ate/skill_schema.py +537 -0
- ate/telemetry/__init__.py +33 -0
- ate/telemetry/cli.py +455 -0
- ate/telemetry/collector.py +444 -0
- ate/telemetry/context.py +318 -0
- ate/telemetry/fleet_agent.py +419 -0
- ate/telemetry/formats/__init__.py +18 -0
- ate/telemetry/formats/hdf5_serializer.py +503 -0
- ate/telemetry/formats/mcap_serializer.py +457 -0
- ate/telemetry/types.py +334 -0
- foodforthought_cli-0.2.3.dist-info/METADATA +300 -0
- foodforthought_cli-0.2.3.dist-info/RECORD +44 -0
- foodforthought_cli-0.2.3.dist-info/top_level.txt +6 -0
- mechdog_labeled/__init__.py +3 -0
- mechdog_labeled/primitives.py +113 -0
- mechdog_labeled/servo_map.py +209 -0
- mechdog_output/__init__.py +3 -0
- mechdog_output/primitives.py +59 -0
- mechdog_output/servo_map.py +203 -0
- test_autodetect/__init__.py +3 -0
- test_autodetect/primitives.py +113 -0
- test_autodetect/servo_map.py +209 -0
- test_full_auto/__init__.py +3 -0
- test_full_auto/primitives.py +113 -0
- test_full_auto/servo_map.py +209 -0
- test_smart_detect/__init__.py +3 -0
- test_smart_detect/primitives.py +113 -0
- test_smart_detect/servo_map.py +209 -0
- foodforthought_cli-0.2.0.dist-info/METADATA +0 -151
- foodforthought_cli-0.2.0.dist-info/RECORD +0 -9
- foodforthought_cli-0.2.0.dist-info/top_level.txt +0 -1
- {foodforthought_cli-0.2.0.dist-info → foodforthought_cli-0.2.3.dist-info}/WHEEL +0 -0
- {foodforthought_cli-0.2.0.dist-info → foodforthought_cli-0.2.3.dist-info}/entry_points.txt +0 -0
ate/cli.py
CHANGED
|
@@ -9,6 +9,7 @@ import os
|
|
|
9
9
|
import sys
|
|
10
10
|
import time
|
|
11
11
|
import random
|
|
12
|
+
import getpass
|
|
12
13
|
import requests
|
|
13
14
|
from pathlib import Path
|
|
14
15
|
from typing import Optional, Dict, List
|
|
@@ -16,23 +17,43 @@ from ate.generator import generate_skill_project, TEMPLATES
|
|
|
16
17
|
|
|
17
18
|
BASE_URL = os.getenv("ATE_API_URL", "https://kindly.fyi/api")
|
|
18
19
|
API_KEY = os.getenv("ATE_API_KEY", "")
|
|
20
|
+
CONFIG_DIR = Path.home() / ".ate"
|
|
21
|
+
CONFIG_FILE = CONFIG_DIR / "config.json"
|
|
22
|
+
|
|
19
23
|
|
|
20
24
|
|
|
21
25
|
class ATEClient:
|
|
22
26
|
"""Client for interacting with FoodforThought API"""
|
|
23
27
|
|
|
24
|
-
def __init__(self, base_url: str = BASE_URL, api_key: str =
|
|
28
|
+
def __init__(self, base_url: str = BASE_URL, api_key: Optional[str] = None):
|
|
25
29
|
self.base_url = base_url
|
|
26
30
|
self.headers = {
|
|
27
31
|
"Content-Type": "application/json",
|
|
28
32
|
}
|
|
33
|
+
|
|
34
|
+
# Priority: Explicit Arg > Env Var > Global Default
|
|
35
|
+
if api_key is None:
|
|
36
|
+
api_key = os.getenv("ATE_API_KEY", API_KEY)
|
|
37
|
+
|
|
29
38
|
if api_key:
|
|
30
39
|
# Ensure API key has correct format
|
|
31
40
|
if not api_key.startswith("ate_"):
|
|
32
41
|
print("Warning: API key should start with 'ate_'", file=sys.stderr)
|
|
33
42
|
self.headers["Authorization"] = f"Bearer {api_key}"
|
|
34
43
|
else:
|
|
35
|
-
|
|
44
|
+
# Try to load from config file
|
|
45
|
+
if CONFIG_FILE.exists():
|
|
46
|
+
try:
|
|
47
|
+
with open(CONFIG_FILE) as f:
|
|
48
|
+
config = json.load(f)
|
|
49
|
+
stored_key = config.get("api_key")
|
|
50
|
+
if stored_key:
|
|
51
|
+
self.headers["Authorization"] = f"Bearer {stored_key}"
|
|
52
|
+
except Exception:
|
|
53
|
+
pass
|
|
54
|
+
|
|
55
|
+
if "Authorization" not in self.headers:
|
|
56
|
+
print("Warning: No API key found. Set ATE_API_KEY environment variable or run 'ate login'.", file=sys.stderr)
|
|
36
57
|
|
|
37
58
|
def _request(self, method: str, endpoint: str, **kwargs) -> Dict:
|
|
38
59
|
"""Make HTTP request to API"""
|
|
@@ -1386,273 +1407,2450 @@ def generate_launch_description():
|
|
|
1386
1407
|
if not all_passed:
|
|
1387
1408
|
sys.exit(1)
|
|
1388
1409
|
|
|
1410
|
+
# =========================================================================
|
|
1411
|
+
# Protocol Registry Methods
|
|
1412
|
+
# =========================================================================
|
|
1389
1413
|
|
|
1390
|
-
def
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1414
|
+
def protocol_list(self, robot_model: Optional[str], transport_type: Optional[str],
|
|
1415
|
+
verified_only: bool, search: Optional[str]) -> None:
|
|
1416
|
+
"""List protocols from the registry"""
|
|
1417
|
+
params = {}
|
|
1418
|
+
if robot_model:
|
|
1419
|
+
params["robotId"] = robot_model # Can be ID or search term
|
|
1420
|
+
if transport_type:
|
|
1421
|
+
params["transport"] = transport_type
|
|
1422
|
+
if verified_only:
|
|
1423
|
+
params["verified"] = "true"
|
|
1424
|
+
if search:
|
|
1425
|
+
params["q"] = search
|
|
1394
1426
|
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
|
|
1398
|
-
|
|
1399
|
-
init_parser.add_argument(
|
|
1400
|
-
"-v", "--visibility", choices=["public", "private"], default="public", help="Repository visibility"
|
|
1401
|
-
)
|
|
1427
|
+
try:
|
|
1428
|
+
response = self._request("GET", "/protocols", params=params)
|
|
1429
|
+
protocols = response.get("protocols", [])
|
|
1430
|
+
pagination = response.get("pagination", {})
|
|
1402
1431
|
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
clone_parser.add_argument("target_dir", nargs="?", help="Target directory")
|
|
1432
|
+
print(f"\n{'=' * 70}")
|
|
1433
|
+
print(f"Protocol Registry ({pagination.get('total', len(protocols))} total)")
|
|
1434
|
+
print(f"{'=' * 70}")
|
|
1407
1435
|
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1436
|
+
if not protocols:
|
|
1437
|
+
print("\nNo protocols found matching your criteria.")
|
|
1438
|
+
print("Use 'ate protocol init <robot-model> --transport <type>' to contribute one!")
|
|
1439
|
+
return
|
|
1412
1440
|
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1441
|
+
for proto in protocols:
|
|
1442
|
+
robot = proto.get("robot", {})
|
|
1443
|
+
verified = "✓" if proto.get("verified") else " "
|
|
1444
|
+
primitives = proto.get("primitiveSkillCount", 0)
|
|
1445
|
+
upvotes = proto.get("upvotes", 0)
|
|
1446
|
+
downvotes = proto.get("downvotes", 0)
|
|
1447
|
+
score = upvotes - downvotes
|
|
1416
1448
|
|
|
1417
|
-
|
|
1418
|
-
|
|
1419
|
-
|
|
1420
|
-
|
|
1449
|
+
print(f"\n[{verified}] {robot.get('name', 'Unknown')} - {proto.get('transportType', '?')}")
|
|
1450
|
+
print(f" ID: {proto.get('id')}")
|
|
1451
|
+
print(f" Format: {proto.get('commandFormat', '?')}")
|
|
1452
|
+
print(f" Primitives: {primitives} | Score: {score} (+{upvotes}/-{downvotes})")
|
|
1453
|
+
if proto.get('verified'):
|
|
1454
|
+
print(f" Verified: Yes")
|
|
1421
1455
|
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
choices=["gazebo", "mujoco", "pybullet", "webots"],
|
|
1426
|
-
help="Simulation environment")
|
|
1427
|
-
test_parser.add_argument("-r", "--robot", help="Robot model to test with")
|
|
1428
|
-
test_parser.add_argument("--local", action="store_true", help="Run simulation locally")
|
|
1456
|
+
except Exception as e:
|
|
1457
|
+
print(f"Error listing protocols: {e}", file=sys.stderr)
|
|
1458
|
+
sys.exit(1)
|
|
1429
1459
|
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
benchmark_parser.add_argument("-n", "--trials", type=int, default=10, help="Number of trials")
|
|
1436
|
-
benchmark_parser.add_argument("--compare", help="Compare with baseline (repository ID)")
|
|
1460
|
+
def protocol_get(self, protocol_id: str) -> None:
|
|
1461
|
+
"""Get detailed information about a protocol"""
|
|
1462
|
+
try:
|
|
1463
|
+
proto = self._request("GET", f"/protocols/{protocol_id}")
|
|
1464
|
+
robot = proto.get("robot", {})
|
|
1437
1465
|
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
adapt_parser.add_argument("target_robot", help="Target robot model")
|
|
1442
|
-
adapt_parser.add_argument("-r", "--repo-id", help="Repository ID to adapt")
|
|
1443
|
-
adapt_parser.add_argument("--analyze-only", action="store_true",
|
|
1444
|
-
help="Only show compatibility analysis")
|
|
1466
|
+
print(f"\n{'=' * 70}")
|
|
1467
|
+
print(f"Protocol: {robot.get('name', 'Unknown')} - {proto.get('transportType', '?')}")
|
|
1468
|
+
print(f"{'=' * 70}")
|
|
1445
1469
|
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
choices=["collision", "speed", "workspace", "force", "all"],
|
|
1450
|
-
default=["all"], help="Safety checks to run")
|
|
1451
|
-
validate_parser.add_argument("--strict", action="store_true", help="Use strict validation")
|
|
1452
|
-
validate_parser.add_argument("-f", "--files", nargs="*", help="Specific files to validate")
|
|
1470
|
+
print(f"\nRobot: {robot.get('manufacturer', '?')} {robot.get('name', '?')}")
|
|
1471
|
+
print(f"Category: {robot.get('category', '?')}")
|
|
1472
|
+
print(f"Transport: {proto.get('transportType', '?')}")
|
|
1453
1473
|
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
help="Streaming action")
|
|
1458
|
-
stream_parser.add_argument("-s", "--sensors", nargs="+",
|
|
1459
|
-
help="Sensors to stream (e.g., camera, lidar, imu)")
|
|
1460
|
-
stream_parser.add_argument("-o", "--output", help="Output file or URL")
|
|
1461
|
-
stream_parser.add_argument("--format", default="rosbag",
|
|
1462
|
-
choices=["rosbag", "hdf5", "json", "live"],
|
|
1463
|
-
help="Data format")
|
|
1474
|
+
# Command configuration
|
|
1475
|
+
command = proto.get('command', {})
|
|
1476
|
+
print(f"Command Format: {command.get('format', '?')}")
|
|
1464
1477
|
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1478
|
+
# Transport-specific details
|
|
1479
|
+
transport = proto.get('transportType', '')
|
|
1480
|
+
if transport == 'ble':
|
|
1481
|
+
ble = proto.get('ble', {})
|
|
1482
|
+
if ble:
|
|
1483
|
+
print(f"\nBLE Configuration:")
|
|
1484
|
+
if ble.get('advertisedName'):
|
|
1485
|
+
print(f" Advertised Name: {ble.get('advertisedName')}")
|
|
1486
|
+
if ble.get('serviceUuids'):
|
|
1487
|
+
print(f" Service UUIDs: {json.dumps(ble.get('serviceUuids'), indent=4)}")
|
|
1488
|
+
if ble.get('characteristics'):
|
|
1489
|
+
print(f" Characteristics: {json.dumps(ble.get('characteristics'), indent=4)}")
|
|
1490
|
+
if ble.get('mtu'):
|
|
1491
|
+
print(f" MTU: {ble.get('mtu')}")
|
|
1474
1492
|
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1493
|
+
elif transport == 'serial':
|
|
1494
|
+
serial = proto.get('serial', {})
|
|
1495
|
+
if serial:
|
|
1496
|
+
print(f"\nSerial Configuration:")
|
|
1497
|
+
print(f" Baud Rate: {serial.get('baudRate', '?')}")
|
|
1498
|
+
print(f" Data Bits: {serial.get('dataBits', 8)}")
|
|
1499
|
+
print(f" Stop Bits: {serial.get('stopBits', 1)}")
|
|
1500
|
+
print(f" Parity: {serial.get('parity', 'none')}")
|
|
1501
|
+
print(f" Flow Control: {serial.get('flowControl', 'none')}")
|
|
1483
1502
|
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
check_transfer_parser.add_argument("--min-score", type=float, default=0.0,
|
|
1493
|
-
help="Minimum score threshold (0.0-1.0)")
|
|
1503
|
+
elif transport == 'wifi':
|
|
1504
|
+
wifi = proto.get('wifi', {})
|
|
1505
|
+
if wifi:
|
|
1506
|
+
print(f"\nWiFi Configuration:")
|
|
1507
|
+
print(f" Protocol: {wifi.get('protocol', '?')}")
|
|
1508
|
+
print(f" Port: {wifi.get('port', '?')}")
|
|
1509
|
+
if wifi.get('host'):
|
|
1510
|
+
print(f" Default Host: {wifi.get('host')}")
|
|
1494
1511
|
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1512
|
+
elif transport == 'can':
|
|
1513
|
+
can = proto.get('can', {})
|
|
1514
|
+
if can:
|
|
1515
|
+
print(f"\nCAN Configuration:")
|
|
1516
|
+
print(f" Bitrate: {can.get('bitrate', '?')}")
|
|
1517
|
+
print(f" Interface: {can.get('interface', '?')}")
|
|
1499
1518
|
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
parts_list_parser = parts_subparsers.add_parser("list", help="List available parts")
|
|
1506
|
-
parts_list_parser.add_argument("-c", "--category",
|
|
1507
|
-
choices=["gripper", "sensor", "actuator", "controller",
|
|
1508
|
-
"end-effector", "camera", "lidar", "force-torque"],
|
|
1509
|
-
help="Filter by category")
|
|
1510
|
-
parts_list_parser.add_argument("-m", "--manufacturer", help="Filter by manufacturer")
|
|
1511
|
-
parts_list_parser.add_argument("-s", "--search", help="Search by name or part number")
|
|
1512
|
-
|
|
1513
|
-
# parts check
|
|
1514
|
-
parts_check_parser = parts_subparsers.add_parser("check",
|
|
1515
|
-
help="Check part compatibility for skill")
|
|
1516
|
-
parts_check_parser.add_argument("skill_id", help="Skill ID to check")
|
|
1517
|
-
|
|
1518
|
-
# parts require
|
|
1519
|
-
parts_require_parser = parts_subparsers.add_parser("require",
|
|
1520
|
-
help="Add part dependency to skill")
|
|
1521
|
-
parts_require_parser.add_argument("part_id", help="Part ID to require")
|
|
1522
|
-
parts_require_parser.add_argument("-s", "--skill", required=True, help="Skill ID")
|
|
1523
|
-
parts_require_parser.add_argument("-v", "--version", default="1.0.0",
|
|
1524
|
-
help="Minimum version (default: 1.0.0)")
|
|
1525
|
-
parts_require_parser.add_argument("--required", action="store_true",
|
|
1526
|
-
help="Mark as required (not optional)")
|
|
1519
|
+
# Discovery info
|
|
1520
|
+
discovery = proto.get('discovery', {})
|
|
1521
|
+
if discovery.get('notes'):
|
|
1522
|
+
print(f"\nDiscovery Notes:")
|
|
1523
|
+
print(f" {discovery.get('notes')}")
|
|
1527
1524
|
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
|
|
1525
|
+
# Verification status
|
|
1526
|
+
verification = proto.get('verification', {})
|
|
1527
|
+
print(f"\nVerification:")
|
|
1528
|
+
if verification.get('verified'):
|
|
1529
|
+
print(f" Status: ✓ Verified")
|
|
1530
|
+
if verification.get('notes'):
|
|
1531
|
+
print(f" Notes: {verification.get('notes')}")
|
|
1532
|
+
else:
|
|
1533
|
+
print(f" Status: Unverified")
|
|
1536
1534
|
|
|
1537
|
-
|
|
1538
|
-
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
generate_parser.add_argument("-r", "--robot", default="ur5",
|
|
1543
|
-
help="Target robot model (default: ur5)")
|
|
1544
|
-
generate_parser.add_argument("-o", "--output", default="./new-skill",
|
|
1545
|
-
help="Output directory (default: ./new-skill)")
|
|
1535
|
+
# Community feedback
|
|
1536
|
+
print(f"\nCommunity:")
|
|
1537
|
+
upvotes = proto.get('upvotes', 0)
|
|
1538
|
+
downvotes = proto.get('downvotes', 0)
|
|
1539
|
+
print(f" Score: {proto.get('score', 0)} (+{upvotes}/-{downvotes})")
|
|
1546
1540
|
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
workflow_run_parser = workflow_subparsers.add_parser("run", help="Run a workflow")
|
|
1558
|
-
workflow_run_parser.add_argument("path", help="Path to workflow YAML file")
|
|
1559
|
-
workflow_run_parser.add_argument("--sim", action="store_true",
|
|
1560
|
-
help="Run in simulation mode")
|
|
1561
|
-
workflow_run_parser.add_argument("--dry-run", action="store_true",
|
|
1562
|
-
help="Show execution plan without running")
|
|
1563
|
-
|
|
1564
|
-
# workflow export
|
|
1565
|
-
workflow_export_parser = workflow_subparsers.add_parser("export",
|
|
1566
|
-
help="Export workflow to other formats")
|
|
1567
|
-
workflow_export_parser.add_argument("path", help="Path to workflow YAML file")
|
|
1568
|
-
workflow_export_parser.add_argument("-f", "--format", default="ros2",
|
|
1569
|
-
choices=["ros2", "json"],
|
|
1570
|
-
help="Export format (default: ros2)")
|
|
1571
|
-
workflow_export_parser.add_argument("-o", "--output", help="Output file path")
|
|
1541
|
+
# Associated primitives
|
|
1542
|
+
primitives = proto.get('primitiveSkills', [])
|
|
1543
|
+
if primitives:
|
|
1544
|
+
print(f"\nPrimitive Skills ({len(primitives)}):")
|
|
1545
|
+
for prim in primitives:
|
|
1546
|
+
status = prim.get('status', 'experimental')
|
|
1547
|
+
status_icon = "✓" if status in ['tested', 'verified'] else "○"
|
|
1548
|
+
reliability = prim.get('reliabilityScore')
|
|
1549
|
+
reliability_str = f" [{reliability:.0%}]" if reliability else ""
|
|
1550
|
+
print(f" {status_icon} {prim.get('name')} ({prim.get('category', '?')}){reliability_str}")
|
|
1572
1551
|
|
|
1573
|
-
|
|
1574
|
-
team_parser = subparsers.add_parser("team", help="Team collaboration management")
|
|
1575
|
-
team_subparsers = team_parser.add_subparsers(dest="team_action", help="Team action")
|
|
1576
|
-
|
|
1577
|
-
# team create
|
|
1578
|
-
team_create_parser = team_subparsers.add_parser("create", help="Create a new team")
|
|
1579
|
-
team_create_parser.add_argument("name", help="Team name")
|
|
1580
|
-
team_create_parser.add_argument("-d", "--description", help="Team description")
|
|
1581
|
-
|
|
1582
|
-
# team invite
|
|
1583
|
-
team_invite_parser = team_subparsers.add_parser("invite", help="Invite user to team")
|
|
1584
|
-
team_invite_parser.add_argument("email", help="Email of user to invite")
|
|
1585
|
-
team_invite_parser.add_argument("-t", "--team", required=True, help="Team slug")
|
|
1586
|
-
team_invite_parser.add_argument("-r", "--role", default="member",
|
|
1587
|
-
choices=["owner", "admin", "member", "viewer"],
|
|
1588
|
-
help="Role to assign (default: member)")
|
|
1589
|
-
|
|
1590
|
-
# team list
|
|
1591
|
-
team_subparsers.add_parser("list", help="List teams you belong to")
|
|
1592
|
-
|
|
1593
|
-
# team share (skill share with team)
|
|
1594
|
-
team_share_parser = team_subparsers.add_parser("share", help="Share skill with team")
|
|
1595
|
-
team_share_parser.add_argument("skill_id", help="Skill ID to share")
|
|
1596
|
-
team_share_parser.add_argument("-t", "--team", required=True, help="Team slug")
|
|
1552
|
+
print(f"\nView online: https://kindly.fyi/foodforthought/protocols/{proto.get('id')}")
|
|
1597
1553
|
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
# data upload
|
|
1603
|
-
data_upload_parser = data_subparsers.add_parser("upload", help="Upload sensor data")
|
|
1604
|
-
data_upload_parser.add_argument("path", help="Path to data directory or file")
|
|
1605
|
-
data_upload_parser.add_argument("-s", "--skill", required=True, help="Associated skill ID")
|
|
1606
|
-
data_upload_parser.add_argument("--stage", default="raw",
|
|
1607
|
-
choices=["raw", "annotated", "skill-abstracted", "production"],
|
|
1608
|
-
help="Data stage (default: raw)")
|
|
1609
|
-
|
|
1610
|
-
# data list
|
|
1611
|
-
data_list_parser = data_subparsers.add_parser("list", help="List datasets")
|
|
1612
|
-
data_list_parser.add_argument("-s", "--skill", help="Filter by skill ID")
|
|
1613
|
-
data_list_parser.add_argument("--stage", help="Filter by stage")
|
|
1614
|
-
|
|
1615
|
-
# data promote
|
|
1616
|
-
data_promote_parser = data_subparsers.add_parser("promote", help="Promote dataset stage")
|
|
1617
|
-
data_promote_parser.add_argument("dataset_id", help="Dataset ID")
|
|
1618
|
-
data_promote_parser.add_argument("--to", required=True, dest="to_stage",
|
|
1619
|
-
choices=["annotated", "skill-abstracted", "production"],
|
|
1620
|
-
help="Target stage")
|
|
1621
|
-
|
|
1622
|
-
# data export
|
|
1623
|
-
data_export_parser = data_subparsers.add_parser("export", help="Export dataset")
|
|
1624
|
-
data_export_parser.add_argument("dataset_id", help="Dataset ID")
|
|
1625
|
-
data_export_parser.add_argument("-f", "--format", default="rlds",
|
|
1626
|
-
choices=["json", "rlds", "lerobot", "hdf5"],
|
|
1627
|
-
help="Export format (default: rlds)")
|
|
1628
|
-
data_export_parser.add_argument("-o", "--output", default="./export",
|
|
1629
|
-
help="Output directory")
|
|
1554
|
+
except Exception as e:
|
|
1555
|
+
print(f"Error fetching protocol: {e}", file=sys.stderr)
|
|
1556
|
+
sys.exit(1)
|
|
1630
1557
|
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
deploy_config_parser = deploy_subparsers.add_parser("config",
|
|
1636
|
-
help="Deploy using config file")
|
|
1637
|
-
deploy_config_parser.add_argument("config_path", help="Path to deploy.yaml")
|
|
1638
|
-
deploy_config_parser.add_argument("-t", "--target", required=True,
|
|
1639
|
-
help="Target fleet or robot")
|
|
1640
|
-
deploy_config_parser.add_argument("--dry-run", action="store_true",
|
|
1641
|
-
help="Show plan without deploying")
|
|
1642
|
-
|
|
1643
|
-
# deploy status
|
|
1644
|
-
deploy_status_parser = deploy_subparsers.add_parser("status",
|
|
1645
|
-
help="Check deployment status")
|
|
1646
|
-
deploy_status_parser.add_argument("target", help="Target fleet or robot")
|
|
1558
|
+
def protocol_init(self, robot_model: str, transport_type: str, output_dir: str) -> None:
|
|
1559
|
+
"""Initialize a new protocol definition locally"""
|
|
1560
|
+
output_path = Path(output_dir)
|
|
1561
|
+
output_path.mkdir(parents=True, exist_ok=True)
|
|
1647
1562
|
|
|
1648
|
-
|
|
1563
|
+
# Create protocol template
|
|
1564
|
+
protocol_template = {
|
|
1565
|
+
"robotModel": robot_model,
|
|
1566
|
+
"transportType": transport_type,
|
|
1567
|
+
"commandFormat": "json", # Default, user should change
|
|
1568
|
+
"version": "1.0.0",
|
|
1569
|
+
}
|
|
1649
1570
|
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1571
|
+
# Add transport-specific fields
|
|
1572
|
+
if transport_type == "ble":
|
|
1573
|
+
protocol_template.update({
|
|
1574
|
+
"bleServiceUuids": [{"uuid": "FFE0", "name": "Custom Service"}],
|
|
1575
|
+
"bleCharacteristics": [
|
|
1576
|
+
{"uuid": "FFE1", "name": "TX", "properties": ["write"]},
|
|
1577
|
+
{"uuid": "FFE2", "name": "RX", "properties": ["notify"]}
|
|
1578
|
+
],
|
|
1579
|
+
"bleAdvertisedName": f"{robot_model}-*",
|
|
1580
|
+
"bleMtu": 512,
|
|
1581
|
+
})
|
|
1582
|
+
elif transport_type == "serial":
|
|
1583
|
+
protocol_template.update({
|
|
1584
|
+
"serialBaudRate": 115200,
|
|
1585
|
+
"serialDataBits": 8,
|
|
1586
|
+
"serialStopBits": 1,
|
|
1587
|
+
"serialParity": "none",
|
|
1588
|
+
"serialFlowControl": "none",
|
|
1589
|
+
})
|
|
1590
|
+
elif transport_type == "wifi":
|
|
1591
|
+
protocol_template.update({
|
|
1592
|
+
"wifiProtocol": "tcp",
|
|
1593
|
+
"wifiPort": 8080,
|
|
1594
|
+
"wifiHost": "192.168.1.1",
|
|
1595
|
+
})
|
|
1596
|
+
elif transport_type == "can":
|
|
1597
|
+
protocol_template.update({
|
|
1598
|
+
"canBitrate": 500000,
|
|
1599
|
+
"canInterface": "can0",
|
|
1600
|
+
})
|
|
1601
|
+
elif transport_type == "i2c":
|
|
1602
|
+
protocol_template.update({
|
|
1603
|
+
"i2cAddress": 0x50,
|
|
1604
|
+
})
|
|
1605
|
+
elif transport_type == "spi":
|
|
1606
|
+
protocol_template.update({
|
|
1607
|
+
"spiMode": 0,
|
|
1608
|
+
"spiSpeedHz": 1000000,
|
|
1609
|
+
})
|
|
1610
|
+
|
|
1611
|
+
# Add command schema template
|
|
1612
|
+
protocol_template["commandSchema"] = {
|
|
1613
|
+
"commands": [
|
|
1614
|
+
{
|
|
1615
|
+
"name": "example_command",
|
|
1616
|
+
"description": "Example command - replace with actual commands",
|
|
1617
|
+
"parameters": [
|
|
1618
|
+
{"name": "param1", "type": "int", "min": 0, "max": 100}
|
|
1619
|
+
],
|
|
1620
|
+
"response": {"type": "json", "schema": {}}
|
|
1621
|
+
}
|
|
1622
|
+
]
|
|
1623
|
+
}
|
|
1624
|
+
|
|
1625
|
+
protocol_template["discoveryNotes"] = f"Protocol for {robot_model} via {transport_type}. Add your discovery notes here."
|
|
1626
|
+
|
|
1627
|
+
# Add primitive skills template
|
|
1628
|
+
protocol_template["primitiveSkills"] = [
|
|
1629
|
+
{
|
|
1630
|
+
"name": "example_motion",
|
|
1631
|
+
"displayName": "Example Motion",
|
|
1632
|
+
"category": "motion",
|
|
1633
|
+
"description": "Example primitive - replace with actual skills",
|
|
1634
|
+
"commandType": "json",
|
|
1635
|
+
"commandTemplate": '{"cmd": "example", "param": ${value}}',
|
|
1636
|
+
"parameters": {
|
|
1637
|
+
"value": {"type": "number", "min": 0, "max": 100, "default": 50}
|
|
1638
|
+
},
|
|
1639
|
+
"executionTimeMs": 500,
|
|
1640
|
+
"settleTimeMs": 100,
|
|
1641
|
+
"safetyNotes": "Ensure robot is in safe position before executing"
|
|
1642
|
+
}
|
|
1643
|
+
]
|
|
1644
|
+
|
|
1645
|
+
# Write protocol file
|
|
1646
|
+
protocol_file = output_path / "protocol.json"
|
|
1647
|
+
with open(protocol_file, "w") as f:
|
|
1648
|
+
json.dump(protocol_template, f, indent=2)
|
|
1649
|
+
|
|
1650
|
+
# Create README
|
|
1651
|
+
readme_content = f"""# {robot_model} Protocol ({transport_type})
|
|
1652
|
+
|
|
1653
|
+
## Overview
|
|
1654
|
+
This protocol defines how to communicate with the {robot_model} robot via {transport_type}.
|
|
1655
|
+
|
|
1656
|
+
## Discovery Notes
|
|
1657
|
+
Document how you discovered this protocol:
|
|
1658
|
+
- Tools used (e.g., nRF Connect, logic analyzer, Wireshark)
|
|
1659
|
+
- Reverse engineering steps
|
|
1660
|
+
- Testing methodology
|
|
1661
|
+
|
|
1662
|
+
## Usage
|
|
1663
|
+
1. Edit `protocol.json` with the correct values
|
|
1664
|
+
2. Document the command schema
|
|
1665
|
+
3. Run `ate protocol push` to upload to FoodForThought
|
|
1666
|
+
|
|
1667
|
+
## Commands
|
|
1668
|
+
Document available commands here.
|
|
1669
|
+
|
|
1670
|
+
## Testing
|
|
1671
|
+
1. Connect to the robot
|
|
1672
|
+
2. Test each command
|
|
1673
|
+
3. Document results
|
|
1674
|
+
"""
|
|
1675
|
+
readme_file = output_path / "README.md"
|
|
1676
|
+
with open(readme_file, "w") as f:
|
|
1677
|
+
f.write(readme_content)
|
|
1678
|
+
|
|
1679
|
+
print(f"Protocol template created in '{output_dir}'")
|
|
1680
|
+
print(f"\nFiles created:")
|
|
1681
|
+
print(f" - {protocol_file}")
|
|
1682
|
+
print(f" - {readme_file}")
|
|
1683
|
+
print(f"\nNext steps:")
|
|
1684
|
+
print(f" 1. Edit protocol.json with correct values")
|
|
1685
|
+
print(f" 2. Test the protocol with your robot")
|
|
1686
|
+
print(f" 3. Run 'ate protocol push' to upload")
|
|
1687
|
+
|
|
1688
|
+
def protocol_push(self, protocol_file: Optional[str]) -> None:
|
|
1689
|
+
"""Upload a protocol definition to FoodForThought"""
|
|
1690
|
+
# Find protocol file
|
|
1691
|
+
if protocol_file:
|
|
1692
|
+
proto_path = Path(protocol_file)
|
|
1693
|
+
else:
|
|
1694
|
+
proto_path = Path("protocol.json")
|
|
1695
|
+
if not proto_path.exists():
|
|
1696
|
+
proto_path = Path(".") / "protocol.json"
|
|
1697
|
+
|
|
1698
|
+
if not proto_path.exists():
|
|
1699
|
+
print("Error: No protocol.json found. Specify path or run from protocol directory.",
|
|
1700
|
+
file=sys.stderr)
|
|
1701
|
+
sys.exit(1)
|
|
1702
|
+
|
|
1703
|
+
with open(proto_path) as f:
|
|
1704
|
+
protocol_data = json.load(f)
|
|
1705
|
+
|
|
1706
|
+
# Validate required fields
|
|
1707
|
+
required = ["robotModel", "transportType", "commandFormat"]
|
|
1708
|
+
missing = [r for r in required if r not in protocol_data]
|
|
1709
|
+
if missing:
|
|
1710
|
+
print(f"Error: Missing required fields: {', '.join(missing)}", file=sys.stderr)
|
|
1711
|
+
sys.exit(1)
|
|
1712
|
+
|
|
1713
|
+
# Look up robot ID by model name using unified API
|
|
1714
|
+
robot_model = protocol_data.pop("robotModel")
|
|
1715
|
+
print(f"Looking up robot: {robot_model}...")
|
|
1716
|
+
try:
|
|
1717
|
+
robots_response = self._request("GET", "/robots/unified",
|
|
1718
|
+
params={"search": robot_model, "limit": "1"})
|
|
1719
|
+
robots = robots_response.get("robots", [])
|
|
1720
|
+
if not robots:
|
|
1721
|
+
print(f"Error: Robot model '{robot_model}' not found in registry.", file=sys.stderr)
|
|
1722
|
+
print("Check available robots at https://kindly.fyi/foodforthought/robots")
|
|
1723
|
+
sys.exit(1)
|
|
1724
|
+
|
|
1725
|
+
protocol_data["robotId"] = robots[0]["id"]
|
|
1726
|
+
robot_name = robots[0].get("name", robot_model)
|
|
1727
|
+
print(f" Found: {robot_name} (ID: {robots[0]['id']})")
|
|
1728
|
+
except Exception as e:
|
|
1729
|
+
print(f"Error looking up robot: {e}", file=sys.stderr)
|
|
1730
|
+
sys.exit(1)
|
|
1731
|
+
|
|
1732
|
+
# Extract primitiveSkills if present (they should be created with the protocol)
|
|
1733
|
+
primitives = protocol_data.get("primitiveSkills", [])
|
|
1734
|
+
primitive_count = len(primitives)
|
|
1735
|
+
if primitive_count > 0:
|
|
1736
|
+
print(f" Including {primitive_count} primitive skill(s)")
|
|
1737
|
+
|
|
1738
|
+
# Submit to API
|
|
1739
|
+
print("Uploading protocol...")
|
|
1740
|
+
try:
|
|
1741
|
+
response = self._request("POST", "/protocols", json=protocol_data)
|
|
1742
|
+
proto = response.get("protocol", {})
|
|
1743
|
+
created_primitives = response.get("primitiveSkills", [])
|
|
1744
|
+
|
|
1745
|
+
print(f"\n✓ Protocol published successfully!")
|
|
1746
|
+
print(f" ID: {proto.get('id')}")
|
|
1747
|
+
print(f" Robot: {proto.get('robot', {}).get('name', robot_name)}")
|
|
1748
|
+
print(f" Transport: {proto.get('transportType')}")
|
|
1749
|
+
|
|
1750
|
+
if created_primitives:
|
|
1751
|
+
print(f"\n Primitive Skills Created ({len(created_primitives)}):")
|
|
1752
|
+
for prim in created_primitives:
|
|
1753
|
+
print(f" - {prim.get('name')}")
|
|
1754
|
+
|
|
1755
|
+
print(f"\nView at: https://kindly.fyi/foodforthought/protocols/{proto.get('id')}")
|
|
1756
|
+
print("\nNext steps:")
|
|
1757
|
+
print(" - Ask the community to test and verify your protocol")
|
|
1758
|
+
print(" - Add more primitive skills with 'ate primitive add'")
|
|
1759
|
+
print(" - Submit test results with 'ate primitive test <id>'")
|
|
1760
|
+
except Exception as e:
|
|
1761
|
+
print(f"Error uploading protocol: {e}", file=sys.stderr)
|
|
1762
|
+
sys.exit(1)
|
|
1763
|
+
|
|
1764
|
+
def publish_protocol(self, protocol_file: Optional[str]) -> None:
|
|
1765
|
+
"""Alias for protocol_push - publish a protocol to FoodForThought"""
|
|
1766
|
+
self.protocol_push(protocol_file)
|
|
1767
|
+
|
|
1768
|
+
def protocol_scan_serial(self) -> None:
|
|
1769
|
+
"""Scan for available serial ports"""
|
|
1770
|
+
try:
|
|
1771
|
+
import serial.tools.list_ports
|
|
1772
|
+
except ImportError:
|
|
1773
|
+
print("Error: pyserial not installed. Run: pip install pyserial", file=sys.stderr)
|
|
1774
|
+
sys.exit(1)
|
|
1775
|
+
|
|
1776
|
+
ports = serial.tools.list_ports.comports()
|
|
1777
|
+
|
|
1778
|
+
print(f"\n{'=' * 60}")
|
|
1779
|
+
print("Available Serial Ports")
|
|
1780
|
+
print(f"{'=' * 60}")
|
|
1781
|
+
|
|
1782
|
+
if not ports:
|
|
1783
|
+
print("\nNo serial ports found.")
|
|
1784
|
+
return
|
|
1785
|
+
|
|
1786
|
+
for port in ports:
|
|
1787
|
+
print(f"\n{port.device}")
|
|
1788
|
+
print(f" Description: {port.description}")
|
|
1789
|
+
if port.manufacturer:
|
|
1790
|
+
print(f" Manufacturer: {port.manufacturer}")
|
|
1791
|
+
if port.product:
|
|
1792
|
+
print(f" Product: {port.product}")
|
|
1793
|
+
if port.serial_number:
|
|
1794
|
+
print(f" Serial: {port.serial_number}")
|
|
1795
|
+
print(f" Hardware ID: {port.hwid}")
|
|
1796
|
+
|
|
1797
|
+
def protocol_scan_ble(self) -> None:
    """Scan for BLE devices (requires bleak).

    Runs a 10-second BleakScanner discovery inside a short-lived asyncio
    event loop and prints discovered devices sorted by signal strength
    (strongest first). Exits with status 1 if bleak is not installed.
    """
    try:
        import asyncio
        from bleak import BleakScanner
    except ImportError:
        print("Error: bleak not installed. Run: pip install bleak", file=sys.stderr)
        sys.exit(1)

    async def scan():
        print(f"\n{'=' * 60}")
        print("Scanning for BLE devices (10 seconds)...")
        print(f"{'=' * 60}")

        devices = await BleakScanner.discover(timeout=10.0)

        if not devices:
            print("\nNo BLE devices found.")
            return

        print(f"\nFound {len(devices)} devices:\n")

        # NOTE(review): BLEDevice.rssi and BLEDevice.metadata are deprecated
        # in recent bleak releases (advertisement data is the replacement) —
        # confirm the pinned bleak version still exposes them.
        for device in sorted(devices, key=lambda d: d.rssi, reverse=True):
            print(f"{device.name or 'Unknown'}")
            print(f" Address: {device.address}")
            print(f" RSSI: {device.rssi} dBm")
            if device.metadata.get("uuids"):
                print(f" Service UUIDs: {device.metadata.get('uuids')}")
            print()

    # Blocking call: drives the async scan to completion.
    asyncio.run(scan())
|
|
1828
|
+
|
|
1829
|
+
# =========================================================================
|
|
1830
|
+
# Primitive Skills Methods
|
|
1831
|
+
# =========================================================================
|
|
1832
|
+
|
|
1833
|
+
def primitive_list(self, robot_model: Optional[str], category: Optional[str],
                   status: Optional[str], tested_only: bool) -> None:
    """List primitive skills, grouped by robot model.

    Args:
        robot_model: Server-side filter on robot model name (omitted if falsy).
        category: Server-side filter on primitive category.
        status: Server-side filter on lifecycle status.
        tested_only: When True, only primitives with test coverage are returned.

    Exits with status 1 if the API request fails.
    """
    params = {}
    if robot_model:
        params["robotModel"] = robot_model
    if category:
        params["category"] = category
    if status:
        params["status"] = status
    if tested_only:
        params["testedOnly"] = "true"

    # Status glyphs are constant — build the map once, not per row.
    status_icons = {"verified": "✓", "tested": "○", "experimental": "◌", "deprecated": "✗"}

    try:
        response = self._request("GET", "/primitives", params=params)
        primitives = response.get("primitives", [])
        pagination = response.get("pagination", {})

        print(f"\n{'=' * 70}")
        print(f"Primitive Skills ({pagination.get('total', len(primitives))} total)")
        print(f"{'=' * 70}")

        if not primitives:
            print("\nNo primitive skills found.")
            return

        # Group by robot
        by_robot = {}
        for prim in primitives:
            robot = prim.get("robotProfile", {}).get("modelName", "Unknown")
            by_robot.setdefault(robot, []).append(prim)

        for robot, prims in by_robot.items():
            print(f"\n{robot}:")
            for prim in prims:
                icon = status_icons.get(prim.get("status", "experimental"), "?")
                reliability = prim.get("reliabilityScore")
                # BUG FIX: use an explicit None check so a legitimate 0.0
                # (0%) reliability score is still rendered instead of hidden.
                rel_str = f" ({reliability:.0%})" if reliability is not None else ""

                print(f" {icon} {prim.get('name')} [{prim.get('category')}]{rel_str}")
                print(f" ID: {prim.get('id')}")

    except Exception as e:
        print(f"Error listing primitives: {e}", file=sys.stderr)
        sys.exit(1)
|
|
1881
|
+
|
|
1882
|
+
def primitive_get(self, primitive_id: str) -> None:
    """Fetch one primitive skill from the API and print a detailed report.

    Sections are printed only when the corresponding fields are present:
    header, robot/category/status, reliability, description, command
    template, parameters, timing, safety notes, and dependency links.

    Args:
        primitive_id: Server-side ID of the primitive to display.

    Exits with status 1 on any request or parsing failure.
    """
    try:
        response = self._request("GET", f"/primitives/{primitive_id}")
        prim = response.get("primitive", {})
        robot = prim.get("robotProfile", {})
        # NOTE(review): `protocol` is extracted but never printed below —
        # confirm whether protocol details were meant to appear in the report.
        protocol = prim.get("protocol", {})

        print(f"\n{'=' * 70}")
        print(f"Primitive: {prim.get('displayName') or prim.get('name')}")
        print(f"{'=' * 70}")

        print(f"\nRobot: {robot.get('manufacturer', '?')} {robot.get('modelName', '?')}")
        print(f"Category: {prim.get('category', '?')}")
        print(f"Status: {prim.get('status', '?')}")

        # Truthiness check: a reliabilityScore of exactly 0 is not printed here.
        if prim.get('reliabilityScore'):
            print(f"Reliability: {prim.get('reliabilityScore'):.1%}")

        if prim.get('description'):
            print(f"\nDescription: {prim.get('description')}")

        print(f"\nCommand Type: {prim.get('commandType')}")
        print(f"Command Template:")
        print(f" {prim.get('commandTemplate')}")

        # Parameters
        params = prim.get('parameters', [])
        if params:
            print(f"\nParameters:")
            for p in params:
                range_str = ""
                # Explicit None checks so 0-valued bounds/defaults still render.
                if p.get('min') is not None and p.get('max') is not None:
                    range_str = f" (range: {p.get('min')}-{p.get('max')})"
                unit = f" {p.get('unit')}" if p.get('unit') else ""
                default = f", default: {p.get('default')}" if p.get('default') is not None else ""
                tested = f", tested safe: {p.get('testedSafe')}" if p.get('testedSafe') is not None else ""
                print(f" - {p.get('name')}: {p.get('type')}{range_str}{unit}{default}{tested}")

        # Timing
        if any([prim.get('executionTimeMs'), prim.get('settleTimeMs'), prim.get('cooldownMs')]):
            print(f"\nTiming:")
            if prim.get('executionTimeMs'):
                print(f" Execution: {prim.get('executionTimeMs')}ms")
            if prim.get('settleTimeMs'):
                print(f" Settle: {prim.get('settleTimeMs')}ms")
            if prim.get('cooldownMs'):
                print(f" Cooldown: {prim.get('cooldownMs')}ms")

        # Safety
        if prim.get('safetyNotes'):
            print(f"\nSafety Notes:")
            print(f" {prim.get('safetyNotes')}")

        # Dependencies (both directions of the dependency graph).
        depends_on = prim.get('dependsOn', [])
        if depends_on:
            print(f"\nDepends On ({len(depends_on)}):")
            for dep in depends_on:
                req = dep.get('requiredSkill', {})
                print(f" - {req.get('name')} ({dep.get('dependencyType')})")

        required_by = prim.get('requiredBy', [])
        if required_by:
            print(f"\nRequired By ({len(required_by)}):")
            for dep in required_by:
                dep_skill = dep.get('dependentSkill', {})
                print(f" - {dep_skill.get('name')}")

    except Exception as e:
        print(f"Error fetching primitive: {e}", file=sys.stderr)
        sys.exit(1)
|
|
1954
|
+
|
|
1955
|
+
def primitive_test(self, primitive_id: str, params_json: str,
                   result: str, notes: Optional[str], video_url: Optional[str]) -> None:
    """Submit a test result for a primitive skill"""
    # Validate the user-supplied JSON before touching the network.
    try:
        parsed_params = json.loads(params_json)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON for parameters: {e}", file=sys.stderr)
        sys.exit(1)

    payload = {
        "parameters": parsed_params,
        "result": result,
    }
    # Optional fields are attached only when provided.
    for key, value in (("resultNotes", notes), ("videoUrl", video_url)):
        if value:
            payload[key] = value

    try:
        reply = self._request("POST", f"/primitives/{primitive_id}/test", json=payload)
        submitted = reply.get("testResult", {})
        prim_update = reply.get("primitiveUpdate", {})

        print(f"\n✓ Test result submitted!")
        print(f" Result: {result}")
        print(f" New Reliability Score: {prim_update.get('reliabilityScore', 0):.1%}")

        if prim_update.get('statusChanged'):
            print(f" Status upgraded to: {prim_update.get('status')}")

    except Exception as e:
        print(f"Error submitting test result: {e}", file=sys.stderr)
        sys.exit(1)
|
|
1988
|
+
|
|
1989
|
+
def primitive_deps_show(self, primitive_id: str) -> None:
    """Show the dependency graph for a primitive skill.

    Prints deployment readiness, the primitives this one depends on
    (with status/required-status/reliability), the primitives that
    depend on it, and a warning when the summary reports blocked
    dependencies.

    Args:
        primitive_id: Server-side ID of the primitive.

    Exits with status 1 on any request failure.
    """
    try:
        response = self._request("GET", f"/primitives/{primitive_id}/dependencies")
        deps = response.get("dependencies", [])
        dependents = response.get("dependents", [])
        summary = response.get("summary", {})
        deployment_ready = response.get("deploymentReady", False)

        print(f"\n{'=' * 60}")
        print("Dependency Graph")
        print(f"{'=' * 60}")

        print(f"\nDeployment Ready: {'✓ Yes' if deployment_ready else '✗ No'}")

        if deps:
            print(f"\nDepends On ({len(deps)}):")
            for dep in deps:
                req = dep.get("requiredSkill", {})
                # A dependency is satisfied once its skill reaches
                # tested/verified status.
                status_ok = req.get("status") in ["tested", "verified"]
                icon = "✓" if status_ok else "✗"
                print(f" {icon} {req.get('name')} ({req.get('status')})")
                print(f" Required status: {dep.get('requiredMinStatus')}")
                # Truthiness check: a reliability of exactly 0 is not shown.
                if req.get("reliabilityScore"):
                    print(f" Reliability: {req.get('reliabilityScore'):.1%}")
        else:
            print(f"\nNo dependencies (this is a root primitive)")

        if dependents:
            print(f"\nRequired By ({len(dependents)}):")
            for dep in dependents:
                skill = dep.get("dependentSkill", {})
                print(f" - {skill.get('name')}")

        if summary.get('blockedDependencies', 0) > 0:
            print(f"\n⚠ {summary.get('blockedDependencies')} dependencies need testing before deployment")

    except Exception as e:
        print(f"Error fetching dependencies: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2029
|
+
|
|
2030
|
+
def primitive_deps_add(self, primitive_id: str, required_skill_id: str,
                       dependency_type: str, min_status: str) -> None:
    """Add a dependency to a primitive skill"""
    # Request payload mirrors the API's camelCase field names.
    payload = {
        "requiredSkillId": required_skill_id,
        "dependencyType": dependency_type,
        "requiredMinStatus": min_status,
    }

    try:
        reply = self._request("POST", f"/primitives/{primitive_id}/dependencies", json=payload)
        created = reply.get("dependency", {})
        is_cross_robot = reply.get("crossRobot", False)

        print(f"\n✓ Dependency added!")
        if is_cross_robot:
            print(f" ⚠ Note: This is a cross-robot dependency")

    except Exception as e:
        print(f"Error adding dependency: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2051
|
+
|
|
2052
|
+
def primitive_init(self, name: str, protocol_id: Optional[str] = None,
                   from_recording: Optional[str] = None, category: str = "motion",
                   output_dir: str = ".") -> None:
    """Initialize a new primitive skill definition locally.

    Writes ``<output_dir>/<name>.primitive.json`` containing a skeleton
    definition, optionally seeding the command template/sequence from a
    recorded bridge session.

    Args:
        name: Machine name for the primitive (used for the file name).
        protocol_id: Optional protocol to link the primitive to.
        from_recording: Optional path to a session JSON (as written by the
            bridge's ``.save`` command) whose commands seed the definition.
        category: Primitive category (default "motion").
        output_dir: Directory the definition file is written into.
    """
    print(f"\n{'=' * 60}")
    print("Initializing Primitive Skill")
    print(f"{'=' * 60}\n")

    # Create output directory
    out_path = Path(output_dir)
    out_path.mkdir(parents=True, exist_ok=True)

    primitive_data = {
        "name": name,
        "displayName": name.replace("_", " ").title(),
        "category": category,
        "description": "",
        "protocolId": protocol_id,
        "commandType": "single",
        "commandTemplate": "",
        "responsePattern": "",
        "parameters": [],
        "executionTimeMs": None,
        "settleTimeMs": None,
        "cooldownMs": None,
        "safetyNotes": "",
        "status": "experimental",
        "version": "1.0.0",
    }

    # If importing from a recording, populate command data
    if from_recording:
        recording_path = Path(from_recording)
        if recording_path.exists():
            with open(recording_path) as f:
                recording = json.load(f)

            commands = recording.get("commands", [])
            if commands:
                # Use first command as template
                primitive_data["commandTemplate"] = commands[0].get("command", "")
                if len(commands) > 1:
                    primitive_data["commandType"] = "sequence"
                    # BUG FIX: bridge recordings store absolute epoch
                    # timestamps (time.time() at capture), so multiplying them
                    # by 1000 directly produced absurd multi-decade delays.
                    # Store each step's millisecond offset from the first
                    # command instead.
                    base_ts = commands[0].get("timestamp", 0)
                    primitive_data["commandSequence"] = [
                        {"command": c.get("command"),
                         "delayMs": int((c.get("timestamp", 0) - base_ts) * 1000)}
                        for c in commands
                    ]

                # Extract responses for pattern
                responses = [c.get("response") for c in commands if c.get("response")]
                if responses:
                    primitive_data["responsePattern"] = responses[0]

                print(f"✓ Imported {len(commands)} commands from recording")
        else:
            print(f"Warning: Recording file not found: {from_recording}", file=sys.stderr)

    # Write primitive file
    primitive_file = out_path / f"{name}.primitive.json"
    with open(primitive_file, "w") as f:
        json.dump(primitive_data, f, indent=2)

    print(f"✓ Created: {primitive_file}")
    print(f"\nNext steps:")
    print(f" 1. Edit {primitive_file} to define command template and parameters")
    print(f" 2. Test with: ate primitive test <primitive_id> --params '{{...}}'")
    print(f" 3. Publish with: ate primitive push {primitive_file}")
|
|
2119
|
+
|
|
2120
|
+
def primitive_push(self, primitive_file: str) -> None:
    """Push a primitive skill definition to FoodforThought.

    Loads the local ``*.primitive.json`` file, validates the minimum
    required fields (name, and a commandTemplate or commandSequence),
    warns when no protocolId is set, then POSTs the definition to the
    /primitives endpoint.

    Args:
        primitive_file: Path to the local primitive definition file.

    Exits with status 1 if the file is missing, validation fails, or the
    API request fails.
    """
    file_path = Path(primitive_file)
    if not file_path.exists():
        print(f"Error: Primitive file not found: {primitive_file}", file=sys.stderr)
        sys.exit(1)

    print(f"\n{'=' * 60}")
    print("Publishing Primitive Skill")
    print(f"{'=' * 60}\n")

    # NOTE(review): json.load here can raise JSONDecodeError on a malformed
    # file, escaping as a raw traceback — confirm whether that's intended.
    with open(file_path) as f:
        primitive_data = json.load(f)

    print(f"Name: {primitive_data.get('displayName') or primitive_data.get('name')}")
    print(f"Category: {primitive_data.get('category')}")

    # Validate required fields
    if not primitive_data.get("name"):
        print("Error: Primitive must have a name", file=sys.stderr)
        sys.exit(1)

    if not primitive_data.get("commandTemplate") and not primitive_data.get("commandSequence"):
        print("Error: Primitive must have a commandTemplate or commandSequence", file=sys.stderr)
        sys.exit(1)

    # Missing protocol link is allowed, but warn since it limits usefulness.
    if not primitive_data.get("protocolId"):
        print("Warning: No protocol ID specified. The primitive won't be linked to a protocol.", file=sys.stderr)

    try:
        response = self._request("POST", "/primitives", json=primitive_data)
        prim = response.get("primitive", {})
        print(f"\n✓ Primitive published successfully!")
        print(f" ID: {prim.get('id')}")
        print(f" Status: {prim.get('status')}")
        print(f"\nNext steps:")
        print(f" - Test it: ate primitive test {prim.get('id')} --params '{{...}}'")
        print(f" - View it: ate primitive get {prim.get('id')}")
    except Exception as e:
        print(f"Error publishing primitive: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2161
|
+
|
|
2162
|
+
# =========================================================================
|
|
2163
|
+
# Skill Abstractions (Layer 2) - Composed from Primitives
|
|
2164
|
+
# =========================================================================
|
|
2165
|
+
|
|
2166
|
+
def skill_init(self, name: str, robot_model: Optional[str] = None,
               template: str = "basic", output_dir: str = ".") -> None:
    """Initialize a new skill abstraction (composes primitives)"""
    rule = "=" * 60
    print(f"\n{rule}")
    print("Initializing Skill Abstraction")
    print(f"{rule}\n")

    target_dir = Path(output_dir)
    target_dir.mkdir(parents=True, exist_ok=True)

    # Assemble the skeleton definition piece by piece.
    error_policy = {
        "retryCount": 3,
        "retryDelayMs": 1000,
        "fallbackAction": None,
    }
    meta = {
        "author": "",
        "license": "MIT",
        "tags": [],
    }
    skeleton = {
        "name": name,
        "displayName": name.replace("_", " ").title(),
        "description": "",
        "robotModel": robot_model,
        "version": "1.0.0",
        "status": "experimental",
        "primitives": [],       # primitive IDs this skill uses
        "sequence": [],         # ordered execution sequence
        "parameters": [],       # skill-level parameters
        "preconditions": [],
        "postconditions": [],
        "errorHandling": error_policy,
        "metadata": meta,
    }

    # Write skill file
    destination = target_dir / f"{name}.skill.json"
    destination.write_text(json.dumps(skeleton, indent=2))

    print(f"✓ Created: {destination}")
    print(f"\nNext steps:")
    print(f" 1. Add primitives: ate skill compose {destination} <primitive_ids...>")
    print(f" 2. Edit {destination} to define sequence and parameters")
    print(f" 3. Test with: ate skill test {destination}")
    print(f" 4. Publish with: ate skill push {destination}")
|
|
2211
|
+
|
|
2212
|
+
def skill_compose(self, skill_file: str, primitive_ids: List[str]) -> None:
    """Add primitives to a skill's composition.

    Fetches each primitive from the API to validate it exists, appends
    its ID to the skill's "primitives" list (deduplicated) and a new
    step to the skill's "sequence" (NOT deduplicated — composing the
    same primitive twice adds two sequence steps), then rewrites the
    skill file in place.

    Args:
        skill_file: Path to the local ``*.skill.json`` file to update.
        primitive_ids: Server-side primitive IDs to compose into the skill.

    Exits with status 1 if the skill file does not exist; individual
    primitive fetch failures are reported but do not abort the run.
    """
    file_path = Path(skill_file)
    if not file_path.exists():
        print(f"Error: Skill file not found: {skill_file}", file=sys.stderr)
        sys.exit(1)

    with open(file_path) as f:
        skill_data = json.load(f)

    print(f"\n{'=' * 60}")
    print(f"Composing Skill: {skill_data.get('name')}")
    print(f"{'=' * 60}\n")

    # Fetch primitive details to validate
    for prim_id in primitive_ids:
        try:
            response = self._request("GET", f"/primitives/{prim_id}")
            prim = response.get("primitive", {})
            print(f" ✓ Adding: {prim.get('name')} ({prim.get('category')})")

            # Add to primitives list if not already present
            if prim_id not in skill_data.get("primitives", []):
                skill_data.setdefault("primitives", []).append(prim_id)

            # Add to sequence
            skill_data.setdefault("sequence", []).append({
                "primitiveId": prim_id,
                "primitiveName": prim.get("name"),
                "parameterMapping": {},  # Map skill params to primitive params
                "conditionCheck": None,
                "onError": "abort",
            })

        except Exception as e:
            # Best-effort: skip this primitive but keep composing the rest.
            print(f" ✗ Failed to fetch {prim_id}: {e}", file=sys.stderr)

    # Write updated skill file
    with open(file_path, "w") as f:
        json.dump(skill_data, f, indent=2)

    print(f"\n✓ Updated {skill_file}")
    print(f" Primitives: {len(skill_data.get('primitives', []))}")
    print(f" Sequence steps: {len(skill_data.get('sequence', []))}")
|
|
2256
|
+
|
|
2257
|
+
def skill_list(self, robot_model: Optional[str] = None,
               status: Optional[str] = None) -> None:
    """List skill abstractions"""
    # Only truthy filters are forwarded to the API.
    query = {key: value
             for key, value in (("robotModel", robot_model), ("status", status))
             if value}

    # Status glyph lookup shared by every row.
    icon_map = {"verified": "✓", "tested": "○", "experimental": "◌"}

    try:
        reply = self._request("GET", "/skills", params=query)
        skills = reply.get("skills", [])

        divider = "=" * 70
        print(f"\n{divider}")
        print(f"Skill Abstractions ({len(skills)} total)")
        print(divider)

        if not skills:
            print("\nNo skills found. Create one with: ate skill init <name>")
            return

        for entry in skills:
            marker = icon_map.get(entry.get("status", "experimental"), "?")
            print(f"\n{marker} {entry.get('name')}")
            print(f" Robot: {entry.get('robotModel', 'Any')}")
            print(f" Primitives: {len(entry.get('primitives', []))}")
            print(f" ID: {entry.get('id')}")

    except Exception as e:
        print(f"Error listing skills: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2290
|
+
|
|
2291
|
+
def skill_get(self, skill_id: str) -> None:
    """Fetch one skill abstraction from the API and print a detailed report.

    Prints the header, description/robot/status/version, the ordered
    execution sequence (with per-step conditions when set), and the
    skill-level parameters.

    Args:
        skill_id: Server-side ID of the skill to display.

    Exits with status 1 on any request failure.
    """
    try:
        response = self._request("GET", f"/skills/{skill_id}")
        skill = response.get("skill", {})

        print(f"\n{'=' * 70}")
        print(f"Skill: {skill.get('displayName') or skill.get('name')}")
        print(f"{'=' * 70}")

        print(f"\nDescription: {skill.get('description') or 'No description'}")
        print(f"Robot: {skill.get('robotModel', 'Any')}")
        print(f"Status: {skill.get('status')}")
        print(f"Version: {skill.get('version')}")

        # Show sequence
        sequence = skill.get("sequence", [])
        if sequence:
            print(f"\nExecution Sequence ({len(sequence)} steps):")
            for i, step in enumerate(sequence, 1):
                # Fall back to the raw primitive ID when no name was stored.
                print(f" {i}. {step.get('primitiveName', step.get('primitiveId'))}")
                if step.get("conditionCheck"):
                    print(f" Condition: {step.get('conditionCheck')}")

        # Show parameters
        params = skill.get("parameters", [])
        if params:
            print(f"\nParameters:")
            for p in params:
                print(f" - {p.get('name')}: {p.get('type')}")

    except Exception as e:
        print(f"Error fetching skill: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2325
|
+
|
|
2326
|
+
def skill_push(self, skill_file: str) -> None:
    """Push a skill abstraction to FoodforThought"""
    source = Path(skill_file)
    if not source.exists():
        print(f"Error: Skill file not found: {skill_file}", file=sys.stderr)
        sys.exit(1)

    rule = "=" * 60
    print(f"\n{rule}")
    print("Publishing Skill Abstraction")
    print(f"{rule}\n")

    with open(source) as handle:
        payload = json.load(handle)

    print(f"Name: {payload.get('displayName') or payload.get('name')}")
    print(f"Primitives: {len(payload.get('primitives', []))}")

    # A name is mandatory; an empty primitive list is allowed but warned.
    if not payload.get("name"):
        print("Error: Skill must have a name", file=sys.stderr)
        sys.exit(1)

    if not payload.get("primitives"):
        print("Warning: Skill has no primitives. Add with: ate skill compose", file=sys.stderr)

    try:
        reply = self._request("POST", "/skills", json=payload)
        published = reply.get("skill", {})
        print(f"\n✓ Skill published successfully!")
        print(f" ID: {published.get('id')}")
        print(f" Status: {published.get('status')}")
    except Exception as e:
        print(f"Error publishing skill: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2359
|
+
|
|
2360
|
+
def skill_test(self, skill_file_or_id: str, params_json: Optional[str] = None,
               dry_run: bool = True) -> None:
    """Test a skill (simulated or real execution).

    Args:
        skill_file_or_id: Path to a local ``*.skill.json`` file, or a
            remote skill ID to fetch from the API.
        params_json: Optional JSON string of skill parameters; parsed for
            validation up front (invalid JSON aborts with exit code 1).
        dry_run: When True (default), only print what would execute;
            otherwise simulate execution with a short sleep per step.
    """
    print(f"\n{'=' * 60}")
    print("Testing Skill")
    print(f"{'=' * 60}\n")

    # Check if it's a file or ID
    if Path(skill_file_or_id).exists():
        with open(skill_file_or_id) as f:
            skill_data = json.load(f)
        print(f"Testing local skill: {skill_data.get('name')}")
    else:
        # Fetch from API
        response = self._request("GET", f"/skills/{skill_file_or_id}")
        skill_data = response.get("skill", {})
        print(f"Testing remote skill: {skill_data.get('name')}")

    # BUG FIX: validate the user-supplied parameter JSON explicitly,
    # mirroring primitive_test, instead of letting a raw JSONDecodeError
    # traceback escape to the user.
    try:
        params = json.loads(params_json) if params_json else {}
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON for parameters: {e}", file=sys.stderr)
        sys.exit(1)

    sequence = skill_data.get("sequence", [])
    print(f"\nSequence ({len(sequence)} steps):")

    for i, step in enumerate(sequence, 1):
        # Fall back to the raw primitive ID when no name was stored.
        prim_name = step.get("primitiveName", step.get("primitiveId"))
        if dry_run:
            print(f" [{i}/{len(sequence)}] Would execute: {prim_name}")
        else:
            print(f" [{i}/{len(sequence)}] Executing: {prim_name}...")
            # In real mode, would invoke the primitive via protocol
            time.sleep(0.5)  # Simulate execution
            print(f" ✓ Complete")

    print(f"\n{'✓ Dry run complete' if dry_run else '✓ Execution complete'}")
|
|
2394
|
+
|
|
2395
|
+
# ========================================
|
|
2396
|
+
# Phase 3: AI Bridge - Interactive Robot Communication
|
|
2397
|
+
# ========================================
|
|
2398
|
+
|
|
2399
|
+
def bridge_connect(self, port: str, transport: str = "serial",
                   baud_rate: int = 115200, protocol_id: Optional[str] = None) -> None:
    """Connect to a robot via serial or BLE and start interactive session.

    For serial transport, opens the port and enters a REPL that forwards
    typed commands to the robot and echoes responses. Dot-commands control
    the session: .quit, .record, .stop, .save (writes the recorded commands
    to a session_<epoch>.json file), and .protocol. Recorded commands carry
    absolute time.time() timestamps. For BLE transport, delegates to
    _bridge_ble_connect.

    Args:
        port: Serial device path (or BLE address for transport="ble").
        transport: "serial" (default) or "ble"; other values do nothing.
        baud_rate: Serial baud rate (default 115200).
        protocol_id: Optional protocol shown by the .protocol command.
    """
    print(f"\n{'=' * 60}")
    print("FoodForThought Bridge - Robot Communication Interface")
    print(f"{'=' * 60}\n")

    if transport == "serial":
        try:
            import serial
        except ImportError:
            print("Error: pyserial is required. Install with: pip install pyserial", file=sys.stderr)
            sys.exit(1)

        print(f"Connecting to {port} at {baud_rate} baud...")
        try:
            ser = serial.Serial(port, baud_rate, timeout=1)
            print(f"✓ Connected to {port}")
            print(f"\nInteractive mode. Type commands to send to robot.")
            print("Special commands:")
            print(" .quit - Exit bridge")
            print(" .record - Start recording for primitive creation")
            print(" .stop - Stop recording")
            print(" .save - Save recorded session")
            print(" .protocol - Show loaded protocol info")
            print("-" * 60 + "\n")

            recording = False
            recorded_commands = []

            try:
                while True:
                    try:
                        cmd = input("bridge> ").strip()
                        if not cmd:
                            continue

                        if cmd == ".quit":
                            break
                        elif cmd == ".record":
                            recording = True
                            recorded_commands = []
                            print("Recording started...")
                            continue
                        elif cmd == ".stop":
                            recording = False
                            print(f"Recording stopped. {len(recorded_commands)} commands recorded.")
                            continue
                        elif cmd == ".save":
                            if recorded_commands:
                                filename = f"session_{int(time.time())}.json"
                                with open(filename, "w") as f:
                                    json.dump({
                                        "port": port,
                                        "baud_rate": baud_rate,
                                        "commands": recorded_commands
                                    }, f, indent=2)
                                # BUG FIX: message previously never interpolated
                                # the generated filename, so users had no way
                                # to know where the session was written.
                                print(f"Session saved to {filename}")
                            else:
                                print("No commands recorded yet.")
                            continue
                        elif cmd == ".protocol":
                            if protocol_id:
                                self.protocol_get(protocol_id)
                            else:
                                print("No protocol loaded. Use --protocol flag to specify.")
                            continue

                        # Send command to robot
                        ser.write((cmd + "\n").encode())
                        if recording:
                            recorded_commands.append({
                                "command": cmd,
                                "timestamp": time.time()
                            })

                        # Read response (brief settle before draining the buffer).
                        time.sleep(0.1)
                        response = ser.read_all().decode("utf-8", errors="replace").strip()
                        if response:
                            print(f"< {response}")
                            if recording:
                                recorded_commands[-1]["response"] = response

                    except KeyboardInterrupt:
                        break
            finally:
                # ROBUSTNESS: guarantee the port is released even if the
                # session loop raises mid-interaction.
                ser.close()

            print("\n✓ Disconnected")

        except Exception as e:
            print(f"Error connecting to {port}: {e}", file=sys.stderr)
            sys.exit(1)

    elif transport == "ble":
        print("BLE bridge requires async operation. Starting BLE session...")
        self._bridge_ble_connect(port)
|
|
2495
|
+
|
|
2496
|
+
def _bridge_ble_connect(self, address: str) -> None:
    """Connect to robot via BLE (requires bleak).

    Opens a BleakClient session to *address*, lists its GATT services and
    characteristics, then runs an interactive read/write/notify REPL until
    the user enters ``.quit`` or presses Ctrl-C.

    Args:
        address: BLE device address (or identifier accepted by BleakClient).

    Exits with status 1 if bleak is missing or the connection fails.
    """
    try:
        import asyncio
        from bleak import BleakClient
    except ImportError:
        print("Error: bleak is required for BLE. Install with: pip install bleak", file=sys.stderr)
        sys.exit(1)

    async def ble_session():
        # Whole interactive session runs inside one async connection scope;
        # leaving the `async with` disconnects the client.
        print(f"Connecting to BLE device {address}...")
        try:
            async with BleakClient(address) as client:
                print(f"✓ Connected to {address}")

                # List services
                print("\nAvailable services:")
                for service in client.services:
                    print(f" {service.uuid}: {service.description or 'Unknown'}")
                    for char in service.characteristics:
                        props = ", ".join(char.properties)
                        print(f" └─ {char.uuid} [{props}]")

                print("\nInteractive mode. Commands:")
                print(" read <uuid> - Read characteristic")
                print(" write <uuid> <hex> - Write to characteristic")
                print(" notify <uuid> - Subscribe to notifications")
                print(" .quit - Exit")
                print("-" * 60 + "\n")

                while True:
                    try:
                        # NOTE(review): blocking input() inside the event loop
                        # stalls notification callbacks until the next prompt
                        # returns — confirm this is acceptable for this tool.
                        cmd = input("ble> ").strip()
                        if not cmd:
                            continue

                        if cmd == ".quit":
                            break

                        parts = cmd.split()
                        # `uuid` below is a local string, shadowing the stdlib
                        # module name (not imported here, so harmless).
                        if parts[0] == "read" and len(parts) >= 2:
                            uuid = parts[1]
                            try:
                                data = await client.read_gatt_char(uuid)
                                print(f"< {data.hex()} ({data})")
                            except Exception as e:
                                print(f"Error reading: {e}")

                        elif parts[0] == "write" and len(parts) >= 3:
                            uuid = parts[1]
                            hex_data = parts[2]
                            try:
                                data = bytes.fromhex(hex_data)
                                await client.write_gatt_char(uuid, data)
                                print(f"✓ Written {hex_data}")
                            except Exception as e:
                                print(f"Error writing: {e}")

                        elif parts[0] == "notify" and len(parts) >= 2:
                            uuid = parts[1]
                            # Notification callback prints raw hex payloads.
                            def callback(sender, data):
                                print(f"[{sender}] {data.hex()}")
                            try:
                                await client.start_notify(uuid, callback)
                                print(f"✓ Subscribed to {uuid}")
                            except Exception as e:
                                print(f"Error subscribing: {e}")

                        else:
                            print("Unknown command. Use read/write/notify or .quit")

                    except KeyboardInterrupt:
                        break

                print("\n✓ Disconnected")

        except Exception as e:
            print(f"Error connecting to BLE device: {e}", file=sys.stderr)
            sys.exit(1)

    # Drive the whole session to completion on a fresh event loop.
    asyncio.run(ble_session())
|
|
2577
|
+
|
|
2578
|
+
def bridge_send(self, port: str, command: str, transport: str = "serial",
                baud_rate: int = 115200, wait: float = 0.5) -> None:
    """Send a single command to robot and print response.

    Args:
        port: Serial device to open (e.g. /dev/ttyUSB0).
        command: Command text; a trailing newline is appended before sending.
        transport: Only "serial" is implemented; any other value prints an
            error to stderr and returns.
        baud_rate: Serial baud rate.
        wait: Seconds to sleep after writing, giving the robot time to reply.
    """
    if transport != "serial":
        # Guard clause replaces the original trailing else-branch; same output.
        print("BLE send not yet implemented. Use bridge connect for BLE.", file=sys.stderr)
        return

    try:
        import serial
    except ImportError:
        print("Error: pyserial is required. Install with: pip install pyserial", file=sys.stderr)
        sys.exit(1)

    try:
        # Context manager guarantees the port is closed even when write/read
        # raises (the original only called ser.close() on the happy path).
        with serial.Serial(port, baud_rate, timeout=1) as ser:
            ser.write((command + "\n").encode())
            time.sleep(wait)
            response = ser.read_all().decode("utf-8", errors="replace").strip()
            if response:
                print(response)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2601
|
+
|
|
2602
|
+
def bridge_record(self, port: str, output: str, transport: str = "serial",
                  baud_rate: int = 115200, primitive_name: Optional[str] = None) -> None:
    """Record an interactive command session for creating a primitive skill.

    Commands typed at the prompt are sent over serial; each command, its
    offset from session start, and the robot's reply are stored and written
    to *output* as JSON when the user presses Ctrl+C.

    Args:
        port: Serial device to open.
        output: Path of the JSON file the recording is written to.
        transport: Only "serial" is supported; anything else exits with error.
        baud_rate: Serial baud rate.
        primitive_name: Optional name stored in the recording; defaults to a
            timestamped placeholder.
    """
    print(f"\n{'=' * 60}")
    print("FoodForThought Bridge - Recording Mode")
    print(f"{'=' * 60}\n")

    if transport != "serial":
        print("Recording currently only supports serial connections", file=sys.stderr)
        sys.exit(1)

    try:
        import serial
    except ImportError:
        print("Error: pyserial is required. Install with: pip install pyserial", file=sys.stderr)
        sys.exit(1)

    print(f"Connecting to {port} at {baud_rate} baud...")
    try:
        # Context manager guarantees the port is closed even if an unexpected
        # error occurs mid-session (the original only closed it on the happy path).
        with serial.Serial(port, baud_rate, timeout=1) as ser:
            print(f"✓ Connected and recording to {output}")
            print("\nType commands to send. Ctrl+C to stop and save.\n")

            recorded = {
                "name": primitive_name or f"recorded_skill_{int(time.time())}",
                "port": port,
                "baud_rate": baud_rate,
                "start_time": time.time(),
                "commands": []
            }

            while True:
                try:
                    cmd = input("[REC] bridge> ").strip()
                    if not cmd:
                        continue

                    timestamp = time.time()
                    ser.write((cmd + "\n").encode())
                    # Brief pause so the robot's reply arrives before read_all().
                    time.sleep(0.1)
                    response = ser.read_all().decode("utf-8", errors="replace").strip()

                    recorded["commands"].append({
                        "command": cmd,
                        # Offsets are relative to session start so replay can
                        # reproduce the original timing.
                        "timestamp": timestamp - recorded["start_time"],
                        "response": response,
                    })

                    if response:
                        print(f"< {response}")

                except KeyboardInterrupt:
                    # Ctrl+C ends the session; the recording is saved below.
                    break

        recorded["end_time"] = time.time()
        recorded["duration"] = recorded["end_time"] - recorded["start_time"]

        # Save recording
        with open(output, "w") as f:
            json.dump(recorded, f, indent=2)

        print(f"\n✓ Recorded {len(recorded['commands'])} commands")
        print(f"✓ Saved to {output}")
        print("\nTo create a primitive from this recording:")
        print(f" ate primitive create --from-recording {output}")

    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2673
|
+
|
|
2674
|
+
def bridge_replay(self, recording_file: str, port: str, transport: str = "serial",
                  baud_rate: int = 115200, speed: float = 1.0) -> None:
    """Replay a recorded session.

    Args:
        recording_file: JSON file produced by bridge_record.
        port: Serial device to open.
        transport: Only "serial" is supported; anything else exits with error.
        baud_rate: Serial baud rate.
        speed: Time-scale factor (>0); 2.0 replays twice as fast.
    """
    if not Path(recording_file).exists():
        print(f"Error: Recording file not found: {recording_file}", file=sys.stderr)
        sys.exit(1)

    if speed <= 0:
        # Explicit check instead of a ZeroDivisionError inside the replay loop.
        print("Error: speed must be greater than 0", file=sys.stderr)
        sys.exit(1)

    with open(recording_file) as f:
        recording = json.load(f)

    if transport != "serial":
        print("Replay currently only supports serial connections", file=sys.stderr)
        sys.exit(1)

    try:
        import serial
    except ImportError:
        print("Error: pyserial is required. Install with: pip install pyserial", file=sys.stderr)
        sys.exit(1)

    print(f"\n{'=' * 60}")
    print(f"Replaying: {recording.get('name', recording_file)}")
    print(f"Commands: {len(recording.get('commands', []))}")
    print(f"Speed: {speed}x")
    print(f"{'=' * 60}\n")

    try:
        # Context manager guarantees the port is closed even if replay fails
        # partway (the original leaked the handle on the error path).
        with serial.Serial(port, baud_rate, timeout=1) as ser:
            commands = recording.get("commands", [])

            prev_timestamp = 0
            for i, entry in enumerate(commands):
                timestamp = entry.get("timestamp", 0)
                # Recreate the recorded inter-command gap, scaled by speed;
                # never sleep before the very first command.
                delay = (timestamp - prev_timestamp) / speed
                if delay > 0 and i > 0:
                    time.sleep(delay)
                prev_timestamp = timestamp

                cmd = entry.get("command", "")
                print(f"[{i+1}/{len(commands)}] > {cmd}")
                ser.write((cmd + "\n").encode())

                # Brief pause so the robot's reply arrives before read_all().
                time.sleep(0.1)
                response = ser.read_all().decode("utf-8", errors="replace").strip()
                if response:
                    print(f" < {response}")

        print("\n✓ Replay complete")

    except Exception as e:
        print(f"Error during replay: {e}", file=sys.stderr)
        sys.exit(1)
|
|
2727
|
+
|
|
2728
|
+
# =========================================================================
|
|
2729
|
+
# Skill Compiler Commands
|
|
2730
|
+
# =========================================================================
|
|
2731
|
+
|
|
2732
|
+
def compile_skill(
    self,
    skill_path: str,
    output: str = "./output",
    target: str = "ros2",
    robot: Optional[str] = None,
    ate_dir: Optional[str] = None,
) -> None:
    """
    Compile a skill specification into a deployable package.

    Args:
        skill_path: Path to skill.yaml specification
        output: Output directory for generated files
        target: Target platform (ros2, docker, python)
        robot: Path to robot URDF file for hardware config
        ate_dir: Path to ATE config directory for servo mapping
    """
    from pathlib import Path
    from ate.skill_schema import SkillSpecification
    from ate.generators import (
        SkillCodeGenerator,
        ROS2PackageGenerator,
        DockerGenerator,
        generate_hardware_config,
    )

    skill_path = Path(skill_path)
    output_path = Path(output)

    if not skill_path.exists():
        print(f"Error: Skill specification not found: {skill_path}", file=sys.stderr)
        sys.exit(1)

    # Ensure the output directory exists up front; the python target writes
    # setup.py directly into it and would otherwise fail on a fresh path.
    output_path.mkdir(parents=True, exist_ok=True)

    print(f"\n{'=' * 60}")
    print(f" Skill Compiler v1.0.0")
    print(f"{'=' * 60}")
    print(f" Input: {skill_path}")
    print(f" Output: {output_path}")
    print(f" Target: {target}")
    if robot:
        print(f" Robot: {robot}")
    print(f"{'=' * 60}\n")

    # Load skill specification
    print("Loading skill specification...")
    try:
        spec = SkillSpecification.from_yaml(str(skill_path))
    except Exception as e:
        print(f"Error parsing skill specification: {e}", file=sys.stderr)
        sys.exit(1)

    # Validate specification; abort listing every problem found.
    print("Validating specification...")
    errors = spec.validate()
    if errors:
        print("\nValidation errors:", file=sys.stderr)
        for error in errors:
            print(f" - {error}", file=sys.stderr)
        sys.exit(1)
    print(f" ✓ Specification valid: {spec.name} v{spec.version}")
    print(f" ✓ Primitives: {len(spec.primitives)}")
    print(f" ✓ Hardware requirements: {len(spec.hardware_requirements)}")

    # Generate skill code (shared by all targets).
    print("\nGenerating skill code...")
    skill_gen = SkillCodeGenerator(spec)
    skill_files = skill_gen.generate(output_path / "src")
    print(f" ✓ Generated {len(skill_files)} files")

    # Generate platform-specific package
    if target == "ros2":
        print("\nGenerating ROS2 package...")
        ros2_gen = ROS2PackageGenerator(spec)
        ros2_files = ros2_gen.generate(output_path)
        print(f" ✓ Generated ROS2 package with {len(ros2_files)} files")

    elif target == "docker":
        print("\nGenerating Docker configuration...")
        docker_gen = DockerGenerator(spec)
        docker_files = docker_gen.generate(output_path)
        print(f" ✓ Generated Docker files: {len(docker_files)}")

    elif target == "python":
        print("\nGenerating Python package...")
        # Python package is already generated by skill_gen
        # Just add setup.py
        setup_content = f'''from setuptools import setup, find_packages

setup(
    name="{spec.name}",
    version="{spec.version}",
    packages=find_packages(),
    description="{spec.description}",
    python_requires=">=3.8",
)
'''
        (output_path / "setup.py").write_text(setup_content)
        print(" ✓ Generated setup.py")

    # Generate hardware config if robot provided
    if robot or ate_dir:
        print("\nGenerating hardware configuration...")
        try:
            hw_config = generate_hardware_config(
                spec,
                urdf_path=robot,
                ate_dir=ate_dir,
            )
            import yaml
            config_dir = output_path / "config"
            config_dir.mkdir(parents=True, exist_ok=True)
            config_path = config_dir / "hardware_config.yaml"
            with open(config_path, "w") as f:
                yaml.dump(hw_config, f, default_flow_style=False)
            print(f" ✓ Generated hardware config: {config_path}")
        except Exception as e:
            # Hardware config is optional — warn, don't abort the compile.
            print(f" ⚠ Warning: Could not generate hardware config: {e}")

    # Copy skill.yaml to output so the compiled package is self-describing.
    import shutil
    shutil.copy(skill_path, output_path / "skill.yaml")

    print(f"\n{'=' * 60}")
    print(f" ✓ Compilation complete!")
    print(f" Output: {output_path.absolute()}")
    print(f"{'=' * 60}\n")

    # Print next steps
    print("Next steps:")
    if target == "ros2":
        print(" 1. cd output && colcon build")
        print(" 2. source install/setup.bash")
        print(f" 3. ros2 launch {spec.name}_skill skill.launch.py")
    elif target == "docker":
        print(" 1. cd output && docker build -t skill .")
        print(" 2. docker run skill")
    elif target == "python":
        print(" 1. cd output && pip install -e .")
        # Hoisted out of the f-string: reusing the same quote character inside
        # a format expression (.replace('_', '') within a string that itself
        # contains '...') is a SyntaxError on every Python before 3.12.
        class_name = spec.name.title().replace("_", "") + "Skill"
        print(f" 2. python -c 'from {spec.name} import {class_name}'")
|
|
2872
|
+
|
|
2873
|
+
def test_compiled_skill(
    self,
    skill_path: str,
    mode: str = "sim",
    robot_port: Optional[str] = None,
    params: Optional[str] = None,
) -> None:
    """
    Test a compiled skill in simulation or on hardware.

    Args:
        skill_path: Path to compiled skill directory
        mode: Test mode (sim, hardware, mock)
        robot_port: Serial port for hardware testing
        params: JSON parameters for skill execution
    """
    from pathlib import Path

    skill_path = Path(skill_path)

    # Abort early if the compiled skill directory is missing.
    if not skill_path.exists():
        print(f"Error: Skill directory not found: {skill_path}", file=sys.stderr)
        sys.exit(1)

    # Check for skill.yaml
    skill_yaml = skill_path / "skill.yaml"
    if not skill_yaml.exists():
        print(f"Error: skill.yaml not found in {skill_path}", file=sys.stderr)
        sys.exit(1)

    print(f"\n{'=' * 60}")
    print(f" Skill Test Runner")
    print(f"{'=' * 60}")
    print(f" Skill: {skill_path}")
    print(f" Mode: {mode}")
    if robot_port:
        print(f" Port: {robot_port}")
    print(f"{'=' * 60}\n")

    # Load skill specification
    from ate.skill_schema import SkillSpecification
    spec = SkillSpecification.from_yaml(str(skill_yaml))

    print(f"Testing skill: {spec.name} v{spec.version}")
    print(f"Primitives: {', '.join(spec.primitives)}")
    print()

    if mode == "mock":
        print("Running in mock mode (no hardware)...")
        # Import and run skill with mock drivers
        try:
            import importlib.util
            # The compiled layout is <skill_dir>/src/<name_with_underscores>/skill.py
            # (see compile_skill, which generates into output/src).
            skill_module_path = skill_path / "src" / f"{spec.name.replace('-', '_')}" / "skill.py"
            if skill_module_path.exists():
                # Load the generated module directly from its file path.
                spec_loader = importlib.util.spec_from_file_location("skill", skill_module_path)
                module = importlib.util.module_from_spec(spec_loader)
                spec_loader.loader.exec_module(module)

                # Get the skill class
                # NOTE(review): assumes the generator names the class
                # TitleCased-name + "Skill" (e.g. pick_place -> PickPlaceSkill)
                # — confirm against ate.generators.skill_generator.
                class_name = spec.name.replace('_', ' ').title().replace(' ', '') + "Skill"
                skill_class = getattr(module, class_name)

                # Create instance with mock config
                skill = skill_class({"driver": "mock"})

                # Parse params if provided
                input_params = {}
                if params:
                    input_params = json.loads(params)

                # Get input class
                # Generated modules pair each XSkill with an XInput dataclass-like
                # type; keyword params map onto its fields.
                input_class_name = class_name.replace("Skill", "Input")
                input_class = getattr(module, input_class_name)
                skill_input = input_class(**input_params)

                # Execute
                print("\nExecuting skill...")
                result = skill.execute(skill_input)
                print(f"\nResult:")
                print(f" Success: {result.success}")
                print(f" Message: {result.message}")
                print(f" Time: {result.execution_time:.3f}s")
            else:
                # No generated module: fall back to a validation-only dry run.
                print(f"Warning: Skill module not found at {skill_module_path}")
                print("Running dry-run validation instead...")
                print("\n ✓ Specification valid")
                print(" ✓ All primitives available")
                print(" ✓ Hardware requirements satisfied (mock)")

        except Exception as e:
            print(f"Error running skill: {e}", file=sys.stderr)
            import traceback
            traceback.print_exc()
            sys.exit(1)

    elif mode == "sim":
        # Simulation backend not wired up yet: recurse once in mock mode.
        print("Simulation testing requires MuJoCo or Gazebo integration.")
        print("For now, running mock test instead...")
        self.test_compiled_skill(skill_path, mode="mock", params=params)

    elif mode == "hardware":
        if not robot_port:
            print("Error: --robot-port required for hardware mode", file=sys.stderr)
            sys.exit(1)
        # Hardware mode currently only prints guidance; no commands are sent.
        print(f"Hardware testing on {robot_port}...")
        print("Note: Full hardware testing requires bridge connection.")
        print("Consider running: ate bridge serve")

    print(f"\n{'=' * 60}")
    print(f" ✓ Test complete")
    print(f"{'=' * 60}\n")
|
|
2984
|
+
|
|
2985
|
+
def publish_compiled_skill(
    self,
    skill_path: str,
    visibility: str = "public",
) -> None:
    """
    Publish a compiled skill to FoodforThought registry.

    Args:
        skill_path: Path to compiled skill directory
        visibility: Visibility (public, private, team)
    """
    from pathlib import Path

    skill_dir = Path(skill_path)

    if not skill_dir.exists():
        print(f"Error: Skill directory not found: {skill_dir}", file=sys.stderr)
        sys.exit(1)

    # The compiled package must carry its specification.
    skill_yaml = skill_dir / "skill.yaml"
    if not skill_yaml.exists():
        print(f"Error: skill.yaml not found in {skill_dir}", file=sys.stderr)
        sys.exit(1)

    # Load the specification that describes what we are publishing.
    from ate.skill_schema import SkillSpecification
    spec = SkillSpecification.from_yaml(str(skill_yaml))

    rule = "=" * 60
    print(f"\n{rule}")
    print(f" Publishing Skill to FoodforThought")
    print(rule)
    print(f" Name: {spec.name}")
    print(f" Version: {spec.version}")
    print(f" Visibility: {visibility}")
    print(f"{rule}\n")

    # Build the upload payload: spec contents plus publish metadata.
    skill_data = spec.to_dict()
    skill_data["visibility"] = visibility

    # Attach a relative listing of every non-hidden file in the package.
    files = [
        str(entry.relative_to(skill_dir))
        for entry in skill_dir.rglob("*")
        if entry.is_file() and not entry.name.startswith(".")
    ]
    skill_data["files"] = files

    print(f"Files to publish: {len(files)}")

    # Upload to API; fall back to an offline mock response on any failure.
    try:
        response = self._request("POST", "/skills/publish", json=skill_data)
        skill_id = response.get("skillId", response.get("id", "unknown"))
        skill_url = f"https://kindly.fyi/skills/{skill_id}"

        print(f"\n✓ Skill published successfully!")
        print(f" ID: {skill_id}")
        print(f" URL: {skill_url}")

    except Exception:
        # Mock response for offline testing
        mock_id = f"sk_{spec.name}_{spec.version.replace('.', '_')}"
        print(f"\n✓ Skill prepared for publishing (API unavailable)")
        print(f" Mock ID: {mock_id}")
        print(f" Run with API key to publish: export ATE_API_KEY=your_key")
|
|
3053
|
+
|
|
3054
|
+
def check_skill_compatibility(
    self,
    skill_path: str,
    robot_urdf: Optional[str] = None,
    robot_ate_dir: Optional[str] = None,
) -> None:
    """
    Check if a skill is compatible with a robot.

    Args:
        skill_path: Path to skill.yaml
        robot_urdf: Path to robot URDF
        robot_ate_dir: Path to ATE config directory
    """
    from pathlib import Path
    from ate.skill_schema import SkillSpecification
    from ate.compatibility import check_compatibility_from_paths

    spec_file = Path(skill_path)
    if not spec_file.exists():
        print(f"Error: Skill not found: {spec_file}", file=sys.stderr)
        sys.exit(1)

    # Derive a display name for the robot from whichever input was supplied.
    if robot_urdf:
        robot_name = Path(robot_urdf).stem
    elif robot_ate_dir:
        robot_name = Path(robot_ate_dir).name
    else:
        robot_name = "unknown"

    rule = "=" * 60
    print(f"\n{rule}")
    print(f" Skill Compatibility Check")
    print(rule)
    print(f" Skill: {spec_file}")
    print(f" Robot: {robot_name}")
    print(f"{rule}\n")

    # Delegate the actual analysis to the compatibility module.
    report = check_compatibility_from_paths(
        skill_yaml=str(spec_file),
        robot_urdf=robot_urdf,
        robot_ate_dir=robot_ate_dir,
        robot_name=robot_name,
    )

    print(report)

    # Non-zero exit signals incompatibility to scripts and CI.
    if not report.compatible:
        sys.exit(1)
|
|
3102
|
+
|
|
3103
|
+
|
|
3104
|
+
def data_upload(self, path: str, skill: str, stage: str) -> None:
    # Upload dataset/sensor logs
    # NOTE(review): this is an explicit unbound call on ATEClient rather than
    # super().data_upload(...). If this method is defined on ATEClient itself
    # (or overrides ATEClient.data_upload), the call recurses infinitely —
    # the enclosing class header is not visible here; confirm and consider
    # using super() if this class subclasses ATEClient.
    ATEClient.data_upload(self, path, skill, stage)
|
|
3107
|
+
|
|
3108
|
+
|
|
3109
|
+
def login_command():
    """Interactive login to store API key"""
    # Walk the user through obtaining a key before prompting for it.
    for instruction in (
        "Authenticate with FoodforThought",
        "1. Go to https://kindly.fyi/settings/keys",
        "2. Create a new API key",
        "3. Paste it here",
    ):
        print(instruction)

    # getpass keeps the key out of the terminal echo; Ctrl+C aborts cleanly.
    try:
        api_key = getpass.getpass("API Key: ").strip()
    except KeyboardInterrupt:
        print("\nLogin cancelled.")
        sys.exit(1)

    if not api_key:
        print("Error: API key cannot be empty", file=sys.stderr)
        sys.exit(1)

    if not api_key.startswith("ate_"):
        # Unexpected prefix: warn, and let the user bail before anything is written.
        print("Warning: API key usually starts with 'ate_'. Please check your key.", file=sys.stderr)
        answer = input("Continue anyway? (y/N): ")
        if answer.lower() != "y":
            sys.exit(1)

    try:
        CONFIG_DIR.mkdir(parents=True, exist_ok=True)

        # Start from the existing config when it parses; otherwise from scratch.
        config = {}
        if CONFIG_FILE.exists():
            try:
                with open(CONFIG_FILE) as fh:
                    config = json.load(fh)
            except Exception:
                pass

        # Store (or overwrite) the key, preserving any other settings.
        config["api_key"] = api_key

        with open(CONFIG_FILE, "w") as fh:
            json.dump(config, fh, indent=2)

        print(f"✓ Successfully logged in. Credentials saved to {CONFIG_FILE}")

    except Exception as e:
        print(f"Error saving credentials: {e}", file=sys.stderr)
        sys.exit(1)
|
|
3156
|
+
|
|
3157
|
+
|
|
3158
|
+
def main():
|
|
3159
|
+
"""Main CLI entry point"""
|
|
3160
|
+
parser = argparse.ArgumentParser(description="FoodforThought CLI")
|
|
3161
|
+
subparsers = parser.add_subparsers(dest="command", help="Command to run")
|
|
3162
|
+
|
|
3163
|
+
# init command
|
|
3164
|
+
init_parser = subparsers.add_parser("init", help="Initialize a new repository")
|
|
3165
|
+
init_parser.add_argument("name", help="Repository name")
|
|
3166
|
+
init_parser.add_argument("-d", "--description", default="", help="Repository description")
|
|
3167
|
+
init_parser.add_argument(
|
|
3168
|
+
"-v", "--visibility", choices=["public", "private"], default="public", help="Repository visibility"
|
|
3169
|
+
)
|
|
3170
|
+
|
|
3171
|
+
# clone command
|
|
3172
|
+
clone_parser = subparsers.add_parser("clone", help="Clone a repository")
|
|
3173
|
+
clone_parser.add_argument("repo_id", help="Repository ID")
|
|
3174
|
+
clone_parser.add_argument("target_dir", nargs="?", help="Target directory")
|
|
3175
|
+
|
|
3176
|
+
# commit command
|
|
3177
|
+
commit_parser = subparsers.add_parser("commit", help="Create a commit")
|
|
3178
|
+
commit_parser.add_argument("-m", "--message", required=True, help="Commit message")
|
|
3179
|
+
commit_parser.add_argument("files", nargs="*", help="Files to commit")
|
|
3180
|
+
|
|
3181
|
+
# push command
|
|
3182
|
+
push_parser = subparsers.add_parser("push", help="Push commits to remote")
|
|
3183
|
+
push_parser.add_argument("-b", "--branch", default="main", help="Branch name")
|
|
3184
|
+
|
|
3185
|
+
# deploy command
|
|
3186
|
+
deploy_parser = subparsers.add_parser("deploy", help="Deploy to robot")
|
|
3187
|
+
deploy_parser.add_argument("robot_type", help="Robot type (e.g., unitree-r1)")
|
|
3188
|
+
deploy_parser.add_argument("-r", "--repo-id", help="Repository ID (default: current repo)")
|
|
3189
|
+
|
|
3190
|
+
# test command
|
|
3191
|
+
test_parser = subparsers.add_parser("test", help="Test skills in simulation")
|
|
3192
|
+
test_parser.add_argument("-e", "--environment", default="gazebo",
|
|
3193
|
+
choices=["gazebo", "mujoco", "pybullet", "webots"],
|
|
3194
|
+
help="Simulation environment")
|
|
3195
|
+
test_parser.add_argument("-r", "--robot", help="Robot model to test with")
|
|
3196
|
+
test_parser.add_argument("--local", action="store_true", help="Run simulation locally")
|
|
3197
|
+
|
|
3198
|
+
# benchmark command
|
|
3199
|
+
benchmark_parser = subparsers.add_parser("benchmark", help="Run performance benchmarks")
|
|
3200
|
+
benchmark_parser.add_argument("-t", "--type", default="all",
|
|
3201
|
+
choices=["speed", "accuracy", "robustness", "efficiency", "all"],
|
|
3202
|
+
help="Benchmark type")
|
|
3203
|
+
benchmark_parser.add_argument("-n", "--trials", type=int, default=10, help="Number of trials")
|
|
3204
|
+
benchmark_parser.add_argument("--compare", help="Compare with baseline (repository ID)")
|
|
3205
|
+
|
|
3206
|
+
# adapt command
|
|
3207
|
+
adapt_parser = subparsers.add_parser("adapt", help="Adapt skills between robots")
|
|
3208
|
+
adapt_parser.add_argument("source_robot", help="Source robot model")
|
|
3209
|
+
adapt_parser.add_argument("target_robot", help="Target robot model")
|
|
3210
|
+
adapt_parser.add_argument("-r", "--repo-id", help="Repository ID to adapt")
|
|
3211
|
+
adapt_parser.add_argument("--analyze-only", action="store_true",
|
|
3212
|
+
help="Only show compatibility analysis")
|
|
3213
|
+
|
|
3214
|
+
# validate command
|
|
3215
|
+
validate_parser = subparsers.add_parser("validate", help="Validate safety and compliance")
|
|
3216
|
+
validate_parser.add_argument("-c", "--checks", nargs="+",
|
|
3217
|
+
choices=["collision", "speed", "workspace", "force", "all"],
|
|
3218
|
+
default=["all"], help="Safety checks to run")
|
|
3219
|
+
validate_parser.add_argument("--strict", action="store_true", help="Use strict validation")
|
|
3220
|
+
validate_parser.add_argument("-f", "--files", nargs="*", help="Specific files to validate")
|
|
3221
|
+
|
|
3222
|
+
# stream command
|
|
3223
|
+
stream_parser = subparsers.add_parser("stream", help="Stream sensor data")
|
|
3224
|
+
stream_parser.add_argument("action", choices=["start", "stop", "status"],
|
|
3225
|
+
help="Streaming action")
|
|
3226
|
+
stream_parser.add_argument("-s", "--sensors", nargs="+",
|
|
3227
|
+
help="Sensors to stream (e.g., camera, lidar, imu)")
|
|
3228
|
+
stream_parser.add_argument("-o", "--output", help="Output file or URL")
|
|
3229
|
+
stream_parser.add_argument("--format", default="rosbag",
|
|
3230
|
+
choices=["rosbag", "hdf5", "json", "live"],
|
|
3231
|
+
help="Data format")
|
|
3232
|
+
|
|
3233
|
+
# pull command - Pull skill data for training
|
|
3234
|
+
pull_parser = subparsers.add_parser("pull", help="Pull skill data for training")
|
|
3235
|
+
pull_parser.add_argument("skill_id", help="Skill ID to pull")
|
|
3236
|
+
pull_parser.add_argument("-r", "--robot", help="Filter by robot model")
|
|
3237
|
+
pull_parser.add_argument("-f", "--format", default="json",
|
|
3238
|
+
choices=["json", "rlds", "lerobot"],
|
|
3239
|
+
help="Output format (default: json)")
|
|
3240
|
+
pull_parser.add_argument("-o", "--output", default="./data",
|
|
3241
|
+
help="Output directory (default: ./data)")
|
|
3242
|
+
|
|
3243
|
+
# upload command - Upload demonstrations for labeling
|
|
3244
|
+
upload_parser = subparsers.add_parser("upload", help="Upload demonstrations for labeling")
|
|
3245
|
+
upload_parser.add_argument("path", help="Path to video file")
|
|
3246
|
+
upload_parser.add_argument("-r", "--robot", required=True,
|
|
3247
|
+
help="Robot model in the video")
|
|
3248
|
+
upload_parser.add_argument("-t", "--task", required=True,
|
|
3249
|
+
help="Task being demonstrated")
|
|
3250
|
+
upload_parser.add_argument("-p", "--project", help="Project ID to associate with")
|
|
3251
|
+
|
|
3252
|
+
# check-transfer command - Check skill transfer compatibility
|
|
3253
|
+
check_transfer_parser = subparsers.add_parser("check-transfer",
|
|
3254
|
+
help="Check skill transfer compatibility")
|
|
3255
|
+
check_transfer_parser.add_argument("-s", "--skill", help="Skill ID to check (optional)")
|
|
3256
|
+
check_transfer_parser.add_argument("--from", dest="source", required=True,
|
|
3257
|
+
help="Source robot model")
|
|
3258
|
+
check_transfer_parser.add_argument("--to", dest="target", required=True,
|
|
3259
|
+
help="Target robot model")
|
|
3260
|
+
check_transfer_parser.add_argument("--min-score", type=float, default=0.0,
|
|
3261
|
+
help="Minimum score threshold (0.0-1.0)")
|
|
3262
|
+
|
|
3263
|
+
# labeling-status command - Check labeling job status
|
|
3264
|
+
labeling_status_parser = subparsers.add_parser("labeling-status",
|
|
3265
|
+
help="Check labeling job status")
|
|
3266
|
+
labeling_status_parser.add_argument("job_id", help="Labeling job ID")
|
|
3267
|
+
|
|
3268
|
+
# parts command - Manage hardware parts
|
|
3269
|
+
parts_parser = subparsers.add_parser("parts", help="Manage hardware parts catalog")
|
|
3270
|
+
parts_subparsers = parts_parser.add_subparsers(dest="parts_action", help="Parts action")
|
|
3271
|
+
|
|
3272
|
+
# parts list
|
|
3273
|
+
parts_list_parser = parts_subparsers.add_parser("list", help="List available parts")
|
|
3274
|
+
parts_list_parser.add_argument("-c", "--category",
|
|
3275
|
+
choices=["gripper", "sensor", "actuator", "controller",
|
|
3276
|
+
"end-effector", "camera", "lidar", "force-torque"],
|
|
3277
|
+
help="Filter by category")
|
|
3278
|
+
parts_list_parser.add_argument("-m", "--manufacturer", help="Filter by manufacturer")
|
|
3279
|
+
parts_list_parser.add_argument("-s", "--search", help="Search by name or part number")
|
|
3280
|
+
|
|
3281
|
+
# parts check
|
|
3282
|
+
parts_check_parser = parts_subparsers.add_parser("check",
|
|
3283
|
+
help="Check part compatibility for skill")
|
|
3284
|
+
parts_check_parser.add_argument("skill_id", help="Skill ID to check")
|
|
3285
|
+
|
|
3286
|
+
# parts require
|
|
3287
|
+
parts_require_parser = parts_subparsers.add_parser("require",
|
|
3288
|
+
help="Add part dependency to skill")
|
|
3289
|
+
parts_require_parser.add_argument("part_id", help="Part ID to require")
|
|
3290
|
+
parts_require_parser.add_argument("-s", "--skill", required=True, help="Skill ID")
|
|
3291
|
+
parts_require_parser.add_argument("-v", "--version", default="1.0.0",
|
|
3292
|
+
help="Minimum version (default: 1.0.0)")
|
|
3293
|
+
parts_require_parser.add_argument("--required", action="store_true",
|
|
3294
|
+
help="Mark as required (not optional)")
|
|
3295
|
+
|
|
3296
|
+
# deps command - Dependency management
|
|
3297
|
+
deps_parser = subparsers.add_parser("deps", help="Dependency management")
|
|
3298
|
+
deps_subparsers = deps_parser.add_subparsers(dest="deps_action", help="Deps action")
|
|
3299
|
+
|
|
3300
|
+
# deps audit
|
|
3301
|
+
deps_audit_parser = deps_subparsers.add_parser("audit",
|
|
3302
|
+
help="Verify all dependencies compatible")
|
|
3303
|
+
deps_audit_parser.add_argument("-s", "--skill", help="Skill ID (default: current repo)")
|
|
3304
|
+
|
|
3305
|
+
# protocol command - Protocol registry management
|
|
3306
|
+
protocol_parser = subparsers.add_parser("protocol", help="Manage protocol registry")
|
|
3307
|
+
protocol_subparsers = protocol_parser.add_subparsers(dest="protocol_action", help="Protocol action")
|
|
3308
|
+
|
|
3309
|
+
# protocol list
|
|
3310
|
+
protocol_list_parser = protocol_subparsers.add_parser("list", help="List protocols")
|
|
3311
|
+
protocol_list_parser.add_argument("-r", "--robot", help="Filter by robot model")
|
|
3312
|
+
protocol_list_parser.add_argument("-t", "--transport",
|
|
3313
|
+
choices=["ble", "serial", "wifi", "can", "i2c", "spi", "mqtt", "ros2"],
|
|
3314
|
+
help="Filter by transport type")
|
|
3315
|
+
protocol_list_parser.add_argument("--verified", action="store_true", help="Show only verified protocols")
|
|
3316
|
+
protocol_list_parser.add_argument("-s", "--search", help="Search in command format and notes")
|
|
3317
|
+
|
|
3318
|
+
# protocol get
|
|
3319
|
+
protocol_get_parser = protocol_subparsers.add_parser("get", help="Get protocol details")
|
|
3320
|
+
protocol_get_parser.add_argument("protocol_id", help="Protocol ID")
|
|
3321
|
+
|
|
3322
|
+
# protocol init
|
|
3323
|
+
protocol_init_parser = protocol_subparsers.add_parser("init", help="Initialize new protocol template")
|
|
3324
|
+
protocol_init_parser.add_argument("robot_model", help="Robot model name (e.g., hiwonder-mechdog-pro)")
|
|
3325
|
+
protocol_init_parser.add_argument("-t", "--transport", required=True,
|
|
3326
|
+
choices=["ble", "serial", "wifi", "can", "i2c", "spi", "mqtt", "ros2"],
|
|
3327
|
+
help="Transport type")
|
|
3328
|
+
protocol_init_parser.add_argument("-o", "--output", default="./protocol",
|
|
3329
|
+
help="Output directory (default: ./protocol)")
|
|
3330
|
+
|
|
3331
|
+
# protocol push
|
|
3332
|
+
protocol_push_parser = protocol_subparsers.add_parser("push", help="Upload protocol to FoodForThought")
|
|
3333
|
+
protocol_push_parser.add_argument("file", nargs="?", help="Path to protocol.json (default: ./protocol.json)")
|
|
3334
|
+
|
|
3335
|
+
# protocol scan-serial
|
|
3336
|
+
protocol_subparsers.add_parser("scan-serial", help="Scan for serial ports")
|
|
3337
|
+
|
|
3338
|
+
# protocol scan-ble
|
|
3339
|
+
protocol_subparsers.add_parser("scan-ble", help="Scan for BLE devices")
|
|
3340
|
+
|
|
3341
|
+
# primitive command - Primitive skills management
|
|
3342
|
+
primitive_parser = subparsers.add_parser("primitive", help="Manage primitive skills")
|
|
3343
|
+
primitive_subparsers = primitive_parser.add_subparsers(dest="primitive_action", help="Primitive action")
|
|
3344
|
+
|
|
3345
|
+
# primitive list
|
|
3346
|
+
primitive_list_parser = primitive_subparsers.add_parser("list", help="List primitive skills")
|
|
3347
|
+
primitive_list_parser.add_argument("-r", "--robot", help="Filter by robot model")
|
|
3348
|
+
primitive_list_parser.add_argument("-c", "--category",
|
|
3349
|
+
choices=["body_pose", "arm", "gripper", "locomotion",
|
|
3350
|
+
"head", "sensing", "manipulation", "navigation"],
|
|
3351
|
+
help="Filter by category")
|
|
3352
|
+
primitive_list_parser.add_argument("--status",
|
|
3353
|
+
choices=["experimental", "tested", "verified", "deprecated"],
|
|
3354
|
+
help="Filter by status")
|
|
3355
|
+
primitive_list_parser.add_argument("--tested", action="store_true",
|
|
3356
|
+
help="Show only tested/verified primitives")
|
|
3357
|
+
|
|
3358
|
+
# primitive get
|
|
3359
|
+
primitive_get_parser = primitive_subparsers.add_parser("get", help="Get primitive details")
|
|
3360
|
+
primitive_get_parser.add_argument("primitive_id", help="Primitive ID")
|
|
3361
|
+
|
|
3362
|
+
# primitive test
|
|
3363
|
+
primitive_test_parser = primitive_subparsers.add_parser("test", help="Submit test result")
|
|
3364
|
+
primitive_test_parser.add_argument("primitive_id", help="Primitive ID to test")
|
|
3365
|
+
primitive_test_parser.add_argument("-p", "--params", required=True,
|
|
3366
|
+
help="Parameters used in test as JSON (e.g., '{\"pitch\": 15}')")
|
|
3367
|
+
primitive_test_parser.add_argument("-r", "--result", required=True,
|
|
3368
|
+
choices=["pass", "fail", "partial"],
|
|
3369
|
+
help="Test result")
|
|
3370
|
+
primitive_test_parser.add_argument("-n", "--notes", help="Test notes")
|
|
3371
|
+
primitive_test_parser.add_argument("-v", "--video", help="Video URL of test")
|
|
3372
|
+
|
|
3373
|
+
# primitive deps (nested subcommand for dependency management)
|
|
3374
|
+
primitive_deps_parser = primitive_subparsers.add_parser("deps", help="Manage primitive dependencies")
|
|
3375
|
+
primitive_deps_subparsers = primitive_deps_parser.add_subparsers(dest="primitive_deps_action",
|
|
3376
|
+
help="Dependency action")
|
|
3377
|
+
|
|
3378
|
+
# primitive deps show
|
|
3379
|
+
primitive_deps_show_parser = primitive_deps_subparsers.add_parser("show", help="Show dependencies")
|
|
3380
|
+
primitive_deps_show_parser.add_argument("primitive_id", help="Primitive ID")
|
|
3381
|
+
|
|
3382
|
+
# primitive deps add
|
|
3383
|
+
primitive_deps_add_parser = primitive_deps_subparsers.add_parser("add", help="Add dependency")
|
|
3384
|
+
primitive_deps_add_parser.add_argument("primitive_id", help="Primitive ID (the one that depends)")
|
|
3385
|
+
primitive_deps_add_parser.add_argument("required_id", help="Required primitive ID")
|
|
3386
|
+
primitive_deps_add_parser.add_argument("-t", "--type", default="requires",
|
|
3387
|
+
choices=["requires", "extends", "overrides", "optional"],
|
|
3388
|
+
help="Dependency type (default: requires)")
|
|
3389
|
+
primitive_deps_add_parser.add_argument("--min-status", default="tested",
|
|
3390
|
+
choices=["experimental", "tested", "verified"],
|
|
3391
|
+
help="Minimum required status (default: tested)")
|
|
3392
|
+
|
|
3393
|
+
# primitive init
|
|
3394
|
+
primitive_init_parser = primitive_subparsers.add_parser("init", help="Initialize primitive skill template")
|
|
3395
|
+
primitive_init_parser.add_argument("name", help="Primitive name (e.g., move_joint, grip_close)")
|
|
3396
|
+
primitive_init_parser.add_argument("-p", "--protocol", help="Protocol ID to link to")
|
|
3397
|
+
primitive_init_parser.add_argument("-r", "--from-recording", help="Import from recording file")
|
|
3398
|
+
primitive_init_parser.add_argument("-c", "--category", default="motion",
|
|
3399
|
+
choices=["body_pose", "arm", "gripper", "locomotion",
|
|
3400
|
+
"head", "sensing", "manipulation", "navigation"],
|
|
3401
|
+
help="Primitive category (default: motion)")
|
|
3402
|
+
primitive_init_parser.add_argument("-o", "--output", default=".", help="Output directory")
|
|
3403
|
+
|
|
3404
|
+
# primitive push
|
|
3405
|
+
primitive_push_parser = primitive_subparsers.add_parser("push", help="Publish primitive to FoodforThought")
|
|
3406
|
+
primitive_push_parser.add_argument("primitive_file", help="Path to .primitive.json file")
|
|
3407
|
+
|
|
3408
|
+
# skill command - Skill abstractions (composed from primitives)
|
|
3409
|
+
skill_parser = subparsers.add_parser("skill", help="Manage skill abstractions (Layer 2)")
|
|
3410
|
+
skill_subparsers = skill_parser.add_subparsers(dest="skill_action", help="Skill action")
|
|
3411
|
+
|
|
3412
|
+
# skill init
|
|
3413
|
+
skill_init_parser = skill_subparsers.add_parser("init", help="Initialize a new skill abstraction")
|
|
3414
|
+
skill_init_parser.add_argument("name", help="Skill name (e.g., pick_and_place)")
|
|
3415
|
+
skill_init_parser.add_argument("-r", "--robot", help="Target robot model")
|
|
3416
|
+
skill_init_parser.add_argument("-t", "--template", default="basic",
|
|
3417
|
+
choices=["basic", "pick_place", "navigation", "inspection"],
|
|
3418
|
+
help="Skill template (default: basic)")
|
|
3419
|
+
skill_init_parser.add_argument("-o", "--output", default=".", help="Output directory")
|
|
3420
|
+
|
|
3421
|
+
# skill compose
|
|
3422
|
+
skill_compose_parser = skill_subparsers.add_parser("compose", help="Add primitives to skill")
|
|
3423
|
+
skill_compose_parser.add_argument("skill_file", help="Path to .skill.json file")
|
|
3424
|
+
skill_compose_parser.add_argument("primitives", nargs="+", help="Primitive IDs to add")
|
|
3425
|
+
|
|
3426
|
+
# skill list
|
|
3427
|
+
skill_list_parser = skill_subparsers.add_parser("list", help="List skill abstractions")
|
|
3428
|
+
skill_list_parser.add_argument("-r", "--robot", help="Filter by robot model")
|
|
3429
|
+
skill_list_parser.add_argument("--status",
|
|
3430
|
+
choices=["experimental", "tested", "verified"],
|
|
3431
|
+
help="Filter by status")
|
|
3432
|
+
|
|
3433
|
+
# skill get
|
|
3434
|
+
skill_get_parser = skill_subparsers.add_parser("get", help="Get skill details")
|
|
3435
|
+
skill_get_parser.add_argument("skill_id", help="Skill ID")
|
|
3436
|
+
|
|
3437
|
+
# skill push
|
|
3438
|
+
skill_push_parser = skill_subparsers.add_parser("push", help="Publish skill to FoodforThought")
|
|
3439
|
+
skill_push_parser.add_argument("skill_file", help="Path to .skill.json file")
|
|
3440
|
+
|
|
3441
|
+
# skill test
|
|
3442
|
+
skill_test_parser = skill_subparsers.add_parser("test", help="Test a skill")
|
|
3443
|
+
skill_test_parser.add_argument("skill", help="Skill file or ID")
|
|
3444
|
+
skill_test_parser.add_argument("-p", "--params", help="Skill parameters as JSON")
|
|
3445
|
+
skill_test_parser.add_argument("--execute", action="store_true",
|
|
3446
|
+
help="Actually execute (default is dry run)")
|
|
3447
|
+
|
|
3448
|
+
# bridge command - Interactive robot communication
|
|
3449
|
+
bridge_parser = subparsers.add_parser("bridge", help="Interactive robot communication bridge")
|
|
3450
|
+
bridge_subparsers = bridge_parser.add_subparsers(dest="bridge_action", help="Bridge action")
|
|
3451
|
+
|
|
3452
|
+
# bridge connect
|
|
3453
|
+
bridge_connect_parser = bridge_subparsers.add_parser("connect",
|
|
3454
|
+
help="Connect to robot interactively")
|
|
3455
|
+
bridge_connect_parser.add_argument("port", help="Serial port or BLE address")
|
|
3456
|
+
bridge_connect_parser.add_argument("-t", "--transport", default="serial",
|
|
3457
|
+
choices=["serial", "ble"],
|
|
3458
|
+
help="Transport type (default: serial)")
|
|
3459
|
+
bridge_connect_parser.add_argument("-b", "--baud", type=int, default=115200,
|
|
3460
|
+
help="Baud rate for serial (default: 115200)")
|
|
3461
|
+
bridge_connect_parser.add_argument("-p", "--protocol", help="Protocol ID for command hints")
|
|
3462
|
+
|
|
3463
|
+
# bridge send
|
|
3464
|
+
bridge_send_parser = bridge_subparsers.add_parser("send", help="Send single command")
|
|
3465
|
+
bridge_send_parser.add_argument("port", help="Serial port or BLE address")
|
|
3466
|
+
bridge_send_parser.add_argument("command", help="Command to send")
|
|
3467
|
+
bridge_send_parser.add_argument("-t", "--transport", default="serial",
|
|
3468
|
+
choices=["serial", "ble"],
|
|
3469
|
+
help="Transport type (default: serial)")
|
|
3470
|
+
bridge_send_parser.add_argument("-b", "--baud", type=int, default=115200,
|
|
3471
|
+
help="Baud rate for serial (default: 115200)")
|
|
3472
|
+
bridge_send_parser.add_argument("-w", "--wait", type=float, default=0.5,
|
|
3473
|
+
help="Wait time for response in seconds (default: 0.5)")
|
|
3474
|
+
|
|
3475
|
+
# bridge record
|
|
3476
|
+
bridge_record_parser = bridge_subparsers.add_parser("record",
|
|
3477
|
+
help="Record session for primitive creation")
|
|
3478
|
+
bridge_record_parser.add_argument("port", help="Serial port or BLE address")
|
|
3479
|
+
bridge_record_parser.add_argument("-o", "--output", default="./recording.json",
|
|
3480
|
+
help="Output file (default: ./recording.json)")
|
|
3481
|
+
bridge_record_parser.add_argument("-t", "--transport", default="serial",
|
|
3482
|
+
choices=["serial", "ble"],
|
|
3483
|
+
help="Transport type (default: serial)")
|
|
3484
|
+
bridge_record_parser.add_argument("-b", "--baud", type=int, default=115200,
|
|
3485
|
+
help="Baud rate for serial (default: 115200)")
|
|
3486
|
+
bridge_record_parser.add_argument("-n", "--name", help="Primitive skill name")
|
|
3487
|
+
|
|
3488
|
+
# bridge replay
|
|
3489
|
+
bridge_replay_parser = bridge_subparsers.add_parser("replay", help="Replay a recorded session")
|
|
3490
|
+
bridge_replay_parser.add_argument("recording", help="Path to recording file")
|
|
3491
|
+
bridge_replay_parser.add_argument("port", help="Serial port or BLE address")
|
|
3492
|
+
bridge_replay_parser.add_argument("-t", "--transport", default="serial",
|
|
3493
|
+
choices=["serial", "ble"],
|
|
3494
|
+
help="Transport type (default: serial)")
|
|
3495
|
+
bridge_replay_parser.add_argument("-b", "--baud", type=int, default=115200,
|
|
3496
|
+
help="Baud rate for serial (default: 115200)")
|
|
3497
|
+
bridge_replay_parser.add_argument("-s", "--speed", type=float, default=1.0,
|
|
3498
|
+
help="Playback speed multiplier (default: 1.0)")
|
|
3499
|
+
|
|
3500
|
+
# bridge serve - WebSocket server for Artifex integration
|
|
3501
|
+
bridge_serve_parser = bridge_subparsers.add_parser("serve",
|
|
3502
|
+
help="Start WebSocket server for Artifex Desktop integration",
|
|
3503
|
+
description="""Start the ATE Bridge Server for Artifex Desktop integration.
|
|
3504
|
+
|
|
3505
|
+
This server enables sim-to-real transfer by bridging Artifex Desktop to physical robot hardware.
|
|
3506
|
+
|
|
3507
|
+
WORKFLOW:
|
|
3508
|
+
1. Start this server: ate bridge serve -v
|
|
3509
|
+
2. Connect your robot via USB serial
|
|
3510
|
+
3. In Artifex Desktop, use the Hardware panel or AI tools to connect
|
|
3511
|
+
4. Control your robot directly from the Artifex interface
|
|
3512
|
+
|
|
3513
|
+
CAPABILITIES:
|
|
3514
|
+
- Serial port discovery and robot connection
|
|
3515
|
+
- Real-time servo state monitoring (position, velocity, temperature, load)
|
|
3516
|
+
- Joint control with URDF-to-servo mapping
|
|
3517
|
+
- Trajectory execution
|
|
3518
|
+
- Skill deployment and execution
|
|
3519
|
+
|
|
3520
|
+
EXAMPLE:
|
|
3521
|
+
ate bridge serve -p 8765 -v
|
|
3522
|
+
|
|
3523
|
+
The server listens on ws://localhost:8765 by default.
|
|
3524
|
+
Artifex Desktop will auto-connect when the Hardware panel is opened.""",
|
|
3525
|
+
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
3526
|
+
bridge_serve_parser.add_argument("-p", "--port", type=int, default=8765,
|
|
3527
|
+
help="WebSocket port (default: 8765)")
|
|
3528
|
+
bridge_serve_parser.add_argument("-v", "--verbose", action="store_true",
|
|
3529
|
+
help="Enable verbose logging (shows all messages)")
|
|
3530
|
+
|
|
3531
|
+
# generate command - Generate skill from text description
|
|
3532
|
+
generate_parser = subparsers.add_parser("generate",
|
|
3533
|
+
help="Generate skill scaffolding from text description")
|
|
3534
|
+
generate_parser.add_argument("description",
|
|
3535
|
+
help="Natural language task description (e.g., 'pick up box and place on pallet')")
|
|
3536
|
+
generate_parser.add_argument("-r", "--robot", default="ur5",
|
|
3537
|
+
help="Target robot model (default: ur5)")
|
|
3538
|
+
generate_parser.add_argument("-o", "--output", default="./new-skill",
|
|
3539
|
+
help="Output directory (default: ./new-skill)")
|
|
3540
|
+
|
|
3541
|
+
# compile command - Compile skill specification to deployable package
|
|
3542
|
+
compile_parser = subparsers.add_parser("compile",
|
|
3543
|
+
help="Compile skill.yaml into deployable package (ROS2, Docker, Python)",
|
|
3544
|
+
description="""Compile a skill specification into a deployable package.
|
|
3545
|
+
|
|
3546
|
+
The skill compiler transforms a skill.yaml specification into:
|
|
3547
|
+
- Python skill implementation with primitives wrappers
|
|
3548
|
+
- ROS2 package with action/service interfaces
|
|
3549
|
+
- Docker container for deployment
|
|
3550
|
+
- Hardware configuration mapping
|
|
3551
|
+
|
|
3552
|
+
EXAMPLES:
|
|
3553
|
+
ate compile skill.yaml
|
|
3554
|
+
ate compile skill.yaml --target docker
|
|
3555
|
+
ate compile skill.yaml --target ros2 --robot my_arm.urdf
|
|
3556
|
+
ate compile skill.yaml --ate-dir ./robot_config
|
|
3557
|
+
""",
|
|
3558
|
+
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
3559
|
+
compile_parser.add_argument("skill_path",
|
|
3560
|
+
help="Path to skill.yaml specification")
|
|
3561
|
+
compile_parser.add_argument("-o", "--output", default="./output",
|
|
3562
|
+
help="Output directory (default: ./output)")
|
|
3563
|
+
compile_parser.add_argument("-t", "--target", default="ros2",
|
|
3564
|
+
choices=["ros2", "docker", "python"],
|
|
3565
|
+
help="Target platform (default: ros2)")
|
|
3566
|
+
compile_parser.add_argument("-r", "--robot",
|
|
3567
|
+
help="Path to robot URDF for hardware config")
|
|
3568
|
+
compile_parser.add_argument("--ate-dir",
|
|
3569
|
+
help="Path to ATE config directory for servo mapping")
|
|
3570
|
+
|
|
3571
|
+
# test-skill command - Test compiled skill
|
|
3572
|
+
test_skill_parser = subparsers.add_parser("test-skill",
|
|
3573
|
+
help="Test a compiled skill in simulation or on hardware",
|
|
3574
|
+
description="""Test a compiled skill package.
|
|
3575
|
+
|
|
3576
|
+
MODES:
|
|
3577
|
+
mock - Run with mock hardware drivers (no hardware needed)
|
|
3578
|
+
sim - Run in MuJoCo/Gazebo simulation (requires setup)
|
|
3579
|
+
hardware - Run on physical robot (requires bridge connection)
|
|
3580
|
+
|
|
3581
|
+
EXAMPLES:
|
|
3582
|
+
ate test-skill ./output --mode mock
|
|
3583
|
+
ate test-skill ./output --mode hardware --robot-port /dev/ttyUSB0
|
|
3584
|
+
ate test-skill ./output --mode mock --params '{"pick_pose": [0.5, 0, 0.3]}'
|
|
3585
|
+
""",
|
|
3586
|
+
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
3587
|
+
test_skill_parser.add_argument("skill_path",
|
|
3588
|
+
help="Path to compiled skill directory")
|
|
3589
|
+
test_skill_parser.add_argument("-m", "--mode", default="mock",
|
|
3590
|
+
choices=["sim", "hardware", "mock"],
|
|
3591
|
+
help="Test mode (default: mock)")
|
|
3592
|
+
test_skill_parser.add_argument("--robot-port",
|
|
3593
|
+
help="Robot serial port for hardware mode")
|
|
3594
|
+
test_skill_parser.add_argument("-p", "--params",
|
|
3595
|
+
help="Skill parameters as JSON string")
|
|
3596
|
+
|
|
3597
|
+
# publish-skill command - Publish compiled skill to registry
|
|
3598
|
+
publish_skill_parser = subparsers.add_parser("publish-skill",
|
|
3599
|
+
help="Publish compiled skill to FoodforThought registry",
|
|
3600
|
+
description="""Publish a compiled skill package to FoodforThought.
|
|
3601
|
+
|
|
3602
|
+
The skill will be uploaded to the skill registry and made available
|
|
3603
|
+
for other users to discover and deploy.
|
|
3604
|
+
|
|
3605
|
+
EXAMPLES:
|
|
3606
|
+
ate publish-skill ./output
|
|
3607
|
+
ate publish-skill ./output --visibility private
|
|
3608
|
+
ate publish-skill ./output --visibility team
|
|
3609
|
+
""",
|
|
3610
|
+
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
3611
|
+
publish_skill_parser.add_argument("skill_path",
|
|
3612
|
+
help="Path to compiled skill directory")
|
|
3613
|
+
publish_skill_parser.add_argument("-v", "--visibility", default="public",
|
|
3614
|
+
choices=["public", "private", "team"],
|
|
3615
|
+
help="Visibility (default: public)")
|
|
3616
|
+
|
|
3617
|
+
# check-compatibility command - Check skill-robot compatibility
|
|
3618
|
+
check_compat_parser = subparsers.add_parser("check-compatibility",
|
|
3619
|
+
help="Check if a skill is compatible with a robot",
|
|
3620
|
+
description="""Check if a skill specification is compatible with a robot.
|
|
3621
|
+
|
|
3622
|
+
Analyzes hardware requirements, kinematic constraints, and primitive
|
|
3623
|
+
support to determine if a skill can run on a given robot.
|
|
3624
|
+
|
|
3625
|
+
EXAMPLES:
|
|
3626
|
+
ate check-compatibility skill.yaml --robot-urdf my_arm.urdf
|
|
3627
|
+
ate check-compatibility skill.yaml --ate-dir ./robot_config
|
|
3628
|
+
""",
|
|
3629
|
+
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
3630
|
+
check_compat_parser.add_argument("skill_path",
|
|
3631
|
+
help="Path to skill.yaml")
|
|
3632
|
+
check_compat_parser.add_argument("--robot-urdf",
|
|
3633
|
+
help="Path to robot URDF")
|
|
3634
|
+
check_compat_parser.add_argument("--ate-dir",
|
|
3635
|
+
help="Path to ATE config directory")
|
|
3636
|
+
|
|
3637
|
+
# publish-protocol command - Convenient alias for protocol push
|
|
3638
|
+
publish_protocol_parser = subparsers.add_parser("publish-protocol",
|
|
3639
|
+
help="Publish a robot protocol to FoodForThought (alias for 'protocol push')")
|
|
3640
|
+
publish_protocol_parser.add_argument("file", nargs="?",
|
|
3641
|
+
help="Path to protocol.json (default: ./protocol.json)")
|
|
3642
|
+
|
|
3643
|
+
# workflow command - Workflow/pipeline management
|
|
3644
|
+
workflow_parser = subparsers.add_parser("workflow", help="Manage skill workflows/pipelines")
|
|
3645
|
+
workflow_subparsers = workflow_parser.add_subparsers(dest="workflow_action", help="Workflow action")
|
|
3646
|
+
|
|
3647
|
+
# workflow validate
|
|
3648
|
+
workflow_validate_parser = workflow_subparsers.add_parser("validate",
|
|
3649
|
+
help="Validate workflow YAML")
|
|
3650
|
+
workflow_validate_parser.add_argument("path", help="Path to workflow YAML file")
|
|
3651
|
+
|
|
3652
|
+
# workflow run
|
|
3653
|
+
workflow_run_parser = workflow_subparsers.add_parser("run", help="Run a workflow")
|
|
3654
|
+
workflow_run_parser.add_argument("path", help="Path to workflow YAML file")
|
|
3655
|
+
workflow_run_parser.add_argument("--sim", action="store_true",
|
|
3656
|
+
help="Run in simulation mode")
|
|
3657
|
+
workflow_run_parser.add_argument("--dry-run", action="store_true",
|
|
3658
|
+
help="Show execution plan without running")
|
|
3659
|
+
|
|
3660
|
+
# workflow export
|
|
3661
|
+
workflow_export_parser = workflow_subparsers.add_parser("export",
|
|
3662
|
+
help="Export workflow to other formats")
|
|
3663
|
+
workflow_export_parser.add_argument("path", help="Path to workflow YAML file")
|
|
3664
|
+
workflow_export_parser.add_argument("-f", "--format", default="ros2",
|
|
3665
|
+
choices=["ros2", "json"],
|
|
3666
|
+
help="Export format (default: ros2)")
|
|
3667
|
+
workflow_export_parser.add_argument("-o", "--output", help="Output file path")
|
|
3668
|
+
|
|
3669
|
+
# team command - Team collaboration
|
|
3670
|
+
team_parser = subparsers.add_parser("team", help="Team collaboration management")
|
|
3671
|
+
team_subparsers = team_parser.add_subparsers(dest="team_action", help="Team action")
|
|
3672
|
+
|
|
3673
|
+
# team create
|
|
3674
|
+
team_create_parser = team_subparsers.add_parser("create", help="Create a new team")
|
|
3675
|
+
team_create_parser.add_argument("name", help="Team name")
|
|
3676
|
+
team_create_parser.add_argument("-d", "--description", help="Team description")
|
|
3677
|
+
|
|
3678
|
+
# team invite
|
|
3679
|
+
team_invite_parser = team_subparsers.add_parser("invite", help="Invite user to team")
|
|
3680
|
+
team_invite_parser.add_argument("email", help="Email of user to invite")
|
|
3681
|
+
team_invite_parser.add_argument("-t", "--team", required=True, help="Team slug")
|
|
3682
|
+
team_invite_parser.add_argument("-r", "--role", default="member",
|
|
3683
|
+
choices=["owner", "admin", "member", "viewer"],
|
|
3684
|
+
help="Role to assign (default: member)")
|
|
3685
|
+
|
|
3686
|
+
# team list
|
|
3687
|
+
team_subparsers.add_parser("list", help="List teams you belong to")
|
|
3688
|
+
|
|
3689
|
+
# team share (skill share with team)
|
|
3690
|
+
team_share_parser = team_subparsers.add_parser("share", help="Share skill with team")
|
|
3691
|
+
team_share_parser.add_argument("skill_id", help="Skill ID to share")
|
|
3692
|
+
team_share_parser.add_argument("-t", "--team", required=True, help="Team slug")
|
|
3693
|
+
|
|
3694
|
+
# data command - Dataset management
|
|
3695
|
+
data_parser = subparsers.add_parser("data", help="Dataset and telemetry management")
|
|
3696
|
+
data_subparsers = data_parser.add_subparsers(dest="data_action", help="Data action")
|
|
3697
|
+
|
|
3698
|
+
# data upload
|
|
3699
|
+
data_upload_parser = data_subparsers.add_parser("upload", help="Upload sensor data")
|
|
3700
|
+
data_upload_parser.add_argument("path", help="Path to data directory or file")
|
|
3701
|
+
data_upload_parser.add_argument("-s", "--skill", required=True, help="Associated skill ID")
|
|
3702
|
+
data_upload_parser.add_argument("--stage", default="raw",
|
|
3703
|
+
choices=["raw", "annotated", "skill-abstracted", "production"],
|
|
3704
|
+
help="Data stage (default: raw)")
|
|
3705
|
+
|
|
3706
|
+
# data list
|
|
3707
|
+
data_list_parser = data_subparsers.add_parser("list", help="List datasets")
|
|
3708
|
+
data_list_parser.add_argument("-s", "--skill", help="Filter by skill ID")
|
|
3709
|
+
data_list_parser.add_argument("--stage", help="Filter by stage")
|
|
3710
|
+
|
|
3711
|
+
# data promote
|
|
3712
|
+
data_promote_parser = data_subparsers.add_parser("promote", help="Promote dataset stage")
|
|
3713
|
+
data_promote_parser.add_argument("dataset_id", help="Dataset ID")
|
|
3714
|
+
data_promote_parser.add_argument("--to", required=True, dest="to_stage",
|
|
3715
|
+
choices=["annotated", "skill-abstracted", "production"],
|
|
3716
|
+
help="Target stage")
|
|
3717
|
+
|
|
3718
|
+
# data export
|
|
3719
|
+
data_export_parser = data_subparsers.add_parser("export", help="Export dataset")
|
|
3720
|
+
data_export_parser.add_argument("dataset_id", help="Dataset ID")
|
|
3721
|
+
data_export_parser.add_argument("-f", "--format", default="rlds",
|
|
3722
|
+
choices=["json", "rlds", "lerobot", "hdf5"],
|
|
3723
|
+
help="Export format (default: rlds)")
|
|
3724
|
+
data_export_parser.add_argument("-o", "--output", default="./export",
|
|
3725
|
+
help="Output directory")
|
|
3726
|
+
|
|
3727
|
+
# deploy command - Enhanced deployment management
|
|
3728
|
+
deploy_subparsers = deploy_parser.add_subparsers(dest="deploy_action", help="Deploy action")
|
|
3729
|
+
|
|
3730
|
+
# deploy config (hybrid edge/cloud deployment)
|
|
3731
|
+
deploy_config_parser = deploy_subparsers.add_parser("config",
|
|
3732
|
+
help="Deploy using config file")
|
|
3733
|
+
deploy_config_parser.add_argument("config_path", help="Path to deploy.yaml")
|
|
3734
|
+
deploy_config_parser.add_argument("-t", "--target", required=True,
|
|
3735
|
+
help="Target fleet or robot")
|
|
3736
|
+
deploy_config_parser.add_argument("--dry-run", action="store_true",
|
|
3737
|
+
help="Show plan without deploying")
|
|
3738
|
+
|
|
3739
|
+
# deploy status
|
|
3740
|
+
deploy_status_parser = deploy_subparsers.add_parser("status",
|
|
3741
|
+
help="Check deployment status")
|
|
3742
|
+
deploy_status_parser.add_argument("target", help="Target fleet or robot")
|
|
3743
|
+
|
|
3744
|
+
# robot-setup command - Interactive wizard for robot discovery and primitive skill generation
|
|
3745
|
+
robot_setup_parser = subparsers.add_parser("robot-setup",
|
|
3746
|
+
help="Interactive wizard to discover robot and generate primitive skills")
|
|
3747
|
+
robot_setup_parser.add_argument("-p", "--port", help="Serial port (skip device selection)")
|
|
3748
|
+
robot_setup_parser.add_argument("-o", "--output", default="./robot",
|
|
3749
|
+
help="Output directory for generated files (default: ./robot)")
|
|
3750
|
+
robot_setup_parser.add_argument("--skip-labeling", action="store_true",
|
|
3751
|
+
help="Skip labeling entirely (use generic servo_N names)")
|
|
3752
|
+
robot_setup_parser.add_argument("--robot-type",
|
|
3753
|
+
choices=["quadruped", "quadruped_with_arm", "hexapod",
|
|
3754
|
+
"6dof_arm", "humanoid_basic", "humanoid",
|
|
3755
|
+
"humanoid_advanced", "humanoid_full", "custom"],
|
|
3756
|
+
help="Force robot type for AI-suggested labels")
|
|
3757
|
+
robot_setup_parser.add_argument("--non-interactive", action="store_true",
|
|
3758
|
+
help="Run without user prompts (use defaults and AI suggestions)")
|
|
3759
|
+
robot_setup_parser.add_argument("--push", action="store_true",
|
|
3760
|
+
help="Auto-push generated primitives to FoodforThought (requires FOODFORTHOUGHT_TOKEN)")
|
|
3761
|
+
robot_setup_parser.add_argument("--api-url",
|
|
3762
|
+
help="FoodforThought API URL (defaults to https://kindlyrobotics.com)")
|
|
3763
|
+
robot_setup_parser.add_argument("--scan-only", action="store_true",
|
|
3764
|
+
help="Only scan for devices, don't run full wizard")
|
|
3765
|
+
|
|
3766
|
+
# marketplace command - Skill Marketplace ("npm for robot skills")
|
|
3767
|
+
marketplace_parser = subparsers.add_parser("marketplace",
|
|
3768
|
+
help="Skill Marketplace - discover, install, and publish robot skills",
|
|
3769
|
+
description="""Skill Marketplace - "npm for robot skills"
|
|
3770
|
+
|
|
3771
|
+
Discover community-contributed skills, install them for your robot,
|
|
3772
|
+
and publish your own skills to share with others.
|
|
3773
|
+
|
|
3774
|
+
EXAMPLES:
|
|
3775
|
+
ate marketplace search "pick and place"
|
|
3776
|
+
ate marketplace show pick-and-place
|
|
3777
|
+
ate marketplace install pick-and-place --robot my-arm
|
|
3778
|
+
ate marketplace publish ./my-skill
|
|
3779
|
+
ate marketplace report pick-and-place my-arm --works
|
|
3780
|
+
""",
|
|
3781
|
+
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
3782
|
+
marketplace_subparsers = marketplace_parser.add_subparsers(dest="marketplace_action", help="Marketplace action")
|
|
3783
|
+
|
|
3784
|
+
# marketplace search
|
|
3785
|
+
marketplace_search_parser = marketplace_subparsers.add_parser("search",
|
|
3786
|
+
help="Search for skills in the marketplace")
|
|
3787
|
+
marketplace_search_parser.add_argument("query", help="Search query")
|
|
3788
|
+
marketplace_search_parser.add_argument("-c", "--category",
|
|
3789
|
+
choices=["manipulation", "navigation", "perception", "locomotion",
|
|
3790
|
+
"interaction", "inspection", "assembly", "pick_and_place",
|
|
3791
|
+
"cleaning", "logistics", "other"],
|
|
3792
|
+
help="Filter by category")
|
|
3793
|
+
marketplace_search_parser.add_argument("-r", "--robot-type", help="Filter by robot type")
|
|
3794
|
+
marketplace_search_parser.add_argument("-l", "--license", help="Filter by license (mit, apache2, etc.)")
|
|
3795
|
+
marketplace_search_parser.add_argument("-p", "--pricing", choices=["free", "paid"],
|
|
3796
|
+
help="Filter by pricing")
|
|
3797
|
+
marketplace_search_parser.add_argument("-s", "--sort", default="downloads",
|
|
3798
|
+
choices=["downloads", "rating", "recent", "executions", "installs"],
|
|
3799
|
+
help="Sort results (default: downloads)")
|
|
3800
|
+
marketplace_search_parser.add_argument("--limit", type=int, default=20,
|
|
3801
|
+
help="Number of results (default: 20)")
|
|
3802
|
+
|
|
3803
|
+
# marketplace show
|
|
3804
|
+
marketplace_show_parser = marketplace_subparsers.add_parser("show",
|
|
3805
|
+
help="Show detailed information about a skill")
|
|
3806
|
+
marketplace_show_parser.add_argument("slug", help="Skill slug/name")
|
|
3807
|
+
|
|
3808
|
+
# marketplace install
|
|
3809
|
+
marketplace_install_parser = marketplace_subparsers.add_parser("install",
|
|
3810
|
+
help="Install a skill from the marketplace")
|
|
3811
|
+
marketplace_install_parser.add_argument("skill_name", help="Skill name or slug")
|
|
3812
|
+
marketplace_install_parser.add_argument("-v", "--version", help="Specific version to install")
|
|
3813
|
+
marketplace_install_parser.add_argument("-r", "--robot", help="Target robot ID for compatibility check")
|
|
3814
|
+
marketplace_install_parser.add_argument("-o", "--output", help="Output directory (default: ./<skill_name>)")
|
|
3815
|
+
|
|
3816
|
+
# marketplace publish
|
|
3817
|
+
marketplace_publish_parser = marketplace_subparsers.add_parser("publish",
|
|
3818
|
+
help="Publish a skill to the marketplace")
|
|
3819
|
+
marketplace_publish_parser.add_argument("path", help="Path to skill directory")
|
|
3820
|
+
marketplace_publish_parser.add_argument("--no-public", action="store_true",
|
|
3821
|
+
help="Keep skill private (not listed publicly)")
|
|
3822
|
+
|
|
3823
|
+
# marketplace report
|
|
3824
|
+
marketplace_report_parser = marketplace_subparsers.add_parser("report",
|
|
3825
|
+
help="Report skill compatibility with a robot")
|
|
3826
|
+
marketplace_report_parser.add_argument("skill_name", help="Skill name or slug")
|
|
3827
|
+
marketplace_report_parser.add_argument("robot", help="Robot ID")
|
|
3828
|
+
marketplace_report_parser.add_argument("--works", dest="works", action="store_true",
|
|
3829
|
+
help="Report that skill works on this robot")
|
|
3830
|
+
marketplace_report_parser.add_argument("--no-works", dest="works", action="store_false",
|
|
3831
|
+
help="Report that skill does NOT work on this robot")
|
|
3832
|
+
marketplace_report_parser.add_argument("-n", "--notes", help="Additional notes")
|
|
3833
|
+
marketplace_report_parser.add_argument("-v", "--version", help="Version tested")
|
|
3834
|
+
marketplace_report_parser.set_defaults(works=None)
|
|
3835
|
+
|
|
3836
|
+
# marketplace list (installed)
|
|
3837
|
+
marketplace_subparsers.add_parser("installed", help="List installed skills")
|
|
3838
|
+
|
|
3839
|
+
args = parser.parse_args()
|
|
3840
|
+
|
|
3841
|
+
if not args.command:
|
|
3842
|
+
parser.print_help()
|
|
3843
|
+
sys.exit(1)
|
|
3844
|
+
|
|
3845
|
+
# login command
|
|
3846
|
+
subparsers.add_parser("login", help="Authenticate with FoodforThought")
|
|
1653
3847
|
|
|
1654
3848
|
client = ATEClient()
|
|
1655
3849
|
|
|
3850
|
+
if args.command == "login":
|
|
3851
|
+
login_command()
|
|
3852
|
+
return
|
|
3853
|
+
|
|
1656
3854
|
if args.command == "init":
|
|
1657
3855
|
result = client.init(args.name, args.description, args.visibility)
|
|
1658
3856
|
print(f"Created repository: {result['repository']['id']}")
|
|
@@ -1712,9 +3910,107 @@ def main():
|
|
|
1712
3910
|
else:
|
|
1713
3911
|
deps_parser.print_help()
|
|
1714
3912
|
|
|
3913
|
+
elif args.command == "protocol":
|
|
3914
|
+
if args.protocol_action == "list":
|
|
3915
|
+
client.protocol_list(args.robot, args.transport, args.verified, args.search)
|
|
3916
|
+
elif args.protocol_action == "get":
|
|
3917
|
+
client.protocol_get(args.protocol_id)
|
|
3918
|
+
elif args.protocol_action == "init":
|
|
3919
|
+
client.protocol_init(args.robot_model, args.transport, args.output)
|
|
3920
|
+
elif args.protocol_action == "push":
|
|
3921
|
+
client.protocol_push(args.file)
|
|
3922
|
+
elif args.protocol_action == "scan-serial":
|
|
3923
|
+
client.protocol_scan_serial()
|
|
3924
|
+
elif args.protocol_action == "scan-ble":
|
|
3925
|
+
client.protocol_scan_ble()
|
|
3926
|
+
else:
|
|
3927
|
+
protocol_parser.print_help()
|
|
3928
|
+
|
|
3929
|
+
elif args.command == "primitive":
|
|
3930
|
+
if args.primitive_action == "list":
|
|
3931
|
+
client.primitive_list(args.robot, args.category, args.status, args.tested)
|
|
3932
|
+
elif args.primitive_action == "get":
|
|
3933
|
+
client.primitive_get(args.primitive_id)
|
|
3934
|
+
elif args.primitive_action == "test":
|
|
3935
|
+
client.primitive_test(args.primitive_id, args.params, args.result, args.notes, args.video)
|
|
3936
|
+
elif args.primitive_action == "init":
|
|
3937
|
+
client.primitive_init(args.name, args.protocol, args.from_recording, args.category, args.output)
|
|
3938
|
+
elif args.primitive_action == "push":
|
|
3939
|
+
client.primitive_push(args.primitive_file)
|
|
3940
|
+
elif args.primitive_action == "deps":
|
|
3941
|
+
if args.primitive_deps_action == "show":
|
|
3942
|
+
client.primitive_deps_show(args.primitive_id)
|
|
3943
|
+
elif args.primitive_deps_action == "add":
|
|
3944
|
+
client.primitive_deps_add(args.primitive_id, args.required_id, args.type, args.min_status)
|
|
3945
|
+
else:
|
|
3946
|
+
primitive_deps_parser.print_help()
|
|
3947
|
+
else:
|
|
3948
|
+
primitive_parser.print_help()
|
|
3949
|
+
|
|
3950
|
+
elif args.command == "skill":
|
|
3951
|
+
if args.skill_action == "init":
|
|
3952
|
+
client.skill_init(args.name, args.robot, args.template, args.output)
|
|
3953
|
+
elif args.skill_action == "compose":
|
|
3954
|
+
client.skill_compose(args.skill_file, args.primitives)
|
|
3955
|
+
elif args.skill_action == "list":
|
|
3956
|
+
client.skill_list(args.robot, args.status)
|
|
3957
|
+
elif args.skill_action == "get":
|
|
3958
|
+
client.skill_get(args.skill_id)
|
|
3959
|
+
elif args.skill_action == "push":
|
|
3960
|
+
client.skill_push(args.skill_file)
|
|
3961
|
+
elif args.skill_action == "test":
|
|
3962
|
+
client.skill_test(args.skill, args.params, not args.execute)
|
|
3963
|
+
else:
|
|
3964
|
+
skill_parser.print_help()
|
|
3965
|
+
|
|
3966
|
+
elif args.command == "bridge":
|
|
3967
|
+
if args.bridge_action == "connect":
|
|
3968
|
+
client.bridge_connect(args.port, args.transport, args.baud, args.protocol)
|
|
3969
|
+
elif args.bridge_action == "send":
|
|
3970
|
+
client.bridge_send(args.port, args.command, args.transport, args.baud, args.wait)
|
|
3971
|
+
elif args.bridge_action == "record":
|
|
3972
|
+
client.bridge_record(args.port, args.output, args.transport, args.baud, args.name)
|
|
3973
|
+
elif args.bridge_action == "replay":
|
|
3974
|
+
client.bridge_replay(args.recording, args.port, args.transport, args.baud, args.speed)
|
|
3975
|
+
elif args.bridge_action == "serve":
|
|
3976
|
+
from ate.bridge_server import run_bridge_server
|
|
3977
|
+
run_bridge_server(port=args.port, verbose=args.verbose)
|
|
3978
|
+
else:
|
|
3979
|
+
bridge_parser.print_help()
|
|
3980
|
+
|
|
1715
3981
|
elif args.command == "generate":
|
|
1716
3982
|
client.generate(args.description, args.robot, args.output)
|
|
1717
3983
|
|
|
3984
|
+
elif args.command == "compile":
|
|
3985
|
+
client.compile_skill(
|
|
3986
|
+
args.skill_path,
|
|
3987
|
+
args.output,
|
|
3988
|
+
args.target,
|
|
3989
|
+
args.robot,
|
|
3990
|
+
getattr(args, 'ate_dir', None)
|
|
3991
|
+
)
|
|
3992
|
+
|
|
3993
|
+
elif args.command == "test-skill":
|
|
3994
|
+
client.test_compiled_skill(
|
|
3995
|
+
args.skill_path,
|
|
3996
|
+
args.mode,
|
|
3997
|
+
args.robot_port,
|
|
3998
|
+
args.params
|
|
3999
|
+
)
|
|
4000
|
+
|
|
4001
|
+
elif args.command == "publish-skill":
|
|
4002
|
+
client.publish_compiled_skill(args.skill_path, args.visibility)
|
|
4003
|
+
|
|
4004
|
+
elif args.command == "check-compatibility":
|
|
4005
|
+
client.check_skill_compatibility(
|
|
4006
|
+
args.skill_path,
|
|
4007
|
+
args.robot_urdf,
|
|
4008
|
+
getattr(args, 'ate_dir', None)
|
|
4009
|
+
)
|
|
4010
|
+
|
|
4011
|
+
elif args.command == "publish-protocol":
|
|
4012
|
+
client.publish_protocol(args.file)
|
|
4013
|
+
|
|
1718
4014
|
elif args.command == "workflow":
|
|
1719
4015
|
if args.workflow_action == "validate":
|
|
1720
4016
|
client.workflow_validate(args.path)
|
|
@@ -1760,6 +4056,93 @@ def main():
|
|
|
1760
4056
|
else:
|
|
1761
4057
|
deploy_parser.print_help()
|
|
1762
4058
|
|
|
4059
|
+
elif args.command == "robot-setup":
|
|
4060
|
+
from ate.robot_setup import run_wizard, RobotSetupWizard
|
|
4061
|
+
|
|
4062
|
+
if args.scan_only:
|
|
4063
|
+
# Just scan for devices
|
|
4064
|
+
try:
|
|
4065
|
+
import serial.tools.list_ports
|
|
4066
|
+
print("\nScanning for serial ports...\n")
|
|
4067
|
+
ports = list(serial.tools.list_ports.comports())
|
|
4068
|
+
|
|
4069
|
+
if not ports:
|
|
4070
|
+
print("No serial ports found.")
|
|
4071
|
+
else:
|
|
4072
|
+
print(f"Found {len(ports)} port(s):\n")
|
|
4073
|
+
for port in ports:
|
|
4074
|
+
print(f" Port: {port.device}")
|
|
4075
|
+
if port.description:
|
|
4076
|
+
print(f" Description: {port.description}")
|
|
4077
|
+
if port.manufacturer:
|
|
4078
|
+
print(f" Manufacturer: {port.manufacturer}")
|
|
4079
|
+
if port.vid and port.pid:
|
|
4080
|
+
print(f" VID:PID: {port.vid:04x}:{port.pid:04x}")
|
|
4081
|
+
if port.serial_number:
|
|
4082
|
+
print(f" Serial: {port.serial_number}")
|
|
4083
|
+
print()
|
|
4084
|
+
except ImportError:
|
|
4085
|
+
print("Error: pyserial not installed. Run: pip install pyserial")
|
|
4086
|
+
sys.exit(1)
|
|
4087
|
+
else:
|
|
4088
|
+
# Run full wizard
|
|
4089
|
+
success = run_wizard(
|
|
4090
|
+
port=args.port,
|
|
4091
|
+
output=args.output,
|
|
4092
|
+
skip_labeling=args.skip_labeling,
|
|
4093
|
+
robot_type=getattr(args, 'robot_type', None),
|
|
4094
|
+
non_interactive=getattr(args, 'non_interactive', False),
|
|
4095
|
+
push=getattr(args, 'push', False),
|
|
4096
|
+
api_url=getattr(args, 'api_url', None)
|
|
4097
|
+
)
|
|
4098
|
+
sys.exit(0 if success else 1)
|
|
4099
|
+
|
|
4100
|
+
elif args.command == "marketplace":
|
|
4101
|
+
from ate.marketplace import (
|
|
4102
|
+
search_skills, show_skill, install_skill,
|
|
4103
|
+
publish_skill, report_compatibility, list_installed
|
|
4104
|
+
)
|
|
4105
|
+
|
|
4106
|
+
if args.marketplace_action == "search":
|
|
4107
|
+
search_skills(
|
|
4108
|
+
query=args.query,
|
|
4109
|
+
category=args.category,
|
|
4110
|
+
robot_type=getattr(args, 'robot_type', None),
|
|
4111
|
+
license_type=args.license,
|
|
4112
|
+
pricing=args.pricing,
|
|
4113
|
+
sort=args.sort,
|
|
4114
|
+
limit=args.limit,
|
|
4115
|
+
)
|
|
4116
|
+
elif args.marketplace_action == "show":
|
|
4117
|
+
show_skill(args.slug)
|
|
4118
|
+
elif args.marketplace_action == "install":
|
|
4119
|
+
install_skill(
|
|
4120
|
+
skill_name=args.skill_name,
|
|
4121
|
+
version=args.version,
|
|
4122
|
+
robot=args.robot,
|
|
4123
|
+
output_dir=args.output,
|
|
4124
|
+
)
|
|
4125
|
+
elif args.marketplace_action == "publish":
|
|
4126
|
+
publish_skill(
|
|
4127
|
+
path=args.path,
|
|
4128
|
+
public=not args.no_public,
|
|
4129
|
+
)
|
|
4130
|
+
elif args.marketplace_action == "report":
|
|
4131
|
+
if args.works is None:
|
|
4132
|
+
print("Error: Must specify --works or --no-works", file=sys.stderr)
|
|
4133
|
+
sys.exit(1)
|
|
4134
|
+
report_compatibility(
|
|
4135
|
+
skill_name=args.skill_name,
|
|
4136
|
+
robot=args.robot,
|
|
4137
|
+
works=args.works,
|
|
4138
|
+
notes=args.notes,
|
|
4139
|
+
version=args.version,
|
|
4140
|
+
)
|
|
4141
|
+
elif args.marketplace_action == "installed":
|
|
4142
|
+
list_installed()
|
|
4143
|
+
else:
|
|
4144
|
+
marketplace_parser.print_help()
|
|
4145
|
+
|
|
1763
4146
|
|
|
1764
4147
|
if __name__ == "__main__":
|
|
1765
4148
|
main()
|