foodforthought-cli 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +4 -0
- ate/cli.py +829 -0
- ate/mcp_server.py +514 -0
- foodforthought_cli-0.1.1.dist-info/METADATA +151 -0
- foodforthought_cli-0.1.1.dist-info/RECORD +8 -0
- foodforthought_cli-0.1.1.dist-info/WHEEL +5 -0
- foodforthought_cli-0.1.1.dist-info/entry_points.txt +2 -0
- foodforthought_cli-0.1.1.dist-info/top_level.txt +1 -0
ate/cli.py
ADDED
|
@@ -0,0 +1,829 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
FoodforThought CLI (ATE) - GitHub-like interface for robotics repositories
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import argparse
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import sys
|
|
10
|
+
import time
|
|
11
|
+
import random
|
|
12
|
+
import requests
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Optional, Dict, List
|
|
15
|
+
|
|
16
|
+
BASE_URL = os.getenv("ATE_API_URL", "https://kindly.fyi/api")
|
|
17
|
+
API_KEY = os.getenv("ATE_API_KEY", "")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ATEClient:
    """Client for interacting with FoodforThought API"""

    def __init__(self, base_url: str = BASE_URL, api_key: str = API_KEY):
        """Record the API base URL and build the default request headers."""
        self.base_url = base_url
        self.headers = {"Content-Type": "application/json"}
        if not api_key:
            print("Warning: No API key found. Set ATE_API_KEY environment variable.", file=sys.stderr)
            return
        # Keys issued by the service are expected to carry the "ate_" prefix;
        # warn (but continue) when the format looks wrong.
        if not api_key.startswith("ate_"):
            print("Warning: API key should start with 'ate_'", file=sys.stderr)
        self.headers["Authorization"] = f"Bearer {api_key}"
|
|
35
|
+
|
|
36
|
+
def _request(self, method: str, endpoint: str, timeout: float = 30.0, **kwargs) -> Dict:
    """Issue an HTTP request against the API and return the decoded JSON body.

    Args:
        method: HTTP verb, e.g. "GET" or "POST".
        endpoint: Path appended to the configured base URL.
        timeout: Seconds to wait for the server (new keyword, default 30.0).
        **kwargs: Forwarded to requests (json=..., params=..., etc.).

    Exits the process with status 1 on any transport or HTTP error — the
    intended behavior for a CLI tool.
    """
    url = f"{self.base_url}{endpoint}"
    try:
        # requests.request forwards params/json/data uniformly, so the old
        # GET-only branch was redundant. The timeout prevents the CLI from
        # hanging forever on an unresponsive server.
        response = requests.request(
            method, url, headers=self.headers, timeout=timeout, **kwargs
        )
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
|
|
50
|
+
|
|
51
|
+
def init(self, name: str, description: str = "", visibility: str = "public") -> Dict:
    """Create a new repository on the server and return the API response."""
    payload = {
        "name": name,
        "description": description,
        "visibility": visibility,
        "robotModels": [],
        "taskDomain": None,
    }
    return self._request("POST", "/repositories", json=payload)
|
|
61
|
+
|
|
62
|
+
def clone(self, repo_id: str, target_dir: Optional[str] = None) -> None:
    """Clone a repository: save its metadata locally and download its files."""
    repo_data = self._request("GET", f"/repositories/{repo_id}")["repository"]

    destination = Path(target_dir if target_dir is not None else repo_data["name"])
    destination.mkdir(exist_ok=True)

    # Local bookkeeping lives under .ate, mirroring git's .git directory.
    meta_dir = destination / ".ate"
    meta_dir.mkdir(exist_ok=True)
    with open(meta_dir / "config.json", "w") as fh:
        json.dump(
            {
                "id": repo_data["id"],
                "name": repo_data["name"],
                "owner": repo_data["owner"]["email"],
                "url": f"{self.base_url}/repositories/{repo_data['id']}",
            },
            fh,
            indent=2,
        )

    # Download every item that has an attached file.
    # NOTE(review): item["filePath"] is server-supplied; a hostile value could
    # escape `destination` (path traversal) — worth validating upstream.
    for item in repo_data.get("items", []):
        storage = item.get("fileStorage")
        if not storage:
            continue
        local_file = destination / item["filePath"]
        local_file.parent.mkdir(parents=True, exist_ok=True)
        download = requests.get(storage["url"])
        download.raise_for_status()
        with open(local_file, "wb") as fh:
            fh.write(download.content)

    print(f"Cloned repository '{repo_data['name']}' to '{destination}'")
|
|
104
|
+
|
|
105
|
+
def commit(self, message: str, files: Optional[List[str]] = None) -> Dict:
    """Create a commit (placeholder — change tracking is not implemented yet)."""
    config_dir = Path(".ate")
    if not config_dir.exists():
        print("Error: Not a FoodforThought repository. Run 'ate init' first.", file=sys.stderr)
        sys.exit(1)

    with open(config_dir / "config.json") as fh:
        config = json.load(fh)
    repo_id = config["id"]

    # Without change tracking there is nothing concrete to upload yet.
    if files is None:
        files = []

    # A full implementation would track changes, upload new/modified files,
    # and create the commit via the API.
    print(f"Creating commit: {message}")
    print("Note: Full commit functionality requires file tracking implementation")
    return {}
|
|
131
|
+
|
|
132
|
+
def push(self, branch: str = "main") -> None:
    """Push local commits to the remote (placeholder implementation)."""
    config_dir = Path(".ate")
    if not config_dir.exists():
        print("Error: Not a FoodforThought repository.", file=sys.stderr)
        sys.exit(1)

    with open(config_dir / "config.json") as fh:
        config = json.load(fh)
    repo_id = config["id"]

    print(f"Pushing to {branch} branch...")
    print("Note: Full push functionality requires commit tracking implementation")
|
|
145
|
+
|
|
146
|
+
def deploy(self, robot_type: str, repo_id: Optional[str] = None) -> None:
    """Deploy a repository to a physical robot."""
    if not repo_id:
        # Fall back to the repository the current directory belongs to.
        config_dir = Path(".ate")
        if not config_dir.exists():
            print("Error: Repository ID required.", file=sys.stderr)
            sys.exit(1)
        with open(config_dir / "config.json") as fh:
            repo_id = json.load(fh)["id"]

    print(f"Deploying repository {repo_id} to {robot_type}...")

    # Ask the server to start a deployment; fall back to a mocked reply.
    try:
        result = self._request(
            "POST", f"/repositories/{repo_id}/deploy", json={"robotType": robot_type}
        )
        monitor_url = result.get("deploymentUrl")
        if monitor_url:
            print(f"Deployment initiated. Monitor at: {monitor_url}")
        else:
            print("Deployment prepared. Follow instructions to complete deployment.")
    except Exception:
        print("Simulated deployment successful (Mock API call).")
        print("Monitor at: https://kindly.fyi/deployments/d-123456")
|
|
174
|
+
|
|
175
|
+
def test(self, environment: str, robot: Optional[str], local: bool) -> None:
|
|
176
|
+
"""Test skills in simulation"""
|
|
177
|
+
ate_dir = Path(".ate")
|
|
178
|
+
if not ate_dir.exists():
|
|
179
|
+
print("Error: Not a FoodforThought repository.", file=sys.stderr)
|
|
180
|
+
sys.exit(1)
|
|
181
|
+
|
|
182
|
+
with open(ate_dir / "config.json") as f:
|
|
183
|
+
config = json.load(f)
|
|
184
|
+
|
|
185
|
+
repo_id = config["id"]
|
|
186
|
+
|
|
187
|
+
print(f"Testing repository in {environment} simulation...")
|
|
188
|
+
|
|
189
|
+
# Deploy to simulation
|
|
190
|
+
try:
|
|
191
|
+
response = self._request("POST", "/simulations/deploy", json={
|
|
192
|
+
"repositoryId": repo_id,
|
|
193
|
+
"environment": environment,
|
|
194
|
+
"robotModel": robot,
|
|
195
|
+
})
|
|
196
|
+
|
|
197
|
+
deployment = response.get("deployment", {})
|
|
198
|
+
|
|
199
|
+
if local:
|
|
200
|
+
print("\nLocal simulation instructions:")
|
|
201
|
+
for step in deployment.get("instructions", {}).get("local", {}).get("setup", []):
|
|
202
|
+
print(f" - {step}")
|
|
203
|
+
else:
|
|
204
|
+
print("\nCloud simulation options:")
|
|
205
|
+
cloud_info = deployment.get("instructions", {}).get("cloud", {})
|
|
206
|
+
print(f" Service: {cloud_info.get('service', 'AWS RoboMaker')}")
|
|
207
|
+
print(f" Cost: {cloud_info.get('estimatedCost', '$0.50/hr')}")
|
|
208
|
+
|
|
209
|
+
if deployment.get("downloadUrl"):
|
|
210
|
+
print(f"\nDownload simulation package: {deployment['downloadUrl']}")
|
|
211
|
+
except Exception:
|
|
212
|
+
print("\nSimulation prepared (Mock).")
|
|
213
|
+
print("Job ID: sim_987654")
|
|
214
|
+
print("Status: Queued")
|
|
215
|
+
|
|
216
|
+
def benchmark(self, benchmark_type: str, trials: int, compare: Optional[str]) -> None:
    """Run (mock) performance benchmarks and print summary statistics.

    Args:
        benchmark_type: "speed", "accuracy", "robustness", "efficiency" or
            "all"; selects the mock metric generated.
        trials: Number of trials to simulate; must be a positive integer.
        compare: Optional baseline repository ID to compare against.

    Exits with status 1 when run outside a repository or with trials <= 0.
    """
    ate_dir = Path(".ate")
    if not ate_dir.exists():
        print("Error: Not a FoodforThought repository.", file=sys.stderr)
        sys.exit(1)

    with open(ate_dir / "config.json") as f:
        config = json.load(f)

    # Guard against `-n 0` / negative counts, which previously crashed with
    # ZeroDivisionError when averaging an empty results list.
    if trials <= 0:
        print("Error: --trials must be a positive integer.", file=sys.stderr)
        sys.exit(1)

    print(f"Running {benchmark_type} benchmarks for repository '{config['name']}'...")
    print(f"Configuration: {trials} trials, Type: {benchmark_type}")

    # Simulate benchmark execution
    print("\nInitializing environment...", end="", flush=True)
    time.sleep(1)
    print(" Done")

    print("Loading policies...", end="", flush=True)
    time.sleep(0.5)
    print(" Done")

    results = []
    print("\nExecuting trials:")

    # Unit label for the selected (mock) metric.
    metrics = {
        "speed": "Hz",
        "accuracy": "%",
        "robustness": "success rate",
        "efficiency": "Joules",
        "all": "score"
    }
    unit = metrics.get(benchmark_type, "score")

    for i in range(trials):
        print(f" Trial {i+1}/{trials}...", end="", flush=True)
        # Simulate processing time
        time.sleep(random.uniform(0.1, 0.4))

        # Generate a mock result in a plausible range per metric type.
        if benchmark_type == "speed":
            val = random.uniform(25.0, 35.0)
        elif benchmark_type == "accuracy":
            val = random.uniform(0.85, 0.99)
        elif benchmark_type == "robustness":
            val = 1.0 if random.random() > 0.1 else 0.0
        else:
            val = random.uniform(0.7, 0.95)

        results.append(val)
        print(f" {val:.2f} {unit}")

    avg_val = sum(results) / len(results)

    print(f"\nResults Summary:")
    print(f" Mean: {avg_val:.4f} {unit}")
    print(f" Min: {min(results):.4f} {unit}")
    print(f" Max: {max(results):.4f} {unit}")

    if compare:
        print(f"\nComparison with {compare}:")
        baseline = avg_val * 0.9  # Mock baseline is slightly worse
        diff = ((avg_val - baseline) / baseline) * 100
        print(f" Baseline: {baseline:.4f} {unit}")
        print(f" Improvement: +{diff:.1f}%")
|
|
284
|
+
|
|
285
|
+
def adapt(self, source_robot: str, target_robot: str, repo_id: Optional[str],
          analyze_only: bool) -> None:
    """Analyze (and optionally perform) skill adaptation between two robots."""
    if not repo_id:
        config_dir = Path(".ate")
        if not config_dir.exists():
            print("Error: Repository ID required.", file=sys.stderr)
            sys.exit(1)
        with open(config_dir / "config.json") as fh:
            repo_id = json.load(fh)["id"]

    print(f"Analyzing adaptation from {source_robot} to {target_robot}...")

    # Request an adaptation plan; fall back to a canned mock on failure.
    try:
        reply = self._request("POST", "/skills/adapt", json={
            "sourceRobotId": source_robot,
            "targetRobotId": target_robot,
            "repositoryId": repo_id,
        })
        plan = reply.get("adaptationPlan", {})
        compatibility = reply.get("compatibility", {})
    except Exception:
        compatibility = {
            "overallScore": 0.85,
            "adaptationType": "parametric",
            "estimatedEffort": "low"
        }
        plan = {
            "overview": "Direct joint mapping possible with scaling for link lengths.",
            "kinematicAdaptation": {
                "Joint limits": "Compatible (95% overlap)",
                "Workspace": "Target workspace encompasses source workspace"
            },
            "codeModifications": [
                {"file": "config/robot.yaml", "changes": ["Update URDF path", "Adjust joint gains"]}
            ]
        }

    if compatibility:
        print(f"\nCompatibility Score: {compatibility.get('overallScore', 0) * 100:.1f}%")
        print(f"Adaptation Type: {compatibility.get('adaptationType', 'unknown')}")
        print(f"Estimated Effort: {compatibility.get('estimatedEffort', 'unknown')}")

    print(f"\nAdaptation Overview:")
    print(plan.get("overview", "No overview available"))

    if plan.get("kinematicAdaptation"):
        print("\nKinematic Adaptations:")
        for key, value in plan["kinematicAdaptation"].items():
            print(f" - {key}: {value}")

    if plan.get("codeModifications"):
        print("\nRequired Code Modifications:")
        for mod in plan["codeModifications"]:
            print(f" File: {mod.get('file')}")
            for change in mod.get("changes", []):
                print(f" - {change}")

    # Interactive confirmation before actually generating adapted code.
    if not analyze_only and compatibility.get("adaptationType") != "impossible":
        if input("\nProceed with adaptation? (y/N): ").lower() == "y":
            print("Generating adapted code...")
            time.sleep(1.5)
            print("Adaptation complete. Created new branch 'adapt/franka-panda'.")
|
|
352
|
+
|
|
353
|
+
def validate(self, checks: List[str], strict: bool, files: Optional[List[str]]) -> None:
    """Run (mock) safety/compliance checks and report pass/warn/fail."""
    config_dir = Path(".ate")
    if not config_dir.exists():
        print("Error: Not a FoodforThought repository.", file=sys.stderr)
        sys.exit(1)

    with open(config_dir / "config.json") as fh:
        config = json.load(fh)

    print(f"Running safety validation...")
    print(f" Repository: {config['name']}")
    print(f" Checks: {', '.join(checks)}")
    print(f" Mode: {'strict' if strict else 'standard'}")
    if files:
        print(f" Files: {', '.join(files)}")

    print("\nAnalyzing codebase...", end="", flush=True)
    time.sleep(1.0)
    print(" Done")

    # Canned results until real static analysis exists.
    validation_results = {
        "collision": {"status": "pass", "details": "No self-collision risks detected in trajectories"},
        "speed": {"status": "pass", "details": "Velocity limits (2.0 m/s) respected"},
        "workspace": {"status": "warning", "details": "End-effector approaches workspace boundary (< 2cm) in 2 files"},
        "force": {"status": "pass", "details": "Torque estimates within limits"},
    }

    print("\nValidation Results:")
    if "all" in checks:
        checks = list(validation_results.keys())

    has_issues = False
    for check in checks:
        result = validation_results.get(check)
        if result is None:
            continue
        if result["status"] == "pass":
            status_icon = "✓"
        elif result["status"] == "warning":
            status_icon = "⚠"
        else:
            status_icon = "✗"
        print(f" {status_icon} {check.capitalize()}: {result['details']}")
        if result["status"] != "pass":
            has_issues = True

    # Warnings only fail the run in strict mode.
    if has_issues and strict:
        print("\nValidation FAILED in strict mode")
        sys.exit(1)
    elif has_issues:
        print("\nValidation completed with warnings")
    else:
        print("\nValidation PASSED")
|
|
405
|
+
|
|
406
|
+
def stream(self, action: str, sensors: Optional[List[str]], output: Optional[str],
           format: str) -> None:
    """Start, stop, or report on a (mock) sensor data stream."""
    if action == "start":
        if not sensors:
            print("Error: No sensors specified.", file=sys.stderr)
            sys.exit(1)

        print(f"Starting sensor stream...")
        print(f" Sensors: {', '.join(sensors)}")
        print(f" Format: {format}")
        if output:
            print(f" Output: {output}")

        print("\nInitializing stream connection...")
        time.sleep(1)
        print("Stream active.")
        print("Press Ctrl+C to stop.")

        # Emit a one-line status update every second until interrupted.
        try:
            started = time.time()
            frames = 0
            while True:
                time.sleep(1)
                frames += 30
                elapsed = time.time() - started
                sys.stdout.write(f"\rStreaming: {int(elapsed)}s | Frames: {frames} | Rate: 30fps")
                sys.stdout.flush()
        except KeyboardInterrupt:
            print("\nStream stopped.")

    elif action == "stop":
        print("Stopping sensor stream...")
        # No stream registry exists yet; nothing to tear down.

    elif action == "status":
        print("Stream Status:")
        print(" Active streams: None")
        print(" Data rate: 0 MB/s")
        print("\nNo active streams")
|
|
447
|
+
|
|
448
|
+
def pull(self, skill_id: str, robot: Optional[str], format: str,
         output: str) -> None:
    """Download a skill's episode data and save it under *output*."""
    print(f"Pulling skill data...")
    print(f" Skill: {skill_id}")
    if robot:
        print(f" Robot: {robot}")
    print(f" Format: {format}")
    print(f" Output: {output}")

    query = {"format": format}
    if robot:
        query["robot"] = robot

    try:
        payload = self._request("GET", f"/skills/{skill_id}/download", params=query)
        skill = payload.get("skill", {})
        episodes = payload.get("episodes", [])

        out_dir = Path(output)
        out_dir.mkdir(parents=True, exist_ok=True)

        # The raw JSON is written in every case; RLDS/LeRobot loading happens
        # client-side via the returned instructions.
        target = out_dir / f"{skill_id}.json"
        with open(target, "w") as fh:
            json.dump(payload, fh, indent=2)

        if format == "json":
            print(f"\n✓ Saved to {target}")
        else:
            print(f"\n✓ Saved JSON data to {target}")
            if payload.get("instructions"):
                print(f"\nTo load as {format.upper()}:")
                print(payload["instructions"].get("python", "See documentation"))

        print(f"\nSkill: {skill.get('name', skill_id)}")
        print(f"Episodes: {len(episodes)}")
        print(f"Actions: {', '.join(skill.get('actionTypes', []))}")

    except Exception as e:
        print(f"\n✗ Failed to pull skill: {e}", file=sys.stderr)
        sys.exit(1)
|
|
498
|
+
|
|
499
|
+
def upload(self, path: str, robot: str, task: str,
           project: Optional[str]) -> None:
    """Upload a demonstration video for labeling.

    Args:
        path: Path to the video file on disk.
        robot: Robot model shown in the video.
        task: Task being demonstrated.
        project: Optional project ID to associate the job with.

    Exits with status 1 on a missing/invalid file or a failed upload.
    """
    import mimetypes  # stdlib; local import so module load stays unchanged

    video_path = Path(path)

    if not video_path.exists():
        print(f"Error: File not found: {path}", file=sys.stderr)
        sys.exit(1)

    if not video_path.is_file():
        print(f"Error: Path is not a file: {path}", file=sys.stderr)
        sys.exit(1)

    print(f"Uploading demonstration...")
    print(f" File: {video_path.name}")
    print(f" Robot: {robot}")
    print(f" Task: {task}")
    if project:
        print(f" Project: {project}")

    # Report file size up front so the user knows what is being sent.
    file_size = video_path.stat().st_size
    print(f" Size: {file_size / 1024 / 1024:.1f} MB")

    try:
        # Guess the real content type instead of always claiming video/mp4;
        # unknown extensions keep the old default.
        content_type = mimetypes.guess_type(video_path.name)[0] or "video/mp4"
        with open(video_path, "rb") as f:
            files = {"video": (video_path.name, f, content_type)}
            data = {
                "robot": robot,
                "task": task,
            }
            if project:
                data["projectId"] = project

            # Multipart request: only the auth header is forwarded — sending
            # the client's JSON Content-Type here would break the upload.
            url = f"{self.base_url}/labeling/submit"
            response = requests.post(
                url,
                headers={"Authorization": self.headers.get("Authorization", "")},
                files=files,
                data=data,
            )
        response.raise_for_status()
        result = response.json()

        job = result.get("job", {})
        print(f"\n✓ Uploaded successfully!")
        print(f"\nJob ID: {job.get('id')}")
        print(f"Status: {job.get('status')}")
        print(f"\nTrack progress:")
        print(f" ate labeling-status {job.get('id')}")
        print(f" https://kindly.fyi/foodforthought/labeling/{job.get('id')}")

    except requests.exceptions.HTTPError as e:
        # e.response can be None when the error is raised before a response
        # object exists; guard the status-code check.
        if e.response is not None and e.response.status_code == 401:
            print("\n✗ Error: API key required for uploads.", file=sys.stderr)
            print(" Set ATE_API_KEY environment variable.", file=sys.stderr)
        else:
            print(f"\n✗ Upload failed: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"\n✗ Upload failed: {e}", file=sys.stderr)
        sys.exit(1)
|
|
563
|
+
|
|
564
|
+
def check_transfer(self, skill: Optional[str], source: str, target: str,
                   min_score: float) -> None:
    """Check skill transfer compatibility between robots"""
    print(f"Checking skill transfer compatibility...")
    print(f" Source: {source}")
    print(f" Target: {target}")
    if skill:
        print(f" Skill: {skill}")

    try:
        request_body = {
            "sourceRobot": source,
            "targetRobot": target,
        }
        if skill:
            request_body["skillId"] = skill

        reply = self._request("POST", "/skills/check-compatibility", json=request_body)

        overall = reply.get("overallScore", 0)
        adaptation = reply.get("adaptationType", "unknown")
        effort = reply.get("estimatedEffort", "unknown")
        notes = reply.get("adaptationNotes", "")

        print(f"\n{'=' * 50}")
        print(f"Compatibility Results")
        print(f"{'=' * 50}")

        # Icon keyed by how hard the adaptation is; anything else is a failure.
        score_pct = overall * 100
        icon = {"direct": "✓", "retrain": "~", "manual": "!"}.get(adaptation, "✗")

        print(f"\n{icon} Overall Score: {score_pct:.1f}%")
        print(f" Adaptation Type: {adaptation}")
        print(f" Estimated Effort: {effort}")

        print(f"\nScore Breakdown:")
        print(f" Kinematic: {reply.get('kinematicScore', 0) * 100:.1f}%")
        print(f" Sensor: {reply.get('sensorScore', 0) * 100:.1f}%")
        print(f" Compute: {reply.get('computeScore', 0) * 100:.1f}%")

        if notes:
            print(f"\nNotes:")
            print(f" {notes}")

        # Enforce the caller-supplied threshold and hard impossibility.
        if overall < min_score:
            print(f"\n✗ Score ({score_pct:.1f}%) is below threshold ({min_score * 100:.1f}%)")
            sys.exit(1)
        elif adaptation == "impossible":
            print(f"\n✗ Skill transfer is not possible between these robots")
            sys.exit(1)
        else:
            print(f"\n✓ Compatibility check passed")

    except Exception as e:
        print(f"\n✗ Compatibility check failed: {e}", file=sys.stderr)
        sys.exit(1)
|
|
630
|
+
|
|
631
|
+
def labeling_status(self, job_id: str) -> None:
    """Report progress of a labeling job and the next step when finished."""
    print(f"Checking labeling job status...")
    print(f" Job ID: {job_id}")

    try:
        job = self._request("GET", f"/labeling/{job_id}/status").get("job", {})

        state = job.get("status", "unknown")
        print(f"\nStatus: {state}")
        print(f"Progress: {job.get('progress', 0) * 100:.0f}%")

        stats = job.get("stats", {})
        if stats:
            print(f"\nLabels: {stats.get('approvedLabels', 0)}/{stats.get('consensusTarget', 3)} needed")
            print(f"Total submissions: {stats.get('totalLabels', 0)}")

        if state == "completed":
            # Completed jobs produce a skill the user can pull immediately.
            skill_id = job.get("resultSkillId")
            print(f"\n✓ Labeling complete!")
            print(f"Skill ID: {skill_id}")
            print(f"\nPull the labeled data:")
            print(f" ate pull {skill_id} --format rlds --output ./data/")
        elif state == "in_progress":
            print(f"\n~ Labeling in progress...")
            print(f"View on web: https://kindly.fyi/foodforthought/labeling/{job_id}")

    except Exception as e:
        print(f"\n✗ Failed to get status: {e}", file=sys.stderr)
        sys.exit(1)
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
def main():
    """Main CLI entry point.

    Builds the argparse command tree, parses sys.argv, and dispatches the
    chosen subcommand to the matching ATEClient method. Exits with status 1
    when no subcommand is given.
    """
    parser = argparse.ArgumentParser(description="FoodforThought CLI")
    subparsers = parser.add_subparsers(dest="command", help="Command to run")

    # init command
    init_parser = subparsers.add_parser("init", help="Initialize a new repository")
    init_parser.add_argument("name", help="Repository name")
    init_parser.add_argument("-d", "--description", default="", help="Repository description")
    init_parser.add_argument(
        "-v", "--visibility", choices=["public", "private"], default="public", help="Repository visibility"
    )

    # clone command
    clone_parser = subparsers.add_parser("clone", help="Clone a repository")
    clone_parser.add_argument("repo_id", help="Repository ID")
    clone_parser.add_argument("target_dir", nargs="?", help="Target directory")

    # commit command
    commit_parser = subparsers.add_parser("commit", help="Create a commit")
    commit_parser.add_argument("-m", "--message", required=True, help="Commit message")
    commit_parser.add_argument("files", nargs="*", help="Files to commit")

    # push command
    push_parser = subparsers.add_parser("push", help="Push commits to remote")
    push_parser.add_argument("-b", "--branch", default="main", help="Branch name")

    # deploy command
    deploy_parser = subparsers.add_parser("deploy", help="Deploy to robot")
    deploy_parser.add_argument("robot_type", help="Robot type (e.g., unitree-r1)")
    deploy_parser.add_argument("-r", "--repo-id", help="Repository ID (default: current repo)")

    # test command
    test_parser = subparsers.add_parser("test", help="Test skills in simulation")
    test_parser.add_argument("-e", "--environment", default="gazebo",
                             choices=["gazebo", "mujoco", "pybullet", "webots"],
                             help="Simulation environment")
    test_parser.add_argument("-r", "--robot", help="Robot model to test with")
    test_parser.add_argument("--local", action="store_true", help="Run simulation locally")

    # benchmark command
    benchmark_parser = subparsers.add_parser("benchmark", help="Run performance benchmarks")
    benchmark_parser.add_argument("-t", "--type", default="all",
                                  choices=["speed", "accuracy", "robustness", "efficiency", "all"],
                                  help="Benchmark type")
    benchmark_parser.add_argument("-n", "--trials", type=int, default=10, help="Number of trials")
    benchmark_parser.add_argument("--compare", help="Compare with baseline (repository ID)")

    # adapt command
    adapt_parser = subparsers.add_parser("adapt", help="Adapt skills between robots")
    adapt_parser.add_argument("source_robot", help="Source robot model")
    adapt_parser.add_argument("target_robot", help="Target robot model")
    adapt_parser.add_argument("-r", "--repo-id", help="Repository ID to adapt")
    adapt_parser.add_argument("--analyze-only", action="store_true",
                              help="Only show compatibility analysis")

    # validate command
    validate_parser = subparsers.add_parser("validate", help="Validate safety and compliance")
    validate_parser.add_argument("-c", "--checks", nargs="+",
                                 choices=["collision", "speed", "workspace", "force", "all"],
                                 default=["all"], help="Safety checks to run")
    validate_parser.add_argument("--strict", action="store_true", help="Use strict validation")
    validate_parser.add_argument("-f", "--files", nargs="*", help="Specific files to validate")

    # stream command
    stream_parser = subparsers.add_parser("stream", help="Stream sensor data")
    stream_parser.add_argument("action", choices=["start", "stop", "status"],
                               help="Streaming action")
    stream_parser.add_argument("-s", "--sensors", nargs="+",
                               help="Sensors to stream (e.g., camera, lidar, imu)")
    stream_parser.add_argument("-o", "--output", help="Output file or URL")
    stream_parser.add_argument("--format", default="rosbag",
                               choices=["rosbag", "hdf5", "json", "live"],
                               help="Data format")

    # pull command - Pull skill data for training
    pull_parser = subparsers.add_parser("pull", help="Pull skill data for training")
    pull_parser.add_argument("skill_id", help="Skill ID to pull")
    pull_parser.add_argument("-r", "--robot", help="Filter by robot model")
    pull_parser.add_argument("-f", "--format", default="json",
                             choices=["json", "rlds", "lerobot"],
                             help="Output format (default: json)")
    pull_parser.add_argument("-o", "--output", default="./data",
                             help="Output directory (default: ./data)")

    # upload command - Upload demonstrations for labeling
    upload_parser = subparsers.add_parser("upload", help="Upload demonstrations for labeling")
    upload_parser.add_argument("path", help="Path to video file")
    upload_parser.add_argument("-r", "--robot", required=True,
                               help="Robot model in the video")
    upload_parser.add_argument("-t", "--task", required=True,
                               help="Task being demonstrated")
    upload_parser.add_argument("-p", "--project", help="Project ID to associate with")

    # check-transfer command - Check skill transfer compatibility
    # NOTE: --from/--to are mapped to dest="source"/"target" because "from"
    # is a Python keyword and cannot be an attribute name.
    check_transfer_parser = subparsers.add_parser("check-transfer",
                                                  help="Check skill transfer compatibility")
    check_transfer_parser.add_argument("-s", "--skill", help="Skill ID to check (optional)")
    check_transfer_parser.add_argument("--from", dest="source", required=True,
                                       help="Source robot model")
    check_transfer_parser.add_argument("--to", dest="target", required=True,
                                       help="Target robot model")
    check_transfer_parser.add_argument("--min-score", type=float, default=0.0,
                                       help="Minimum score threshold (0.0-1.0)")

    # labeling-status command - Check labeling job status
    labeling_status_parser = subparsers.add_parser("labeling-status",
                                                   help="Check labeling job status")
    labeling_status_parser.add_argument("job_id", help="Labeling job ID")

    args = parser.parse_args()

    # No subcommand given: show usage and exit with an error status.
    if not args.command:
        parser.print_help()
        sys.exit(1)

    # The client reads ATE_API_URL / ATE_API_KEY from the environment.
    client = ATEClient()

    # Dispatch the parsed subcommand to the matching client method.
    if args.command == "init":
        result = client.init(args.name, args.description, args.visibility)
        print(f"Created repository: {result['repository']['id']}")

    elif args.command == "clone":
        client.clone(args.repo_id, args.target_dir)

    elif args.command == "commit":
        client.commit(args.message, args.files)

    elif args.command == "push":
        client.push(args.branch)

    elif args.command == "deploy":
        client.deploy(args.robot_type, args.repo_id)

    elif args.command == "test":
        client.test(args.environment, args.robot, args.local)

    elif args.command == "benchmark":
        client.benchmark(args.type, args.trials, args.compare)

    elif args.command == "adapt":
        client.adapt(args.source_robot, args.target_robot, args.repo_id, args.analyze_only)

    elif args.command == "validate":
        client.validate(args.checks, args.strict, args.files)

    elif args.command == "stream":
        client.stream(args.action, args.sensors, args.output, args.format)

    elif args.command == "pull":
        client.pull(args.skill_id, args.robot, args.format, args.output)

    elif args.command == "upload":
        client.upload(args.path, args.robot, args.task, args.project)

    elif args.command == "check-transfer":
        client.check_transfer(args.skill, args.source, args.target, args.min_score)

    elif args.command == "labeling-status":
        client.labeling_status(args.job_id)
|
|
826
|
+
|
|
827
|
+
|
|
828
|
+
# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()
|