foodforthought-cli 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +1 -1
- ate/bridge_server.py +622 -0
- ate/cli.py +2625 -242
- ate/compatibility.py +580 -0
- ate/generators/__init__.py +19 -0
- ate/generators/docker_generator.py +461 -0
- ate/generators/hardware_config.py +469 -0
- ate/generators/ros2_generator.py +617 -0
- ate/generators/skill_generator.py +783 -0
- ate/marketplace.py +524 -0
- ate/mcp_server.py +2424 -148
- ate/primitives.py +1016 -0
- ate/robot_setup.py +2222 -0
- ate/skill_schema.py +537 -0
- ate/telemetry/__init__.py +33 -0
- ate/telemetry/cli.py +455 -0
- ate/telemetry/collector.py +444 -0
- ate/telemetry/context.py +318 -0
- ate/telemetry/fleet_agent.py +419 -0
- ate/telemetry/formats/__init__.py +18 -0
- ate/telemetry/formats/hdf5_serializer.py +503 -0
- ate/telemetry/formats/mcap_serializer.py +457 -0
- ate/telemetry/types.py +334 -0
- foodforthought_cli-0.2.3.dist-info/METADATA +300 -0
- foodforthought_cli-0.2.3.dist-info/RECORD +44 -0
- foodforthought_cli-0.2.3.dist-info/top_level.txt +6 -0
- mechdog_labeled/__init__.py +3 -0
- mechdog_labeled/primitives.py +113 -0
- mechdog_labeled/servo_map.py +209 -0
- mechdog_output/__init__.py +3 -0
- mechdog_output/primitives.py +59 -0
- mechdog_output/servo_map.py +203 -0
- test_autodetect/__init__.py +3 -0
- test_autodetect/primitives.py +113 -0
- test_autodetect/servo_map.py +209 -0
- test_full_auto/__init__.py +3 -0
- test_full_auto/primitives.py +113 -0
- test_full_auto/servo_map.py +209 -0
- test_smart_detect/__init__.py +3 -0
- test_smart_detect/primitives.py +113 -0
- test_smart_detect/servo_map.py +209 -0
- foodforthought_cli-0.2.0.dist-info/METADATA +0 -151
- foodforthought_cli-0.2.0.dist-info/RECORD +0 -9
- foodforthought_cli-0.2.0.dist-info/top_level.txt +0 -1
- {foodforthought_cli-0.2.0.dist-info → foodforthought_cli-0.2.3.dist-info}/WHEEL +0 -0
- {foodforthought_cli-0.2.0.dist-info → foodforthought_cli-0.2.3.dist-info}/entry_points.txt +0 -0
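The per-file summary above can be reproduced locally by comparing the file lists of the two wheels (a wheel is a zip archive). A minimal sketch, assuming both .whl files have already been fetched (for example with pip download foodforthought-cli==0.2.0 and ==0.2.3) into the working directory:

# Sketch: compare the file lists of the two wheel archives named above.
import zipfile

def wheel_files(path):
    """Return the set of file names contained in a wheel archive."""
    with zipfile.ZipFile(path) as whl:
        return set(whl.namelist())

old = wheel_files("foodforthought_cli-0.2.0-py3-none-any.whl")
new = wheel_files("foodforthought_cli-0.2.3-py3-none-any.whl")

print("added:", sorted(new - old))      # e.g. ate/bridge_server.py, ate/marketplace.py, ...
print("removed:", sorted(old - new))    # e.g. the 0.2.0 dist-info files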
ate/mcp_server.py
CHANGED
@@ -29,6 +29,7 @@ import json
 import os
 import sys
 from typing import Any, Dict, List, Optional
+from pathlib import Path

 # Import the existing CLI client
 from ate.cli import ATEClient
@@ -64,13 +65,16 @@ server = Server("foodforthought")
 client = ATEClient()


-
-
-
+# ============================================================================
+# Tool Definitions
+# ============================================================================
+
+def get_repository_tools() -> List[Tool]:
+    """Repository management tools"""
     return [
         Tool(
             name="ate_init",
-            description="Initialize a new FoodforThought repository",
+            description="Initialize a new FoodforThought repository for robot skills",
             inputSchema={
                 "type": "object",
                 "properties": {
@@ -132,6 +136,26 @@ async def list_tools() -> List[Tool]:
                 },
             },
         ),
+        Tool(
+            name="ate_get_repository",
+            description="Get details of a specific repository",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "repo_id": {
+                        "type": "string",
+                        "description": "Repository ID",
+                    },
+                },
+                "required": ["repo_id"],
+            },
+        ),
+    ]
+
+
+def get_robot_tools() -> List[Tool]:
+    """Robot profile tools"""
+    return [
         Tool(
             name="ate_list_robots",
             description="List available robot profiles",
@@ -155,25 +179,202 @@ async def list_tools() -> List[Tool]:
             },
         ),
         Tool(
-            name="
-            description="
+            name="ate_get_robot",
+            description="Get details of a specific robot profile",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_id": {
+                        "type": "string",
+                        "description": "Robot profile ID",
+                    },
+                },
+                "required": ["robot_id"],
+            },
+        ),
+    ]
+
+
+def get_marketplace_tools() -> List[Tool]:
+    """Unified marketplace tools (Phase 6)"""
+    return [
+        Tool(
+            name="ate_marketplace_robots",
+            description="List community robots from the unified marketplace. Includes both Artifex-published and imported robots.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "search": {
+                        "type": "string",
+                        "description": "Search by name or description",
+                    },
+                    "category": {
+                        "type": "string",
+                        "enum": ["arm", "gripper", "mobile_base", "quadruped", "humanoid",
+                                 "dual_arm", "manipulator", "cobot", "drone", "custom"],
+                        "description": "Filter by robot category",
+                    },
+                    "sort": {
+                        "type": "string",
+                        "enum": ["downloads", "rating", "recent", "name"],
+                        "description": "Sort order",
+                        "default": "downloads",
+                    },
+                    "limit": {
+                        "type": "number",
+                        "description": "Maximum results",
+                        "default": 20,
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_marketplace_robot",
+            description="Get detailed information about a specific robot from the marketplace, including URDF, links, joints, and parts.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_id": {
+                        "type": "string",
+                        "description": "Robot ID or slug",
+                    },
+                },
+                "required": ["robot_id"],
+            },
+        ),
+        Tool(
+            name="ate_marketplace_components",
+            description="List components from the parts marketplace. Components can be grippers, sensors, actuators, etc.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "search": {
+                        "type": "string",
+                        "description": "Search by name or description",
+                    },
+                    "type": {
+                        "type": "string",
+                        "enum": ["gripper", "end_effector", "sensor", "camera",
+                                 "actuator", "link", "base", "arm_segment", "custom"],
+                        "description": "Filter by component type",
+                    },
+                    "sort": {
+                        "type": "string",
+                        "enum": ["downloads", "rating", "recent", "name"],
+                        "description": "Sort order",
+                        "default": "downloads",
+                    },
+                    "limit": {
+                        "type": "number",
+                        "description": "Maximum results",
+                        "default": 20,
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_marketplace_component",
+            description="Get detailed information about a specific component, including compatible robots and specifications.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "component_id": {
+                        "type": "string",
+                        "description": "Component ID",
+                    },
+                },
+                "required": ["component_id"],
+            },
+        ),
+        Tool(
+            name="ate_skill_transfer_check",
+            description="Calculate skill transfer compatibility between robots. Shows which robots can receive skills from a source robot.",
             inputSchema={
                 "type": "object",
                 "properties": {
-                    "
+                    "robot_id": {
                         "type": "string",
                         "description": "Source robot ID",
                     },
-                    "
+                    "direction": {
+                        "type": "string",
+                        "enum": ["from", "to"],
+                        "description": "Direction: 'from' = skills from this robot can transfer to others",
+                        "default": "from",
+                    },
+                    "min_score": {
+                        "type": "number",
+                        "description": "Minimum compatibility score (0.0-1.0)",
+                        "default": 0.4,
+                    },
+                    "limit": {
+                        "type": "number",
+                        "description": "Maximum results",
+                        "default": 10,
+                    },
+                },
+                "required": ["robot_id"],
+            },
+        ),
+        Tool(
+            name="ate_robot_parts",
+            description="Get parts required by or compatible with a robot.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_id": {
+                        "type": "string",
+                        "description": "Robot ID",
+                    },
+                },
+                "required": ["robot_id"],
+            },
+        ),
+        Tool(
+            name="ate_component_robots",
+            description="Get robots that use or are compatible with a component.",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "component_id": {
+                        "type": "string",
+                        "description": "Component ID",
+                    },
+                },
+                "required": ["component_id"],
+            },
+        ),
+    ]
+
+
+def get_compatibility_tools() -> List[Tool]:
+    """Skill compatibility and adaptation tools"""
+    return [
+        Tool(
+            name="ate_check_transfer",
+            description="Check skill transfer compatibility between two robot models",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "source_robot": {
+                        "type": "string",
+                        "description": "Source robot model name",
+                    },
+                    "target_robot": {
                         "type": "string",
-                        "description": "Target robot
+                        "description": "Target robot model name",
                     },
-                    "
+                    "skill_id": {
                         "type": "string",
-                        "description": "
+                        "description": "Optional skill ID to check",
+                    },
+                    "min_score": {
+                        "type": "number",
+                        "description": "Minimum compatibility score threshold (0.0-1.0)",
+                        "default": 0.0,
                     },
                 },
-                "required": ["
+                "required": ["source_robot", "target_robot"],
             },
         ),
         Tool(
@@ -182,191 +383,2008 @@ async def list_tools() -> List[Tool]:
             inputSchema={
                 "type": "object",
                 "properties": {
-                    "
+                    "source_robot": {
                         "type": "string",
-                        "description": "Source robot
+                        "description": "Source robot model",
                     },
-                    "
+                    "target_robot": {
                         "type": "string",
-                        "description": "Target robot
+                        "description": "Target robot model",
                     },
-                    "
+                    "repo_id": {
                         "type": "string",
                         "description": "Repository ID to adapt",
                     },
                     "analyze_only": {
                         "type": "boolean",
                         "description": "Only show compatibility analysis",
+                        "default": True,
+                    },
+                },
+                "required": ["source_robot", "target_robot"],
+            },
+        ),
+    ]
+
+
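For reference, a minimal sketch of how an MCP client could invoke one of the compatibility tools defined above (ate_check_transfer), assuming the reference MCP Python SDK; the launch command for this server is an assumption and should be adjusted to however the package's MCP server is started in your setup:

# Sketch: call ate_check_transfer through the MCP stdio client (mcp Python SDK assumed).
import asyncio
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main():
    # Launching the server as "python -m ate.mcp_server" is an assumption.
    server = StdioServerParameters(command="python", args=["-m", "ate.mcp_server"])
    async with stdio_client(server) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                "ate_check_transfer",
                {"source_robot": "ur5", "target_robot": "franka_panda"},  # robot names are illustrative
            )
            print(result.content[0].text)

asyncio.run(main())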
+def get_skill_tools() -> List[Tool]:
+    """Skill data tools"""
+    return [
+        Tool(
+            name="ate_pull",
+            description="Pull skill data for training in various formats (JSON, RLDS, LeRobot)",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Skill ID to pull",
+                    },
+                    "robot": {
+                        "type": "string",
+                        "description": "Filter by robot model",
+                    },
+                    "format": {
+                        "type": "string",
+                        "enum": ["json", "rlds", "lerobot"],
+                        "description": "Output format",
+                        "default": "json",
+                    },
+                    "output": {
+                        "type": "string",
+                        "description": "Output directory",
+                        "default": "./data",
+                    },
+                },
+                "required": ["skill_id"],
+            },
+        ),
+        Tool(
+            name="ate_upload",
+            description="Upload demonstration videos for community labeling",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "path": {
+                        "type": "string",
+                        "description": "Path to video file",
+                    },
+                    "robot": {
+                        "type": "string",
+                        "description": "Robot model in the video",
+                    },
+                    "task": {
+                        "type": "string",
+                        "description": "Task being demonstrated",
+                    },
+                    "project": {
+                        "type": "string",
+                        "description": "Project ID to associate with",
+                    },
+                },
+                "required": ["path", "robot", "task"],
+            },
+        ),
+    ]
+
+
+def get_parts_tools() -> List[Tool]:
+    """Hardware parts management tools"""
+    return [
+        Tool(
+            name="ate_parts_list",
+            description="List available hardware parts in the catalog",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "category": {
+                        "type": "string",
+                        "enum": ["gripper", "sensor", "actuator", "controller",
+                                 "end-effector", "camera", "lidar", "force-torque"],
+                        "description": "Filter by part category",
+                    },
+                    "manufacturer": {
+                        "type": "string",
+                        "description": "Filter by manufacturer",
+                    },
+                    "search": {
+                        "type": "string",
+                        "description": "Search by name or part number",
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_parts_check",
+            description="Check part compatibility requirements for a skill",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Skill ID to check parts for",
+                    },
+                },
+                "required": ["skill_id"],
+            },
+        ),
+        Tool(
+            name="ate_parts_require",
+            description="Add a part dependency to a skill",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "part_id": {
+                        "type": "string",
+                        "description": "Part ID to require",
+                    },
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Skill ID",
+                    },
+                    "version": {
+                        "type": "string",
+                        "description": "Minimum version",
+                        "default": "1.0.0",
+                    },
+                    "required": {
+                        "type": "boolean",
+                        "description": "Mark as required (not optional)",
                         "default": False,
                     },
                 },
-                "required": ["
+                "required": ["part_id", "skill_id"],
             },
         ),
         Tool(
-            name="
-            description="
+            name="ate_deps_audit",
+            description="Audit and verify all dependencies are compatible for a skill",
             inputSchema={
                 "type": "object",
                 "properties": {
-                    "
+                    "skill_id": {
                         "type": "string",
-                        "description": "
+                        "description": "Skill ID (optional, uses current repo if not specified)",
                     },
                 },
-                "required": ["repo_id"],
             },
         ),
+    ]
+
+
+def get_generate_tools() -> List[Tool]:
+    """Skill generation tools"""
+    return [
         Tool(
-            name="
-            description="
+            name="ate_generate",
+            description="Generate skill scaffolding from a natural language task description",
             inputSchema={
                 "type": "object",
                 "properties": {
-                    "
+                    "description": {
                         "type": "string",
-                        "description": "
+                        "description": "Natural language task description (e.g., 'pick up box and place on pallet')",
+                    },
+                    "robot": {
+                        "type": "string",
+                        "description": "Target robot model",
+                        "default": "ur5",
+                    },
+                    "output": {
+                        "type": "string",
+                        "description": "Output directory for generated files",
+                        "default": "./new-skill",
                     },
                 },
-                "required": ["
+                "required": ["description"],
             },
         ),
     ]


-
-
-
-
-
-
+def get_workflow_tools() -> List[Tool]:
+    """Workflow composition tools"""
+    return [
+        Tool(
+            name="ate_workflow_validate",
+            description="Validate a workflow YAML file for skill composition",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "path": {
+                        "type": "string",
+                        "description": "Path to workflow YAML file",
+                    },
+                },
+                "required": ["path"],
+            },
+        ),
+        Tool(
+            name="ate_workflow_run",
+            description="Run a skill workflow/pipeline",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "path": {
+                        "type": "string",
+                        "description": "Path to workflow YAML file",
+                    },
+                    "sim": {
+                        "type": "boolean",
+                        "description": "Run in simulation mode",
+                        "default": True,
+                    },
+                    "dry_run": {
+                        "type": "boolean",
+                        "description": "Show execution plan without running",
+                        "default": False,
+                    },
+                },
+                "required": ["path"],
+            },
+        ),
+        Tool(
+            name="ate_workflow_export",
+            description="Export workflow to other formats (ROS2 launch, JSON)",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "path": {
+                        "type": "string",
+                        "description": "Path to workflow YAML file",
+                    },
+                    "format": {
+                        "type": "string",
+                        "enum": ["ros2", "json"],
+                        "description": "Export format",
+                        "default": "ros2",
+                    },
+                    "output": {
+                        "type": "string",
+                        "description": "Output file path",
+                    },
+                },
+                "required": ["path"],
+            },
+        ),
+    ]
+
+
+def get_team_tools() -> List[Tool]:
+    """Team collaboration tools"""
+    return [
+        Tool(
+            name="ate_team_create",
+            description="Create a new team for collaboration",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "name": {
+                        "type": "string",
+                        "description": "Team name",
+                    },
+                    "description": {
+                        "type": "string",
+                        "description": "Team description",
+                    },
+                },
+                "required": ["name"],
+            },
+        ),
+        Tool(
+            name="ate_team_list",
+            description="List teams you belong to",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+            },
+        ),
+        Tool(
+            name="ate_team_invite",
+            description="Invite a user to a team",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "email": {
+                        "type": "string",
+                        "description": "Email of user to invite",
+                    },
+                    "team": {
+                        "type": "string",
+                        "description": "Team slug",
+                    },
+                    "role": {
+                        "type": "string",
+                        "enum": ["owner", "admin", "member", "viewer"],
+                        "description": "Role to assign",
+                        "default": "member",
+                    },
+                },
+                "required": ["email", "team"],
+            },
+        ),
+        Tool(
+            name="ate_team_share",
+            description="Share a skill with a team",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_id": {
+                        "type": "string",
+                        "description": "Skill ID to share",
+                    },
+                    "team": {
+                        "type": "string",
+                        "description": "Team slug",
+                    },
+                },
+                "required": ["skill_id", "team"],
+            },
+        ),
+    ]
+
+
+def get_data_tools() -> List[Tool]:
+    """Dataset management tools"""
+    return [
+        Tool(
+            name="ate_data_upload",
+            description="Upload sensor data or demonstration logs for a skill",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "path": {
+                        "type": "string",
+                        "description": "Path to data directory or file",
+                    },
+                    "skill": {
+                        "type": "string",
+                        "description": "Associated skill ID",
+                    },
+                    "stage": {
+                        "type": "string",
+                        "enum": ["raw", "annotated", "skill-abstracted", "production"],
+                        "description": "Data stage",
+                        "default": "raw",
+                    },
+                },
+                "required": ["path", "skill"],
+            },
+        ),
+        Tool(
+            name="ate_data_list",
+            description="List datasets for a skill",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill": {
+                        "type": "string",
+                        "description": "Filter by skill ID",
+                    },
+                    "stage": {
+                        "type": "string",
+                        "description": "Filter by data stage",
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_data_promote",
+            description="Promote a dataset to the next stage in the pipeline",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "dataset_id": {
+                        "type": "string",
+                        "description": "Dataset ID",
+                    },
+                    "to_stage": {
+                        "type": "string",
+                        "enum": ["annotated", "skill-abstracted", "production"],
+                        "description": "Target stage",
+                    },
+                },
+                "required": ["dataset_id", "to_stage"],
+            },
+        ),
+        Tool(
+            name="ate_data_export",
+            description="Export a dataset in various formats",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "dataset_id": {
+                        "type": "string",
+                        "description": "Dataset ID",
+                    },
+                    "format": {
+                        "type": "string",
+                        "enum": ["json", "rlds", "lerobot", "hdf5"],
+                        "description": "Export format",
+                        "default": "rlds",
+                    },
+                    "output": {
+                        "type": "string",
+                        "description": "Output directory",
+                        "default": "./export",
+                    },
+                },
+                "required": ["dataset_id"],
+            },
+        ),
+    ]
+
+
+def get_deploy_tools() -> List[Tool]:
+    """Deployment management tools"""
+    return [
+        Tool(
+            name="ate_deploy",
+            description="Deploy skills to a robot",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_type": {
+                        "type": "string",
+                        "description": "Robot type to deploy to",
+                    },
+                    "repo_id": {
+                        "type": "string",
+                        "description": "Repository ID (uses current repo if not specified)",
+                    },
+                },
+                "required": ["robot_type"],
+            },
+        ),
+        Tool(
+            name="ate_deploy_config",
+            description="Deploy skills using a deployment configuration file (supports hybrid edge/cloud)",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "config_path": {
+                        "type": "string",
+                        "description": "Path to deploy.yaml configuration",
+                    },
+                    "target": {
+                        "type": "string",
+                        "description": "Target fleet or robot",
+                    },
+                    "dry_run": {
+                        "type": "boolean",
+                        "description": "Show deployment plan without executing",
+                        "default": False,
+                    },
+                },
+                "required": ["config_path", "target"],
+            },
+        ),
+        Tool(
+            name="ate_deploy_status",
+            description="Check deployment status for a fleet or robot",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "target": {
+                        "type": "string",
+                        "description": "Target fleet or robot",
+                    },
+                },
+                "required": ["target"],
+            },
+        ),
+    ]
+
+
+def get_test_tools() -> List[Tool]:
+    """Testing and validation tools"""
+    return [
+        Tool(
+            name="ate_test",
+            description="Test skills in simulation (Gazebo, MuJoCo, PyBullet, Webots)",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "environment": {
+                        "type": "string",
+                        "enum": ["gazebo", "mujoco", "pybullet", "webots"],
+                        "description": "Simulation environment",
+                        "default": "pybullet",
+                    },
+                    "robot": {
+                        "type": "string",
+                        "description": "Robot model to test with",
+                    },
+                    "local": {
+                        "type": "boolean",
+                        "description": "Run simulation locally",
+                        "default": False,
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_validate",
+            description="Run safety and compliance validation checks",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "checks": {
+                        "type": "array",
+                        "items": {"type": "string"},
+                        "description": "Safety checks to run (collision, speed, workspace, force, all)",
+                        "default": ["all"],
+                    },
+                    "strict": {
+                        "type": "boolean",
+                        "description": "Use strict validation (fail on warnings)",
+                        "default": False,
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_benchmark",
+            description="Run performance benchmarks on skills",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "type": {
+                        "type": "string",
+                        "enum": ["speed", "accuracy", "robustness", "efficiency", "all"],
+                        "description": "Benchmark type",
+                        "default": "all",
+                    },
+                    "trials": {
+                        "type": "number",
+                        "description": "Number of trials",
+                        "default": 10,
+                    },
+                    "compare": {
+                        "type": "string",
+                        "description": "Compare with baseline repository ID",
+                    },
+                },
+            },
+        ),
+    ]
+
+
+def get_compiler_tools() -> List[Tool]:
+    """
+    Skill Compiler Tools - Transform skill.yaml specifications into deployable robot skill packages.
+
+    WORKFLOW FOR AI ASSISTANTS:
+    1. Use ate_list_primitives to discover available building blocks
+    2. Help user create skill.yaml with proper format
+    3. Use ate_validate_skill_spec to check for errors
+    4. Use ate_check_skill_compatibility to verify robot compatibility
+    5. Use ate_compile_skill to generate deployable code
+    6. Use ate_test_compiled_skill to test in dry-run or simulation
+    7. Use ate_publish_compiled_skill to share with community
+
+    See docs/SKILL_COMPILER.md for full documentation.
+    """
+    return [
+        Tool(
+            name="ate_compile_skill",
+            description="""Compile a skill.yaml specification into deployable code.
+
+WHEN TO USE: After creating and validating a skill.yaml file, use this to generate
+executable Python code, ROS2 packages, or Docker containers.
+
+TARGETS:
+- python: Standalone Python package (default, simplest)
+- ros2: ROS2-compatible package with launch files
+- docker: Containerized deployment with Dockerfile
+- all: Generate all formats
+
+EXAMPLE:
+{
+    "skill_path": "pick_and_place.skill.yaml",
+    "target": "python",
+    "output": "./dist/pick_and_place"
+}
+
+OUTPUT: Creates a directory with generated code, config files, and dependencies.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_path": {
+                        "type": "string",
+                        "description": "Path to skill.yaml file (e.g., 'skills/pick_place.skill.yaml')",
+                    },
+                    "output": {
+                        "type": "string",
+                        "description": "Output directory (default: ./output). Will be created if doesn't exist.",
+                    },
+                    "target": {
+                        "type": "string",
+                        "enum": ["python", "ros2", "docker", "all"],
+                        "description": "Compilation target: python (simplest), ros2 (for ROS2 robots), docker (containerized), all",
+                        "default": "python",
+                    },
+                    "robot": {
+                        "type": "string",
+                        "description": "Optional: Path to robot URDF for hardware-specific config generation",
+                    },
+                },
+                "required": ["skill_path"],
+            },
+        ),
+        Tool(
+            name="ate_test_compiled_skill",
+            description="""Test a compiled skill without deploying to a real robot.
+
+WHEN TO USE: After compiling a skill, verify it works correctly before deployment.
+
+MODES:
+- dry-run: Traces execution without any robot (fastest, always available)
+- sim: Runs in simulation (requires simulation setup)
+- hardware: Runs on real robot (requires robot_port)
+
+EXAMPLE:
+{
+    "skill_path": "./dist/pick_and_place",
+    "mode": "dry-run",
+    "params": {"speed": 0.3, "grip_force": 15.0}
+}
+
+OUTPUT: Execution trace showing each primitive call and its result.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_path": {
+                        "type": "string",
+                        "description": "Path to compiled skill directory (output from ate_compile_skill)",
+                    },
+                    "mode": {
+                        "type": "string",
+                        "enum": ["sim", "dry-run", "hardware"],
+                        "description": "dry-run=trace only, sim=simulation, hardware=real robot",
+                        "default": "dry-run",
+                    },
+                    "robot_port": {
+                        "type": "string",
+                        "description": "Robot serial port (only for hardware mode, e.g., '/dev/ttyUSB0')",
+                    },
+                    "params": {
+                        "type": "object",
+                        "description": "Override skill parameters (e.g., {\"speed\": 0.5})",
+                    },
+                },
+                "required": ["skill_path"],
+            },
+        ),
+        Tool(
+            name="ate_publish_compiled_skill",
+            description="""Publish a compiled skill to the FoodForThought registry.
+
+WHEN TO USE: When the skill is tested and ready to share with others.
+
+VISIBILITY:
+- public: Anyone can use this skill
+- private: Only you can see it
+- team: Shared with your team members
+
+EXAMPLE:
+{
+    "skill_path": "./dist/pick_and_place",
+    "visibility": "public"
+}
+
+OUTPUT: Registry URL where the skill is published.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_path": {
+                        "type": "string",
+                        "description": "Path to compiled skill directory to publish",
+                    },
+                    "visibility": {
+                        "type": "string",
+                        "enum": ["public", "private", "team"],
+                        "description": "Who can access this skill",
+                        "default": "public",
+                    },
+                },
+                "required": ["skill_path"],
+            },
+        ),
+        Tool(
+            name="ate_check_skill_compatibility",
+            description="""Check if a skill can run on a specific robot.
+
+WHEN TO USE: Before compiling, verify the skill's hardware requirements
+match the target robot's capabilities (DOF, sensors, payload, etc.).
+
+CHECKS PERFORMED:
+- Arm DOF and reach requirements
+- Gripper type and force capabilities
+- Required sensors (cameras, F/T sensors)
+- Workspace bounds
+
+EXAMPLE:
+{
+    "skill_path": "pick_and_place.skill.yaml",
+    "robot_urdf": "robots/ur5/ur5.urdf"
+}
+
+OUTPUT: Compatibility report with score and list of issues/adaptations needed.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_path": {
+                        "type": "string",
+                        "description": "Path to skill.yaml file",
+                    },
+                    "robot_urdf": {
+                        "type": "string",
+                        "description": "Path to robot URDF file",
+                    },
+                    "robot_ate_dir": {
+                        "type": "string",
+                        "description": "Alternative: Path to directory containing ate.yaml robot config",
+                    },
+                },
+                "required": ["skill_path"],
+            },
+        ),
+        Tool(
+            name="ate_list_primitives",
+            description="""List available primitives (building blocks) for creating skills.
+
+WHEN TO USE: When starting to create a skill, or when you need to know what
+operations are available for a specific hardware type.
+
+CATEGORIES:
+- motion: Movement primitives (move_to_pose, move_linear, etc.)
+- gripper: Gripper actions (open_gripper, close_gripper)
+- sensing: Sensor reading (capture_image, read_force_torque)
+- wait: Timing and conditions (wait_time, wait_for_contact)
+- control: Control modes (set_control_mode, enable_compliance)
+
+EXAMPLE - List all motion primitives:
+{
+    "category": "motion"
+}
+
+EXAMPLE - List primitives that need a camera:
+{
+    "hardware": "camera"
+}
+
+OUTPUT: List of primitives with their descriptions and parameters.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "category": {
+                        "type": "string",
+                        "enum": ["motion", "gripper", "sensing", "wait", "control", "all"],
+                        "description": "Filter by category (use 'all' to see everything)",
+                        "default": "all",
+                    },
+                    "hardware": {
+                        "type": "string",
+                        "description": "Filter by hardware type: arm, gripper, camera, force_torque_sensor",
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_get_primitive",
+            description="""Get detailed information about a specific primitive.
+
+WHEN TO USE: When you need to know the exact parameters and requirements
+for a primitive before using it in a skill.
+
+COMMON PRIMITIVES:
+- move_to_pose: Move end-effector to a 7D pose [x,y,z,qx,qy,qz,qw]
+- move_to_joint_positions: Move to specific joint angles
+- open_gripper / close_gripper: Gripper control
+- wait_for_contact: Wait until force threshold is reached
+- capture_image: Take a camera image
+
+EXAMPLE:
+{
+    "name": "move_to_pose"
+}
+
+OUTPUT: Full primitive definition with all parameters, types, defaults, and hardware requirements.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "name": {
+                        "type": "string",
+                        "description": "Primitive name (e.g., 'move_to_pose', 'close_gripper', 'wait_for_contact')",
+                    },
+                },
+                "required": ["name"],
+            },
+        ),
+        Tool(
+            name="ate_validate_skill_spec",
+            description="""Validate a skill.yaml file without compiling.
+
+WHEN TO USE: After creating or modifying a skill.yaml, check for errors before compiling.
+
+CHECKS PERFORMED:
+- YAML syntax validity
+- Required fields (name, version, description, execution)
+- Parameter type validity
+- Primitive names exist in registry
+- Template expression syntax
+- Hardware requirement format
+
+EXAMPLE:
+{
+    "skill_path": "my_skill.skill.yaml"
+}
+
+OUTPUT:
+- If valid: Summary of skill (name, version, parameter count, etc.)
+- If invalid: List of errors with line numbers and fix suggestions.""",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "skill_path": {
+                        "type": "string",
+                        "description": "Path to skill.yaml file to validate",
+                    },
+                },
+                "required": ["skill_path"],
+            },
+        ),
+    ]
+
+
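The compiler tools above all revolve around a skill.yaml specification. A minimal sketch of what such a spec might look like, written from Python; the required fields (name, version, description, execution) and the primitive names are taken from the tool descriptions above, while the remaining field names are assumptions, since the actual schema lives in ate/skill_schema.py and is not shown in this diff:

# Sketch: build a candidate skill.yaml for the compiler workflow described above.
# Fields beyond name/version/description/execution are assumptions.
import yaml

spec = {
    "name": "pick_and_place",
    "version": "0.1.0",
    "description": "Pick up a box and place it on a pallet",
    "parameters": {
        "speed": {"type": "number", "default": 0.3},
        "grip_force": {"type": "number", "default": 15.0},
    },
    "execution": [
        {"primitive": "move_to_pose", "args": {"pose": [0.4, 0.0, 0.3, 0, 0, 0, 1]}},
        {"primitive": "close_gripper", "args": {"force": "{{ grip_force }}"}},
        {"primitive": "wait_for_contact", "args": {"threshold": 5.0}},
        {"primitive": "move_to_pose", "args": {"pose": [0.2, 0.3, 0.3, 0, 0, 0, 1]}},
        {"primitive": "open_gripper"},
    ],
}

with open("pick_and_place.skill.yaml", "w") as f:
    yaml.safe_dump(spec, f, sort_keys=False)
# Next steps per the docstring: ate_validate_skill_spec, ate_check_skill_compatibility,
# ate_compile_skill, ate_test_compiled_skill, ate_publish_compiled_skill.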
+def get_protocol_tools() -> List[Tool]:
+    """Protocol registry tools"""
+    return [
+        Tool(
+            name="ate_protocol_list",
+            description="List protocols from the FoodForThought protocol registry. Protocols document how to communicate with robot hardware (BLE, serial, WiFi, CAN, etc.)",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_model": {
+                        "type": "string",
+                        "description": "Filter by robot model name (e.g., 'mechdog', 'ur5')",
+                    },
+                    "transport_type": {
+                        "type": "string",
+                        "enum": ["ble", "serial", "wifi", "can", "i2c", "spi", "mqtt", "ros2"],
+                        "description": "Filter by transport type",
+                    },
+                    "verified_only": {
+                        "type": "boolean",
+                        "description": "Show only community-verified protocols",
+                        "default": False,
+                    },
+                    "search": {
+                        "type": "string",
+                        "description": "Search in command format and discovery notes",
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_protocol_get",
+            description="Get detailed protocol information including BLE characteristics, serial config, command schema, and associated primitive skills",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "protocol_id": {
+                        "type": "string",
+                        "description": "Protocol ID to fetch",
+                    },
+                },
+                "required": ["protocol_id"],
+            },
+        ),
+        Tool(
+            name="ate_protocol_init",
+            description="Initialize a new protocol template for a robot. Creates protocol.json and README with transport-specific fields to fill in",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_model": {
+                        "type": "string",
+                        "description": "Robot model name (e.g., 'hiwonder-mechdog-pro')",
+                    },
+                    "transport_type": {
+                        "type": "string",
+                        "enum": ["ble", "serial", "wifi", "can", "i2c", "spi", "mqtt", "ros2"],
+                        "description": "Transport type for communication",
+                    },
+                    "output_dir": {
+                        "type": "string",
+                        "description": "Output directory for protocol files",
+                        "default": "./protocol",
+                    },
+                },
+                "required": ["robot_model", "transport_type"],
+            },
+        ),
+        Tool(
+            name="ate_protocol_push",
+            description="Upload a protocol definition to FoodForThought registry for community use",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "protocol_file": {
+                        "type": "string",
+                        "description": "Path to protocol.json file (default: ./protocol.json)",
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_protocol_scan_serial",
+            description="Scan for available serial ports on the system. Useful for discovering connected robot hardware",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+            },
+        ),
+        Tool(
+            name="ate_protocol_scan_ble",
+            description="Scan for BLE devices in range. Useful for discovering robot devices before connecting",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+            },
+        ),
+    ]
+
+
+def get_primitive_tools() -> List[Tool]:
+    """Primitive skills tools"""
+    return [
+        Tool(
+            name="ate_primitive_list",
+            description="List primitive skills - tested atomic robot operations like 'tilt_forward', 'gripper_close', etc. with safe parameter ranges",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "robot_model": {
+                        "type": "string",
+                        "description": "Filter by robot model name",
+                    },
+                    "category": {
+                        "type": "string",
+                        "enum": ["body_pose", "arm", "gripper", "locomotion", "head", "sensing", "manipulation", "navigation"],
+                        "description": "Filter by primitive category",
+                    },
+                    "status": {
+                        "type": "string",
+                        "enum": ["experimental", "tested", "verified", "deprecated"],
+                        "description": "Filter by status",
+                    },
+                    "tested_only": {
+                        "type": "boolean",
+                        "description": "Show only tested/verified primitives",
+                        "default": False,
+                    },
+                },
+            },
+        ),
+        Tool(
+            name="ate_primitive_get",
+            description="Get detailed primitive skill info including command template, tested parameters with safe ranges, timing, safety notes, and dependencies",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "primitive_id": {
+                        "type": "string",
+                        "description": "Primitive skill ID to fetch",
+                    },
+                },
+                "required": ["primitive_id"],
+            },
+        ),
+        Tool(
+            name="ate_primitive_test",
+            description="Submit a test result for a primitive skill. Contributes to reliability score and helps verify safe operation ranges",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "primitive_id": {
+                        "type": "string",
+                        "description": "Primitive skill ID to test",
+                    },
+                    "params": {
+                        "type": "string",
+                        "description": "Parameters used in test as JSON string (e.g., '{\"pitch\": 15}')",
+                    },
+                    "result": {
+                        "type": "string",
+                        "enum": ["pass", "fail", "partial"],
+                        "description": "Test result",
+                    },
+                    "notes": {
+                        "type": "string",
+                        "description": "Additional notes about the test",
+                    },
+                    "video_url": {
+                        "type": "string",
+                        "description": "URL to video recording of test",
+                    },
+                },
+                "required": ["primitive_id", "params", "result"],
+            },
+        ),
+        Tool(
+            name="ate_primitive_deps_show",
+            description="Show dependency graph for a primitive skill. Shows what primitives it depends on and what depends on it. Indicates deployment readiness",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "primitive_id": {
+                        "type": "string",
+                        "description": "Primitive skill ID",
+                    },
+                },
+                "required": ["primitive_id"],
+            },
+        ),
+        Tool(
+            name="ate_primitive_deps_add",
+            description="Add a dependency to a primitive skill. Creates deployment gates to ensure required primitives are tested before deployment",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "primitive_id": {
+                        "type": "string",
+                        "description": "Primitive skill ID (the one that depends)",
+                    },
+                    "required_id": {
+                        "type": "string",
+                        "description": "Required primitive skill ID",
+                    },
+                    "dependency_type": {
+                        "type": "string",
+                        "enum": ["requires", "extends", "overrides", "optional"],
+                        "description": "Type of dependency",
+                        "default": "requires",
+                    },
+                    "min_status": {
+                        "type": "string",
+                        "enum": ["experimental", "tested", "verified"],
+                        "description": "Minimum required status for deployment",
+                        "default": "tested",
+                    },
+                },
+                "required": ["primitive_id", "required_id"],
+            },
+        ),
+    ]
+
+
+def get_bridge_tools() -> List[Tool]:
+    """Robot bridge tools for interactive communication"""
+    return [
+        Tool(
+            name="ate_bridge_scan_serial",
+            description="Scan for available serial ports. Use this to discover connected robots before using bridge connect",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+                "required": [],
+            },
+        ),
+        Tool(
+            name="ate_bridge_scan_ble",
+            description="Scan for BLE devices. Use this to discover bluetooth robots before using bridge connect",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+                "required": [],
+            },
+        ),
+        Tool(
+            name="ate_bridge_send",
+            description="Send a single command to a robot and get the response. Useful for quick tests without opening a full interactive session",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "port": {
+                        "type": "string",
+                        "description": "Serial port (e.g., /dev/tty.usbserial-0001) or BLE address",
+                    },
+                    "command": {
+                        "type": "string",
+                        "description": "Command to send to the robot",
+                    },
+                    "transport": {
+                        "type": "string",
+                        "enum": ["serial", "ble"],
+                        "description": "Transport type",
+                        "default": "serial",
+                    },
+                    "baud_rate": {
+                        "type": "integer",
+                        "description": "Baud rate for serial connection",
+                        "default": 115200,
+                    },
+                    "wait": {
+                        "type": "number",
+                        "description": "Wait time for response in seconds",
+                        "default": 0.5,
+                    },
+                },
+                "required": ["port", "command"],
+            },
+        ),
+        Tool(
+            name="ate_bridge_replay",
+            description="Replay a recorded robot session. Useful for testing primitives or reproducing sequences",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "recording": {
+                        "type": "string",
+                        "description": "Path to recording JSON file",
+                    },
+                    "port": {
+                        "type": "string",
+                        "description": "Serial port or BLE address",
+                    },
+                    "transport": {
+                        "type": "string",
+                        "enum": ["serial", "ble"],
+                        "description": "Transport type",
+                        "default": "serial",
+                    },
+                    "baud_rate": {
+                        "type": "integer",
+                        "description": "Baud rate for serial connection",
+                        "default": 115200,
+                    },
+                    "speed": {
+                        "type": "number",
+                        "description": "Playback speed multiplier (1.0 = normal, 2.0 = 2x speed)",
+                        "default": 1.0,
+                    },
+                },
+                "required": ["recording", "port"],
+            },
+        ),
+    ]
+
+
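The bridge implementation itself lives in ate/bridge_server.py, which is not shown in this diff. For orientation, a minimal sketch of the serial round-trip that ate_bridge_send describes (one command out, a short wait, then read whatever came back), assuming pyserial and using the same parameter names and defaults as the tool above; the port value and line terminator are assumptions:

# Sketch: the kind of serial round-trip ate_bridge_send describes (pyserial assumed).
import time
import serial  # pip install pyserial

def send_command(port, command, baud_rate=115200, wait=0.5):
    with serial.Serial(port, baud_rate, timeout=wait) as conn:
        conn.write((command + "\n").encode())  # newline terminator is an assumption
        time.sleep(wait)                        # give the robot time to answer
        return conn.read_all().decode(errors="replace")

print(send_command("/dev/tty.usbserial-0001", "status"))  # port and command are illustrative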
+@server.list_tools()
+async def list_tools() -> List[Tool]:
+    """List all available MCP tools"""
+    tools = []
+    tools.extend(get_repository_tools())
+    tools.extend(get_robot_tools())
+    tools.extend(get_marketplace_tools())  # Phase 6: Unified marketplace
+    tools.extend(get_compatibility_tools())
+    tools.extend(get_skill_tools())
+    tools.extend(get_protocol_tools())
+    tools.extend(get_primitive_tools())
+    tools.extend(get_bridge_tools())
+    tools.extend(get_parts_tools())
+    tools.extend(get_generate_tools())
+    tools.extend(get_workflow_tools())
+    tools.extend(get_team_tools())
+    tools.extend(get_data_tools())
+    tools.extend(get_deploy_tools())
+    tools.extend(get_test_tools())
+    tools.extend(get_compiler_tools())
+    return tools
+
+
+# ============================================================================
+# Tool Handlers
+# ============================================================================
+
+def capture_output(func, *args, **kwargs):
+    """Capture printed output from a function"""
+    import io
+    import contextlib
+
+    f = io.StringIO()
+    with contextlib.redirect_stdout(f):
+        try:
+            result = func(*args, **kwargs)
+        except SystemExit:
+            pass  # CLI functions may call sys.exit
+    return f.getvalue()
+
+
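capture_output exists because the ATEClient methods print to stdout rather than returning strings; wrapping a call is enough to turn that printed output into a tool response. A short usage sketch (the repository ID and target directory are illustrative):

# Usage sketch for capture_output: turn a printing CLI method into a string.
text = capture_output(client.clone, "repo-123", "./workdir")
print(text or "Repository cloned successfully")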
@server.call_tool()
|
|
1598
|
+
async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
|
|
1599
|
+
"""Handle tool calls"""
|
|
1600
|
+
try:
|
|
1601
|
+
# Repository tools
|
|
1602
|
+
if name == "ate_init":
|
|
1603
|
+
result = client.init(
|
|
1604
|
+
arguments["name"],
|
|
1605
|
+
arguments.get("description", ""),
|
|
1606
|
+
arguments.get("visibility", "public"),
|
|
1607
|
+
)
|
|
1608
|
+
return [
|
|
1609
|
+
TextContent(
|
|
1610
|
+
type="text",
|
|
1611
|
+
text=f"Repository created successfully!\nID: {result['repository']['id']}\nName: {result['repository']['name']}",
|
|
1612
|
+
)
|
|
1613
|
+
]
|
|
1614
|
+
|
|
1615
|
+
elif name == "ate_clone":
|
|
1616
|
+
output = capture_output(
|
|
1617
|
+
client.clone,
|
|
1618
|
+
arguments["repo_id"],
|
|
1619
|
+
arguments.get("target_dir")
|
|
1620
|
+
)
|
|
1621
|
+
return [TextContent(type="text", text=output or f"Repository cloned successfully")]
|
|
1622
|
+
|
|
1623
|
+
elif name == "ate_list_repositories":
|
|
1624
|
+
params = {}
|
|
1625
|
+
if arguments.get("search"):
|
|
1626
|
+
params["search"] = arguments["search"]
|
|
1627
|
+
if arguments.get("robot_model"):
|
|
1628
|
+
params["robotModel"] = arguments["robot_model"]
|
|
1629
|
+
params["limit"] = arguments.get("limit", 20)
|
|
1630
|
+
|
|
1631
|
+
response = client._request("GET", "/repositories", params=params)
|
|
1632
|
+
repos = response.get("repositories", [])
|
|
1633
|
+
|
|
1634
|
+
result_text = f"Found {len(repos)} repositories:\n\n"
|
|
1635
|
+
for repo in repos[:10]:
|
|
1636
|
+
result_text += f"- {repo['name']} (ID: {repo['id']})\n"
|
|
1637
|
+
if repo.get("description"):
|
|
1638
|
+
result_text += f" {repo['description'][:100]}...\n"
|
|
1639
|
+
|
|
1640
|
+
return [TextContent(type="text", text=result_text)]
|
|
1641
|
+
|
|
1642
|
+
elif name == "ate_get_repository":
|
|
1643
|
+
response = client._request("GET", f"/repositories/{arguments['repo_id']}")
|
|
1644
|
+
repo = response.get("repository", {})
|
|
1645
|
+
|
|
1646
|
+
result_text = f"Repository: {repo.get('name', 'Unknown')}\n"
|
|
1647
|
+
result_text += f"ID: {repo.get('id', 'Unknown')}\n"
|
|
1648
|
+
result_text += f"Description: {repo.get('description', 'No description')}\n"
|
|
1649
|
+
result_text += f"Visibility: {repo.get('visibility', 'unknown')}\n"
|
|
1650
|
+
|
|
1651
|
+
return [TextContent(type="text", text=result_text)]
+
+        # Robot tools
+        elif name == "ate_list_robots":
+            params = {}
+            if arguments.get("search"):
+                params["search"] = arguments["search"]
+            if arguments.get("category"):
+                params["category"] = arguments["category"]
+            params["limit"] = arguments.get("limit", 20)
+
+            response = client._request("GET", "/robots/profiles", params=params)
+            robots = response.get("profiles", [])
+
+            result_text = f"Found {len(robots)} robot profiles:\n\n"
+            for robot in robots[:10]:
+                result_text += f"- {robot['modelName']} by {robot['manufacturer']} (ID: {robot['id']})\n"
+                if robot.get("description"):
+                    result_text += f" {robot['description'][:100]}...\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        elif name == "ate_get_robot":
+            response = client._request("GET", f"/robots/profiles/{arguments['robot_id']}")
+            robot = response.get("profile", {})
+
+            result_text = f"Robot: {robot.get('modelName', 'Unknown')}\n"
+            result_text += f"Manufacturer: {robot.get('manufacturer', 'Unknown')}\n"
+            result_text += f"Category: {robot.get('category', 'Unknown')}\n"
+            result_text += f"Description: {robot.get('description', 'No description')}\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        # Marketplace tools (Phase 6)
+        elif name == "ate_marketplace_robots":
+            params = {}
+            if arguments.get("search"):
+                params["q"] = arguments["search"]
+            if arguments.get("category"):
+                params["category"] = arguments["category"]
+            if arguments.get("sort"):
+                params["sortBy"] = arguments["sort"]
+            params["limit"] = arguments.get("limit", 20)
+
+            response = client._request("GET", "/robots/unified", params=params)
+            robots = response.get("robots", [])
+
+            if not robots:
+                return [TextContent(type="text", text="No robots found matching your criteria.")]
+
+            result_text = f"Found {len(robots)} robots:\n\n"
+            for robot in robots[:20]:
+                result_text += f"- **{robot.get('name', 'Unknown')}** ({robot.get('manufacturer', 'Unknown')})\n"
+                result_text += f" ID: {robot.get('id')} | Category: {robot.get('category')} | DOF: {robot.get('dof', 'N/A')}\n"
+                if robot.get("description"):
+                    result_text += f" {robot['description'][:100]}...\n"
+                result_text += "\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        elif name == "ate_marketplace_robot":
+            robot_id = arguments["robot_id"]
+            response = client._request("GET", f"/robots/unified/{robot_id}")
+            robot = response.get("robot", {})
+
+            if not robot:
+                return [TextContent(type="text", text=f"Robot not found: {robot_id}")]
+
+            result_text = f"# {robot.get('name', 'Unknown')}\n\n"
+            result_text += f"**Manufacturer:** {robot.get('manufacturer', 'Unknown')}\n"
+            result_text += f"**Category:** {robot.get('category', 'Unknown')}\n"
+            result_text += f"**DOF:** {robot.get('dof', 'N/A')}\n"
+            result_text += f"**Downloads:** {robot.get('downloads', 0)}\n\n"
+
+            if robot.get("description"):
+                result_text += f"## Description\n{robot['description']}\n\n"
+
+            links = robot.get("links", [])
+            if links:
+                result_text += f"## Links ({len(links)})\n"
+                for link in links[:5]:
+                    result_text += f"- {link.get('name', 'Unnamed')}\n"
+
+            joints = robot.get("joints", [])
+            if joints:
+                result_text += f"\n## Joints ({len(joints)})\n"
+                for joint in joints[:5]:
+                    result_text += f"- {joint.get('name', 'Unnamed')}: {joint.get('type', 'unknown')}\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        elif name == "ate_marketplace_components":
+            params = {}
+            if arguments.get("search"):
+                params["q"] = arguments["search"]
+            if arguments.get("type"):
+                params["type"] = arguments["type"]
+            if arguments.get("sort"):
+                params["sortBy"] = arguments["sort"]
+            params["limit"] = arguments.get("limit", 20)
+
+            response = client._request("GET", "/components", params=params)
+            components = response.get("components", [])
+
+            if not components:
+                return [TextContent(type="text", text="No components found matching your criteria.")]
+
+            result_text = f"Found {len(components)} components:\n\n"
+            for comp in components[:20]:
+                verified = " ✓" if comp.get("verified") else ""
+                result_text += f"- **{comp.get('name', 'Unknown')}** v{comp.get('version', '1.0')}{verified}\n"
+                result_text += f" ID: {comp.get('id')} | Type: {comp.get('type')} | Downloads: {comp.get('downloads', 0)}\n"
+                if comp.get("description"):
+                    result_text += f" {comp['description'][:80]}...\n"
+                result_text += "\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        elif name == "ate_marketplace_component":
+            component_id = arguments["component_id"]
+            response = client._request("GET", f"/components/{component_id}")
+            comp = response.get("component", {})
+
+            if not comp:
+                return [TextContent(type="text", text=f"Component not found: {component_id}")]
+
+            verified = "Yes ✓" if comp.get("verified") else "No"
+            result_text = f"# {comp.get('name', 'Unknown')}\n\n"
+            result_text += f"**Type:** {comp.get('type', 'Unknown')}\n"
+            result_text += f"**Version:** {comp.get('version', '1.0')}\n"
+            result_text += f"**Verified:** {verified}\n"
+            result_text += f"**Downloads:** {comp.get('downloads', 0)}\n\n"
+
+            if comp.get("description"):
+                result_text += f"## Description\n{comp['description']}\n\n"
+
+            return [TextContent(type="text", text=result_text)]
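The marketplace handlers above translate MCP tool arguments into REST query-string names: for the unified marketplace endpoints `search` becomes `q`, `sort` becomes `sortBy`, and the repository listing maps `robot_model` to `robotModel`. A small table-driven sketch of that same translation; the rename table is read off the handlers above, while the helper function itself is hypothetical and not part of `ate/mcp_server.py`:

```python
from typing import Any, Dict

# Renames observed in the handlers above (marketplace endpoints use "q"/"sortBy").
PARAM_ALIASES = {"search": "q", "sort": "sortBy", "robot_model": "robotModel"}


def build_query(arguments: Dict[str, Any], default_limit: int = 20) -> Dict[str, Any]:
    """Map MCP tool arguments onto the REST query-string parameter names."""
    params: Dict[str, Any] = {}
    for key in ("search", "sort", "category", "type", "robot_model"):
        value = arguments.get(key)
        if value:
            params[PARAM_ALIASES.get(key, key)] = value
    params["limit"] = arguments.get("limit", default_limit)
    return params


print(build_query({"search": "gripper", "sort": "downloads"}))
# {'q': 'gripper', 'sortBy': 'downloads', 'limit': 20}
```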
+
+        elif name == "ate_skill_transfer_check":
+            robot_id = arguments["robot_id"]
+            direction = arguments.get("direction", "from")
+            min_score = arguments.get("min_score", 0.4)
+            limit = arguments.get("limit", 10)
+
+            params = {
+                "direction": direction,
+                "minScore": min_score,
+                "limit": limit,
+            }
+            response = client._request("GET", f"/robots/unified/{robot_id}/skill-transfer", params=params)
+
+            if response.get("error"):
+                return [TextContent(type="text", text=f"Error: {response['error']}")]
+
+            source = response.get("sourceRobot", {})
+            results = response.get("results", [])
+
+            if not results:
+                return [TextContent(type="text", text=f"No compatible robots found for skill transfer from {source.get('name', robot_id)}.")]
+
+            result_text = f"# Skill Transfer Compatibility for {source.get('name', 'Unknown')}\n\n"
+            result_text += f"Direction: Skills can transfer **{direction}** this robot\n\n"
+
+            result_text += f"## Compatible Robots ({len(results)})\n\n"
+            for item in results:
+                robot = item.get("robot", {})
+                scores = item.get("scores", {})
+                adaptation = item.get("adaptationType", "unknown")
+                result_text += f"- **{robot.get('name', 'Unknown')}** - {int(scores.get('overall', 0) * 100)}% ({adaptation})\n"
+                result_text += f" Category: {robot.get('category')} | DOF: {robot.get('dof', 'N/A')}\n\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        elif name == "ate_robot_parts":
+            robot_id = arguments["robot_id"]
+            response = client._request("GET", f"/robots/unified/{robot_id}/parts")
+
+            if response.get("error"):
+                return [TextContent(type="text", text=f"Error: {response['error']}")]
+
+            robot_name = response.get("robotName", robot_id)
+            required = response.get("requiredParts", [])
+            compatible = response.get("compatibleParts", [])
+
+            result_text = f"# Parts for {robot_name}\n\n"
+
+            if required:
+                result_text += f"## Required Parts ({len(required)})\n"
+                for req in required:
+                    comp = req.get("component", {})
+                    result_text += f"- **{comp.get('name', 'Unknown')}** ({comp.get('type', 'unknown')})\n"
+                    result_text += f" Quantity: {req.get('quantity', 1)} | Required: {'Yes' if req.get('required') else 'Optional'}\n\n"
+            else:
+                result_text += "## Required Parts\nNo required parts specified.\n\n"
+
+            if compatible:
+                result_text += f"## Compatible Parts ({len(compatible)})\n"
+                for compat in compatible[:10]:
+                    comp = compat.get("component", {})
+                    score = compat.get("compatibilityScore", 0)
+                    result_text += f"- **{comp.get('name', 'Unknown')}** ({comp.get('type', 'unknown')})\n"
+                    result_text += f" Compatibility: {int(score * 100)}%\n\n"
+
+            return [TextContent(type="text", text=result_text)]
+
+        elif name == "ate_component_robots":
+            component_id = arguments["component_id"]
+            response = client._request("GET", f"/components/{component_id}/compatible-robots")
+
+            if response.get("error"):
+                return [TextContent(type="text", text=f"Error: {response['error']}")]
+
+            comp_name = response.get("componentName", component_id)
+            required_by = response.get("requiredBy", [])
+            compatible_with = response.get("compatibleWith", [])
+
+            result_text = f"# Robots for {comp_name}\n\n"
+
+            if required_by:
+                result_text += f"## Required By ({len(required_by)} robots)\n"
+                for req in required_by:
+                    robot = req.get("robot", {})
+                    result_text += f"- **{robot.get('name', 'Unknown')}** ({robot.get('category', 'unknown')})\n"
+                    result_text += f" Quantity: {req.get('quantity', 1)}\n\n"
+            else:
+                result_text += "## Required By\nNo robots require this component.\n\n"
+
+            if compatible_with:
+                result_text += f"## Compatible With ({len(compatible_with)} robots)\n"
+                for compat in compatible_with[:10]:
+                    robot = compat.get("robot", {})
+                    score = compat.get("compatibilityScore", 0)
+                    verified = " ✓" if compat.get("verified") else ""
+                    result_text += f"- **{robot.get('name', 'Unknown')}** ({robot.get('category', 'unknown')})\n"
+                    result_text += f" Compatibility: {int(score * 100)}%{verified}\n\n"
+
+            return [TextContent(type="text", text=result_text)]
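The skill-transfer handler above expects each result to carry a `scores` dict with an `overall` value in [0, 1] and renders it as a percentage alongside the adaptation type. A hedged sketch of just that formatting step; the response shape is inferred from the handler, not from API documentation:

```python
# Illustrative only: the result shape ({"robot": ..., "scores": {"overall": ...},
# "adaptationType": ...}) mirrors what the handler above reads.
from typing import Dict, List


def format_transfer_results(results: List[Dict]) -> str:
    lines = []
    for item in results:
        robot = item.get("robot", {})
        overall = item.get("scores", {}).get("overall", 0)
        lines.append(
            f"- {robot.get('name', 'Unknown')}: "
            f"{int(overall * 100)}% ({item.get('adaptationType', 'unknown')})"
        )
    return "\n".join(lines)


sample = [{"robot": {"name": "UR5e"}, "scores": {"overall": 0.87}, "adaptationType": "parametric"}]
print(format_transfer_results(sample))
# - UR5e: 87% (parametric)
```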
+
+        # Compatibility tools
+        elif name == "ate_check_transfer":
+            output = capture_output(
+                client.check_transfer,
+                arguments.get("skill_id"),
+                arguments["source_robot"],
+                arguments["target_robot"],
+                arguments.get("min_score", 0.0)
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_adapt":
+            output = capture_output(
+                client.adapt,
+                arguments["source_robot"],
+                arguments["target_robot"],
+                arguments.get("repo_id"),
+                arguments.get("analyze_only", True)
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Skill tools
+        elif name == "ate_pull":
+            output = capture_output(
+                client.pull,
+                arguments["skill_id"],
+                arguments.get("robot"),
+                arguments.get("format", "json"),
+                arguments.get("output", "./data")
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_upload":
+            output = capture_output(
+                client.upload,
+                arguments["path"],
+                arguments["robot"],
+                arguments["task"],
+                arguments.get("project")
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Parts tools
+        elif name == "ate_parts_list":
+            output = capture_output(
+                client.parts_list,
+                arguments.get("category"),
+                arguments.get("manufacturer"),
+                arguments.get("search")
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_parts_check":
+            output = capture_output(
+                client.parts_check,
+                arguments["skill_id"]
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_parts_require":
+            output = capture_output(
+                client.parts_require,
+                arguments["part_id"],
+                arguments["skill_id"],
+                arguments.get("version", "1.0.0"),
+                arguments.get("required", False)
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_deps_audit":
+            output = capture_output(
+                client.deps_audit,
+                arguments.get("skill_id")
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Generate tools
+        elif name == "ate_generate":
+            output = capture_output(
+                client.generate,
+                arguments["description"],
+                arguments.get("robot", "ur5"),
+                arguments.get("output", "./new-skill")
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Workflow tools
+        elif name == "ate_workflow_validate":
+            output = capture_output(
+                client.workflow_validate,
+                arguments["path"]
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_workflow_run":
+            output = capture_output(
+                client.workflow_run,
+                arguments["path"],
+                arguments.get("sim", True),
+                arguments.get("dry_run", False)
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_workflow_export":
+            output = capture_output(
+                client.workflow_export,
+                arguments["path"],
+                arguments.get("format", "ros2"),
+                arguments.get("output")
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Team tools
+        elif name == "ate_team_create":
+            output = capture_output(
+                client.team_create,
                 arguments["name"],
-                arguments.get("description"
-                arguments.get("visibility", "public"),
+                arguments.get("description")
             )
-            return [
-                TextContent(
-                    type="text",
-                    text=f"Repository created successfully!\nID: {result['repository']['id']}\nName: {result['repository']['name']}",
-                )
-            ]
+            return [TextContent(type="text", text=output)]

-        elif name == "
-            client.
-
+        elif name == "ate_team_list":
+            output = capture_output(client.team_list)
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_team_invite":
+            output = capture_output(
+                client.team_invite,
+                arguments["email"],
+                arguments["team"],
+                arguments.get("role", "member")
             )
-            return [
-                TextContent(
-                    type="text",
-                    text=f"Repository cloned successfully to {arguments.get('target_dir', 'current directory')}",
-                )
-            ]
+            return [TextContent(type="text", text=output)]

-        elif name == "
-
-
-
-
-
-            params["limit"] = arguments.get("limit", 20)
+        elif name == "ate_team_share":
+            output = capture_output(
+                client.team_share,
+                arguments["skill_id"],
+                arguments["team"]
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
+        # Data tools
+        elif name == "ate_data_upload":
+            output = capture_output(
+                client.data_upload,
+                arguments["path"],
+                arguments["skill"],
+                arguments.get("stage", "raw")
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
-
-
+        elif name == "ate_data_list":
+            output = capture_output(
+                client.data_list,
+                arguments.get("skill"),
+                arguments.get("stage")
+            )
+            return [TextContent(type="text", text=output)]

-
+        elif name == "ate_data_promote":
+            output = capture_output(
+                client.data_promote,
+                arguments["dataset_id"],
+                arguments["to_stage"]
+            )
+            return [TextContent(type="text", text=output)]
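From `ate_check_transfer` onward, most branches share one shape: pick a client method, pass a fixed positional argument list, and wrap the captured stdout in a single `TextContent`. A table-driven dispatch is one way a chain like this is often condensed; the sketch below is purely illustrative and is not code from `ate/mcp_server.py` (the table entries mirror three handlers above, the fake client is invented):

```python
# Illustrative refactor sketch only; not part of the package.
from typing import Any, Dict, List, Tuple

# tool name -> (client method name, required argument keys, (optional key, default) pairs)
CLI_TOOLS: Dict[str, Tuple[str, List[str], List[Tuple[str, Any]]]] = {
    "ate_parts_check": ("parts_check", ["skill_id"], []),
    "ate_deps_audit": ("deps_audit", [], [("skill_id", None)]),
    "ate_generate": ("generate", ["description"], [("robot", "ur5"), ("output", "./new-skill")]),
}


def dispatch(client: Any, name: str, arguments: Dict[str, Any]) -> Any:
    """Resolve a tool name to a client method and call it with positional args."""
    method_name, required, optional = CLI_TOOLS[name]
    args = [arguments[key] for key in required]
    args += [arguments.get(key, default) for key, default in optional]
    # In the real server each call is additionally wrapped with capture_output().
    return getattr(client, method_name)(*args)


class _FakeClient:
    def generate(self, description, robot, output):
        return f"would scaffold '{description}' for {robot} in {output}"


print(dispatch(_FakeClient(), "ate_generate", {"description": "pick and place"}))
```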

-        elif name == "
-
-
-
-
-
-
-
+        elif name == "ate_data_export":
+            output = capture_output(
+                client.data_export,
+                arguments["dataset_id"],
+                arguments.get("format", "rlds"),
+                arguments.get("output", "./export")
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
+        # Deploy tools
+        elif name == "ate_deploy":
+            output = capture_output(
+                client.deploy,
+                arguments["robot_type"],
+                arguments.get("repo_id")
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
-
-
+        elif name == "ate_deploy_config":
+            output = capture_output(
+                client.deploy_config,
+                arguments["config_path"],
+                arguments["target"],
+                arguments.get("dry_run", False)
+            )
+            return [TextContent(type="text", text=output)]

-
+        elif name == "ate_deploy_status":
+            output = capture_output(
+                client.deploy_status,
+                arguments["target"]
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
-
-
-
-
-                    "repositoryId": arguments["repository_id"],
-                },
+        # Test tools
+        elif name == "ate_test":
+            output = capture_output(
+                client.test,
+                arguments.get("environment", "pybullet"),
+                arguments.get("robot"),
+                arguments.get("local", False)
             )
-
-            score = compatibility.get("overallScore", 0) * 100
+            return [TextContent(type="text", text=output)]

-
-
-
+        elif name == "ate_validate":
+            output = capture_output(
+                client.validate,
+                arguments.get("checks", ["all"]),
+                arguments.get("strict", False),
+                None  # files
+            )
+            return [TextContent(type="text", text=output)]

-
+        elif name == "ate_benchmark":
+            output = capture_output(
+                client.benchmark,
+                arguments.get("type", "all"),
+                arguments.get("trials", 10),
+                arguments.get("compare")
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
-
-
-
-
-
-                },
+        # Protocol tools
+        elif name == "ate_protocol_list":
+            output = capture_output(
+                client.protocol_list,
+                arguments.get("robot_model"),
+                arguments.get("transport_type"),
+                arguments.get("verified_only", False),
+                arguments.get("search")
             )
-
-            compatibility = response.get("compatibility", {})
+            return [TextContent(type="text", text=output)]

-
-
+        elif name == "ate_protocol_get":
+            output = capture_output(
+                client.protocol_get,
+                arguments["protocol_id"]
+            )
+            return [TextContent(type="text", text=output)]

-
-
-
+        elif name == "ate_protocol_init":
+            output = capture_output(
+                client.protocol_init,
+                arguments["robot_model"],
+                arguments["transport_type"],
+                arguments.get("output_dir", "./protocol")
+            )
+            return [TextContent(type="text", text=output)]

-
+        elif name == "ate_protocol_push":
+            output = capture_output(
+                client.protocol_push,
+                arguments.get("protocol_file")
+            )
+            return [TextContent(type="text", text=output)]

-        elif name == "
-
-
+        elif name == "ate_protocol_scan_serial":
+            output = capture_output(client.protocol_scan_serial)
+            return [TextContent(type="text", text=output)]

-
-
-
-
+        elif name == "ate_protocol_scan_ble":
+            output = capture_output(client.protocol_scan_ble)
+            return [TextContent(type="text", text=output)]
+
+        # Primitive tools
+        elif name == "ate_primitive_list":
+            output = capture_output(
+                client.primitive_list,
+                arguments.get("robot_model"),
+                arguments.get("category"),
+                arguments.get("status"),
+                arguments.get("tested_only", False)
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_primitive_get":
+            output = capture_output(
+                client.primitive_get,
+                arguments["primitive_id"]
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_primitive_test":
+            output = capture_output(
+                client.primitive_test,
+                arguments["primitive_id"],
+                arguments["params"],
+                arguments["result"],
+                arguments.get("notes"),
+                arguments.get("video_url")
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_primitive_deps_show":
+            output = capture_output(
+                client.primitive_deps_show,
+                arguments["primitive_id"]
+            )
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_primitive_deps_add":
+            output = capture_output(
+                client.primitive_deps_add,
+                arguments["primitive_id"],
+                arguments["required_id"],
+                arguments.get("dependency_type", "requires"),
+                arguments.get("min_status", "tested")
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Bridge tools
+        elif name == "ate_bridge_scan_serial":
+            output = capture_output(client.protocol_scan_serial)
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_bridge_scan_ble":
+            output = capture_output(client.protocol_scan_ble)
+            return [TextContent(type="text", text=output)]
+
+        elif name == "ate_bridge_send":
+            output = capture_output(
+                client.bridge_send,
+                arguments["port"],
+                arguments["command"],
+                arguments.get("transport", "serial"),
+                arguments.get("baud_rate", 115200),
+                arguments.get("wait", 0.5)
+            )
+            return [TextContent(type="text", text=output if output else "Command sent (no response)")]
+
+        elif name == "ate_bridge_replay":
+            output = capture_output(
+                client.bridge_replay,
+                arguments["recording"],
+                arguments["port"],
+                arguments.get("transport", "serial"),
+                arguments.get("baud_rate", 115200),
+                arguments.get("speed", 1.0)
+            )
+            return [TextContent(type="text", text=output)]
+
+        # Compiler tools
+        elif name == "ate_compile_skill":
+            output = capture_output(
+                client.compile_skill,
+                arguments["skill_path"],
+                arguments.get("output", "./output"),
+                arguments.get("target", "python"),
+                arguments.get("robot"),
+                arguments.get("ate_dir")
+            )
+            return [TextContent(type="text", text=output or "Skill compiled successfully")]
+
+        elif name == "ate_test_compiled_skill":
+            output = capture_output(
+                client.test_compiled_skill,
+                arguments["skill_path"],
+                arguments.get("mode", "dry-run"),
+                arguments.get("robot_port"),
+                arguments.get("params", {})
+            )
+            return [TextContent(type="text", text=output or "Skill test completed")]
+
+        elif name == "ate_publish_compiled_skill":
+            output = capture_output(
+                client.publish_compiled_skill,
+                arguments["skill_path"],
+                arguments.get("visibility", "public")
+            )
+            return [TextContent(type="text", text=output or "Skill published successfully")]
+
+        elif name == "ate_check_skill_compatibility":
+            output = capture_output(
+                client.check_skill_compatibility,
+                arguments["skill_path"],
+                arguments.get("robot_urdf"),
+                arguments.get("robot_ate_dir")
+            )
+            return [TextContent(type="text", text=output or "Compatibility check completed")]
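`ate_bridge_send` forwards a raw command string to a robot over a transport (serial by default, 115200 baud, a short wait for the reply). A rough sketch of the kind of serial round-trip this wraps, using pyserial; the newline framing and UTF-8 encoding are assumptions for illustration, not details taken from `ate/bridge_server.py`:

```python
# Assumes `pip install pyserial`; framing, encoding, and the example port/command are guesses.
import time

import serial  # pyserial


def send_command(port: str, command: str, baud_rate: int = 115200, wait: float = 0.5) -> str:
    """Write one command to a serial device and return whatever it sends back."""
    with serial.Serial(port, baud_rate, timeout=wait) as ser:
        ser.write((command + "\n").encode("utf-8"))
        time.sleep(wait)  # give the firmware a moment to respond
        return ser.read(ser.in_waiting or 1).decode("utf-8", errors="replace")


# Example (hypothetical port and command):
# print(send_command("/dev/ttyUSB0", "K0"))
```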
+
+        elif name == "ate_list_primitives":
+            from ate.primitives import PRIMITIVE_REGISTRY, PrimitiveCategory
+
+            category = arguments.get("category", "all")
+            hardware = arguments.get("hardware")
+
+            result_text = "# Available Primitives\n\n"
+
+            for prim_name, prim_def in PRIMITIVE_REGISTRY.items():
+                # Filter by category
+                if category != "all":
+                    cat_match = prim_def.get("category", "").lower() == category.lower()
+                    if not cat_match:
+                        continue
+
+                # Filter by hardware
+                if hardware:
+                    req_hardware = prim_def.get("hardware", [])
+                    if hardware.lower() not in [h.lower() for h in req_hardware]:
+                        continue
+
+                result_text += f"## {prim_name}\n"
+                result_text += f"**Category:** {prim_def.get('category', 'unknown')}\n"
+                result_text += f"**Description:** {prim_def.get('description', 'No description')}\n"
+
+                # Parameters
+                params = prim_def.get("parameters", {})
+                if params:
+                    result_text += "**Parameters:**\n"
+                    for param_name, param_def in params.items():
+                        required = "required" if param_def.get("required", False) else "optional"
+                        result_text += f" - `{param_name}` ({param_def.get('type', 'any')}, {required}): {param_def.get('description', '')}\n"
+
+                # Hardware requirements
+                hw_reqs = prim_def.get("hardware", [])
+                if hw_reqs:
+                    result_text += f"**Hardware:** {', '.join(hw_reqs)}\n"
+
+                result_text += "\n"

             return [TextContent(type="text", text=result_text)]
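The `ate_list_primitives` handler reads `category`, `description`, `parameters` (each with `type`, `required`, `description`, and optionally `default`), `hardware`, and `returns` from every `PRIMITIVE_REGISTRY` entry. The real registry lives in `ate/primitives.py`; the entry below is a hypothetical example of the dict shape those lookups imply, with an invented primitive name and values:

```python
# Hypothetical registry entry; the key names are inferred from the handler above,
# the primitive itself ("gripper_close") and its values are made up for illustration.
PRIMITIVE_REGISTRY_EXAMPLE = {
    "gripper_close": {
        "category": "manipulation",
        "description": "Close the gripper until a force threshold is reached",
        "parameters": {
            "force": {"type": "float", "required": False, "default": 10.0,
                      "description": "Maximum closing force in newtons"},
            "timeout": {"type": "float", "required": False, "default": 2.0,
                        "description": "Give up after this many seconds"},
        },
        "hardware": ["gripper"],
        "returns": "bool",
    },
}

for name, spec in PRIMITIVE_REGISTRY_EXAMPLE.items():
    print(name, "-", spec["description"])
```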

-        elif name == "
-
-            robot = response.get("profile", {})
+        elif name == "ate_get_primitive":
+            from ate.primitives import get_primitive

-
-
-
-
+            prim_name = arguments["name"]
+            prim_def = get_primitive(prim_name)
+
+            if not prim_def:
+                return [TextContent(type="text", text=f"Primitive not found: {prim_name}")]
+
+            result_text = f"# {prim_name}\n\n"
+            result_text += f"**Category:** {prim_def.get('category', 'unknown')}\n"
+            result_text += f"**Description:** {prim_def.get('description', 'No description')}\n\n"
+
+            # Parameters
+            params = prim_def.get("parameters", {})
+            if params:
+                result_text += "## Parameters\n\n"
+                for param_name, param_def in params.items():
+                    required = "✓ Required" if param_def.get("required", False) else "○ Optional"
+                    default = f", default: `{param_def.get('default')}`" if "default" in param_def else ""
+                    result_text += f"### `{param_name}`\n"
+                    result_text += f"- **Type:** {param_def.get('type', 'any')}\n"
+                    result_text += f"- **Status:** {required}{default}\n"
+                    result_text += f"- **Description:** {param_def.get('description', '')}\n\n"
+
+            # Hardware requirements
+            hw_reqs = prim_def.get("hardware", [])
+            if hw_reqs:
+                result_text += f"## Hardware Requirements\n\n"
+                for hw in hw_reqs:
+                    result_text += f"- {hw}\n"
+
+            # Return type
+            result_text += f"\n## Returns\n\n`{prim_def.get('returns', 'bool')}`\n"

             return [TextContent(type="text", text=result_text)]

+        elif name == "ate_validate_skill_spec":
+            from ate.skill_schema import SkillSpecification
+
+            skill_path = arguments["skill_path"]
+
+            try:
+                spec = SkillSpecification.from_yaml(skill_path)
+                errors = spec.validate()
+
+                if errors:
+                    result_text = f"# Validation Failed\n\n"
+                    result_text += f"Found {len(errors)} error(s) in `{skill_path}`:\n\n"
+                    for error in errors:
+                        result_text += f"- ❌ {error}\n"
+                    return [TextContent(type="text", text=result_text)]
+
+                result_text = f"# Validation Passed ✓\n\n"
+                result_text += f"**Skill:** {spec.name}\n"
+                result_text += f"**Version:** {spec.version}\n"
+                result_text += f"**Description:** {spec.description}\n\n"
+
+                # Summary
+                result_text += "## Summary\n\n"
+                result_text += f"- **Parameters:** {len(spec.parameters)}\n"
+                result_text += f"- **Hardware Requirements:** {len(spec.hardware_requirements)}\n"
+                result_text += f"- **Execution Steps:** {len(spec.execution)}\n"
+                result_text += f"- **Success Criteria:** {len(spec.success_criteria)}\n"
+
+                return [TextContent(type="text", text=result_text)]
+            except Exception as e:
+                return [TextContent(type="text", text=f"# Validation Error\n\nFailed to parse skill specification:\n\n```\n{str(e)}\n```")]
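`ate_validate_skill_spec` loads a YAML file through `SkillSpecification.from_yaml()` and reports on `name`, `version`, `description`, `parameters`, `hardware_requirements`, `execution`, and `success_criteria`. A hedged sketch of a minimal spec with those sections, written out from Python so the field names stay visible; the concrete values are invented, PyYAML is assumed to be installed, and the real schema in `ate/skill_schema.py` may require more than this:

```python
# The section names mirror the attributes read by the handler above; everything else
# (the skill itself, its steps, the exact YAML layout) is an illustrative guess.
import yaml  # PyYAML

minimal_spec = {
    "name": "pick_and_place",
    "version": "0.1.0",
    "description": "Pick an object from a bin and place it on a tray",
    "parameters": [{"name": "object_id", "type": "string", "required": True}],
    "hardware_requirements": [{"type": "gripper"}, {"type": "camera"}],
    "execution": [
        {"primitive": "detect_object"},
        {"primitive": "grasp"},
        {"primitive": "place"},
    ],
    "success_criteria": [{"check": "object_on_tray"}],
}

with open("skill.yaml", "w") as fh:
    yaml.safe_dump(minimal_spec, fh, sort_keys=False)
```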
+
        else:
            return [
                TextContent(
@@ -384,6 +2402,10 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
            ]


+# ============================================================================
+# Resources
+# ============================================================================
+
 @server.list_resources()
 async def list_resources() -> List[Resource]:
     """List available resources"""
@@ -400,6 +2422,30 @@ async def list_resources() -> List[Resource]:
            description="Access robot profile details",
            mimeType="application/json",
        ),
+        Resource(
+            uri="skill://*",
+            name="Skill",
+            description="Access skill/artifact details",
+            mimeType="application/json",
+        ),
+        Resource(
+            uri="part://*",
+            name="Hardware Part",
+            description="Access hardware part details",
+            mimeType="application/json",
+        ),
+        Resource(
+            uri="workflow://*",
+            name="Workflow",
+            description="Access workflow definition",
+            mimeType="application/yaml",
+        ),
+        Resource(
+            uri="team://*",
+            name="Team",
+            description="Access team details",
+            mimeType="application/json",
+        ),
    ]


@@ -414,17 +2460,33 @@ async def read_resource(uri: str) -> str:
        robot_id = uri.replace("robot://", "")
        response = client._request("GET", f"/robots/profiles/{robot_id}")
        return json.dumps(response.get("profile", {}), indent=2)
+    elif uri.startswith("skill://"):
+        skill_id = uri.replace("skill://", "")
+        response = client._request("GET", f"/skills/{skill_id}")
+        return json.dumps(response.get("skill", {}), indent=2)
+    elif uri.startswith("part://"):
+        part_id = uri.replace("part://", "")
+        response = client._request("GET", f"/parts/{part_id}")
+        return json.dumps(response.get("part", {}), indent=2)
+    elif uri.startswith("team://"):
+        team_slug = uri.replace("team://", "")
+        response = client._request("GET", f"/teams/{team_slug}")
+        return json.dumps(response.get("team", {}), indent=2)
    else:
        raise ValueError(f"Unknown resource URI: {uri}")
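`read_resource` now recognises four URI schemes beyond `robot://`, each mapping to a REST path prefix and a top-level key in the JSON response. That routing can be summarised in one table; the helper below is only a sketch of the same logic, not the module's code:

```python
# Sketch of the scheme -> (endpoint prefix, response key) routing used by read_resource.
# The mapping is taken from the diff above; the helper and the example stub are illustrative.
import json
from typing import Callable, Dict, Tuple

RESOURCE_ROUTES: Dict[str, Tuple[str, str]] = {
    "robot://": ("/robots/profiles/", "profile"),
    "skill://": ("/skills/", "skill"),
    "part://": ("/parts/", "part"),
    "team://": ("/teams/", "team"),
}


def resolve_resource(uri: str, request: Callable[[str, str], dict]) -> str:
    for scheme, (prefix, key) in RESOURCE_ROUTES.items():
        if uri.startswith(scheme):
            response = request("GET", prefix + uri[len(scheme):])
            return json.dumps(response.get(key, {}), indent=2)
    raise ValueError(f"Unknown resource URI: {uri}")


# Example with a stubbed request function:
print(resolve_resource("part://px-100-gripper", lambda method, path: {"part": {"id": path}}))
```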


+# ============================================================================
+# Prompts
+# ============================================================================
+
 @server.list_prompts()
 async def list_prompts() -> List[Prompt]:
     """List available prompts"""
     return [
         Prompt(
             name="create_skill",
-            description="Guided workflow for creating a new robot skill
+            description="Guided workflow for creating a new robot skill from scratch",
             arguments=[
                 PromptArgument(
                     name="robot_model",
@@ -433,14 +2495,14 @@ async def list_prompts() -> List[Prompt]:
                ),
                PromptArgument(
                    name="task_description",
-                    description="
+                    description="Natural language description of the skill/task",
                    required=True,
                ),
            ],
        ),
        Prompt(
            name="adapt_skill",
-            description="Guided workflow for adapting a skill between robots",
+            description="Guided workflow for adapting a skill between different robots",
            arguments=[
                PromptArgument(
                    name="source_robot",
@@ -459,6 +2521,59 @@ async def list_prompts() -> List[Prompt]:
                ),
            ],
        ),
+        Prompt(
+            name="setup_workflow",
+            description="Create a multi-skill workflow/pipeline",
+            arguments=[
+                PromptArgument(
+                    name="task_description",
+                    description="Description of the overall task",
+                    required=True,
+                ),
+                PromptArgument(
+                    name="robot",
+                    description="Target robot model",
+                    required=True,
+                ),
+            ],
+        ),
+        Prompt(
+            name="deploy_skill",
+            description="Deploy a skill to production robots",
+            arguments=[
+                PromptArgument(
+                    name="skill_id",
+                    description="Skill ID to deploy",
+                    required=True,
+                ),
+                PromptArgument(
+                    name="target",
+                    description="Target fleet or robot",
+                    required=True,
+                ),
+            ],
+        ),
+        Prompt(
+            name="debug_compatibility",
+            description="Debug why a skill isn't transferring well between robots",
+            arguments=[
+                PromptArgument(
+                    name="source_robot",
+                    description="Source robot model",
+                    required=True,
+                ),
+                PromptArgument(
+                    name="target_robot",
+                    description="Target robot model",
+                    required=True,
+                ),
+                PromptArgument(
+                    name="skill_id",
+                    description="Skill ID having issues",
+                    required=True,
+                ),
+            ],
+        ),
    ]


@@ -469,32 +2584,195 @@ async def get_prompt(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
        return [
            TextContent(
                type="text",
-                text=f"""Create a
+                text=f"""# Create a Robot Skill for {arguments.get('robot_model', 'your robot')}
+
+## Task: {arguments.get('task_description', 'Not specified')}
+
+### Steps:

-1.
-
-
-4. Push to FoodforThought: Use ate_push
+1. **Generate Scaffolding**
+   Use `ate_generate` to create skill files from your task description:
+   - This creates skill.yaml, main.py, test_skill.py, and README.md

-
+2. **Review Generated Files**
+   - Check skill.yaml for correct parameters
+   - Implement the TODO sections in main.py
+
+3. **Add Part Dependencies**
+   Use `ate_parts_list` to find required hardware, then `ate_parts_require` to add dependencies
+
+4. **Test in Simulation**
+   Use `ate_test` with environment="pybullet" to validate the skill
+
+5. **Run Safety Validation**
+   Use `ate_validate` with checks=["collision", "speed", "workspace", "force"]
+
+6. **Upload Demonstrations**
+   Use `ate_upload` to submit demo videos for community labeling
+
+7. **Check Transfer Compatibility**
+   Use `ate_check_transfer` to see which other robots can use this skill
""",
            )
        ]
+
    elif name == "adapt_skill":
        return [
            TextContent(
                type="text",
-                text=f"""Adapt
+                text=f"""# Adapt Skill from {arguments.get('source_robot')} to {arguments.get('target_robot')}
+
+## Repository: {arguments.get('repository_id')}
+
+### Steps:
+
+1. **Check Compatibility**
+   Use `ate_check_transfer` to get the compatibility score and adaptation type
+
+2. **Generate Adaptation Plan**
+   Use `ate_adapt` to see what changes are needed:
+   - Kinematic adaptations
+   - Sensor mappings
+   - Code modifications
+
+3. **Review Requirements**
+   - Check if new parts are needed with `ate_parts_check`
+   - Verify hardware compatibility
+
+4. **Apply Adaptations**
+   Based on the adaptation type:
+   - **Direct**: No changes needed
+   - **Parametric**: Adjust configuration values
+   - **Retrain**: Collect new demonstrations
+   - **Manual**: Significant code changes required
+
+5. **Test Adapted Skill**
+   Use `ate_test` with robot="{arguments.get('target_robot')}"
+
+6. **Validate Safety**
+   Use `ate_validate` with strict=true for production deployment
+""",
+            )
+        ]
+
+    elif name == "setup_workflow":
+        return [
+            TextContent(
+                type="text",
+                text=f"""# Create Multi-Skill Workflow
+
+## Task: {arguments.get('task_description', 'Not specified')}
+## Robot: {arguments.get('robot', 'Not specified')}
+
+### Steps:
+
+1. **Define Workflow Steps**
+   Create a workflow.yaml file with your skill pipeline:
+   ```yaml
+   name: My Workflow
+   version: 1.0.0
+   robot:
+     model: {arguments.get('robot', 'ur5')}
+   steps:
+     - id: step1
+       skill: perception/detect-object
+     - id: step2
+       skill: manipulation/pick
+       depends_on: [step1]
+   ```
+
+2. **Validate Workflow**
+   Use `ate_workflow_validate` to check for errors
+
+3. **Test in Simulation**
+   Use `ate_workflow_run` with sim=true
+
+4. **Dry Run**
+   Use `ate_workflow_run` with dry_run=true to see execution plan
+
+5. **Export for Production**
+   Use `ate_workflow_export` with format="ros2" for ROS2 launch file
+""",
+            )
+        ]
+
+    elif name == "deploy_skill":
+        return [
+            TextContent(
+                type="text",
+                text=f"""# Deploy Skill to Production
+
+## Skill: {arguments.get('skill_id')}
+## Target: {arguments.get('target')}
+
+### Pre-Deployment Checklist:
+
+1. **Audit Dependencies**
+   Use `ate_deps_audit` to verify all parts are available
+
+2. **Run Validation**
+   Use `ate_validate` with strict=true
+
+3. **Benchmark Performance**
+   Use `ate_benchmark` to ensure acceptable performance
+
+4. **Check Deployment Config**
+   Create deploy.yaml for hybrid edge/cloud deployment if needed
+
+### Deployment Steps:
+
+1. **Dry Run**
+   Use `ate_deploy_config` with dry_run=true to preview
+
+2. **Deploy**
+   Use `ate_deploy` or `ate_deploy_config` to push to target
+
+3. **Monitor Status**
+   Use `ate_deploy_status` to check deployment health
+""",
+            )
+        ]
+
+    elif name == "debug_compatibility":
+        return [
+            TextContent(
+                type="text",
+                text=f"""# Debug Skill Transfer Compatibility

-
-
-
-4. Test the adapted skill
+## Source: {arguments.get('source_robot')}
+## Target: {arguments.get('target_robot')}
+## Skill: {arguments.get('skill_id')}

-
+### Diagnostic Steps:
+
+1. **Get Compatibility Score**
+   Use `ate_check_transfer` to see overall compatibility
+
+2. **Check Score Breakdown**
+   Look at individual scores:
+   - Kinematic score: Joint configurations, workspace overlap
+   - Sensor score: Camera, force/torque sensor compatibility
+   - Compute score: Processing power requirements
+
+3. **Review Part Requirements**
+   Use `ate_parts_check` to see required hardware for the skill
+
+4. **Compare Robot Profiles**
+   Use `ate_get_robot` for both source and target to compare specs
+
+5. **Generate Adaptation Plan**
+   Use `ate_adapt` to get specific recommendations
+
+### Common Issues:
+
+- **Low Kinematic Score**: Check joint limits, reach, payload
+- **Low Sensor Score**: Missing cameras or sensors
+- **Low Compute Score**: Target robot has less processing power
+- **Impossible Transfer**: Fundamentally incompatible hardware
""",
            )
        ]
+
    else:
        return [TextContent(type="text", text=f"Unknown prompt: {name}")]

@@ -502,7 +2780,6 @@ Repository ID: {arguments.get('repository_id')}
 async def main():
     """Main entry point for MCP server"""
     # Run the server using stdio transport
-    # stdio_server() returns (stdin, stdout) streams
     stdin, stdout = stdio_server()
     await server.run(
         stdin, stdout, server.create_initialization_options()
@@ -511,4 +2788,3 @@ async def main():

 if __name__ == "__main__":
     asyncio.run(main())
-