foodforthought-cli 0.1.1-py3-none-any.whl → 0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +1 -1
- ate/cli.py +936 -0
- ate/generator.py +713 -0
- {foodforthought_cli-0.1.1.dist-info → foodforthought_cli-0.2.0.dist-info}/METADATA +1 -1
- foodforthought_cli-0.2.0.dist-info/RECORD +9 -0
- foodforthought_cli-0.1.1.dist-info/RECORD +0 -8
- {foodforthought_cli-0.1.1.dist-info → foodforthought_cli-0.2.0.dist-info}/WHEEL +0 -0
- {foodforthought_cli-0.1.1.dist-info → foodforthought_cli-0.2.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.1.1.dist-info → foodforthought_cli-0.2.0.dist-info}/top_level.txt +0 -0
ate/cli.py CHANGED

@@ -12,6 +12,7 @@ import random
 import requests
 from pathlib import Path
 from typing import Optional, Dict, List
+from ate.generator import generate_skill_project, TEMPLATES
 
 BASE_URL = os.getenv("ATE_API_URL", "https://kindly.fyi/api")
 API_KEY = os.getenv("ATE_API_KEY", "")
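The only code change in this first hunk is the new import. For reference, a minimal sketch of driving the generator module directly, outside the CLI; the keyword arguments and result keys are inferred from ATEClient.generate() further down in this diff, and the task text, robot model, and output path are placeholder values:

    from ate.generator import generate_skill_project, TEMPLATES

    # Hypothetical inputs; ATEClient.generate() below calls the function the same way.
    result = generate_skill_project(
        task_description="pick up box and place on pallet",
        robot_model="ur5",
        output_dir="./new-skill",
    )
    print(result["template"])          # chosen template name
    print(result["files_created"])     # e.g. skill.yaml, main.py, test_skill.py
    print(result["extracted_params"])  # parameters parsed from the description
    print(list(TEMPLATES))             # available templates (assumed to be a mapping)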
@@ -662,6 +663,729 @@ class ATEClient:
             print(f"\n✗ Failed to get status: {e}", file=sys.stderr)
             sys.exit(1)
 
+    def parts_list(self, category: Optional[str], manufacturer: Optional[str],
+                   search: Optional[str]) -> None:
+        """List available parts"""
+        print("Fetching parts catalog...")
+
+        params = {}
+        if category:
+            params["category"] = category
+            print(f" Category: {category}")
+        if manufacturer:
+            params["manufacturer"] = manufacturer
+            print(f" Manufacturer: {manufacturer}")
+        if search:
+            params["search"] = search
+            print(f" Search: {search}")
+
+        try:
+            response = self._request("GET", "/parts", params=params)
+            parts = response.get("parts", [])
+            pagination = response.get("pagination", {})
+
+            if not parts:
+                print("\nNo parts found matching criteria.")
+                return
+
+            print(f"\n{'=' * 70}")
+            print(f"{'Part Name':<30} {'Category':<15} {'Manufacturer':<20}")
+            print(f"{'=' * 70}")
+
+            for part in parts:
+                name = part.get("name", "")[:28]
+                cat = part.get("category", "")[:13]
+                mfr = part.get("manufacturer", "")[:18]
+                print(f"{name:<30} {cat:<15} {mfr:<20}")
+
+            total = pagination.get("total", len(parts))
+            print(f"{'=' * 70}")
+            print(f"Showing {len(parts)} of {total} parts")
+
+        except Exception as e:
+            print(f"\n✗ Failed to list parts: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def parts_check(self, skill_id: str) -> None:
+        """Check part compatibility for a skill"""
+        print(f"Checking parts for skill: {skill_id}")
+
+        try:
+            response = self._request("GET", f"/skills/{skill_id}/parts")
+
+            skill = response.get("skill", {})
+            parts = response.get("parts", [])
+            summary = response.get("summary", {})
+            by_category = response.get("byCategory", {})
+
+            print(f"\nSkill: {skill.get('name', skill_id)}")
+            print(f"Type: {skill.get('type', 'unknown')}")
+
+            if not parts:
+                print("\n✓ No part dependencies declared for this skill.")
+                return
+
+            print(f"\n{'=' * 70}")
+            print(f"Part Dependencies ({summary.get('total', 0)} total)")
+            print(f"{'=' * 70}")
+
+            for category, cat_parts in by_category.items():
+                print(f"\n{category.upper()}:")
+                for p in cat_parts:
+                    part = p.get("part", {})
+                    required = "REQUIRED" if p.get("required") else "optional"
+                    version = p.get("minVersion", "any")
+                    if p.get("maxVersion"):
+                        version += f" - {p['maxVersion']}"
+
+                    icon = "●" if p.get("required") else "○"
+                    print(f" {icon} {part.get('name'):<30} [{required}] v{version}")
+
+            print(f"\n{'=' * 70}")
+            print(f"Summary: {summary.get('required', 0)} required, {summary.get('optional', 0)} optional")
+
+        except Exception as e:
+            print(f"\n✗ Failed to check parts: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def parts_require(self, part_id: str, skill_id: str, version: str,
+                      required: bool) -> None:
+        """Add part dependency to skill"""
+        print(f"Adding part dependency...")
+        print(f" Part ID: {part_id}")
+        print(f" Skill ID: {skill_id}")
+        print(f" Min Version: {version}")
+        print(f" Required: {required}")
+
+        try:
+            response = self._request("POST", f"/parts/{part_id}/compatibility", json={
+                "skillId": skill_id,
+                "minVersion": version,
+                "required": required,
+            })
+
+            compat = response.get("compatibility", {})
+            print(f"\n✓ Part dependency added!")
+            print(f" Compatibility ID: {compat.get('id')}")
+
+        except Exception as e:
+            print(f"\n✗ Failed to add part dependency: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def workflow_validate(self, path: str) -> None:
+        """Validate a workflow YAML file"""
+        import yaml
+
+        workflow_path = Path(path)
+        if not workflow_path.exists():
+            print(f"Error: File not found: {path}", file=sys.stderr)
+            sys.exit(1)
+
+        print(f"Validating workflow: {path}")
+
+        try:
+            with open(workflow_path) as f:
+                workflow_data = yaml.safe_load(f)
+
+            # Basic validation
+            errors = []
+            warnings = []
+
+            # Required fields
+            if not workflow_data.get("name"):
+                errors.append("Missing required field: name")
+            if not workflow_data.get("steps"):
+                errors.append("Missing required field: steps")
+            elif not isinstance(workflow_data["steps"], list):
+                errors.append("Steps must be an array")
+            elif len(workflow_data["steps"]) == 0:
+                errors.append("Workflow must have at least one step")
+
+            # Validate steps
+            step_ids = set()
+            for i, step in enumerate(workflow_data.get("steps", [])):
+                step_id = step.get("id", f"step_{i}")
+
+                if not step.get("id"):
+                    errors.append(f"Step {i+1}: Missing required field 'id'")
+                elif step["id"] in step_ids:
+                    errors.append(f"Duplicate step ID: {step['id']}")
+                step_ids.add(step_id)
+
+                if not step.get("skill"):
+                    errors.append(f"Step '{step_id}': Missing required field 'skill'")
+
+                # Check dependencies
+                for dep in step.get("depends_on", []):
+                    if dep not in step_ids and dep != step_id:
+                        # Might be defined later, just warn
+                        pass
+
+            # Check dependency cycles (simple check)
+            for step in workflow_data.get("steps", []):
+                for dep in step.get("depends_on", []):
+                    if dep not in step_ids:
+                        errors.append(f"Step '{step.get('id')}' depends on unknown step '{dep}'")
+
+            # Print results
+            print(f"\n{'=' * 50}")
+            print(f"Validation Results")
+            print(f"{'=' * 50}")
+
+            print(f"\nWorkflow: {workflow_data.get('name', 'Unnamed')}")
+            print(f"Version: {workflow_data.get('version', '1.0.0')}")
+            print(f"Steps: {len(workflow_data.get('steps', []))}")
+
+            if errors:
+                print(f"\n✗ Validation FAILED")
+                print(f"\nErrors ({len(errors)}):")
+                for error in errors:
+                    print(f" ✗ {error}")
+                sys.exit(1)
+            else:
+                print(f"\n✓ Workflow is valid!")
+                if warnings:
+                    print(f"\nWarnings ({len(warnings)}):")
+                    for warning in warnings:
+                        print(f" ⚠ {warning}")
+
+        except yaml.YAMLError as e:
+            print(f"\n✗ Invalid YAML syntax: {e}", file=sys.stderr)
+            sys.exit(1)
+        except Exception as e:
+            print(f"\n✗ Validation failed: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def workflow_run(self, path: str, sim: bool, dry_run: bool) -> None:
+        """Run a workflow"""
+        import yaml
+
+        workflow_path = Path(path)
+        if not workflow_path.exists():
+            print(f"Error: File not found: {path}", file=sys.stderr)
+            sys.exit(1)
+
+        with open(workflow_path) as f:
+            workflow_data = yaml.safe_load(f)
+
+        print(f"Running workflow: {workflow_data.get('name', 'Unnamed')}")
+        print(f" Mode: {'Simulation' if sim else 'Real Robot'}")
+        print(f" Dry Run: {dry_run}")
+
+        if dry_run:
+            print("\n[DRY RUN] Execution plan:")
+            for i, step in enumerate(workflow_data.get("steps", [])):
+                deps = step.get("depends_on", [])
+                deps_str = f" (after: {', '.join(deps)})" if deps else ""
+                print(f" {i+1}. {step.get('id')}: {step.get('skill')}{deps_str}")
+            print("\n✓ Dry run complete. No actions taken.")
+            return
+
+        # Simulate execution
+        print("\n" + "=" * 50)
+        print("Executing workflow...")
+        print("=" * 50)
+
+        for i, step in enumerate(workflow_data.get("steps", [])):
+            step_id = step.get("id", f"step_{i}")
+            skill = step.get("skill", "unknown")
+
+            print(f"\n[{i+1}/{len(workflow_data.get('steps', []))}] {step_id}")
+            print(f" Skill: {skill}")
+
+            if sim:
+                print(f" Mode: Simulation")
+                time.sleep(random.uniform(0.5, 1.5))
+
+                # Simulate result
+                success = random.random() > 0.1
+                if success:
+                    print(f" Status: ✓ Completed")
+                else:
+                    print(f" Status: ✗ Failed")
+                    if step.get("on_failure") == "fail":
+                        print("\nWorkflow FAILED")
+                        sys.exit(1)
+            else:
+                print(f" Status: Would execute on real robot")
+
+        print("\n" + "=" * 50)
+        print("✓ Workflow completed successfully!")
+
+    def workflow_export(self, path: str, format: str, output: Optional[str]) -> None:
+        """Export workflow to different formats"""
+        import yaml
+
+        workflow_path = Path(path)
+        if not workflow_path.exists():
+            print(f"Error: File not found: {path}", file=sys.stderr)
+            sys.exit(1)
+
+        with open(workflow_path) as f:
+            workflow_data = yaml.safe_load(f)
+
+        print(f"Exporting workflow: {workflow_data.get('name', 'Unnamed')}")
+        print(f" Format: {format}")
+
+        if format == "ros2":
+            # Generate ROS2 launch file
+            launch_content = self._generate_ros2_launch(workflow_data)
+            output_file = output or f"{workflow_data.get('name', 'workflow').replace(' ', '_').lower()}_launch.py"
+
+            with open(output_file, 'w') as f:
+                f.write(launch_content)
+
+            print(f"\n✓ Exported to: {output_file}")
+
+        elif format == "json":
+            output_file = output or f"{workflow_data.get('name', 'workflow').replace(' ', '_').lower()}.json"
+            with open(output_file, 'w') as f:
+                json.dump(workflow_data, f, indent=2)
+            print(f"\n✓ Exported to: {output_file}")
+
+        else:
+            print(f"Unsupported format: {format}", file=sys.stderr)
+            sys.exit(1)
+
+    def _generate_ros2_launch(self, workflow: Dict) -> str:
+        """Generate ROS2 launch file from workflow"""
+        steps_code = ""
+        for step in workflow.get("steps", []):
+            step_id = step.get("id", "step")
+            skill = step.get("skill", "unknown")
+            inputs = step.get("inputs", {})
+
+            inputs_str = ", ".join([f"'{k}': '{v}'" for k, v in inputs.items()])
+
+            steps_code += f'''
+    # Step: {step_id}
+    {step_id}_node = Node(
+        package='skill_executor',
+        executable='run_skill',
+        name='{step_id}',
+        parameters=[{{
+            'skill_id': '{skill}',
+            'inputs': {{{inputs_str}}},
+        }}],
+    )
+    ld.add_action({step_id}_node)
+'''
+
+        return f'''#!/usr/bin/env python3
+"""
+ROS2 Launch File - {workflow.get('name', 'Workflow')}
+Generated by FoodforThought CLI
+
+Version: {workflow.get('version', '1.0.0')}
+"""
+
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+    ld = LaunchDescription()
+{steps_code}
+    return ld
+'''
+
+    def generate(self, description: str, robot: str, output: str) -> None:
+        """Generate skill scaffolding from text description"""
+        print(f"Generating skill from description...")
+        print(f" Description: \"{description}\"")
+        print(f" Robot: {robot}")
+        print(f" Output: {output}")
+
+        try:
+            result = generate_skill_project(
+                task_description=description,
+                robot_model=robot,
+                output_dir=output,
+            )
+
+            print(f"\n✓ Skill project generated!")
+            print(f"\nTemplate: {result['template']}")
+            print(f"Location: {result['output_dir']}")
+            print(f"\nFiles created:")
+            for f in result['files_created']:
+                print(f" - {f}")
+
+            if result['extracted_params']:
+                print(f"\nExtracted parameters:")
+                for k, v in result['extracted_params'].items():
+                    print(f" - {k}: {v}")
+
+            print(f"\nNext steps:")
+            print(f" 1. cd {result['output_dir']}")
+            print(f" 2. Edit skill.yaml with your configuration")
+            print(f" 3. Implement main.py with your skill logic")
+            print(f" 4. Run tests: pytest test_skill.py -v")
+            print(f" 5. Test in simulation: ate test -e pybullet -r {robot}")
+
+        except Exception as e:
+            print(f"\n✗ Failed to generate skill: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def team_create(self, name: str, description: Optional[str]) -> None:
+        """Create a new team"""
+        print(f"Creating team: {name}")
+
+        try:
+            # Generate slug from name
+            slug = name.lower().replace(" ", "-")
+            slug = ''.join(c for c in slug if c.isalnum() or c == '-')
+
+            response = self._request("POST", "/teams", json={
+                "name": name,
+                "slug": slug,
+                "description": description,
+            })
+
+            team = response.get("team", {})
+            print(f"\n✓ Team created!")
+            print(f" Name: {team.get('name')}")
+            print(f" Slug: {team.get('slug')}")
+            print(f" ID: {team.get('id')}")
+
+        except Exception as e:
+            print(f"\n✗ Failed to create team: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def team_invite(self, email: str, team_slug: str, role: str) -> None:
+        """Invite a user to a team"""
+        print(f"Inviting {email} to team...")
+        print(f" Team: {team_slug}")
+        print(f" Role: {role}")
+
+        try:
+            response = self._request("POST", f"/teams/{team_slug}/members", json={
+                "email": email,
+                "role": role,
+            })
+
+            print(f"\n✓ Invitation sent!")
+
+        except Exception as e:
+            print(f"\n✗ Failed to invite: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def team_list(self) -> None:
+        """List teams the user belongs to"""
+        print("Fetching teams...")
+
+        try:
+            response = self._request("GET", "/teams")
+            teams = response.get("teams", [])
+
+            if not teams:
+                print("\nYou are not a member of any teams.")
+                print("Create one with: ate team create <name>")
+                return
+
+            print(f"\n{'=' * 60}")
+            print(f"{'Team Name':<25} {'Role':<15} {'Members':<10}")
+            print(f"{'=' * 60}")
+
+            for team in teams:
+                name = team.get("name", "")[:23]
+                role = team.get("role", "member")[:13]
+                members = team.get("memberCount", 0)
+                print(f"{name:<25} {role:<15} {members:<10}")
+
+            print(f"{'=' * 60}")
+
+        except Exception as e:
+            print(f"\n✗ Failed to list teams: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def team_share(self, skill_id: str, team_slug: str) -> None:
+        """Share a skill with a team"""
+        print(f"Sharing skill with team...")
+        print(f" Skill: {skill_id}")
+        print(f" Team: {team_slug}")
+
+        try:
+            response = self._request("POST", f"/skills/{skill_id}/share", json={
+                "teamSlug": team_slug,
+            })
+
+            print(f"\n✓ Skill shared with team!")
+
+        except Exception as e:
+            print(f"\n✗ Failed to share: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def data_upload(self, path: str, skill: str, stage: str) -> None:
+        """Upload dataset/sensor logs"""
+        data_path = Path(path)
+
+        if not data_path.exists():
+            print(f"Error: Path not found: {path}", file=sys.stderr)
+            sys.exit(1)
+
+        print(f"Uploading data...")
+        print(f" Path: {path}")
+        print(f" Skill: {skill}")
+        print(f" Stage: {stage}")
+
+        # Count files
+        if data_path.is_dir():
+            files = list(data_path.rglob("*"))
+            file_count = len([f for f in files if f.is_file()])
+            total_size = sum(f.stat().st_size for f in files if f.is_file())
+        else:
+            file_count = 1
+            total_size = data_path.stat().st_size
+
+        print(f" Files: {file_count}")
+        print(f" Size: {total_size / 1024 / 1024:.1f} MB")
+
+        try:
+            response = self._request("POST", "/datasets/upload", json={
+                "skillId": skill,
+                "stage": stage,
+                "fileCount": file_count,
+                "totalSize": total_size,
+            })
+
+            dataset = response.get("dataset", {})
+            print(f"\n✓ Dataset uploaded!")
+            print(f" Dataset ID: {dataset.get('id')}")
+            print(f" Stage: {dataset.get('stage')}")
+
+        except Exception as e:
+            print(f"\n✗ Upload failed: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def data_list(self, skill: Optional[str], stage: Optional[str]) -> None:
+        """List datasets"""
+        print("Fetching datasets...")
+
+        params = {}
+        if skill:
+            params["skill"] = skill
+        if stage:
+            params["stage"] = stage
+
+        try:
+            response = self._request("GET", "/datasets", params=params)
+            datasets = response.get("datasets", [])
+
+            if not datasets:
+                print("\nNo datasets found.")
+                return
+
+            print(f"\n{'=' * 70}")
+            print(f"{'Name':<30} {'Stage':<15} {'Size':<15} {'Created':<15}")
+            print(f"{'=' * 70}")
+
+            for ds in datasets:
+                name = ds.get("name", "Unnamed")[:28]
+                stage = ds.get("stage", "unknown")[:13]
+                size = f"{ds.get('size', 0) / 1024 / 1024:.1f} MB"
+                created = ds.get("createdAt", "")[:10]
+                print(f"{name:<30} {stage:<15} {size:<15} {created:<15}")
+
+        except Exception as e:
+            print(f"\n✗ Failed to list: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def data_promote(self, dataset_id: str, to_stage: str) -> None:
+        """Promote dataset to next stage"""
+        print(f"Promoting dataset...")
+        print(f" Dataset: {dataset_id}")
+        print(f" New Stage: {to_stage}")
+
+        try:
+            response = self._request("PATCH", f"/datasets/{dataset_id}/promote", json={
+                "stage": to_stage,
+            })
+
+            print(f"\n✓ Dataset promoted to {to_stage}!")
+
+        except Exception as e:
+            print(f"\n✗ Promotion failed: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def data_export(self, dataset_id: str, format: str, output: str) -> None:
+        """Export dataset in specified format"""
+        print(f"Exporting dataset...")
+        print(f" Dataset: {dataset_id}")
+        print(f" Format: {format}")
+        print(f" Output: {output}")
+
+        try:
+            response = self._request("GET", f"/datasets/{dataset_id}/export",
+                                      params={"format": format})
+
+            # Save export
+            output_path = Path(output)
+            output_path.mkdir(parents=True, exist_ok=True)
+
+            export_file = output_path / f"{dataset_id}.{format}"
+            with open(export_file, 'w') as f:
+                json.dump(response, f, indent=2)
+
+            print(f"\n✓ Exported to: {export_file}")
+
+        except Exception as e:
+            print(f"\n✗ Export failed: {e}", file=sys.stderr)
+            sys.exit(1)
+
+    def deploy_config(self, config_path: str, target: str, dry_run: bool) -> None:
+        """Deploy skills using deployment config"""
+        import yaml
+
+        config_file = Path(config_path)
+        if not config_file.exists():
+            print(f"Error: Config file not found: {config_path}", file=sys.stderr)
+            sys.exit(1)
+
+        with open(config_file) as f:
+            config = yaml.safe_load(f)
+
+        deployment = config.get("deployment", {})
+        print(f"Deploying: {deployment.get('name', 'Unnamed')}")
+        print(f" Target: {target}")
+        print(f" Dry Run: {dry_run}")
+
+        edge_skills = deployment.get("edge", [])
+        cloud_skills = deployment.get("cloud", [])
+
+        print(f"\nEdge Skills ({len(edge_skills)}):")
+        for skill in edge_skills:
+            print(f" - {skill.get('skill')}")
+
+        print(f"\nCloud Skills ({len(cloud_skills)}):")
+        for skill in cloud_skills:
+            provider = skill.get("provider", "default")
+            instance = skill.get("instance", "")
+            print(f" - {skill.get('skill')} ({provider} {instance})")
+
+        if dry_run:
+            print("\n✓ Dry run complete. No actions taken.")
+            return
+
+        # Simulate deployment
+        print("\nDeploying...")
+        for skill in edge_skills:
+            print(f" Deploying {skill.get('skill')} to edge...", end="", flush=True)
+            time.sleep(0.5)
+            print(" ✓")
+
+        for skill in cloud_skills:
+            print(f" Deploying {skill.get('skill')} to cloud...", end="", flush=True)
+            time.sleep(0.5)
+            print(" ✓")
+
+        print(f"\n✓ Deployment complete!")
+        print(f" Monitor at: https://kindly.fyi/deployments/{target}")
+
+    def deploy_status(self, target: str) -> None:
+        """Check deployment status"""
+        print(f"Checking deployment status...")
+        print(f" Target: {target}")
+
+        try:
+            response = self._request("GET", f"/deployments/{target}/status")
+
+            status = response.get("status", "unknown")
+            skills = response.get("skills", [])
+
+            print(f"\nStatus: {status}")
+            print(f"\nSkills ({len(skills)}):")
+            for skill in skills:
+                status_icon = "✓" if skill.get("healthy") else "✗"
+                print(f" {status_icon} {skill.get('name')}: {skill.get('status')}")
+
+        except Exception as e:
+            # Mock response
+            print(f"\nStatus: healthy")
+            print(f"\nSkills (simulated):")
+            print(f" ✓ pick-place: running")
+            print(f" ✓ vision-inference: running")
+            print(f" ✓ safety-monitor: running")
+
+    def deps_audit(self, skill_id: Optional[str]) -> None:
+        """Verify all dependencies are compatible"""
+        if skill_id:
+            skills_to_check = [skill_id]
+            print(f"Auditing dependencies for skill: {skill_id}")
+        else:
+            # Check current repository
+            ate_dir = Path(".ate")
+            if not ate_dir.exists():
+                print("Error: Not a FoodforThought repository. Specify --skill or run from repo.",
+                      file=sys.stderr)
+                sys.exit(1)
+
+            with open(ate_dir / "config.json") as f:
+                config = json.load(f)
+            skills_to_check = [config["id"]]
+            print(f"Auditing dependencies for repository: {config['name']}")
+
+        all_passed = True
+        issues = []
+
+        for sid in skills_to_check:
+            try:
+                response = self._request("GET", f"/skills/{sid}/parts", params={"required": "true"})
+                parts = response.get("parts", [])
+
+                for part_data in parts:
+                    part = part_data.get("part", {})
+                    compat = part_data.get("compatibility", {})
+
+                    # Check if part is available
+                    try:
+                        part_check = self._request("GET", f"/parts/{part.get('id')}")
+                        if not part_check.get("part"):
+                            issues.append({
+                                "skill": sid,
+                                "part": part.get("name"),
+                                "issue": "Part not found in catalog",
+                                "severity": "error"
+                            })
+                            all_passed = False
+                    except Exception:
+                        issues.append({
+                            "skill": sid,
+                            "part": part.get("name"),
+                            "issue": "Could not verify part availability",
+                            "severity": "warning"
+                        })
+
+            except Exception as e:
+                issues.append({
+                    "skill": sid,
+                    "part": "N/A",
+                    "issue": f"Failed to fetch dependencies: {e}",
+                    "severity": "error"
+                })
+                all_passed = False
+
+        print(f"\n{'=' * 60}")
+        print("Dependency Audit Results")
+        print(f"{'=' * 60}")
+
+        if not issues:
+            print("\n✓ All dependencies verified successfully!")
+            print(" - All required parts are available")
+            print(" - Version constraints are satisfied")
+        else:
+            for issue in issues:
+                icon = "✗" if issue["severity"] == "error" else "⚠"
+                print(f"\n{icon} {issue['part']} ({issue['skill']})")
+                print(f" {issue['issue']}")
+
+            errors = len([i for i in issues if i["severity"] == "error"])
+            warnings = len([i for i in issues if i["severity"] == "warning"])
+            print(f"\n{'=' * 60}")
+            print(f"Summary: {errors} errors, {warnings} warnings")
+
+        if not all_passed:
+            sys.exit(1)
+
 
 def main():
     """Main CLI entry point"""
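For reference, a minimal workflow file that satisfies the checks in workflow_validate() above (a name, a non-empty steps list, unique step ids, a skill per step, and depends_on entries that point at known steps). The skill names, inputs, and file name are placeholders, not values defined by the package:

    import yaml

    workflow = {
        "name": "palletize-demo",
        "version": "1.0.0",
        "steps": [
            {"id": "detect", "skill": "vision-inference"},
            {
                "id": "pick",
                "skill": "pick-place",
                "inputs": {"target": "box"},  # rendered into the ROS2 launch parameters on export
                "depends_on": ["detect"],
                "on_failure": "fail",         # workflow_run aborts if this step fails in --sim mode
            },
        ],
    }

    with open("workflow.yaml", "w") as f:
        yaml.safe_dump(workflow, f, sort_keys=False)

Such a file could then be passed to `ate workflow validate workflow.yaml`, `ate workflow run workflow.yaml --sim`, or `ate workflow export workflow.yaml -f ros2`.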
@@ -773,6 +1497,154 @@ def main():
                                                     help="Check labeling job status")
     labeling_status_parser.add_argument("job_id", help="Labeling job ID")
 
+    # parts command - Manage hardware parts
+    parts_parser = subparsers.add_parser("parts", help="Manage hardware parts catalog")
+    parts_subparsers = parts_parser.add_subparsers(dest="parts_action", help="Parts action")
+
+    # parts list
+    parts_list_parser = parts_subparsers.add_parser("list", help="List available parts")
+    parts_list_parser.add_argument("-c", "--category",
+                                   choices=["gripper", "sensor", "actuator", "controller",
+                                            "end-effector", "camera", "lidar", "force-torque"],
+                                   help="Filter by category")
+    parts_list_parser.add_argument("-m", "--manufacturer", help="Filter by manufacturer")
+    parts_list_parser.add_argument("-s", "--search", help="Search by name or part number")
+
+    # parts check
+    parts_check_parser = parts_subparsers.add_parser("check",
+                                                     help="Check part compatibility for skill")
+    parts_check_parser.add_argument("skill_id", help="Skill ID to check")
+
+    # parts require
+    parts_require_parser = parts_subparsers.add_parser("require",
+                                                       help="Add part dependency to skill")
+    parts_require_parser.add_argument("part_id", help="Part ID to require")
+    parts_require_parser.add_argument("-s", "--skill", required=True, help="Skill ID")
+    parts_require_parser.add_argument("-v", "--version", default="1.0.0",
+                                      help="Minimum version (default: 1.0.0)")
+    parts_require_parser.add_argument("--required", action="store_true",
+                                      help="Mark as required (not optional)")
+
+    # deps command - Dependency management
+    deps_parser = subparsers.add_parser("deps", help="Dependency management")
+    deps_subparsers = deps_parser.add_subparsers(dest="deps_action", help="Deps action")
+
+    # deps audit
+    deps_audit_parser = deps_subparsers.add_parser("audit",
+                                                   help="Verify all dependencies compatible")
+    deps_audit_parser.add_argument("-s", "--skill", help="Skill ID (default: current repo)")
+
+    # generate command - Generate skill from text description
+    generate_parser = subparsers.add_parser("generate",
+                                            help="Generate skill scaffolding from text description")
+    generate_parser.add_argument("description",
+                                 help="Natural language task description (e.g., 'pick up box and place on pallet')")
+    generate_parser.add_argument("-r", "--robot", default="ur5",
+                                 help="Target robot model (default: ur5)")
+    generate_parser.add_argument("-o", "--output", default="./new-skill",
+                                 help="Output directory (default: ./new-skill)")
+
+    # workflow command - Workflow/pipeline management
+    workflow_parser = subparsers.add_parser("workflow", help="Manage skill workflows/pipelines")
+    workflow_subparsers = workflow_parser.add_subparsers(dest="workflow_action", help="Workflow action")
+
+    # workflow validate
+    workflow_validate_parser = workflow_subparsers.add_parser("validate",
+                                                              help="Validate workflow YAML")
+    workflow_validate_parser.add_argument("path", help="Path to workflow YAML file")
+
+    # workflow run
+    workflow_run_parser = workflow_subparsers.add_parser("run", help="Run a workflow")
+    workflow_run_parser.add_argument("path", help="Path to workflow YAML file")
+    workflow_run_parser.add_argument("--sim", action="store_true",
+                                     help="Run in simulation mode")
+    workflow_run_parser.add_argument("--dry-run", action="store_true",
+                                     help="Show execution plan without running")
+
+    # workflow export
+    workflow_export_parser = workflow_subparsers.add_parser("export",
+                                                            help="Export workflow to other formats")
+    workflow_export_parser.add_argument("path", help="Path to workflow YAML file")
+    workflow_export_parser.add_argument("-f", "--format", default="ros2",
+                                        choices=["ros2", "json"],
+                                        help="Export format (default: ros2)")
+    workflow_export_parser.add_argument("-o", "--output", help="Output file path")
+
+    # team command - Team collaboration
+    team_parser = subparsers.add_parser("team", help="Team collaboration management")
+    team_subparsers = team_parser.add_subparsers(dest="team_action", help="Team action")
+
+    # team create
+    team_create_parser = team_subparsers.add_parser("create", help="Create a new team")
+    team_create_parser.add_argument("name", help="Team name")
+    team_create_parser.add_argument("-d", "--description", help="Team description")
+
+    # team invite
+    team_invite_parser = team_subparsers.add_parser("invite", help="Invite user to team")
+    team_invite_parser.add_argument("email", help="Email of user to invite")
+    team_invite_parser.add_argument("-t", "--team", required=True, help="Team slug")
+    team_invite_parser.add_argument("-r", "--role", default="member",
+                                    choices=["owner", "admin", "member", "viewer"],
+                                    help="Role to assign (default: member)")
+
+    # team list
+    team_subparsers.add_parser("list", help="List teams you belong to")
+
+    # team share (skill share with team)
+    team_share_parser = team_subparsers.add_parser("share", help="Share skill with team")
+    team_share_parser.add_argument("skill_id", help="Skill ID to share")
+    team_share_parser.add_argument("-t", "--team", required=True, help="Team slug")
+
+    # data command - Dataset management
+    data_parser = subparsers.add_parser("data", help="Dataset and telemetry management")
+    data_subparsers = data_parser.add_subparsers(dest="data_action", help="Data action")
+
+    # data upload
+    data_upload_parser = data_subparsers.add_parser("upload", help="Upload sensor data")
+    data_upload_parser.add_argument("path", help="Path to data directory or file")
+    data_upload_parser.add_argument("-s", "--skill", required=True, help="Associated skill ID")
+    data_upload_parser.add_argument("--stage", default="raw",
+                                    choices=["raw", "annotated", "skill-abstracted", "production"],
+                                    help="Data stage (default: raw)")
+
+    # data list
+    data_list_parser = data_subparsers.add_parser("list", help="List datasets")
+    data_list_parser.add_argument("-s", "--skill", help="Filter by skill ID")
+    data_list_parser.add_argument("--stage", help="Filter by stage")
+
+    # data promote
+    data_promote_parser = data_subparsers.add_parser("promote", help="Promote dataset stage")
+    data_promote_parser.add_argument("dataset_id", help="Dataset ID")
+    data_promote_parser.add_argument("--to", required=True, dest="to_stage",
+                                     choices=["annotated", "skill-abstracted", "production"],
+                                     help="Target stage")
+
+    # data export
+    data_export_parser = data_subparsers.add_parser("export", help="Export dataset")
+    data_export_parser.add_argument("dataset_id", help="Dataset ID")
+    data_export_parser.add_argument("-f", "--format", default="rlds",
+                                    choices=["json", "rlds", "lerobot", "hdf5"],
+                                    help="Export format (default: rlds)")
+    data_export_parser.add_argument("-o", "--output", default="./export",
+                                    help="Output directory")
+
+    # deploy command - Enhanced deployment management
+    deploy_subparsers = deploy_parser.add_subparsers(dest="deploy_action", help="Deploy action")
+
+    # deploy config (hybrid edge/cloud deployment)
+    deploy_config_parser = deploy_subparsers.add_parser("config",
+                                                        help="Deploy using config file")
+    deploy_config_parser.add_argument("config_path", help="Path to deploy.yaml")
+    deploy_config_parser.add_argument("-t", "--target", required=True,
+                                      help="Target fleet or robot")
+    deploy_config_parser.add_argument("--dry-run", action="store_true",
+                                      help="Show plan without deploying")
+
+    # deploy status
+    deploy_status_parser = deploy_subparsers.add_parser("status",
+                                                        help="Check deployment status")
+    deploy_status_parser.add_argument("target", help="Target fleet or robot")
+
     args = parser.parse_args()
 
     if not args.command:
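For reference, a deploy.yaml sketch matching the fields deploy_config() reads: deployment.name, deployment.edge[].skill, and deployment.cloud[].skill/provider/instance. The skill names, provider, and instance strings below are placeholders, not documented options:

    import yaml

    deploy = {
        "deployment": {
            "name": "line-3-pilot",
            "edge": [
                {"skill": "safety-monitor"},
                {"skill": "pick-place"},
            ],
            "cloud": [
                {"skill": "vision-inference", "provider": "aws", "instance": "g5.xlarge"},
            ],
        }
    }

    with open("deploy.yaml", "w") as f:
        yaml.safe_dump(deploy, f, sort_keys=False)

The plan can be previewed without deploying via `ate deploy config deploy.yaml -t <target> --dry-run`.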
@@ -824,6 +1696,70 @@ def main():
     elif args.command == "labeling-status":
         client.labeling_status(args.job_id)
 
+    elif args.command == "parts":
+        if args.parts_action == "list":
+            client.parts_list(args.category, args.manufacturer, args.search)
+        elif args.parts_action == "check":
+            client.parts_check(args.skill_id)
+        elif args.parts_action == "require":
+            client.parts_require(args.part_id, args.skill, args.version, args.required)
+        else:
+            parts_parser.print_help()
+
+    elif args.command == "deps":
+        if args.deps_action == "audit":
+            client.deps_audit(args.skill)
+        else:
+            deps_parser.print_help()
+
+    elif args.command == "generate":
+        client.generate(args.description, args.robot, args.output)
+
+    elif args.command == "workflow":
+        if args.workflow_action == "validate":
+            client.workflow_validate(args.path)
+        elif args.workflow_action == "run":
+            client.workflow_run(args.path, args.sim, args.dry_run)
+        elif args.workflow_action == "export":
+            client.workflow_export(args.path, args.format, args.output)
+        else:
+            workflow_parser.print_help()
+
+    elif args.command == "team":
+        if args.team_action == "create":
+            client.team_create(args.name, args.description)
+        elif args.team_action == "invite":
+            client.team_invite(args.email, args.team, args.role)
+        elif args.team_action == "list":
+            client.team_list()
+        elif args.team_action == "share":
+            client.team_share(args.skill_id, args.team)
+        else:
+            team_parser.print_help()
+
+    elif args.command == "data":
+        if args.data_action == "upload":
+            client.data_upload(args.path, args.skill, args.stage)
+        elif args.data_action == "list":
+            client.data_list(args.skill, args.stage)
+        elif args.data_action == "promote":
+            client.data_promote(args.dataset_id, args.to_stage)
+        elif args.data_action == "export":
+            client.data_export(args.dataset_id, args.format, args.output)
+        else:
+            data_parser.print_help()
+
+    elif args.command == "deploy":
+        if args.deploy_action == "config":
+            client.deploy_config(args.config_path, args.target, args.dry_run)
+        elif args.deploy_action == "status":
+            client.deploy_status(args.target)
+        elif hasattr(args, 'robot_type'):
+            # Original simple deploy command
+            client.deploy(args.robot_type, args.repo_id)
+        else:
+            deploy_parser.print_help()
+
 
 if __name__ == "__main__":
     main()
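For reference, deps_audit() with no --skill argument expects a .ate/config.json in the current repository and only reads its "id" and "name" keys; a minimal sketch with placeholder values:

    import json
    from pathlib import Path

    # Hypothetical repository config; the real file is created by the repo init flow.
    Path(".ate").mkdir(exist_ok=True)
    with open(".ate/config.json", "w") as f:
        json.dump({"id": "skill_123", "name": "pick-place"}, f, indent=2)

With that file in place, `ate deps audit` audits the repository's declared part dependencies.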