foodforthought-cli 0.2.4__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +1 -1
- ate/behaviors/__init__.py +88 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +610 -54
- ate/drivers/__init__.py +27 -0
- ate/drivers/mechdog.py +606 -0
- ate/interfaces/__init__.py +171 -0
- ate/interfaces/base.py +271 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/types.py +371 -0
- ate/mcp_server.py +387 -0
- ate/recording/__init__.py +44 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +405 -0
- ate/recording/upload.py +304 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +79 -0
- ate/robot/calibration.py +583 -0
- ate/robot/commands.py +3603 -0
- ate/robot/discovery.py +339 -0
- ate/robot/introspection.py +330 -0
- ate/robot/manager.py +270 -0
- ate/robot/profiles.py +275 -0
- ate/robot/registry.py +319 -0
- ate/robot/skill_upload.py +393 -0
- ate/robot/visual_labeler.py +1039 -0
- {foodforthought_cli-0.2.4.dist-info → foodforthought_cli-0.2.8.dist-info}/METADATA +9 -1
- {foodforthought_cli-0.2.4.dist-info → foodforthought_cli-0.2.8.dist-info}/RECORD +37 -8
- {foodforthought_cli-0.2.4.dist-info → foodforthought_cli-0.2.8.dist-info}/WHEEL +0 -0
- {foodforthought_cli-0.2.4.dist-info → foodforthought_cli-0.2.8.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.4.dist-info → foodforthought_cli-0.2.8.dist-info}/top_level.txt +0 -0
ate/robot/registry.py
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Registry of known robot types and their configurations.
|
|
3
|
+
|
|
4
|
+
This is how we know what robots are supported and how to configure them.
|
|
5
|
+
Community contributions can add new robots here.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from typing import Dict, List, Set, Optional, Type, Any
|
|
10
|
+
from enum import Enum, auto
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ConnectionType(Enum):
    """How to connect to the robot.

    Member order is meaningful: values come from auto(), so append new
    transports at the end rather than inserting in the middle.
    """

    #: USB serial (pyserial)
    SERIAL = auto()
    #: WiFi/HTTP
    WIFI = auto()
    #: ROS2 topics/services
    ROS2 = auto()
    #: Bluetooth serial
    BLUETOOTH = auto()
    #: Direct ethernet
    ETHERNET = auto()
    #: No hardware, simulated
    SIMULATION = auto()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class RobotType:
    """Definition of a known robot type.

    A static configuration record: it describes how to reach a robot and
    what it can do.  The concrete driver is looked up at runtime from
    ``driver_module``/``driver_class``, so listing a robot here carries no
    import cost.
    """
    # NOTE: field order is part of the dataclass contract (positional
    # construction); do not reorder.
    id: str              # Unique identifier
    name: str            # Display name
    manufacturer: str    # Who makes it
    archetype: str       # quadruped, humanoid, arm, etc.
    description: str     # Human description

    # Connection
    connection_types: Set[ConnectionType] = field(default_factory=set)
    default_connection: Optional[ConnectionType] = None

    # Serial settings (only meaningful when SERIAL is among connection_types)
    serial_patterns: List[str] = field(default_factory=list)  # USB patterns to match
    baud_rate: int = 115200

    # Network settings
    default_ports: Dict[str, int] = field(default_factory=dict)  # camera_port, stream_port, etc.
    mdns_service: Optional[str] = None  # mDNS service type to discover

    # Capabilities always present vs. those depending on installed accessories
    capabilities: Set[str] = field(default_factory=set)
    optional_capabilities: Set[str] = field(default_factory=set)

    # Driver info (resolved lazily elsewhere)
    driver_module: str = ""  # Python module path
    driver_class: str = ""   # Class name
    config_class: str = ""   # Config class name

    # Documentation
    setup_url: Optional[str] = None
    image_url: Optional[str] = None
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# Registry of known robot types, keyed by RobotType.id.
KNOWN_ROBOTS: Dict[str, RobotType] = {}


def register_robot(robot: RobotType) -> None:
    """Register a robot type (re-registering an id overwrites the entry)."""
    KNOWN_ROBOTS[robot.id] = robot


def get_robot_info(robot_id: str) -> Optional[RobotType]:
    """Get information about a robot type, or None if the id is unknown."""
    return KNOWN_ROBOTS.get(robot_id)


def list_robot_types() -> List[RobotType]:
    """List all known robot types."""
    return [*KNOWN_ROBOTS.values()]


def find_by_archetype(archetype: str) -> List[RobotType]:
    """Find robots by archetype (quadruped, humanoid, etc.)."""
    matches = []
    for robot in KNOWN_ROBOTS.values():
        if robot.archetype == archetype:
            matches.append(robot)
    return matches
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# =============================================================================
# Built-in robot definitions
# =============================================================================

# Static catalog of first-party robot definitions.  Entries are registered
# in declaration order; the drivers named here are resolved lazily from
# driver_module elsewhere, so unsupported hardware costs nothing to list.
_BUILTIN_ROBOT_TYPES = (
    # HiWonder MechDog: serial-first ESP32 quadruped.
    RobotType(
        id="hiwonder_mechdog",
        name="MechDog",
        manufacturer="HiWonder",
        archetype="quadruped",
        description="12 DOF quadruped robot with optional arm and camera. ESP32-based with MicroPython.",
        connection_types={ConnectionType.SERIAL, ConnectionType.WIFI},
        default_connection=ConnectionType.SERIAL,
        # macOS / Linux / Windows device-name patterns, in that order.
        serial_patterns=["/dev/cu.usbserial-*", "/dev/ttyUSB*", "COM*"],
        baud_rate=115200,
        default_ports={"camera_port": 80, "camera_stream_port": 81},
        capabilities={"quadruped_locomotion", "body_pose", "imu"},
        optional_capabilities={"camera", "arm", "gripper"},
        driver_module="ate.drivers.mechdog",
        driver_class="MechDogDriver",
        config_class="MechDogConfig",
        setup_url="https://docs.kindly.fyi/robots/mechdog",
    ),
    # Unitree Go1: network-connected quadruped.
    RobotType(
        id="unitree_go1",
        name="Go1",
        manufacturer="Unitree",
        archetype="quadruped",
        description="High-performance quadruped robot with cameras and optional arm.",
        connection_types={ConnectionType.WIFI, ConnectionType.ETHERNET},
        default_connection=ConnectionType.WIFI,
        default_ports={"control_port": 8082, "camera_port": 8080},
        capabilities={"quadruped_locomotion", "body_pose", "imu", "camera", "depth_camera"},
        optional_capabilities={"arm", "gripper", "lidar"},
        driver_module="ate.drivers.go1",
        driver_class="Go1Driver",
        config_class="Go1Config",
    ),
    # Unitree Go2: ROS2-first successor to the Go1.
    RobotType(
        id="unitree_go2",
        name="Go2",
        manufacturer="Unitree",
        archetype="quadruped",
        description="Advanced quadruped with AI capabilities, LiDAR, and ROS2 support.",
        connection_types={ConnectionType.ROS2, ConnectionType.WIFI},
        default_connection=ConnectionType.ROS2,
        capabilities={"quadruped_locomotion", "body_pose", "imu", "camera", "depth_camera", "lidar"},
        optional_capabilities={"arm", "gripper"},
        driver_module="ate.drivers.go2",
        driver_class="Go2Driver",
        config_class="Go2Config",
    ),
    # Boston Dynamics Spot: industrial quadruped.
    RobotType(
        id="boston_dynamics_spot",
        name="Spot",
        manufacturer="Boston Dynamics",
        archetype="quadruped",
        description="Industrial quadruped robot with arm, cameras, and high autonomy.",
        connection_types={ConnectionType.WIFI, ConnectionType.ETHERNET},
        default_connection=ConnectionType.WIFI,
        capabilities={"quadruped_locomotion", "body_pose", "imu", "camera", "depth_camera"},
        optional_capabilities={"arm", "gripper", "lidar"},
        driver_module="ate.drivers.spot",
        driver_class="SpotDriver",
        config_class="SpotConfig",
    ),
    # Generic ROS2 robot: nothing guaranteed up front.
    RobotType(
        id="ros2_generic",
        name="Generic ROS2 Robot",
        manufacturer="Various",
        archetype="custom",
        description="Any robot accessible via ROS2 topics and services.",
        connection_types={ConnectionType.ROS2},
        default_connection=ConnectionType.ROS2,
        capabilities=set(),  # Discovered from ROS2 topics
        optional_capabilities={
            "quadruped_locomotion",
            "bipedal_locomotion",
            "wheeled_locomotion",
            "arm",
            "gripper",
            "camera",
            "depth_camera",
            "lidar",
            "imu",
        },
        driver_module="ate.drivers.ros2_bridge",
        driver_class="ROS2Bridge",
        config_class="ROS2Config",
    ),
    # Simulation robot: testing without hardware.
    RobotType(
        id="simulation",
        name="Simulated Robot",
        manufacturer="FoodforThought",
        archetype="custom",
        description="Software simulation for testing without hardware.",
        connection_types={ConnectionType.SIMULATION},
        default_connection=ConnectionType.SIMULATION,
        capabilities={"quadruped_locomotion", "body_pose", "camera"},
        optional_capabilities={"arm", "gripper", "lidar"},
        driver_module="ate.drivers.simulation",
        driver_class="SimulationDriver",
        config_class="SimulationConfig",
    ),
)

for _robot_type in _BUILTIN_ROBOT_TYPES:
    register_robot(_robot_type)
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
class RobotRegistry:
    """
    Registry interface for robot types.

    Provides methods to find and filter known robots.  Each method is a
    thin wrapper over the module-level KNOWN_ROBOTS mapping (delegating to
    the module helpers where one exists), so the class and the free
    functions always agree.
    """

    @staticmethod
    def list_all() -> List[RobotType]:
        """List all known robot types."""
        return list_robot_types()

    @staticmethod
    def get(robot_id: str) -> Optional[RobotType]:
        """Get robot type by ID."""
        return get_robot_info(robot_id)

    @staticmethod
    def find_by_archetype(archetype: str) -> List[RobotType]:
        """Find robots by archetype."""
        # Resolves to the module-level helper of the same name.
        return find_by_archetype(archetype)

    @staticmethod
    def find_by_capability(capability: str) -> List[RobotType]:
        """Find robots that have a specific capability (required or optional)."""
        matches = []
        for robot in KNOWN_ROBOTS.values():
            if capability in robot.capabilities:
                matches.append(robot)
            elif capability in robot.optional_capabilities:
                matches.append(robot)
        return matches

    @staticmethod
    def find_by_connection(conn_type: ConnectionType) -> List[RobotType]:
        """Find robots that support a connection type."""
        matches = []
        for robot in KNOWN_ROBOTS.values():
            if conn_type in robot.connection_types:
                matches.append(robot)
        return matches

    @staticmethod
    def register(robot: RobotType) -> None:
        """Register a new robot type."""
        register_robot(robot)
|
@@ -0,0 +1,393 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Upload skill libraries and calibrations to FoodforThought.
|
|
3
|
+
|
|
4
|
+
Creates artifacts with proper lineage:
|
|
5
|
+
- Raw: pose images from dual cameras
|
|
6
|
+
- Processed: servo calibration data
|
|
7
|
+
- Labeled: named poses with semantic labels
|
|
8
|
+
- Skill: generated Python skill code
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import json
|
|
13
|
+
import base64
|
|
14
|
+
import requests
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from typing import Optional, Dict, Any, List
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
from .calibration import RobotCalibration, load_calibration
|
|
20
|
+
from .visual_labeler import SkillLibrary, load_skill_library
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# API configuration
# BASE_URL may be redirected (e.g. to a staging server) via ATE_API_URL.
BASE_URL = os.getenv("ATE_API_URL", "https://www.kindly.fyi/api")
# Token file consulted by SkillLibraryUploader; populated by 'ate login'.
CONFIG_FILE = Path.home() / ".ate" / "config.json"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class SkillLibraryUploader:
    """
    Uploads skill libraries to FoodforThought.

    Creates a complete data lineage:
    raw (images) → processed (calibration) → labeled (poses) → skill (code)
    """

    def __init__(
        self,
        base_url: str = BASE_URL,
        api_key: Optional[str] = None,
    ):
        """Resolve an auth token and prepare the shared request headers.

        Token precedence: explicit ``api_key`` argument, then the
        ``ATE_API_KEY`` environment variable, then the token stored in
        ``~/.ate/config.json`` by the device auth flow.

        Raises:
            ValueError: if no token can be found by any of the three means.
        """
        self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
        }

        token = None

        # Load from config file (device auth flow)
        if CONFIG_FILE.exists():
            try:
                with open(CONFIG_FILE) as f:
                    config = json.load(f)
                    token = config.get("access_token") or config.get("api_key")
            except Exception:
                # An unreadable/corrupt config is treated as "not logged
                # in"; the api_key/env fallbacks below may still apply.
                pass

        # Override with explicit api_key or env var
        if api_key:
            token = api_key
        elif os.getenv("ATE_API_KEY"):
            token = os.getenv("ATE_API_KEY")

        if token:
            self.headers["Authorization"] = f"Bearer {token}"
        else:
            raise ValueError(
                "Not logged in. Run 'ate login' to authenticate."
            )

    def _request(self, method: str, endpoint: str, **kwargs) -> Dict:
        """Make HTTP request to API.

        Raises requests.HTTPError on non-2xx status; returns the parsed
        JSON body otherwise.
        """
        url = f"{self.base_url}{endpoint}"
        response = requests.request(method, url, headers=self.headers, **kwargs)
        response.raise_for_status()
        return response.json()

    def get_or_create_project(self, name: str, description: str = "") -> str:
        """Get existing project or create a new one.

        Returns the project id.  NOTE(review): annotated -> str, but the
        final ``.get()`` chain can yield None if the create response lacks
        an id — confirm against the API contract.
        """
        # Try to find existing project
        try:
            projects = self._request("GET", "/projects")
            for project in projects.get("projects", []):
                if project.get("name") == name:
                    return project["id"]
        except Exception:
            # A failed lookup falls through to creation, so a transient
            # API error here can create a duplicate-named project.
            pass

        # Create new project
        response = self._request("POST", "/projects", json={
            "name": name,
            "description": description or f"Robot skill library for {name}",
            "visibility": "private",
        })
        return response.get("project", {}).get("id")

    def upload_skill_library(
        self,
        library: SkillLibrary,
        calibration: RobotCalibration,
        project_id: Optional[str] = None,
        include_images: bool = True,
    ) -> Dict[str, Any]:
        """
        Upload a complete skill library to FoodforThought.

        Creates artifacts with lineage:
        1. Raw: pose images
        2. Processed: servo calibration
        3. Labeled: poses with semantic labels
        4. Skill: generated Python code

        Args:
            library: SkillLibrary to upload
            calibration: RobotCalibration with servo data
            project_id: Optional project ID (will create if not provided)
            include_images: Whether to upload pose images

        Returns:
            Dict with artifact IDs and URLs
        """
        result = {
            "project_id": None,
            "artifacts": [],
            "lineage": [],  # NOTE(review): never populated below — confirm intended
        }

        # Get or create project
        if not project_id:
            project_id = self.get_or_create_project(
                f"{library.robot_name}_skills",
                f"Skill library for {library.robot_model}",
            )
        result["project_id"] = project_id

        # 1. Upload pose images as raw artifacts
        image_artifact_ids = []
        if include_images:
            images_dir = Path.home() / ".ate" / "skill_images" / library.robot_name
            if images_dir.exists():
                for img_path in images_dir.glob("*.jpg"):
                    try:
                        artifact_id = self._upload_image_artifact(
                            project_id=project_id,
                            image_path=img_path,
                            robot_name=library.robot_name,
                        )
                        image_artifact_ids.append(artifact_id)
                        result["artifacts"].append({
                            "id": artifact_id,
                            "stage": "raw",
                            "name": img_path.stem,
                        })
                    except Exception as e:
                        # Best-effort: one failed image should not abort
                        # the rest of the upload.
                        print(f"Warning: Failed to upload {img_path.name}: {e}")

        # 2. Upload calibration as processed artifact
        calibration_artifact_id = self._upload_calibration_artifact(
            project_id=project_id,
            calibration=calibration,
            parent_ids=image_artifact_ids[:5] if image_artifact_ids else None,  # Link to some images
        )
        result["artifacts"].append({
            "id": calibration_artifact_id,
            "stage": "processed",
            "name": f"{library.robot_name}_calibration",
        })

        # 3. Upload poses as labeled artifacts
        pose_artifact_ids = []
        for pose_name, pose in calibration.poses.items():
            pose_artifact_id = self._upload_pose_artifact(
                project_id=project_id,
                pose_name=pose_name,
                pose=pose,
                calibration=calibration,
                parent_id=calibration_artifact_id,
            )
            pose_artifact_ids.append(pose_artifact_id)
            result["artifacts"].append({
                "id": pose_artifact_id,
                "stage": "labeled",
                "name": pose_name,
            })

        # 4. Upload skills as skill artifacts
        for action_name, action in library.actions.items():
            skill_artifact_id = self._upload_skill_artifact(
                project_id=project_id,
                action_name=action_name,
                action=action,
                library=library,
                calibration=calibration,
                trained_on=pose_artifact_ids,  # Skills trained on poses
            )
            result["artifacts"].append({
                "id": skill_artifact_id,
                "stage": "skill",
                "name": action_name,
            })

        return result

    def _upload_image_artifact(
        self,
        project_id: str,
        image_path: Path,
        robot_name: str,
    ) -> str:
        """Upload an image as a raw artifact."""
        # Read and encode image
        with open(image_path, "rb") as f:
            image_data = base64.b64encode(f.read()).decode("utf-8")

        # NOTE(review): image_data is never placed in the payload below, so
        # only metadata — not the image bytes — reaches the API.  Confirm
        # which request field the /artifacts endpoint expects for content.
        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": image_path.stem,
            "stage": "raw",
            "type": "dataset",
            "metadata": {
                "robot_name": robot_name,
                "image_type": "pose_capture",
                "filename": image_path.name,
                "format": "jpeg",
                "source": "visual_labeler",
                "captured_at": datetime.now().isoformat(),
            },
        })
        return response.get("artifact", {}).get("id")

    def _upload_calibration_artifact(
        self,
        project_id: str,
        calibration: RobotCalibration,
        parent_ids: Optional[List[str]] = None,
    ) -> str:
        """Upload servo calibration as a processed artifact."""
        # Serialize calibration
        calibration_data = {
            "robot_model": calibration.robot_model,
            "robot_name": calibration.robot_name,
            "serial_port": calibration.serial_port,
            "baud_rate": calibration.baud_rate,
            "camera_url": calibration.camera_url,
            "servos": {
                str(sid): {
                    "servo_id": s.servo_id,
                    "name": s.name,
                    "joint_type": s.joint_type.value,
                    "min_value": s.min_value,
                    "max_value": s.max_value,
                    "center_value": s.center_value,
                    "positions": s.positions,
                }
                for sid, s in calibration.servos.items()
            },
            "calibrated_at": calibration.calibrated_at,
        }

        # NOTE(review): parent_ids is accepted but never sent in the payload,
        # so the raw-image lineage link is silently dropped here — confirm
        # the API field name and wire it through.
        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": f"{calibration.robot_name}_calibration",
            "stage": "processed",
            "type": "dataset",
            "metadata": {
                "robot_model": calibration.robot_model,
                "robot_name": calibration.robot_name,
                "servo_count": len(calibration.servos),
                "pose_count": len(calibration.poses),
                "calibration_data": calibration_data,
                "source": "visual_labeler",
            },
        })
        return response.get("artifact", {}).get("id")

    def _upload_pose_artifact(
        self,
        project_id: str,
        pose_name: str,
        pose,
        calibration: RobotCalibration,
        parent_id: str,
    ) -> str:
        """Upload a named pose as a labeled artifact."""
        # Build pose data with semantic labels
        servo_labels = {}
        for sid, value in pose.servo_positions.items():
            servo_cal = calibration.servos.get(sid)
            if servo_cal:
                servo_labels[str(sid)] = {
                    "name": servo_cal.name,
                    "joint_type": servo_cal.joint_type.value,
                    "value": value,
                    # max(1, ...) guards against division by zero when
                    # min_value == max_value.
                    "normalized": (value - servo_cal.min_value) / max(1, servo_cal.max_value - servo_cal.min_value),
                }

        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": pose_name,
            "stage": "labeled",
            "type": "dataset",
            "parentArtifactId": parent_id,
            "transformationType": "labeling",
            "transformationNotes": f"Pose '{pose_name}' labeled from calibration",
            "metadata": {
                "robot_name": calibration.robot_name,
                "pose_name": pose_name,
                "description": pose.description,
                "servo_positions": pose.servo_positions,
                "servo_labels": servo_labels,
                "transition_time_ms": pose.transition_time_ms,
                "image_path": pose.image_path,
                "source": "visual_labeler",
            },
        })
        return response.get("artifact", {}).get("id")

    def _upload_skill_artifact(
        self,
        project_id: str,
        action_name: str,
        action,
        library: SkillLibrary,
        calibration: RobotCalibration,
        trained_on: List[str],
    ) -> str:
        """Upload a generated skill as a skill artifact."""
        # Generate skill code
        # Imported here rather than at module level — presumably to avoid a
        # circular import with visual_labeler; confirm before hoisting.
        from .visual_labeler import DualCameraLabeler
        labeler = DualCameraLabeler(
            serial_port=calibration.serial_port or "",
            robot_name=library.robot_name,
            robot_model=library.robot_model,
        )
        labeler.calibrator.calibration = calibration
        skill_code = labeler.generate_skill_code(action)

        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": action_name,
            "stage": "skill",
            "type": "code",
            "trainedOn": trained_on,
            "trainingNotes": f"Skill '{action_name}' generated from {len(action.steps)} poses",
            "metadata": {
                "robot_model": library.robot_model,
                "robot_name": library.robot_name,
                "action_type": action.action_type.value,
                "description": action.description,
                "steps": [s.to_dict() for s in action.steps],
                "skill_code": skill_code,
                "tags": action.tags,
                "source": "visual_labeler",
                "generated_at": datetime.now().isoformat(),
            },
        })
        return response.get("artifact", {}).get("id")
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
def upload_skill_library(
    robot_name: str,
    project_id: Optional[str] = None,
    include_images: bool = True,
    api_key: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Convenience function to upload a skill library.

    Args:
        robot_name: Name of the robot (matches calibration/library filenames)
        project_id: Optional project ID
        include_images: Whether to upload pose images
        api_key: Optional API key

    Returns:
        Dict with upload results
    """
    # Both local files must exist before we talk to the API at all.
    calibration = load_calibration(robot_name)
    if not calibration:
        raise ValueError(f"No calibration found for: {robot_name}")

    library = load_skill_library(robot_name)
    if not library:
        raise ValueError(f"No skill library found for: {robot_name}")

    # Constructing the uploader resolves auth (may raise ValueError).
    return SkillLibraryUploader(api_key=api_key).upload_skill_library(
        library=library,
        calibration=calibration,
        project_id=project_id,
        include_images=include_images,
    )