foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,675 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Upload skill libraries and calibrations to FoodforThought.
|
|
3
|
+
|
|
4
|
+
Creates artifacts with proper lineage:
|
|
5
|
+
- Raw: pose images from dual cameras
|
|
6
|
+
- Processed: servo calibration data
|
|
7
|
+
- Labeled: named poses with semantic labels
|
|
8
|
+
- Skill: generated Python skill code
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import json
|
|
13
|
+
import base64
|
|
14
|
+
import requests
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from typing import Optional, Dict, Any, List
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
from .calibration import RobotCalibration, load_calibration
|
|
20
|
+
from .visual_labeler import SkillLibrary, load_skill_library
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# API configuration
|
|
24
|
+
BASE_URL = os.getenv("ATE_API_URL", "https://www.kindly.fyi/api")
|
|
25
|
+
CONFIG_FILE = Path.home() / ".ate" / "config.json"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class APIError(Exception):
    """
    Custom exception for API errors with helpful context.

    Carries the HTTP status code, the parsed response body, and a list of
    actionable suggestions, all folded into a readable multi-line message.
    """

    def __init__(
        self,
        message: str,
        status_code: int = 0,
        response_body: Optional[Dict] = None,
        suggestions: Optional[List[str]] = None,
    ):
        self.status_code = status_code
        self.response_body = response_body or {}
        self.suggestions = suggestions or []

        # Assemble the final message: base text, then an optional detail
        # line extracted from the body, then any suggestions as bullets.
        lines = [message]

        # First truthy value wins, mirroring common API error field names.
        detail = None
        for key in ("error", "message", "detail", "errors"):
            candidate = self.response_body.get(key)
            if candidate:
                detail = candidate
                break
        if detail:
            if isinstance(detail, list):
                detail = "; ".join(str(item) for item in detail)
            lines.append(f"Details: {detail}")

        if self.suggestions:
            lines.append("Suggestions:")
            lines.extend(f" - {tip}" for tip in self.suggestions)

        super().__init__("\n".join(lines))
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def parse_api_error(response: requests.Response, endpoint: str) -> APIError:
    """
    Parse an API error response and return a helpful APIError.

    Analyzes common error patterns (400/401/403/404/422/5xx) and attaches
    actionable suggestions tailored to the endpoint that was called.

    Args:
        response: The failed (non-2xx) HTTP response.
        endpoint: The endpoint path that was requested; used to tailor
            suggestions (e.g. project vs. calibration endpoints).

    Returns:
        An APIError ready to be raised by the caller (not raised here).
    """
    status = response.status_code
    suggestions = []

    # The body may not be JSON at all (e.g. an HTML error page); keep a
    # truncated raw snippet so the user still sees something useful.
    try:
        body = response.json()
    except Exception:
        body = {"raw": response.text[:500] if response.text else "No response body"}

    # Some APIs return a bare JSON list/string on error; normalize to a dict
    # so the .get() calls below and APIError's detail extraction can't crash.
    if not isinstance(body, dict):
        body = {"raw": body}

    # Common error patterns and suggestions
    if status == 400:
        if "project" in endpoint:
            suggestions.append("Use --project-id to specify an existing project")
            suggestions.append("Check project name doesn't contain special characters")
        elif "calibration" in endpoint:
            suggestions.append("Ensure calibration file contains required fields: name, version, method")
            suggestions.append("Check robot slug exists: ate robot identify --search <robot-name>")

    elif status == 401:
        suggestions.append("Session may have expired. Run: ate login")
        suggestions.append("Check ATE_API_KEY environment variable if using API key auth")

    elif status == 403:
        suggestions.append("You may not have permission for this operation")
        suggestions.append("Check if the project/resource is owned by another user")

    elif status == 404:
        error_text = str(body).lower()

        if "robot" in error_text and "not found" in error_text:
            suggestions.append("Robot slug may not exist in the database")
            suggestions.append("Search for robots: ate robot identify --search <partial-name>")
            suggestions.append("List available robots: ate robot list")

        elif "project" in endpoint:
            suggestions.append("Project ID may be invalid or deleted")
            suggestions.append("List your projects to find valid IDs")

    elif status == 422:
        suggestions.append("Request data validation failed")
        if body.get("errors"):
            for field, errors in body.get("errors", {}).items():
                suggestions.append(f"Field '{field}': {errors}")

    elif status >= 500:
        suggestions.append("Server error - this is likely temporary")
        suggestions.append("Try again in a few minutes")
        suggestions.append("Report persistent issues at: https://github.com/kindlyrobotics/monorepo/issues")

    return APIError(
        message=f"API request failed: {status} {response.reason} for {endpoint}",
        status_code=status,
        response_body=body,
        suggestions=suggestions,
    )
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
class SkillLibraryUploader:
    """
    Uploads skill libraries to FoodforThought.

    Creates a complete data lineage:
    raw (images) → processed (calibration) → labeled (poses) → skill (code)
    """

    # Default request timeout in seconds. Applied by _request() unless the
    # caller passes an explicit timeout, so a dead server can't hang the CLI.
    DEFAULT_TIMEOUT = 60

    def __init__(
        self,
        base_url: str = BASE_URL,
        api_key: Optional[str] = None,
    ):
        """
        Set up API headers and resolve an auth token.

        Token precedence: explicit ``api_key`` arg > ``ATE_API_KEY`` env var
        > token stored in ~/.ate/config.json by the device auth flow.

        Raises:
            ValueError: If no token can be found from any source.
        """
        self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
        }

        token = None

        # Load from config file (device auth flow)
        if CONFIG_FILE.exists():
            try:
                with open(CONFIG_FILE) as f:
                    config = json.load(f)
                token = config.get("access_token") or config.get("api_key")
            except Exception:
                # Unreadable/corrupt config is treated as "not logged in".
                pass

        # Override with explicit api_key or env var
        if api_key:
            token = api_key
        elif os.getenv("ATE_API_KEY"):
            token = os.getenv("ATE_API_KEY")

        if token:
            self.headers["Authorization"] = f"Bearer {token}"
        else:
            raise ValueError(
                "Not logged in. Run 'ate login' to authenticate."
            )

    def _request(self, method: str, endpoint: str, **kwargs) -> Dict:
        """
        Make HTTP request to API with improved error handling.

        A default timeout is applied unless the caller supplies one, so a
        hung server cannot block the process indefinitely.

        Raises:
            APIError: With helpful suggestions on any non-2xx response.
        """
        url = f"{self.base_url}{endpoint}"
        # Fix: the original call had no timeout, which can hang forever on
        # an unresponsive server. Callers may still override via kwargs.
        kwargs.setdefault("timeout", self.DEFAULT_TIMEOUT)
        response = requests.request(method, url, headers=self.headers, **kwargs)

        if not response.ok:
            raise parse_api_error(response, endpoint)

        try:
            return response.json()
        except json.JSONDecodeError:
            # NOTE(review): requests >= 2.27 raises a subclass of
            # json.JSONDecodeError here; older versions may raise a plain
            # ValueError — confirm the pinned requests version.
            return {"raw": response.text}

    def get_or_create_project(self, name: str, description: str = "") -> str:
        """
        Get an existing project by exact name, or create a new private one.

        Returns:
            The project ID. May be None if the create response carries no
            ``project.id`` field — callers should treat a falsy return as
            failure.
        """
        # Try to find existing project
        try:
            projects = self._request("GET", "/projects")
            for project in projects.get("projects", []):
                if project.get("name") == name:
                    return project["id"]
        except Exception:
            # Listing failed (e.g. permissions); fall through to creation.
            pass

        # Create new project
        response = self._request("POST", "/projects", json={
            "name": name,
            "description": description or f"Robot skill library for {name}",
            "visibility": "private",
        })
        return response.get("project", {}).get("id")

    def upload_skill_library(
        self,
        library: SkillLibrary,
        calibration: RobotCalibration,
        project_id: Optional[str] = None,
        include_images: bool = True,
    ) -> Dict[str, Any]:
        """
        Upload a complete skill library to FoodforThought.

        Creates artifacts with lineage:
        1. Raw: pose images
        2. Processed: servo calibration
        3. Labeled: poses with semantic labels
        4. Skill: generated Python code

        Args:
            library: SkillLibrary to upload
            calibration: RobotCalibration with servo data
            project_id: Optional project ID (will create if not provided)
            include_images: Whether to upload pose images

        Returns:
            Dict with project_id and the list of created artifacts.
        """
        result = {
            "project_id": None,
            "artifacts": [],
            "lineage": [],
        }

        # Get or create project
        if not project_id:
            project_id = self.get_or_create_project(
                f"{library.robot_name}_skills",
                f"Skill library for {library.robot_model}",
            )
        result["project_id"] = project_id

        # 1. Upload pose images as raw artifacts
        image_artifact_ids = []
        if include_images:
            images_dir = Path.home() / ".ate" / "skill_images" / library.robot_name
            if images_dir.exists():
                for img_path in images_dir.glob("*.jpg"):
                    try:
                        artifact_id = self._upload_image_artifact(
                            project_id=project_id,
                            image_path=img_path,
                            robot_name=library.robot_name,
                        )
                        image_artifact_ids.append(artifact_id)
                        result["artifacts"].append({
                            "id": artifact_id,
                            "stage": "raw",
                            "name": img_path.stem,
                        })
                    except Exception as e:
                        # Image upload is best-effort: one bad image must not
                        # abort the whole library upload.
                        print(f"Warning: Failed to upload {img_path.name}: {e}")

        # 2. Upload calibration as processed artifact
        calibration_artifact_id = self._upload_calibration_artifact(
            project_id=project_id,
            calibration=calibration,
            parent_ids=image_artifact_ids[:5] if image_artifact_ids else None,  # Link to some images
        )
        result["artifacts"].append({
            "id": calibration_artifact_id,
            "stage": "processed",
            "name": f"{library.robot_name}_calibration",
        })

        # 3. Upload poses as labeled artifacts
        pose_artifact_ids = []
        for pose_name, pose in calibration.poses.items():
            pose_artifact_id = self._upload_pose_artifact(
                project_id=project_id,
                pose_name=pose_name,
                pose=pose,
                calibration=calibration,
                parent_id=calibration_artifact_id,
            )
            pose_artifact_ids.append(pose_artifact_id)
            result["artifacts"].append({
                "id": pose_artifact_id,
                "stage": "labeled",
                "name": pose_name,
            })

        # 4. Upload skills as skill artifacts
        for action_name, action in library.actions.items():
            skill_artifact_id = self._upload_skill_artifact(
                project_id=project_id,
                action_name=action_name,
                action=action,
                library=library,
                calibration=calibration,
                trained_on=pose_artifact_ids,  # Skills trained on poses
            )
            result["artifacts"].append({
                "id": skill_artifact_id,
                "stage": "skill",
                "name": action_name,
            })

        return result

    def _upload_image_artifact(
        self,
        project_id: str,
        image_path: Path,
        robot_name: str,
    ) -> str:
        """Upload an image as a raw artifact.

        Returns the new artifact's ID (or None if the response lacks one).
        """
        # Read and encode image.
        # NOTE(review): image_data is encoded but never included in the
        # request payload below, so only metadata reaches the server. The
        # read still validates that the file is accessible. Confirm whether
        # the API expects the image bytes in some field.
        with open(image_path, "rb") as f:
            image_data = base64.b64encode(f.read()).decode("utf-8")

        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": image_path.stem,
            "stage": "raw",
            "type": "dataset",
            "metadata": {
                "robot_name": robot_name,
                "image_type": "pose_capture",
                "filename": image_path.name,
                "format": "jpeg",
                "source": "visual_labeler",
                "captured_at": datetime.now().isoformat(),
            },
        })
        return response.get("artifact", {}).get("id")

    def _upload_calibration_artifact(
        self,
        project_id: str,
        calibration: RobotCalibration,
        parent_ids: Optional[List[str]] = None,
    ) -> str:
        """Upload servo calibration as a processed artifact.

        NOTE(review): parent_ids is accepted but not sent in the payload, so
        no lineage link to the raw images is actually recorded — confirm the
        API field name for multi-parent lineage before wiring it up.
        """
        # Serialize calibration into a plain JSON-safe dict.
        calibration_data = {
            "robot_model": calibration.robot_model,
            "robot_name": calibration.robot_name,
            "serial_port": calibration.serial_port,
            "baud_rate": calibration.baud_rate,
            "camera_url": calibration.camera_url,
            "servos": {
                str(sid): {
                    "servo_id": s.servo_id,
                    "name": s.name,
                    "joint_type": s.joint_type.value,
                    "min_value": s.min_value,
                    "max_value": s.max_value,
                    "center_value": s.center_value,
                    "positions": s.positions,
                }
                for sid, s in calibration.servos.items()
            },
            "calibrated_at": calibration.calibrated_at,
        }

        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": f"{calibration.robot_name}_calibration",
            "stage": "processed",
            "type": "dataset",
            "metadata": {
                "robot_model": calibration.robot_model,
                "robot_name": calibration.robot_name,
                "servo_count": len(calibration.servos),
                "pose_count": len(calibration.poses),
                "calibration_data": calibration_data,
                "source": "visual_labeler",
            },
        })
        return response.get("artifact", {}).get("id")

    def _upload_pose_artifact(
        self,
        project_id: str,
        pose_name: str,
        pose,
        calibration: RobotCalibration,
        parent_id: str,
    ) -> str:
        """Upload a named pose as a labeled artifact linked to its parent
        calibration artifact via parentArtifactId."""
        # Attach semantic labels (joint name/type, normalized position) to
        # each raw servo value so the pose is meaningful without the robot.
        servo_labels = {}
        for sid, value in pose.servo_positions.items():
            servo_cal = calibration.servos.get(sid)
            if servo_cal:
                servo_labels[str(sid)] = {
                    "name": servo_cal.name,
                    "joint_type": servo_cal.joint_type.value,
                    "value": value,
                    # max(1, ...) guards against division by zero when the
                    # servo's calibrated range is degenerate.
                    "normalized": (value - servo_cal.min_value) / max(1, servo_cal.max_value - servo_cal.min_value),
                }

        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": pose_name,
            "stage": "labeled",
            "type": "dataset",
            "parentArtifactId": parent_id,
            "transformationType": "labeling",
            "transformationNotes": f"Pose '{pose_name}' labeled from calibration",
            "metadata": {
                "robot_name": calibration.robot_name,
                "pose_name": pose_name,
                "description": pose.description,
                "servo_positions": pose.servo_positions,
                "servo_labels": servo_labels,
                "transition_time_ms": pose.transition_time_ms,
                "image_path": pose.image_path,
                "source": "visual_labeler",
            },
        })
        return response.get("artifact", {}).get("id")

    def _upload_skill_artifact(
        self,
        project_id: str,
        action_name: str,
        action,
        library: SkillLibrary,
        calibration: RobotCalibration,
        trained_on: List[str],
    ) -> str:
        """Upload a generated skill as a skill artifact.

        Regenerates the skill's Python code via DualCameraLabeler, infers
        hardware requirements from the servo names the action touches, and
        classifies the skill by step count / perception tags.
        """
        # Generate skill code (local import avoids a module-level cycle
        # with visual_labeler, which imports nothing from here at call time).
        from .visual_labeler import DualCameraLabeler
        labeler = DualCameraLabeler(
            serial_port=calibration.serial_port or "",
            robot_name=library.robot_name,
            robot_model=library.robot_model,
        )
        labeler.calibrator.calibration = calibration
        skill_code = labeler.generate_skill_code(action)

        # Determine skill type based on action structure
        skill_type = "primitive"  # Default
        dependencies = []
        hardware_requirements = []

        # Infer hardware requirements from servo usage
        servo_ids = set()
        for step in action.steps:
            if hasattr(step, 'pose_name') and step.pose_name:
                pose = calibration.poses.get(step.pose_name)
                if pose:
                    servo_ids.update(pose.servo_positions.keys())

        # Map servo IDs to hardware requirements via name heuristics.
        for sid in servo_ids:
            servo = calibration.servos.get(sid)
            if servo:
                if "gripper" in servo.name.lower():
                    if "gripper" not in hardware_requirements:
                        hardware_requirements.append("gripper")
                elif "arm" in servo.name.lower() or "shoulder" in servo.name.lower() or "elbow" in servo.name.lower():
                    if "arm" not in hardware_requirements:
                        hardware_requirements.append("arm")
                elif "leg" in servo.name.lower() or "hip" in servo.name.lower() or "thigh" in servo.name.lower():
                    if "legs" not in hardware_requirements:
                        hardware_requirements.append("legs")

        # Determine skill type based on complexity
        if len(action.steps) == 1:
            skill_type = "primitive"
        elif len(action.steps) <= 5:
            skill_type = "compound"
        else:
            skill_type = "sequence"

        # If action has perception tags, it's a behavior
        if action.tags:
            if any(t in ["perception", "detection", "visual", "feedback"] for t in action.tags):
                skill_type = "behavior"
                if "camera" not in hardware_requirements:
                    hardware_requirements.append("camera")

        response = self._request("POST", "/artifacts", json={
            "projectId": project_id,
            "name": action_name,
            "stage": "skill",
            "type": "code",
            "trainedOn": trained_on,
            "trainingNotes": f"Skill '{action_name}' generated from {len(action.steps)} poses",
            "metadata": {
                "robot_model": library.robot_model,
                "robot_name": library.robot_name,
                "action_type": action.action_type.value,
                "description": action.description,
                "steps": [s.to_dict() for s in action.steps],
                "skill_code": skill_code,
                "tags": action.tags,
                "source": "visual_labeler",
                "generated_at": datetime.now().isoformat(),
                # New skill hierarchy fields
                "skill_type": skill_type,  # "primitive", "compound", "behavior"
                "dependencies": dependencies,  # List of skill names this depends on
                "hardware_requirements": hardware_requirements,  # ["arm", "gripper", "legs", "camera"]
                "servo_count": len(servo_ids),
            },
        })
        return response.get("artifact", {}).get("id")
|
|
524
|
+
|
|
525
|
+
|
|
526
|
+
def _post_primitive_artifact(
    uploader: "SkillLibraryUploader",
    project_id: str,
    name: str,
    skill_type: str,
    metadata: Dict[str, Any],
) -> Dict[str, Any]:
    """POST one skill-stage artifact and return its result-entry dict."""
    response = uploader._request("POST", "/artifacts", json={
        "projectId": project_id,
        "name": name,
        "stage": "skill",
        "type": "code",
        "metadata": metadata,
    })
    return {
        "id": response.get("artifact", {}).get("id"),
        "stage": "skill",
        "name": name,
        "skill_type": skill_type,
    }


def upload_primitives(
    robot_name: str,
    project_id: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Upload programmatic primitives from PrimitiveLibrary.

    These are the cleanly-generated skills from primitives.py with proper
    skill_type, dependencies, and hardware_requirements. Primitives,
    compound skills, and behaviors are each uploaded as skill-stage
    artifacts; the three loops share _post_primitive_artifact and differ
    only in the metadata they attach.

    Args:
        robot_name: Name of the robot
        project_id: Optional project ID
        api_key: Optional API key

    Returns:
        Dict with upload results (project_id plus one entry per artifact)
    """
    from .primitives import PrimitiveLibrary

    uploader = SkillLibraryUploader(api_key=api_key)

    # Get or create project
    if not project_id:
        project_id = uploader.get_or_create_project(
            f"{robot_name}_primitives",
            f"Primitive skill library for {robot_name}",
        )

    result = {
        "project_id": project_id,
        "artifacts": [],
    }

    # Create primitive library (without robot for now)
    lib = PrimitiveLibrary(robot_interface=None)

    # Upload primitives
    for name, prim in lib.primitives.items():
        result["artifacts"].append(_post_primitive_artifact(
            uploader, project_id, name, "primitive",
            {
                "robot_name": robot_name,
                "skill_type": "primitive",
                "description": prim.description,
                "servo_targets": {str(k): v for k, v in prim.servo_targets.items()},
                "duration_ms": prim.duration_ms,
                "hardware_requirements": [r.value for r in prim.hardware],
                "dependencies": [],
                "source": "primitives_library",
            },
        ))

    # Upload compound skills
    for name, compound in lib.compounds.items():
        result["artifacts"].append(_post_primitive_artifact(
            uploader, project_id, name, "compound",
            {
                "robot_name": robot_name,
                "skill_type": "compound",
                "description": compound.description,
                "steps": compound.steps,
                "hardware_requirements": [r.value for r in compound.hardware],
                "dependencies": compound.steps,  # Each step is a dependency
                "source": "primitives_library",
            },
        ))

    # Upload behaviors
    for name, behavior in lib.behaviors.items():
        result["artifacts"].append(_post_primitive_artifact(
            uploader, project_id, name, "behavior",
            {
                "robot_name": robot_name,
                "skill_type": "behavior",
                "description": behavior.description,
                "steps": behavior.steps,
                "hardware_requirements": [r.value for r in behavior.hardware],
                "dependencies": [],  # Behaviors infer deps from steps
                "requires_perception": True,
                "source": "primitives_library",
            },
        ))

    return result
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
def upload_skill_library(
    robot_name: str,
    project_id: Optional[str] = None,
    include_images: bool = True,
    api_key: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Convenience function to upload a skill library.

    Loads the locally-saved calibration and skill library for the given
    robot, then delegates to SkillLibraryUploader.upload_skill_library.

    Args:
        robot_name: Name of the robot (matches calibration/library filenames)
        project_id: Optional project ID
        include_images: Whether to upload pose images
        api_key: Optional API key

    Returns:
        Dict with upload results

    Raises:
        ValueError: If no calibration or no skill library exists locally
            for the robot, or (via SkillLibraryUploader) if not logged in.
    """
    # Calibration is checked first so its error message wins when both
    # local files are missing.
    calibration = load_calibration(robot_name)
    if not calibration:
        raise ValueError(f"No calibration found for: {robot_name}")

    library = load_skill_library(robot_name)
    if not library:
        raise ValueError(f"No skill library found for: {robot_name}")

    return SkillLibraryUploader(api_key=api_key).upload_skill_library(
        library=library,
        calibration=calibration,
        project_id=project_id,
        include_images=include_images,
    )
|