foodforthought-cli 0.2.8-py3-none-any.whl → 0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +12 -0
- ate/behaviors/approach.py +399 -0
- ate/cli.py +855 -4551
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +18 -6
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +360 -24
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +16 -0
- ate/interfaces/base.py +2 -0
- ate/interfaces/sensors.py +247 -0
- ate/llm_proxy.py +239 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +42 -3
- ate/recording/session.py +12 -2
- ate/recording/visual.py +416 -0
- ate/robot/__init__.py +142 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +88 -3
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +143 -11
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +104 -2
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +6 -0
- ate/robot/registry.py +5 -2
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +285 -3
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +9 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +1 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/urdf/cloud.py
ADDED
@@ -0,0 +1,491 @@
+"""
+Cloud client for URDF scan processing.
+
+This module provides the client-side logic for uploading videos to the
+cloud GPU pipeline and retrieving the generated URDF artifacts.
+"""
+
+import json
+import time
+import sys
+from pathlib import Path
+from typing import Optional, Dict, Any
+from dataclasses import dataclass
+
+import requests
+
+# Import the shared client for authentication
+from ..client import ATEClient, BASE_URL
+
+
+@dataclass
+class CloudJobResult:
+    """Result from a cloud scan job."""
+    success: bool
+    job_id: str
+    artifact_url: Optional[str] = None
+    error: Optional[str] = None
+    link_count: int = 0
+    joint_count: int = 0
+    processing_time_seconds: float = 0.0
+
+
+class CloudScanClient:
+    """Client for cloud-based URDF scan processing."""
+
+    def __init__(self, base_url: str = BASE_URL):
+        """Initialize the cloud scan client."""
+        self.client = ATEClient(base_url=base_url)
+        self.base_url = base_url
+
+    def check_credits(self) -> Dict[str, Any]:
+        """
+        Check the user's credit balance.
+
+        Returns:
+            Dict with 'balance' key, or 'error' if failed
+        """
+        try:
+            response = self.client.get("/scan/jobs")
+            return response.get("credits", {"balance": 0})
+        except Exception as e:
+            return {"error": str(e), "balance": 0}
+
+    def list_jobs(self) -> Dict[str, Any]:
+        """
+        List all scan jobs for the authenticated user.
+
+        Returns:
+            Dict with 'jobs' list and 'credits' info
+        """
+        try:
+            response = self.client.get("/scan/jobs")
+            return response
+        except Exception as e:
+            return {"error": str(e), "jobs": []}
+
+    def get_job(self, job_id: str) -> Dict[str, Any]:
+        """
+        Get details for a specific scan job.
+
+        Args:
+            job_id: The job ID to retrieve
+
+        Returns:
+            Dict with job details or error
+        """
+        try:
+            response = self.client.get(f"/scan/jobs/{job_id}")
+            return response
+        except Exception as e:
+            return {"error": str(e)}
+
+    def create_job(
+        self,
+        robot_name: Optional[str] = None,
+        scale_ref: Optional[str] = None,
+    ) -> tuple[str, str, str, Dict[str, Any]]:
+        """
+        Create a new cloud scan job.
+
+        Args:
+            robot_name: Optional robot name for the URDF
+            scale_ref: Optional scale reference (e.g., "gripper:85mm")
+
+        Returns:
+            Tuple of (job_id, upload_url, blob_token, credits_info)
+
+        Raises:
+            RuntimeError: If job creation fails
+        """
+        data = {}
+        if robot_name:
+            data["robotName"] = robot_name
+        if scale_ref:
+            data["scaleRef"] = scale_ref
+
+        try:
+            response = self.client.post("/scan/jobs", data=data)
+        except SystemExit:
+            raise RuntimeError("Failed to create job. Please check your authentication.")
+
+        if "error" in response:
+            raise RuntimeError(response["error"])
+
+        job_id = response.get("jobId")
+        upload_url = response.get("uploadUrl")  # Server returns full upload URL
+        blob_token = response.get("blobToken")  # Server returns blob token for auth
+        credits_info = response.get("credits", {})
+
+        if not job_id or not upload_url or not blob_token:
+            raise RuntimeError(f"Invalid response from server: {response}")
+
+        return job_id, upload_url, blob_token, credits_info
+
+    def upload_video(
+        self,
+        video_path: Path,
+        upload_url: str,
+        blob_token: str,
+        progress_callback: Optional[callable] = None,
+    ) -> Optional[str]:
+        """
+        Upload a video file to Vercel Blob storage.
+
+        Args:
+            video_path: Path to the video file
+            upload_url: Full URL for upload (from create_job)
+            blob_token: Authentication token for blob upload (from create_job)
+            progress_callback: Optional callback(bytes_sent, total_bytes)
+
+        Returns:
+            The full public blob URL if the upload succeeded
+
+        Raises:
+            RuntimeError: If upload fails
+        """
+        if not video_path.exists():
+            raise RuntimeError(f"Video file not found: {video_path}")
+
+        file_size = video_path.stat().st_size
+
+        with open(video_path, "rb") as f:
+            try:
+                response = requests.put(
+                    upload_url,
+                    data=f.read(),
+                    headers={
+                        "Authorization": f"Bearer {blob_token}",
+                        "Content-Type": "video/mp4",
+                        "x-api-version": "7",
+                    },
+                    timeout=600,  # 10 minute timeout for large files
+                )
+                response.raise_for_status()
+
+                # The file is sent as a single PUT, so report completion once
+                if progress_callback:
+                    progress_callback(file_size, file_size)
+
+                # Extract the blob URL from response
+                result = response.json()
+                blob_url = result.get("url")
+                return blob_url  # Return the full public URL
+            except requests.exceptions.RequestException as e:
+                raise RuntimeError(f"Upload failed: {e}")
+
+    def update_video_url(self, job_id: str, video_url: str) -> bool:
+        """
+        Update the job's video URL after upload completes.
+
+        The Vercel Blob upload returns the full public URL, which we need
+        to store in the job so the trigger route can use it for download.
+
+        Args:
+            job_id: The job ID
+            video_url: The full Vercel Blob public URL
+
+        Returns:
+            True if update succeeded
+
+        Raises:
+            RuntimeError: If update fails
+        """
+        try:
+            response = self.client.put(f"/scan/jobs/{job_id}", data={"videoUrl": video_url})
+        except SystemExit:
+            raise RuntimeError("Failed to update video URL. Please check your authentication.")
+
+        if "error" in response:
+            raise RuntimeError(f"Failed to update video URL: {response['error']}")
+
+        return True
+
+    def start_processing(self, job_id: str) -> bool:
+        """
+        Signal the server to start GPU processing after upload is complete.
+
+        This calls the trigger endpoint which initiates the Modal GPU worker.
+
+        Args:
+            job_id: The job ID
+
+        Returns:
+            True if processing started
+
+        Raises:
+            RuntimeError: If trigger fails
+        """
+        try:
+            response = self.client.post(f"/scan/jobs/{job_id}/trigger")
+        except SystemExit:
+            raise RuntimeError("Failed to trigger processing. Please check your authentication.")
+
+        if "error" in response:
+            raise RuntimeError(f"Failed to start processing: {response['error']}")
+
+        return True
+
+    def poll_job(
+        self,
+        job_id: str,
+        timeout_seconds: int = 600,
+        poll_interval: float = 5.0,
+        status_callback: Optional[callable] = None,
+    ) -> CloudJobResult:
+        """
+        Poll a job until completion or timeout.
+
+        Args:
+            job_id: The job ID to poll
+            timeout_seconds: Maximum time to wait
+            poll_interval: Seconds between polls
+            status_callback: Optional callback(status_string)
+
+        Returns:
+            CloudJobResult with the final status
+        """
+        start_time = time.time()
+
+        while time.time() - start_time < timeout_seconds:
+            try:
+                response = self.client.get(f"/scan/jobs/{job_id}")
+            except SystemExit:
+                return CloudJobResult(
+                    success=False,
+                    job_id=job_id,
+                    error="Failed to poll job status",
+                )
+
+            status = response.get("status", "unknown")
+
+            if status_callback:
+                status_callback(status)
+
+            if status == "completed":
+                return CloudJobResult(
+                    success=True,
+                    job_id=job_id,
+                    artifact_url=response.get("artifactUrl"),
+                    link_count=response.get("linkCount", 0),
+                    joint_count=response.get("jointCount", 0),
+                    processing_time_seconds=response.get("processingTimeSeconds", 0),
+                )
+
+            if status == "failed":
+                return CloudJobResult(
+                    success=False,
+                    job_id=job_id,
+                    error=response.get("error", "Processing failed"),
+                )
+
+            time.sleep(poll_interval)
+
+        return CloudJobResult(
+            success=False,
+            job_id=job_id,
+            error=f"Timeout after {timeout_seconds} seconds",
+        )
+
+    def download_artifact(
+        self,
+        artifact_url: str,
+        output_dir: Path,
+        progress_callback: Optional[callable] = None,
+    ) -> Path:
+        """
+        Download the generated URDF artifact.
+
+        Args:
+            artifact_url: URL to the artifact zip
+            output_dir: Directory to extract to
+            progress_callback: Optional callback(bytes_downloaded, total_bytes)
+
+        Returns:
+            Path to the extracted directory
+
+        Raises:
+            RuntimeError: If download or extraction fails
+        """
+        import zipfile
+        import tempfile
+
+        output_dir = Path(output_dir)
+        output_dir.mkdir(parents=True, exist_ok=True)
+
+        # Download the artifact
+        try:
+            response = requests.get(artifact_url, stream=True, timeout=300)
+            response.raise_for_status()
+        except requests.exceptions.RequestException as e:
+            raise RuntimeError(f"Download failed: {e}")
+
+        total_size = int(response.headers.get("content-length", 0))
+
+        # Save to temp file
+        with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as tmp:
+            tmp_path = Path(tmp.name)
+            downloaded = 0
+
+            for chunk in response.iter_content(chunk_size=8192):
+                tmp.write(chunk)
+                downloaded += len(chunk)
+                if progress_callback and total_size:
+                    progress_callback(downloaded, total_size)
+
+        # Extract the zip
+        try:
+            with zipfile.ZipFile(tmp_path, "r") as zf:
+                zf.extractall(output_dir)
+        except zipfile.BadZipFile as e:
+            tmp_path.unlink()
+            raise RuntimeError(f"Failed to extract artifact: {e}")
+
+        # Cleanup
+        tmp_path.unlink()
+
+        return output_dir
+
+
+def run_cloud_pipeline(
+    video_path: Optional[str] = None,
+    output_dir: str = "./urdf_output",
+    robot_name: Optional[str] = None,
+    scale_ref: Optional[str] = None,
+    camera_id: int = 0,
+) -> Path:
+    """
+    Run the full cloud pipeline for URDF generation.
+
+    This is the main entry point for cloud-based scanning.
+
+    Args:
+        video_path: Path to existing video file (if None, will capture)
+        output_dir: Output directory for the URDF
+        robot_name: Optional robot name
+        scale_ref: Optional scale reference
+        camera_id: Camera ID for capture (if no video provided)
+
+    Returns:
+        Path to the generated URDF
+
+    Raises:
+        RuntimeError: If any step fails
+    """
+    print("\n" + "#" * 60)
+    print("# CLOUD URDF GENERATION PIPELINE")
+    print("#" * 60)
+
+    client = CloudScanClient()
+
+    # Check credits first
+    credits = client.check_credits()
+    if "error" in credits:
+        raise RuntimeError(f"Failed to check credits: {credits['error']}")
+
+    balance = credits.get("balance", 0)
+    print(f"\nCredit balance: {balance}")
+
+    if balance < 5:
+        raise RuntimeError(
+            f"Insufficient credits ({balance}). Cloud scan requires 5 credits.\n"
+            "Visit https://kindly.fyi/dashboard/billing to purchase credits."
+        )
+
+    # Handle video capture if no video provided
+    if not video_path:
+        print("\nCapturing video for cloud processing...")
+        from .capture import run_capture_only
+
+        video_path = run_capture_only(
+            output_dir=output_dir,
+            camera_id=camera_id,
+            robot_name=robot_name,
+        )
+        print(f"Captured: {video_path}")
+
+    video_path = Path(video_path)
+    if not video_path.exists():
+        raise RuntimeError(f"Video file not found: {video_path}")
+
+    print(f"\nVideo: {video_path} ({video_path.stat().st_size / 1024 / 1024:.1f} MB)")
+
+    # Create job
+    print("\nCreating cloud job...")
+    job_id, upload_url, blob_token, credits_info = client.create_job(robot_name, scale_ref)
+    print(f"Job ID: {job_id}")
+    print(f"Cost: {credits_info.get('cost', 5)} credits")
+
+    # Upload video
+    print("\nUploading video...")
+
+    def upload_progress(sent, total):
+        pct = int(100 * sent / total)
+        bar_len = 30
+        filled = int(bar_len * sent / total)
+        bar = "=" * filled + "-" * (bar_len - filled)
+        print(f"\r [{bar}] {pct}% ({sent / 1024 / 1024:.1f} / {total / 1024 / 1024:.1f} MB)", end="", flush=True)
+
+    blob_url = client.upload_video(video_path, upload_url, blob_token, upload_progress)
+    print("\n Upload complete!")
+
+    # Update job with full blob URL for download
+    if blob_url:
+        client.update_video_url(job_id, blob_url)
+        print(f" Stored blob URL: {blob_url[:60]}...")
+
+    # Trigger GPU processing
+    print("\nStarting GPU processing...")
+    client.start_processing(job_id)
+    print(" Processing triggered on cloud GPU (A10G)")
+
+    # Poll for completion
+    print("\nWaiting for processing to complete...")
+
+    last_status = None
+    def status_callback(status):
+        nonlocal last_status
+        if status != last_status:
+            print(f" Status: {status}")
+            last_status = status
+
+    result = client.poll_job(job_id, status_callback=status_callback)
+
+    if not result.success:
+        raise RuntimeError(f"Cloud processing failed: {result.error}")
+
+    print(f"\nProcessing complete in {result.processing_time_seconds:.1f}s")
+    print(f" Links: {result.link_count}")
+    print(f" Joints: {result.joint_count}")
+
+    # Download artifact
+    print("\nDownloading artifact...")
+    output_path = Path(output_dir)
+
+    def download_progress(downloaded, total):
+        pct = int(100 * downloaded / total)
+        bar_len = 30
+        filled = int(bar_len * downloaded / total)
+        bar = "=" * filled + "-" * (bar_len - filled)
+        print(f"\r [{bar}] {pct}%", end="", flush=True)
+
+    client.download_artifact(result.artifact_url, output_path, download_progress)
+    print("\n Download complete!")
+
+    # Find the URDF file
+    urdf_files = list(output_path.glob("*.urdf"))
+    if urdf_files:
+        urdf_path = urdf_files[0]
+    else:
+        urdf_path = output_path / f"{robot_name or 'robot'}.urdf"
+
+    print("\n" + "#" * 60)
+    print("# CLOUD PIPELINE COMPLETE")
+    print("#" * 60)
+    print(f"\nOutput: {output_path}")
+    print(f"URDF: {urdf_path}")
+
+    return urdf_path
+
+
+__all__ = [
+    "CloudScanClient",
+    "CloudJobResult",
+    "run_cloud_pipeline",
+]
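
Taken together, the methods added above form a five-step flow: create a job, upload the capture video to blob storage, record the returned public blob URL on the job, trigger GPU processing, then poll for completion and download the artifact. The sketch below illustrates that flow under two assumptions: the module is imported by its path in this wheel (ate.urdf.cloud), and authentication has already been configured; the video filename, robot name, and output directory are placeholders, not values from the package. run_cloud_pipeline wraps the same steps, plus the credit check and optional camera capture, in a single call.

from pathlib import Path

from ate.urdf.cloud import CloudScanClient, run_cloud_pipeline

client = CloudScanClient()

# 1. Reserve a job; the server returns a blob upload URL and token.
job_id, upload_url, blob_token, credits_info = client.create_job(
    robot_name="robot",        # placeholder name
    scale_ref="gripper:85mm",  # optional, format per the create_job docstring
)

# 2. Upload the capture, then store the public blob URL on the job.
blob_url = client.upload_video(Path("scan.mp4"), upload_url, blob_token)
if blob_url:
    client.update_video_url(job_id, blob_url)

# 3. Trigger the Modal GPU worker and poll until it finishes or times out.
client.start_processing(job_id)
result = client.poll_job(job_id, timeout_seconds=600, status_callback=print)

# 4. Download and extract the generated URDF bundle.
if result.success:
    client.download_artifact(result.artifact_url, Path("./urdf_output"))
else:
    raise RuntimeError(result.error)

# Equivalent one-call form (also captures video when none is supplied):
# urdf_path = run_cloud_pipeline(video_path="scan.mp4", robot_name="robot")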