foodforthought-cli 0.2.7-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +100 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/behaviors/common.py +686 -0
  9. ate/behaviors/tree.py +454 -0
  10. ate/cli.py +855 -3995
  11. ate/client.py +90 -0
  12. ate/commands/__init__.py +168 -0
  13. ate/commands/auth.py +389 -0
  14. ate/commands/bridge.py +448 -0
  15. ate/commands/data.py +185 -0
  16. ate/commands/deps.py +111 -0
  17. ate/commands/generate.py +384 -0
  18. ate/commands/memory.py +907 -0
  19. ate/commands/parts.py +166 -0
  20. ate/commands/primitive.py +399 -0
  21. ate/commands/protocol.py +288 -0
  22. ate/commands/recording.py +524 -0
  23. ate/commands/repo.py +154 -0
  24. ate/commands/simulation.py +291 -0
  25. ate/commands/skill.py +303 -0
  26. ate/commands/skills.py +487 -0
  27. ate/commands/team.py +147 -0
  28. ate/commands/workflow.py +271 -0
  29. ate/detection/__init__.py +38 -0
  30. ate/detection/base.py +142 -0
  31. ate/detection/color_detector.py +399 -0
  32. ate/detection/trash_detector.py +322 -0
  33. ate/drivers/__init__.py +39 -0
  34. ate/drivers/ble_transport.py +405 -0
  35. ate/drivers/mechdog.py +942 -0
  36. ate/drivers/wifi_camera.py +477 -0
  37. ate/interfaces/__init__.py +187 -0
  38. ate/interfaces/base.py +273 -0
  39. ate/interfaces/body.py +267 -0
  40. ate/interfaces/detection.py +282 -0
  41. ate/interfaces/locomotion.py +422 -0
  42. ate/interfaces/manipulation.py +408 -0
  43. ate/interfaces/navigation.py +389 -0
  44. ate/interfaces/perception.py +362 -0
  45. ate/interfaces/sensors.py +247 -0
  46. ate/interfaces/types.py +371 -0
  47. ate/llm_proxy.py +239 -0
  48. ate/mcp_server.py +387 -0
  49. ate/memory/__init__.py +35 -0
  50. ate/memory/cloud.py +244 -0
  51. ate/memory/context.py +269 -0
  52. ate/memory/embeddings.py +184 -0
  53. ate/memory/export.py +26 -0
  54. ate/memory/merge.py +146 -0
  55. ate/memory/migrate/__init__.py +34 -0
  56. ate/memory/migrate/base.py +89 -0
  57. ate/memory/migrate/pipeline.py +189 -0
  58. ate/memory/migrate/sources/__init__.py +13 -0
  59. ate/memory/migrate/sources/chroma.py +170 -0
  60. ate/memory/migrate/sources/pinecone.py +120 -0
  61. ate/memory/migrate/sources/qdrant.py +110 -0
  62. ate/memory/migrate/sources/weaviate.py +160 -0
  63. ate/memory/reranker.py +353 -0
  64. ate/memory/search.py +26 -0
  65. ate/memory/store.py +548 -0
  66. ate/recording/__init__.py +83 -0
  67. ate/recording/demonstration.py +378 -0
  68. ate/recording/session.py +415 -0
  69. ate/recording/upload.py +304 -0
  70. ate/recording/visual.py +416 -0
  71. ate/recording/wrapper.py +95 -0
  72. ate/robot/__init__.py +221 -0
  73. ate/robot/agentic_servo.py +856 -0
  74. ate/robot/behaviors.py +493 -0
  75. ate/robot/ble_capture.py +1000 -0
  76. ate/robot/ble_enumerate.py +506 -0
  77. ate/robot/calibration.py +668 -0
  78. ate/robot/calibration_state.py +388 -0
  79. ate/robot/commands.py +3735 -0
  80. ate/robot/direction_calibration.py +554 -0
  81. ate/robot/discovery.py +441 -0
  82. ate/robot/introspection.py +330 -0
  83. ate/robot/llm_system_id.py +654 -0
  84. ate/robot/locomotion_calibration.py +508 -0
  85. ate/robot/manager.py +270 -0
  86. ate/robot/marker_generator.py +611 -0
  87. ate/robot/perception.py +502 -0
  88. ate/robot/primitives.py +614 -0
  89. ate/robot/profiles.py +281 -0
  90. ate/robot/registry.py +322 -0
  91. ate/robot/servo_mapper.py +1153 -0
  92. ate/robot/skill_upload.py +675 -0
  93. ate/robot/target_calibration.py +500 -0
  94. ate/robot/teach.py +515 -0
  95. ate/robot/types.py +242 -0
  96. ate/robot/visual_labeler.py +1048 -0
  97. ate/robot/visual_servo_loop.py +494 -0
  98. ate/robot/visual_servoing.py +570 -0
  99. ate/robot/visual_system_id.py +906 -0
  100. ate/transports/__init__.py +121 -0
  101. ate/transports/base.py +394 -0
  102. ate/transports/ble.py +405 -0
  103. ate/transports/hybrid.py +444 -0
  104. ate/transports/serial.py +345 -0
  105. ate/urdf/__init__.py +30 -0
  106. ate/urdf/capture.py +582 -0
  107. ate/urdf/cloud.py +491 -0
  108. ate/urdf/collision.py +271 -0
  109. ate/urdf/commands.py +708 -0
  110. ate/urdf/depth.py +360 -0
  111. ate/urdf/inertial.py +312 -0
  112. ate/urdf/kinematics.py +330 -0
  113. ate/urdf/lifting.py +415 -0
  114. ate/urdf/meshing.py +300 -0
  115. ate/urdf/models/__init__.py +110 -0
  116. ate/urdf/models/depth_anything.py +253 -0
  117. ate/urdf/models/sam2.py +324 -0
  118. ate/urdf/motion_analysis.py +396 -0
  119. ate/urdf/pipeline.py +468 -0
  120. ate/urdf/scale.py +256 -0
  121. ate/urdf/scan_session.py +411 -0
  122. ate/urdf/segmentation.py +299 -0
  123. ate/urdf/synthesis.py +319 -0
  124. ate/urdf/topology.py +336 -0
  125. ate/urdf/validation.py +371 -0
  126. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
  127. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  128. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  129. foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
  130. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  131. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/urdf/commands.py ADDED
@@ -0,0 +1,708 @@
+"""
+CLI command definitions for URDF scan pipeline.
+
+Provides the following commands:
+- ate urdf scan capture - Capture video and annotate links
+- ate urdf scan segment - Generate segmented point clouds
+- ate urdf scan optimize - Estimate joint parameters
+- ate urdf scan mesh - Generate visual/collision meshes
+- ate urdf scan synthesize - Generate final URDF
+- ate urdf scan - Run full pipeline
+- ate urdf scan status - Show session status
+"""
+
+import sys
+import argparse
+from pathlib import Path
+from typing import Optional
+
+
+def _has_mps() -> bool:
+    """Check if Apple Metal Performance Shaders (MPS) is available."""
+    try:
+        import torch
+        return torch.backends.mps.is_available()
+    except (ImportError, AttributeError):
+        return False
+
+def register_parser(subparsers):
+    """Register URDF scan commands with argparse."""
+    urdf_parser = subparsers.add_parser(
+        "urdf",
+        help="URDF generation and management",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+
+    urdf_subparsers = urdf_parser.add_subparsers(
+        dest="urdf_action",
+        help="URDF action"
+    )
+
+    # urdf scan - parent for scan subcommands
+    scan_parser = urdf_subparsers.add_parser(
+        "scan",
+        help="Markerless URDF generation from webcam video",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description="""
+Markerless URDF Generation Pipeline
+
+Generates simulation-ready URDF files from webcam video without
+physical markers, using foundation vision models (SAM 2, Depth Anything V2).
+
+QUICK START:
+  ate urdf scan --output ./my_robot/ --scale-ref "gripper:85mm"
+
+STEP-BY-STEP:
+  ate urdf scan capture --output ./my_robot/
+  ate urdf scan segment ./my_robot/ --scale-ref "gripper:85mm"
+  ate urdf scan optimize ./my_robot/
+  ate urdf scan mesh ./my_robot/
+  ate urdf scan synthesize ./my_robot/ --name my_robot
+"""
+    )
+
+    scan_subparsers = scan_parser.add_subparsers(
+        dest="scan_action",
+        help="Scan action"
+    )
+
+    # Common arguments
+    def add_common_args(parser):
+        parser.add_argument(
+            "--device", choices=["cpu", "cuda"], default="cpu",
+            help="Compute device (default: cpu)"
+        )
+        parser.add_argument(
+            "--dry-run", action="store_true",
+            help="Preview operations without executing"
+        )
+
+    # urdf scan (full pipeline)
+    scan_parser.add_argument(
+        "-o", "--output", default="./urdf_output",
+        help="Output directory for scan session (default: ./urdf_output)"
+    )
+    scan_parser.add_argument(
+        "-n", "--name",
+        help="Robot name (default: directory name)"
+    )
+    scan_parser.add_argument(
+        "-s", "--scale-ref",
+        help="Scale reference (e.g., 'gripper:85mm', 'base_width:150mm')"
+    )
+    scan_parser.add_argument(
+        "-v", "--video",
+        help="Path to existing video (skip capture)"
+    )
+    scan_parser.add_argument(
+        "--camera", type=int, default=0,
+        help="Camera device ID (default: 0)"
+    )
+    scan_parser.add_argument(
+        "--density", type=float, default=1200.0,
+        help="Material density in kg/m^3 (default: 1200, plastic)"
+    )
+    scan_parser.add_argument(
+        "--upload", action="store_true",
+        help="Upload to FoodforThought platform after generation"
+    )
+    scan_parser.add_argument(
+        "--cloud", action="store_true",
+        help="Process scan on cloud GPU (uses credits, no local GPU required)"
+    )
+    scan_parser.add_argument(
+        "--local", action="store_true",
+        help="Force local processing (requires GPU with 8GB+ VRAM or Apple Silicon)"
+    )
+    add_common_args(scan_parser)
+
+    # urdf scan capture
+    capture_parser = scan_subparsers.add_parser(
+        "capture",
+        help="Capture video and annotate robot links"
+    )
+    capture_parser.add_argument(
+        "-o", "--output", required=True,
+        help="Output directory for scan session"
+    )
+    capture_parser.add_argument(
+        "-v", "--video",
+        help="Path to existing video (skip webcam capture)"
+    )
+    capture_parser.add_argument(
+        "--camera", type=int, default=0,
+        help="Camera device ID (default: 0)"
+    )
+    capture_parser.add_argument(
+        "-n", "--name",
+        help="Robot name"
+    )
+    capture_parser.add_argument(
+        "-s", "--scale-ref",
+        help="Scale reference"
+    )
+    add_common_args(capture_parser)
+
+    # urdf scan segment
+    segment_parser = scan_subparsers.add_parser(
+        "segment",
+        help="Generate segmented point clouds from video"
+    )
+    segment_parser.add_argument(
+        "session_dir",
+        help="Path to scan session directory"
+    )
+    segment_parser.add_argument(
+        "-s", "--scale-ref",
+        help="Scale reference (e.g., 'gripper:85mm')"
+    )
+    segment_parser.add_argument(
+        "--frame-skip", type=int, default=1,
+        help="Process every Nth frame (default: 1)"
+    )
+    segment_parser.add_argument(
+        "--voxel-size", type=float, default=0.005,
+        help="Voxel size for point cloud downsampling (meters, default: 0.005)"
+    )
+    add_common_args(segment_parser)
+
+    # urdf scan optimize
+    optimize_parser = scan_subparsers.add_parser(
+        "optimize",
+        help="Estimate kinematic joint parameters"
+    )
+    optimize_parser.add_argument(
+        "session_dir",
+        help="Path to scan session directory"
+    )
+    optimize_parser.add_argument(
+        "--joint-types",
+        help="Override joint types (e.g., 'shoulder:revolute,gripper:prismatic')"
+    )
+    add_common_args(optimize_parser)
+
+    # urdf scan mesh
+    mesh_parser = scan_subparsers.add_parser(
+        "mesh",
+        help="Generate visual and collision meshes"
+    )
+    mesh_parser.add_argument(
+        "session_dir",
+        help="Path to scan session directory"
+    )
+    mesh_parser.add_argument(
+        "--visual-only", action="store_true",
+        help="Skip collision mesh generation"
+    )
+    mesh_parser.add_argument(
+        "--collision-hulls", type=int, default=8,
+        help="Max convex hulls per link (default: 8)"
+    )
+    mesh_parser.add_argument(
+        "--simplify-to", type=int, default=5000,
+        help="Target face count for mesh simplification (default: 5000)"
+    )
+    add_common_args(mesh_parser)
+
+    # urdf scan synthesize
+    synthesize_parser = scan_subparsers.add_parser(
+        "synthesize",
+        help="Generate final URDF file"
+    )
+    synthesize_parser.add_argument(
+        "session_dir",
+        help="Path to scan session directory"
+    )
+    synthesize_parser.add_argument(
+        "-n", "--name",
+        help="Robot name in URDF"
+    )
+    synthesize_parser.add_argument(
+        "--density", type=float, default=1200.0,
+        help="Material density for inertial estimation (kg/m^3)"
+    )
+    synthesize_parser.add_argument(
+        "--upload", action="store_true",
+        help="Upload to FoodforThought platform"
+    )
+    add_common_args(synthesize_parser)
+
+    # urdf scan status
+    status_parser = scan_subparsers.add_parser(
+        "status",
+        help="Show scan session status"
+    )
+    status_parser.add_argument(
+        "session_dir",
+        help="Path to scan session directory"
+    )
+    status_parser.add_argument(
+        "--json", action="store_true", dest="json_output",
+        help="Output as JSON"
+    )
+
+    # urdf scan validate
+    validate_parser = scan_subparsers.add_parser(
+        "validate",
+        help="Validate generated URDF"
+    )
+    validate_parser.add_argument(
+        "urdf_path",
+        help="Path to URDF file"
+    )
+    validate_parser.add_argument(
+        "--no-mesh-check", action="store_true",
+        help="Skip mesh file existence checks"
+    )
+
+    # urdf scan jobs - list cloud scan jobs
+    jobs_parser = scan_subparsers.add_parser(
+        "jobs",
+        help="List cloud scan jobs"
+    )
+    jobs_parser.add_argument(
+        "--json", action="store_true", dest="json_output",
+        help="Output as JSON"
+    )
+    jobs_parser.add_argument(
+        "--limit", type=int, default=10,
+        help="Maximum jobs to display (default: 10)"
+    )
+
+    # urdf scan job - show details of a specific cloud job
+    job_parser = scan_subparsers.add_parser(
+        "job",
+        help="Show details of a cloud scan job"
+    )
+    job_parser.add_argument(
+        "job_id",
+        help="Job ID to check"
+    )
+    job_parser.add_argument(
+        "--json", action="store_true", dest="json_output",
+        help="Output as JSON"
+    )
+
+    # urdf scan download - download artifact from completed cloud job
+    download_parser = scan_subparsers.add_parser(
+        "download",
+        help="Download artifact from completed cloud scan job"
+    )
+    download_parser.add_argument(
+        "job_id",
+        help="Job ID to download artifact from"
+    )
+    download_parser.add_argument(
+        "--output", "-o", default=".",
+        help="Output directory (default: current directory)"
+    )
+
+
+def handle(client, args):
+    """Handle URDF commands."""
+    if args.urdf_action == "scan":
+        handle_scan(args)
+    else:
+        print(f"Unknown URDF action: {args.urdf_action}")
+        print("Available actions: scan")
+        sys.exit(1)
+
+
+def handle_scan(args):
+    """Handle urdf scan commands."""
+    from .pipeline import PipelineConfig, run_full_pipeline, resume_pipeline
+    from .scan_session import ScanSession
+
+    # If no scan action, run full pipeline
+    if args.scan_action is None:
+        # Check for cloud mode
+        use_cloud = getattr(args, 'cloud', False)
+        use_local = getattr(args, 'local', False)
+
+        if use_cloud and use_local:
+            print("Error: Cannot specify both --cloud and --local")
+            sys.exit(1)
+
+        if use_cloud:
+            # Run cloud pipeline
+            from .cloud import run_cloud_pipeline
+            try:
+                run_cloud_pipeline(
+                    video_path=getattr(args, 'video', None),
+                    output_dir=args.output,
+                    robot_name=getattr(args, 'name', None),
+                    scale_ref=getattr(args, 'scale_ref', None),
+                    camera_id=getattr(args, 'camera', 0),
+                )
+            except RuntimeError as e:
+                print(f"\nCloud processing failed: {e}")
+                sys.exit(1)
+            return
+
+        # Local pipeline (default)
+        # Check hardware capabilities if not forced
+        if not use_local:
+            # Recommend cloud if local hardware is insufficient
+            try:
+                import torch
+                if not torch.cuda.is_available() and not _has_mps():
+                    print("\nNote: No GPU detected. Consider using --cloud for faster processing.")
+                    print(" Cloud processing costs 5 credits per scan.")
+                    print(" Run 'ate urdf scan --cloud' to use cloud GPU.\n")
+            except ImportError:
+                pass
+
+        # Full local pipeline
+        config = PipelineConfig(
+            output_dir=args.output,
+            robot_name=getattr(args, 'name', None),
+            scale_ref=getattr(args, 'scale_ref', None),
+            video_path=getattr(args, 'video', None),
+            device=getattr(args, 'device', 'cpu'),
+            camera_id=getattr(args, 'camera', 0),
+            density=getattr(args, 'density', 1200.0),
+            dry_run=getattr(args, 'dry_run', False),
+            upload=getattr(args, 'upload', False),
+        )
+        run_full_pipeline(config)
+
+    elif args.scan_action == "capture":
+        from .capture import run_capture
+        session = run_capture(
+            output_dir=args.output,
+            video_path=getattr(args, 'video', None),
+            camera_id=getattr(args, 'camera', 0),
+            robot_name=getattr(args, 'name', None),
+            scale_ref=getattr(args, 'scale_ref', None),
+        )
+        print(f"\nSession created: {session.session_dir}")
+
+    elif args.scan_action == "segment":
+        from .segmentation import run_segmentation
+        from .depth import run_depth_estimation
+        from .lifting import run_lifting
+
+        session = ScanSession.load(args.session_dir)
+
+        # Update scale ref if provided
+        if args.scale_ref:
+            session.metadata.scale_ref = args.scale_ref
+            session.save_metadata()
+
+        print("Running segmentation...")
+        masks = run_segmentation(session)
+
+        print("Running depth estimation...")
+        depth_maps, _ = run_depth_estimation(
+            session, masks,
+            frame_skip=args.frame_skip,
+        )
+
+        print("Generating point clouds...")
+        run_lifting(
+            session, masks, depth_maps,
+            frame_skip=args.frame_skip,
+            voxel_size=args.voxel_size,
+        )
+
+        print(f"\nSegmentation complete: {session.session_dir}")
+
+    elif args.scan_action == "optimize":
+        from .kinematics import run_kinematic_optimization
+        from .segmentation import run_segmentation
+        from .depth import run_depth_estimation
+
+        session = ScanSession.load(args.session_dir)
+
+        # Parse joint type hints
+        joint_hints = None
+        if args.joint_types:
+            joint_hints = {}
+            for pair in args.joint_types.split(","):
+                link, jtype = pair.split(":")
+                joint_hints[link.strip()] = jtype.strip()
+
+        # Need masks and depth_maps - re-run if needed
+        print("Running segmentation...")
+        masks = run_segmentation(session)
+
+        print("Running depth estimation...")
+        depth_maps, _ = run_depth_estimation(session, masks)
+
+        print("Optimizing kinematics...")
+        joints = run_kinematic_optimization(
+            session, masks, depth_maps,
+            joint_type_hints=joint_hints,
+        )
+
+        print(f"\nOptimization complete: {len(joints)} joints")
+
+    elif args.scan_action == "mesh":
+        from .meshing import generate_all_visual_meshes
+        from .collision import generate_all_collision_meshes
+
+        session = ScanSession.load(args.session_dir)
+
+        print("Generating visual meshes...")
+        visual_paths = generate_all_visual_meshes(
+            session,
+            simplify_to=args.simplify_to,
+        )
+
+        if not args.visual_only:
+            print("Generating collision meshes...")
+            generate_all_collision_meshes(
+                session,
+                max_hulls=args.collision_hulls,
+            )
+
+        print(f"\nMesh generation complete: {len(visual_paths)} links")
+
+    elif args.scan_action == "synthesize":
+        from .synthesis import run_synthesis
+        from .validation import run_validation
+
+        session = ScanSession.load(args.session_dir)
+
+        print("Synthesizing URDF...")
+        urdf_path = run_synthesis(
+            session,
+            density=args.density,
+            robot_name=getattr(args, 'name', None),
+        )
+
+        print("\nValidating URDF...")
+        result = run_validation(session)
+        print(result.summary())
+
+        if args.upload:
+            print("\nUploading to FoodforThought...")
+            try:
+                from ..robot.skill_upload import SkillLibraryUploader, APIError
+
+                uploader = SkillLibraryUploader()
+                robot_name = session.metadata.robot_name or "unknown_robot"
+
+                # Get or create project for this robot
+                project = uploader.get_or_create_project(
+                    name=f"{robot_name}_urdf",
+                    description=f"URDF generated from markerless scan for {robot_name}",
+                )
+                project_id = project["id"]
+
+                # Upload URDF as artifact
+                urdf_content = urdf_path.read_text()
+                response = uploader._request(
+                    "POST",
+                    f"/projects/{project_id}/artifacts",
+                    json={
+                        "name": f"{robot_name}.urdf",
+                        "artifact_type": "processed",
+                        "content_type": "application/xml",
+                        "metadata": {
+                            "robot_name": robot_name,
+                            "scale_ref": session.metadata.scale_ref,
+                            "link_count": len(session.links),
+                            "joint_count": len(session.joints),
+                            "generated_by": "ate urdf scan",
+                        },
+                        "content": urdf_content,
+                    },
+                )
+
+                artifact_id = response.get("id", "unknown")
+                print(f"Uploaded URDF: {robot_name}.urdf (artifact {artifact_id})")
+                print(f"Project: {project.get('name', project_id)}")
+
+            except ImportError:
+                print("Upload requires authentication. Run 'ate login' first.")
+            except APIError as e:
+                print(f"Upload failed: {e}")
+            except Exception as e:
+                print(f"Upload error: {e}")
+
+        print(f"\nGenerated: {urdf_path}")
+
+    elif args.scan_action == "status":
+        import json
+        session = ScanSession.load(args.session_dir)
+        status = session.get_status()
+
+        if args.json_output:
+            print(json.dumps(status, indent=2))
+        else:
+            print(f"\nSession: {status['session_dir']}")
+            print(f"Robot: {status['robot_name']}")
+            print(f"Scale: {status['scale_ref'] or 'not set'}")
+            print(f"\nStages:")
+            for stage, complete in status['stages'].items():
+                check = "[x]" if complete else "[ ]"
+                print(f" {check} {stage}")
+            print(f"\nData:")
+            print(f" Video: {'yes' if status['data']['has_video'] else 'no'}")
+            print(f" Links: {status['data']['link_count']}")
+            print(f" Joints: {status['data']['joint_count']}")
+            print(f" Point clouds: {'yes' if status['data']['has_clouds'] else 'no'}")
+            print(f" Meshes: {'yes' if status['data']['has_meshes'] else 'no'}")
+            print(f" URDF: {'yes' if status['data']['has_urdf'] else 'no'}")
+
+    elif args.scan_action == "validate":
+        from .validation import validate_urdf
+
+        urdf_path = Path(args.urdf_path)
+        result = validate_urdf(urdf_path, check_meshes=not args.no_mesh_check)
+        print(result.summary())
+        sys.exit(0 if result.valid else 1)
+
+    elif args.scan_action == "jobs":
+        import json
+        from datetime import datetime
+        from .cloud import CloudScanClient
+
+        client = CloudScanClient()
+        result = client.list_jobs()
+
+        if "error" in result:
+            print(f"Error: {result['error']}")
+            sys.exit(1)
+
+        jobs = result.get("jobs", [])
+        credits = result.get("credits", {})
+
+        if args.json_output:
+            print(json.dumps(result, indent=2))
+        else:
+            print(f"\nCloud Scan Jobs (Credits: {credits.get('balance', 0)})")
+            print("=" * 70)
+
+            if not jobs:
+                print("No jobs found. Run 'ate urdf scan --cloud' to create one.")
+            else:
+                # Show most recent first, up to limit
+                jobs = sorted(jobs, key=lambda j: j.get("createdAt", ""), reverse=True)
+                jobs = jobs[:args.limit]
+
+                for job in jobs:
+                    job_id = job.get("id", "unknown")[:25]
+                    status = job.get("status", "unknown")
+                    robot = job.get("robotName") or "-"
+                    created = job.get("createdAt", "")
+
+                    # Format relative time
+                    time_str = ""
+                    if created:
+                        try:
+                            dt = datetime.fromisoformat(created.replace("Z", "+00:00"))
+                            diff = datetime.now(dt.tzinfo) - dt
+                            if diff.days > 0:
+                                time_str = f"{diff.days}d ago"
+                            elif diff.seconds > 3600:
+                                time_str = f"{diff.seconds // 3600}h ago"
+                            else:
+                                time_str = f"{diff.seconds // 60}m ago"
+                        except Exception:
+                            time_str = created[:10]
+
+                    # Status emoji
+                    status_icon = {
+                        "completed": "✓",
+                        "processing": "⏳",
+                        "uploading": "↑",
+                        "pending": "○",
+                        "failed": "✗"
+                    }.get(status, "?")
+
+                    print(f"{status_icon} {job_id} {status:12} {robot:12} {time_str}")
+
+            print()
+
+    elif args.scan_action == "job":
+        import json
+        from .cloud import CloudScanClient
+
+        client = CloudScanClient()
+        job = client.get_job(args.job_id)
+
+        if "error" in job:
+            print(f"Error: {job['error']}")
+            sys.exit(1)
+
+        if args.json_output:
+            print(json.dumps(job, indent=2))
+        else:
+            status = job.get("status", "unknown")
+            status_icon = {
+                "completed": "✓",
+                "processing": "⏳",
+                "uploading": "↑",
+                "pending": "○",
+                "failed": "✗"
+            }.get(status, "?")
+
+            print(f"\nJob: {job.get('id', 'unknown')}")
+            print(f"Status: {status_icon} {status}")
+            print(f"Robot: {job.get('robotName') or '-'}")
+            print(f"Scale: {job.get('scaleRef') or '-'}")
+            print(f"Created: {job.get('createdAt', '-')}")
+
+            if status == "completed":
+                print(f"Links: {job.get('linkCount', '-')}")
+                print(f"Joints: {job.get('jointCount', '-')}")
+                print(f"Processing: {job.get('processingTimeSeconds', '-')}s")
+                if job.get("artifactUrl"):
+                    print(f"Artifact: {job['artifactUrl'][:60]}...")
+                print(f"\nDownload: ate urdf scan download {job.get('id')}")
+
+            elif status == "failed":
+                print(f"Error: {job.get('error', 'Unknown error')}")
+
+            print()
+
+    elif args.scan_action == "download":
+        from pathlib import Path
+        from .cloud import CloudScanClient
+
+        client = CloudScanClient()
+        job = client.get_job(args.job_id)
+
+        if "error" in job:
+            print(f"Error: {job['error']}")
+            sys.exit(1)
+
+        if job.get("status") != "completed":
+            print(f"Error: Job is '{job.get('status')}', not 'completed'")
+            print("Wait for processing to complete before downloading.")
+            sys.exit(1)
+
+        artifact_url = job.get("artifactUrl")
+        if not artifact_url:
+            print("Error: No artifact URL found for this job")
+            sys.exit(1)
+
+        output_dir = Path(args.output)
+        output_dir.mkdir(parents=True, exist_ok=True)
+
+        print(f"Downloading artifact from job {args.job_id}...")
+
+        def download_progress(downloaded, total):
+            pct = int(100 * downloaded / total) if total > 0 else 0
+            print(f"\r {pct}% ({downloaded // 1024} KB)", end="", flush=True)
+
+        try:
+            urdf_path = client.download_artifact(artifact_url, output_dir, download_progress)
+            print(f"\n\nDownloaded to: {output_dir}/")
+            print(f"URDF: {urdf_path}")
+        except RuntimeError as e:
+            print(f"\nError: {e}")
+            sys.exit(1)
+
+    else:
+        print(f"Unknown scan action: {args.scan_action}")
+        print("Available actions: capture, segment, optimize, mesh, synthesize, status, validate, jobs, job, download")
+        sys.exit(1)
+
+
+__all__ = [
+    "register_parser",
+    "handle",
+]
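
For orientation, a minimal sketch of how the new register_parser/handle pair could be mounted on an argparse CLI. The root prog name ("ate") and the "command" dest below are illustrative assumptions; the actual wiring lives in ate/cli.py and ate/commands/__init__.py, which this diff only summarizes in the file list above.

    import argparse
    from ate.urdf import commands as urdf_commands

    # Build a root parser and let the module register its "urdf" subcommand tree.
    parser = argparse.ArgumentParser(prog="ate")
    subparsers = parser.add_subparsers(dest="command")
    urdf_commands.register_parser(subparsers)

    # Example invocation: report the status of a local scan session as JSON.
    args = parser.parse_args(["urdf", "scan", "status", "./my_robot/", "--json"])
    if args.command == "urdf":
        # The client argument is unused by the scan actions defined in this file,
        # so None suffices for a local status check.
        urdf_commands.handle(client=None, args=args)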