foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/urdf/pipeline.py
ADDED
|
@@ -0,0 +1,468 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Full URDF generation pipeline orchestration.
|
|
3
|
+
|
|
4
|
+
This module provides the unified pipeline that runs all phases:
|
|
5
|
+
1. Capture: Video capture and link annotation
|
|
6
|
+
2. Segment: SAM 2 segmentation + Depth Anything depth estimation + 3D lifting
|
|
7
|
+
3. Optimize: Kinematic parameter estimation
|
|
8
|
+
4. Mesh: Visual and collision mesh generation
|
|
9
|
+
5. Synthesize: URDF XML generation and validation
|
|
10
|
+
|
|
11
|
+
Usage:
|
|
12
|
+
ate urdf scan --output ./my_robot/ --name my_robot --scale-ref "gripper:85mm"
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
import sys
|
|
17
|
+
from typing import Optional, Dict, Any
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from dataclasses import dataclass
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class PipelineError(Exception):
    """Raised when a stage of the URDF generation pipeline fails."""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class PipelineConfig:
    """Configuration for the URDF generation pipeline.

    Carries every user-tunable knob consumed by the stage runners
    (capture, segment, optimize, mesh, synthesize); see the field
    comments for units and defaults.
    """
    output_dir: str                   # Session/output directory (required)
    robot_name: Optional[str] = None  # Robot name; None means auto-detect
    scale_ref: Optional[str] = None   # Metric scale reference, e.g. "gripper:85mm"
    video_path: Optional[str] = None  # Pre-recorded video; None means live camera
    device: str = "cpu"               # Compute device for the vision models
    camera_id: int = 0                # Camera index for live capture
    density: float = 1200.0  # kg/m^3
    max_hulls: int = 8                # Hull budget passed to collision meshing
    frame_skip: int = 1               # Frame subsampling for depth/lifting stages
    voxel_size: float = 0.005  # meters
    simplify_to: int = 5000  # faces
    dry_run: bool = False             # Print planned actions without executing them
    upload: bool = False              # Upload resulting URDF after synthesis
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def create_progress_callback(stage_name: str):
    """
    Create a progress callback for a pipeline stage.

    Args:
        stage_name: Name of the stage. Currently not rendered in the bar
            output; kept so callers can label stages without an interface
            change.

    Returns:
        A ``callback(current, total)`` that draws an in-place text
        progress bar on stdout and emits a newline once ``current``
        reaches ``total``.
    """
    def callback(current: int, total: int):
        # Guard against total == 0 so an empty work list does not raise
        # ZeroDivisionError; treat it as a single-step task.
        denom = total if total > 0 else 1
        pct = int(100 * current / denom)
        bar_len = 30
        filled = int(bar_len * current / denom)
        bar = "=" * filled + "-" * (bar_len - filled)
        print(f"\r [{bar}] {pct}% ({current}/{total})", end="", flush=True)
        if current >= total:
            print()  # New line when complete
    return callback
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def run_capture_stage(config: PipelineConfig) -> "ScanSession":
    """
    Run the capture stage.

    Drives video capture and link annotation; in dry-run mode it only
    creates an empty session so downstream stages still get an object.

    Args:
        config: Pipeline configuration

    Returns:
        ScanSession with captured data
    """
    from .capture import run_capture

    divider = "=" * 60
    print("\n" + divider)
    print("PHASE 1: DATA ACQUISITION")
    print(divider)

    if config.dry_run:
        print(" [DRY RUN] Would capture video and annotate links")
        from .scan_session import ScanSession
        return ScanSession.create(
            config.output_dir,
            robot_name=config.robot_name,
            scale_ref=config.scale_ref,
            device=config.device,
        )

    captured = run_capture(
        output_dir=config.output_dir,
        video_path=config.video_path,
        camera_id=config.camera_id,
        robot_name=config.robot_name,
        scale_ref=config.scale_ref,
    )
    print(f"\nCapture complete: {len(captured.links)} links annotated")
    return captured
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def run_segment_stage(
    session: "ScanSession",
    config: PipelineConfig,
) -> Dict[str, Any]:
    """
    Run the segmentation and depth estimation stage.

    Performs SAM 2 segmentation, depth estimation, and 3D point-cloud
    lifting for the captured session.

    Args:
        session: ScanSession from capture stage
        config: Pipeline configuration

    Returns:
        Dict with masks, depth_maps, and cloud_paths
    """
    from .segmentation import run_segmentation
    from .depth import run_depth_estimation
    from .lifting import run_lifting

    divider = "=" * 60
    print("\n" + divider)
    print("PHASE 2: SEGMENTATION & DEPTH")
    print(divider)

    if config.dry_run:
        for line in (
            " [DRY RUN] Would run SAM 2 segmentation",
            " [DRY RUN] Would run Depth Anything V2",
            " [DRY RUN] Would generate point clouds",
        ):
            print(line)
        return {"masks": {}, "depth_maps": {}, "cloud_paths": {}}

    print("\nRunning SAM 2 segmentation...")
    link_masks = run_segmentation(
        session,
        progress_callback=create_progress_callback("Segmentation"),
    )

    print("\nRunning depth estimation...")
    # Scale factor is not consumed here; synthesis works from session state.
    depth_by_frame, _scale_factor = run_depth_estimation(
        session,
        link_masks,
        frame_skip=config.frame_skip,
        progress_callback=create_progress_callback("Depth"),
    )

    print("\nGenerating point clouds...")
    clouds = run_lifting(
        session,
        link_masks,
        depth_by_frame,
        frame_skip=config.frame_skip,
        voxel_size=config.voxel_size,
        progress_callback=create_progress_callback("Lifting"),
    )

    print(f"\nSegmentation complete: {len(clouds)} point clouds generated")
    return {"masks": link_masks, "depth_maps": depth_by_frame, "cloud_paths": clouds}
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def run_optimize_stage(
    session: "ScanSession",
    segment_data: Dict[str, Any],
    config: PipelineConfig,
):
    """
    Run the kinematic optimization stage.

    Estimates joint parameters from the segmentation output and prints
    one line per discovered joint.

    Args:
        session: ScanSession
        segment_data: Output from segment stage
        config: Pipeline configuration
    """
    from .kinematics import run_kinematic_optimization

    divider = "=" * 60
    print("\n" + divider)
    print("PHASE 3: KINEMATIC OPTIMIZATION")
    print(divider)

    if config.dry_run:
        print(" [DRY RUN] Would estimate joint parameters")
        return

    print("\nEstimating joint parameters...")
    discovered = run_kinematic_optimization(
        session,
        segment_data["masks"],
        segment_data["depth_maps"],
        progress_callback=create_progress_callback("Kinematics"),
    )

    print(f"\nOptimization complete: {len(discovered)} joints discovered")
    for j in discovered:
        print(f" - {j.name}: {j.joint_type} (conf={j.confidence:.2f})")
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def run_mesh_stage(session: "ScanSession", config: PipelineConfig):
    """
    Run the mesh generation stage.

    Builds visual meshes (simplified to ``config.simplify_to`` faces)
    and collision meshes (bounded by ``config.max_hulls``) per link.

    Args:
        session: ScanSession
        config: Pipeline configuration
    """
    from .meshing import generate_all_visual_meshes
    from .collision import generate_all_collision_meshes

    divider = "=" * 60
    print("\n" + divider)
    print("PHASE 4: MESH GENERATION")
    print(divider)

    if config.dry_run:
        print(" [DRY RUN] Would generate visual meshes")
        print(" [DRY RUN] Would generate collision meshes")
        return

    print("\nGenerating visual meshes...")
    visual = generate_all_visual_meshes(
        session,
        simplify_to=config.simplify_to,
        progress_callback=create_progress_callback("Visual meshes"),
    )

    print("\nGenerating collision meshes...")
    # Collision paths are recorded on the session; only the visual count
    # is reported here.
    generate_all_collision_meshes(
        session,
        max_hulls=config.max_hulls,
        progress_callback=create_progress_callback("Collision meshes"),
    )

    print(f"\nMesh generation complete: {len(visual)} links")
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
def run_synthesize_stage(session: "ScanSession", config: PipelineConfig) -> Path:
    """
    Run the URDF synthesis stage.

    Generates the URDF XML, then validates it and prints any errors and
    warnings from the validation report.

    Args:
        session: ScanSession
        config: Pipeline configuration

    Returns:
        Path to generated URDF
    """
    from .synthesis import run_synthesis
    from .validation import run_validation

    divider = "=" * 60
    print("\n" + divider)
    print("PHASE 5: URDF SYNTHESIS")
    print(divider)

    if config.dry_run:
        print(" [DRY RUN] Would synthesize URDF")
        print(" [DRY RUN] Would validate URDF")
        return session.urdf_path

    print("\nSynthesizing URDF...")
    generated = run_synthesis(
        session,
        density=config.density,
        robot_name=config.robot_name,
        progress_callback=create_progress_callback("Synthesis"),
    )

    print("\nValidating URDF...")
    report = run_validation(session)

    if report.valid:
        print(" URDF validation: PASSED")
    else:
        print(" URDF validation: FAILED")
        for err in report.errors:
            print(f" ERROR: {err}")

    for warn in report.warnings:
        print(f" WARNING: {warn}")

    print(f"\nGenerated: {generated}")
    return generated
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
def run_full_pipeline(config: PipelineConfig) -> Path:
    """
    Run the complete URDF generation pipeline.

    Executes the five phases (capture, segment, optimize, mesh,
    synthesize) in order, prints a summary, and optionally uploads the
    resulting URDF.

    Args:
        config: Pipeline configuration

    Returns:
        Path to generated URDF file

    Raises:
        PipelineError: If any stage fails (original exception chained).
        SystemExit: On user interrupt (Ctrl-C).
    """
    print("\n" + "#" * 60)
    print("# MARKERLESS URDF GENERATION PIPELINE")
    print("#" * 60)
    print(f"\nOutput: {config.output_dir}")
    print(f"Robot: {config.robot_name or 'auto'}")
    print(f"Scale: {config.scale_ref or 'unspecified'}")
    print(f"Device: {config.device}")

    if config.dry_run:
        print("\n*** DRY RUN MODE - No changes will be made ***")

    try:
        # Phases 1-5 in order; each stage prints its own banner.
        session = run_capture_stage(config)
        segment_data = run_segment_stage(session, config)
        run_optimize_stage(session, segment_data, config)
        run_mesh_stage(session, config)
        urdf_path = run_synthesize_stage(session, config)

        _print_pipeline_summary(session, urdf_path)

        if config.upload:
            _upload_urdf(session, urdf_path)

        return urdf_path

    except KeyboardInterrupt:
        print("\n\nPipeline interrupted by user")
        sys.exit(1)
    except Exception as e:
        logger.exception("Pipeline failed")
        print(f"\nPipeline failed: {e}")
        raise PipelineError(str(e)) from e


def _print_pipeline_summary(session: "ScanSession", urdf_path: Path) -> None:
    """Print the end-of-pipeline summary banner and session counts."""
    print("\n" + "#" * 60)
    print("# PIPELINE COMPLETE")
    print("#" * 60)
    status = session.get_status()
    print(f"\nSession: {status['session_dir']}")
    print(f"Links: {status['data']['link_count']}")
    print(f"Joints: {status['data']['joint_count']}")
    print(f"URDF: {urdf_path}")


def _upload_urdf(session: "ScanSession", urdf_path: Path) -> None:
    """Best-effort upload of the generated URDF to FoodforThought.

    Failures are reported but never propagate: the URDF is already on
    disk, so an upload problem should not fail the whole run.
    """
    print("\nUploading to FoodforThought...")
    try:
        from ..robot.skill_upload import SkillLibraryUploader

        uploader = SkillLibraryUploader()
        robot_name = session.metadata.robot_name or "unknown_robot"

        project = uploader.get_or_create_project(
            name=f"{robot_name}_urdf",
            description=f"URDF from markerless scan for {robot_name}",
        )

        urdf_content = urdf_path.read_text()
        # NOTE(review): reaches into the uploader's private _request;
        # consider exposing a public artifact-upload method instead.
        response = uploader._request(
            "POST",
            f"/projects/{project['id']}/artifacts",
            json={
                "name": f"{robot_name}.urdf",
                "artifact_type": "processed",
                "content_type": "application/xml",
                "metadata": {
                    "robot_name": robot_name,
                    "scale_ref": session.metadata.scale_ref,
                    "link_count": len(session.links),
                    "joint_count": len(session.joints),
                    "generated_by": "ate urdf scan",
                },
                "content": urdf_content,
            },
        )
        print(f"Uploaded: {robot_name}.urdf (artifact {response.get('id', 'unknown')})")

    except ImportError:
        # Uploader module unavailable (e.g. partial install / not logged in).
        print("Upload requires authentication. Run 'ate login' first.")
    except Exception as e:
        print(f"Upload error: {e}")
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def resume_pipeline(
    session_dir: str,
    from_stage: str = "auto",
    config: Optional[PipelineConfig] = None,
) -> Path:
    """
    Resume a pipeline from a saved session.

    Args:
        session_dir: Path to existing session
        from_stage: Stage to resume from ("capture", "segment", "optimize", "mesh", "synthesize")
        config: Optional configuration overrides

    Returns:
        Path to generated URDF
    """
    from .scan_session import ScanSession

    print(f"\nResuming pipeline from: {session_dir}")

    session = ScanSession.load(session_dir)
    status = session.get_status()

    # "auto" resumes at the first stage not yet marked complete.
    if from_stage == "auto":
        stage_flags = status["stages"]
        for candidate in ("capture", "segment", "optimize", "mesh"):
            if not stage_flags[candidate]:
                from_stage = candidate
                break
        else:
            from_stage = "synthesize"

    print(f"Resuming from: {from_stage}")

    # Rebuild a config from the stored session metadata unless overridden.
    if config is None:
        config = PipelineConfig(
            output_dir=str(session.session_dir),
            robot_name=session.metadata.robot_name,
            scale_ref=session.metadata.scale_ref,
            device=session.metadata.device,
            density=session.metadata.density_kg_m3,
        )

    segment_data = None

    # Each completed stage advances from_stage so execution falls through
    # the remaining stages in order.
    if from_stage == "capture":
        session = run_capture_stage(config)
        from_stage = "segment"

    if from_stage == "segment":
        segment_data = run_segment_stage(session, config)
        from_stage = "optimize"

    if from_stage == "optimize":
        if segment_data is None:
            # Segment output is held only in memory, so resuming directly
            # at optimize requires re-running segmentation.
            print("\nNote: Segment data not in memory, re-running segmentation...")
            segment_data = run_segment_stage(session, config)
        run_optimize_stage(session, segment_data, config)
        from_stage = "mesh"

    if from_stage == "mesh":
        run_mesh_stage(session, config)
        from_stage = "synthesize"

    if from_stage == "synthesize":
        return run_synthesize_stage(session, config)

    raise PipelineError(f"Unknown stage: {from_stage}")
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
# Public API: the two top-level entry points, the per-phase stage
# runners, and their config/error types.
__all__ = [
    "PipelineError",
    "PipelineConfig",
    "run_capture_stage",
    "run_segment_stage",
    "run_optimize_stage",
    "run_mesh_stage",
    "run_synthesize_stage",
    "run_full_pipeline",
    "resume_pipeline",
]
|