foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +100 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/behaviors/common.py +686 -0
  9. ate/behaviors/tree.py +454 -0
  10. ate/cli.py +855 -3995
  11. ate/client.py +90 -0
  12. ate/commands/__init__.py +168 -0
  13. ate/commands/auth.py +389 -0
  14. ate/commands/bridge.py +448 -0
  15. ate/commands/data.py +185 -0
  16. ate/commands/deps.py +111 -0
  17. ate/commands/generate.py +384 -0
  18. ate/commands/memory.py +907 -0
  19. ate/commands/parts.py +166 -0
  20. ate/commands/primitive.py +399 -0
  21. ate/commands/protocol.py +288 -0
  22. ate/commands/recording.py +524 -0
  23. ate/commands/repo.py +154 -0
  24. ate/commands/simulation.py +291 -0
  25. ate/commands/skill.py +303 -0
  26. ate/commands/skills.py +487 -0
  27. ate/commands/team.py +147 -0
  28. ate/commands/workflow.py +271 -0
  29. ate/detection/__init__.py +38 -0
  30. ate/detection/base.py +142 -0
  31. ate/detection/color_detector.py +399 -0
  32. ate/detection/trash_detector.py +322 -0
  33. ate/drivers/__init__.py +39 -0
  34. ate/drivers/ble_transport.py +405 -0
  35. ate/drivers/mechdog.py +942 -0
  36. ate/drivers/wifi_camera.py +477 -0
  37. ate/interfaces/__init__.py +187 -0
  38. ate/interfaces/base.py +273 -0
  39. ate/interfaces/body.py +267 -0
  40. ate/interfaces/detection.py +282 -0
  41. ate/interfaces/locomotion.py +422 -0
  42. ate/interfaces/manipulation.py +408 -0
  43. ate/interfaces/navigation.py +389 -0
  44. ate/interfaces/perception.py +362 -0
  45. ate/interfaces/sensors.py +247 -0
  46. ate/interfaces/types.py +371 -0
  47. ate/llm_proxy.py +239 -0
  48. ate/mcp_server.py +387 -0
  49. ate/memory/__init__.py +35 -0
  50. ate/memory/cloud.py +244 -0
  51. ate/memory/context.py +269 -0
  52. ate/memory/embeddings.py +184 -0
  53. ate/memory/export.py +26 -0
  54. ate/memory/merge.py +146 -0
  55. ate/memory/migrate/__init__.py +34 -0
  56. ate/memory/migrate/base.py +89 -0
  57. ate/memory/migrate/pipeline.py +189 -0
  58. ate/memory/migrate/sources/__init__.py +13 -0
  59. ate/memory/migrate/sources/chroma.py +170 -0
  60. ate/memory/migrate/sources/pinecone.py +120 -0
  61. ate/memory/migrate/sources/qdrant.py +110 -0
  62. ate/memory/migrate/sources/weaviate.py +160 -0
  63. ate/memory/reranker.py +353 -0
  64. ate/memory/search.py +26 -0
  65. ate/memory/store.py +548 -0
  66. ate/recording/__init__.py +83 -0
  67. ate/recording/demonstration.py +378 -0
  68. ate/recording/session.py +415 -0
  69. ate/recording/upload.py +304 -0
  70. ate/recording/visual.py +416 -0
  71. ate/recording/wrapper.py +95 -0
  72. ate/robot/__init__.py +221 -0
  73. ate/robot/agentic_servo.py +856 -0
  74. ate/robot/behaviors.py +493 -0
  75. ate/robot/ble_capture.py +1000 -0
  76. ate/robot/ble_enumerate.py +506 -0
  77. ate/robot/calibration.py +668 -0
  78. ate/robot/calibration_state.py +388 -0
  79. ate/robot/commands.py +3735 -0
  80. ate/robot/direction_calibration.py +554 -0
  81. ate/robot/discovery.py +441 -0
  82. ate/robot/introspection.py +330 -0
  83. ate/robot/llm_system_id.py +654 -0
  84. ate/robot/locomotion_calibration.py +508 -0
  85. ate/robot/manager.py +270 -0
  86. ate/robot/marker_generator.py +611 -0
  87. ate/robot/perception.py +502 -0
  88. ate/robot/primitives.py +614 -0
  89. ate/robot/profiles.py +281 -0
  90. ate/robot/registry.py +322 -0
  91. ate/robot/servo_mapper.py +1153 -0
  92. ate/robot/skill_upload.py +675 -0
  93. ate/robot/target_calibration.py +500 -0
  94. ate/robot/teach.py +515 -0
  95. ate/robot/types.py +242 -0
  96. ate/robot/visual_labeler.py +1048 -0
  97. ate/robot/visual_servo_loop.py +494 -0
  98. ate/robot/visual_servoing.py +570 -0
  99. ate/robot/visual_system_id.py +906 -0
  100. ate/transports/__init__.py +121 -0
  101. ate/transports/base.py +394 -0
  102. ate/transports/ble.py +405 -0
  103. ate/transports/hybrid.py +444 -0
  104. ate/transports/serial.py +345 -0
  105. ate/urdf/__init__.py +30 -0
  106. ate/urdf/capture.py +582 -0
  107. ate/urdf/cloud.py +491 -0
  108. ate/urdf/collision.py +271 -0
  109. ate/urdf/commands.py +708 -0
  110. ate/urdf/depth.py +360 -0
  111. ate/urdf/inertial.py +312 -0
  112. ate/urdf/kinematics.py +330 -0
  113. ate/urdf/lifting.py +415 -0
  114. ate/urdf/meshing.py +300 -0
  115. ate/urdf/models/__init__.py +110 -0
  116. ate/urdf/models/depth_anything.py +253 -0
  117. ate/urdf/models/sam2.py +324 -0
  118. ate/urdf/motion_analysis.py +396 -0
  119. ate/urdf/pipeline.py +468 -0
  120. ate/urdf/scale.py +256 -0
  121. ate/urdf/scan_session.py +411 -0
  122. ate/urdf/segmentation.py +299 -0
  123. ate/urdf/synthesis.py +319 -0
  124. ate/urdf/topology.py +336 -0
  125. ate/urdf/validation.py +371 -0
  126. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
  127. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  128. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  129. foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
  130. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  131. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,299 @@
1
+ """
2
+ Temporal segmentation for URDF generation using SAM 2.
3
+
4
+ This module handles Phase 2a of the pipeline:
5
+ 1. Load SAM 2 model (with caching)
6
+ 2. Initialize masks from user click points
7
+ 3. Propagate masks across all video frames
8
+
9
+ Output: Per-frame binary masks for each annotated link.
10
+ """
11
+
12
+ import logging
13
+ from typing import Dict, List, Optional, Tuple
14
+ from pathlib import Path
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+ try:
19
+ import numpy as np
20
+ import cv2
21
+ DEPS_AVAILABLE = True
22
+ except ImportError:
23
+ DEPS_AVAILABLE = False
24
+ np = None
25
+ cv2 = None
26
+
27
+
28
class SegmentationError(Exception):
    """Raised when video segmentation fails or its dependencies are missing."""
31
+
32
+
33
class LinkSegmenter:
    """
    Temporal segmentation of robot links using SAM 2.

    Given a video and initial click points, propagates segmentation
    masks for each link across all frames.
    """

    def __init__(self, device: str = "cpu"):
        """
        Initialize segmenter.

        Args:
            device: Compute device ("cuda" or "cpu")

        Raises:
            SegmentationError: If numpy / OpenCV are not installed.
        """
        if not DEPS_AVAILABLE:
            raise SegmentationError(
                "Required dependencies not installed. "
                "Run: pip install numpy opencv-python"
            )

        self.device = device
        self._predictor = None  # SAM 2 predictor, loaded lazily on first use

    def _get_predictor(self):
        """Get or load the SAM 2 predictor (cached after the first call)."""
        if self._predictor is None:
            from .models import ModelCache
            ModelCache.set_device(self.device)
            self._predictor = ModelCache.get_sam2()
        return self._predictor

    def segment_video(
        self,
        video_path: str,
        links: List[Tuple[str, List[float], bool]],
        progress_callback: Optional[callable] = None,
    ) -> Dict[str, Dict[int, "np.ndarray"]]:
        """
        Segment a video given initial link annotations.

        Args:
            video_path: Path to video file
            links: List of (name, [x, y], is_fixed) tuples
            progress_callback: Optional callback(current, total)

        Returns:
            Dict mapping link_name -> {frame_idx: mask}

        Raises:
            SegmentationError: If any annotated link produced no masks.
        """
        predictor = self._get_predictor()

        # Initialize predictor with video
        predictor.initialize(video_path)

        # Add one foreground point prompt per link, all on frame 0.
        # The enumerate index doubles as the SAM 2 object id.
        logger.info(f"Initializing {len(links)} link masks...")
        for obj_id, (name, point, is_fixed) in enumerate(links):
            predictor.add_point_prompt(
                frame_idx=0,
                obj_id=obj_id,
                point=(point[0], point[1]),
                label=1,  # Foreground
            )
            logger.debug(f"Added prompt for '{name}' at {point}")

        # Propagate masks across the whole video
        logger.info("Propagating masks across video frames...")
        all_masks = predictor.propagate()

        # Reorganize by link name (obj_id maps back to position in `links`)
        link_names = [name for name, _, _ in links]
        result = {name: {} for name in link_names}

        for frame_idx, frame_masks in all_masks.items():
            for obj_id, mask in frame_masks.items():
                if obj_id < len(link_names):
                    link_name = link_names[obj_id]
                    result[link_name][frame_idx] = mask

            if progress_callback:
                progress_callback(frame_idx + 1, len(all_masks))

        # Validate: every annotated link must have at least one mask
        for name in link_names:
            if not result[name]:
                raise SegmentationError(
                    f"No masks generated for link '{name}'. "
                    "Check that the click point is on the robot."
                )

        logger.info(f"Segmentation complete: {len(all_masks)} frames")
        return result

    def extract_masked_regions(
        self,
        video_path: str,
        masks: Dict[str, Dict[int, "np.ndarray"]],
        output_dir: Path,
    ) -> Dict[str, List[Path]]:
        """
        Extract masked RGB regions from video frames.

        Args:
            video_path: Path to video file
            masks: Dict from segment_video()
            output_dir: Directory to save extracted regions

        Returns:
            Dict mapping link_name -> list of image paths

        Raises:
            SegmentationError: If the video cannot be opened.
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise SegmentationError(f"Could not open video: {video_path}")

        result = {name: [] for name in masks.keys()}
        frame_idx = 0

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                for link_name, link_masks in masks.items():
                    if frame_idx in link_masks:
                        mask = link_masks[frame_idx]

                        # Zero out everything outside the mask
                        masked = frame.copy()
                        masked[~mask] = 0

                        # Crop to the mask's bounding box. Slice ends use
                        # +1 because the max indices are inclusive pixel
                        # coordinates while numpy slicing is exclusive
                        # (previously the last row/column was dropped).
                        coords = np.where(mask)
                        if len(coords[0]) > 0:
                            y_min, y_max = coords[0].min(), coords[0].max()
                            x_min, x_max = coords[1].min(), coords[1].max()
                            cropped = masked[y_min:y_max + 1, x_min:x_max + 1]

                            # Save the cropped region to disk
                            out_path = output_dir / f"{link_name}_frame_{frame_idx:04d}.png"
                            cv2.imwrite(str(out_path), cropped)
                            result[link_name].append(out_path)

                frame_idx += 1

        finally:
            cap.release()

        logger.info(f"Extracted masked regions to {output_dir}")
        return result
186
+
187
+
188
def visualize_masks(
    frame: "np.ndarray",
    masks: Dict[str, "np.ndarray"],
    alpha: float = 0.5,
) -> "np.ndarray":
    """
    Visualize segmentation masks overlaid on a frame.

    Each link mask is blended onto the image in a distinct color,
    outlined with its contours, and labeled near the centroid of
    its first contour.

    Args:
        frame: BGR image
        masks: Dict mapping link_name -> mask
        alpha: Overlay transparency

    Returns:
        Visualization image
    """
    if not DEPS_AVAILABLE:
        raise SegmentationError("OpenCV not available")

    # Fixed BGR palette, cycled when there are more links than colors
    palette = (
        (255, 0, 0),    # Blue
        (0, 255, 0),    # Green
        (0, 0, 255),    # Red
        (255, 255, 0),  # Cyan
        (255, 0, 255),  # Magenta
        (0, 255, 255),  # Yellow
        (128, 0, 128),  # Purple
        (255, 165, 0),  # Orange
    )

    vis = frame.copy()

    for idx, (name, mask) in enumerate(masks.items()):
        color = palette[idx % len(palette)]

        # Blend a solid-color version of the masked region into the image
        tinted = vis.copy()
        tinted[mask] = color
        vis = cv2.addWeighted(tinted, alpha, vis, 1 - alpha, 0)

        # Outline the mask boundary
        contours, _ = cv2.findContours(
            mask.astype(np.uint8),
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE,
        )
        cv2.drawContours(vis, contours, -1, color, 2)

        if not contours:
            continue

        # Label at the centroid of the first contour: a thick white
        # pass underneath, then the link color on top for readability
        moments = cv2.moments(contours[0])
        if moments["m00"] > 0:
            cx = int(moments["m10"] / moments["m00"])
            cy = int(moments["m01"] / moments["m00"])
            cv2.putText(
                vis, name, (cx - 30, cy),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2
            )
            cv2.putText(
                vis, name, (cx - 30, cy),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 1
            )

    return vis
255
+
256
+
257
def run_segmentation(
    session: "ScanSession",
    progress_callback: Optional[callable] = None,
) -> Dict[str, Dict[int, "np.ndarray"]]:
    """
    Run segmentation phase on a scan session.

    Args:
        session: ScanSession with captured video and link annotations
        progress_callback: Optional progress callback

    Returns:
        Dict mapping link_name -> {frame_idx: mask}
    """
    # Validate that the "segment" phase may run (delegated to the session).
    # NOTE: the previously unused `from .scan_session import ScanSession`
    # local import was removed; the annotation above is a string and does
    # not need the name at runtime.
    session.check_prerequisites("segment")

    # Flatten link annotations into (name, click point, is_fixed) tuples
    links = [
        (link.name, link.point, link.is_fixed)
        for link in session.links
    ]

    # Run segmentation on the session's captured video, on the device
    # recorded in the session metadata
    segmenter = LinkSegmenter(device=session.metadata.device)
    masks = segmenter.segment_video(
        str(session.video_path),
        links,
        progress_callback,
    )

    logger.info(f"Segmentation complete for {len(masks)} links")
    return masks
292
+
293
+
294
# Public API of the segmentation module.
__all__ = ["SegmentationError", "LinkSegmenter", "visualize_masks", "run_segmentation"]
ate/urdf/synthesis.py ADDED
@@ -0,0 +1,319 @@
1
+ """
2
+ URDF synthesis from discovered kinematics and generated meshes.
3
+
4
+ This module handles Phase 5b of the pipeline:
5
+ 1. Load kinematics, meshes, and inertial properties
6
+ 2. Generate URDF XML structure
7
+ 3. Validate and write final file
8
+
9
+ Output: Complete URDF file ready for simulation.
10
+ """
11
+
12
+ import logging
13
+ from typing import Dict, List, Optional
14
+ from pathlib import Path
15
+ from datetime import datetime
16
+ import xml.etree.ElementTree as ET
17
+ from xml.dom import minidom
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
class SynthesisError(Exception):
    """Raised when URDF synthesis fails."""
25
+
26
+
27
def create_urdf_link(
    name: str,
    visual_mesh: Optional[Path] = None,
    collision_mesh: Optional[Path] = None,
    inertial: Optional["InertialProperties"] = None,
    mesh_base_path: str = "meshes",
) -> ET.Element:
    """
    Create a URDF <link> element.

    The <visual>, <collision>, and <inertial> children are emitted only
    when the corresponding argument is supplied.

    Args:
        name: Link name
        visual_mesh: Path to visual mesh file
        collision_mesh: Path to collision mesh file
        inertial: Inertial properties
        mesh_base_path: Relative path prefix for mesh files

    Returns:
        ElementTree Element for the link
    """
    link = ET.Element("link", {"name": name})

    if visual_mesh:
        # Visual geometry plus a neutral grey default material
        visual = ET.SubElement(link, "visual")
        geom = ET.SubElement(visual, "geometry")
        ET.SubElement(geom, "mesh", {"filename": f"{mesh_base_path}/{visual_mesh.name}"})
        material = ET.SubElement(visual, "material", {"name": f"{name}_material"})
        ET.SubElement(material, "color", {"rgba": "0.7 0.7 0.7 1.0"})

    if collision_mesh:
        collision = ET.SubElement(link, "collision")
        geom = ET.SubElement(collision, "geometry")
        ET.SubElement(geom, "mesh", {"filename": f"{mesh_base_path}/{collision_mesh.name}"})

    if inertial:
        # Mass, center of mass, and the six unique inertia tensor terms
        inertial_elem = ET.SubElement(link, "inertial")
        ET.SubElement(inertial_elem, "mass", {"value": f"{inertial.mass:.6f}"})

        cx, cy, cz = inertial.center_of_mass[0], inertial.center_of_mass[1], inertial.center_of_mass[2]
        ET.SubElement(
            inertial_elem, "origin",
            {"xyz": f"{cx:.6f} {cy:.6f} {cz:.6f}", "rpy": "0 0 0"},
        )

        tensor = inertial.inertia
        ET.SubElement(
            inertial_elem, "inertia",
            {term: f"{tensor[term]:.9f}" for term in ("ixx", "ixy", "ixz", "iyy", "iyz", "izz")},
        )

    return link
91
+
92
+
93
def create_urdf_joint(
    name: str,
    parent_link: str,
    child_link: str,
    joint_type: str,
    axis: List[float],
    origin: List[float],
    limits: Dict[str, float],
    effort: float = 100.0,
    velocity: float = 1.0,
) -> ET.Element:
    """
    Create a URDF <joint> element.

    <axis> and <limit> are emitted only for movable joints; the URDF
    spec defines limits for revolute/prismatic joints, so for
    joint_type == "fixed" the `limits` dict may be empty.

    Args:
        name: Joint name
        parent_link: Parent link name
        child_link: Child link name
        joint_type: Joint type (revolute, prismatic, fixed)
        axis: Rotation/translation axis [x, y, z]
        origin: Joint origin [x, y, z]
        limits: Joint limits {lower, upper} (ignored for fixed joints)
        effort: Maximum effort (N or Nm)
        velocity: Maximum velocity (rad/s or m/s)

    Returns:
        ElementTree Element for the joint
    """
    joint = ET.Element("joint", name=name, type=joint_type)

    ET.SubElement(joint, "parent", link=parent_link)
    ET.SubElement(joint, "child", link=child_link)

    ET.SubElement(
        joint, "origin",
        xyz=f"{origin[0]:.6f} {origin[1]:.6f} {origin[2]:.6f}",
        rpy="0 0 0",
    )

    if joint_type != "fixed":
        ET.SubElement(
            joint, "axis",
            xyz=f"{axis[0]:.6f} {axis[1]:.6f} {axis[2]:.6f}",
        )

        # Fix: <limit> was previously emitted unconditionally, which
        # required 'lower'/'upper' keys even for fixed joints (KeyError
        # on an empty limits dict) and produced elements the URDF spec
        # only defines for movable joints.
        ET.SubElement(
            joint, "limit",
            lower=f"{limits['lower']:.6f}",
            upper=f"{limits['upper']:.6f}",
            effort=f"{effort:.1f}",
            velocity=f"{velocity:.2f}",
        )

    return joint
147
+
148
+
149
def synthesize_urdf(
    session: "ScanSession",
    inertials: Dict[str, "InertialProperties"],
    robot_name: Optional[str] = None,
    mesh_base_path: str = "meshes",
) -> str:
    """
    Synthesize complete URDF from session data.

    Args:
        session: ScanSession with kinematics
        inertials: Dict of InertialProperties per link
        robot_name: Name for the robot (uses session name if not provided)
        mesh_base_path: Relative path prefix for mesh files

    Returns:
        URDF XML string
    """
    robot_name = robot_name or session.metadata.robot_name or "robot"

    robot = ET.Element("robot", name=robot_name)

    # Provenance comment at the top of the document
    robot.insert(0, ET.Comment(
        f"\n Generated by ate urdf scan\n"
        f" Source: {session.session_dir}\n"
        f" Date: {datetime.now().isoformat()}\n "
    ))

    meshes_dir = session.meshes_dir

    # Collect every link name referenced by annotations or joints
    link_names = {link.name for link in session.links}
    for joint in session.joints:
        link_names.update((joint.parent_link, joint.child_link))

    # One <link> per name; mesh files are attached only if present on disk
    for link_name in sorted(link_names):
        candidate_visual = meshes_dir / f"{link_name}_visual.obj"
        candidate_collision = meshes_dir / f"{link_name}_collision.obj"

        robot.append(create_urdf_link(
            link_name,
            candidate_visual if candidate_visual.exists() else None,
            candidate_collision if candidate_collision.exists() else None,
            inertials.get(link_name),
            mesh_base_path,
        ))

    # One <joint> per discovered joint
    for joint in session.joints:
        robot.append(create_urdf_joint(
            name=joint.name,
            parent_link=joint.parent_link,
            child_link=joint.child_link,
            joint_type=joint.joint_type,
            axis=joint.axis,
            origin=joint.origin,
            limits=joint.limits,
        ))

    # Pretty-print via minidom, then drop the blank lines it inserts
    raw = ET.tostring(robot, encoding='unicode')
    pretty = minidom.parseString(raw).toprettyxml(indent=" ")
    return '\n'.join(line for line in pretty.split('\n') if line.strip())
232
+
233
+
234
def write_urdf(
    session: "ScanSession",
    inertials: Dict[str, "InertialProperties"],
    output_path: Optional[Path] = None,
    robot_name: Optional[str] = None,
) -> Path:
    """
    Write URDF file to disk.

    Args:
        session: ScanSession
        inertials: Inertial properties
        output_path: Output path (uses session default if not provided)
        robot_name: Robot name

    Returns:
        Path to written URDF file
    """
    # Fall back to the session's canonical URDF location
    output_path = output_path or session.urdf_path

    urdf_xml = synthesize_urdf(session, inertials, robot_name)

    # Ensure the destination directory exists, then write the document
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(urdf_xml)

    logger.info(f"Wrote URDF to {output_path}")
    return output_path
262
+
263
+
264
def run_synthesis(
    session: "ScanSession",
    density: float = 1200.0,
    robot_name: Optional[str] = None,
    progress_callback: Optional[callable] = None,
) -> Path:
    """
    Run the URDF synthesis phase.

    Args:
        session: ScanSession with kinematics and meshes
        density: Material density for inertial estimation
        robot_name: Robot name
        progress_callback: Optional progress callback

    Returns:
        Path to generated URDF
    """
    from .inertial import estimate_all_inertials

    def report(step: int) -> None:
        # Forward progress only when a callback was supplied (3 steps total)
        if progress_callback:
            progress_callback(step, 3)

    # Verify earlier pipeline phases completed (delegated to the session)
    session.check_prerequisites("synthesize")

    # Step 1: estimate per-link inertial properties
    logger.info("Estimating inertial properties...")
    report(1)
    inertials = estimate_all_inertials(session, density)

    # Step 2: build and write the URDF document
    logger.info("Synthesizing URDF...")
    report(2)
    urdf_path = write_urdf(session, inertials, robot_name=robot_name)

    # Step 3: persist the completion flag on the session
    session.metadata.synthesize_complete = True
    session.save_metadata()
    report(3)

    logger.info(f"URDF synthesis complete: {urdf_path}")
    return urdf_path
310
+
311
+
312
# Public API of the synthesis module.
__all__ = ["SynthesisError", "create_urdf_link", "create_urdf_joint", "synthesize_urdf", "write_urdf", "run_synthesis"]