foodforthought-cli 0.2.8-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +12 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/cli.py +855 -4551
  9. ate/client.py +90 -0
  10. ate/commands/__init__.py +168 -0
  11. ate/commands/auth.py +389 -0
  12. ate/commands/bridge.py +448 -0
  13. ate/commands/data.py +185 -0
  14. ate/commands/deps.py +111 -0
  15. ate/commands/generate.py +384 -0
  16. ate/commands/memory.py +907 -0
  17. ate/commands/parts.py +166 -0
  18. ate/commands/primitive.py +399 -0
  19. ate/commands/protocol.py +288 -0
  20. ate/commands/recording.py +524 -0
  21. ate/commands/repo.py +154 -0
  22. ate/commands/simulation.py +291 -0
  23. ate/commands/skill.py +303 -0
  24. ate/commands/skills.py +487 -0
  25. ate/commands/team.py +147 -0
  26. ate/commands/workflow.py +271 -0
  27. ate/detection/__init__.py +38 -0
  28. ate/detection/base.py +142 -0
  29. ate/detection/color_detector.py +399 -0
  30. ate/detection/trash_detector.py +322 -0
  31. ate/drivers/__init__.py +18 -6
  32. ate/drivers/ble_transport.py +405 -0
  33. ate/drivers/mechdog.py +360 -24
  34. ate/drivers/wifi_camera.py +477 -0
  35. ate/interfaces/__init__.py +16 -0
  36. ate/interfaces/base.py +2 -0
  37. ate/interfaces/sensors.py +247 -0
  38. ate/llm_proxy.py +239 -0
  39. ate/memory/__init__.py +35 -0
  40. ate/memory/cloud.py +244 -0
  41. ate/memory/context.py +269 -0
  42. ate/memory/embeddings.py +184 -0
  43. ate/memory/export.py +26 -0
  44. ate/memory/merge.py +146 -0
  45. ate/memory/migrate/__init__.py +34 -0
  46. ate/memory/migrate/base.py +89 -0
  47. ate/memory/migrate/pipeline.py +189 -0
  48. ate/memory/migrate/sources/__init__.py +13 -0
  49. ate/memory/migrate/sources/chroma.py +170 -0
  50. ate/memory/migrate/sources/pinecone.py +120 -0
  51. ate/memory/migrate/sources/qdrant.py +110 -0
  52. ate/memory/migrate/sources/weaviate.py +160 -0
  53. ate/memory/reranker.py +353 -0
  54. ate/memory/search.py +26 -0
  55. ate/memory/store.py +548 -0
  56. ate/recording/__init__.py +42 -3
  57. ate/recording/session.py +12 -2
  58. ate/recording/visual.py +416 -0
  59. ate/robot/__init__.py +142 -0
  60. ate/robot/agentic_servo.py +856 -0
  61. ate/robot/behaviors.py +493 -0
  62. ate/robot/ble_capture.py +1000 -0
  63. ate/robot/ble_enumerate.py +506 -0
  64. ate/robot/calibration.py +88 -3
  65. ate/robot/calibration_state.py +388 -0
  66. ate/robot/commands.py +143 -11
  67. ate/robot/direction_calibration.py +554 -0
  68. ate/robot/discovery.py +104 -2
  69. ate/robot/llm_system_id.py +654 -0
  70. ate/robot/locomotion_calibration.py +508 -0
  71. ate/robot/marker_generator.py +611 -0
  72. ate/robot/perception.py +502 -0
  73. ate/robot/primitives.py +614 -0
  74. ate/robot/profiles.py +6 -0
  75. ate/robot/registry.py +5 -2
  76. ate/robot/servo_mapper.py +1153 -0
  77. ate/robot/skill_upload.py +285 -3
  78. ate/robot/target_calibration.py +500 -0
  79. ate/robot/teach.py +515 -0
  80. ate/robot/types.py +242 -0
  81. ate/robot/visual_labeler.py +9 -0
  82. ate/robot/visual_servo_loop.py +494 -0
  83. ate/robot/visual_servoing.py +570 -0
  84. ate/robot/visual_system_id.py +906 -0
  85. ate/transports/__init__.py +121 -0
  86. ate/transports/base.py +394 -0
  87. ate/transports/ble.py +405 -0
  88. ate/transports/hybrid.py +444 -0
  89. ate/transports/serial.py +345 -0
  90. ate/urdf/__init__.py +30 -0
  91. ate/urdf/capture.py +582 -0
  92. ate/urdf/cloud.py +491 -0
  93. ate/urdf/collision.py +271 -0
  94. ate/urdf/commands.py +708 -0
  95. ate/urdf/depth.py +360 -0
  96. ate/urdf/inertial.py +312 -0
  97. ate/urdf/kinematics.py +330 -0
  98. ate/urdf/lifting.py +415 -0
  99. ate/urdf/meshing.py +300 -0
  100. ate/urdf/models/__init__.py +110 -0
  101. ate/urdf/models/depth_anything.py +253 -0
  102. ate/urdf/models/sam2.py +324 -0
  103. ate/urdf/motion_analysis.py +396 -0
  104. ate/urdf/pipeline.py +468 -0
  105. ate/urdf/scale.py +256 -0
  106. ate/urdf/scan_session.py +411 -0
  107. ate/urdf/segmentation.py +299 -0
  108. ate/urdf/synthesis.py +319 -0
  109. ate/urdf/topology.py +336 -0
  110. ate/urdf/validation.py +371 -0
  111. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +1 -1
  112. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  113. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  114. foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
  115. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  116. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/urdf/depth.py ADDED
@@ -0,0 +1,360 @@
+"""
+Depth estimation for URDF generation using Depth Anything V2.
+
+This module handles Phase 2b of the pipeline:
+1. Load Depth Anything V2 model (with caching)
+2. Estimate depth for each video frame
+3. Apply scale factor from reference measurement
+
+Output: Per-frame metric depth maps.
+"""
+
+import logging
+from typing import Dict, Generator, List, Optional, Tuple
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+try:
+    import numpy as np
+    import cv2
+    DEPS_AVAILABLE = True
+except ImportError:
+    DEPS_AVAILABLE = False
+    np = None
+    cv2 = None
+
+
+class DepthError(Exception):
+    """Error during depth estimation."""
+    pass
+
+
+class DepthProcessor:
+    """
+    Depth estimation processor using Depth Anything V2.
+
+    Provides metric depth maps from RGB video frames.
+    """
+
+    def __init__(self, device: str = "cpu"):
+        """
+        Initialize processor.
+
+        Args:
+            device: Compute device ("cuda" or "cpu")
+        """
+        if not DEPS_AVAILABLE:
+            raise DepthError(
+                "Required dependencies not installed. "
+                "Run: pip install numpy opencv-python"
+            )
+
+        self.device = device
+        self._estimator = None
+
+    def _get_estimator(self):
+        """Get or load the depth estimator."""
+        if self._estimator is None:
+            from .models import ModelCache
+            ModelCache.set_device(self.device)
+            self._estimator = ModelCache.get_depth_model()
+        return self._estimator
+
+    def estimate_frame(
+        self,
+        frame: "np.ndarray",
+        scale_factor: float = 1.0,
+    ) -> "np.ndarray":
+        """
+        Estimate metric depth for a single frame.
+
+        Args:
+            frame: BGR image from OpenCV
+            scale_factor: Scale factor to convert to meters
+
+        Returns:
+            Depth map (H, W) in meters
+        """
+        estimator = self._get_estimator()
+        return estimator.estimate_metric(frame, scale_factor)
+
+    def process_video(
+        self,
+        video_path: str,
+        scale_factor: float = 1.0,
+        frame_skip: int = 1,
+        progress_callback: Optional[callable] = None,
+    ) -> Generator[Tuple[int, "np.ndarray", "np.ndarray"], None, None]:
+        """
+        Process video frames and yield depth maps.
+
+        Args:
+            video_path: Path to video file
+            scale_factor: Scale factor for metric depth
+            frame_skip: Process every Nth frame (1 = all frames)
+            progress_callback: Optional callback(current, total)
+
+        Yields:
+            Tuple of (frame_idx, bgr_frame, depth_map)
+        """
+        cap = cv2.VideoCapture(video_path)
+        if not cap.isOpened():
+            raise DepthError(f"Could not open video: {video_path}")
+
+        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+        frame_idx = 0
+
+        try:
+            while True:
+                ret, frame = cap.read()
+                if not ret:
+                    break
+
+                if frame_idx % frame_skip == 0:
+                    depth = self.estimate_frame(frame, scale_factor)
+                    yield frame_idx, frame, depth
+
+                if progress_callback:
+                    progress_callback(frame_idx + 1, total_frames)
+
+                frame_idx += 1
+
+        finally:
+            cap.release()
+
+    def process_video_batch(
+        self,
+        video_path: str,
+        scale_factor: float = 1.0,
+        frame_skip: int = 1,
+        progress_callback: Optional[callable] = None,
+    ) -> Dict[int, "np.ndarray"]:
+        """
+        Process all video frames and return depth maps.
+
+        Args:
+            video_path: Path to video file
+            scale_factor: Scale factor for metric depth
+            frame_skip: Process every Nth frame
+            progress_callback: Optional progress callback
+
+        Returns:
+            Dict mapping frame_idx -> depth_map
+        """
+        result = {}
+        for frame_idx, _, depth in self.process_video(
+            video_path, scale_factor, frame_skip, progress_callback
+        ):
+            result[frame_idx] = depth
+
+        logger.info(f"Processed {len(result)} depth frames")
+        return result
+
+
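Editor's note: a minimal sketch of driving the generator API above; the video path and scale value are placeholders, not package defaults.

    from ate.urdf.depth import DepthProcessor

    processor = DepthProcessor(device="cpu")
    for frame_idx, frame, depth in processor.process_video(
        "scan.mp4", scale_factor=0.001, frame_skip=5
    ):
        print(frame_idx, depth.shape)  # yields every 5th frame, depth in meters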
+def compute_scale_from_mask(
+    depth: "np.ndarray",
+    mask: "np.ndarray",
+    reference_meters: float,
+    dimension: str = "width",
+) -> float:
+    """
+    Compute scale factor from a masked region and known dimension.
+
+    This implements the Kalib method for scale calibration:
+    correlate a known physical dimension with its measured size
+    in the depth map.
+
+    Args:
+        depth: Depth map (H, W)
+        mask: Binary mask for the reference object
+        reference_meters: Known physical dimension in meters
+        dimension: "width" or "height" of the masked region
+
+    Returns:
+        Scale factor (meters per depth unit)
+    """
+    if not DEPS_AVAILABLE:
+        raise DepthError("NumPy not available")
+
+    # Get bounding box of mask
+    coords = np.where(mask)
+    if len(coords[0]) == 0:
+        raise DepthError("Empty mask - cannot compute scale")
+
+    y_min, y_max = coords[0].min(), coords[0].max()
+    x_min, x_max = coords[1].min(), coords[1].max()
+
+    # Compute dimension in pixels
+    if dimension == "width":
+        pixel_size = x_max - x_min
+    else:
+        pixel_size = y_max - y_min
+
+    if pixel_size == 0:
+        raise DepthError(f"Zero {dimension} in mask - cannot compute scale")
+
+    # Get median depth in the mask
+    masked_depth = depth[mask]
+    median_depth = np.median(masked_depth)
+
+    if median_depth == 0:
+        raise DepthError("Zero depth in masked region")
+
+    # Compute scale: physical_size / (pixel_size * depth)
+    # This gives us a factor to convert depth * pixels to meters
+    scale = reference_meters / (pixel_size * median_depth)
+
+    logger.info(
+        f"Scale computed: {scale:.6f} m/unit "
+        f"(ref={reference_meters}m, pixels={pixel_size}, depth={median_depth:.2f})"
+    )
+
+    return scale
+
+
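Editor's note: a worked check of the formula above, using the function's own bounding-box convention (all values invented for illustration).

    import numpy as np
    from ate.urdf.depth import compute_scale_from_mask

    depth = np.full((100, 200), 2.0)        # uniform relative depth of 2.0 units
    mask = np.zeros((100, 200), dtype=bool)
    mask[40:60, 25:176] = True              # bounding box spans 150 px in x

    scale = compute_scale_from_mask(depth, mask, reference_meters=0.30)
    # scale = 0.30 / (150 * 2.0) = 0.001 m per (pixel * depth unit)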
+def calibrate_depth_scale(
+    depth_processor: DepthProcessor,
+    video_path: str,
+    masks: Dict[str, Dict[int, "np.ndarray"]],
+    reference_link: str,
+    reference_meters: float,
+    dimension: str = "width",
+) -> float:
+    """
+    Calibrate global depth scale from a reference link.
+
+    Args:
+        depth_processor: DepthProcessor instance
+        video_path: Path to video
+        masks: Segmentation masks from segmentation phase
+        reference_link: Name of the link with known dimension
+        reference_meters: Known dimension in meters
+        dimension: "width" or "height"
+
+    Returns:
+        Calibrated scale factor
+    """
+    if reference_link not in masks:
+        raise DepthError(
+            f"Reference link '{reference_link}' not found in masks. "
+            f"Available: {list(masks.keys())}"
+        )
+
+    # Get first frame's data
+    link_masks = masks[reference_link]
+    first_frame_idx = min(link_masks.keys())
+    mask = link_masks[first_frame_idx]
+
+    # Get depth for first frame
+    cap = cv2.VideoCapture(video_path)
+    cap.set(cv2.CAP_PROP_POS_FRAMES, first_frame_idx)
+    ret, frame = cap.read()
+    cap.release()
+
+    if not ret:
+        raise DepthError(f"Could not read frame {first_frame_idx}")
+
+    # Estimate depth without scaling
+    depth = depth_processor.estimate_frame(frame, scale_factor=1.0)
+
+    # Compute scale
+    scale = compute_scale_from_mask(depth, mask, reference_meters, dimension)
+
+    return scale
+
+
+def visualize_depth(
+    depth: "np.ndarray",
+    colormap: Optional[int] = None,
+    min_depth: Optional[float] = None,
+    max_depth: Optional[float] = None,
+) -> "np.ndarray":
+    """
+    Visualize a depth map as a colored image.
+
+    Args:
+        depth: Depth map (H, W)
+        colormap: OpenCV colormap (defaults to cv2.COLORMAP_VIRIDIS)
+        min_depth: Minimum depth for normalization
+        max_depth: Maximum depth for normalization
+
+    Returns:
+        BGR visualization image
+    """
+    if not DEPS_AVAILABLE:
+        raise DepthError("OpenCV not available")
+    colormap = cv2.COLORMAP_VIRIDIS if colormap is None else colormap  # resolved here: cv2 is None at import when deps are missing
+    # Normalize
+    if min_depth is None:
+        min_depth = depth.min()
+    if max_depth is None:
+        max_depth = depth.max()
+
+    normalized = (depth - min_depth) / (max_depth - min_depth + 1e-8)
+    normalized = np.clip(normalized * 255, 0, 255).astype(np.uint8)
+
+    # Apply colormap
+    colored = cv2.applyColorMap(normalized, colormap)
+
+    return colored
+
+
+def run_depth_estimation(
+    session: "ScanSession",
+    masks: Dict[str, Dict[int, "np.ndarray"]],
+    frame_skip: int = 1,
+    progress_callback: Optional[callable] = None,
+) -> Tuple[Dict[int, "np.ndarray"], float]:
+    """
+    Run depth estimation phase on a scan session.
+
+    Args:
+        session: ScanSession with video
+        masks: Segmentation masks from Phase 2a
+        frame_skip: Process every Nth frame
+        progress_callback: Optional progress callback
+
+    Returns:
+        Tuple of (depth_maps, scale_factor)
+    """
+    # Initialize processor
+    processor = DepthProcessor(device=session.metadata.device)
+
+    # Calibrate scale if reference provided
+    scale_factor = 1.0
+    if session.metadata.scale_ref:
+        from .scale import parse_scale_ref
+        ref_link, ref_meters = parse_scale_ref(session.metadata.scale_ref)
+
+        logger.info(f"Calibrating scale from '{ref_link}' = {ref_meters}m")
+        scale_factor = calibrate_depth_scale(
+            processor,
+            str(session.video_path),
+            masks,
+            ref_link,
+            ref_meters,
+        )
+        session.metadata.scale_factor = scale_factor
+        session.save_metadata()
+
+    # Process all frames
+    depth_maps = processor.process_video_batch(
+        str(session.video_path),
+        scale_factor=scale_factor,
+        frame_skip=frame_skip,
+        progress_callback=progress_callback,
+    )
+
+    logger.info(f"Depth estimation complete: {len(depth_maps)} frames, scale={scale_factor:.6f}")
+    return depth_maps, scale_factor
+
+
+__all__ = [
+    "DepthError",
+    "DepthProcessor",
+    "compute_scale_from_mask",
+    "calibrate_depth_scale",
+    "visualize_depth",
+    "run_depth_estimation",
+]
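Editor's note: for orientation, the pieces above compose roughly as follows. This is a sketch, not package code; the video path, link name, and measurement are assumptions, and masks is the {link_name: {frame_idx: mask}} dict produced by the segmentation phase (Phase 2a).

    from ate.urdf.depth import DepthProcessor, calibrate_depth_scale, visualize_depth

    processor = DepthProcessor(device="cpu")
    scale = calibrate_depth_scale(
        processor,
        "scan.mp4",
        masks,                        # from the segmentation phase
        reference_link="base_link",   # hypothetical link with a known width
        reference_meters=0.30,        # measured by hand
    )

    for frame_idx, frame, depth in processor.process_video("scan.mp4", scale):
        preview = visualize_depth(depth)  # BGR heatmap for quick inspection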
ate/urdf/inertial.py ADDED
@@ -0,0 +1,312 @@
+"""
+Inertial property estimation for URDF generation.
+
+This module handles Phase 5a of the pipeline:
+1. Compute mesh volume from visual/collision meshes
+2. Estimate mass from assumed material density
+3. Calculate center of mass and inertia tensor
+
+Video cannot measure mass directly, so we use geometric estimation
+with configurable material density assumptions.
+"""
+
+import logging
+from typing import Dict, List, Optional, Tuple
+from pathlib import Path
+from dataclasses import dataclass
+
+logger = logging.getLogger(__name__)
+
+try:
+    import numpy as np
+    NUMPY_AVAILABLE = True
+except ImportError:
+    NUMPY_AVAILABLE = False
+    np = None
+
+try:
+    import trimesh
+    TRIMESH_AVAILABLE = True
+except ImportError:
+    TRIMESH_AVAILABLE = False
+    trimesh = None
+
+
+class InertialError(Exception):
+    """Error during inertial estimation."""
+    pass
+
+
+# Common material densities in kg/m^3
+MATERIAL_DENSITIES = {
+    "pla": 1250.0,           # PLA plastic
+    "abs": 1040.0,           # ABS plastic
+    "petg": 1270.0,          # PETG plastic
+    "nylon": 1150.0,         # Nylon
+    "aluminum": 2700.0,      # Aluminum
+    "steel": 7850.0,         # Steel
+    "carbon_fiber": 1600.0,  # Carbon fiber composite
+    "wood": 700.0,           # Average wood
+    "foam": 100.0,           # Foam/hollow structures
+    "default": 1200.0,       # Default (generic plastic)
+}
+
+
+@dataclass
+class InertialProperties:
+    """Inertial properties for a single link."""
+    link_name: str
+    mass: float                  # kg
+    center_of_mass: List[float]  # [x, y, z] meters
+    inertia: Dict[str, float]    # ixx, ixy, ixz, iyy, iyz, izz (kg*m^2)
+
+    def to_dict(self) -> Dict:
+        return {
+            "link_name": self.link_name,
+            "mass": self.mass,
+            "center_of_mass": self.center_of_mass,
+            "inertia": self.inertia,
+        }
+
+    @property
+    def inertia_tensor(self) -> "np.ndarray":
+        """Get 3x3 inertia tensor matrix."""
+        return np.array([
+            [self.inertia["ixx"], self.inertia["ixy"], self.inertia["ixz"]],
+            [self.inertia["ixy"], self.inertia["iyy"], self.inertia["iyz"]],
+            [self.inertia["ixz"], self.inertia["iyz"], self.inertia["izz"]],
+        ])
+
+
+def compute_mesh_volume(mesh: "trimesh.Trimesh") -> float:
+    """
+    Compute volume of a mesh.
+
+    Args:
+        mesh: Trimesh object (should be watertight)
+
+    Returns:
+        Volume in cubic meters
+    """
+    if not TRIMESH_AVAILABLE:
+        raise InertialError("trimesh not available")
+
+    if not mesh.is_watertight:
+        logger.warning("Mesh is not watertight, volume may be inaccurate")
+        # Attempt to fix
+        mesh.fill_holes()
+
+    return abs(mesh.volume)
+
+
+def compute_inertia_tensor(
+    mesh: "trimesh.Trimesh",
+    density: float,
+) -> Tuple[float, "np.ndarray", "np.ndarray"]:
+    """
+    Compute mass, center of mass, and inertia tensor.
+
+    Uses trimesh's built-in moment_inertia calculation.
+
+    Args:
+        mesh: Trimesh object
+        density: Material density (kg/m^3)
+
+    Returns:
+        Tuple of (mass, center_of_mass, inertia_tensor)
+    """
+    if not TRIMESH_AVAILABLE:
+        raise InertialError("trimesh not available")
+
+    # Compute volume and mass
+    volume = compute_mesh_volume(mesh)
+    mass = volume * density
+
+    # Ensure minimum mass for stability
+    MIN_MASS = 0.001  # 1 gram
+    if mass < MIN_MASS:
+        logger.warning(f"Very small mass {mass:.6f}kg, clamping to {MIN_MASS}kg")
+        mass = MIN_MASS
+
+    # Get center of mass
+    com = mesh.center_mass
+
+    # Get inertia tensor at center of mass
+    # trimesh computes moment_inertia at its default unit density, so scale by material density
+    inertia = mesh.moment_inertia * density
+
+    return mass, com, inertia
+
+
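Editor's note: a quick consistency check of the density scaling above, assuming trimesh is installed. For a 0.1 m cube of PLA (1250 kg/m^3 per the table above), compute_inertia_tensor should agree with the analytic box formula.

    import trimesh

    box = trimesh.creation.box(extents=[0.1, 0.1, 0.1])  # axis-aligned 0.1 m cube, watertight
    mass, com, inertia = compute_inertia_tensor(box, density=1250.0)
    # mass = 1e-3 m^3 * 1250 kg/m^3 = 1.25 kg
    # ixx = (1/12) * 1.25 * (0.1**2 + 0.1**2) ≈ 2.083e-3 kg*m^2
    print(mass, inertia[0, 0])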
+def estimate_inertial_from_mesh(
+    mesh_path: Path,
+    density: float = 1200.0,
+) -> InertialProperties:
+    """
+    Estimate inertial properties from a mesh file.
+
+    Args:
+        mesh_path: Path to mesh file (OBJ, STL, etc.)
+        density: Material density (kg/m^3)
+
+    Returns:
+        InertialProperties for the mesh
+    """
+    if not TRIMESH_AVAILABLE:
+        raise InertialError("trimesh not available")
+
+    # Load mesh
+    mesh = trimesh.load(str(mesh_path))
+
+    if not isinstance(mesh, trimesh.Trimesh):
+        if hasattr(mesh, 'geometry'):
+            meshes = list(mesh.geometry.values())
+            if meshes:
+                mesh = meshes[0]
+            else:
+                raise InertialError(f"No geometry in {mesh_path}")
+        else:
+            raise InertialError(f"Invalid mesh in {mesh_path}")
+
+    # Compute inertial properties
+    mass, com, inertia = compute_inertia_tensor(mesh, density)
+
+    # Extract link name from path
+    link_name = mesh_path.stem.replace("_visual", "").replace("_collision", "")
+
+    return InertialProperties(
+        link_name=link_name,
+        mass=float(mass),
+        center_of_mass=com.tolist(),
+        inertia={
+            "ixx": float(inertia[0, 0]),
+            "ixy": float(inertia[0, 1]),
+            "ixz": float(inertia[0, 2]),
+            "iyy": float(inertia[1, 1]),
+            "iyz": float(inertia[1, 2]),
+            "izz": float(inertia[2, 2]),
+        },
+    )
+
+
+def estimate_inertial_box(
+    dimensions: List[float],
+    density: float = 1200.0,
+    link_name: str = "link",
+) -> InertialProperties:
+    """
+    Estimate inertial properties for a box shape.
+
+    Useful for simple approximations or fallback.
+
+    Args:
+        dimensions: [width, height, depth] in meters
+        density: Material density
+        link_name: Name for the link
+
+    Returns:
+        InertialProperties
+    """
+    if not NUMPY_AVAILABLE:
+        raise InertialError("NumPy not available")
+
+    w, h, d = dimensions
+    volume = w * h * d
+    mass = volume * density
+
+    # Box inertia formulas
+    ixx = (1/12) * mass * (h**2 + d**2)
+    iyy = (1/12) * mass * (w**2 + d**2)
+    izz = (1/12) * mass * (w**2 + h**2)
+
+    return InertialProperties(
+        link_name=link_name,
+        mass=mass,
+        center_of_mass=[0.0, 0.0, 0.0],
+        inertia={
+            "ixx": ixx, "ixy": 0.0, "ixz": 0.0,
+            "iyy": iyy, "iyz": 0.0,
+            "izz": izz,
+        },
+    )
+
+
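Editor's note: plugging numbers into the box formulas above (values invented for illustration; "gripper" is a hypothetical link name).

    props = estimate_inertial_box([0.10, 0.05, 0.02], density=1250.0, link_name="gripper")
    # volume = 0.10 * 0.05 * 0.02 = 1.0e-4 m^3  ->  mass = 0.125 kg
    # ixx = (1/12) * 0.125 * (0.05**2 + 0.02**2) ≈ 3.02e-5 kg*m^2
    print(props.mass, props.inertia["ixx"])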
+def estimate_all_inertials(
+    session: "ScanSession",
+    density: float = 1200.0,
+    use_collision_mesh: bool = True,
+    progress_callback: Optional[callable] = None,
+) -> Dict[str, InertialProperties]:
+    """
+    Estimate inertial properties for all links.
+
+    Args:
+        session: ScanSession with meshes
+        density: Material density
+        use_collision_mesh: Use collision (simpler) or visual mesh
+        progress_callback: Optional progress callback
+
+    Returns:
+        Dict mapping link_name -> InertialProperties
+    """
+    meshes_dir = session.meshes_dir
+
+    # Find meshes
+    suffix = "_collision.obj" if use_collision_mesh else "_visual.obj"
+    mesh_files = list(meshes_dir.glob(f"*{suffix}"))
+
+    if not mesh_files:
+        # Fall back to the other mesh type
+        alt_suffix = "_visual.obj" if use_collision_mesh else "_collision.obj"
+        mesh_files = list(meshes_dir.glob(f"*{alt_suffix}"))
+
+    if not mesh_files:
+        raise InertialError(f"No mesh files found in {meshes_dir}")
+
+    result = {}
+    total = len(mesh_files)
+
+    for i, mesh_path in enumerate(mesh_files):
+        try:
+            props = estimate_inertial_from_mesh(mesh_path, density)
+            result[props.link_name] = props
+
+            logger.info(
+                f"Estimated inertial for '{props.link_name}': "
+                f"mass={props.mass:.4f}kg, com={props.center_of_mass}"
+            )
+        except Exception as e:
+            logger.error(f"Failed to estimate inertial for {mesh_path}: {e}")
+
+        if progress_callback:
+            progress_callback(i + 1, total)
+
+    logger.info(f"Estimated inertials for {len(result)} links")
+    return result
+
+
+def get_density_for_material(material: str) -> float:
+    """
+    Get density for a material name.
+
+    Args:
+        material: Material name (e.g., "pla", "aluminum")
+
+    Returns:
+        Density in kg/m^3
+    """
+    material = material.lower().replace(" ", "_").replace("-", "_")
+    return MATERIAL_DENSITIES.get(material, MATERIAL_DENSITIES["default"])
+
+
+__all__ = [
+    "InertialError",
+    "MATERIAL_DENSITIES",
+    "InertialProperties",
+    "compute_mesh_volume",
+    "compute_inertia_tensor",
+    "estimate_inertial_from_mesh",
+    "estimate_inertial_box",
+    "estimate_all_inertials",
+    "get_density_for_material",
+]
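Editor's note: downstream, these values map onto a URDF <inertial> element. A minimal sketch of that mapping is below; inertial_to_urdf is a hypothetical helper, and the package's own URDF assembly (presumably ate/urdf/synthesis.py) may do this differently.

    def inertial_to_urdf(props: InertialProperties) -> str:
        # Hypothetical helper, not part of the package
        i = props.inertia
        x, y, z = props.center_of_mass
        return (
            f'<inertial>\n'
            f'  <origin xyz="{x:.6f} {y:.6f} {z:.6f}" rpy="0 0 0"/>\n'
            f'  <mass value="{props.mass:.6f}"/>\n'
            f'  <inertia ixx="{i["ixx"]:.9f}" ixy="{i["ixy"]:.9f}" ixz="{i["ixz"]:.9f}" '
            f'iyy="{i["iyy"]:.9f}" iyz="{i["iyz"]:.9f}" izz="{i["izz"]:.9f}"/>\n'
            f'</inertial>'
        )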