foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/urdf/lifting.py
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
1
|
+
"""
|
|
2
|
+
2D to 3D lifting utilities for point cloud generation.
|
|
3
|
+
|
|
4
|
+
This module handles Phase 2c of the pipeline:
|
|
5
|
+
1. Back-project masked pixels to 3D using depth and intrinsics
|
|
6
|
+
2. Aggregate per-frame point clouds
|
|
7
|
+
3. Save point clouds in PLY format
|
|
8
|
+
|
|
9
|
+
Uses the standard pinhole camera model for projection.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
from typing import Dict, List, Optional, Tuple
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
try:
|
|
19
|
+
import numpy as np
|
|
20
|
+
NUMPY_AVAILABLE = True
|
|
21
|
+
except ImportError:
|
|
22
|
+
NUMPY_AVAILABLE = False
|
|
23
|
+
np = None
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class LiftingError(Exception):
    """Error during 2D to 3D lifting.

    Raised when NumPy is unavailable or when the source video for a scan
    session cannot be opened.
    """
    pass
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def lift_to_3d(
    rgb: "np.ndarray",
    depth: "np.ndarray",
    mask: "np.ndarray",
    intrinsics: Tuple[float, float, float, float],
) -> Tuple["np.ndarray", "np.ndarray"]:
    """
    Back-project masked pixels to 3D points.

    Uses the standard pinhole camera model:
        X = (u - cx) * Z / fx
        Y = (v - cy) * Z / fy
        Z = depth

    Args:
        rgb: RGB image (H, W, 3)
        depth: Depth map (H, W) in meters
        mask: Binary mask (H, W)
        intrinsics: (fx, fy, cx, cy) camera intrinsics

    Returns:
        Tuple of (points, colors):
        - points: (N, 3) float32 XYZ coordinates in meters
        - colors: (N, 3) uint8 RGB values [0-255]

    Raises:
        LiftingError: If NumPy is not available.
    """
    if not NUMPY_AVAILABLE:
        raise LiftingError("NumPy not available")

    fx, fy, cx, cy = intrinsics

    # Pixel coordinates of the masked region (rows = y, cols = x).
    rows, cols = np.where(mask)
    if rows.size == 0:
        return np.zeros((0, 3)), np.zeros((0, 3), dtype=np.uint8)

    # Keep only plausible depths (1 cm .. 100 m).
    depths = depth[rows, cols]
    keep = (depths > 0.01) & (depths < 100.0)
    rows, cols, depths = rows[keep], cols[keep], depths[keep]
    if rows.size == 0:
        return np.zeros((0, 3)), np.zeros((0, 3), dtype=np.uint8)

    # Pinhole back-projection of the surviving pixels.
    xs = (cols - cx) * depths / fx
    ys = (rows - cy) * depths / fy
    cloud = np.stack([xs, ys, depths], axis=1)

    # Per-point colors sampled from the RGB frame.
    pixel_colors = rgb[rows, cols]

    return cloud.astype(np.float32), pixel_colors.astype(np.uint8)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def save_ply(
    path: Path,
    points: "np.ndarray",
    colors: Optional["np.ndarray"] = None,
) -> None:
    """
    Save point cloud to an ASCII PLY file.

    Args:
        path: Output file path
        points: (N, 3) XYZ coordinates
        colors: Optional (N, 3) RGB colors [0-255]

    Raises:
        LiftingError: If NumPy is not available.
    """
    if not NUMPY_AVAILABLE:
        raise LiftingError("NumPy not available")

    n_points = len(points)

    # Header
    lines = [
        "ply",
        "format ascii 1.0",
        f"element vertex {n_points}",
        "property float x",
        "property float y",
        "property float z",
    ]
    if colors is not None:
        lines += [
            "property uchar red",
            "property uchar green",
            "property uchar blue",
        ]
    lines.append("end_header")

    # Vertex data: build all lines up front and write once — much faster
    # than one write() call per point for large clouds, identical output.
    if colors is not None:
        lines.extend(
            f"{p[0]:.6f} {p[1]:.6f} {p[2]:.6f} {int(c[0])} {int(c[1])} {int(c[2])}"
            for p, c in zip(points, colors)
        )
    else:
        lines.extend(f"{p[0]:.6f} {p[1]:.6f} {p[2]:.6f}" for p in points)

    with open(path, 'w') as f:
        f.write("\n".join(lines) + "\n")

    logger.debug(f"Saved {n_points} points to {path}")
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def load_ply(path: Path) -> Tuple["np.ndarray", Optional["np.ndarray"]]:
    """
    Load point cloud from an ASCII PLY file.

    Only the vertex element is read; any trailing elements (e.g. faces)
    are ignored rather than mis-parsed as vertices.

    Args:
        path: Path to PLY file

    Returns:
        Tuple of (points, colors) or (points, None)

    Raises:
        LiftingError: If NumPy is not available.
    """
    if not NUMPY_AVAILABLE:
        raise LiftingError("NumPy not available")

    points = []
    colors = []
    has_color = False
    in_header = True
    n_vertices = 0

    with open(path, 'r') as f:
        for line in f:
            line = line.strip()

            if in_header:
                if line.startswith("element vertex"):
                    n_vertices = int(line.split()[-1])
                elif "property" in line and "red" in line:
                    has_color = True
                elif line == "end_header":
                    in_header = False
            else:
                # Tolerate stray blank lines in the data section.
                if not line:
                    continue
                # Stop after the declared vertex count: anything that
                # follows (face lists, extra elements) is not vertex data.
                if len(points) >= n_vertices:
                    break

                parts = line.split()
                x, y, z = float(parts[0]), float(parts[1]), float(parts[2])
                points.append([x, y, z])

                if has_color and len(parts) >= 6:
                    r, g, b = int(parts[3]), int(parts[4]), int(parts[5])
                    colors.append([r, g, b])

    points = np.array(points, dtype=np.float32)
    colors = np.array(colors, dtype=np.uint8) if colors else None

    return points, colors
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def aggregate_point_clouds(
    clouds: List["np.ndarray"],
    colors_list: Optional[List["np.ndarray"]] = None,
    voxel_size: Optional[float] = None,
) -> Tuple["np.ndarray", Optional["np.ndarray"]]:
    """
    Merge multiple point clouds into a single cloud.

    Args:
        clouds: List of (N, 3) point clouds
        colors_list: Optional list of (N, 3) color arrays
        voxel_size: Optional voxel size; when positive, the merged cloud
            is voxel-downsampled

    Returns:
        Tuple of (aggregated_points, aggregated_colors)

    Raises:
        LiftingError: If NumPy is not available.
    """
    if not NUMPY_AVAILABLE:
        raise LiftingError("NumPy not available")

    if not clouds:
        return np.zeros((0, 3)), None

    # Stack every per-frame cloud (and color set) into one array.
    merged = np.vstack(clouds)
    merged_colors = np.vstack(colors_list) if colors_list else None

    # Thin the merged cloud with a voxel grid when requested.
    if voxel_size and voxel_size > 0:
        merged, merged_colors = voxel_downsample(merged, merged_colors, voxel_size)

    return merged, merged_colors
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def voxel_downsample(
    points: "np.ndarray",
    colors: Optional["np.ndarray"],
    voxel_size: float,
) -> Tuple["np.ndarray", Optional["np.ndarray"]]:
    """
    Downsample point cloud using a voxel grid.

    Points falling into the same voxel are replaced by their centroid
    (and mean color). Accumulation is fully vectorized with
    ``np.add.at``/``np.bincount`` instead of a per-point Python loop,
    which matters for multi-million-point clouds.

    Args:
        points: (N, 3) point cloud
        colors: Optional (N, 3) colors
        voxel_size: Voxel edge length (same units as points)

    Returns:
        Downsampled (points, colors); points float32, colors uint8.
    """
    if len(points) == 0:
        return points, colors

    # Quantize each point to its voxel cell.
    voxel_indices = np.floor(points / voxel_size).astype(np.int32)

    # Group points by voxel cell.
    unique_voxels, inverse_indices = np.unique(
        voxel_indices, axis=0, return_inverse=True
    )
    # Flatten: NumPy 2.0 briefly returned a (N, 1) inverse for axis=0.
    inverse_indices = np.asarray(inverse_indices).reshape(-1)
    n_voxels = len(unique_voxels)

    # Vectorized per-voxel sums and counts (C speed).
    counts = np.bincount(inverse_indices, minlength=n_voxels).astype(np.float64)
    new_points = np.zeros((n_voxels, 3))
    np.add.at(new_points, inverse_indices, points)
    new_points /= counts[:, np.newaxis]

    new_colors = None
    if colors is not None:
        new_colors = np.zeros((n_voxels, 3))
        np.add.at(new_colors, inverse_indices, colors)
        new_colors /= counts[:, np.newaxis]
        # Truncating cast matches the previous loop-based implementation.
        new_colors = new_colors.astype(np.uint8)

    # Same logger name as the module-level `logger`.
    logging.getLogger(__name__).debug(
        f"Voxel downsampled: {len(points)} -> {len(new_points)} points"
    )
    return new_points.astype(np.float32), new_colors
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
def generate_link_clouds(
    session: "ScanSession",
    masks: Dict[str, Dict[int, "np.ndarray"]],
    depth_maps: Dict[int, "np.ndarray"],
    intrinsics: Optional[Tuple[float, float, float, float]] = None,
    frame_skip: int = 1,
    voxel_size: float = 0.005,
    progress_callback: Optional[callable] = None,
) -> Dict[str, Path]:
    """
    Generate and save point clouds for each link.

    For every frame that has a depth map, each link's mask is back-projected
    to 3D with ``lift_to_3d``; the per-frame clouds are then merged (and
    voxel-downsampled) per link and written as ``<link_name>.ply`` under
    ``session.clouds_dir``.

    Args:
        session: ScanSession providing the source video and output dirs
        masks: Segmentation masks, link_name -> {frame_idx -> binary mask}
        depth_maps: Depth maps from depth estimation, frame_idx -> (H, W)
        intrinsics: Camera intrinsics (fx, fy, cx, cy); estimated from the
            session's resolution when None
        frame_skip: Use every Nth frame
        voxel_size: Voxel size for downsampling (meters)
        progress_callback: Optional callable(processed, total)

    Returns:
        Dict mapping link_name -> cloud_path; links that produced no points
        are skipped with a warning

    Raises:
        LiftingError: If the session video cannot be opened.
    """
    import cv2

    # Get intrinsics — fall back to a resolution-based estimate when the
    # caller does not supply calibrated values.
    if intrinsics is None:
        from .scale import estimate_intrinsics_from_resolution
        w, h = session.metadata.resolution
        intrinsics = estimate_intrinsics_from_resolution(w, h)
        logger.info(f"Estimated intrinsics: fx={intrinsics[0]:.1f}, fy={intrinsics[1]:.1f}")

    # Open video
    cap = cv2.VideoCapture(str(session.video_path))
    if not cap.isOpened():
        raise LiftingError(f"Could not open video: {session.video_path}")

    # Per-link accumulators of per-frame clouds / colors.
    link_clouds = {name: [] for name in masks.keys()}
    link_colors = {name: [] for name in masks.keys()}

    total_frames = len(depth_maps)
    processed = 0

    try:
        for frame_idx in sorted(depth_maps.keys()):
            if frame_idx % frame_skip != 0:
                continue

            # Read frame — random access by frame index; unreadable frames
            # are silently skipped.
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            ret, frame = cap.read()
            if not ret:
                continue

            # OpenCV decodes BGR; convert so saved colors are true RGB.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            depth = depth_maps[frame_idx]

            # Lift each link's masked region for this frame.
            for link_name, link_masks in masks.items():
                if frame_idx not in link_masks:
                    continue

                mask = link_masks[frame_idx]
                points, colors = lift_to_3d(rgb, depth, mask, intrinsics)

                if len(points) > 0:
                    link_clouds[link_name].append(points)
                    link_colors[link_name].append(colors)

            processed += 1
            # NOTE(review): with frame_skip > 1, `processed` can never reach
            # `total_frames`, so the callback under-reports progress —
            # confirm this is intended.
            if progress_callback:
                progress_callback(processed, total_frames)

    finally:
        cap.release()

    # Aggregate and save each link's cloud.
    result = {}
    clouds_dir = session.clouds_dir
    clouds_dir.mkdir(parents=True, exist_ok=True)

    for link_name in masks.keys():
        if not link_clouds[link_name]:
            logger.warning(f"No points generated for link '{link_name}'")
            continue

        # Merge this link's per-frame clouds and thin with a voxel grid.
        points, colors = aggregate_point_clouds(
            link_clouds[link_name],
            link_colors[link_name],
            voxel_size=voxel_size,
        )

        # Save
        cloud_path = clouds_dir / f"{link_name}.ply"
        save_ply(cloud_path, points, colors)
        result[link_name] = cloud_path

        logger.info(f"Saved cloud for '{link_name}': {len(points)} points")

    return result
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
def run_lifting(
    session: "ScanSession",
    masks: Dict[str, Dict[int, "np.ndarray"]],
    depth_maps: Dict[int, "np.ndarray"],
    frame_skip: int = 1,
    voxel_size: float = 0.005,
    progress_callback: Optional[callable] = None,
) -> Dict[str, Path]:
    """
    Run the lifting phase to generate point clouds.

    Thin wrapper around :func:`generate_link_clouds` that also marks the
    session metadata and persists it on success.

    Args:
        session: ScanSession
        masks: Segmentation masks, link_name -> {frame_idx -> mask}
        depth_maps: Depth maps, frame_idx -> depth array
        frame_skip: Process every Nth frame
        voxel_size: Voxel grid size for downsampling (meters)
        progress_callback: Optional progress callback

    Returns:
        Dict mapping link_name -> cloud_path
    """
    cloud_paths = generate_link_clouds(
        session,
        masks,
        depth_maps,
        frame_skip=frame_skip,
        voxel_size=voxel_size,
        progress_callback=progress_callback,
    )

    # Update session metadata
    # NOTE(review): this sets `segment_complete`, not a lifting-specific
    # flag — confirm this is the intended field for the lifting phase.
    session.metadata.segment_complete = True
    session.save_metadata()

    logger.info(f"Lifting complete: {len(cloud_paths)} point clouds generated")
    return cloud_paths
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
# Public API of this module.
__all__ = [
    "LiftingError",
    "lift_to_3d",
    "save_ply",
    "load_ply",
    "aggregate_point_clouds",
    "voxel_downsample",
    "generate_link_clouds",
    "run_lifting",
]
|
ate/urdf/meshing.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Mesh generation from point clouds for URDF.
|
|
3
|
+
|
|
4
|
+
This module handles Phase 4a of the pipeline:
|
|
5
|
+
1. Aggregate per-frame point clouds into canonical link shapes
|
|
6
|
+
2. Generate visual meshes via surface reconstruction
|
|
7
|
+
3. Support multiple reconstruction methods (Poisson, SuGaR)
|
|
8
|
+
|
|
9
|
+
Output: Visual mesh files (OBJ format) for each link.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
from typing import Dict, List, Optional, Tuple
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
try:
|
|
19
|
+
import numpy as np
|
|
20
|
+
NUMPY_AVAILABLE = True
|
|
21
|
+
except ImportError:
|
|
22
|
+
NUMPY_AVAILABLE = False
|
|
23
|
+
np = None
|
|
24
|
+
|
|
25
|
+
try:
|
|
26
|
+
import trimesh
|
|
27
|
+
TRIMESH_AVAILABLE = True
|
|
28
|
+
except ImportError:
|
|
29
|
+
TRIMESH_AVAILABLE = False
|
|
30
|
+
trimesh = None
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class MeshingError(Exception):
    """Error during mesh generation.

    Raised for missing dependencies (NumPy/trimesh), insufficient points,
    or an unknown reconstruction method.
    """
    pass
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def poisson_reconstruct(
    points: "np.ndarray",
    colors: Optional["np.ndarray"] = None,
    depth: int = 8,
) -> "trimesh.Trimesh":
    """
    Reconstruct mesh using Poisson surface reconstruction.

    Requires Open3D for the actual Poisson reconstruction.
    Falls back to convex hull if Open3D is not available.

    Args:
        points: (N, 3) point cloud
        colors: Optional (N, 3) RGB colors [0-255]
        depth: Octree depth for Poisson (higher = more detail)

    Returns:
        Trimesh mesh object

    Raises:
        MeshingError: If NumPy or trimesh is missing, or fewer than 4 points.
    """
    if not NUMPY_AVAILABLE:
        raise MeshingError("NumPy not available")

    # The result is returned as a trimesh.Trimesh either way, so fail early
    # with a clear error instead of an AttributeError on `trimesh` (= None)
    # further down.
    if not TRIMESH_AVAILABLE:
        raise MeshingError("trimesh not available. Run: pip install trimesh")

    if len(points) < 4:
        raise MeshingError("Need at least 4 points for mesh reconstruction")

    # Keep the try narrow: only the import should trigger the fallback,
    # not an unrelated ImportError raised inside the Open3D pipeline.
    try:
        import open3d as o3d
    except ImportError:
        logger.warning("Open3D not available, falling back to convex hull")
        return convex_hull_mesh(points)

    # Create Open3D point cloud
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)

    if colors is not None:
        # Open3D expects colors normalized to [0, 1].
        pcd.colors = o3d.utility.Vector3dVector(colors / 255.0)

    # Poisson needs oriented normals: estimate, then orient consistently.
    pcd.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.05, max_nn=30)
    )
    pcd.orient_normals_consistent_tangent_plane(k=15)

    # Poisson reconstruction
    mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
        pcd, depth=depth
    )

    # Remove low-density vertices (reconstruction artifacts far from data).
    densities = np.asarray(densities)
    density_threshold = np.quantile(densities, 0.1)
    vertices_to_remove = densities < density_threshold
    mesh.remove_vertices_by_mask(vertices_to_remove)

    # Convert to trimesh
    vertices = np.asarray(mesh.vertices)
    faces = np.asarray(mesh.triangles)

    result = trimesh.Trimesh(vertices=vertices, faces=faces)
    logger.debug(f"Poisson mesh: {len(result.vertices)} vertices, {len(result.faces)} faces")
    return result
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def convex_hull_mesh(points: "np.ndarray") -> "trimesh.Trimesh":
    """
    Generate mesh using convex hull.

    Simple fallback when more sophisticated methods aren't available.

    Args:
        points: (N, 3) point cloud

    Returns:
        Trimesh mesh object

    Raises:
        MeshingError: If trimesh is missing or fewer than 4 points.
    """
    if not TRIMESH_AVAILABLE:
        raise MeshingError("trimesh not available. Run: pip install trimesh")

    if len(points) < 4:
        raise MeshingError("Need at least 4 points for convex hull")

    from scipy.spatial import ConvexHull

    hull = ConvexHull(points)

    # BUG FIX: hull.simplices indexes into the ORIGINAL points array, while
    # the vertex list is compacted to points[hull.vertices]. Passing the raw
    # simplices against the compacted vertices produced faces that referenced
    # the wrong (or out-of-range) vertices, so remap the face indices.
    index_map = np.full(len(points), -1, dtype=np.int64)
    index_map[hull.vertices] = np.arange(len(hull.vertices))
    faces = index_map[hull.simplices]

    mesh = trimesh.Trimesh(vertices=points[hull.vertices], faces=faces)

    logger.debug(f"Convex hull mesh: {len(mesh.vertices)} vertices")
    return mesh
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def alpha_shape_mesh(
    points: "np.ndarray",
    alpha: float = 0.1,
) -> "trimesh.Trimesh":
    """
    Generate mesh using alpha shapes.

    Good for non-convex shapes when Poisson isn't available. Currently a
    placeholder: the convex hull of the cloud is returned and ``alpha`` is
    not yet used.

    Args:
        points: (N, 3) point cloud
        alpha: Alpha value (smaller = tighter fit)

    Returns:
        Trimesh mesh object

    Raises:
        MeshingError: If trimesh is not available.
    """
    if not TRIMESH_AVAILABLE:
        raise MeshingError("trimesh not available")

    # TODO: Implement proper alpha shape when scipy supports it.
    # For now, fall back to the convex hull of the point cloud.
    return trimesh.PointCloud(points).convex_hull
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def simplify_mesh(
    mesh: "trimesh.Trimesh",
    target_faces: int = 5000,
) -> "trimesh.Trimesh":
    """
    Simplify mesh to reduce face count via quadric decimation.

    Returns the input unchanged when it is already at or below the target,
    or when decimation fails.

    Args:
        mesh: Input mesh
        target_faces: Target number of faces

    Returns:
        Simplified mesh (or the original on no-op / failure)

    Raises:
        MeshingError: If trimesh is not available.
    """
    if not TRIMESH_AVAILABLE:
        raise MeshingError("trimesh not available")

    if len(mesh.faces) <= target_faces:
        return mesh

    try:
        # Quadric decimation collapses edges while minimizing shape error.
        reduced = mesh.simplify_quadric_decimation(target_faces)
    except Exception as err:
        logger.warning(f"Mesh simplification failed: {err}")
        return mesh

    logger.debug(f"Simplified mesh: {len(mesh.faces)} -> {len(reduced.faces)} faces")
    return reduced
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def generate_visual_mesh(
    cloud_path: Path,
    output_path: Path,
    method: str = "auto",
    simplify_to: int = 5000,
) -> Path:
    """
    Generate visual mesh from a PLY point cloud file.

    Args:
        cloud_path: Path to PLY point cloud
        output_path: Path to output OBJ file
        method: "poisson", "hull", or "auto" (Poisson with hull fallback)
        simplify_to: Target face count for simplification (<= 0 disables)

    Returns:
        Path to generated mesh file

    Raises:
        MeshingError: On too few points or an unknown method.
    """
    from .lifting import load_ply

    # Load the cloud and sanity-check it.
    points, colors = load_ply(cloud_path)
    if len(points) < 4:
        raise MeshingError(f"Insufficient points in {cloud_path}: {len(points)}")

    # Choose a reconstruction strategy.
    if method == "poisson":
        mesh = poisson_reconstruct(points, colors)
    elif method == "hull":
        mesh = convex_hull_mesh(points)
    elif method == "auto":
        try:
            mesh = poisson_reconstruct(points, colors)
        except Exception as err:
            logger.warning(f"Poisson failed, using convex hull: {err}")
            mesh = convex_hull_mesh(points)
    else:
        raise MeshingError(f"Unknown meshing method: {method}")

    # Decimate oversized meshes.
    if simplify_to > 0 and len(mesh.faces) > simplify_to:
        mesh = simplify_mesh(mesh, simplify_to)

    # Best-effort repair for downstream tools that expect closed surfaces.
    if not mesh.is_watertight:
        logger.warning(f"Mesh is not watertight, attempting repair")
        mesh.fill_holes()

    # Export as OBJ.
    output_path.parent.mkdir(parents=True, exist_ok=True)
    mesh.export(str(output_path))

    logger.info(f"Generated visual mesh: {output_path} ({len(mesh.faces)} faces)")
    return output_path
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def generate_all_visual_meshes(
    session: "ScanSession",
    method: str = "auto",
    simplify_to: int = 5000,
    progress_callback: Optional[callable] = None,
) -> Dict[str, Path]:
    """
    Generate visual meshes for all links in a session.

    Each ``*.ply`` under ``session.clouds_dir`` is meshed into
    ``<link>_visual.obj`` under ``session.meshes_dir``; links whose meshing
    fails are logged and omitted from the result.

    Args:
        session: ScanSession with point clouds
        method: Meshing method ("auto", "poisson", or "hull")
        simplify_to: Target face count
        progress_callback: Optional callable(done, total)

    Returns:
        Dict mapping link_name -> mesh_path

    Raises:
        MeshingError: If the session has no point clouds.
    """
    clouds_dir = session.clouds_dir
    meshes_dir = session.meshes_dir
    meshes_dir.mkdir(parents=True, exist_ok=True)

    cloud_files = list(clouds_dir.glob("*.ply"))
    if not cloud_files:
        raise MeshingError(f"No point clouds found in {clouds_dir}")

    meshes = {}
    total = len(cloud_files)

    for done, ply_path in enumerate(cloud_files, start=1):
        link_name = ply_path.stem
        obj_path = meshes_dir / f"{link_name}_visual.obj"

        try:
            generate_visual_mesh(ply_path, obj_path, method, simplify_to)
        except Exception as err:
            logger.error(f"Failed to generate mesh for {link_name}: {err}")
        else:
            meshes[link_name] = obj_path

        if progress_callback:
            progress_callback(done, total)

    logger.info(f"Generated {len(meshes)} visual meshes")
    return meshes
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
# Public API of this module.
__all__ = [
    "MeshingError",
    "poisson_reconstruct",
    "convex_hull_mesh",
    "alpha_shape_mesh",
    "simplify_mesh",
    "generate_visual_mesh",
    "generate_all_visual_meshes",
]
|