foodforthought-cli 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +12 -0
- ate/behaviors/approach.py +399 -0
- ate/cli.py +855 -4551
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +402 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +18 -6
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +360 -24
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +16 -0
- ate/interfaces/base.py +2 -0
- ate/interfaces/sensors.py +247 -0
- ate/llm_proxy.py +239 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +42 -3
- ate/recording/session.py +12 -2
- ate/recording/visual.py +416 -0
- ate/robot/__init__.py +142 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +88 -3
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +143 -11
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +104 -2
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +6 -0
- ate/robot/registry.py +5 -2
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +285 -3
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +9 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/METADATA +1 -1
- foodforthought_cli-0.3.1.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/top_level.txt +0 -0
ate/urdf/scale.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Scale calibration for markerless URDF generation.
|
|
3
|
+
|
|
4
|
+
In a markerless setting, the global scale factor (sim2real gap) must be
|
|
5
|
+
determined from a known physical measurement. This module provides:
|
|
6
|
+
|
|
7
|
+
- parse_scale_ref: Parse "part:measurement" strings (e.g., "gripper:85mm")
|
|
8
|
+
- parse_measurement: Parse measurement values with units
|
|
9
|
+
- ScaleCalibrator: Compute scale factor from reference measurements
|
|
10
|
+
|
|
11
|
+
The Kalib method uses a single known dimension to normalize depth estimates
|
|
12
|
+
from monocular depth models like Depth Anything V2.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import re
|
|
16
|
+
from typing import Tuple, Optional
|
|
17
|
+
from dataclasses import dataclass
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ScaleError(Exception):
    """Raised when a scale reference or measurement cannot be parsed or applied."""
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# Conversion factors from each accepted unit spelling to meters.
# Keys are lowercase; parse_measurement() lowercases input before lookup.
_UNIT_GROUPS = (
    # Metric
    (("m", "meter", "meters"), 1.0),
    (("cm", "centimeter", "centimeters"), 0.01),
    (("mm", "millimeter", "millimeters"), 0.001),
    # Imperial (includes the ' and " shorthand symbols)
    (("in", "inch", "inches", '"'), 0.0254),
    (("ft", "foot", "feet", "'"), 0.3048),
)

UNIT_CONVERSIONS = {
    alias: factor for aliases, factor in _UNIT_GROUPS for alias in aliases
}
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def parse_measurement(measurement: str) -> float:
    """
    Convert a measurement string with a unit suffix into meters.

    Args:
        measurement: Value with unit (e.g., "85mm", "3.5in", "0.15m")

    Returns:
        Value in meters

    Raises:
        ScaleError: If the string is empty, malformed, non-positive, or
            uses an unrecognized unit

    Examples:
        >>> parse_measurement("85mm")
        0.085
        >>> parse_measurement("3.5in")
        0.0889
        >>> parse_measurement("150cm")
        1.5
    """
    if not measurement:
        raise ScaleError("Empty measurement string")

    # Case- and whitespace-insensitive: unit table keys are lowercase.
    text = measurement.strip().lower()

    # A decimal number, optional whitespace, then a unit token
    # (letters or the '/" shorthand symbols).
    match = re.match(r"^([\d.]+)\s*([a-z\"']+)$", text)
    if match is None:
        raise ScaleError(
            f"Invalid measurement format: '{text}'. "
            f"Expected format like '85mm', '3.5in', '0.15m'"
        )

    value_str = match.group(1)
    unit = match.group(2)

    try:
        value = float(value_str)
    except ValueError:
        # The character class admits strings like "1.2.3" that float() rejects.
        raise ScaleError(f"Invalid numeric value: '{value_str}'")

    if value <= 0:
        raise ScaleError(f"Measurement must be positive: {value}")

    if unit not in UNIT_CONVERSIONS:
        # Hide the ' and " shorthand aliases from the suggestion list.
        valid_units = sorted(set(UNIT_CONVERSIONS) - {'"', "'"})
        raise ScaleError(
            f"Unknown unit: '{unit}'. Valid units: {', '.join(valid_units)}"
        )

    return value * UNIT_CONVERSIONS[unit]
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def parse_scale_ref(ref: str) -> Tuple[str, float]:
    """
    Parse a scale reference string of the form "part_name:measurement".

    Args:
        ref: Scale reference (e.g., "gripper:85mm", "base_width:150mm")

    Returns:
        Tuple of (part_name, value_in_meters)

    Raises:
        ScaleError: If the format is invalid, the part name is empty, or
            the measurement cannot be parsed

    Examples:
        >>> parse_scale_ref("gripper:85mm")
        ('gripper', 0.085)
        >>> parse_scale_ref("base_width:6in")
        ('base_width', 0.1524)
    """
    if not ref:
        raise ScaleError("Empty scale reference")

    if ":" not in ref:
        raise ScaleError(
            f"Invalid scale reference format: '{ref}'. "
            f"Expected 'part:measurement' (e.g., 'gripper:85mm')"
        )

    # Split at the first colon only, so measurements may never contain one
    # but part names are taken verbatim up to it.
    part_name, _, measurement = ref.partition(":")
    part_name = part_name.strip()

    if not part_name:
        raise ScaleError("Part name cannot be empty in scale reference")

    # Delegates unit handling (and its errors) to parse_measurement.
    return part_name, parse_measurement(measurement.strip())
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
@dataclass
class ScaleCalibration:
    """Computed calibration linking a known physical dimension to raw depth units."""

    part_name: str           # name of the reference part
    reference_meters: float  # known physical size in meters
    measured_pixels: float   # observed size in raw depth/pixel units
    scale_factor: float      # meters per pixel (or depth unit)

    def apply(self, depth_value: float) -> float:
        """Convert a raw depth value into meters using the scale factor."""
        return self.scale_factor * depth_value
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
class ScaleCalibrator:
    """
    Derive a global metric scale from one known physical dimension.

    Implements the Kalib-style normalization: the known size of a reference
    part is divided by its size as measured in raw depth units, yielding a
    meters-per-unit factor applicable to the whole depth map or point cloud.

    Usage:
        calibrator = ScaleCalibrator("gripper:85mm")
        # Measure gripper width in depth units
        measured = compute_dimension_from_cloud(gripper_cloud)
        calibration = calibrator.calibrate(measured)
        # Apply to all depth values
        scaled_depth = calibration.apply(raw_depth)
    """

    def __init__(self, scale_ref: str):
        """
        Parse the scale reference and remember the part and its true size.

        Args:
            scale_ref: Scale reference string (e.g., "gripper:85mm")
        """
        name, meters = parse_scale_ref(scale_ref)
        self.part_name = name
        self.reference_meters = meters

    def calibrate(self, measured_value: float) -> ScaleCalibration:
        """
        Compute the meters-per-unit scale factor from a measured size.

        Args:
            measured_value: Size of the reference part in raw depth units

        Returns:
            ScaleCalibration with the computed scale factor

        Raises:
            ScaleError: If measured_value is not strictly positive
        """
        if measured_value <= 0:
            raise ScaleError(
                f"Measured value must be positive: {measured_value}. "
                f"Check that the reference part '{self.part_name}' is visible."
            )

        return ScaleCalibration(
            part_name=self.part_name,
            reference_meters=self.reference_meters,
            measured_pixels=measured_value,
            scale_factor=self.reference_meters / measured_value,
        )
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def estimate_intrinsics_from_resolution(
    width: int,
    height: int,
    fov_degrees: float = 60.0,
) -> Tuple[float, float, float, float]:
    """
    Estimate pinhole camera intrinsics from resolution and an assumed FOV.

    This is a fallback when actual camera calibration is not available.
    Most webcams have FOV between 55-75 degrees.

    Args:
        width: Image width in pixels (must be positive)
        height: Image height in pixels (must be positive)
        fov_degrees: Assumed horizontal field of view, strictly between
            0 and 180 degrees

    Returns:
        Tuple of (fx, fy, cx, cy) camera intrinsics

    Raises:
        ValueError: If width/height are not positive, or fov_degrees is
            outside the open interval (0, 180)
    """
    import math

    if width <= 0 or height <= 0:
        raise ValueError(f"Resolution must be positive: {width}x{height}")
    if not 0.0 < fov_degrees < 180.0:
        # tan(fov/2) diverges at 180 and is non-positive outside (0, 180),
        # which would produce an infinite or negative focal length.
        raise ValueError(f"FOV must be in (0, 180) degrees: {fov_degrees}")

    # Pinhole model: fx = (W / 2) / tan(FOV_h / 2)
    fov_rad = math.radians(fov_degrees)
    fx = (width / 2) / math.tan(fov_rad / 2)

    # Assume square pixels: identical focal length on both axes
    fy = fx

    # Principal point at image center
    cx = width / 2
    cy = height / 2

    return fx, fy, cx, cy
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# Public API of this module; UNIT_CONVERSIONS is not exported —
# callers go through parse_measurement for unit handling.
__all__ = [
    "ScaleError",
    "parse_measurement",
    "parse_scale_ref",
    "ScaleCalibration",
    "ScaleCalibrator",
    "estimate_intrinsics_from_resolution",
]
|
ate/urdf/scan_session.py
ADDED
|
@@ -0,0 +1,411 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Scan session management for URDF generation.
|
|
3
|
+
|
|
4
|
+
A scan session represents the state of a URDF generation process,
|
|
5
|
+
including captured video, link annotations, point clouds, meshes,
|
|
6
|
+
and final URDF output.
|
|
7
|
+
|
|
8
|
+
Session structure:
|
|
9
|
+
my_robot_scan/
|
|
10
|
+
├── metadata.json # Session metadata and settings
|
|
11
|
+
├── video.mp4 # Captured robot video
|
|
12
|
+
├── links.json # User-annotated link click points
|
|
13
|
+
├── kinematics.json # Discovered joint parameters
|
|
14
|
+
├── clouds/ # Per-link point clouds
|
|
15
|
+
│ ├── base_frame_0.ply
|
|
16
|
+
│ ├── shoulder_frame_0.ply
|
|
17
|
+
│ └── ...
|
|
18
|
+
└── meshes/ # Generated meshes
|
|
19
|
+
├── base_visual.obj
|
|
20
|
+
├── base_collision.obj
|
|
21
|
+
└── ...
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import json
|
|
25
|
+
import os
|
|
26
|
+
from dataclasses import dataclass, field, asdict
|
|
27
|
+
from datetime import datetime
|
|
28
|
+
from pathlib import Path
|
|
29
|
+
from typing import Dict, List, Optional, Any
|
|
30
|
+
import logging
|
|
31
|
+
|
|
32
|
+
logger = logging.getLogger(__name__)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class ScanSessionError(Exception):
    """Base exception for scan session errors."""


class SessionNotFoundError(ScanSessionError):
    """Raised when the session directory does not exist."""


class SessionCorruptError(ScanSessionError):
    """Raised when session metadata is missing or invalid."""


class SessionIncompleteError(ScanSessionError):
    """
    Raised when the session lacks required data for the requested operation.

    Attributes:
        missing_steps: Human-readable descriptions of the unmet prerequisites.
    """

    def __init__(self, message: str, missing_steps: List[str]):
        super().__init__(message)
        # Keep the structured list so callers can render actionable hints.
        self.missing_steps = missing_steps
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@dataclass
class LinkAnnotation:
    """A user-annotated link clicked in the first video frame."""

    name: str
    point: List[float]      # [x, y] coordinates in the frame
    is_fixed: bool = False  # True for the base/world link

    def to_dict(self) -> Dict:
        """Serialize to a JSON-friendly dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict) -> "LinkAnnotation":
        """Rebuild an annotation from its dict form; is_fixed defaults to False."""
        return cls(data["name"], data["point"], data.get("is_fixed", False))
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
@dataclass
class JointInfo:
    """Discovered joint parameters for one parent->child connection."""

    name: str
    parent_link: str
    child_link: str
    joint_type: str           # revolute, prismatic, fixed
    axis: List[float]         # [x, y, z] unit vector
    origin: List[float]       # [x, y, z] position
    limits: Dict[str, float]  # lower, upper bounds (radians or meters)

    def to_dict(self) -> Dict:
        """Serialize to a dict; note the type is stored under 'joint_type'."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict) -> "JointInfo":
        """
        Rebuild a JointInfo from its dict form.

        Accepts the joint type under either "joint_type" (the key that
        to_dict/asdict emits) or the legacy "type" key. Previously only
        "type" was read, so dicts produced by to_dict raised KeyError on
        reload — breaking the save/load round trip.

        Raises:
            KeyError: If neither type key nor another required field is present.
        """
        joint_type = data.get("joint_type")
        if joint_type is None:
            joint_type = data["type"]  # legacy key; KeyError if absent too
        return cls(
            name=data["name"],
            parent_link=data["parent_link"],
            child_link=data["child_link"],
            joint_type=joint_type,
            axis=data["axis"],
            origin=data["origin"],
            limits=data.get("limits", {"lower": -3.14, "upper": 3.14}),
        )
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
@dataclass
class ScanMetadata:
    """Session metadata and settings persisted alongside the scan data."""

    version: str = "1.0.0"
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())
    updated_at: str = field(default_factory=lambda: datetime.now().isoformat())
    robot_name: Optional[str] = None
    scale_ref: Optional[str] = None       # e.g., "gripper:85mm"
    scale_factor: Optional[float] = None  # Computed from scale_ref
    device: str = "cpu"                   # cpu or cuda
    video_path: Optional[str] = None
    frame_count: int = 0
    fps: float = 30.0
    resolution: List[int] = field(default_factory=lambda: [0, 0])  # [width, height]
    density_kg_m3: float = 1200.0  # Default plastic density

    # Pipeline completion status
    capture_complete: bool = False
    segment_complete: bool = False
    optimize_complete: bool = False
    mesh_complete: bool = False
    synthesize_complete: bool = False

    def to_dict(self) -> Dict:
        """Serialize to a JSON-friendly dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict) -> "ScanMetadata":
        """Build from a dict, silently dropping unknown keys (forward compat)."""
        known = cls.__dataclass_fields__
        return cls(**{key: data[key] for key in data if key in known})

    def update(self) -> None:
        """Refresh the modification timestamp to now."""
        self.updated_at = datetime.now().isoformat()
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
class ScanSession:
    """
    Manages a URDF generation session.

    Provides methods for:
    - Creating new sessions
    - Loading existing sessions
    - Saving/loading video, annotations, point clouds, meshes
    - Tracking pipeline progress
    """

    # Fixed on-disk layout inside the session directory.
    METADATA_FILE = "metadata.json"
    VIDEO_FILE = "video.mp4"
    LINKS_FILE = "links.json"
    KINEMATICS_FILE = "kinematics.json"
    CLOUDS_DIR = "clouds"
    MESHES_DIR = "meshes"
    URDF_FILE = "robot.urdf"

    def __init__(self, session_dir: Path):
        """
        Initialize a scan session.

        Args:
            session_dir: Directory for session data
        """
        self.session_dir = Path(session_dir)
        # In-memory state; populated by create()/load() or by pipeline stages.
        self.metadata: ScanMetadata = ScanMetadata()
        self.links: List[LinkAnnotation] = []
        self.joints: List[JointInfo] = []

    @classmethod
    def create(
        cls,
        output_dir: str,
        robot_name: Optional[str] = None,
        scale_ref: Optional[str] = None,
        device: str = "cpu",
    ) -> "ScanSession":
        """
        Create a new scan session.

        Args:
            output_dir: Directory to create session in
            robot_name: Name for the robot (defaults to the directory name)
            scale_ref: Scale reference (e.g., "gripper:85mm")
            device: Compute device (cpu or cuda)

        Returns:
            New ScanSession instance
        """
        session_dir = Path(output_dir)
        session_dir.mkdir(parents=True, exist_ok=True)

        # Create subdirectories
        (session_dir / cls.CLOUDS_DIR).mkdir(exist_ok=True)
        (session_dir / cls.MESHES_DIR).mkdir(exist_ok=True)

        session = cls(session_dir)
        session.metadata.robot_name = robot_name or session_dir.name
        session.metadata.scale_ref = scale_ref
        session.metadata.device = device

        # Parse scale reference if provided (also validates the format early)
        if scale_ref:
            from .scale import parse_scale_ref
            _, scale_meters = parse_scale_ref(scale_ref)
            # NOTE(review): this stores the reference length in meters
            # (parse_scale_ref's second element), not a meters-per-depth-unit
            # ratio as the name "scale_factor" suggests — confirm downstream
            # consumers expect the raw reference length here.
            session.metadata.scale_factor = scale_meters

        session.save_metadata()
        logger.info(f"Created new scan session at {session_dir}")
        return session

    @classmethod
    def load(cls, session_dir: str) -> "ScanSession":
        """
        Load an existing scan session.

        Args:
            session_dir: Path to session directory

        Returns:
            Loaded ScanSession instance

        Raises:
            SessionNotFoundError: If directory doesn't exist
            SessionCorruptError: If metadata is invalid
        """
        path = Path(session_dir)

        if not path.exists():
            raise SessionNotFoundError(f"Session directory not found: {session_dir}")

        metadata_path = path / cls.METADATA_FILE
        if not metadata_path.exists():
            raise SessionCorruptError(f"Missing metadata file: {metadata_path}")

        session = cls(path)

        # Load metadata
        try:
            with open(metadata_path, "r") as f:
                data = json.load(f)
            session.metadata = ScanMetadata.from_dict(data)
        except json.JSONDecodeError as e:
            raise SessionCorruptError(f"Invalid metadata JSON: {e}")

        # Load links if present (absent until annotation has happened)
        links_path = path / cls.LINKS_FILE
        if links_path.exists():
            session.links = session._load_links()

        # Load kinematics if present (absent until optimization has happened)
        kinematics_path = path / cls.KINEMATICS_FILE
        if kinematics_path.exists():
            session.joints = session._load_kinematics()

        logger.info(f"Loaded scan session from {session_dir}")
        return session

    def save_metadata(self) -> None:
        """Save session metadata to disk, refreshing the updated_at timestamp."""
        self.metadata.update()
        metadata_path = self.session_dir / self.METADATA_FILE
        with open(metadata_path, "w") as f:
            json.dump(self.metadata.to_dict(), f, indent=2)
        logger.debug(f"Saved metadata to {metadata_path}")

    def save_links(self) -> None:
        """Save link annotations to disk (annotations refer to frame 0)."""
        links_path = self.session_dir / self.LINKS_FILE
        data = {
            "frame_index": 0,
            "links": [link.to_dict() for link in self.links],
        }
        with open(links_path, "w") as f:
            json.dump(data, f, indent=2)
        logger.debug(f"Saved {len(self.links)} links to {links_path}")

    def _load_links(self) -> List[LinkAnnotation]:
        """Load link annotations from disk; caller checks the file exists."""
        links_path = self.session_dir / self.LINKS_FILE
        with open(links_path, "r") as f:
            data = json.load(f)
        return [LinkAnnotation.from_dict(link) for link in data.get("links", [])]

    def save_kinematics(self) -> None:
        """Save discovered kinematics (links + joints) to disk."""
        kinematics_path = self.session_dir / self.KINEMATICS_FILE
        data = {
            "links": [
                # Fixed links have no parent; others get a placeholder marker.
                {"name": link.name, "parent": None if link.is_fixed else "discovered"}
                for link in self.links
            ],
            "joints": [joint.to_dict() for joint in self.joints],
        }
        with open(kinematics_path, "w") as f:
            json.dump(data, f, indent=2)
        logger.debug(f"Saved {len(self.joints)} joints to {kinematics_path}")

    def _load_kinematics(self) -> List[JointInfo]:
        """Load joints from disk; caller checks the file exists."""
        kinematics_path = self.session_dir / self.KINEMATICS_FILE
        with open(kinematics_path, "r") as f:
            data = json.load(f)
        return [JointInfo.from_dict(joint) for joint in data.get("joints", [])]

    @property
    def video_path(self) -> Path:
        """Path to the video file."""
        return self.session_dir / self.VIDEO_FILE

    @property
    def clouds_dir(self) -> Path:
        """Path to point clouds directory."""
        return self.session_dir / self.CLOUDS_DIR

    @property
    def meshes_dir(self) -> Path:
        """Path to meshes directory."""
        return self.session_dir / self.MESHES_DIR

    @property
    def urdf_path(self) -> Path:
        """Path to final URDF file."""
        return self.session_dir / self.URDF_FILE

    def has_video(self) -> bool:
        """Check if video has been captured."""
        return self.video_path.exists()

    def has_links(self) -> bool:
        """Check if links have been annotated (in-memory list, not the file)."""
        return len(self.links) > 0

    def has_clouds(self) -> bool:
        """Check if point clouds have been generated (any .ply file present)."""
        if not self.clouds_dir.exists():
            return False
        return len(list(self.clouds_dir.glob("*.ply"))) > 0

    def has_meshes(self) -> bool:
        """Check if meshes have been generated (any *_visual.obj present)."""
        if not self.meshes_dir.exists():
            return False
        return len(list(self.meshes_dir.glob("*_visual.obj"))) > 0

    def has_urdf(self) -> bool:
        """Check if URDF has been generated."""
        return self.urdf_path.exists()

    def check_prerequisites(self, stage: str) -> None:
        """
        Check if prerequisites for a pipeline stage are met.

        Stages form a chain: capture -> segment -> optimize -> mesh ->
        synthesize; each stage requires the previous one's outputs.
        An unrecognized stage name passes silently (no prerequisites).

        Args:
            stage: Pipeline stage to check

        Raises:
            SessionIncompleteError: If prerequisites are not met
        """
        missing = []

        if stage == "segment":
            # Segment needs actual files, not just the completion flag.
            if not self.has_video():
                missing.append("video (run 'ate urdf scan capture' first)")
            if not self.has_links():
                missing.append("link annotations (annotate links during capture)")

        elif stage == "optimize":
            if not self.metadata.segment_complete:
                missing.append("segmentation (run 'ate urdf scan segment' first)")

        elif stage == "mesh":
            if not self.metadata.optimize_complete:
                missing.append("kinematics (run 'ate urdf scan optimize' first)")

        elif stage == "synthesize":
            if not self.metadata.mesh_complete:
                missing.append("meshes (run 'ate urdf scan mesh' first)")

        if missing:
            raise SessionIncompleteError(
                f"Cannot run '{stage}' stage. Missing: {', '.join(missing)}",
                missing_steps=missing,
            )

    def get_status(self) -> Dict[str, Any]:
        """Get current session status summary (JSON-serializable)."""
        return {
            "session_dir": str(self.session_dir),
            "robot_name": self.metadata.robot_name,
            "scale_ref": self.metadata.scale_ref,
            "device": self.metadata.device,
            # Flags recorded in metadata by each completed pipeline stage
            "stages": {
                "capture": self.metadata.capture_complete,
                "segment": self.metadata.segment_complete,
                "optimize": self.metadata.optimize_complete,
                "mesh": self.metadata.mesh_complete,
                "synthesize": self.metadata.synthesize_complete,
            },
            # What actually exists on disk / in memory right now
            "data": {
                "has_video": self.has_video(),
                "link_count": len(self.links),
                "joint_count": len(self.joints),
                "has_clouds": self.has_clouds(),
                "has_meshes": self.has_meshes(),
                "has_urdf": self.has_urdf(),
            },
        }

    def __repr__(self) -> str:
        return f"ScanSession({self.session_dir}, robot={self.metadata.robot_name})"
|