foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/robot/visual_system_id.py (new file)

@@ -0,0 +1,906 @@
#!/usr/bin/env python3
"""
Visual System Identification - Robot Self-Modeling via Webcam

Instead of guessing what servos do, we MEASURE:
1. Stick ArUco markers on each robot link
2. Move motors one at a time
3. Track how markers move relative to each other
4. Infer joint types and parameters mathematically
5. Generate URDF automatically

This closes the sim-to-real gap by reverse-engineering
the kinematic model from observation.

Usage:
    python -m ate.robot.visual_system_id --port /dev/cu.usbserial-10

Prerequisites:
    pip install opencv-contrib-python numpy
    Print ArUco markers and attach to robot links
"""

import cv2
import numpy as np
import time
import json
import math
from dataclasses import dataclass, field
from typing import List, Dict, Optional, Tuple, Any
from pathlib import Path
from enum import Enum

# Import unified JointType from shared types module
from ate.robot.types import JointType


@dataclass
class MarkerPose:
    """6DOF pose of an ArUco marker."""
    marker_id: int
    timestamp: float
    position: np.ndarray  # [x, y, z] in camera frame (meters)
    rotation: np.ndarray  # 3x3 rotation matrix
    rvec: np.ndarray  # Rodrigues rotation vector
    tvec: np.ndarray  # Translation vector

    @property
    def euler_angles(self) -> Tuple[float, float, float]:
        """Get roll, pitch, yaw in degrees."""
        # Extract Euler angles from rotation matrix
        sy = math.sqrt(self.rotation[0,0]**2 + self.rotation[1,0]**2)
        singular = sy < 1e-6

        if not singular:
            roll = math.atan2(self.rotation[2,1], self.rotation[2,2])
            pitch = math.atan2(-self.rotation[2,0], sy)
            yaw = math.atan2(self.rotation[1,0], self.rotation[0,0])
        else:
            roll = math.atan2(-self.rotation[1,2], self.rotation[1,1])
            pitch = math.atan2(-self.rotation[2,0], sy)
            yaw = 0

        return (math.degrees(roll), math.degrees(pitch), math.degrees(yaw))

    def to_dict(self) -> dict:
        roll, pitch, yaw = self.euler_angles
        return {
            "marker_id": self.marker_id,
            "timestamp": self.timestamp,
            "position": self.position.tolist(),
            "euler_deg": {"roll": roll, "pitch": pitch, "yaw": yaw},
        }


@dataclass
class MotionSample:
    """A single observation during motor motion."""
    motor_id: int
    pwm_value: int
    marker_poses: Dict[int, MarkerPose]  # marker_id -> pose


@dataclass
class JointInference:
    """Inferred properties of a joint."""
    motor_id: int
    joint_type: JointType
    parent_marker: Optional[int]  # Marker on parent link
    child_marker: Optional[int]  # Marker on child link
    axis: Optional[np.ndarray]  # Joint axis (unit vector)
    origin: Optional[np.ndarray]  # Joint origin in parent frame
    range_min: float = 0.0  # Joint limit (radians or meters)
    range_max: float = 0.0
    confidence: float = 0.0

    def to_dict(self) -> dict:
        return {
            "motor_id": self.motor_id,
            "joint_type": self.joint_type.value,
            "parent_marker": self.parent_marker,
            "child_marker": self.child_marker,
            "axis": self.axis.tolist() if self.axis is not None else None,
            "origin": self.origin.tolist() if self.origin is not None else None,
            "range_min_deg": math.degrees(self.range_min),
            "range_max_deg": math.degrees(self.range_max),
            "confidence": self.confidence,
        }

class ArUcoTracker:
    """
    Track ArUco markers and compute 6DOF poses.

    ArUco markers are like QR codes but optimized for pose estimation.
    Each marker has a unique ID and known geometry, allowing us to
    compute its exact position and orientation in 3D space.
    """

    def __init__(
        self,
        camera_index: int = 0,
        marker_size_meters: float = 0.03,  # 3cm markers
        dictionary: int = cv2.aruco.DICT_4X4_50,
    ):
        self.camera_index = camera_index
        self.marker_size = marker_size_meters

        # ArUco detector setup
        self.aruco_dict = cv2.aruco.getPredefinedDictionary(dictionary)
        self.aruco_params = cv2.aruco.DetectorParameters()
        self.detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.aruco_params)

        # Camera (will be initialized on connect)
        self.cap = None
        self.frame_size = (640, 480)

        # Camera calibration (default - should be calibrated properly)
        # These are approximate values for a typical webcam
        self.camera_matrix = None
        self.dist_coeffs = None
        self._set_default_calibration()

    def _set_default_calibration(self):
        """Set default camera calibration (approximate)."""
        # Approximate intrinsics for 640x480 webcam
        fx = fy = 600  # Focal length in pixels
        cx, cy = 320, 240  # Principal point

        self.camera_matrix = np.array([
            [fx, 0, cx],
            [0, fy, cy],
            [0, 0, 1]
        ], dtype=np.float32)

        self.dist_coeffs = np.zeros(5, dtype=np.float32)

    def connect(self) -> bool:
        """Open camera connection."""
        self.cap = cv2.VideoCapture(self.camera_index)
        if not self.cap.isOpened():
            return False

        # Set resolution
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        # Read actual size
        ret, frame = self.cap.read()
        if ret:
            self.frame_size = (frame.shape[1], frame.shape[0])
            # Update principal point for actual resolution
            self.camera_matrix[0, 2] = frame.shape[1] / 2
            self.camera_matrix[1, 2] = frame.shape[0] / 2

        return True

    def disconnect(self):
        """Close camera."""
        if self.cap:
            self.cap.release()
            self.cap = None

    def detect_markers(self, frame: Optional[np.ndarray] = None) -> Tuple[Dict[int, MarkerPose], np.ndarray]:
        """
        Detect all ArUco markers and compute their poses.

        Returns:
            Dict mapping marker_id -> MarkerPose
            Annotated frame for visualization
        """
        if frame is None:
            if not self.cap:
                return {}, np.array([])
            ret, frame = self.cap.read()
            if not ret:
                return {}, np.array([])

        # Detect markers
        corners, ids, rejected = self.detector.detectMarkers(frame)

        poses = {}
        annotated = frame.copy()

        if ids is not None:
            # Draw detected markers
            cv2.aruco.drawDetectedMarkers(annotated, corners, ids)

            # Estimate pose for each marker
            for i, marker_id in enumerate(ids.flatten()):
                # Get pose using solvePnP
                rvec, tvec, _ = cv2.aruco.estimatePoseSingleMarkers(
                    corners[i:i+1],
                    self.marker_size,
                    self.camera_matrix,
                    self.dist_coeffs
                )

                rvec = rvec[0][0]
                tvec = tvec[0][0]

                # Convert to rotation matrix
                rotation, _ = cv2.Rodrigues(rvec)

                pose = MarkerPose(
                    marker_id=int(marker_id),
                    timestamp=time.time(),
                    position=tvec,
                    rotation=rotation,
                    rvec=rvec,
                    tvec=tvec,
                )
                poses[int(marker_id)] = pose

                # Draw axis on frame
                cv2.drawFrameAxes(
                    annotated,
                    self.camera_matrix,
                    self.dist_coeffs,
                    rvec, tvec,
                    self.marker_size * 0.5
                )

                # Add label
                corner = corners[i][0][0]
                cv2.putText(
                    annotated,
                    f"ID:{marker_id}",
                    (int(corner[0]), int(corner[1]) - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2
                )

        return poses, annotated

    def get_frame(self) -> np.ndarray:
        """Get current camera frame."""
        if not self.cap:
            return np.array([])
        ret, frame = self.cap.read()
        return frame if ret else np.array([])

class MotionAnalyzer:
    """
    Analyze marker motion to infer joint properties.

    The key insight: when a motor moves, markers on the child link
    will move in a predictable pattern:
    - Revolute joint: markers trace an arc
    - Prismatic joint: markers trace a line
    - Fixed: markers don't move relative to parent
    """

    def __init__(self, samples: List[MotionSample]):
        self.samples = samples

    def analyze_motion(self, reference_marker: int, moving_marker: int) -> JointInference:
        """
        Analyze how one marker moves relative to another.

        Args:
            reference_marker: Marker ID on parent link (should stay still)
            moving_marker: Marker ID on child link (should move)

        Returns:
            JointInference with inferred joint type and parameters
        """
        if not self.samples:
            return JointInference(
                motor_id=0,
                joint_type=JointType.UNKNOWN,
                parent_marker=reference_marker,
                child_marker=moving_marker,
                axis=None,
                origin=None,
                confidence=0.0,
            )

        motor_id = self.samples[0].motor_id

        # Extract positions of moving marker relative to reference
        relative_positions = []
        relative_rotations = []
        pwm_values = []

        for sample in self.samples:
            if reference_marker not in sample.marker_poses:
                continue
            if moving_marker not in sample.marker_poses:
                continue

            ref_pose = sample.marker_poses[reference_marker]
            mov_pose = sample.marker_poses[moving_marker]

            # Transform moving marker to reference frame
            # P_rel = R_ref^T * (P_mov - P_ref)
            rel_pos = ref_pose.rotation.T @ (mov_pose.position - ref_pose.position)
            rel_rot = ref_pose.rotation.T @ mov_pose.rotation

            relative_positions.append(rel_pos)
            relative_rotations.append(rel_rot)
            pwm_values.append(sample.pwm_value)

        if len(relative_positions) < 3:
            return JointInference(
                motor_id=motor_id,
                joint_type=JointType.UNKNOWN,
                parent_marker=reference_marker,
                child_marker=moving_marker,
                axis=None,
                origin=None,
                confidence=0.0,
            )

        positions = np.array(relative_positions)

        # Analyze the trajectory
        joint_type, axis, origin, confidence = self._classify_motion(positions)

        # Calculate range of motion
        range_min, range_max = self._calculate_range(positions, joint_type, axis, origin)

        return JointInference(
            motor_id=motor_id,
            joint_type=joint_type,
            parent_marker=reference_marker,
            child_marker=moving_marker,
            axis=axis,
            origin=origin,
            range_min=range_min,
            range_max=range_max,
            confidence=confidence,
        )

    def _classify_motion(self, positions: np.ndarray) -> Tuple[JointType, Optional[np.ndarray], Optional[np.ndarray], float]:
        """
        Classify motion type from trajectory.

        Returns: (joint_type, axis, origin, confidence)
        """
        # Check if marker moved significantly
        pos_range = np.ptp(positions, axis=0)  # Peak-to-peak range
        total_motion = np.linalg.norm(pos_range)

        if total_motion < 0.005:  # Less than 5mm motion
            return JointType.FIXED, None, None, 0.9

        # Try to fit a plane to the points
        # For revolute joint, points should lie on a plane (the rotation plane)
        centroid = np.mean(positions, axis=0)
        centered = positions - centroid

        # SVD to find principal components
        U, S, Vt = np.linalg.svd(centered)

        # The smallest singular value's direction is the plane normal
        # For revolute joint, this should be the rotation axis
        plane_normal = Vt[2]  # Normal to the plane of motion

        # Check planarity: if 3rd singular value is small, points are planar
        planarity = 1.0 - (S[2] / (S[0] + 1e-6))

        if planarity > 0.9:  # Points are mostly planar
            # Check if motion is circular (revolute) or linear (prismatic)

            # Project to 2D on the plane
            basis1 = Vt[0]
            basis2 = Vt[1]
            projected_2d = np.array([
                [np.dot(p, basis1), np.dot(p, basis2)]
                for p in centered
            ])

            # Fit circle to 2D points
            circle_fit = self._fit_circle_2d(projected_2d)

            if circle_fit is not None:
                center_2d, radius, circle_error = circle_fit

                # If circle fit is good, it's revolute
                if circle_error < 0.1 * radius:  # Error less than 10% of radius
                    # Reconstruct 3D center
                    center_3d = centroid + center_2d[0] * basis1 + center_2d[1] * basis2

                    return JointType.REVOLUTE, plane_normal, center_3d, planarity * 0.9

        # Otherwise check for linear motion (prismatic)
        # Principal direction should explain most variance
        linearity = S[0] / (S[0] + S[1] + 1e-6)

        if linearity > 0.9:
            return JointType.PRISMATIC, Vt[0], centroid, linearity * 0.8

        # Can't determine
        return JointType.UNKNOWN, None, None, 0.3

    def _fit_circle_2d(self, points: np.ndarray) -> Optional[Tuple[np.ndarray, float, float]]:
        """
        Fit a circle to 2D points.

        Returns: (center, radius, fit_error) or None if fit fails
        """
        if len(points) < 3:
            return None

        # Use algebraic circle fit
        # Circle equation: (x-a)^2 + (y-b)^2 = r^2
        # Expanded: x^2 + y^2 - 2ax - 2by + (a^2 + b^2 - r^2) = 0
        # Let c = a^2 + b^2 - r^2
        # Then: x^2 + y^2 = 2ax + 2by - c

        A = np.zeros((len(points), 3))
        b = np.zeros(len(points))

        for i, (x, y) in enumerate(points):
            A[i] = [2*x, 2*y, 1]
            b[i] = x*x + y*y

        try:
            # Solve least squares
            result, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)

            center = result[:2]
            # The last column of A is +1, so the solver returns -c; flip the sign
            # so that r^2 = a^2 + b^2 - c stays consistent with the equations above.
            c = -result[2]
            radius_sq = center[0]**2 + center[1]**2 - c

            if radius_sq < 0:
                return None

            radius = np.sqrt(radius_sq)

            # Calculate fit error (RMS distance from circle)
            distances = np.sqrt(np.sum((points - center)**2, axis=1))
            error = np.sqrt(np.mean((distances - radius)**2))

            return center, radius, error

        except np.linalg.LinAlgError:
            return None

    def _calculate_range(
        self,
        positions: np.ndarray,
        joint_type: JointType,
        axis: Optional[np.ndarray],
        origin: Optional[np.ndarray],
    ) -> Tuple[float, float]:
        """Calculate range of motion."""

        if joint_type == JointType.REVOLUTE and axis is not None and origin is not None:
            # Calculate angles around the axis
            angles = []
            for pos in positions:
                # Vector from origin to position
                vec = pos - origin
                # Project out axis component
                vec_proj = vec - np.dot(vec, axis) * axis
                # Angle (arbitrary reference)
                if len(angles) == 0:
                    ref_vec = vec_proj / (np.linalg.norm(vec_proj) + 1e-6)
                    angles.append(0.0)
                else:
                    vec_norm = vec_proj / (np.linalg.norm(vec_proj) + 1e-6)
                    cos_angle = np.clip(np.dot(ref_vec, vec_norm), -1, 1)
                    angle = np.arccos(cos_angle)
                    # Determine sign using cross product
                    cross = np.cross(ref_vec, vec_norm)
                    if np.dot(cross, axis) < 0:
                        angle = -angle
                    angles.append(angle)

            return min(angles), max(angles)

        elif joint_type == JointType.PRISMATIC and axis is not None:
            # Project positions onto axis
            projections = [np.dot(pos, axis) for pos in positions]
            return min(projections), max(projections)

        return 0.0, 0.0

class VisualSystemIdentifier:
    """
    Main class for visual system identification.

    Workflow:
    1. Connect camera and robot
    2. Detect markers in initial frame
    3. For each motor, perform "wiggle test"
    4. Analyze motion patterns
    5. Infer kinematic structure
    6. Generate URDF
    """

    def __init__(
        self,
        robot_interface=None,
        camera_index: int = 0,
        marker_size_meters: float = 0.03,
    ):
        self.robot = robot_interface
        self.tracker = ArUcoTracker(
            camera_index=camera_index,
            marker_size_meters=marker_size_meters,
        )
        self.results: Dict[int, JointInference] = {}

    def connect(self) -> bool:
        """Connect to camera."""
        return self.tracker.connect()

    def disconnect(self):
        """Disconnect."""
        self.tracker.disconnect()

    def run_wiggle_test(
        self,
        motor_id: int,
        pwm_min: int = 500,
        pwm_max: int = 2500,
        steps: int = 10,
        dwell_time: float = 0.3,
        cmd_func=None,
    ) -> List[MotionSample]:
        """
        Run a "wiggle test" on a single motor.

        Moves the motor from min to max and records marker positions.

        Args:
            motor_id: Servo/motor to test
            pwm_min: Minimum PWM value
            pwm_max: Maximum PWM value
            steps: Number of positions to sample
            dwell_time: Time to wait at each position
            cmd_func: Function to send commands to robot

        Returns:
            List of MotionSamples
        """
        samples = []
        pwm_values = np.linspace(pwm_min, pwm_max, steps, dtype=int)

        print(f"\n Wiggle test: Motor {motor_id}")
        print(f" PWM range: {pwm_min} - {pwm_max} ({steps} steps)")

        for i, pwm in enumerate(pwm_values):
            # Move motor
            if cmd_func:
                cmd_func(f"dog.set_servo({motor_id}, {pwm}, 300)")
            elif self.robot:
                self.robot.set_servo(motor_id, pwm, 300)

            time.sleep(dwell_time)

            # Capture markers
            poses, annotated = self.tracker.detect_markers()

            sample = MotionSample(
                motor_id=motor_id,
                pwm_value=int(pwm),
                marker_poses=poses,
            )
            samples.append(sample)

            # Display progress
            marker_ids = list(poses.keys())
            print(f" Step {i+1}/{steps}: PWM={pwm}, Markers={marker_ids}")

            # Show frame
            cv2.imshow("Visual System ID", annotated)
            cv2.waitKey(1)

        return samples

    def analyze_motor(self, samples: List[MotionSample]) -> Optional[JointInference]:
        """
        Analyze motion samples to infer joint properties.
        """
        if not samples:
            return None

        # Find markers that were visible in most frames
        marker_counts = {}
        for sample in samples:
            for mid in sample.marker_poses.keys():
                marker_counts[mid] = marker_counts.get(mid, 0) + 1

        # Sort by visibility
        sorted_markers = sorted(
            marker_counts.items(),
            key=lambda x: x[1],
            reverse=True
        )

        if len(sorted_markers) < 2:
            print(" Need at least 2 markers visible!")
            return None

        # Assume first marker (most visible) is reference, second is moving
        # In practice, user should specify or we should detect which moves
        reference = sorted_markers[0][0]

        best_inference = None
        best_confidence = 0.0

        for marker_id, count in sorted_markers[1:]:
            if count < len(samples) * 0.7:  # Need 70% visibility
                continue

            analyzer = MotionAnalyzer(samples)
            inference = analyzer.analyze_motion(reference, marker_id)

            if inference.confidence > best_confidence:
                best_confidence = inference.confidence
                best_inference = inference

        return best_inference

    def identify_robot(
        self,
        motor_ids: List[int],
        cmd_func=None,
    ) -> Dict[int, JointInference]:
        """
        Full robot identification.

        Args:
            motor_ids: List of motor IDs to test
            cmd_func: Function to send robot commands

        Returns:
            Dict mapping motor_id -> JointInference
        """
        print("\n" + "=" * 60)
        print("VISUAL SYSTEM IDENTIFICATION")
        print("=" * 60)

        # Check for markers
        poses, annotated = self.tracker.detect_markers()
        print(f"\nDetected {len(poses)} markers: {list(poses.keys())}")

        if len(poses) < 2:
            print("ERROR: Need at least 2 ArUco markers attached to robot!")
            print("Print markers from: https://chev.me/arucogen/")
            return {}

        cv2.imshow("Visual System ID", annotated)
        cv2.waitKey(1)

        results = {}

        for motor_id in motor_ids:
            print(f"\n--- Testing Motor {motor_id} ---")

            # Run wiggle test
            samples = self.run_wiggle_test(
                motor_id=motor_id,
                cmd_func=cmd_func,
            )

            # Analyze
            inference = self.analyze_motor(samples)

            if inference:
                results[motor_id] = inference
                print(f"\n Result: {inference.joint_type.value}")
                if inference.axis is not None:
                    print(f" Axis: {inference.axis}")
                if inference.joint_type == JointType.REVOLUTE:
                    range_deg = math.degrees(inference.range_max - inference.range_min)
                    print(f" Range: {range_deg:.1f} degrees")
                print(f" Confidence: {inference.confidence:.2f}")
            else:
                print(" Could not infer joint properties")

            # Return to center
            if cmd_func:
                cmd_func(f"dog.set_servo({motor_id}, 1500, 500)")
            elif self.robot:
                self.robot.set_servo(motor_id, 1500, 500)
            time.sleep(0.5)

        self.results = results
        return results

    def generate_urdf(self, robot_name: str = "robot") -> str:
        """
        Generate URDF from identified joints.

        This is a simplified URDF generator. A full implementation
        would need link geometries, masses, etc.
        """
        lines = [
            '<?xml version="1.0"?>',
            f'<robot name="{robot_name}">',
            ' <!-- Auto-generated by Visual System Identification -->',
            '',
            ' <!-- Base Link (fixed to world) -->',
            ' <link name="base_link">',
            ' <visual>',
            ' <geometry><box size="0.1 0.1 0.02"/></geometry>',
            ' </visual>',
            ' </link>',
            '',
        ]

        parent_link = "base_link"

        for i, (motor_id, inference) in enumerate(self.results.items()):
            link_name = f"link_{motor_id}"
            joint_name = f"joint_{motor_id}"

            # Joint
            joint_type = "revolute" if inference.joint_type == JointType.REVOLUTE else "fixed"

            lines.append(f' <joint name="{joint_name}" type="{joint_type}">')
            lines.append(f' <parent link="{parent_link}"/>')
            lines.append(f' <child link="{link_name}"/>')

            if inference.origin is not None:
                ox, oy, oz = inference.origin
                lines.append(f' <origin xyz="{ox:.4f} {oy:.4f} {oz:.4f}"/>')

            if inference.axis is not None and inference.joint_type == JointType.REVOLUTE:
                ax, ay, az = inference.axis
                lines.append(f' <axis xyz="{ax:.4f} {ay:.4f} {az:.4f}"/>')
                lines.append(f' <limit lower="{inference.range_min:.4f}" upper="{inference.range_max:.4f}" effort="10" velocity="1"/>')

            lines.append(' </joint>')
            lines.append('')

            # Link
            lines.append(f' <link name="{link_name}">')
            lines.append(' <visual>')
            lines.append(' <geometry><cylinder radius="0.01" length="0.05"/></geometry>')
            lines.append(' </visual>')
            lines.append(' </link>')
            lines.append('')

            parent_link = link_name

        lines.append('</robot>')

        return '\n'.join(lines)

    def save_results(self, path: Path):
        """Save identification results to JSON."""
        data = {
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "joints": {
                str(motor_id): inference.to_dict()
                for motor_id, inference in self.results.items()
            },
        }

        with open(path, 'w') as f:
            json.dump(data, f, indent=2)

def generate_aruco_markers(output_dir: Path, marker_ids: List[int] = [0, 1, 2, 3, 4]):
    """
    Generate printable ArUco markers.

    Print these and attach to robot links.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)

    for marker_id in marker_ids:
        marker_image = cv2.aruco.generateImageMarker(aruco_dict, marker_id, 200)

        # Add border and ID label
        bordered = cv2.copyMakeBorder(marker_image, 20, 40, 20, 20, cv2.BORDER_CONSTANT, value=255)
        cv2.putText(bordered, f"ID: {marker_id}", (60, bordered.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, 0, 2)

        path = output_dir / f"marker_{marker_id}.png"
        cv2.imwrite(str(path), bordered)
        print(f"Saved: {path}")

    print(f"\nPrint markers at ~3cm size and attach to robot links")

def demo_tracking():
    """Demo: Just show marker tracking without robot."""
    print("ArUco Marker Tracking Demo")
    print("Press 'q' to quit, 's' to save screenshot")

    tracker = ArUcoTracker(camera_index=0)
    if not tracker.connect():
        print("Failed to open camera!")
        return

    try:
        while True:
            poses, annotated = tracker.detect_markers()

            # Show info
            y = 30
            for marker_id, pose in poses.items():
                pos = pose.position
                roll, pitch, yaw = pose.euler_angles
                text = f"ID {marker_id}: pos=({pos[0]:.3f}, {pos[1]:.3f}, {pos[2]:.3f})"
                cv2.putText(annotated, text, (10, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                y += 20
                text = f" rot=({roll:.1f}, {pitch:.1f}, {yaw:.1f}) deg"
                cv2.putText(annotated, text, (10, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                y += 25

            cv2.imshow("ArUco Tracking", annotated)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            elif key == ord('s'):
                cv2.imwrite("/tmp/aruco_frame.jpg", annotated)
                print("Saved to /tmp/aruco_frame.jpg")

    finally:
        tracker.disconnect()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Visual System Identification")
    parser.add_argument("--generate-markers", action="store_true", help="Generate printable ArUco markers")
    parser.add_argument("--demo", action="store_true", help="Demo marker tracking (no robot)")
    parser.add_argument("--port", default="/dev/cu.usbserial-10", help="Robot serial port")
    parser.add_argument("--motors", default="11,12,13", help="Motor IDs to test (comma-separated)")
    parser.add_argument("--output", default="/tmp/visual_id_results.json", help="Output file")

    args = parser.parse_args()

    if args.generate_markers:
        generate_aruco_markers(Path("/tmp/aruco_markers"))

    elif args.demo:
        demo_tracking()

    else:
        # Full identification
        motor_ids = [int(x) for x in args.motors.split(",")]

        # Connect to robot
        import serial
        ser = serial.Serial(args.port, 115200, timeout=2)
        time.sleep(0.5)
        ser.read(ser.in_waiting)

        def cmd(command: str, wait: float = 0.3):
            # wait: extra settle time for slow commands (e.g. importing modules on the robot)
            ser.write(f"{command}\r\n".encode())
            time.sleep(wait)
            return ser.read(ser.in_waiting).decode('utf-8', errors='ignore')

        cmd("from HW_MechDog import MechDog", 1.5)
        time.sleep(1)
        cmd("dog = MechDog()", 1.5)
        time.sleep(1)

        try:
            identifier = VisualSystemIdentifier(camera_index=0)

            if not identifier.connect():
                print("Failed to open camera!")
                exit(1)

            results = identifier.identify_robot(motor_ids, cmd_func=cmd)

            # Save results
            identifier.save_results(Path(args.output))
            print(f"\nResults saved to: {args.output}")

            # Generate URDF
            urdf = identifier.generate_urdf("mechdog_arm")
            urdf_path = Path(args.output).with_suffix(".urdf")
            with open(urdf_path, 'w') as f:
                f.write(urdf)
            print(f"URDF saved to: {urdf_path}")

        finally:
            identifier.disconnect()
            ser.close()
            cv2.destroyAllWindows()
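
For readers evaluating the joint-inference math in the new ate/robot/visual_system_id.py module, here is a small synthetic check, not part of the package, that feeds MotionAnalyzer ten hand-built samples of a marker swinging on a 5 cm arc around a stationary reference marker. The marker IDs, motor ID, and PWM values below are made up for illustration; with the circle-fit sign convention noted in _fit_circle_2d above, the trajectory should be classified as a revolute joint about the z axis with roughly 69 degrees of travel.

# Illustrative check only - synthetic data, no camera or robot involved.
import numpy as np
from ate.robot.visual_system_id import MarkerPose, MotionAnalyzer, MotionSample

samples = []
for k, angle in enumerate(np.linspace(-0.6, 0.6, 10)):  # sweep of 1.2 rad (~69 deg)
    # Reference marker: fixed at the camera-frame origin with identity orientation.
    ref = MarkerPose(marker_id=0, timestamp=float(k), position=np.zeros(3),
                     rotation=np.eye(3), rvec=np.zeros(3), tvec=np.zeros(3))
    # Moving marker: rides a 5 cm arc in the xy-plane.
    pos = np.array([0.05 * np.cos(angle), 0.05 * np.sin(angle), 0.0])
    mov = MarkerPose(marker_id=1, timestamp=float(k), position=pos,
                     rotation=np.eye(3), rvec=np.zeros(3), tvec=pos)
    samples.append(MotionSample(motor_id=11, pwm_value=500 + 200 * k,
                                marker_poses={0: ref, 1: mov}))

inference = MotionAnalyzer(samples).analyze_motion(reference_marker=0, moving_marker=1)
print(inference.joint_type)                                   # expected: JointType.REVOLUTE
print(inference.axis)                                         # expected: approximately [0, 0, +/-1]
print(np.degrees(inference.range_max - inference.range_min))  # expected: roughly 69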