foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,654 @@
#!/usr/bin/env python3
"""
LLM-Based Robot System Identification

No markers. No calibration. Just:
1. Wiggle each motor
2. Show before/after frames to a multimodal LLM
3. The LLM describes what moved and how
4. Generate a URDF with semantic labels

This bridges real robots to Artifex Desktop.

Usage:
    python -m ate.robot.llm_system_id --port /dev/cu.usbserial-10 --motors 11,12,13
"""

from __future__ import annotations  # keep np.ndarray annotations lazy so import works without numpy

import os
import time
import json
import base64
import io
from dataclasses import dataclass, field
from typing import List, Dict, Optional, Tuple, Any
from pathlib import Path
from enum import Enum

try:
    import cv2
    import numpy as np
    HAS_CV = True
except ImportError:
    HAS_CV = False

try:
    from anthropic import Anthropic
    HAS_ANTHROPIC = True
except ImportError:
    HAS_ANTHROPIC = False

# Import unified JointType from shared types module
from ate.robot.types import JointType


@dataclass
class JointDiscovery:
    """What we learned about a joint from the LLM."""
    motor_id: int
    joint_type: JointType
    body_part: str           # "arm", "leg", "gripper", etc.
    joint_name: str          # "shoulder", "elbow", "knee", etc.
    axis_description: str    # "horizontal", "vertical", etc.
    range_degrees: float     # Approximate range of motion
    parent_link: str         # What this is attached to
    observations: List[str]  # Raw LLM observations
    confidence: float

    # Images for reference
    frame_min: Optional[np.ndarray] = None
    frame_max: Optional[np.ndarray] = None

    def to_dict(self) -> dict:
        return {
            "motor_id": self.motor_id,
            "joint_type": self.joint_type.value,
            "body_part": self.body_part,
            "joint_name": self.joint_name,
            "axis_description": self.axis_description,
            "range_degrees": self.range_degrees,
            "parent_link": self.parent_link,
            "observations": self.observations,
            "confidence": self.confidence,
        }

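# Illustrative to_dict() output (values invented for the example):
#   {"motor_id": 11, "joint_type": "revolute", "body_part": "leg",
#    "joint_name": "knee", "axis_description": "horizontal",
#    "range_degrees": 90.0, "parent_link": "base_link",
#    "observations": ["rotated downward", ""], "confidence": 0.9}
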
class LLMSystemIdentifier:
    """
    Use a multimodal LLM to identify robot structure.

    The LLM acts as a "robot coach" that watches the robot move
    and describes what it sees, enabling us to build a URDF.
    """

    def __init__(
        self,
        camera_index: int = 0,
        model: str = "claude-sonnet-4-20250514",
    ):
        if not HAS_CV:
            raise ImportError("OpenCV required: pip install opencv-python")
        if not HAS_ANTHROPIC:
            raise ImportError("Anthropic SDK required: pip install anthropic")

        self.camera_index = camera_index
        self.model = model
        self.client = Anthropic()

        # Camera
        self.cap = None

        # Results
        self.discoveries: Dict[int, JointDiscovery] = {}
        self.robot_overview: Optional[str] = None

    def connect_camera(self) -> bool:
        """Open camera connection."""
        self.cap = cv2.VideoCapture(self.camera_index)
        if not self.cap.isOpened():
            print(f"Failed to open camera {self.camera_index}")
            return False

        # Set resolution
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        # Warm up
        for _ in range(5):
            self.cap.read()

        return True

    def disconnect(self):
        """Close camera."""
        if self.cap:
            self.cap.release()
            self.cap = None

    def capture_frame(self) -> np.ndarray:
        """Capture a single frame."""
        if not self.cap:
            return np.array([])

        # Discard stale frames
        for _ in range(3):
            self.cap.read()

        ret, frame = self.cap.read()
        return frame if ret else np.array([])

    def frame_to_base64(self, frame: np.ndarray) -> str:
        """Convert frame to base64 for the LLM."""
        _, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
        return base64.standard_b64encode(buffer).decode('utf-8')

    def analyze_robot_overview(self, frame: np.ndarray) -> str:
        """Get an initial overview of the robot."""
        print("\nAnalyzing robot structure...")

        image_data = self.frame_to_base64(frame)

        response = self.client.messages.create(
            model=self.model,
            max_tokens=1024,
            messages=[{
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": "image/jpeg",
                            "data": image_data,
                        },
                    },
                    {
                        "type": "text",
                        "text": """Analyze this robot. Describe:

1. What type of robot is this? (quadruped, arm, humanoid, etc.)
2. How many limbs/appendages do you see?
3. What are the main body parts? (body, legs, arm, gripper, head, etc.)
4. Estimate the approximate size.

Be concise but specific. This will help us understand the kinematic structure."""
                    }
                ],
            }],
        )

        return response.content[0].text

    def analyze_motor_motion(
        self,
        motor_id: int,
        frame_before: np.ndarray,
        frame_after: np.ndarray,
        pwm_before: int,
        pwm_after: int,
    ) -> JointDiscovery:
        """
        Analyze what happened when a motor moved.

        Shows before/after frames to the LLM and asks it to describe the motion.
        """
        print(f"\nAnalyzing motor {motor_id} motion...")

        before_b64 = self.frame_to_base64(frame_before)
        after_b64 = self.frame_to_base64(frame_after)

        response = self.client.messages.create(
            model=self.model,
            max_tokens=1024,
            messages=[{
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": f"I'm testing motor {motor_id} on a robot. Here are two images showing BEFORE (PWM={pwm_before}) and AFTER (PWM={pwm_after}) positions."
                    },
                    {
                        "type": "text",
                        "text": "BEFORE position:"
                    },
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": "image/jpeg",
                            "data": before_b64,
                        },
                    },
                    {
                        "type": "text",
                        "text": "AFTER position:"
                    },
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": "image/jpeg",
                            "data": after_b64,
                        },
                    },
                    {
                        "type": "text",
                        "text": """Analyze the motion. Answer these questions:

1. WHAT MOVED? (e.g., "the gripper", "front-left leg", "arm shoulder")
2. HOW DID IT MOVE? (e.g., "rotated downward", "opened wider", "extended forward")
3. JOINT TYPE: Is this a rotation (revolute) or linear slide (prismatic)?
4. AXIS: What axis does it rotate/move around? (horizontal/vertical/forward-back)
5. RANGE: Approximately how many degrees did it rotate? (or mm if linear)
6. PARENT: What body part is this attached to?
7. CONFIDENCE: How certain are you? (high/medium/low)

If nothing visibly moved, say "NO VISIBLE MOTION" and explain why (maybe the camera angle, or the motor isn't connected).

Format your response as:
BODY_PART: [part name]
JOINT_NAME: [specific joint name]
MOTION: [description]
JOINT_TYPE: [revolute/prismatic/fixed]
AXIS: [description]
RANGE_DEGREES: [number]
PARENT: [parent link name]
CONFIDENCE: [high/medium/low]
NOTES: [any other observations]"""
                    }
                ],
            }],
        )

        # Parse response
        text = response.content[0].text
        return self._parse_motion_analysis(motor_id, text, frame_before, frame_after)

    def _parse_motion_analysis(
        self,
        motor_id: int,
        text: str,
        frame_min: np.ndarray,
        frame_max: np.ndarray,
    ) -> JointDiscovery:
        """Parse LLM response into structured JointDiscovery."""
        lines = text.strip().split('\n')
        parsed = {}

        for line in lines:
            if ':' in line:
                key, value = line.split(':', 1)
                parsed[key.strip().upper()] = value.strip()

        # Extract values with defaults
        body_part = parsed.get('BODY_PART', 'unknown').lower()
        joint_name = parsed.get('JOINT_NAME', f'joint_{motor_id}').lower()

        # Parse joint type
        joint_type_str = parsed.get('JOINT_TYPE', 'unknown').lower()
        if 'revolute' in joint_type_str or 'rotat' in joint_type_str:
            joint_type = JointType.REVOLUTE
        elif 'prismatic' in joint_type_str or 'linear' in joint_type_str:
            joint_type = JointType.PRISMATIC
        elif 'fixed' in joint_type_str or 'no' in joint_type_str:
            joint_type = JointType.FIXED
        else:
            joint_type = JointType.UNKNOWN

        # Parse range
        range_str = parsed.get('RANGE_DEGREES', '0')
        try:
            range_degrees = float(''.join(c for c in range_str if c.isdigit() or c == '.'))
        except ValueError:
            range_degrees = 0.0

        # Parse confidence
        conf_str = parsed.get('CONFIDENCE', 'medium').lower()
        if 'high' in conf_str:
            confidence = 0.9
        elif 'low' in conf_str:
            confidence = 0.4
        else:
            confidence = 0.7

        return JointDiscovery(
            motor_id=motor_id,
            joint_type=joint_type,
            body_part=body_part,
            joint_name=joint_name,
            axis_description=parsed.get('AXIS', 'unknown'),
            range_degrees=range_degrees,
            parent_link=parsed.get('PARENT', 'base_link'),
            observations=[
                parsed.get('MOTION', ''),
                parsed.get('NOTES', ''),
            ],
            confidence=confidence,
            frame_min=frame_min,
            frame_max=frame_max,
        )

    def run_wiggle_test(
        self,
        motor_id: int,
        pwm_min: int = 500,
        pwm_max: int = 2500,
        cmd_func=None,
    ) -> JointDiscovery:
        """
        Run wiggle test on a single motor.

        1. Move to min position, capture frame
        2. Move to max position, capture frame
        3. Ask LLM to analyze the motion
        """
        print(f"\n--- Wiggle Test: Motor {motor_id} ---")

        # Move to min, capture
        print(f"  Moving to PWM {pwm_min}...")
        if cmd_func:
            cmd_func(f"dog.set_servo({motor_id}, {pwm_min}, 800)")
        time.sleep(1.0)
        frame_min = self.capture_frame()

        # Show frame
        if frame_min.size > 0:
            cv2.imshow("System ID", frame_min)
            cv2.waitKey(100)

        # Move to max, capture
        print(f"  Moving to PWM {pwm_max}...")
        if cmd_func:
            cmd_func(f"dog.set_servo({motor_id}, {pwm_max}, 800)")
        time.sleep(1.0)
        frame_max = self.capture_frame()

        # Show frame
        if frame_max.size > 0:
            cv2.imshow("System ID", frame_max)
            cv2.waitKey(100)

        # Save frames for debugging (skip empty captures)
        if frame_min.size > 0:
            cv2.imwrite(f"/tmp/motor_{motor_id}_min.jpg", frame_min)
        if frame_max.size > 0:
            cv2.imwrite(f"/tmp/motor_{motor_id}_max.jpg", frame_max)

        # Return to center
        print("  Returning to center...")
        if cmd_func:
            cmd_func(f"dog.set_servo({motor_id}, 1500, 500)")
        time.sleep(0.5)

        # Analyze with LLM
        discovery = self.analyze_motor_motion(
            motor_id,
            frame_min,
            frame_max,
            pwm_min,
            pwm_max,
        )

        print("\n  Result:")
        print(f"    Body part: {discovery.body_part}")
        print(f"    Joint: {discovery.joint_name}")
        print(f"    Type: {discovery.joint_type.value}")
        print(f"    Range: ~{discovery.range_degrees:.0f}°")
        print(f"    Confidence: {discovery.confidence:.0%}")

        return discovery

    def identify_robot(
        self,
        motor_ids: List[int],
        cmd_func=None,
    ) -> Dict[int, JointDiscovery]:
        """
        Full robot identification.

        1. Get overview of robot
        2. Wiggle each motor
        3. Build kinematic understanding
        """
        print("\n" + "=" * 60)
        print("LLM-BASED ROBOT SYSTEM IDENTIFICATION")
        print("=" * 60)

        # Initial frame and overview
        frame = self.capture_frame()
        if frame.size == 0:
            print("ERROR: Cannot capture from camera!")
            return {}

        cv2.imshow("System ID", frame)
        cv2.waitKey(100)
        cv2.imwrite("/tmp/robot_overview.jpg", frame)

        self.robot_overview = self.analyze_robot_overview(frame)
        print(f"\nRobot Overview:\n{self.robot_overview}")

        # Test each motor
        for motor_id in motor_ids:
            discovery = self.run_wiggle_test(motor_id, cmd_func=cmd_func)
            self.discoveries[motor_id] = discovery

        return self.discoveries

    def generate_urdf(self, robot_name: str = "discovered_robot") -> str:
        """
        Generate URDF from discoveries.

        This creates a valid URDF that can be loaded into Artifex Desktop
        for visualization and simulation.
        """
        lines = [
            '<?xml version="1.0"?>',
            f'<robot name="{robot_name}">',
            '  <!-- Generated by LLM System Identification -->',
            f'  <!-- Robot Overview: {self.robot_overview[:100] if self.robot_overview else "Unknown"}... -->',
            '',
            '  <!-- ========== LINKS ========== -->',
            '',
            '  <!-- Base Link -->',
            '  <link name="base_link">',
            '    <visual>',
            '      <geometry>',
            '        <box size="0.15 0.10 0.05"/>',
            '      </geometry>',
            '      <material name="gray">',
            '        <color rgba="0.5 0.5 0.5 1"/>',
            '      </material>',
            '    </visual>',
            '    <collision>',
            '      <geometry>',
            '        <box size="0.15 0.10 0.05"/>',
            '      </geometry>',
            '    </collision>',
            '    <inertial>',
            '      <mass value="0.5"/>',
            '      <inertia ixx="0.001" ixy="0" ixz="0" iyy="0.001" iyz="0" izz="0.001"/>',
            '    </inertial>',
            '  </link>',
            '',
        ]

        # Sort discoveries to build kinematic chain
        # For now, assume simple chain: base → joint1 → joint2 → ...
        parent_link = "base_link"

        for motor_id, discovery in sorted(self.discoveries.items()):
            link_name = f"{discovery.body_part}_{discovery.joint_name}".replace(" ", "_")
            joint_name = f"motor_{motor_id}_{discovery.joint_name}".replace(" ", "_")

            # Determine axis from description
            axis = "0 0 1"  # Default Z-axis
            axis_desc = discovery.axis_description.lower()
            if 'horizontal' in axis_desc or 'side' in axis_desc:
                axis = "1 0 0"  # X-axis
            elif 'forward' in axis_desc or 'pitch' in axis_desc:
                axis = "0 1 0"  # Y-axis
            elif 'vertical' in axis_desc or 'yaw' in axis_desc:
                axis = "0 0 1"  # Z-axis

            # Joint limits
            range_rad = discovery.range_degrees * 3.14159 / 180.0
            lower = -range_rad / 2
            upper = range_rad / 2

            # Add joint
            joint_type = discovery.joint_type.value
            if joint_type == "unknown":
                joint_type = "revolute"

            lines.extend([
                f'  <!-- Motor {motor_id}: {discovery.body_part} {discovery.joint_name} -->',
                f'  <!-- Observations: {"; ".join(discovery.observations)} -->',
                f'  <joint name="{joint_name}" type="{joint_type}">',
                f'    <parent link="{parent_link}"/>',
                f'    <child link="{link_name}"/>',
                '    <origin xyz="0.05 0 0" rpy="0 0 0"/>',
                f'    <axis xyz="{axis}"/>',
            ])

            if joint_type == "revolute":
                lines.append(f'    <limit lower="{lower:.3f}" upper="{upper:.3f}" effort="10" velocity="3"/>')

            lines.extend([
                '  </joint>',
                '',
                f'  <link name="{link_name}">',
                '    <visual>',
                '      <origin xyz="0.025 0 0"/>',
                '      <geometry>',
                '        <cylinder radius="0.01" length="0.05"/>',
                '      </geometry>',
                '      <material name="blue">',
                '        <color rgba="0.2 0.2 0.8 1"/>',
                '      </material>',
                '    </visual>',
                '    <collision>',
                '      <origin xyz="0.025 0 0"/>',
                '      <geometry>',
                '        <cylinder radius="0.01" length="0.05"/>',
                '      </geometry>',
                '    </collision>',
                '    <inertial>',
                '      <mass value="0.1"/>',
                '      <inertia ixx="0.0001" ixy="0" ixz="0" iyy="0.0001" iyz="0" izz="0.0001"/>',
                '    </inertial>',
                '  </link>',
                '',
            ])

            parent_link = link_name

        lines.append('</robot>')

        return '\n'.join(lines)

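    # Sample of one generated joint, for a first motor the LLM labeled a
    # horizontal leg/knee (values illustrative, traced from the logic above):
    #   <joint name="motor_11_knee" type="revolute">
    #     <parent link="base_link"/>
    #     <child link="leg_knee"/>
    #     <origin xyz="0.05 0 0" rpy="0 0 0"/>
    #     <axis xyz="1 0 0"/>
    #     <limit lower="-0.785" upper="0.785" effort="10" velocity="3"/>
    #   </joint>
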
    def save_results(self, output_dir: Path):
        """Save all results."""
        output_dir.mkdir(parents=True, exist_ok=True)

        # Save JSON
        data = {
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "robot_overview": self.robot_overview,
            "discoveries": {
                str(k): v.to_dict() for k, v in self.discoveries.items()
            },
        }

        json_path = output_dir / "system_id_results.json"
        with open(json_path, 'w') as f:
            json.dump(data, f, indent=2)
        print(f"\nSaved results to: {json_path}")

        # Save URDF
        urdf = self.generate_urdf()
        urdf_path = output_dir / "discovered_robot.urdf"
        with open(urdf_path, 'w') as f:
            f.write(urdf)
        print(f"Saved URDF to: {urdf_path}")

        # Save before/after images
        for motor_id, discovery in self.discoveries.items():
            if discovery.frame_min is not None:
                cv2.imwrite(str(output_dir / f"motor_{motor_id}_min.jpg"), discovery.frame_min)
            if discovery.frame_max is not None:
                cv2.imwrite(str(output_dir / f"motor_{motor_id}_max.jpg"), discovery.frame_max)

        print(f"Saved motion images to: {output_dir}")

        return urdf_path


def main():
    """Main entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="LLM-Based Robot System Identification")
    parser.add_argument("--port", default="/dev/cu.usbserial-10", help="Robot serial port")
    parser.add_argument("--motors", default="11,12,13", help="Motor IDs to test")
    parser.add_argument("--camera", type=int, default=0, help="Camera index")
    parser.add_argument("--output", default="/tmp/llm_system_id", help="Output directory")
    parser.add_argument("--demo", action="store_true", help="Demo mode (no robot)")

    args = parser.parse_args()

    motor_ids = [int(x) for x in args.motors.split(",")]

    # Create identifier
    identifier = LLMSystemIdentifier(camera_index=args.camera)

    if not identifier.connect_camera():
        print("Failed to connect camera!")
        return

    cmd_func = None
    ser = None

    if not args.demo:
        # Connect to robot
        import serial
        ser = serial.Serial(args.port, 115200, timeout=2)
        time.sleep(0.5)
        ser.read(ser.in_waiting)

        def cmd(command: str):
            ser.write(f"{command}\r\n".encode())
            time.sleep(0.3)
            return ser.read(ser.in_waiting).decode('utf-8', errors='ignore')

        cmd("from HW_MechDog import MechDog")
        time.sleep(1.5)
        cmd("dog = MechDog()")
        time.sleep(1.5)

        cmd_func = cmd

    try:
        # Run identification
        discoveries = identifier.identify_robot(motor_ids, cmd_func=cmd_func)

        if discoveries:
            # Save results
            identifier.save_results(Path(args.output))

            print("\n" + "=" * 60)
            print("IDENTIFICATION COMPLETE")
            print("=" * 60)
            print(f"\nDiscovered {len(discoveries)} joints:")
            for motor_id, disc in discoveries.items():
                print(f"  Motor {motor_id}: {disc.body_part} {disc.joint_name} ({disc.joint_type.value})")

            print(f"\nURDF saved to: {args.output}/discovered_robot.urdf")
            print("You can import this into Artifex Desktop!")

    finally:
        identifier.disconnect()
        if ser:
            ser.close()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
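
For a no-hardware smoke test of the new module, a minimal programmatic sketch (equivalent to passing --demo above: frames are captured and sent to the LLM, but no servo commands are issued; ANTHROPIC_API_KEY must be set, since the constructor instantiates Anthropic()):

    from pathlib import Path
    from ate.robot.llm_system_id import LLMSystemIdentifier

    ident = LLMSystemIdentifier(camera_index=0)  # requires opencv-python + anthropic
    if ident.connect_camera():
        try:
            # cmd_func=None ≈ --demo: wiggle tests run without moving any servos
            ident.identify_robot([11, 12, 13], cmd_func=None)
            ident.save_results(Path("/tmp/llm_system_id"))
        finally:
            ident.disconnect()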