foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/robot/visual_servoing.py (new file)
@@ -0,0 +1,570 @@
"""
Visual Servoing Controller for MechDog.

Implements closed-loop control using camera feedback to navigate
to and pick up objects. No LLM required for basic operation.

For more complex decision-making, can be combined with an LLM subagent.
"""

import time
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Callable, List
from enum import Enum

try:
    import cv2
    import numpy as np
    HAS_CV = True
except ImportError:
    HAS_CV = False


class ServoState(Enum):
    """State of the visual servoing controller."""
    SEARCHING = "searching"        # Looking for target
    ALIGNING = "aligning"          # Rotating to center target
    APPROACHING = "approaching"    # Moving toward target
    READY_TO_GRAB = "ready"        # Close enough to grab
    GRABBING = "grabbing"          # Executing pickup
    SUCCESS = "success"            # Target acquired
    FAILED = "failed"              # Gave up


@dataclass
class TargetDetection:
    """Result of detecting a target in an image."""
    found: bool
    x: float = 0.5         # Normalized x (0=left, 0.5=center, 1=right)
    y: float = 0.5         # Normalized y (0=top, 1=bottom)
    size: float = 0.0      # Normalized size (fraction of frame)
    confidence: float = 0.0

    @property
    def center_offset(self) -> float:
        """Offset from center (-0.5 to 0.5, negative=left)."""
        return self.x - 0.5

    @property
    def is_centered(self) -> bool:
        """Is target roughly centered?"""
        return abs(self.center_offset) < 0.1

    @property
    def is_close(self) -> bool:
        """Is target close enough to grab? (size > 15% of frame)"""
        return self.size > 0.15

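# Worked example of the normalized convention above: a centroid at pixel
# (384, 360) in a 1280x720 frame gives x = 384/1280 = 0.30, so center_offset
# is -0.20 (target left of center) and is_centered is False; a 220-pixel
# bounding box gives size = 220/1280 ~ 0.17 > 0.15, so is_close is True.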
class GreenBallDetector:
    """
    Simple green ball detector using HSV color filtering.

    No ML required - just OpenCV color thresholding.
    """

    def __init__(
        self,
        hue_range: Tuple[int, int] = (35, 85),  # Green hue range
        sat_min: int = 50,
        val_min: int = 50,
        min_area: int = 500,
    ):
        self.hue_range = hue_range
        self.sat_min = sat_min
        self.val_min = val_min
        self.min_area = min_area

    def detect(self, image: np.ndarray) -> TargetDetection:
        """
        Detect green ball in image.

        Args:
            image: BGR image from OpenCV

        Returns:
            TargetDetection with ball position
        """
        if image is None or image.size == 0:
            return TargetDetection(found=False)

        h, w = image.shape[:2]

        # Convert to HSV
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        # Create mask for green color
        lower = np.array([self.hue_range[0], self.sat_min, self.val_min])
        upper = np.array([self.hue_range[1], 255, 255])
        mask = cv2.inRange(hsv, lower, upper)

        # Clean up mask
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        # Find contours
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        if not contours:
            return TargetDetection(found=False)

        # Find largest contour
        largest = max(contours, key=cv2.contourArea)
        area = cv2.contourArea(largest)

        if area < self.min_area:
            return TargetDetection(found=False)

        # Get centroid
        M = cv2.moments(largest)
        if M["m00"] == 0:
            return TargetDetection(found=False)

        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])

        # Get bounding rect for size
        x, y, bw, bh = cv2.boundingRect(largest)

        return TargetDetection(
            found=True,
            x=cx / w,
            y=cy / h,
            size=max(bw, bh) / max(w, h),
            confidence=min(1.0, area / 10000),
        )

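# The same pipeline works for other ball colors by swapping the HSV bounds
# (e.g. roughly hue_range=(100, 130) for blue on OpenCV's 0-179 hue scale).
# Red is the awkward case: its hue wraps around 0/179, so it needs two
# inRange() masks OR'd together, which this single-band detector does not do.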
class VisualServoController:
    """
    Closed-loop visual servoing controller.

    Uses camera feedback to navigate robot to target and pick it up.

    Example:
        controller = VisualServoController(
            capture_fn=webcam.capture,
            move_fn=robot.move,
            stop_fn=robot.stop,
            gripper_open_fn=robot.gripper_open,
            gripper_close_fn=robot.gripper_close,
            arm_down_fn=robot.arm_down,
            arm_up_fn=robot.arm_up,
        )

        success = controller.pickup_target()
    """

    def __init__(
        self,
        capture_fn: Callable[[], np.ndarray],
        move_fn: Callable[[float, float], None],  # move(forward_vel, turn_vel)
        stop_fn: Callable[[], None],
        gripper_open_fn: Callable[[], None],
        gripper_close_fn: Callable[[], None],
        arm_down_fn: Callable[[], None],
        arm_up_fn: Callable[[], None],
        detector: Optional[GreenBallDetector] = None,
        # Tuning parameters
        turn_speed: float = 25.0,       # Turn velocity when aligning
        approach_speed: float = 20.0,   # Forward velocity when approaching
        align_threshold: float = 0.1,   # Center offset threshold
        close_threshold: float = 0.15,  # Size threshold for "close enough"
        max_iterations: int = 50,       # Max control loop iterations
        step_delay: float = 0.3,        # Delay between steps
        # Callbacks for monitoring
        on_state_change: Optional[Callable[[ServoState], None]] = None,
        on_detection: Optional[Callable[[TargetDetection, np.ndarray], None]] = None,
    ):
        self.capture = capture_fn
        self.move = move_fn
        self.stop = stop_fn
        self.gripper_open = gripper_open_fn
        self.gripper_close = gripper_close_fn
        self.arm_down = arm_down_fn
        self.arm_up = arm_up_fn

        self.detector = detector or GreenBallDetector()

        self.turn_speed = turn_speed
        self.approach_speed = approach_speed
        self.align_threshold = align_threshold
        self.close_threshold = close_threshold
        self.max_iterations = max_iterations
        self.step_delay = step_delay

        self.on_state_change = on_state_change
        self.on_detection = on_detection

        self.state = ServoState.SEARCHING
        self.iterations = 0
        self.history: List[TargetDetection] = []

    def _set_state(self, new_state: ServoState):
        """Update state and notify callback."""
        if new_state != self.state:
            self.state = new_state
            if self.on_state_change:
                self.on_state_change(new_state)

    def _detect(self) -> TargetDetection:
        """Capture image and detect target."""
        image = self.capture()
        detection = self.detector.detect(image)

        self.history.append(detection)
        if len(self.history) > 10:
            self.history.pop(0)

        if self.on_detection:
            self.on_detection(detection, image)

        return detection

    def _is_detection_stable(self, min_frames: int = 3) -> bool:
        """Check if detection is stable across recent frames."""
        if len(self.history) < min_frames:
            return False

        recent = self.history[-min_frames:]
        if not all(d.found for d in recent):
            return False

        # Check position consistency
        x_vals = [d.x for d in recent]
        return max(x_vals) - min(x_vals) < 0.15

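    # Example of the stability test above: recent x values of 0.48, 0.55,
    # 0.52 give a spread of 0.07 < 0.15, so the detection counts as stable;
    # a target bouncing between 0.30 and 0.60 (spread 0.30) would not.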
    def pickup_target(self) -> bool:
        """
        Execute full pickup behavior with visual servoing.

        Returns:
            True if pickup succeeded, False otherwise
        """
        print("\n" + "=" * 50)
        print("VISUAL SERVOING PICKUP")
        print("=" * 50)

        self.iterations = 0
        self.history = []
        self._set_state(ServoState.SEARCHING)

        try:
            # Phase 1: Search for target
            print("\n[1] Searching for target...")
            if not self._search_phase():
                return False

            # Phase 2: Align to target
            print("\n[2] Aligning to target...")
            if not self._align_phase():
                return False

            # Phase 3: Approach target
            print("\n[3] Approaching target...")
            if not self._approach_phase():
                return False

            # Phase 4: Execute pickup
            print("\n[4] Executing pickup...")
            if not self._pickup_phase():
                return False

            self._set_state(ServoState.SUCCESS)
            print("\n[SUCCESS] Pickup complete!")
            return True

        except KeyboardInterrupt:
            print("\n[ABORTED] User interrupted")
            self.stop()
            self._set_state(ServoState.FAILED)
            return False

        except Exception as e:
            print(f"\n[ERROR] {e}")
            self.stop()
            self._set_state(ServoState.FAILED)
            return False

    def _search_phase(self) -> bool:
        """Search for target by rotating."""
        self._set_state(ServoState.SEARCHING)

        # First check current view
        detection = self._detect()
        if detection.found:
            print(f" Found immediately at x={detection.x:.2f}")
            return True

        # Rotate to search
        search_steps = 12  # Full rotation in ~12 steps
        for i in range(search_steps):
            if self.iterations >= self.max_iterations:
                print(" Max iterations reached")
                self._set_state(ServoState.FAILED)
                return False

            print(f" Searching... (step {i+1}/{search_steps})")

            # Turn a bit
            self.move(0, self.turn_speed)
            time.sleep(0.5)
            self.stop()
            time.sleep(0.2)

            # Check for target
            detection = self._detect()
            self.iterations += 1

            if detection.found:
                print(f" Found at x={detection.x:.2f}")
                return True

        print(" Target not found after full rotation")
        self._set_state(ServoState.FAILED)
        return False

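    # Whether twelve 0.5 s turn bursts really cover 360 degrees depends on
    # the robot's actual yaw rate at turn_speed (at roughly 60 deg/s each
    # burst sweeps ~30 degrees, closing the circle); a slower gait would
    # need a larger search_steps.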
    def _align_phase(self) -> bool:
        """Align robot to center target in view."""
        self._set_state(ServoState.ALIGNING)

        for i in range(20):  # Max 20 alignment steps
            if self.iterations >= self.max_iterations:
                print(" Max iterations reached")
                self._set_state(ServoState.FAILED)
                return False

            detection = self._detect()
            self.iterations += 1

            if not detection.found:
                print(" Lost target during alignment!")
                # Try to re-find
                if not self._search_phase():
                    return False
                continue

            offset = detection.center_offset

            if abs(offset) < self.align_threshold:
                print(f" Aligned! (offset={offset:.3f})")
                return True

            # Turn toward target
            # Negative offset = target on left = turn left (negative turn_vel)
            turn_vel = -self.turn_speed if offset < 0 else self.turn_speed

            # Proportional control - turn less when close to aligned
            turn_vel *= min(1.0, abs(offset) * 3)

            print(f" Offset={offset:.3f}, turning {'left' if turn_vel < 0 else 'right'}")

            self.move(0, turn_vel)
            time.sleep(0.3)
            self.stop()
            time.sleep(0.1)

        print(" Failed to align after 20 attempts")
        self._set_state(ServoState.FAILED)
        return False

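    # Worked example of the proportional law above with turn_speed=25: an
    # offset of 0.40 saturates the gain (0.40 * 3 > 1) and commands the full
    # 25, while an offset of 0.15 commands 25 * 0.45 = 11.25, so rotation
    # slows as the target nears center and overshoot is reduced.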
    def _approach_phase(self) -> bool:
        """Approach target until close enough."""
        self._set_state(ServoState.APPROACHING)

        for i in range(30):  # Max 30 approach steps
            if self.iterations >= self.max_iterations:
                print(" Max iterations reached")
                self._set_state(ServoState.FAILED)
                return False

            detection = self._detect()
            self.iterations += 1

            if not detection.found:
                print(" Lost target during approach!")
                # Stop and try to re-find
                self.stop()
                if not self._search_phase():
                    return False
                if not self._align_phase():
                    return False
                continue

            # Check if close enough
            if detection.size >= self.close_threshold:
                print(f" Close enough! (size={detection.size:.3f})")
                self.stop()
                return True

            # Check alignment and correct if needed
            offset = detection.center_offset
            turn_correction = 0
            if abs(offset) > self.align_threshold:
                turn_correction = -self.turn_speed * 0.5 if offset < 0 else self.turn_speed * 0.5

            print(f" Size={detection.size:.3f}, offset={offset:.3f}, moving forward")

            # Move forward with slight turn correction
            self.move(self.approach_speed, turn_correction)
            time.sleep(0.4)
            self.stop()
            time.sleep(0.1)

        print(" Failed to reach target after 30 steps")
        self._set_state(ServoState.FAILED)
        return False

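    # Note that the approach loop never estimates metric distance: apparent
    # size grows roughly in inverse proportion to range, so crossing
    # close_threshold (15% of the frame by default) stands in for "within
    # grabbing reach". The stopping distance therefore depends on the ball's
    # physical diameter and the camera's field of view, not on odometry.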
    def _pickup_phase(self) -> bool:
        """Execute the pickup sequence."""
        self._set_state(ServoState.GRABBING)

        print(" Opening gripper...")
        self.gripper_open()
        time.sleep(0.4)

        print(" Lowering arm...")
        self.arm_down()
        time.sleep(0.5)

        print(" Closing gripper...")
        self.gripper_close()
        time.sleep(0.4)

        print(" Lifting arm...")
        self.arm_up()
        time.sleep(0.4)

        # Verify pickup by checking if ball still visible on ground
        detection = self._detect()

        if not detection.found or detection.size < 0.05:
            print(" Ball no longer visible on ground - likely picked up!")
            return True
        else:
            print(f" Ball still visible (size={detection.size:.3f}) - may have missed")
            # Still return True as we completed the sequence
            return True


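# A minimal offline harness (hypothetical; not part of the package API) that
# exercises the controller against canned frames with no-op actuators,
# useful for tuning detector thresholds without a robot attached.
def _dry_run_controller(frames: List["np.ndarray"]) -> bool:
    frame_iter = iter(frames)

    def noop(*_args) -> None:
        pass

    return VisualServoController(
        capture_fn=lambda: next(frame_iter, np.array([])),  # empty => not found
        move_fn=noop,
        stop_fn=noop,
        gripper_open_fn=noop,
        gripper_close_fn=noop,
        arm_down_fn=noop,
        arm_up_fn=noop,
    ).pickup_target()

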
def create_mechdog_servo_controller(
    serial_port: str = "/dev/cu.usbserial-10",
    webcam_index: int = 0,
) -> Optional[VisualServoController]:
    """
    Create a visual servo controller for MechDog.

    Args:
        serial_port: Serial port for MechDog
        webcam_index: Webcam device index

    Returns:
        Configured VisualServoController, or None if setup fails
    """
    if not HAS_CV:
        print("OpenCV not available. Install with: pip install opencv-python")
        return None

    try:
        import serial
    except ImportError:
        print("pyserial not available. Install with: pip install pyserial")
        return None

    # Setup serial connection
    try:
        ser = serial.Serial(serial_port, 115200, timeout=1)
        time.sleep(0.5)
        ser.read(ser.in_waiting)

        def cmd(command: str, wait: float = 0.3):
            ser.write(f"{command}\r\n".encode())
            time.sleep(wait)
            return ser.read(ser.in_waiting).decode('utf-8', errors='ignore')

        # Initialize MechDog
        cmd("from HW_MechDog import MechDog", 1.5)
        cmd("dog = MechDog()", 1.5)
        print(f"Connected to MechDog on {serial_port}")

    except Exception as e:
        print(f"Failed to connect to MechDog: {e}")
        return None

    # Setup webcam
    cap = cv2.VideoCapture(webcam_index)
    if not cap.isOpened():
        print(f"Failed to open webcam {webcam_index}")
        ser.close()
        return None

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    print(f"Webcam {webcam_index} opened")

    # Create wrapper functions
    def capture() -> np.ndarray:
        # Warm up frames
        for _ in range(2):
            cap.read()
        ret, frame = cap.read()
        return frame if ret else np.array([])

    def move(forward: float, turn: float):
        cmd(f"dog.move({int(forward)}, {int(turn)})", 0.2)

    def stop():
        cmd("dog.move(0, 0)", 0.2)

    def gripper_open():
        cmd("dog.set_servo(11, 500, 400)", 0.5)

    def gripper_close():
        cmd("dog.set_servo(11, 2400, 400)", 0.5)

    def arm_down():
        # CORRECTED 2026-01-03: Vision analysis confirmed servos 8,9,11 are arm
        cmd("dog.set_servo(8, 800, 600)", 0.7)   # Shoulder down
        cmd("dog.set_servo(9, 800, 600)", 0.7)   # Elbow extended

    def arm_up():
        # CORRECTED 2026-01-03: Vision analysis confirmed servos 8,9,11 are arm
        cmd("dog.set_servo(9, 2000, 600)", 0.7)  # Elbow retracted first
        cmd("dog.set_servo(8, 2200, 600)", 0.7)  # Then shoulder up

    # State change callback
    def on_state(state: ServoState):
        print(f" [STATE] {state.value}")

    # Detection callback (save images for debugging)
    frame_count = [0]

    def on_detection(det: TargetDetection, image: np.ndarray):
        # Save every 5th frame
        if frame_count[0] % 5 == 0 and image is not None and image.size > 0:
            path = f"/tmp/servo_{frame_count[0]:03d}.jpg"
            cv2.imwrite(path, image)
        frame_count[0] += 1

    return VisualServoController(
        capture_fn=capture,
        move_fn=move,
        stop_fn=stop,
        gripper_open_fn=gripper_open,
        gripper_close_fn=gripper_close,
        arm_down_fn=arm_down,
        arm_up_fn=arm_up,
        on_state_change=on_state,
        on_detection=on_detection,
    )

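# Note on the factory above: cmd() drives the MechDog by writing Python
# source lines over the serial port, i.e. it assumes the firmware exposes an
# interactive (MicroPython-style) REPL with HW_MechDog importable. The
# hard-coded servo IDs and pulse widths come from the calibration noted in
# the CORRECTED comments in arm_down/arm_up.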
if __name__ == "__main__":
    print("Visual Servoing Demo")
    print("=" * 50)

    controller = create_mechdog_servo_controller()

    if controller:
        success = controller.pickup_target()
        print(f"\nResult: {'SUCCESS' if success else 'FAILED'}")
        print(f"Total iterations: {controller.iterations}")
    else:
        print("Failed to create controller")