foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +100 -0
- ate/behaviors/approach.py +399 -0
- ate/behaviors/common.py +686 -0
- ate/behaviors/tree.py +454 -0
- ate/cli.py +855 -3995
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +39 -0
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +942 -0
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +187 -0
- ate/interfaces/base.py +273 -0
- ate/interfaces/body.py +267 -0
- ate/interfaces/detection.py +282 -0
- ate/interfaces/locomotion.py +422 -0
- ate/interfaces/manipulation.py +408 -0
- ate/interfaces/navigation.py +389 -0
- ate/interfaces/perception.py +362 -0
- ate/interfaces/sensors.py +247 -0
- ate/interfaces/types.py +371 -0
- ate/llm_proxy.py +239 -0
- ate/mcp_server.py +387 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +83 -0
- ate/recording/demonstration.py +378 -0
- ate/recording/session.py +415 -0
- ate/recording/upload.py +304 -0
- ate/recording/visual.py +416 -0
- ate/recording/wrapper.py +95 -0
- ate/robot/__init__.py +221 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +668 -0
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +3735 -0
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +441 -0
- ate/robot/introspection.py +330 -0
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/manager.py +270 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +281 -0
- ate/robot/registry.py +322 -0
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +675 -0
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +1048 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,477 @@
|
|
|
1
|
+
"""
|
|
2
|
+
WiFi Camera driver for network-connected cameras.
|
|
3
|
+
|
|
4
|
+
Supports common protocols:
|
|
5
|
+
- MJPEG stream over HTTP (ESP32-CAM, many IP cameras)
|
|
6
|
+
- Snapshot endpoint (REST API)
|
|
7
|
+
- RTSP stream (professional IP cameras)
|
|
8
|
+
|
|
9
|
+
Hiwonder MechDog visual module typically uses ESP32-CAM
|
|
10
|
+
which serves MJPEG at http://<ip>:81/stream
|
|
11
|
+
and snapshots at http://<ip>/capture
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import io
|
|
15
|
+
import time
|
|
16
|
+
import threading
|
|
17
|
+
from typing import Optional, Tuple, Callable, Dict, Any
|
|
18
|
+
from dataclasses import dataclass
|
|
19
|
+
import requests
|
|
20
|
+
from urllib.parse import urljoin
|
|
21
|
+
|
|
22
|
+
from ..interfaces import (
|
|
23
|
+
CameraInterface,
|
|
24
|
+
Image,
|
|
25
|
+
ActionResult,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class WiFiCameraConfig:
    """Configuration for WiFi camera.

    Defaults match an ESP32-CAM running the stock CameraWebServer
    firmware: snapshots served on port 80 at /capture, MJPEG stream
    on port 81 at /stream (see module docstring).
    """
    ip: str                           # Camera IP address
    port: int = 80                    # HTTP port
    stream_port: int = 81             # MJPEG stream port (ESP32-CAM uses 81)
    snapshot_path: str = "/capture"   # Snapshot endpoint
    stream_path: str = "/stream"      # MJPEG stream endpoint
    username: str = ""                # Auth username (if required)
    password: str = ""                # Auth password (if required)
    timeout: float = 5.0              # Request timeout
    protocol: str = "esp32cam"        # Protocol: esp32cam, generic, rtsp
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class WiFiCamera(CameraInterface):
    """
    WiFi camera implementation.

    Connects to network cameras over HTTP.
    Tested with ESP32-CAM (common in hobby robots like MechDog).
    """

    def __init__(self, config: WiFiCameraConfig):
        """
        Initialize WiFi camera.

        Args:
            config: Camera configuration
        """
        self.config = config
        self._base_url = f"http://{config.ip}:{config.port}"
        self._stream_url = f"http://{config.ip}:{config.stream_port}{config.stream_path}"
        self._snapshot_url = f"{self._base_url}{config.snapshot_path}"

        # One session so HTTP keep-alive and auth are shared by all requests.
        self._session = requests.Session()
        if config.username and config.password:
            self._session.auth = (config.username, config.password)

        self._streaming = False
        self._stream_thread: Optional[threading.Thread] = None
        self._stream_callback: Optional[Callable[[Image], None]] = None
        self._last_frame: Optional[Image] = None
        self._frame_count = 0
        self._fps = 0.0
        self._resolution = (640, 480)  # Default, updated on first capture

        # Intrinsics (approximate for typical ESP32-CAM with OV2640)
        self._intrinsics = {
            "fx": 500.0,
            "fy": 500.0,
            "cx": 320.0,
            "cy": 240.0,
            "distortion": [0.0, 0.0, 0.0, 0.0, 0.0],
        }

    # =========================================================================
    # Internal helpers
    # =========================================================================

    def _decode_jpeg(self, jpeg_data: bytes) -> Image:
        """Decode JPEG bytes into an RGB Image.

        Falls back to passing the compressed JPEG through (encoding="jpeg")
        when PIL is not installed. Updates the cached resolution when the
        frame could be decoded.
        """
        try:
            from PIL import Image as PILImage
            pil_image = PILImage.open(io.BytesIO(jpeg_data))
            width, height = pil_image.size
            self._resolution = (width, height)

            # Convert to RGB if needed
            if pil_image.mode != "RGB":
                pil_image = pil_image.convert("RGB")

            return Image(
                data=pil_image.tobytes(),
                width=width,
                height=height,
                encoding="rgb8",
                timestamp=time.time(),
            )
        except ImportError:
            # Return raw JPEG if PIL not available
            return Image(
                data=jpeg_data,
                width=self._resolution[0],
                height=self._resolution[1],
                encoding="jpeg",
                timestamp=time.time(),
            )

    def _set_control(self, var: str, val: int, ok_msg: str, fail_msg: str) -> ActionResult:
        """Set a single ESP32-CAM control variable via the /control endpoint.

        Args:
            var: Control variable name (e.g. "quality", "hmirror")
            val: Integer value to set
            ok_msg: Message for the success result
            fail_msg: Message prefix for the error result
        """
        try:
            response = self._session.get(
                f"{self._base_url}/control?var={var}&val={val}",
                timeout=self.config.timeout
            )
            response.raise_for_status()
            return ActionResult.success(ok_msg)
        except requests.RequestException as e:
            return ActionResult.error(f"{fail_msg}: {e}")

    # =========================================================================
    # Capture
    # =========================================================================

    def get_image(self) -> Image:
        """
        Capture current frame from camera.

        Returns:
            Image with RGB data (raw JPEG when PIL is unavailable).
            On network failure, an empty Image with encoding="error" is
            returned instead of raising, so polling loops keep running.
        """
        try:
            response = self._session.get(
                self._snapshot_url,
                timeout=self.config.timeout,
                stream=True
            )
            response.raise_for_status()
            return self._decode_jpeg(response.content)
        except requests.RequestException:
            # Return empty image on error
            return Image(
                data=b"",
                width=0,
                height=0,
                encoding="error",
                timestamp=time.time(),
            )

    def get_resolution(self) -> Tuple[int, int]:
        """Get camera resolution."""
        return self._resolution

    def get_intrinsics(self) -> dict:
        """Get camera intrinsic parameters (copy; caller may mutate freely)."""
        return self._intrinsics.copy()

    def get_frame_id(self) -> str:
        """Get coordinate frame ID."""
        return "camera_link"

    def set_resolution(self, width: int, height: int) -> ActionResult:
        """
        Set camera resolution.

        ESP32-CAM supports: QQVGA(160x120), QVGA(320x240),
        CIF(400x296), VGA(640x480), SVGA(800x600), XGA(1024x768),
        SXGA(1280x1024), UXGA(1600x1200)
        """
        # Map resolution to ESP32-CAM framesize index.
        resolution_map = {
            (160, 120): 0,    # QQVGA
            (320, 240): 5,    # QVGA
            (640, 480): 8,    # VGA
            (800, 600): 9,    # SVGA
            (1024, 768): 10,  # XGA
            (1280, 1024): 11, # SXGA
            (1600, 1200): 13, # UXGA
        }

        framesize = resolution_map.get((width, height))
        if framesize is None:
            return ActionResult.error(f"Unsupported resolution: {width}x{height}")

        try:
            # ESP32-CAM control endpoint; cache the new resolution only
            # after the camera has acknowledged it.
            url = f"{self._base_url}/control?var=framesize&val={framesize}"
            response = self._session.get(url, timeout=self.config.timeout)
            response.raise_for_status()
            self._resolution = (width, height)
            return ActionResult.success(f"Resolution set to {width}x{height}")
        except requests.RequestException as e:
            return ActionResult.error(f"Failed to set resolution: {e}")

    def set_exposure(self, exposure_ms: float) -> ActionResult:
        """Set exposure time (ESP32-CAM auto-exposure control).

        aec: 0=manual, 1=auto; aec_value: exposure value (0-1200).
        The ms -> aec_value mapping (x10) is approximate.
        """
        try:
            aec_value = max(0, min(1200, int(exposure_ms * 10)))

            # Disable auto exposure first, then set the manual value.
            self._session.get(
                f"{self._base_url}/control?var=aec&val=0",
                timeout=self.config.timeout
            )
            response = self._session.get(
                f"{self._base_url}/control?var=aec_value&val={aec_value}",
                timeout=self.config.timeout
            )
            response.raise_for_status()
            return ActionResult.success(f"Exposure set to {exposure_ms}ms")
        except requests.RequestException as e:
            return ActionResult.error(f"Failed to set exposure: {e}")

    # =========================================================================
    # Streaming
    # =========================================================================

    def start_streaming(self, callback: Callable[[Image], None]) -> ActionResult:
        """Start continuous image streaming on a daemon background thread."""
        if self._streaming:
            return ActionResult.error("Already streaming")

        self._stream_callback = callback
        self._streaming = True
        self._stream_thread = threading.Thread(target=self._stream_loop, daemon=True)
        self._stream_thread.start()

        return ActionResult.success("Streaming started")

    def stop_streaming(self) -> ActionResult:
        """Stop image streaming and join the background thread (2s timeout)."""
        if not self._streaming:
            return ActionResult.error("Not streaming")

        self._streaming = False
        if self._stream_thread:
            self._stream_thread.join(timeout=2.0)
            self._stream_thread = None

        return ActionResult.success("Streaming stopped")

    def _stream_loop(self):
        """Background thread for MJPEG streaming.

        MJPEG over HTTP is a sequence of JPEG images; complete frames are
        located by scanning the byte buffer for the JPEG SOI (FFD8) and
        EOI (FFD9) markers.
        """
        response = None
        try:
            response = self._session.get(
                self._stream_url,
                stream=True,
                timeout=self.config.timeout
            )

            if response.status_code != 200:
                return

            bytes_buffer = b""
            frame_start = time.time()
            frame_count = 0

            for chunk in response.iter_content(chunk_size=1024):
                if not self._streaming:
                    break

                bytes_buffer += chunk

                # Drain every complete JPEG frame currently in the buffer
                # (a single chunk may complete more than one frame).
                while True:
                    start = bytes_buffer.find(b"\xff\xd8")  # JPEG start
                    end = bytes_buffer.find(b"\xff\xd9")    # JPEG end
                    if start == -1 or end == -1 or end <= start:
                        break

                    jpeg_data = bytes_buffer[start:end + 2]
                    bytes_buffer = bytes_buffer[end + 2:]

                    # BUGFIX: frames were previously built with an extra
                    # `format=` keyword and encoding="raw", inconsistent with
                    # the Image construction in get_image(). Decode the same
                    # way snapshots are decoded.
                    frame = self._decode_jpeg(jpeg_data)
                    self._last_frame = frame
                    frame_count += 1

                    # Refresh the FPS estimate roughly once per second.
                    elapsed = time.time() - frame_start
                    if elapsed >= 1.0:
                        self._fps = frame_count / elapsed
                        frame_count = 0
                        frame_start = time.time()

                    if self._stream_callback:
                        self._stream_callback(frame)

        except Exception as e:
            print(f"Stream error: {e}")
        finally:
            if response is not None:
                # BUGFIX: the streaming response was never closed, leaking
                # the underlying HTTP connection.
                response.close()
            self._streaming = False

    def get_fps(self) -> float:
        """Get current frame rate."""
        return self._fps

    def get_last_frame(self) -> Optional[Image]:
        """Get the last captured frame (for streaming mode)."""
        return self._last_frame

    # =========================================================================
    # Camera controls (ESP32-CAM specific)
    # =========================================================================

    def set_quality(self, quality: int) -> ActionResult:
        """
        Set JPEG quality (4-63, lower is better quality).

        Args:
            quality: JPEG quality (4-63); clamped into range
        """
        quality = max(4, min(63, quality))
        return self._set_control(
            "quality", quality,
            f"Quality set to {quality}", "Failed to set quality",
        )

    def set_brightness(self, brightness: int) -> ActionResult:
        """Set brightness (-2 to 2); clamped into range."""
        brightness = max(-2, min(2, brightness))
        return self._set_control(
            "brightness", brightness,
            f"Brightness set to {brightness}", "Failed to set brightness",
        )

    def set_contrast(self, contrast: int) -> ActionResult:
        """Set contrast (-2 to 2); clamped into range."""
        contrast = max(-2, min(2, contrast))
        return self._set_control(
            "contrast", contrast,
            f"Contrast set to {contrast}", "Failed to set contrast",
        )

    def flip_horizontal(self, flip: bool) -> ActionResult:
        """Flip image horizontally."""
        return self._set_control(
            "hmirror", 1 if flip else 0,
            f"Horizontal flip: {flip}", "Failed to flip",
        )

    def flip_vertical(self, flip: bool) -> ActionResult:
        """Flip image vertically."""
        return self._set_control(
            "vflip", 1 if flip else 0,
            f"Vertical flip: {flip}", "Failed to flip",
        )

    def enable_face_detection(self, enable: bool) -> ActionResult:
        """Enable/disable face detection (ESP32-CAM feature)."""
        return self._set_control(
            "face_detect", 1 if enable else 0,
            f"Face detection: {enable}", "Failed to set face detection",
        )

    # =========================================================================
    # Connection management
    # =========================================================================

    def is_connected(self) -> bool:
        """Check if camera is reachable (short fixed 2s probe timeout)."""
        try:
            response = self._session.get(
                f"{self._base_url}/status",
                timeout=2.0
            )
            return response.status_code == 200
        except requests.RequestException:
            return False

    def get_status(self) -> Dict[str, Any]:
        """Get camera status as parsed JSON; {"connected": False} on any failure."""
        try:
            response = self._session.get(
                f"{self._base_url}/status",
                timeout=self.config.timeout
            )
            if response.status_code == 200:
                return response.json()
        except Exception:
            pass
        return {"connected": False}

    def __repr__(self) -> str:
        return f"WiFiCamera({self.config.ip}:{self.config.port})"
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def discover_cameras(subnet: str = "192.168.1", timeout: float = 1.0) -> list:
    """
    Discover cameras on the local network.

    Probes http://<subnet>.<i>/status for i in 1..254 with a thread pool.

    Args:
        subnet: Subnet to scan (e.g., "192.168.1")
        timeout: Timeout per IP

    Returns:
        List of discovered camera IPs
    """
    import concurrent.futures

    def check_ip(ip):
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and real bugs. Only network-level
        # failures are expected here.
        try:
            response = requests.get(
                f"http://{ip}/status",
                timeout=timeout
            )
        except requests.RequestException:
            return None
        return ip if response.status_code == 200 else None

    cameras = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        ips = [f"{subnet}.{i}" for i in range(1, 255)]
        futures = [executor.submit(check_ip, ip) for ip in ips]

        for future in concurrent.futures.as_completed(futures):
            result = future.result()
            if result:
                cameras.append(result)

    return cameras
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""
|
|
2
|
+
FoodforThought Robot Interfaces
|
|
3
|
+
|
|
4
|
+
Abstract interfaces that define capabilities for any robot.
|
|
5
|
+
Implementations are hardware-specific, but skills are written against these interfaces.
|
|
6
|
+
|
|
7
|
+
Architecture:
|
|
8
|
+
RobotInterface (base)
|
|
9
|
+
├── Capabilities (mixins)
|
|
10
|
+
│ ├── Locomotion
|
|
11
|
+
│ │ ├── QuadrupedLocomotion
|
|
12
|
+
│ │ ├── BipedLocomotion
|
|
13
|
+
│ │ ├── WheeledLocomotion
|
|
14
|
+
│ │ └── AerialLocomotion
|
|
15
|
+
│ ├── Manipulation
|
|
16
|
+
│ │ ├── ArmInterface
|
|
17
|
+
│ │ └── GripperInterface
|
|
18
|
+
│ ├── Perception
|
|
19
|
+
│ │ ├── CameraInterface
|
|
20
|
+
│ │ ├── DepthCameraInterface
|
|
21
|
+
│ │ ├── LidarInterface
|
|
22
|
+
│ │ └── IMUInterface
|
|
23
|
+
│ └── Body
|
|
24
|
+
│ └── BodyPoseInterface
|
|
25
|
+
└── Safety
|
|
26
|
+
└── SafetyInterface
|
|
27
|
+
|
|
28
|
+
Example:
|
|
29
|
+
# MechDog implements:
|
|
30
|
+
class MechDogDriver(QuadrupedLocomotion, BodyPoseInterface, SafetyInterface):
|
|
31
|
+
...
|
|
32
|
+
|
|
33
|
+
# Spot with arm implements:
|
|
34
|
+
class SpotDriver(QuadrupedLocomotion, BodyPoseInterface, ArmInterface,
|
|
35
|
+
GripperInterface, CameraInterface, LidarInterface, SafetyInterface):
|
|
36
|
+
...
|
|
37
|
+
|
|
38
|
+
# Skills are written against interfaces:
|
|
39
|
+
def pick_and_place(robot: QuadrupedLocomotion & GripperInterface, target: Vector3):
|
|
40
|
+
robot.walk_to(target)
|
|
41
|
+
robot.lower_body(height=0.1)
|
|
42
|
+
robot.grasp()
|
|
43
|
+
...
|
|
44
|
+
"""
|
|
45
|
+
|
|
46
|
+
from .types import (
|
|
47
|
+
Vector3,
|
|
48
|
+
Quaternion,
|
|
49
|
+
Pose,
|
|
50
|
+
Twist,
|
|
51
|
+
JointState,
|
|
52
|
+
JointLimits,
|
|
53
|
+
Image,
|
|
54
|
+
DepthImage,
|
|
55
|
+
PointCloud,
|
|
56
|
+
IMUReading,
|
|
57
|
+
ForceTorqueReading,
|
|
58
|
+
BatteryState,
|
|
59
|
+
RobotStatus,
|
|
60
|
+
GaitType,
|
|
61
|
+
GripperState,
|
|
62
|
+
GripperStatus,
|
|
63
|
+
ActionResult,
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
from .base import (
|
|
67
|
+
RobotInterface,
|
|
68
|
+
SafetyInterface,
|
|
69
|
+
Capability,
|
|
70
|
+
RobotInfo,
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
from .locomotion import (
|
|
74
|
+
LocomotionInterface,
|
|
75
|
+
QuadrupedLocomotion,
|
|
76
|
+
BipedLocomotion,
|
|
77
|
+
WheeledLocomotion,
|
|
78
|
+
AerialLocomotion,
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
from .manipulation import (
|
|
82
|
+
ArmInterface,
|
|
83
|
+
GripperInterface,
|
|
84
|
+
DualArmInterface,
|
|
85
|
+
)
|
|
86
|
+
|
|
87
|
+
from .perception import (
|
|
88
|
+
CameraInterface,
|
|
89
|
+
DepthCameraInterface,
|
|
90
|
+
LidarInterface,
|
|
91
|
+
IMUInterface,
|
|
92
|
+
ForceTorqueInterface,
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
from .body import (
|
|
96
|
+
BodyPoseInterface,
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
from .detection import (
|
|
100
|
+
BoundingBox,
|
|
101
|
+
Detection,
|
|
102
|
+
DetectionResult,
|
|
103
|
+
ObjectDetectionInterface,
|
|
104
|
+
TrashDetectionInterface,
|
|
105
|
+
)
|
|
106
|
+
|
|
107
|
+
from .navigation import (
|
|
108
|
+
NavigationState,
|
|
109
|
+
NavigationGoal,
|
|
110
|
+
NavigationStatus,
|
|
111
|
+
Waypoint,
|
|
112
|
+
NavigationInterface,
|
|
113
|
+
SimpleNavigationInterface,
|
|
114
|
+
)
|
|
115
|
+
|
|
116
|
+
from .sensors import (
|
|
117
|
+
DistanceSensorType,
|
|
118
|
+
DistanceReading,
|
|
119
|
+
DistanceSensorInterface,
|
|
120
|
+
VisualDistanceEstimator,
|
|
121
|
+
ProximitySensorReading,
|
|
122
|
+
ProximitySensorInterface,
|
|
123
|
+
)
|
|
124
|
+
|
|
125
|
+
# Explicit public API: controls `from ate.interfaces import *` and documents
# the re-export surface of this package. Keep in sync with the imports above.
__all__ = [
    # Types
    "Vector3",
    "Quaternion",
    "Pose",
    "Twist",
    "JointState",
    "JointLimits",
    "Image",
    "DepthImage",
    "PointCloud",
    "IMUReading",
    "ForceTorqueReading",
    "BatteryState",
    "RobotStatus",
    "GaitType",
    "GripperState",
    "GripperStatus",
    "ActionResult",
    # Base
    "RobotInterface",
    "SafetyInterface",
    "Capability",
    "RobotInfo",
    # Locomotion
    "LocomotionInterface",
    "QuadrupedLocomotion",
    "BipedLocomotion",
    "WheeledLocomotion",
    "AerialLocomotion",
    # Manipulation
    "ArmInterface",
    "GripperInterface",
    "DualArmInterface",
    # Perception
    "CameraInterface",
    "DepthCameraInterface",
    "LidarInterface",
    "IMUInterface",
    "ForceTorqueInterface",
    # Body
    "BodyPoseInterface",
    # Detection (higher-level)
    "BoundingBox",
    "Detection",
    "DetectionResult",
    "ObjectDetectionInterface",
    "TrashDetectionInterface",
    # Navigation (higher-level)
    "NavigationState",
    "NavigationGoal",
    "NavigationStatus",
    "Waypoint",
    "NavigationInterface",
    "SimpleNavigationInterface",
    # Sensors (distance/proximity)
    "DistanceSensorType",
    "DistanceReading",
    "DistanceSensorInterface",
    "VisualDistanceEstimator",
    "ProximitySensorReading",
    "ProximitySensorInterface",
]
|