foodforthought-cli 0.2.8__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +12 -0
- ate/behaviors/approach.py +399 -0
- ate/cli.py +855 -4551
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +399 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +18 -6
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +360 -24
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +16 -0
- ate/interfaces/base.py +2 -0
- ate/interfaces/sensors.py +247 -0
- ate/llm_proxy.py +239 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +42 -3
- ate/recording/session.py +12 -2
- ate/recording/visual.py +416 -0
- ate/robot/__init__.py +142 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +88 -3
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +143 -11
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +104 -2
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +6 -0
- ate/robot/registry.py +5 -2
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +285 -3
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +9 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +1 -1
- foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
|
"""
Trash detection for cleanup robots.

Combines multiple detection strategies:
1. Color detection (bottles, cans, wrappers)
2. Shape heuristics (round objects, rectangular objects)
3. Location heuristics (ground-level objects)

This is a practical starting point that works without ML models.
For production, integrate with YOLO or similar.
"""

from dataclasses import dataclass
from typing import List, Optional, Any, Dict, Set

import time

from .base import DetectorBase, Detection, BoundingBox
from .color_detector import ColorDetector, ColorRange, PREDEFINED_COLORS, HAS_PIL


# Trash-specific color ranges keyed by a descriptive name.
# NOTE(review): ColorRange positional args appear to be
# (label, hue_min, hue_max, sat_min, sat_max, val_min, val_max) on an
# OpenCV-style 0-179 hue scale ("metal" spans the full 0-179 range with low
# saturation) — confirm against ColorRange in color_detector.py.
TRASH_COLORS: Dict[str, ColorRange] = {
    # Plastic bottles (often blue/green/clear)
    "plastic_blue": ColorRange("plastic", 90, 120, 30, 200, 100, 255),
    "plastic_green": ColorRange("plastic", 40, 80, 30, 200, 100, 255),

    # Metal cans (silver/gray with reflections): any hue, low saturation
    "metal": ColorRange("metal", 0, 179, 0, 50, 100, 220),

    # Wrappers/packaging (bright colors) reuse the generic predefined ranges
    "wrapper_red": PREDEFINED_COLORS["red"],
    "wrapper_yellow": PREDEFINED_COLORS["yellow"],
    "wrapper_orange": PREDEFINED_COLORS["orange"],

    # Paper/cardboard (brown/tan)
    "cardboard": ColorRange("cardboard", 10, 25, 30, 150, 80, 200),
}
@dataclass
class TrashItem:
    """A detected piece of trash, pairing a raw detection with classification."""
    detection: Detection   # Underlying detection (bbox, label, confidence, metadata)
    trash_type: str        # bottle, can, wrapper, paper, plastic, metal, unknown
    confidence: float      # Combined confidence (currently the detector's confidence)
    is_on_ground: bool     # True when the object's center falls in the lower "ground" band
    size_category: str     # small, medium, large (area relative to the whole image)
    priority: float        # Pickup priority in [0, 1]; higher means pick first
class TrashDetector(DetectorBase):
    """
    Specialized detector for trash/litter.

    Designed for cleanup robots - prioritizes:
    - Ground-level objects
    - Common litter types (bottles, cans, wrappers)
    - Pickable size objects

    Example:
        detector = TrashDetector()

        # Detect all trash
        items = detector.detect_trash(image)

        # Find nearest pickable trash
        nearest = detector.find_pickable(image)
        if nearest:
            print(f"Found {nearest.trash_type} at {nearest.detection.center}")
    """

    def __init__(
        self,
        min_area: int = 300,
        max_area: int = 50000,
        ground_threshold: float = 0.6,  # Bottom 60% of image counts as "ground"
        confidence_threshold: float = 0.3,
    ):
        """
        Initialize trash detector.

        Args:
            min_area: Minimum object area in pixels
            max_area: Maximum object area (too big = not trash)
            ground_threshold: Fraction of image height considered "ground"
            confidence_threshold: Minimum confidence to report

        Raises:
            ImportError: If Pillow is unavailable (required by ColorDetector).
        """
        if not HAS_PIL:
            raise ImportError(
                "TrashDetector requires Pillow. Install with: pip install 'foodforthought-cli[detection]'"
            )

        self.min_area = min_area
        self.max_area = max_area
        self.ground_threshold = ground_threshold
        self.confidence_threshold = confidence_threshold

        # Delegate the actual pixel-level detection to the color detector.
        self._color_detector = ColorDetector(
            min_area=min_area,
            max_area=max_area,
        )

    def detect(self, image: Any, **kwargs) -> List[Detection]:
        """Detect all colored objects (satisfies the DetectorBase interface)."""
        return self._color_detector.detect(image, **kwargs)

    def detect_class(self, image: Any, class_name: str, **kwargs) -> List[Detection]:
        """Detect objects of a specific class; "trash" maps to detect_trash()."""
        if class_name == "trash":
            items = self.detect_trash(image)
            return [item.detection for item in items]
        return self._color_detector.detect_class(image, class_name, **kwargs)

    def detect_trash(self, image: Any) -> List[TrashItem]:
        """
        Detect trash items in image.

        Returns:
            List of TrashItem with classification and priority, sorted by
            priority (highest first). Items whose confidence is below
            ``confidence_threshold`` are dropped.
        """
        # Get image dimensions; bail out on a degenerate image.
        width, height = self._get_dimensions(image)
        if width == 0:
            return []

        # Detect using only the trash-specific color ranges.
        color_ranges = list(TRASH_COLORS.values())
        detections = self._color_detector.detect(image, color_ranges=color_ranges)

        # Convert to TrashItems with classification, filtering low confidence.
        items = []
        for det in detections:
            item = self._classify_trash(det, width, height)
            if item.confidence >= self.confidence_threshold:
                items.append(item)

        # Sort by priority (highest first)
        items.sort(key=lambda x: x.priority, reverse=True)

        return items

    def find_pickable(
        self,
        image: Any,
        reference_point: Optional[tuple] = None,
        max_size: str = "large",
    ) -> Optional[TrashItem]:
        """
        Find the best trash item to pick up.

        Considers:
        - Distance from reference point (or image bottom center)
        - Size (pickable range)
        - Priority

        Args:
            image: Input image
            reference_point: (x, y) reference point (default: bottom center)
            max_size: Maximum size category to consider ("small"/"medium"/"large";
                unknown values are treated as "large")

        Returns:
            Best TrashItem to pick, or None if nothing suitable was found
        """
        items = self.detect_trash(image)
        if not items:
            return None

        # Filter out items larger than the requested size category.
        size_order = ["small", "medium", "large"]
        max_idx = size_order.index(max_size) if max_size in size_order else 2
        items = [i for i in items if size_order.index(i.size_category) <= max_idx]

        if not items:
            return None

        # Default reference: bottom center of image (roughly where the robot is).
        width, height = self._get_dimensions(image)
        if reference_point is None:
            reference_point = (width // 2, int(height * 0.9))

        # Image diagonal is loop-invariant; hoist it out of the scoring closure.
        max_dist = (width**2 + height**2)**0.5

        def score(item):
            # Distance score (closer to the reference point is better).
            cx, cy = item.detection.center
            dist = ((cx - reference_point[0])**2 + (cy - reference_point[1])**2)**0.5
            dist_score = 1.0 - (dist / max_dist)

            # Weighted blend of intrinsic priority and proximity.
            return 0.6 * item.priority + 0.4 * dist_score

        return max(items, key=score)

    def _classify_trash(self, det: Detection, width: int, height: int) -> TrashItem:
        """Classify a detection as a trash type and assign a pickup priority."""
        bbox = det.bbox
        _, cy = bbox.center  # only the vertical position matters here
        area = bbox.area

        # On ground = center lies in the lower ground_threshold band of the image.
        is_on_ground = cy > height * (1 - self.ground_threshold)

        # Classify size by area relative to the whole image.
        image_area = width * height
        area_ratio = area / image_area
        if area_ratio < 0.01:
            size_category = "small"
        elif area_ratio < 0.05:
            size_category = "medium"
        else:
            size_category = "large"

        # Classify trash type based on color and bounding-box aspect ratio.
        color = det.metadata.get("color", det.label)
        aspect = bbox.width / max(bbox.height, 1)  # guard divide-by-zero

        if "plastic" in color or det.label in ["blue", "green"]:
            if 0.3 < aspect < 0.6:  # Tall and thin = bottle
                trash_type = "bottle"
            else:
                trash_type = "plastic"
        elif "metal" in color or det.label == "gray":
            if 0.7 < aspect < 1.3:  # Round-ish = can
                trash_type = "can"
            else:
                trash_type = "metal"
        elif "cardboard" in color:
            trash_type = "paper"
        elif det.label in ["red", "yellow", "orange"]:
            trash_type = "wrapper"
        else:
            trash_type = "unknown"

        # Priority starts at the detector's confidence and gets multiplicative
        # bonuses for ground-level, pickable-size, and easy-to-grasp items.
        priority = det.confidence

        if is_on_ground:
            priority *= 1.3

        if size_category == "medium":
            priority *= 1.2
        elif size_category == "small":
            priority *= 1.1
        # Large items get no bonus (harder to pick)

        # Certain types are easier to pick
        if trash_type in ["bottle", "can"]:
            priority *= 1.1

        priority = min(priority, 1.0)

        return TrashItem(
            detection=det,
            trash_type=trash_type,
            confidence=det.confidence,
            is_on_ground=is_on_ground,
            size_category=size_category,
            priority=priority,
        )

    def _get_dimensions(self, image: Any) -> tuple:
        """Return (width, height) for an image, defaulting to VGA if unknown."""
        if hasattr(image, 'width') and hasattr(image, 'height'):
            return (image.width, image.height)

        # NOTE(review): PIL images expose .width/.height, so the branch above
        # normally catches them; this is a belt-and-braces fallback.
        try:
            from PIL import Image as PILImage
            if isinstance(image, PILImage.Image):
                return image.size
        except ImportError:
            pass

        return (640, 480)  # Default VGA
def demo_detection(camera_or_image: Any = None):
    """
    Demo the trash detector.

    Can be run standalone for testing:
        python -m ate.detection.trash_detector

    With no argument, prints usage help; given an image, runs detection on it
    and reports every item found plus the best pickup candidate.
    """
    detector = TrashDetector()

    if camera_or_image is None:
        # No image supplied: print usage help and the supported categories.
        help_text = "\n".join([
            "TrashDetector Demo",
            "=" * 40,
            "\nUsage:",
            "  from ate.detection import TrashDetector",
            "  detector = TrashDetector()",
            "  items = detector.detect_trash(image)",
            "  pickable = detector.find_pickable(image)",
            "\nTrash types detected:",
            "  - bottle (plastic bottles)",
            "  - can (metal cans)",
            "  - wrapper (colorful packaging)",
            "  - paper (cardboard, paper)",
            "  - plastic (other plastic items)",
            "  - metal (other metal items)",
            "  - unknown (unclassified)",
        ])
        print(help_text)
        return

    # Run detection on the provided image and report each item.
    found = detector.detect_trash(camera_or_image)
    print(f"\nFound {len(found)} trash items:")
    for trash in found:
        cx, cy = trash.detection.center
        print(f"  {trash.trash_type}: ({cx}, {cy}) - {trash.size_category}, priority={trash.priority:.2f}")

    # Report the single best pickup candidate, if any.
    best = detector.find_pickable(camera_or_image)
    if best:
        cx, cy = best.detection.center
        print(f"\nBest to pick: {best.trash_type} at ({cx}, {cy})")


if __name__ == "__main__":
    demo_detection()
|
ate/drivers/__init__.py
CHANGED
|
@@ -4,24 +4,36 @@ Hardware drivers for specific robots.
|
|
|
4
4
|
Each driver implements the appropriate interfaces from ate.interfaces.
|
|
5
5
|
|
|
6
6
|
Example:
|
|
7
|
-
from ate.drivers import MechDogDriver
|
|
7
|
+
from ate.drivers import MechDogDriver, MechDogConfig
|
|
8
8
|
|
|
9
|
-
# Connect to MechDog
|
|
10
|
-
|
|
9
|
+
# Connect to MechDog with camera
|
|
10
|
+
config = MechDogConfig(
|
|
11
|
+
port="/dev/cu.usbserial-10",
|
|
12
|
+
has_camera=True,
|
|
13
|
+
camera_ip="192.168.1.100"
|
|
14
|
+
)
|
|
15
|
+
dog = MechDogDriver(config=config)
|
|
11
16
|
dog.connect()
|
|
12
17
|
|
|
13
18
|
# Use through abstract interface
|
|
14
19
|
dog.stand()
|
|
15
20
|
dog.walk(Vector3.forward(), speed=0.3)
|
|
16
21
|
dog.set_body_height(0.15)
|
|
17
|
-
dog.stop()
|
|
18
22
|
|
|
19
|
-
#
|
|
23
|
+
# Capture image from visual module
|
|
24
|
+
image = dog.get_image()
|
|
25
|
+
|
|
26
|
+
dog.stop()
|
|
20
27
|
dog.disconnect()
|
|
21
28
|
"""
|
|
22
29
|
|
|
23
|
-
from .mechdog import MechDogDriver
|
|
30
|
+
from .mechdog import MechDogDriver, MechDogConfig
|
|
31
|
+
from .wifi_camera import WiFiCamera, WiFiCameraConfig, discover_cameras
|
|
24
32
|
|
|
25
33
|
__all__ = [
|
|
26
34
|
"MechDogDriver",
|
|
35
|
+
"MechDogConfig",
|
|
36
|
+
"WiFiCamera",
|
|
37
|
+
"WiFiCameraConfig",
|
|
38
|
+
"discover_cameras",
|
|
27
39
|
]
|