foodforthought-cli 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ate/__init__.py +6 -0
- ate/__main__.py +16 -0
- ate/auth/__init__.py +1 -0
- ate/auth/device_flow.py +141 -0
- ate/auth/token_store.py +96 -0
- ate/behaviors/__init__.py +12 -0
- ate/behaviors/approach.py +399 -0
- ate/cli.py +855 -4551
- ate/client.py +90 -0
- ate/commands/__init__.py +168 -0
- ate/commands/auth.py +389 -0
- ate/commands/bridge.py +448 -0
- ate/commands/data.py +185 -0
- ate/commands/deps.py +111 -0
- ate/commands/generate.py +384 -0
- ate/commands/memory.py +907 -0
- ate/commands/parts.py +166 -0
- ate/commands/primitive.py +399 -0
- ate/commands/protocol.py +288 -0
- ate/commands/recording.py +524 -0
- ate/commands/repo.py +154 -0
- ate/commands/simulation.py +291 -0
- ate/commands/skill.py +303 -0
- ate/commands/skills.py +487 -0
- ate/commands/team.py +147 -0
- ate/commands/workflow.py +271 -0
- ate/detection/__init__.py +38 -0
- ate/detection/base.py +142 -0
- ate/detection/color_detector.py +402 -0
- ate/detection/trash_detector.py +322 -0
- ate/drivers/__init__.py +18 -6
- ate/drivers/ble_transport.py +405 -0
- ate/drivers/mechdog.py +360 -24
- ate/drivers/wifi_camera.py +477 -0
- ate/interfaces/__init__.py +16 -0
- ate/interfaces/base.py +2 -0
- ate/interfaces/sensors.py +247 -0
- ate/llm_proxy.py +239 -0
- ate/memory/__init__.py +35 -0
- ate/memory/cloud.py +244 -0
- ate/memory/context.py +269 -0
- ate/memory/embeddings.py +184 -0
- ate/memory/export.py +26 -0
- ate/memory/merge.py +146 -0
- ate/memory/migrate/__init__.py +34 -0
- ate/memory/migrate/base.py +89 -0
- ate/memory/migrate/pipeline.py +189 -0
- ate/memory/migrate/sources/__init__.py +13 -0
- ate/memory/migrate/sources/chroma.py +170 -0
- ate/memory/migrate/sources/pinecone.py +120 -0
- ate/memory/migrate/sources/qdrant.py +110 -0
- ate/memory/migrate/sources/weaviate.py +160 -0
- ate/memory/reranker.py +353 -0
- ate/memory/search.py +26 -0
- ate/memory/store.py +548 -0
- ate/recording/__init__.py +42 -3
- ate/recording/session.py +12 -2
- ate/recording/visual.py +416 -0
- ate/robot/__init__.py +142 -0
- ate/robot/agentic_servo.py +856 -0
- ate/robot/behaviors.py +493 -0
- ate/robot/ble_capture.py +1000 -0
- ate/robot/ble_enumerate.py +506 -0
- ate/robot/calibration.py +88 -3
- ate/robot/calibration_state.py +388 -0
- ate/robot/commands.py +143 -11
- ate/robot/direction_calibration.py +554 -0
- ate/robot/discovery.py +104 -2
- ate/robot/llm_system_id.py +654 -0
- ate/robot/locomotion_calibration.py +508 -0
- ate/robot/marker_generator.py +611 -0
- ate/robot/perception.py +502 -0
- ate/robot/primitives.py +614 -0
- ate/robot/profiles.py +6 -0
- ate/robot/registry.py +5 -2
- ate/robot/servo_mapper.py +1153 -0
- ate/robot/skill_upload.py +285 -3
- ate/robot/target_calibration.py +500 -0
- ate/robot/teach.py +515 -0
- ate/robot/types.py +242 -0
- ate/robot/visual_labeler.py +9 -0
- ate/robot/visual_servo_loop.py +494 -0
- ate/robot/visual_servoing.py +570 -0
- ate/robot/visual_system_id.py +906 -0
- ate/transports/__init__.py +121 -0
- ate/transports/base.py +394 -0
- ate/transports/ble.py +405 -0
- ate/transports/hybrid.py +444 -0
- ate/transports/serial.py +345 -0
- ate/urdf/__init__.py +30 -0
- ate/urdf/capture.py +582 -0
- ate/urdf/cloud.py +491 -0
- ate/urdf/collision.py +271 -0
- ate/urdf/commands.py +708 -0
- ate/urdf/depth.py +360 -0
- ate/urdf/inertial.py +312 -0
- ate/urdf/kinematics.py +330 -0
- ate/urdf/lifting.py +415 -0
- ate/urdf/meshing.py +300 -0
- ate/urdf/models/__init__.py +110 -0
- ate/urdf/models/depth_anything.py +253 -0
- ate/urdf/models/sam2.py +324 -0
- ate/urdf/motion_analysis.py +396 -0
- ate/urdf/pipeline.py +468 -0
- ate/urdf/scale.py +256 -0
- ate/urdf/scan_session.py +411 -0
- ate/urdf/segmentation.py +299 -0
- ate/urdf/synthesis.py +319 -0
- ate/urdf/topology.py +336 -0
- ate/urdf/validation.py +371 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/METADATA +1 -1
- foodforthought_cli-0.3.1.dist-info/RECORD +166 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/WHEEL +1 -1
- foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/entry_points.txt +0 -0
- {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/top_level.txt +0 -0
ate/commands/workflow.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow commands for FoodforThought CLI.
|
|
3
|
+
|
|
4
|
+
Commands:
|
|
5
|
+
- ate workflow validate - Validate a workflow YAML file
|
|
6
|
+
- ate workflow run - Run a workflow
|
|
7
|
+
- ate workflow export - Export workflow to different formats
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import json
|
|
11
|
+
import random
|
|
12
|
+
import sys
|
|
13
|
+
import time
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Optional, Dict
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def workflow_validate(client, path: str) -> None:
    """Validate a workflow YAML file.

    Parses the file and checks the basic workflow schema:
      - required top-level fields: ``name`` and ``steps`` (a non-empty list)
      - every step has a unique ``id`` and a ``skill``
      - every ``depends_on`` entry references a known step ID

    Prints a validation report; exits with status 1 if the file is missing,
    is not valid YAML, or fails any schema check.

    Args:
        client: API client (unused here; kept for command-handler symmetry).
        path: Path to the workflow YAML file.
    """
    import yaml

    workflow_path = Path(path)
    if not workflow_path.exists():
        print(f"Error: File not found: {path}", file=sys.stderr)
        sys.exit(1)

    print(f"Validating workflow: {path}")

    # Parse first so YAML syntax errors get a dedicated message.  Catching
    # yaml.YAMLError directly replaces the original fragile heuristic that
    # inspected the exception's module name.
    try:
        with open(workflow_path) as f:
            workflow_data = yaml.safe_load(f)
    except yaml.YAMLError as e:
        print(f"\n✗ Invalid YAML syntax: {e}", file=sys.stderr)
        sys.exit(1)

    try:
        errors = []
        warnings = []

        # safe_load returns None for an empty document and may return any
        # scalar/sequence; the schema checks below need a mapping.
        if not isinstance(workflow_data, dict):
            errors.append("Workflow must be a YAML mapping")
            workflow_data = {}

        # Required fields
        if not workflow_data.get("name"):
            errors.append("Missing required field: name")

        steps = workflow_data.get("steps")
        if steps is None:
            errors.append("Missing required field: steps")
        elif not isinstance(steps, list):
            errors.append("Steps must be an array")
        elif len(steps) == 0:
            # Previously unreachable: an empty list is falsy, so the
            # "missing field" branch used to swallow this case.
            errors.append("Workflow must have at least one step")
        if not isinstance(steps, list):
            steps = []  # normalize so the loops below are safe

        # Validate steps
        step_ids = set()
        for i, step in enumerate(steps):
            step_id = step.get("id", f"step_{i}")

            if not step.get("id"):
                errors.append(f"Step {i+1}: Missing required field 'id'")
            elif step["id"] in step_ids:
                errors.append(f"Duplicate step ID: {step['id']}")
            step_ids.add(step_id)

            if not step.get("skill"):
                errors.append(f"Step '{step_id}': Missing required field 'skill'")

        # Check dependency references
        for step in steps:
            for dep in step.get("depends_on", []):
                if dep not in step_ids:
                    errors.append(f"Step '{step.get('id')}' depends on unknown step '{dep}'")

        # Print results
        print(f"\n{'=' * 50}")
        print("Validation Results")
        print(f"{'=' * 50}")

        print(f"\nWorkflow: {workflow_data.get('name', 'Unnamed')}")
        print(f"Version: {workflow_data.get('version', '1.0.0')}")
        print(f"Steps: {len(steps)}")

        if errors:
            print("\n✗ Validation FAILED")
            print(f"\nErrors ({len(errors)}):")
            for error in errors:
                print(f"  ✗ {error}")
            sys.exit(1)

        print("\n✓ Workflow is valid!")
        if warnings:
            print(f"\nWarnings ({len(warnings)}):")
            for warning in warnings:
                print(f"  ⚠ {warning}")

    except SystemExit:
        raise
    except Exception as e:
        # Last-resort guard for malformed step entries (e.g. a step that is
        # not a mapping); report instead of crashing the CLI.
        print(f"\n✗ Validation failed: {e}", file=sys.stderr)
        sys.exit(1)
|
|
96
|
+
|
|
97
|
+
def workflow_run(client, path: str, sim: bool, dry_run: bool) -> None:
    """Run a workflow.

    Loads the workflow YAML, then either prints the execution plan
    (``dry_run=True``) or walks the steps in order.  Simulation mode fakes
    each step's outcome; real-robot mode only reports what would execute.

    Args:
        client: API client (unused here; kept for command-handler symmetry).
        path: Path to the workflow YAML file.
        sim: Run in simulation mode instead of on a real robot.
        dry_run: Show the execution plan without running anything.
    """
    import yaml

    workflow_path = Path(path)
    if not workflow_path.exists():
        print(f"Error: File not found: {path}", file=sys.stderr)
        sys.exit(1)

    with open(workflow_path) as f:
        workflow_data = yaml.safe_load(f)

    steps = workflow_data.get("steps", [])
    mode = "Simulation" if sim else "Real Robot"

    print(f"Running workflow: {workflow_data.get('name', 'Unnamed')}")
    print(f"  Mode: {mode}")
    print(f"  Dry Run: {dry_run}")

    if dry_run:
        print("\n[DRY RUN] Execution plan:")
        for i, step in enumerate(steps):
            deps = step.get("depends_on", [])
            deps_str = f" (after: {', '.join(deps)})" if deps else ""
            print(f"  {i+1}. {step.get('id')}: {step.get('skill')}{deps_str}")
        print("\n✓ Dry run complete. No actions taken.")
        return

    separator = "=" * 50
    print("\n" + separator)
    print("Executing workflow...")
    print(separator)

    total = len(steps)
    for i, step in enumerate(steps):
        step_id = step.get("id", f"step_{i}")
        skill = step.get("skill", "unknown")

        print(f"\n[{i+1}/{total}] {step_id}")
        print(f"  Skill: {skill}")

        if not sim:
            print(f"  Status: Would execute on real robot")
            continue

        print(f"  Mode: Simulation")
        time.sleep(random.uniform(0.5, 1.5))

        # Fake a result with a ~90% success rate.
        if random.random() > 0.1:
            print(f"  Status: ✓ Completed")
        else:
            print(f"  Status: ✗ Failed")
            if step.get("on_failure") == "fail":
                print("\nWorkflow FAILED")
                sys.exit(1)

    print("\n" + separator)
    print("✓ Workflow completed successfully!")
|
153
|
+
|
|
154
|
+
def workflow_export(client, path: str, format: str, output: Optional[str]) -> None:
    """Export workflow to different formats.

    Args:
        client: API client (unused here; kept for command-handler symmetry).
        path: Path to the workflow YAML file.
        format: Target format — "ros2" (generated launch file) or "json".
        output: Output file path; when omitted, a name derived from the
            workflow's own name is used.
    """
    import yaml

    workflow_path = Path(path)
    if not workflow_path.exists():
        print(f"Error: File not found: {path}", file=sys.stderr)
        sys.exit(1)

    with open(workflow_path) as f:
        workflow_data = yaml.safe_load(f)

    print(f"Exporting workflow: {workflow_data.get('name', 'Unnamed')}")
    print(f"  Format: {format}")

    # Default output names are derived from the workflow name:
    # lowercased, with spaces turned into underscores.
    base_name = workflow_data.get('name', 'workflow').replace(' ', '_').lower()

    if format == "ros2":
        output_file = output or f"{base_name}_launch.py"
        with open(output_file, 'w') as f:
            f.write(_generate_ros2_launch(workflow_data))
        print(f"\n✓ Exported to: {output_file}")
    elif format == "json":
        output_file = output or f"{base_name}.json"
        with open(output_file, 'w') as f:
            json.dump(workflow_data, f, indent=2)
        print(f"\n✓ Exported to: {output_file}")
    else:
        print(f"Unsupported format: {format}", file=sys.stderr)
        sys.exit(1)
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _generate_ros2_launch(workflow: Dict) -> str:
|
|
191
|
+
"""Generate ROS2 launch file from workflow."""
|
|
192
|
+
steps_code = ""
|
|
193
|
+
for step in workflow.get("steps", []):
|
|
194
|
+
step_id = step.get("id", "step")
|
|
195
|
+
skill = step.get("skill", "unknown")
|
|
196
|
+
inputs = step.get("inputs", {})
|
|
197
|
+
|
|
198
|
+
inputs_str = ", ".join([f"'{k}': '{v}'" for k, v in inputs.items()])
|
|
199
|
+
|
|
200
|
+
steps_code += f'''
|
|
201
|
+
# Step: {step_id}
|
|
202
|
+
{step_id}_node = Node(
|
|
203
|
+
package='skill_executor',
|
|
204
|
+
executable='run_skill',
|
|
205
|
+
name='{step_id}',
|
|
206
|
+
parameters=[{{
|
|
207
|
+
'skill_id': '{skill}',
|
|
208
|
+
'inputs': {{{inputs_str}}},
|
|
209
|
+
}}],
|
|
210
|
+
)
|
|
211
|
+
ld.add_action({step_id}_node)
|
|
212
|
+
'''
|
|
213
|
+
|
|
214
|
+
return f'''#!/usr/bin/env python3
|
|
215
|
+
"""
|
|
216
|
+
ROS2 Launch File - {workflow.get('name', 'Workflow')}
|
|
217
|
+
Generated by FoodforThought CLI
|
|
218
|
+
|
|
219
|
+
Version: {workflow.get('version', '1.0.0')}
|
|
220
|
+
"""
|
|
221
|
+
|
|
222
|
+
from launch import LaunchDescription
|
|
223
|
+
from launch_ros.actions import Node
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def generate_launch_description():
|
|
227
|
+
ld = LaunchDescription()
|
|
228
|
+
{steps_code}
|
|
229
|
+
return ld
|
|
230
|
+
'''
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def register_parser(subparsers):
    """Register workflow commands with argparse.

    Adds the ``workflow`` command group with three sub-commands:
    ``validate``, ``run``, and ``export``.  The chosen sub-command is
    stored in ``args.workflow_action``.
    """
    parser = subparsers.add_parser("workflow", help="Manage skill workflows/pipelines")
    actions = parser.add_subparsers(dest="workflow_action", help="Workflow action")

    # ate workflow validate <path>
    validate_cmd = actions.add_parser("validate", help="Validate workflow YAML")
    validate_cmd.add_argument("path", help="Path to workflow YAML file")

    # ate workflow run <path> [--sim] [--dry-run]
    run_cmd = actions.add_parser("run", help="Run a workflow")
    run_cmd.add_argument("path", help="Path to workflow YAML file")
    run_cmd.add_argument("--sim", action="store_true",
                         help="Run in simulation mode")
    run_cmd.add_argument("--dry-run", action="store_true",
                         help="Show execution plan without running")

    # ate workflow export <path> [-f FORMAT] [-o OUTPUT]
    export_cmd = actions.add_parser("export", help="Export workflow to other formats")
    export_cmd.add_argument("path", help="Path to workflow YAML file")
    export_cmd.add_argument("-f", "--format", default="ros2",
                            choices=["ros2", "json"],
                            help="Export format (default: ros2)")
    export_cmd.add_argument("-o", "--output", help="Output file path")
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def handle(client, args):
    """Handle workflow commands.

    Dispatches on ``args.workflow_action`` to the matching sub-command
    implementation; prints usage and exits with status 1 on anything else.
    """
    dispatch = {
        "validate": lambda: workflow_validate(client, args.path),
        "run": lambda: workflow_run(client, args.path, args.sim, args.dry_run),
        "export": lambda: workflow_export(client, args.path, args.format, args.output),
    }
    action = dispatch.get(args.workflow_action)
    if action is None:
        print("Usage: ate workflow {validate|run|export}")
        sys.exit(1)
    action()
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Object detection for robotics.
|
|
3
|
+
|
|
4
|
+
Provides:
|
|
5
|
+
- Simple color-based detection (no ML dependencies)
|
|
6
|
+
- Cloud vision API integration (optional)
|
|
7
|
+
- Local model inference (requires additional setup)
|
|
8
|
+
|
|
9
|
+
Example:
|
|
10
|
+
from ate.detection import ColorDetector, detect_objects
|
|
11
|
+
from ate.drivers import MechDogDriver
|
|
12
|
+
|
|
13
|
+
dog = MechDogDriver(config=config)
|
|
14
|
+
dog.connect()
|
|
15
|
+
|
|
16
|
+
# Get image from camera
|
|
17
|
+
image = dog.get_image()
|
|
18
|
+
|
|
19
|
+
# Detect colored objects (no ML required)
|
|
20
|
+
detector = ColorDetector()
|
|
21
|
+
detections = detector.detect(image, target_colors=["red", "blue", "green"])
|
|
22
|
+
|
|
23
|
+
for det in detections:
|
|
24
|
+
print(f"Found {det.label} at ({det.x}, {det.y})")
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
from .color_detector import ColorDetector, ColorRange
|
|
28
|
+
from .base import DetectorBase, Detection, BoundingBox
|
|
29
|
+
from .trash_detector import TrashDetector
|
|
30
|
+
|
|
31
|
+
# Public names re-exported at the ate.detection package level.
__all__ = [
    "ColorDetector",
    "ColorRange",
    "DetectorBase",
    "Detection",
    "BoundingBox",
    "TrashDetector",
]
|
ate/detection/base.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base classes for object detection.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from typing import List, Optional, Tuple, Any
|
|
8
|
+
from enum import Enum, auto
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class BoundingBox:
    """Axis-aligned bounding box for a detected object.

    Coordinates are in pixels with (x, y) at the top-left corner.
    """
    x: int  # Top-left x
    y: int  # Top-left y
    width: int  # Box width
    height: int  # Box height

    @property
    def center(self) -> Tuple[int, int]:
        """Center point (cx, cy), computed with integer division."""
        cx = self.x + self.width // 2
        cy = self.y + self.height // 2
        return (cx, cy)

    @property
    def area(self) -> int:
        """Area of the box in pixels."""
        return self.height * self.width

    def to_tuple(self) -> Tuple[int, int, int, int]:
        """Return the box as an (x, y, w, h) tuple."""
        return (self.x, self.y, self.width, self.height)

    def contains(self, x: int, y: int) -> bool:
        """Return True if point (x, y) lies inside the box, edges inclusive."""
        within_x = self.x <= x <= self.x + self.width
        within_y = self.y <= y <= self.y + self.height
        return within_x and within_y
|
+
|
|
38
|
+
|
|
39
|
+
@dataclass
class Detection:
    """A detected object: class label, confidence score, and its
    pixel-space location in the source image."""
    label: str  # Object class label
    confidence: float  # Detection confidence (0-1)
    bbox: BoundingBox  # Bounding box
    mask: Optional[Any] = None  # Optional segmentation mask
    metadata: dict = field(default_factory=dict)  # Detector-specific extras (free-form)

    @property
    def center(self) -> Tuple[int, int]:
        """Get center of detection (delegates to bbox.center)."""
        return self.bbox.center

    @property
    def area(self) -> int:
        """Get area of detection in pixels (delegates to bbox.area)."""
        return self.bbox.area
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class DetectorBase(ABC):
    """
    Abstract base class for object detectors.

    Subclasses implement different detection strategies:
    - ColorDetector: Simple color-based detection
    - YOLODetector: Neural network based
    - CloudDetector: Cloud vision API

    The concrete helpers (find_nearest, find_largest) are built on top of
    the abstract detect_class().
    """

    @abstractmethod
    def detect(self, image: Any, **kwargs) -> List[Detection]:
        """
        Detect objects in image.

        Args:
            image: Input image (RGB bytes, PIL Image, or numpy array)
            **kwargs: Detector-specific options

        Returns:
            List of Detection objects
        """

    @abstractmethod
    def detect_class(self, image: Any, class_name: str, **kwargs) -> List[Detection]:
        """
        Detect objects of a specific class.

        Args:
            image: Input image
            class_name: Class to detect (e.g., "trash", "person", "bottle")
            **kwargs: Detector-specific options

        Returns:
            List of Detection objects matching the class
        """

    def find_nearest(self, image: Any, class_name: str,
                     reference_point: Optional[Tuple[int, int]] = None) -> Optional[Detection]:
        """
        Find the nearest object of a class.

        Args:
            image: Input image
            class_name: Class to find
            reference_point: Reference point; defaults to (320, 240), the
                center of an assumed 640x480 (VGA) frame, since the actual
                frame size is not known at this layer.

        Returns:
            Nearest Detection or None
        """
        # Annotation fixed: the old `Tuple[int, int] = None` was an
        # implicit Optional, which PEP 484 disallows.
        detections = self.detect_class(image, class_name)
        if not detections:
            return None

        if reference_point is None:
            # Frame dimensions are unavailable here; assume a VGA frame
            # and use its center.
            reference_point = (320, 240)

        rx, ry = reference_point

        # Squared distance is enough for argmin (sqrt is monotonic),
        # so skip the square root.
        def sq_distance(det: Detection) -> int:
            cx, cy = det.center
            return (cx - rx) ** 2 + (cy - ry) ** 2

        return min(detections, key=sq_distance)

    def find_largest(self, image: Any, class_name: str) -> Optional[Detection]:
        """
        Find the largest object of a class (by bounding-box area).

        Args:
            image: Input image
            class_name: Class to find

        Returns:
            Largest Detection or None
        """
        detections = self.detect_class(image, class_name)
        if not detections:
            return None

        return max(detections, key=lambda d: d.area)