foodforthought-cli 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Files changed (116)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +12 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/cli.py +855 -4551
  9. ate/client.py +90 -0
  10. ate/commands/__init__.py +168 -0
  11. ate/commands/auth.py +389 -0
  12. ate/commands/bridge.py +448 -0
  13. ate/commands/data.py +185 -0
  14. ate/commands/deps.py +111 -0
  15. ate/commands/generate.py +384 -0
  16. ate/commands/memory.py +907 -0
  17. ate/commands/parts.py +166 -0
  18. ate/commands/primitive.py +399 -0
  19. ate/commands/protocol.py +288 -0
  20. ate/commands/recording.py +524 -0
  21. ate/commands/repo.py +154 -0
  22. ate/commands/simulation.py +291 -0
  23. ate/commands/skill.py +303 -0
  24. ate/commands/skills.py +487 -0
  25. ate/commands/team.py +147 -0
  26. ate/commands/workflow.py +271 -0
  27. ate/detection/__init__.py +38 -0
  28. ate/detection/base.py +142 -0
  29. ate/detection/color_detector.py +402 -0
  30. ate/detection/trash_detector.py +322 -0
  31. ate/drivers/__init__.py +18 -6
  32. ate/drivers/ble_transport.py +405 -0
  33. ate/drivers/mechdog.py +360 -24
  34. ate/drivers/wifi_camera.py +477 -0
  35. ate/interfaces/__init__.py +16 -0
  36. ate/interfaces/base.py +2 -0
  37. ate/interfaces/sensors.py +247 -0
  38. ate/llm_proxy.py +239 -0
  39. ate/memory/__init__.py +35 -0
  40. ate/memory/cloud.py +244 -0
  41. ate/memory/context.py +269 -0
  42. ate/memory/embeddings.py +184 -0
  43. ate/memory/export.py +26 -0
  44. ate/memory/merge.py +146 -0
  45. ate/memory/migrate/__init__.py +34 -0
  46. ate/memory/migrate/base.py +89 -0
  47. ate/memory/migrate/pipeline.py +189 -0
  48. ate/memory/migrate/sources/__init__.py +13 -0
  49. ate/memory/migrate/sources/chroma.py +170 -0
  50. ate/memory/migrate/sources/pinecone.py +120 -0
  51. ate/memory/migrate/sources/qdrant.py +110 -0
  52. ate/memory/migrate/sources/weaviate.py +160 -0
  53. ate/memory/reranker.py +353 -0
  54. ate/memory/search.py +26 -0
  55. ate/memory/store.py +548 -0
  56. ate/recording/__init__.py +42 -3
  57. ate/recording/session.py +12 -2
  58. ate/recording/visual.py +416 -0
  59. ate/robot/__init__.py +142 -0
  60. ate/robot/agentic_servo.py +856 -0
  61. ate/robot/behaviors.py +493 -0
  62. ate/robot/ble_capture.py +1000 -0
  63. ate/robot/ble_enumerate.py +506 -0
  64. ate/robot/calibration.py +88 -3
  65. ate/robot/calibration_state.py +388 -0
  66. ate/robot/commands.py +143 -11
  67. ate/robot/direction_calibration.py +554 -0
  68. ate/robot/discovery.py +104 -2
  69. ate/robot/llm_system_id.py +654 -0
  70. ate/robot/locomotion_calibration.py +508 -0
  71. ate/robot/marker_generator.py +611 -0
  72. ate/robot/perception.py +502 -0
  73. ate/robot/primitives.py +614 -0
  74. ate/robot/profiles.py +6 -0
  75. ate/robot/registry.py +5 -2
  76. ate/robot/servo_mapper.py +1153 -0
  77. ate/robot/skill_upload.py +285 -3
  78. ate/robot/target_calibration.py +500 -0
  79. ate/robot/teach.py +515 -0
  80. ate/robot/types.py +242 -0
  81. ate/robot/visual_labeler.py +9 -0
  82. ate/robot/visual_servo_loop.py +494 -0
  83. ate/robot/visual_servoing.py +570 -0
  84. ate/robot/visual_system_id.py +906 -0
  85. ate/transports/__init__.py +121 -0
  86. ate/transports/base.py +394 -0
  87. ate/transports/ble.py +405 -0
  88. ate/transports/hybrid.py +444 -0
  89. ate/transports/serial.py +345 -0
  90. ate/urdf/__init__.py +30 -0
  91. ate/urdf/capture.py +582 -0
  92. ate/urdf/cloud.py +491 -0
  93. ate/urdf/collision.py +271 -0
  94. ate/urdf/commands.py +708 -0
  95. ate/urdf/depth.py +360 -0
  96. ate/urdf/inertial.py +312 -0
  97. ate/urdf/kinematics.py +330 -0
  98. ate/urdf/lifting.py +415 -0
  99. ate/urdf/meshing.py +300 -0
  100. ate/urdf/models/__init__.py +110 -0
  101. ate/urdf/models/depth_anything.py +253 -0
  102. ate/urdf/models/sam2.py +324 -0
  103. ate/urdf/motion_analysis.py +396 -0
  104. ate/urdf/pipeline.py +468 -0
  105. ate/urdf/scale.py +256 -0
  106. ate/urdf/scan_session.py +411 -0
  107. ate/urdf/segmentation.py +299 -0
  108. ate/urdf/synthesis.py +319 -0
  109. ate/urdf/topology.py +336 -0
  110. ate/urdf/validation.py +371 -0
  111. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/METADATA +1 -1
  112. foodforthought_cli-0.3.1.dist-info/RECORD +166 -0
  113. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/WHEEL +1 -1
  114. foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
  115. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/entry_points.txt +0 -0
  116. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/top_level.txt +0 -0
ate/interfaces/sensors.py ADDED
@@ -0,0 +1,247 @@
+ """
+ Sensor interfaces for robot perception.
+
+ These interfaces abstract different sensor types so behaviors
+ can work across robots with different sensor configurations.
+
+ Design principle: A behavior that needs distance sensing can use
+ ANY implementation of DistanceSensorInterface - ultrasonic, lidar,
+ depth camera, or even visual estimation.
+ """
+
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from typing import Optional, List, Tuple
+ from enum import Enum, auto
+
+ from .types import ActionResult
+
+
+ class DistanceSensorType(Enum):
+     """Type of distance sensor."""
+     ULTRASONIC = auto()         # HC-SR04, etc.
+     INFRARED = auto()           # IR proximity
+     LIDAR_SINGLE = auto()       # Single-point lidar
+     LIDAR_2D = auto()           # 2D scanning lidar
+     DEPTH_CAMERA = auto()       # Depth camera center point
+     VISUAL_ESTIMATION = auto()  # Estimated from RGB camera
+
+
+ @dataclass
+ class DistanceReading:
+     """A distance measurement."""
+     distance: float      # Distance in meters
+     valid: bool = True   # Whether reading is valid
+     sensor_type: DistanceSensorType = DistanceSensorType.ULTRASONIC
+     timestamp: float = 0.0   # Seconds since epoch
+     confidence: float = 1.0  # Confidence in reading (0-1)
+
+     @classmethod
+     def invalid(cls) -> "DistanceReading":
+         """Create an invalid reading."""
+         return cls(distance=0.0, valid=False)
+
+
+ class DistanceSensorInterface(ABC):
+     """
+     Interface for distance/proximity sensing.
+
+     Can be implemented by:
+     - Ultrasonic sensors (HC-SR04, etc.)
+     - IR proximity sensors
+     - Single-point lidar
+     - Depth camera (center pixel or ROI average)
+     - Visual estimation (object size in image)
+
+     This abstraction allows behaviors like "approach target" to work
+     on any robot with any distance sensing capability.
+
+     Example:
+         # Works with any implementation
+         sensor: DistanceSensorInterface = robot.get_distance_sensor()
+
+         while True:
+             reading = sensor.get_distance()
+             if reading.valid and reading.distance < 0.15:
+                 robot.stop()
+                 break
+             robot.walk_forward(speed=0.1)
+     """
+
+     @abstractmethod
+     def get_distance(self) -> DistanceReading:
+         """
+         Get current distance reading.
+
+         Returns:
+             DistanceReading with distance in meters
+         """
+         pass
+
+     @abstractmethod
+     def get_min_range(self) -> float:
+         """
+         Get minimum measurable range in meters.
+
+         Readings below this distance are unreliable.
+         """
+         pass
+
+     @abstractmethod
+     def get_max_range(self) -> float:
+         """
+         Get maximum measurable range in meters.
+
+         Readings above this distance are unreliable.
+         """
+         pass
+
+     def get_sensor_type(self) -> DistanceSensorType:
+         """Get the type of distance sensor."""
+         return DistanceSensorType.ULTRASONIC  # Default
+
+     def is_obstacle_detected(self, threshold: float = 0.20) -> bool:
+         """
+         Check if an obstacle is within threshold distance.
+
+         Args:
+             threshold: Distance threshold in meters
+
+         Returns:
+             True if obstacle detected within threshold
+         """
+         reading = self.get_distance()
+         return reading.valid and reading.distance < threshold
+
+
+ class VisualDistanceEstimator(DistanceSensorInterface):
+     """
+     Estimate distance from visual object size.
+
+     When no hardware distance sensor is available, we can estimate
+     distance based on the apparent size of a detected object.
+
+     This uses the pinhole camera model:
+         distance = (known_size * focal_length) / apparent_size
+
+     For unknown objects, we use heuristics based on object category.
+     """
+
+     # Typical object sizes in meters (width)
+     TYPICAL_SIZES = {
+         "bottle": 0.07,
+         "can": 0.065,
+         "wrapper": 0.10,
+         "paper": 0.20,
+         "plastic": 0.10,
+         "metal": 0.08,
+         "unknown": 0.10,
+     }
+
+     def __init__(
+         self,
+         image_width: int = 640,
+         image_height: int = 480,
+         fov_horizontal: float = 60.0,  # Degrees
+     ):
+         """
+         Initialize visual distance estimator.
+
+         Args:
+             image_width: Camera image width in pixels
+             image_height: Camera image height in pixels
+             fov_horizontal: Horizontal field of view in degrees
+         """
+         self.image_width = image_width
+         self.image_height = image_height
+         self.fov_horizontal = fov_horizontal
+
+         # Calculate focal length in pixels
+         import math
+         self.focal_length = image_width / (2 * math.tan(math.radians(fov_horizontal / 2)))
+
+         self._last_reading: Optional[DistanceReading] = None
+
+     def estimate_from_detection(
+         self,
+         bbox_width: int,
+         object_type: str = "unknown",
+         known_size: Optional[float] = None,
+     ) -> DistanceReading:
+         """
+         Estimate distance from detected object bounding box.
+
+         Args:
+             bbox_width: Width of bounding box in pixels
+             object_type: Type of object (for size lookup)
+             known_size: Known real-world size in meters (overrides lookup)
+
+         Returns:
+             DistanceReading with estimated distance
+         """
+         import time
+
+         if bbox_width <= 0:
+             return DistanceReading.invalid()
+
+         # Get object size
+         real_size = known_size or self.TYPICAL_SIZES.get(object_type, 0.10)
+
+         # Estimate distance using pinhole model
+         distance = (real_size * self.focal_length) / bbox_width
+
+         # Confidence decreases with smaller bounding boxes (more uncertainty)
+         confidence = min(1.0, bbox_width / 50.0)
+
+         reading = DistanceReading(
+             distance=distance,
+             valid=True,
+             sensor_type=DistanceSensorType.VISUAL_ESTIMATION,
+             timestamp=time.time(),
+             confidence=confidence,
+         )
+
+         self._last_reading = reading
+         return reading
+
+     def get_distance(self) -> DistanceReading:
+         """Get last estimated distance."""
+         if self._last_reading:
+             return self._last_reading
+         return DistanceReading.invalid()
+
+     def get_min_range(self) -> float:
+         return 0.1  # 10cm minimum
+
+     def get_max_range(self) -> float:
+         return 5.0  # 5m maximum (very rough beyond this)
+
+     def get_sensor_type(self) -> DistanceSensorType:
+         return DistanceSensorType.VISUAL_ESTIMATION
+
+
+ @dataclass
+ class ProximitySensorReading:
+     """Reading from proximity/bump sensors."""
+     triggered: bool   # Whether sensor is triggered
+     sensor_id: str    # Sensor identifier
+     timestamp: float = 0.0
+
+
+ class ProximitySensorInterface(ABC):
+     """
+     Interface for proximity/bump sensors.
+
+     These are simple binary sensors that detect contact or very close proximity.
+     Common on wheeled robots and robot vacuums.
+     """
+
+     @abstractmethod
+     def get_proximity_sensors(self) -> List[ProximitySensorReading]:
+         """Get readings from all proximity sensors."""
+         pass
+
+     @abstractmethod
+     def is_any_triggered(self) -> bool:
+         """Check if any proximity sensor is triggered."""
+         pass
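
The pinhole-model arithmetic in VisualDistanceEstimator above can be sanity-checked by hand. A minimal sketch, using the module's default camera parameters and an illustrative detection (the 100 px bottle is a made-up input, not data from the package):

import math

image_width = 640       # pixels (constructor default above)
fov_horizontal = 60.0   # degrees (constructor default above)

# focal_length = w / (2 * tan(fov/2)) = 640 / (2 * tan(30 deg)) ~ 554.3 px
focal_length = image_width / (2 * math.tan(math.radians(fov_horizontal / 2)))

# A bottle (TYPICAL_SIZES["bottle"] = 0.07 m) spanning a 100 px bounding box:
# distance = (0.07 * 554.3) / 100 ~ 0.39 m
distance = (0.07 * focal_length) / 100
print(f"focal={focal_length:.1f}px  distance={distance:.2f}m")

The same numbers fall out of estimate_from_detection(bbox_width=100, object_type="bottle"), with confidence capped at 1.0 since 100 px exceeds the 50 px threshold.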
ate/llm_proxy.py ADDED
@@ -0,0 +1,239 @@
+ """
+ LLM Proxy - Routes AI requests through FoodforThought edge function.
+
+ Benefits:
+ - No API keys needed on client
+ - Automatic usage metering per user
+ - Rate limiting (50/week free, 500/week pro)
+ - Billing integration via usage_logs table
+
+ Usage:
+     from ate.llm_proxy import LLMProxy
+
+     proxy = LLMProxy()
+     response = proxy.chat(
+         messages=[{"role": "user", "content": "Hello"}],
+         model="claude-3-5-haiku-20241022",
+         max_tokens=150,
+     )
+ """
+
+ import json
+ from pathlib import Path
+ from typing import Dict, Any, List, Optional
+ from dataclasses import dataclass
+
+ try:
+     import requests
+     HAS_REQUESTS = True
+ except ImportError:
+     HAS_REQUESTS = False
+
+
+ # Configuration
+ EDGE_FUNCTION_URL = "https://tbkczrruqxopscwqxntr.supabase.co/functions/v1/chat-proxy"
+ CONFIG_DIR = Path.home() / ".ate"
+ CONFIG_FILE = CONFIG_DIR / "config.json"
+
+
+ @dataclass
+ class LLMResponse:
+     """Parsed response from LLM."""
+     content: str
+     input_tokens: int
+     output_tokens: int
+     model: str
+     stop_reason: str
+
+
+ class LLMProxyError(Exception):
+     """Error from LLM proxy."""
+     def __init__(self, message: str, status_code: Optional[int] = None, details: Optional[str] = None):
+         super().__init__(message)
+         self.status_code = status_code
+         self.details = details
+
+
+ class LLMProxy:
+     """
+     Proxy for LLM requests through FoodforThought edge function.
+
+     Handles authentication, rate limiting, and usage tracking automatically.
+     """
+
+     def __init__(self, access_token: Optional[str] = None):
+         """
+         Initialize proxy.
+
+         Args:
+             access_token: Optional auth token. If not provided, reads from CLI config.
+         """
+         if not HAS_REQUESTS:
+             raise ImportError("requests module required. Install with: pip install requests")
+
+         self.access_token = access_token or self._load_token()
+         self.base_url = EDGE_FUNCTION_URL
+
+     def _load_token(self) -> Optional[str]:
+         """Load access token from CLI config file."""
+         if not CONFIG_FILE.exists():
+             return None
+
+         try:
+             with open(CONFIG_FILE) as f:
+                 config = json.load(f)
+             return config.get("access_token")
+         except Exception:
+             return None
+
+     def chat(
+         self,
+         messages: List[Dict[str, Any]],
+         model: str = "claude-3-5-haiku-20241022",
+         max_tokens: int = 1024,
+         temperature: float = 0.7,
+         system: Optional[str] = None,
+         stream: bool = False,
+     ) -> LLMResponse:
+         """
+         Send chat completion request through edge function.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content'
+             model: Model to use (default: claude-3-5-haiku-20241022)
+             max_tokens: Maximum tokens in response
+             temperature: Sampling temperature
+             system: Optional system prompt
+             stream: Whether to stream (not yet supported)
+
+         Returns:
+             LLMResponse with content and token usage
+
+         Raises:
+             LLMProxyError: On API errors or auth issues
+         """
+         if stream:
+             raise NotImplementedError("Streaming not yet implemented in proxy")
+
+         # Build request
+         all_messages = []
+         if system:
+             all_messages.append({"role": "system", "content": system})
+         all_messages.extend(messages)
+
+         payload = {
+             "messages": all_messages,
+             "model": model,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+             "stream": False,
+         }
+
+         headers = {
+             "Content-Type": "application/json",
+         }
+
+         # Add auth header if we have a token
+         if self.access_token:
+             headers["Authorization"] = f"Bearer {self.access_token}"
+         else:
+             raise LLMProxyError(
+                 "Not authenticated. Run 'ate login' first.",
+                 status_code=401,
+             )
+
+         # Make request
+         try:
+             response = requests.post(
+                 self.base_url,
+                 json=payload,
+                 headers=headers,
+                 timeout=60,
+             )
+         except requests.RequestException as e:
+             raise LLMProxyError(f"Request failed: {e}")
+
+         # Parse response
+         if response.status_code == 401:
+             raise LLMProxyError(
+                 "Authentication failed. Try 'ate login' to re-authenticate.",
+                 status_code=401,
+             )
+
+         if response.status_code == 402:
+             # Rate limit exceeded
+             data = response.json()
+             usage = data.get("usage", {})
+             raise LLMProxyError(
+                 f"Rate limit exceeded: {usage.get('current', '?')}/{usage.get('limit', '?')} requests this week. "
+                 f"Resets at {usage.get('resetsAt', 'next week')}. "
+                 f"Upgrade at {data.get('upgrade_url', 'https://artifex.kindly.fyi/upgrade')}",
+                 status_code=402,
+                 details=json.dumps(data),
+             )
+
+         if response.status_code != 200:
+             try:
+                 error_data = response.json()
+                 error_msg = error_data.get("message") or error_data.get("error") or "Unknown error"
+                 details = error_data.get("details")
+             except Exception:
+                 error_msg = response.text or "Unknown error"
+                 details = None
+
+             raise LLMProxyError(
+                 f"LLM request failed: {error_msg}",
+                 status_code=response.status_code,
+                 details=details,
+             )
+
+         # Parse successful response (Anthropic format)
+         data = response.json()
+
+         # Extract content from Anthropic response format
+         content = ""
+         if "content" in data:
+             # Anthropic format: {"content": [{"type": "text", "text": "..."}]}
+             for block in data["content"]:
+                 if block.get("type") == "text":
+                     content += block.get("text", "")
+         elif "choices" in data:
+             # OpenAI format
+             content = data["choices"][0]["message"]["content"]
+         else:
+             content = str(data)
+
+         # Extract usage
+         usage = data.get("usage", {})
+         input_tokens = usage.get("input_tokens", 0)
+         output_tokens = usage.get("output_tokens", 0)
+
+         return LLMResponse(
+             content=content,
+             input_tokens=input_tokens,
+             output_tokens=output_tokens,
+             model=data.get("model", model),
+             stop_reason=data.get("stop_reason", "unknown"),
+         )
+
+     def is_authenticated(self) -> bool:
+         """Check if we have a valid access token."""
+         return bool(self.access_token)
+
+
+ def get_proxy() -> LLMProxy:
+     """
+     Get a configured LLM proxy instance.
+
+     Returns:
+         LLMProxy instance ready to use
+
+     Raises:
+         LLMProxyError: If not authenticated
+     """
+     proxy = LLMProxy()
+     if not proxy.is_authenticated():
+         raise LLMProxyError(
+             "Not authenticated. Run 'ate login' to authenticate with FoodforThought."
+         )
+     return proxy
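
A short usage sketch based only on the module above: callers can catch LLMProxyError and branch on status_code to distinguish auth failures (401) from the weekly rate limit (402). The prompt strings here are illustrative.

from ate.llm_proxy import get_proxy, LLMProxyError

try:
    proxy = get_proxy()  # raises LLMProxyError if 'ate login' has not been run
    response = proxy.chat(
        messages=[{"role": "user", "content": "Summarize today's run."}],
        system="You are a robotics assistant.",  # prepended as a system message
        max_tokens=150,
    )
    print(response.content)
    print(f"tokens: {response.input_tokens} in / {response.output_tokens} out")
except LLMProxyError as e:
    if e.status_code == 402:
        print("Weekly quota exhausted:", e)
    else:
        print("Proxy error:", e)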
ate/memory/__init__.py ADDED
@@ -0,0 +1,35 @@
+ """ATE Memory Library - Core memory operations with memvid-sdk backend."""
+
+ from .store import MemoryStore
+ from .search import SearchResult
+ from .export import MemoryInfo
+ from .merge import merge_memories
+ from .embeddings import EmbeddingConfig, EmbeddingManager
+ from .reranker import RerankConfig, LLMReranker
+ from .context import ContextManager, MemoryContext, MemoryMetadata
+
+ # Migration module imports
+ from . import migrate
+ from .migrate import (
+     VectorRecord,
+     MigrationEstimate,
+     MigrationResult,
+     MigrationCheckpoint,
+     MigrationSource,
+     MigrationPipeline,
+     PineconeMigrationSource,
+     QdrantMigrationSource,
+     WeaviateMigrationSource,
+     ChromaMigrationSource
+ )
+
+ __all__ = [
+     'MemoryStore', 'SearchResult', 'MemoryInfo', 'merge_memories',
+     'EmbeddingConfig', 'EmbeddingManager', 'RerankConfig', 'LLMReranker',
+     'ContextManager', 'MemoryContext', 'MemoryMetadata',
+     # Migration exports
+     'migrate', 'VectorRecord', 'MigrationEstimate', 'MigrationResult',
+     'MigrationCheckpoint', 'MigrationSource', 'MigrationPipeline',
+     'PineconeMigrationSource', 'QdrantMigrationSource', 'WeaviateMigrationSource',
+     'ChromaMigrationSource'
+ ]
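
Because the package re-exports everything in __all__ at the top level, downstream code never needs to know the submodule layout. An import-only sketch (constructor signatures are not shown in this diff, so none are guessed here):

# Flat imports from the package root instead of ate.memory.migrate.sources.*
from ate.memory import MemoryStore, SearchResult, merge_memories
from ate.memory import MigrationPipeline, ChromaMigrationSource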