foodforthought-cli 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +100 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/behaviors/common.py +686 -0
  9. ate/behaviors/tree.py +454 -0
  10. ate/cli.py +855 -3995
  11. ate/client.py +90 -0
  12. ate/commands/__init__.py +168 -0
  13. ate/commands/auth.py +389 -0
  14. ate/commands/bridge.py +448 -0
  15. ate/commands/data.py +185 -0
  16. ate/commands/deps.py +111 -0
  17. ate/commands/generate.py +384 -0
  18. ate/commands/memory.py +907 -0
  19. ate/commands/parts.py +166 -0
  20. ate/commands/primitive.py +399 -0
  21. ate/commands/protocol.py +288 -0
  22. ate/commands/recording.py +524 -0
  23. ate/commands/repo.py +154 -0
  24. ate/commands/simulation.py +291 -0
  25. ate/commands/skill.py +303 -0
  26. ate/commands/skills.py +487 -0
  27. ate/commands/team.py +147 -0
  28. ate/commands/workflow.py +271 -0
  29. ate/detection/__init__.py +38 -0
  30. ate/detection/base.py +142 -0
  31. ate/detection/color_detector.py +399 -0
  32. ate/detection/trash_detector.py +322 -0
  33. ate/drivers/__init__.py +39 -0
  34. ate/drivers/ble_transport.py +405 -0
  35. ate/drivers/mechdog.py +942 -0
  36. ate/drivers/wifi_camera.py +477 -0
  37. ate/interfaces/__init__.py +187 -0
  38. ate/interfaces/base.py +273 -0
  39. ate/interfaces/body.py +267 -0
  40. ate/interfaces/detection.py +282 -0
  41. ate/interfaces/locomotion.py +422 -0
  42. ate/interfaces/manipulation.py +408 -0
  43. ate/interfaces/navigation.py +389 -0
  44. ate/interfaces/perception.py +362 -0
  45. ate/interfaces/sensors.py +247 -0
  46. ate/interfaces/types.py +371 -0
  47. ate/llm_proxy.py +239 -0
  48. ate/mcp_server.py +387 -0
  49. ate/memory/__init__.py +35 -0
  50. ate/memory/cloud.py +244 -0
  51. ate/memory/context.py +269 -0
  52. ate/memory/embeddings.py +184 -0
  53. ate/memory/export.py +26 -0
  54. ate/memory/merge.py +146 -0
  55. ate/memory/migrate/__init__.py +34 -0
  56. ate/memory/migrate/base.py +89 -0
  57. ate/memory/migrate/pipeline.py +189 -0
  58. ate/memory/migrate/sources/__init__.py +13 -0
  59. ate/memory/migrate/sources/chroma.py +170 -0
  60. ate/memory/migrate/sources/pinecone.py +120 -0
  61. ate/memory/migrate/sources/qdrant.py +110 -0
  62. ate/memory/migrate/sources/weaviate.py +160 -0
  63. ate/memory/reranker.py +353 -0
  64. ate/memory/search.py +26 -0
  65. ate/memory/store.py +548 -0
  66. ate/recording/__init__.py +83 -0
  67. ate/recording/demonstration.py +378 -0
  68. ate/recording/session.py +415 -0
  69. ate/recording/upload.py +304 -0
  70. ate/recording/visual.py +416 -0
  71. ate/recording/wrapper.py +95 -0
  72. ate/robot/__init__.py +221 -0
  73. ate/robot/agentic_servo.py +856 -0
  74. ate/robot/behaviors.py +493 -0
  75. ate/robot/ble_capture.py +1000 -0
  76. ate/robot/ble_enumerate.py +506 -0
  77. ate/robot/calibration.py +668 -0
  78. ate/robot/calibration_state.py +388 -0
  79. ate/robot/commands.py +3735 -0
  80. ate/robot/direction_calibration.py +554 -0
  81. ate/robot/discovery.py +441 -0
  82. ate/robot/introspection.py +330 -0
  83. ate/robot/llm_system_id.py +654 -0
  84. ate/robot/locomotion_calibration.py +508 -0
  85. ate/robot/manager.py +270 -0
  86. ate/robot/marker_generator.py +611 -0
  87. ate/robot/perception.py +502 -0
  88. ate/robot/primitives.py +614 -0
  89. ate/robot/profiles.py +281 -0
  90. ate/robot/registry.py +322 -0
  91. ate/robot/servo_mapper.py +1153 -0
  92. ate/robot/skill_upload.py +675 -0
  93. ate/robot/target_calibration.py +500 -0
  94. ate/robot/teach.py +515 -0
  95. ate/robot/types.py +242 -0
  96. ate/robot/visual_labeler.py +1048 -0
  97. ate/robot/visual_servo_loop.py +494 -0
  98. ate/robot/visual_servoing.py +570 -0
  99. ate/robot/visual_system_id.py +906 -0
  100. ate/transports/__init__.py +121 -0
  101. ate/transports/base.py +394 -0
  102. ate/transports/ble.py +405 -0
  103. ate/transports/hybrid.py +444 -0
  104. ate/transports/serial.py +345 -0
  105. ate/urdf/__init__.py +30 -0
  106. ate/urdf/capture.py +582 -0
  107. ate/urdf/cloud.py +491 -0
  108. ate/urdf/collision.py +271 -0
  109. ate/urdf/commands.py +708 -0
  110. ate/urdf/depth.py +360 -0
  111. ate/urdf/inertial.py +312 -0
  112. ate/urdf/kinematics.py +330 -0
  113. ate/urdf/lifting.py +415 -0
  114. ate/urdf/meshing.py +300 -0
  115. ate/urdf/models/__init__.py +110 -0
  116. ate/urdf/models/depth_anything.py +253 -0
  117. ate/urdf/models/sam2.py +324 -0
  118. ate/urdf/motion_analysis.py +396 -0
  119. ate/urdf/pipeline.py +468 -0
  120. ate/urdf/scale.py +256 -0
  121. ate/urdf/scan_session.py +411 -0
  122. ate/urdf/segmentation.py +299 -0
  123. ate/urdf/synthesis.py +319 -0
  124. ate/urdf/topology.py +336 -0
  125. ate/urdf/validation.py +371 -0
  126. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +9 -1
  127. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  128. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  129. foodforthought_cli-0.2.7.dist-info/RECORD +0 -44
  130. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  131. {foodforthought_cli-0.2.7.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/recording/upload.py (new file)
@@ -0,0 +1,304 @@
+"""
+Upload demonstrations to FoodforThought.
+
+Converts interface recordings to the FoodforThought telemetry format
+and uploads them as artifacts for labeling and training.
+"""
+
+import os
+import json
+import requests
+from datetime import datetime
+from typing import Optional, Dict, Any
+from pathlib import Path
+
+from .demonstration import Demonstration, load_demonstration
+from .session import RecordingSession
+
+
+# API configuration
+BASE_URL = os.getenv("ATE_API_URL", "https://www.kindly.fyi/api")
+CONFIG_FILE = Path.home() / ".ate" / "config.json"
+
+
+class DemonstrationUploader:
+    """
+    Uploads demonstrations to FoodforThought.
+
+    Handles authentication and converts the interface-based recording
+    format to the FoodforThought telemetry ingest format.
+    """
+
+    def __init__(
+        self,
+        base_url: str = BASE_URL,
+        api_key: Optional[str] = None,
+    ):
+        """
+        Initialize uploader.
+
+        Args:
+            base_url: FoodforThought API URL
+            api_key: API key (or set ATE_API_KEY env var)
+        """
+        self.base_url = base_url
+        self.headers = {
+            "Content-Type": "application/json",
+        }
+
+        token = None
+
+        # Try to load from config file first (device auth flow)
+        if CONFIG_FILE.exists():
+            try:
+                with open(CONFIG_FILE) as f:
+                    config = json.load(f)
+                # Prefer access_token from device auth flow
+                token = config.get("access_token") or config.get("api_key")
+            except Exception:
+                pass
+
+        # Override with explicit api_key or env var
+        if api_key:
+            token = api_key
+        elif os.getenv("ATE_API_KEY"):
+            token = os.getenv("ATE_API_KEY")
+
+        if token:
+            self.headers["Authorization"] = f"Bearer {token}"
+        else:
+            raise ValueError(
+                "Not logged in. Run 'ate login' to authenticate."
+            )
+
+    def _request(self, method: str, endpoint: str, **kwargs) -> Dict:
+        """Make HTTP request to API."""
+        url = f"{self.base_url}{endpoint}"
+        response = requests.request(method, url, headers=self.headers, **kwargs)
+        response.raise_for_status()
+        return response.json()
+
+    def upload(
+        self,
+        demonstration: Demonstration,
+        project_id: Optional[str] = None,
+        skill_id: Optional[str] = None,
+        create_labeling_task: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Upload a demonstration to FoodforThought.
+
+        Args:
+            demonstration: Demonstration object to upload
+            project_id: Optional project ID to associate with
+            skill_id: Optional skill ID this demonstrates
+            create_labeling_task: Create a labeling task for annotation
+
+        Returns:
+            Response dict with artifactId and optional taskId
+        """
+        # Convert to telemetry ingest format
+        recording_data = self._convert_to_telemetry_format(
+            demonstration,
+            skill_id=skill_id,
+        )
+
+        if create_labeling_task:
+            recording_data["createLabelingTask"] = True
+
+        # Upload via telemetry ingest API
+        response = self._request("POST", "/telemetry/ingest", json=recording_data)
+
+        return {
+            "success": True,
+            "artifactId": response.get("data", {}).get("artifactId"),
+            "taskId": response.get("data", {}).get("taskId"),
+            "url": f"https://foodforthought.kindly.fyi/artifacts/{response.get('data', {}).get('artifactId', '')}",
+        }
+
+    def upload_file(
+        self,
+        path: str,
+        project_id: Optional[str] = None,
+        skill_id: Optional[str] = None,
+        create_labeling_task: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Upload a demonstration file to FoodforThought.
+
+        Args:
+            path: Path to .demonstration file
+            project_id: Optional project ID
+            skill_id: Optional skill ID
+            create_labeling_task: Create labeling task
+
+        Returns:
+            Response dict with artifactId
+        """
+        demonstration = load_demonstration(path)
+        return self.upload(
+            demonstration,
+            project_id=project_id,
+            skill_id=skill_id,
+            create_labeling_task=create_labeling_task,
+        )
+
+    def upload_session(
+        self,
+        session: RecordingSession,
+        project_id: Optional[str] = None,
+        skill_id: Optional[str] = None,
+        create_labeling_task: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Upload a recording session directly to FoodforThought.
+
+        Args:
+            session: RecordingSession to upload
+            project_id: Optional project ID
+            skill_id: Optional skill ID
+            create_labeling_task: Create labeling task
+
+        Returns:
+            Response dict with artifactId
+        """
+        # Convert session to demonstration
+        metadata = session.get_metadata()
+        demonstration = Demonstration(
+            metadata=metadata,
+            calls=session.calls,
+            segments=[],
+        )
+
+        return self.upload(
+            demonstration,
+            project_id=project_id,
+            skill_id=skill_id,
+            create_labeling_task=create_labeling_task,
+        )
+
+    def _convert_to_telemetry_format(
+        self,
+        demonstration: Demonstration,
+        skill_id: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """
+        Convert demonstration to FoodforThought telemetry ingest format.
+
+        The telemetry format is designed for time-series data from robots.
+        We map interface calls to this format while preserving the
+        abstract nature of the recording.
+        """
+        metadata = demonstration.metadata
+
+        # Convert calls to telemetry frames
+        frames = []
+        for call in demonstration.calls:
+            frame = {
+                "timestamp": call.timestamp,
+                "relativeTime": call.relative_time,
+                "type": "interface_call",
+                "data": {
+                    "interface": call.interface,
+                    "method": call.method,
+                    "args": call.args,
+                    "kwargs": call.kwargs,
+                    "result": call.result,
+                    "success": call.success,
+                },
+            }
+            if call.error:
+                frame["data"]["error"] = call.error
+            frames.append(frame)
+
+        # Convert segments to events
+        events = []
+        for segment in demonstration.segments:
+            events.append({
+                "type": "task_segment",
+                "startTime": segment.start_time,
+                "endTime": segment.end_time,
+                "label": segment.label,
+                "description": segment.description,
+                "confidence": segment.confidence,
+            })
+
+        # Build recording data
+        start_time = datetime.fromtimestamp(metadata.start_time).isoformat() if metadata.start_time else None
+        end_time = datetime.fromtimestamp(metadata.end_time).isoformat() if metadata.end_time else None
+
+        recording_data = {
+            "recording": {
+                "id": metadata.id,
+                "robotId": metadata.robot_model,
+                "skillId": skill_id or "demonstration",
+                "source": "interface_recording",
+                "startTime": start_time,
+                "endTime": end_time,
+                "success": all(c.success for c in demonstration.calls),
+                "metadata": {
+                    "name": metadata.name,
+                    "description": metadata.description,
+                    "robotName": metadata.robot_name,
+                    "robotModel": metadata.robot_model,
+                    "robotArchetype": metadata.robot_archetype,
+                    "capabilities": metadata.capabilities,
+                    "duration": metadata.duration,
+                    "callCount": len(demonstration.calls),
+                    "segmentCount": len(demonstration.segments),
+                    "interfacesUsed": demonstration.get_interfaces_used(),
+                    "tags": metadata.tags + ["interface_recording"],
+                },
+                "frames": frames,
+                "events": events,
+            },
+        }
+
+        return recording_data
+
+
+def upload_demonstration(
+    path_or_demonstration,
+    project_id: Optional[str] = None,
+    skill_id: Optional[str] = None,
+    create_labeling_task: bool = False,
+    api_key: Optional[str] = None,
+) -> Dict[str, Any]:
+    """
+    Convenience function to upload a demonstration.
+
+    Args:
+        path_or_demonstration: Path to .demonstration file or Demonstration object
+        project_id: Optional project ID
+        skill_id: Optional skill ID
+        create_labeling_task: Create labeling task
+        api_key: Optional API key
+
+    Returns:
+        Response dict with artifactId
+    """
+    uploader = DemonstrationUploader(api_key=api_key)

+    if isinstance(path_or_demonstration, str):
+        return uploader.upload_file(
+            path_or_demonstration,
+            project_id=project_id,
+            skill_id=skill_id,
+            create_labeling_task=create_labeling_task,
+        )
+    elif isinstance(path_or_demonstration, Demonstration):
+        return uploader.upload(
+            path_or_demonstration,
+            project_id=project_id,
+            skill_id=skill_id,
+            create_labeling_task=create_labeling_task,
+        )
+    elif isinstance(path_or_demonstration, RecordingSession):
+        return uploader.upload_session(
+            path_or_demonstration,
+            project_id=project_id,
+            skill_id=skill_id,
+            create_labeling_task=create_labeling_task,
+        )
+    else:
+        raise TypeError(f"Expected path, Demonstration, or RecordingSession, got {type(path_or_demonstration)}")
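The new module exposes upload_demonstration as the programmatic entry point for pushing recorded demonstrations to the /telemetry/ingest endpoint. A minimal usage sketch, assuming a previously recorded pick_cup.demonstration file and an ATE_API_KEY in the environment (both names are illustrative, not shipped fixtures):

# Usage sketch (not part of the package): upload a recorded demonstration.
# "pick_cup.demonstration" and the skill ID are illustrative placeholders.
import os

from ate.recording.upload import upload_demonstration

# Auth resolution mirrors the uploader: ~/.ate/config.json from `ate login`,
# or the ATE_API_KEY environment variable, or an explicit api_key= argument.
os.environ.setdefault("ATE_API_KEY", "<your-api-key>")

result = upload_demonstration(
    "pick_cup.demonstration",      # path to a .demonstration file on disk
    skill_id="pick_and_place",     # optional skill this recording demonstrates
    create_labeling_task=True,     # ask the backend to open a labeling task
)
print(result["artifactId"], result["url"])

Passing a Demonstration or RecordingSession object instead of a path routes through upload() or upload_session() respectively.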
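For reference, _convert_to_telemetry_format builds a request body of roughly the following shape. The keys come from the code above; the values are placeholders and the metadata block is abbreviated:

# Approximate shape of the JSON body POSTed to /telemetry/ingest.
# Keys mirror _convert_to_telemetry_format; values are placeholders.
payload = {
    "recording": {
        "id": "demo-123",
        "robotId": "mechdog",                # from metadata.robot_model
        "skillId": "pick_and_place",         # falls back to "demonstration"
        "source": "interface_recording",
        "startTime": "2025-01-01T12:00:00",  # ISO 8601, from metadata.start_time
        "endTime": "2025-01-01T12:01:30",
        "success": True,                     # True only if every call succeeded
        "metadata": {                        # abbreviated; see code above
            "callCount": 42,
            "segmentCount": 1,
            "tags": ["teleop", "interface_recording"],
        },
        "frames": [                          # one frame per recorded interface call
            {
                "timestamp": 1735732800.0,
                "relativeTime": 0.0,
                "type": "interface_call",
                "data": {
                    "interface": "locomotion",
                    "method": "walk_forward",
                    "args": [],
                    "kwargs": {"speed": 0.5},
                    "result": None,
                    "success": True,
                },
            },
        ],
        "events": [                          # one "task_segment" event per segment
            {
                "type": "task_segment",
                "startTime": 0.0,
                "endTime": 12.4,
                "label": "approach",
                "description": "Walk to the target object",
                "confidence": 0.9,
            },
        ],
    },
    "createLabelingTask": True,              # present only when requested
}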