foodforthought-cli 0.2.8__tar.gz → 0.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178) hide show
  1. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/PKG-INFO +1 -1
  2. foodforthought_cli-0.3.1/ate/__init__.py +10 -0
  3. foodforthought_cli-0.3.1/ate/__main__.py +16 -0
  4. foodforthought_cli-0.3.1/ate/auth/__init__.py +1 -0
  5. foodforthought_cli-0.3.1/ate/auth/device_flow.py +141 -0
  6. foodforthought_cli-0.3.1/ate/auth/token_store.py +96 -0
  7. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/behaviors/__init__.py +12 -0
  8. foodforthought_cli-0.3.1/ate/behaviors/approach.py +399 -0
  9. foodforthought_cli-0.3.1/ate/cli.py +1008 -0
  10. foodforthought_cli-0.3.1/ate/client.py +90 -0
  11. foodforthought_cli-0.3.1/ate/commands/__init__.py +168 -0
  12. foodforthought_cli-0.3.1/ate/commands/auth.py +389 -0
  13. foodforthought_cli-0.3.1/ate/commands/bridge.py +448 -0
  14. foodforthought_cli-0.3.1/ate/commands/data.py +185 -0
  15. foodforthought_cli-0.3.1/ate/commands/deps.py +111 -0
  16. foodforthought_cli-0.3.1/ate/commands/generate.py +384 -0
  17. foodforthought_cli-0.3.1/ate/commands/memory.py +907 -0
  18. foodforthought_cli-0.3.1/ate/commands/parts.py +166 -0
  19. foodforthought_cli-0.3.1/ate/commands/primitive.py +399 -0
  20. foodforthought_cli-0.3.1/ate/commands/protocol.py +288 -0
  21. foodforthought_cli-0.3.1/ate/commands/recording.py +524 -0
  22. foodforthought_cli-0.3.1/ate/commands/repo.py +154 -0
  23. foodforthought_cli-0.3.1/ate/commands/simulation.py +291 -0
  24. foodforthought_cli-0.3.1/ate/commands/skill.py +303 -0
  25. foodforthought_cli-0.3.1/ate/commands/skills.py +487 -0
  26. foodforthought_cli-0.3.1/ate/commands/team.py +147 -0
  27. foodforthought_cli-0.3.1/ate/commands/workflow.py +271 -0
  28. foodforthought_cli-0.3.1/ate/detection/__init__.py +38 -0
  29. foodforthought_cli-0.3.1/ate/detection/base.py +142 -0
  30. foodforthought_cli-0.3.1/ate/detection/color_detector.py +402 -0
  31. foodforthought_cli-0.3.1/ate/detection/trash_detector.py +322 -0
  32. foodforthought_cli-0.3.1/ate/drivers/__init__.py +39 -0
  33. foodforthought_cli-0.3.1/ate/drivers/ble_transport.py +405 -0
  34. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/drivers/mechdog.py +360 -24
  35. foodforthought_cli-0.3.1/ate/drivers/wifi_camera.py +477 -0
  36. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/__init__.py +16 -0
  37. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/base.py +2 -0
  38. foodforthought_cli-0.3.1/ate/interfaces/sensors.py +247 -0
  39. foodforthought_cli-0.3.1/ate/llm_proxy.py +239 -0
  40. foodforthought_cli-0.3.1/ate/memory/__init__.py +35 -0
  41. foodforthought_cli-0.3.1/ate/memory/cloud.py +244 -0
  42. foodforthought_cli-0.3.1/ate/memory/context.py +269 -0
  43. foodforthought_cli-0.3.1/ate/memory/embeddings.py +184 -0
  44. foodforthought_cli-0.3.1/ate/memory/export.py +26 -0
  45. foodforthought_cli-0.3.1/ate/memory/merge.py +146 -0
  46. foodforthought_cli-0.3.1/ate/memory/migrate/__init__.py +34 -0
  47. foodforthought_cli-0.3.1/ate/memory/migrate/base.py +89 -0
  48. foodforthought_cli-0.3.1/ate/memory/migrate/pipeline.py +189 -0
  49. foodforthought_cli-0.3.1/ate/memory/migrate/sources/__init__.py +13 -0
  50. foodforthought_cli-0.3.1/ate/memory/migrate/sources/chroma.py +170 -0
  51. foodforthought_cli-0.3.1/ate/memory/migrate/sources/pinecone.py +120 -0
  52. foodforthought_cli-0.3.1/ate/memory/migrate/sources/qdrant.py +110 -0
  53. foodforthought_cli-0.3.1/ate/memory/migrate/sources/weaviate.py +160 -0
  54. foodforthought_cli-0.3.1/ate/memory/reranker.py +353 -0
  55. foodforthought_cli-0.3.1/ate/memory/search.py +26 -0
  56. foodforthought_cli-0.3.1/ate/memory/store.py +548 -0
  57. foodforthought_cli-0.3.1/ate/recording/__init__.py +83 -0
  58. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/recording/session.py +12 -2
  59. foodforthought_cli-0.3.1/ate/recording/visual.py +416 -0
  60. foodforthought_cli-0.3.1/ate/robot/__init__.py +221 -0
  61. foodforthought_cli-0.3.1/ate/robot/agentic_servo.py +856 -0
  62. foodforthought_cli-0.3.1/ate/robot/behaviors.py +493 -0
  63. foodforthought_cli-0.3.1/ate/robot/ble_capture.py +1000 -0
  64. foodforthought_cli-0.3.1/ate/robot/ble_enumerate.py +506 -0
  65. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/calibration.py +88 -3
  66. foodforthought_cli-0.3.1/ate/robot/calibration_state.py +388 -0
  67. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/commands.py +143 -11
  68. foodforthought_cli-0.3.1/ate/robot/direction_calibration.py +554 -0
  69. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/discovery.py +104 -2
  70. foodforthought_cli-0.3.1/ate/robot/llm_system_id.py +654 -0
  71. foodforthought_cli-0.3.1/ate/robot/locomotion_calibration.py +508 -0
  72. foodforthought_cli-0.3.1/ate/robot/marker_generator.py +611 -0
  73. foodforthought_cli-0.3.1/ate/robot/perception.py +502 -0
  74. foodforthought_cli-0.3.1/ate/robot/primitives.py +614 -0
  75. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/profiles.py +6 -0
  76. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/registry.py +5 -2
  77. foodforthought_cli-0.3.1/ate/robot/servo_mapper.py +1153 -0
  78. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/skill_upload.py +285 -3
  79. foodforthought_cli-0.3.1/ate/robot/target_calibration.py +500 -0
  80. foodforthought_cli-0.3.1/ate/robot/teach.py +515 -0
  81. foodforthought_cli-0.3.1/ate/robot/types.py +242 -0
  82. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/visual_labeler.py +9 -0
  83. foodforthought_cli-0.3.1/ate/robot/visual_servo_loop.py +494 -0
  84. foodforthought_cli-0.3.1/ate/robot/visual_servoing.py +570 -0
  85. foodforthought_cli-0.3.1/ate/robot/visual_system_id.py +906 -0
  86. foodforthought_cli-0.3.1/ate/transports/__init__.py +121 -0
  87. foodforthought_cli-0.3.1/ate/transports/base.py +394 -0
  88. foodforthought_cli-0.3.1/ate/transports/ble.py +405 -0
  89. foodforthought_cli-0.3.1/ate/transports/hybrid.py +444 -0
  90. foodforthought_cli-0.3.1/ate/transports/serial.py +345 -0
  91. foodforthought_cli-0.3.1/ate/urdf/__init__.py +30 -0
  92. foodforthought_cli-0.3.1/ate/urdf/capture.py +582 -0
  93. foodforthought_cli-0.3.1/ate/urdf/cloud.py +491 -0
  94. foodforthought_cli-0.3.1/ate/urdf/collision.py +271 -0
  95. foodforthought_cli-0.3.1/ate/urdf/commands.py +708 -0
  96. foodforthought_cli-0.3.1/ate/urdf/depth.py +360 -0
  97. foodforthought_cli-0.3.1/ate/urdf/inertial.py +312 -0
  98. foodforthought_cli-0.3.1/ate/urdf/kinematics.py +330 -0
  99. foodforthought_cli-0.3.1/ate/urdf/lifting.py +415 -0
  100. foodforthought_cli-0.3.1/ate/urdf/meshing.py +300 -0
  101. foodforthought_cli-0.3.1/ate/urdf/models/__init__.py +110 -0
  102. foodforthought_cli-0.3.1/ate/urdf/models/depth_anything.py +253 -0
  103. foodforthought_cli-0.3.1/ate/urdf/models/sam2.py +324 -0
  104. foodforthought_cli-0.3.1/ate/urdf/motion_analysis.py +396 -0
  105. foodforthought_cli-0.3.1/ate/urdf/pipeline.py +468 -0
  106. foodforthought_cli-0.3.1/ate/urdf/scale.py +256 -0
  107. foodforthought_cli-0.3.1/ate/urdf/scan_session.py +411 -0
  108. foodforthought_cli-0.3.1/ate/urdf/segmentation.py +299 -0
  109. foodforthought_cli-0.3.1/ate/urdf/synthesis.py +319 -0
  110. foodforthought_cli-0.3.1/ate/urdf/topology.py +336 -0
  111. foodforthought_cli-0.3.1/ate/urdf/validation.py +371 -0
  112. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/foodforthought_cli.egg-info/PKG-INFO +1 -1
  113. foodforthought_cli-0.3.1/foodforthought_cli.egg-info/SOURCES.txt +170 -0
  114. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/setup.py +1 -1
  115. foodforthought_cli-0.2.8/ate/__init__.py +0 -4
  116. foodforthought_cli-0.2.8/ate/cli.py +0 -4704
  117. foodforthought_cli-0.2.8/ate/drivers/__init__.py +0 -27
  118. foodforthought_cli-0.2.8/ate/recording/__init__.py +0 -44
  119. foodforthought_cli-0.2.8/ate/robot/__init__.py +0 -79
  120. foodforthought_cli-0.2.8/foodforthought_cli.egg-info/SOURCES.txt +0 -77
  121. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/README.md +0 -0
  122. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/behaviors/common.py +0 -0
  123. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/behaviors/tree.py +0 -0
  124. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/bridge_server.py +0 -0
  125. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/compatibility.py +0 -0
  126. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/generator.py +0 -0
  127. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/generators/__init__.py +0 -0
  128. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/generators/docker_generator.py +0 -0
  129. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/generators/hardware_config.py +0 -0
  130. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/generators/ros2_generator.py +0 -0
  131. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/generators/skill_generator.py +0 -0
  132. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/body.py +0 -0
  133. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/detection.py +0 -0
  134. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/locomotion.py +0 -0
  135. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/manipulation.py +0 -0
  136. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/navigation.py +0 -0
  137. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/perception.py +0 -0
  138. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/interfaces/types.py +0 -0
  139. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/marketplace.py +0 -0
  140. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/mcp_server.py +0 -0
  141. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/primitives.py +0 -0
  142. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/recording/demonstration.py +0 -0
  143. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/recording/upload.py +0 -0
  144. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/recording/wrapper.py +0 -0
  145. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/introspection.py +0 -0
  146. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot/manager.py +0 -0
  147. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/robot_setup.py +0 -0
  148. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/skill_schema.py +0 -0
  149. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/__init__.py +0 -0
  150. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/cli.py +0 -0
  151. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/collector.py +0 -0
  152. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/context.py +0 -0
  153. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/fleet_agent.py +0 -0
  154. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/formats/__init__.py +0 -0
  155. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/formats/hdf5_serializer.py +0 -0
  156. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/formats/mcap_serializer.py +0 -0
  157. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/ate/telemetry/types.py +0 -0
  158. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/foodforthought_cli.egg-info/dependency_links.txt +0 -0
  159. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/foodforthought_cli.egg-info/entry_points.txt +0 -0
  160. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/foodforthought_cli.egg-info/requires.txt +0 -0
  161. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/foodforthought_cli.egg-info/top_level.txt +0 -0
  162. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/mechdog_labeled/__init__.py +0 -0
  163. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/mechdog_labeled/primitives.py +0 -0
  164. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/mechdog_labeled/servo_map.py +0 -0
  165. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/mechdog_output/__init__.py +0 -0
  166. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/mechdog_output/primitives.py +0 -0
  167. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/mechdog_output/servo_map.py +0 -0
  168. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/setup.cfg +0 -0
  169. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_autodetect/__init__.py +0 -0
  170. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_autodetect/primitives.py +0 -0
  171. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_autodetect/servo_map.py +0 -0
  172. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_full_auto/__init__.py +0 -0
  173. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_full_auto/primitives.py +0 -0
  174. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_full_auto/servo_map.py +0 -0
  175. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_smart_detect/__init__.py +0 -0
  176. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_smart_detect/primitives.py +0 -0
  177. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/test_smart_detect/servo_map.py +0 -0
  178. {foodforthought_cli-0.2.8 → foodforthought_cli-0.3.1}/tests/test_auth.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: foodforthought-cli
3
- Version: 0.2.8
3
+ Version: 0.3.1
4
4
  Summary: CLI tool for FoodforThought robotics repository platform - manage robot skills and data
5
5
  Home-page: https://kindly.fyi/foodforthought
6
6
  Author: Kindly Robotics
@@ -0,0 +1,10 @@
1
+ """FoodforThought CLI (ATE) - GitHub-like interface for robotics repositories"""
2
+
3
+ __version__ = "0.2.7"
4
+
5
+ # LLM Proxy for metered AI access
6
+ try:
7
+ from .llm_proxy import LLMProxy, LLMProxyError, LLMResponse, get_proxy
8
+ except ImportError:
9
+ pass # Optional dependency
10
+
@@ -0,0 +1,16 @@
1
#!/usr/bin/env python3
"""
Enable running ate as a module: python -m ate

This allows the CLI to be invoked via:
    python -m ate --help
    python -m ate robot upload mechdog --dry-run
    python -m ate login

Instead of requiring the installed entry point.
"""

from ate.cli import main

# Delegate directly to the CLI entry point; all argument parsing and command
# dispatch live in ate.cli.main.
if __name__ == "__main__":
    main()
@@ -0,0 +1 @@
1
+ """OAuth 2.0 Device Authorization Grant (RFC 8628) auth package."""
@@ -0,0 +1,141 @@
1
+ """
2
+ OAuth 2.0 Device Authorization Grant (RFC 8628) client.
3
+
4
+ Provides agent-friendly authentication for the `ate` CLI.
5
+ The device flow allows headless/CLI clients to authenticate
6
+ by having the user authorize on a separate device with a browser.
7
+ """
8
+
9
+ import time
10
+ from dataclasses import dataclass
11
+
12
+ import requests
13
+
14
+
15
class DeviceFlowError(Exception):
    """Base exception for failures in the OAuth device authorization flow."""
18
+
19
+
20
class DeviceFlowTimeout(DeviceFlowError):
    """Raised when polling gives up because the device code's lifetime elapsed."""
23
+
24
+
25
class DeviceFlowDenied(DeviceFlowError):
    """Raised when the user explicitly rejected the authorization request."""
28
+
29
+
30
@dataclass
class DeviceCodeResponse:
    """Response from the device authorization endpoint (RFC 8628 §3.2).

    Carries the code pair plus the polling parameters the client must honor.
    """
    device_code: str  # opaque code the CLI polls the token endpoint with
    user_code: str  # e.g. "ABCD-1234" — human types this
    verification_uri: str  # e.g. "https://kindly.fyi/device"
    expires_in: int  # seconds (default 600)
    interval: int  # polling interval seconds (default 5)
38
+
39
+
40
@dataclass
class TokenResponse:
    """Successful token response from the token endpoint (RFC 8628 §3.5)."""
    access_token: str  # bearer credential sent on authenticated API calls
    refresh_token: str  # per OAuth convention, exchanged for a new access token
    expires_in: int  # access-token lifetime in seconds
    token_type: str  # "Bearer"
47
+
48
+
49
class DeviceFlowClient:
    """OAuth 2.0 Device Authorization Grant (RFC 8628) client.

    Flow: call request_code(), show the user_code / verification_uri to the
    user, then poll_for_token() until the user approves, denies, or the
    device code expires.
    """

    def __init__(self, server_url: str = "https://kindly.fyi"):
        # Base URL of the server hosting the device-flow endpoints.
        self.server_url = server_url

    def request_code(self) -> DeviceCodeResponse:
        """POST /api/auth/device/code → DeviceCodeResponse.

        Raises:
            DeviceFlowError: on HTTP, network, or malformed-response errors.
        """
        try:
            resp = requests.post(
                f"{self.server_url}/api/auth/device/code",
                json={"client_id": "ate-cli"},
                timeout=10,
            )
            resp.raise_for_status()
            data = resp.json()
            return DeviceCodeResponse(
                device_code=data["device_code"],
                user_code=data["user_code"],
                verification_uri=data["verification_uri"],
                expires_in=data["expires_in"],
                interval=data["interval"],
            )
        except DeviceFlowError:
            raise
        except Exception as e:
            # Wrap transport and parsing failures (RequestException, KeyError,
            # JSON decode errors) in the package's own exception hierarchy.
            raise DeviceFlowError(f"Failed to request device code: {e}") from e

    def poll_for_token(
        self,
        device_code: str,
        interval: int = 5,
        expires_in: int = 600,
    ) -> TokenResponse:
        """Poll POST /api/auth/device/token until authorized or expired.

        Implements the RFC 8628 §3.5 polling rules: keep polling on
        ``authorization_pending``, back off by 5s on ``slow_down``, and stop
        on terminal errors. Transient network failures and non-JSON bodies
        are retried until the overall deadline.

        Returns:
            TokenResponse on success.

        Raises:
            DeviceFlowTimeout: if ``expires_in`` is exceeded or the code expired.
            DeviceFlowDenied: if the user denied the request.
            DeviceFlowError: on any other terminal OAuth error.
        """
        current_interval = interval
        start_time = time.time()

        while True:
            # Sleep before each attempt — the user needs time to visit the
            # verification URI before the first poll can possibly succeed.
            time.sleep(current_interval)

            elapsed = time.time() - start_time
            if elapsed > expires_in:
                raise DeviceFlowTimeout(
                    f"Device authorization timed out after {expires_in}s"
                )

            try:
                resp = requests.post(
                    f"{self.server_url}/api/auth/device/token",
                    json={
                        "client_id": "ate-cli",
                        "device_code": device_code,
                        "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
                    },
                    timeout=10,
                )
            except requests.RequestException:
                # Transient network failure: keep polling until the deadline
                # instead of aborting the whole login with a raw exception.
                continue

            if resp.status_code == 200:
                data = resp.json()
                return TokenResponse(
                    access_token=data["access_token"],
                    refresh_token=data["refresh_token"],
                    expires_in=data["expires_in"],
                    token_type=data["token_type"],
                )

            # Error responses carry a JSON body with an "error" code
            # (RFC 6749 §5.2 / RFC 8628 §3.5).
            try:
                error_data = resp.json()
            except Exception:
                # Non-JSON body (e.g. a proxy error page): treat as transient.
                continue

            error = error_data.get("error", "")

            if error == "authorization_pending":
                # User has not approved yet — keep polling.
                continue
            elif error == "slow_down":
                # RFC 8628 §3.5: increase the polling interval by 5 seconds.
                current_interval += 5
                continue
            elif error == "access_denied":
                raise DeviceFlowDenied("User denied the authorization request")
            elif error == "expired_token":
                raise DeviceFlowTimeout("Device code expired")
            else:
                # Unrecognized terminal error: RFC 8628 says the client must
                # stop polling (silently retrying here can spin for the full
                # expires_in window against a permanently broken request).
                raise DeviceFlowError(
                    f"Token endpoint returned error: {error or resp.status_code}"
                )
@@ -0,0 +1,96 @@
1
+ """
2
+ Token persistence for OAuth 2.0 device flow credentials.
3
+
4
+ Stores tokens at ~/.ate/credentials.json with expiry tracking.
5
+ """
6
+
7
+ import json
8
+ import os
9
+ import time
10
+ from typing import Optional
11
+
12
+ from ate.auth.device_flow import TokenResponse
13
+
14
+
15
class TokenStore:
    """Manages token persistence at ~/.ate/credentials.json.

    The file holds bearer and refresh tokens, so it is restricted to
    owner-only (0600) permissions on save.
    """

    # Refresh when the access token is within this many seconds of expiry.
    REFRESH_MARGIN_SECONDS = 300

    def __init__(self, path: Optional[str] = None):
        # Default location: ~/.ate/credentials.json
        self.path = path or os.path.expanduser("~/.ate/credentials.json")

    def save(self, token_response: "TokenResponse") -> None:
        """Save tokens to disk (creates parent dirs).

        Records a ``saved_at`` timestamp for expiry calculations and
        restricts the credentials file to owner read/write.
        """
        parent = os.path.dirname(self.path)
        if parent:
            os.makedirs(parent, exist_ok=True)

        data = {
            "access_token": token_response.access_token,
            "refresh_token": token_response.refresh_token,
            "expires_in": token_response.expires_in,
            "token_type": token_response.token_type,
            "saved_at": time.time(),
        }

        with open(self.path, "w") as f:
            json.dump(data, f, indent=2)
        # Tokens are secrets: keep the file private to the owner.
        try:
            os.chmod(self.path, 0o600)
        except OSError:
            pass  # e.g. filesystems without POSIX permission support

    def load(self) -> Optional["TokenResponse"]:
        """Load tokens from disk. Returns None if no credentials or invalid."""
        if not os.path.exists(self.path):
            return None

        try:
            with open(self.path) as f:
                data = json.load(f)
            return TokenResponse(
                access_token=data["access_token"],
                refresh_token=data["refresh_token"],
                expires_in=data["expires_in"],
                token_type=data["token_type"],
            )
        except (OSError, json.JSONDecodeError, KeyError, TypeError):
            # Unreadable, corrupt, or incomplete file == "not logged in".
            return None

    def clear(self) -> None:
        """Delete stored credentials (no-op if none exist)."""
        try:
            os.remove(self.path)
        except FileNotFoundError:
            pass

    def _load_raw(self) -> Optional[dict]:
        """Load the raw JSON payload, including ``saved_at``."""
        if not os.path.exists(self.path):
            return None
        try:
            with open(self.path) as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError, TypeError):
            return None

    def _expires_at(self) -> Optional[float]:
        """Absolute expiry time (epoch seconds), or None if unknown."""
        data = self._load_raw()
        if data is None:
            return None
        saved_at = data.get("saved_at", 0)
        expires_in = data.get("expires_in", 0)
        return saved_at + expires_in

    def is_expired(self) -> bool:
        """True if the access token has expired (or expiry is unknown)."""
        expires_at = self._expires_at()
        if expires_at is None:
            return True
        return time.time() >= expires_at

    def needs_refresh(self) -> bool:
        """True if the token is within REFRESH_MARGIN_SECONDS of expiry."""
        expires_at = self._expires_at()
        if expires_at is None:
            return True
        return time.time() >= (expires_at - self.REFRESH_MARGIN_SECONDS)
@@ -50,6 +50,13 @@ from .common import (
50
50
  SearchAndRetrieve,
51
51
  )
52
52
 
53
+ from .approach import (
54
+ ApproachState,
55
+ ApproachConfig,
56
+ ApproachTarget,
57
+ VisualServoApproach,
58
+ )
59
+
53
60
  __all__ = [
54
61
  # Core tree nodes
55
62
  "BehaviorNode",
@@ -85,4 +92,9 @@ __all__ = [
85
92
  # Composite behaviors
86
93
  "PatrolAndCleanup",
87
94
  "SearchAndRetrieve",
95
+ # Approach behaviors (locomotion-agnostic)
96
+ "ApproachState",
97
+ "ApproachConfig",
98
+ "ApproachTarget",
99
+ "VisualServoApproach",
88
100
  ]
@@ -0,0 +1,399 @@
1
+ """
2
+ Approach behaviors for targeting detected objects.
3
+
4
+ These behaviors work with any locomotion interface (quadruped, wheeled, etc.)
5
+ rather than requiring NavigationInterface. They use:
6
+ - Visual servoing (centering target in camera view)
7
+ - Distance sensing (ultrasonic, visual estimation, depth camera)
8
+
9
+ This is how FoodforThought makes skills portable across different robots.
10
+ """
11
+
12
+ import time
13
+ from dataclasses import dataclass
14
+ from typing import Optional, Protocol, Union, Tuple
15
+ from enum import Enum, auto
16
+
17
+ from .tree import BehaviorNode, BehaviorStatus
18
+
19
+ from ..interfaces import (
20
+ QuadrupedLocomotion,
21
+ WheeledLocomotion,
22
+ BipedLocomotion,
23
+ LocomotionInterface,
24
+ CameraInterface,
25
+ Vector3,
26
+ ActionResult,
27
+ )
28
+ from ..interfaces.sensors import (
29
+ DistanceSensorInterface,
30
+ DistanceReading,
31
+ VisualDistanceEstimator,
32
+ )
33
+ from ..interfaces.detection import Detection, BoundingBox
34
+
35
+
36
class ApproachState(Enum):
    """State machine for approach behavior.

    The current state's name is published to the blackboard under
    ``approach_state`` so other behavior nodes can observe progress.
    """
    SEARCHING = auto()  # Looking for target
    CENTERING = auto()  # Turning to center target
    APPROACHING = auto()  # Moving toward target
    REACHED = auto()  # Close enough to target
    LOST = auto()  # Lost sight of target
43
+
44
+
45
@dataclass
class ApproachConfig:
    """Configuration for approach behavior.

    Distances are in meters; speed values are passed through unchanged to
    the locomotion interface.
    """
    # Target distance to stop at (meters)
    target_distance: float = 0.15

    # Image centering tolerance (fraction of image width from center)
    center_tolerance: float = 0.15

    # Speed settings
    approach_speed: float = 0.3
    turn_speed: float = 0.4
    slow_distance: float = 0.3  # Slow down when this close

    # Timeout settings
    max_approach_time: float = 30.0  # give up (FAILURE) after this many seconds
    lost_target_timeout: float = 3.0  # FAILURE if target unseen for this long

    # Visual estimation (when no hardware distance sensor)
    use_visual_distance: bool = True
    default_object_size: float = 0.10  # meters; assumed real size fed to the estimator
66
+
67
+
68
class ApproachTarget(BehaviorNode):
    """
    Approach a detected target using visual servoing.

    Works with ANY locomotion interface by using:
    1. Camera to track target position in image
    2. Distance sensor OR visual estimation for range
    3. Locomotion commands (walk/drive forward, turn)

    Returns SUCCESS once within ``config.target_distance`` of the target,
    FAILURE on overall timeout or when the target is lost, RUNNING otherwise.

    Blackboard:
        - Reads: "target_detection" or "target_bbox" or "target_position"
        - Writes: "approach_state", "target_distance", "approach_complete"

    Example:
        # Works with quadruped
        approach = ApproachTarget(
            locomotion=mechdog,
            camera=mechdog,  # Same driver implements both
            distance_sensor=mechdog,  # Has ultrasonic
        )

        # Works with wheeled robot
        approach = ApproachTarget(
            locomotion=diff_drive,
            camera=usb_camera,
            distance_sensor=None,  # Will use visual estimation
        )
    """

    def __init__(
        self,
        locomotion: LocomotionInterface,
        camera: Optional[CameraInterface] = None,
        distance_sensor: Optional[DistanceSensorInterface] = None,
        config: Optional[ApproachConfig] = None,
        name: str = "",
    ):
        super().__init__(name or "ApproachTarget")
        self.locomotion = locomotion
        self.camera = camera
        self.distance_sensor = distance_sensor
        self.config = config or ApproachConfig()

        # Visual distance estimator (fallback when no hardware sensor).
        # Only built when the camera reports a usable (non-zero) resolution.
        self._visual_estimator: Optional[VisualDistanceEstimator] = None
        if camera and self.config.use_visual_distance:
            res = camera.get_resolution()
            if res[0] > 0:
                self._visual_estimator = VisualDistanceEstimator(
                    image_width=res[0],
                    image_height=res[1],
                )

        # State (re-initialized by reset())
        self._state = ApproachState.SEARCHING
        self._start_time: Optional[float] = None
        self._last_detection_time: Optional[float] = None
        self._current_distance: float = float('inf')

    def tick(self) -> BehaviorStatus:
        """Execute one tick of the approach behavior.

        One tick performs: timeout check → target lookup → distance
        measurement → either stop (reached), turn (off-center), or advance.
        """
        now = time.time()

        # Initialize on first tick
        if self._start_time is None:
            self._start_time = now
            self._state = ApproachState.SEARCHING

        # Check timeout
        if now - self._start_time > self.config.max_approach_time:
            self._stop_locomotion()
            return BehaviorStatus.FAILURE

        # Get current target info from blackboard
        target_info = self._get_target_info()

        if target_info is None:
            # No target info available
            if self._last_detection_time is None:
                return BehaviorStatus.FAILURE  # Never saw target

            # Check if we lost target recently
            if now - self._last_detection_time > self.config.lost_target_timeout:
                self._state = ApproachState.LOST
                self._stop_locomotion()
                return BehaviorStatus.FAILURE

            # Keep approaching based on last known info
            # (locomotion continues with its last command until timeout)
            return BehaviorStatus.RUNNING

        # Update last detection time
        self._last_detection_time = now

        bbox, object_type = target_info

        # Get distance to target
        distance = self._get_distance(bbox, object_type)
        self._current_distance = distance

        if self.blackboard:
            self.blackboard.set("target_distance", distance)
            self.blackboard.set("approach_state", self._state.name)

        # Check if we've reached the target
        if distance <= self.config.target_distance:
            self._state = ApproachState.REACHED
            self._stop_locomotion()
            if self.blackboard:
                self.blackboard.set("approach_complete", True)
            return BehaviorStatus.SUCCESS

        # Get target position in image (for centering)
        center_offset = self._get_center_offset(bbox)

        # State machine: center first, then advance
        if abs(center_offset) > self.config.center_tolerance:
            # Need to turn to center target
            self._state = ApproachState.CENTERING
            self._turn_toward_target(center_offset)
            return BehaviorStatus.RUNNING
        else:
            # Target is centered, approach
            self._state = ApproachState.APPROACHING
            self._move_toward_target(distance)
            return BehaviorStatus.RUNNING

    def _get_target_info(self) -> Optional[Tuple[BoundingBox, str]]:
        """Get target bounding box and type from blackboard.

        Checked in priority order: "target_detection" (full Detection),
        "target_bbox" (raw box, type becomes "unknown"), then the first
        entry of "detections".
        """
        if not self.blackboard:
            return None

        # Try detection first
        detection = self.blackboard.get("target_detection")
        if detection and isinstance(detection, Detection):
            return (detection.bbox, detection.class_name)

        # Try raw bbox
        bbox = self.blackboard.get("target_bbox")
        if bbox and isinstance(bbox, BoundingBox):
            return (bbox, "unknown")

        # Try detection list (get first)
        detections = self.blackboard.get("detections")
        if detections and len(detections) > 0:
            det = detections[0]
            if isinstance(det, Detection):
                return (det.bbox, det.class_name)

        return None

    def _get_center_offset(self, bbox: BoundingBox) -> float:
        """
        Get offset from image center as fraction of image width.

        Returns:
            Negative = target is left of center
            Positive = target is right of center
            0 = centered (also returned when no camera/resolution is known)
        """
        if not self.camera:
            return 0.0

        res = self.camera.get_resolution()
        if res[0] == 0:
            return 0.0

        image_width = res[0]
        image_center_x = image_width / 2

        # Get bbox center (bbox.x assumed to be the left edge — matches
        # the bbox.x + width/2 usage elsewhere in this module)
        bbox_center_x = bbox.x + bbox.width / 2

        # Calculate offset as fraction of image width
        offset = (bbox_center_x - image_center_x) / image_width

        return offset

    def _get_distance(self, bbox: BoundingBox, object_type: str) -> float:
        """
        Get distance to target using best available method.

        Priority:
        1. Hardware distance sensor (ultrasonic, lidar)
        2. Visual estimation from bbox size
        3. Default large distance (1.0 m, so the behavior keeps approaching)
        """
        # Try hardware sensor first
        if self.distance_sensor:
            reading = self.distance_sensor.get_distance()
            if reading.valid:
                return reading.distance

        # Fall back to visual estimation
        if self._visual_estimator and bbox.width > 0:
            reading = self._visual_estimator.estimate_from_detection(
                bbox_width=int(bbox.width),
                object_type=object_type,
                known_size=self.config.default_object_size,
            )
            if reading.valid:
                return reading.distance

        # Default: assume we need to approach
        return 1.0

    def _turn_toward_target(self, offset: float) -> None:
        """Turn to center the target in view.

        Dispatches on whichever method the locomotion driver provides:
        turn_continuous, walk (quadruped), or drive (wheeled).
        """
        # Positive offset = target is right, turn right (negative angular)
        # Negative offset = target is left, turn left (positive angular)

        # Scale turn based on offset magnitude
        turn_rate = -offset * self.config.turn_speed * 2

        # Use appropriate locomotion method
        if hasattr(self.locomotion, 'turn_continuous'):
            self.locomotion.turn_continuous(turn_rate)
        elif hasattr(self.locomotion, 'walk'):
            # For quadrupeds, walk in place while turning
            direction = Vector3(0, -offset, 0)  # y component controls turn
            self.locomotion.walk(direction, speed=0.1)
        elif hasattr(self.locomotion, 'drive'):
            # For wheeled, differential drive
            self.locomotion.drive(0, turn_rate)

    def _move_toward_target(self, distance: float) -> None:
        """Move toward the target, easing off inside config.slow_distance."""
        # Calculate speed based on distance
        speed = self.config.approach_speed
        if distance < self.config.slow_distance:
            # Slow down as we get close
            speed *= distance / self.config.slow_distance
            speed = max(speed, 0.1)  # Minimum speed

        # Use appropriate locomotion method
        if hasattr(self.locomotion, 'walk'):
            self.locomotion.walk(Vector3.forward(), speed=speed)
        elif hasattr(self.locomotion, 'drive'):
            self.locomotion.drive(speed, 0)

    def _stop_locomotion(self) -> None:
        """Stop all locomotion (best-effort; driver may not expose stop())."""
        if hasattr(self.locomotion, 'stop'):
            self.locomotion.stop()

    def reset(self) -> None:
        """Reset behavior state so the node can be re-run from scratch."""
        super().reset()
        self._state = ApproachState.SEARCHING
        self._start_time = None
        self._last_detection_time = None
        self._current_distance = float('inf')
319
+
320
+
321
class VisualServoApproach(BehaviorNode):
    """
    Simpler approach using only visual feedback.

    Approaches until the bounding box reaches a target size in the image.
    No distance sensor required - uses bbox size as proxy for distance.

    Blackboard:
        - Reads: "target_detection" or "target_bbox"
        - Writes: "approach_complete"
    """

    def __init__(
        self,
        locomotion: LocomotionInterface,
        target_bbox_height_fraction: float = 0.6,  # Target height as fraction of image
        center_tolerance: float = 0.1,
        approach_speed: float = 0.3,
        image_height: int = 480,
        name: str = "",
        image_width: Optional[int] = None,  # Actual frame width; defaults to 4:3
    ):
        super().__init__(name or "VisualServoApproach")
        self.locomotion = locomotion
        self.target_fraction = target_bbox_height_fraction
        self.center_tolerance = center_tolerance
        self.approach_speed = approach_speed
        self.image_height = image_height
        # The width was previously hard-coded to a 4:3 aspect ratio inside
        # tick(); keep that as the default but let callers with other sensor
        # aspect ratios (16:9 etc.) pass the real width.
        self.image_width = image_width if image_width is not None else image_height * (4 / 3)

    def tick(self) -> BehaviorStatus:
        """Execute one visual-servo step: center the target, then advance.

        Returns SUCCESS when the bbox height reaches the target fraction of
        the frame, FAILURE when no target is visible, RUNNING otherwise.
        """
        if not self.blackboard:
            return BehaviorStatus.FAILURE

        # Prefer a full Detection; fall back to a raw bounding box.
        detection = self.blackboard.get("target_detection")
        if detection and isinstance(detection, Detection):
            bbox = detection.bbox
        else:
            bbox = self.blackboard.get("target_bbox")

        if not bbox:
            # No target visible — stop rather than drive blind.
            if hasattr(self.locomotion, 'stop'):
                self.locomotion.stop()
            return BehaviorStatus.FAILURE

        # "Close enough" when the bbox fills the target fraction of the frame.
        height_fraction = bbox.height / self.image_height
        if height_fraction >= self.target_fraction:
            if hasattr(self.locomotion, 'stop'):
                self.locomotion.stop()
            self.blackboard.set("approach_complete", True)
            return BehaviorStatus.SUCCESS

        # Horizontal offset normalized to half-width:
        # -1 = far left, 0 = centered, +1 = far right.
        image_center_x = self.image_width / 2
        bbox_center_x = bbox.x + bbox.width / 2
        offset = (bbox_center_x - image_center_x) / image_center_x

        if abs(offset) > self.center_tolerance:
            # Turn toward target first so forward motion stays on course.
            turn_rate = -offset * 0.5
            if hasattr(self.locomotion, 'turn_continuous'):
                self.locomotion.turn_continuous(turn_rate)
            elif hasattr(self.locomotion, 'walk'):
                self.locomotion.walk(Vector3(0, -offset, 0), speed=0.1)
        else:
            # Approach, easing off as the bbox nears the target size.
            speed = self.approach_speed * (1 - height_fraction / self.target_fraction)
            speed = max(speed, 0.1)
            if hasattr(self.locomotion, 'walk'):
                self.locomotion.walk(Vector3.forward(), speed=speed)
            elif hasattr(self.locomotion, 'drive'):
                self.locomotion.drive(speed, 0)

        return BehaviorStatus.RUNNING