foodforthought-cli 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +12 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/cli.py +855 -4551
  9. ate/client.py +90 -0
  10. ate/commands/__init__.py +168 -0
  11. ate/commands/auth.py +389 -0
  12. ate/commands/bridge.py +448 -0
  13. ate/commands/data.py +185 -0
  14. ate/commands/deps.py +111 -0
  15. ate/commands/generate.py +384 -0
  16. ate/commands/memory.py +907 -0
  17. ate/commands/parts.py +166 -0
  18. ate/commands/primitive.py +399 -0
  19. ate/commands/protocol.py +288 -0
  20. ate/commands/recording.py +524 -0
  21. ate/commands/repo.py +154 -0
  22. ate/commands/simulation.py +291 -0
  23. ate/commands/skill.py +303 -0
  24. ate/commands/skills.py +487 -0
  25. ate/commands/team.py +147 -0
  26. ate/commands/workflow.py +271 -0
  27. ate/detection/__init__.py +38 -0
  28. ate/detection/base.py +142 -0
  29. ate/detection/color_detector.py +402 -0
  30. ate/detection/trash_detector.py +322 -0
  31. ate/drivers/__init__.py +18 -6
  32. ate/drivers/ble_transport.py +405 -0
  33. ate/drivers/mechdog.py +360 -24
  34. ate/drivers/wifi_camera.py +477 -0
  35. ate/interfaces/__init__.py +16 -0
  36. ate/interfaces/base.py +2 -0
  37. ate/interfaces/sensors.py +247 -0
  38. ate/llm_proxy.py +239 -0
  39. ate/memory/__init__.py +35 -0
  40. ate/memory/cloud.py +244 -0
  41. ate/memory/context.py +269 -0
  42. ate/memory/embeddings.py +184 -0
  43. ate/memory/export.py +26 -0
  44. ate/memory/merge.py +146 -0
  45. ate/memory/migrate/__init__.py +34 -0
  46. ate/memory/migrate/base.py +89 -0
  47. ate/memory/migrate/pipeline.py +189 -0
  48. ate/memory/migrate/sources/__init__.py +13 -0
  49. ate/memory/migrate/sources/chroma.py +170 -0
  50. ate/memory/migrate/sources/pinecone.py +120 -0
  51. ate/memory/migrate/sources/qdrant.py +110 -0
  52. ate/memory/migrate/sources/weaviate.py +160 -0
  53. ate/memory/reranker.py +353 -0
  54. ate/memory/search.py +26 -0
  55. ate/memory/store.py +548 -0
  56. ate/recording/__init__.py +42 -3
  57. ate/recording/session.py +12 -2
  58. ate/recording/visual.py +416 -0
  59. ate/robot/__init__.py +142 -0
  60. ate/robot/agentic_servo.py +856 -0
  61. ate/robot/behaviors.py +493 -0
  62. ate/robot/ble_capture.py +1000 -0
  63. ate/robot/ble_enumerate.py +506 -0
  64. ate/robot/calibration.py +88 -3
  65. ate/robot/calibration_state.py +388 -0
  66. ate/robot/commands.py +143 -11
  67. ate/robot/direction_calibration.py +554 -0
  68. ate/robot/discovery.py +104 -2
  69. ate/robot/llm_system_id.py +654 -0
  70. ate/robot/locomotion_calibration.py +508 -0
  71. ate/robot/marker_generator.py +611 -0
  72. ate/robot/perception.py +502 -0
  73. ate/robot/primitives.py +614 -0
  74. ate/robot/profiles.py +6 -0
  75. ate/robot/registry.py +5 -2
  76. ate/robot/servo_mapper.py +1153 -0
  77. ate/robot/skill_upload.py +285 -3
  78. ate/robot/target_calibration.py +500 -0
  79. ate/robot/teach.py +515 -0
  80. ate/robot/types.py +242 -0
  81. ate/robot/visual_labeler.py +9 -0
  82. ate/robot/visual_servo_loop.py +494 -0
  83. ate/robot/visual_servoing.py +570 -0
  84. ate/robot/visual_system_id.py +906 -0
  85. ate/transports/__init__.py +121 -0
  86. ate/transports/base.py +394 -0
  87. ate/transports/ble.py +405 -0
  88. ate/transports/hybrid.py +444 -0
  89. ate/transports/serial.py +345 -0
  90. ate/urdf/__init__.py +30 -0
  91. ate/urdf/capture.py +582 -0
  92. ate/urdf/cloud.py +491 -0
  93. ate/urdf/collision.py +271 -0
  94. ate/urdf/commands.py +708 -0
  95. ate/urdf/depth.py +360 -0
  96. ate/urdf/inertial.py +312 -0
  97. ate/urdf/kinematics.py +330 -0
  98. ate/urdf/lifting.py +415 -0
  99. ate/urdf/meshing.py +300 -0
  100. ate/urdf/models/__init__.py +110 -0
  101. ate/urdf/models/depth_anything.py +253 -0
  102. ate/urdf/models/sam2.py +324 -0
  103. ate/urdf/motion_analysis.py +396 -0
  104. ate/urdf/pipeline.py +468 -0
  105. ate/urdf/scale.py +256 -0
  106. ate/urdf/scan_session.py +411 -0
  107. ate/urdf/segmentation.py +299 -0
  108. ate/urdf/synthesis.py +319 -0
  109. ate/urdf/topology.py +336 -0
  110. ate/urdf/validation.py +371 -0
  111. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/METADATA +1 -1
  112. foodforthought_cli-0.3.1.dist-info/RECORD +166 -0
  113. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/WHEEL +1 -1
  114. foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
  115. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/entry_points.txt +0 -0
  116. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,120 @@
1
+ """Pinecone migration source implementation."""
2
+ from typing import Optional, List, Tuple
3
+
4
+ try:
5
+ import pinecone
6
+ except ImportError:
7
+ pinecone = None
8
+
9
+ from ..base import MigrationSource, VectorRecord, MigrationEstimate
10
+
11
+
12
class PineconeMigrationSource(MigrationSource):
    """Migration source for Pinecone vector database.

    Streams vectors out of a Pinecone index (optionally scoped to a
    namespace) via the list/fetch pagination API, yielding
    ``VectorRecord`` batches for the migration pipeline.
    """

    def __init__(self, api_key: str, index_name: str, environment: str, namespace: Optional[str] = None):
        """Initialize Pinecone migration source.

        Args:
            api_key: Pinecone API key
            index_name: Name of the Pinecone index
            environment: Pinecone environment
            namespace: Optional namespace to migrate from
        """
        self.api_key = api_key
        self.index_name = index_name
        self.environment = environment
        self.namespace = namespace
        self.index = None  # set by connect()

    @property
    def source_type(self) -> str:
        """Return the source type identifier."""
        return "pinecone"

    @property
    def source_name(self) -> str:
        """Return the source name (the index name)."""
        return self.index_name

    def connect(self) -> None:
        """Connect to Pinecone and open the configured index.

        Raises:
            ImportError: If the pinecone library is not installed.
        """
        if pinecone is None:
            raise ImportError("pinecone library is required for Pinecone migration")

        pinecone.init(api_key=self.api_key, environment=self.environment)
        self.index = pinecone.Index(self.index_name)

    def estimate(self) -> MigrationEstimate:
        """Estimate migration size and time.

        Returns:
            MigrationEstimate with vector count, dimensions, and rough
            byte/time estimates.

        Raises:
            RuntimeError: If connect() has not been called.
        """
        if not self.index:
            raise RuntimeError("Must call connect() first")

        stats = self.index.describe_index_stats()

        # Prefer the per-namespace count when a namespace is configured.
        if self.namespace and 'namespaces' in stats and self.namespace in stats['namespaces']:
            total_vectors = stats['namespaces'][self.namespace]['vector_count']
        else:
            total_vectors = stats.get('total_vector_count', 0)

        dimensions = stats.get('dimension', 768)

        # Rough estimates: 4 bytes per float32 plus ~1KB metadata overhead
        # per vector; throughput assumed at ~1000 vectors/second.
        bytes_per_vector = dimensions * 4 + 1024
        estimated_mv2_bytes = total_vectors * bytes_per_vector
        estimated_seconds = total_vectors / 1000.0

        return MigrationEstimate(
            total_vectors=total_vectors,
            dimensions=dimensions,
            estimated_mv2_bytes=estimated_mv2_bytes,
            estimated_seconds=estimated_seconds
        )

    def fetch_batch(self, batch_size: int = 10000, cursor: Optional[str] = None) -> Tuple[List[VectorRecord], Optional[str]]:
        """Fetch a batch of records from Pinecone.

        Args:
            batch_size: Maximum number of vectors to return.
            cursor: Opaque pagination token from a previous call, or None
                to start from the beginning.

        Returns:
            A ``(records, next_cursor)`` tuple; ``next_cursor`` is None
            when there are no further pages.

        Raises:
            RuntimeError: If connect() has not been called.
        """
        if not self.index:
            raise RuntimeError("Must call connect() first")

        # List vector IDs with pagination
        list_response = self.index.list(
            namespace=self.namespace,
            limit=batch_size,
            pagination_token=cursor
        )

        vector_ids = [vec['id'] for vec in list_response.get('vectors', [])]

        if not vector_ids:
            return [], None

        # Fetch the actual vectors by ID
        fetch_response = self.index.fetch(vector_ids, namespace=self.namespace)

        records = []
        for vector_id, vector_data in fetch_response.get('vectors', {}).items():
            # Extract text from metadata if present
            metadata = vector_data.get('metadata', {})
            text = metadata.get('text')

            # Create clean metadata without text (since text has its own field)
            clean_metadata = {k: v for k, v in metadata.items() if k != 'text'}

            # Fix: use the dict key as the record id. The original read
            # vector_data['id'], which raises KeyError when the fetch
            # response omits a per-vector 'id' field; the mapping key is
            # always the vector's id.
            record = VectorRecord(
                id=vector_id,
                vector=vector_data['values'],
                text=text,
                metadata=clean_metadata
            )
            records.append(record)

        # Get next cursor from pagination
        next_cursor = list_response.get('pagination', {}).get('next')

        return records, next_cursor

    def close(self) -> None:
        """Close the connection and clean up resources."""
        # Pinecone doesn't require explicit cleanup
        self.index = None
@@ -0,0 +1,110 @@
1
+ """Qdrant migration source implementation."""
2
+ from typing import Optional, List, Tuple
3
+
4
+ try:
5
+ from qdrant_client import QdrantClient
6
+ except ImportError:
7
+ QdrantClient = None
8
+
9
+ from ..base import MigrationSource, VectorRecord, MigrationEstimate
10
+
11
+
12
class QdrantMigrationSource(MigrationSource):
    """Migration source for Qdrant vector database.

    Scrolls through all points of a Qdrant collection, yielding
    ``VectorRecord`` batches for the migration pipeline.
    """

    def __init__(self, url: str, collection_name: str, api_key: Optional[str] = None):
        """Initialize Qdrant migration source.

        Args:
            url: Qdrant server URL
            collection_name: Name of the collection to migrate from
            api_key: Optional API key for authentication
        """
        self.url = url
        self.collection_name = collection_name
        self.api_key = api_key
        self.client = None  # set by connect()

    @property
    def source_type(self) -> str:
        """Return the source type identifier."""
        return "qdrant"

    @property
    def source_name(self) -> str:
        """Return the source name (the collection name)."""
        return self.collection_name

    def connect(self) -> None:
        """Connect to Qdrant.

        Raises:
            ImportError: If the qdrant-client library is not installed.
        """
        if QdrantClient is None:
            raise ImportError("qdrant-client library is required for Qdrant migration")

        self.client = QdrantClient(url=self.url, api_key=self.api_key)

    def estimate(self) -> MigrationEstimate:
        """Estimate migration size and time.

        Returns:
            MigrationEstimate with vector count, dimensions, and rough
            byte/time estimates.

        Raises:
            RuntimeError: If connect() has not been called.
        """
        if not self.client:
            raise RuntimeError("Must call connect() first")

        collection_info = self.client.get_collection(self.collection_name)

        total_vectors = collection_info.points_count
        dimensions = collection_info.config.params.vectors.size

        # Rough estimates: 4 bytes per float32 plus ~1KB metadata overhead
        # per vector; throughput assumed at ~1000 vectors/second.
        bytes_per_vector = dimensions * 4 + 1024
        estimated_mv2_bytes = total_vectors * bytes_per_vector
        estimated_seconds = total_vectors / 1000.0

        return MigrationEstimate(
            total_vectors=total_vectors,
            dimensions=dimensions,
            estimated_mv2_bytes=estimated_mv2_bytes,
            estimated_seconds=estimated_seconds
        )

    def fetch_batch(self, batch_size: int = 10000, cursor: Optional[str] = None) -> Tuple[List[VectorRecord], Optional[str]]:
        """Fetch a batch of records from Qdrant.

        Args:
            batch_size: Maximum number of points to return.
            cursor: Pagination cursor from a previous call (a stringified
                point id/offset), or None to start from the beginning.

        Returns:
            A ``(records, next_cursor)`` tuple; ``next_cursor`` is None
            when there are no further pages.

        Raises:
            RuntimeError: If connect() has not been called.
        """
        if not self.client:
            raise RuntimeError("Must call connect() first")

        # Fix: the cursor returned below is str(next_page_offset), but
        # Qdrant point offsets may be integers. Passing the stringified
        # value straight back breaks collections with integer point IDs,
        # so convert purely-numeric cursors back to int.
        offset = cursor
        if cursor is not None and cursor.isdigit():
            offset = int(cursor)

        # Scroll through points with vectors and payloads included
        points, next_page_offset = self.client.scroll(
            collection_name=self.collection_name,
            limit=batch_size,
            offset=offset,
            with_payload=True,
            with_vectors=True
        )

        records = []
        for point in points:
            # Extract text from payload if present
            payload = point.payload or {}
            text = payload.get('text')

            # Create clean metadata without text (text has its own field)
            clean_metadata = {k: v for k, v in payload.items() if k != 'text'}

            record = VectorRecord(
                id=str(point.id),
                vector=point.vector,
                text=text,
                metadata=clean_metadata
            )
            records.append(record)

        # Convert next_page_offset to a cursor string (round-tripped above)
        next_cursor = str(next_page_offset) if next_page_offset is not None else None

        return records, next_cursor

    def close(self) -> None:
        """Close the connection and clean up resources."""
        if self.client:
            self.client.close()
            self.client = None
@@ -0,0 +1,160 @@
1
+ """Weaviate migration source implementation."""
2
+ from typing import Optional, List, Tuple, Dict, Any
3
+
4
+ try:
5
+ import weaviate
6
+ WEAVIATE_AVAILABLE = True
7
+ except ImportError:
8
+ # Create a simple mock structure for testing
9
+ class WeaviateMock:
10
+ class Client:
11
+ pass
12
+ class auth:
13
+ class AuthApiKey:
14
+ def __init__(self, **kwargs):
15
+ pass
16
+
17
+ weaviate = WeaviateMock()
18
+ WEAVIATE_AVAILABLE = False
19
+
20
+ from ..base import MigrationSource, VectorRecord, MigrationEstimate
21
+
22
+
23
class WeaviateMigrationSource(MigrationSource):
    """Migration source for Weaviate vector database.

    Pages through all objects of a Weaviate class via offset-based
    GraphQL queries, yielding ``VectorRecord`` batches for the
    migration pipeline.
    """

    def __init__(self, host: str, class_name: str, api_key: Optional[str] = None, auth_config: Optional[Dict[str, Any]] = None):
        """Initialize Weaviate migration source.

        Args:
            host: Weaviate server host URL
            class_name: Name of the class to migrate from
            api_key: Optional API key for authentication
            auth_config: Optional authentication configuration
        """
        self.host = host
        self.class_name = class_name
        self.api_key = api_key
        self.auth_config = auth_config or {}
        self.client = None  # set by connect()

    @property
    def source_type(self) -> str:
        """Return the source type identifier."""
        return "weaviate"

    @property
    def source_name(self) -> str:
        """Return the source name (the class name)."""
        return self.class_name

    def connect(self) -> None:
        """Connect to Weaviate.

        Raises:
            ImportError: If the weaviate-client library is not installed.
        """
        # Fix: the original checked `weaviate.Client is None`, but when the
        # import fails this module binds a WeaviateMock whose Client is a
        # class (never None), so the guard could never fire and connect()
        # failed later with a confusing TypeError. Use the availability
        # flag set at import time instead.
        if not WEAVIATE_AVAILABLE:
            raise ImportError("weaviate-client library is required for Weaviate migration")

        # Build client configuration, with API-key auth when provided
        if self.api_key:
            auth = weaviate.auth.AuthApiKey(api_key=self.api_key)
            self.client = weaviate.Client(url=self.host, auth_client_secret=auth)
        else:
            self.client = weaviate.Client(url=self.host)

    def estimate(self) -> MigrationEstimate:
        """Estimate migration size and time.

        Returns:
            MigrationEstimate with vector count, dimensions, and rough
            byte/time estimates.

        Raises:
            RuntimeError: If connect() has not been called.
        """
        if not self.client:
            raise RuntimeError("Must call connect() first")

        # Get aggregate object count via the GraphQL fluent API
        result = (self.client.query
                  .aggregate(self.class_name)
                  .with_meta_count()
                  .do())

        aggregate_data = result.get('data', {}).get('Aggregate', {}).get(self.class_name, [])

        if aggregate_data:
            total_vectors = aggregate_data[0].get('meta', {}).get('count', 0)
        else:
            total_vectors = 0

        # Get class schema to determine vector dimensions
        schema = self.client.schema.get()
        dimensions = 768  # Default assumption

        for class_def in schema.get('classes', []):
            if class_def['class'] == self.class_name:
                # Look for vectorizer configuration
                if 'vectorizer' in class_def:
                    # Simplified heuristic; a real implementation would
                    # inspect the vectorizer's module config for dimensions.
                    dimensions = 1536 if 'openai' in class_def['vectorizer'].lower() else 768
                break

        # Rough estimates: 4 bytes per float32 plus ~2KB metadata overhead
        # per object; throughput assumed at ~800 vectors/second.
        bytes_per_vector = dimensions * 4 + 2048
        estimated_mv2_bytes = total_vectors * bytes_per_vector
        estimated_seconds = total_vectors / 800.0

        return MigrationEstimate(
            total_vectors=total_vectors,
            dimensions=dimensions,
            estimated_mv2_bytes=estimated_mv2_bytes,
            estimated_seconds=estimated_seconds
        )

    def fetch_batch(self, batch_size: int = 10000, cursor: Optional[str] = None) -> Tuple[List[VectorRecord], Optional[str]]:
        """Fetch a batch of records from Weaviate.

        Args:
            batch_size: Maximum number of objects to return.
            cursor: Stringified integer offset from a previous call, or
                None to start from the beginning.

        Returns:
            A ``(records, next_cursor)`` tuple; ``next_cursor`` is None
            when the last page was shorter than ``batch_size``.

        Raises:
            RuntimeError: If connect() has not been called, or if the
                GraphQL query returns errors.
        """
        if not self.client:
            raise RuntimeError("Must call connect() first")

        # Parse cursor as an integer offset; fall back to 0 on garbage
        offset = 0
        if cursor:
            try:
                offset = int(cursor)
            except (ValueError, TypeError):
                offset = 0

        # Query the class, requesting id and vector in _additional
        result = (self.client.query
                  .get(self.class_name)
                  .with_additional(['id', 'vector'])
                  .with_limit(batch_size)
                  .with_offset(offset)
                  .do())

        if 'errors' in result:
            # RuntimeError instead of bare Exception: still caught by any
            # caller handling Exception, but no longer a blanket type.
            raise RuntimeError(f"Weaviate query error: {result['errors']}")

        objects = result.get('data', {}).get('Get', {}).get(self.class_name, [])

        records = []
        for obj in objects:
            additional = obj.get('_additional', {})
            obj_id = additional.get('id', 'unknown')
            vector = additional.get('vector', [])

            # Properties are everything except the _additional block
            properties = {k: v for k, v in obj.items() if not k.startswith('_')}
            text = properties.get('text') or properties.get('content')

            # Create clean metadata without the text-bearing fields
            clean_metadata = {k: v for k, v in properties.items() if k not in ('text', 'content')}

            record = VectorRecord(
                id=obj_id,
                vector=vector,
                text=text,
                metadata=clean_metadata
            )
            records.append(record)

        # A full page implies there may be more results; a short page ends
        # pagination. NOTE(review): offset pagination is capped by
        # Weaviate's QUERY_MAXIMUM_RESULTS — cursor (after) pagination
        # would be needed for very large classes; confirm deployment limits.
        next_cursor = str(offset + batch_size) if len(objects) == batch_size else None

        return records, next_cursor

    def close(self) -> None:
        """Close the connection and clean up resources."""
        # Weaviate client doesn't require explicit cleanup
        self.client = None