sf-vector-sdk 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sf_vector_sdk-0.2.0.dist-info/METADATA +476 -0
- sf_vector_sdk-0.2.0.dist-info/RECORD +27 -0
- sf_vector_sdk-0.2.0.dist-info/WHEEL +4 -0
- vector_sdk/__init__.py +262 -0
- vector_sdk/client.py +538 -0
- vector_sdk/content_types.py +233 -0
- vector_sdk/generated/embedding_pipeline/content_types/v1/content_types_pb2.py +57 -0
- vector_sdk/generated/embedding_pipeline/content_types/v1/content_types_pb2.pyi +141 -0
- vector_sdk/generated/embedding_pipeline/db/vectors/v1/vectors_pb2.py +58 -0
- vector_sdk/generated/embedding_pipeline/db/vectors/v1/vectors_pb2.pyi +145 -0
- vector_sdk/generated/embedding_pipeline/query/v1/query_pb2.py +58 -0
- vector_sdk/generated/embedding_pipeline/query/v1/query_pb2.pyi +109 -0
- vector_sdk/generated/embedding_pipeline/tools/v1/tools_pb2.py +39 -0
- vector_sdk/generated/embedding_pipeline/tools/v1/tools_pb2.pyi +31 -0
- vector_sdk/hash/__init__.py +31 -0
- vector_sdk/hash/hasher.py +259 -0
- vector_sdk/hash/types.py +67 -0
- vector_sdk/namespaces/__init__.py +13 -0
- vector_sdk/namespaces/base.py +45 -0
- vector_sdk/namespaces/db.py +230 -0
- vector_sdk/namespaces/embeddings.py +268 -0
- vector_sdk/namespaces/search.py +258 -0
- vector_sdk/structured/__init__.py +60 -0
- vector_sdk/structured/router.py +190 -0
- vector_sdk/structured/structured_embeddings.py +431 -0
- vector_sdk/structured/tool_config.py +254 -0
- vector_sdk/types.py +864 -0
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Database namespace for direct database operations (no embedding required).
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Any, Optional
|
|
6
|
+
|
|
7
|
+
import requests
|
|
8
|
+
|
|
9
|
+
from vector_sdk.namespaces.base import BaseNamespace
|
|
10
|
+
from vector_sdk.types import (
|
|
11
|
+
CloneResult,
|
|
12
|
+
DeleteFromNamespaceResult,
|
|
13
|
+
LookupResult,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class DBNamespace(BaseNamespace):
    """
    Direct database operations against the query-gateway HTTP API.

    Every call here is a plain HTTP request that bypasses the Redis Streams
    queue; none of these operations require embedding a query first.

    Example:
        ```python
        client = VectorClient(
            redis_url="redis://localhost:6379",
            http_url="http://localhost:8080",
        )

        # Lookup documents by ID
        result = client.db.get_by_ids(
            ids=["doc1", "doc2"],
            database="turbopuffer",
            namespace="my_namespace",
        )

        # Find by metadata
        result = client.db.find_by_metadata(
            filters={"userId": "user123"},
            database="mongodb",
            collection="vectors",
            database_name="mydb",
        )
        ```
    """

    def _post_json(self, url: str, body: dict[str, Any]) -> dict[str, Any]:
        # Shared POST helper: send the JSON body, fail loudly on HTTP error
        # statuses, and hand back the decoded JSON response.
        response = requests.post(url, json=body, timeout=30)
        response.raise_for_status()
        return response.json()

    def get_by_ids(
        self,
        ids: list[str],
        database: str,
        namespace: Optional[str] = None,
        collection: Optional[str] = None,
        database_name: Optional[str] = None,
        include_vectors: bool = False,
        include_metadata: bool = True,
    ) -> LookupResult:
        """
        Fetch documents by their IDs.

        Args:
            ids: Document/vector IDs to retrieve (at most 100 per call)
            database: Target vector database ("mongodb", "turbopuffer", "pinecone")
            namespace: Namespace for TurboPuffer/Pinecone
            collection: MongoDB collection name (doubles as the Pinecone index name)
            database_name: MongoDB database name
            include_vectors: Include vector values in the response
            include_metadata: Include metadata in the response (default: True)

        Returns:
            LookupResult with the retrieved documents

        Raises:
            ValueError: If http_url is not configured, ids is empty, or more
                than 100 IDs are supplied
            requests.HTTPError: If the request fails
        """
        http_url = self._require_http_url("get_by_ids")

        if not ids:
            raise ValueError("ids list cannot be empty")

        if len(ids) > 100:
            raise ValueError("Maximum 100 IDs per request")

        payload = {
            "ids": ids,
            "namespace": namespace,
            "collection": collection,
            "database": database_name,
            "includeVectors": include_vectors,
            "includeMetadata": include_metadata,
        }
        data = self._post_json(f"{http_url}/v1/lookup/{database}", payload)
        return LookupResult.from_dict(data)

    def find_by_metadata(
        self,
        filters: dict[str, Any],
        database: str,
        namespace: Optional[str] = None,
        collection: Optional[str] = None,
        database_name: Optional[str] = None,
        limit: int = 100,
        include_vectors: bool = False,
    ) -> LookupResult:
        """
        Search for documents whose metadata matches the given filters.

        Args:
            filters: Metadata key-value pairs to match
            database: Target vector database ("mongodb", "turbopuffer", "pinecone")
            namespace: Namespace for TurboPuffer/Pinecone
            collection: MongoDB collection name (doubles as the Pinecone index name)
            database_name: MongoDB database name
            limit: Maximum number of results (default: 100; capped at 1000)
            include_vectors: Include vector values in the response

        Returns:
            LookupResult with the matched documents

        Raises:
            ValueError: If http_url is not configured or filters is empty
            requests.HTTPError: If the request fails
        """
        http_url = self._require_http_url("find_by_metadata")

        if not filters:
            raise ValueError("filters dict cannot be empty")

        payload = {
            "filters": filters,
            "namespace": namespace,
            "collection": collection,
            "database": database_name,
            # Silently cap at the server-side maximum rather than erroring.
            "limit": min(limit, 1000),
            "includeVectors": include_vectors,
        }
        data = self._post_json(f"{http_url}/v1/search/{database}", payload)
        return LookupResult.from_dict(data)

    def clone(
        self,
        id: str,
        source_namespace: str,
        destination_namespace: str,
    ) -> CloneResult:
        """
        Clone a document from one TurboPuffer namespace to another.

        Fetches the document (vector plus metadata) from the source namespace
        and writes it to the destination. Vectors are stored as f16 in the
        destination regardless of the source format.

        Args:
            id: Document ID to clone
            source_namespace: Namespace to clone from
            destination_namespace: Namespace to clone to

        Returns:
            CloneResult containing success status and timing

        Raises:
            ValueError: If http_url is not configured or a required param is missing
            requests.HTTPError: If the request fails
        """
        http_url = self._require_http_url("clone")

        # Reject missing/empty required arguments, first offender wins.
        for value, label in (
            (id, "id"),
            (source_namespace, "source_namespace"),
            (destination_namespace, "destination_namespace"),
        ):
            if not value:
                raise ValueError(f"{label} is required")

        data = self._post_json(
            f"{http_url}/v1/clone/turbopuffer",
            {
                "id": id,
                "sourceNamespace": source_namespace,
                "destinationNamespace": destination_namespace,
            },
        )
        return CloneResult.from_dict(data)

    def delete(
        self,
        id: str,
        namespace: str,
    ) -> DeleteFromNamespaceResult:
        """
        Delete a document from a TurboPuffer namespace.

        Args:
            id: Document ID to delete
            namespace: Namespace to delete from

        Returns:
            DeleteFromNamespaceResult containing success status and timing

        Raises:
            ValueError: If http_url is not configured or a required param is missing
            requests.HTTPError: If the request fails
        """
        http_url = self._require_http_url("delete")

        for value, label in ((id, "id"), (namespace, "namespace")):
            if not value:
                raise ValueError(f"{label} is required")

        data = self._post_json(
            f"{http_url}/v1/delete/turbopuffer",
            {
                "id": id,
                "namespace": namespace,
            },
        )
        return DeleteFromNamespaceResult.from_dict(data)
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Embeddings namespace for creating and managing vector embeddings.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
import time
import uuid
from datetime import datetime
from typing import Any, Optional

from vector_sdk.namespaces.base import BaseNamespace
from vector_sdk.types import (
    EmbeddingConfigOverride,
    EmbeddingRequest,
    EmbeddingResult,
    StorageConfig,
    TextInput,
    get_stream_for_priority,
    validate_model,
)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class EmbeddingsNamespace(BaseNamespace):
    """
    Namespace for embedding generation operations.

    Example:
        ```python
        client = VectorClient("redis://localhost:6379")

        # Create embeddings asynchronously
        request_id = client.embeddings.create(
            texts=[{"id": "doc1", "text": "Hello world"}],
            content_type="document",
        )

        # Wait for the result
        result = client.embeddings.wait_for(request_id)

        # Or do both in one call
        result = client.embeddings.create_and_wait(
            texts=[{"id": "doc1", "text": "Hello world"}],
            content_type="document",
        )
        ```
    """

    @staticmethod
    def _to_text_input(t: Any) -> TextInput:
        """Coerce a raw entry (TextInput or dict with id/text/document) into a TextInput."""
        if isinstance(t, TextInput):
            return t
        if isinstance(t, dict):
            return TextInput(
                id=t["id"],
                text=t["text"],
                document=t.get("document"),
            )
        raise ValueError(f"Invalid text input type: {type(t)}")

    def _await_result(self, pubsub: Any, request_id: str, timeout: int) -> EmbeddingResult:
        """
        Poll an already-subscribed pubsub until a result message arrives.

        Shared by `wait_for()` and `create_and_wait()` so the wait loop exists
        in exactly one place. Uses a monotonic clock for the deadline so
        wall-clock adjustments (NTP jumps, DST) cannot stretch or shrink the
        timeout.

        Args:
            pubsub: A Redis pubsub already subscribed to the result channel
            request_id: Request ID, used only in the timeout error message
            timeout: Maximum time to wait in seconds

        Returns:
            The embedding result

        Raises:
            TimeoutError: If no result is received within the timeout
        """
        deadline = time.monotonic() + timeout
        while True:
            message = pubsub.get_message(timeout=1.0)
            # Skip subscribe/unsubscribe confirmations; only real messages count.
            if message and message["type"] == "message":
                return EmbeddingResult.from_dict(json.loads(message["data"]))

            if time.monotonic() >= deadline:
                raise TimeoutError(
                    f"No result received for {request_id} within {timeout}s"
                )

    def create(
        self,
        texts: list[dict[str, Any]],
        content_type: str,
        priority: str = "normal",
        storage: Optional[StorageConfig] = None,
        metadata: Optional[dict[str, str]] = None,
        request_id: Optional[str] = None,
        embedding_model: Optional[str] = None,
        embedding_dimensions: Optional[int] = None,
    ) -> str:
        """
        Create embeddings for the given texts.

        Submits an embedding request to the gateway and returns immediately
        with a request ID. Use `wait_for()` to get the result, or use
        `create_and_wait()` for a combined operation.

        Args:
            texts: List of text inputs. Each item should have:
                - id: Unique identifier for the text
                - text: The actual text content to embed
                - document: (optional) Full document to store with embedding
            content_type: Type of content being embedded (e.g., "topic", "flashcard")
            priority: Queue priority - one of "critical", "high", "normal", "low"
            storage: Configuration for where to store embeddings
            metadata: Optional key-value pairs for tracking
            request_id: Optional custom request ID (auto-generated if not provided)
            embedding_model: Optional embedding model override
            embedding_dimensions: Optional embedding dimensions override

        Returns:
            The request ID for tracking the request

        Raises:
            ValueError: If texts list is empty or contains an invalid entry
            ModelValidationError: If embedding model is not supported
        """
        if not texts:
            raise ValueError("texts list cannot be empty")

        # Validate the model override before doing any other work.
        if embedding_model:
            validate_model(embedding_model, embedding_dimensions)

        if request_id is None:
            request_id = str(uuid.uuid4())

        text_inputs = [self._to_text_input(t) for t in texts]

        # Only build an override config when the caller actually overrides something.
        embedding_config = None
        if embedding_model or embedding_dimensions:
            embedding_config = EmbeddingConfigOverride(
                model=embedding_model,
                dimensions=embedding_dimensions,
            )

        request = EmbeddingRequest(
            request_id=request_id,
            content_type=content_type,
            priority=priority,
            texts=text_inputs,
            storage=storage,
            embedding_config=embedding_config,
            metadata=metadata or {},
            # NOTE(review): utcnow() is naive and deprecated since 3.12; kept
            # to preserve the serialized payload shape — confirm downstream
            # consumers before switching to datetime.now(timezone.utc).
            created_at=datetime.utcnow(),
        )

        # Publish to the Redis Stream that matches the requested priority.
        stream = get_stream_for_priority(priority)
        self._redis.xadd(stream, {"payload": json.dumps(request.to_dict())})

        return request_id

    def wait_for(
        self,
        request_id: str,
        timeout: int = 60,
    ) -> EmbeddingResult:
        """
        Wait for an embedding request to complete.

        Note: this subscribes *after* the request was submitted, so a result
        published before this call is missed; use `create_and_wait()` when
        that race matters.

        Args:
            request_id: The request ID to wait for
            timeout: Maximum time to wait in seconds (default: 60)

        Returns:
            The embedding result

        Raises:
            TimeoutError: If no result is received within the timeout
        """
        channel = f"embedding:result:{request_id}"
        pubsub = self._redis.pubsub()
        pubsub.subscribe(channel)

        try:
            return self._await_result(pubsub, request_id, timeout)
        finally:
            pubsub.unsubscribe(channel)
            pubsub.close()

    def create_and_wait(
        self,
        texts: list[dict[str, Any]],
        content_type: str,
        priority: str = "normal",
        storage: Optional[StorageConfig] = None,
        metadata: Optional[dict[str, str]] = None,
        embedding_model: Optional[str] = None,
        embedding_dimensions: Optional[int] = None,
        timeout: int = 60,
    ) -> EmbeddingResult:
        """
        Create embeddings and wait for the result.

        Subscribes to the result channel BEFORE submitting the request,
        ensuring no race condition where the result is published before
        we're listening.

        Args:
            texts: List of text inputs
            content_type: Type of content
            priority: Queue priority
            storage: Storage configuration
            metadata: Optional metadata
            embedding_model: Optional embedding model override
            embedding_dimensions: Optional embedding dimensions override
            timeout: Maximum time to wait in seconds

        Returns:
            The embedding result

        Raises:
            TimeoutError: If no result is received within the timeout
        """
        # Generate request ID upfront so we can subscribe before submitting.
        request_id = str(uuid.uuid4())
        channel = f"embedding:result:{request_id}"

        pubsub = self._redis.pubsub()
        pubsub.subscribe(channel)

        try:
            # Submit only after the subscription is active.
            self.create(
                texts=texts,
                content_type=content_type,
                priority=priority,
                storage=storage,
                metadata=metadata,
                request_id=request_id,
                embedding_model=embedding_model,
                embedding_dimensions=embedding_dimensions,
            )
            return self._await_result(pubsub, request_id, timeout)
        finally:
            pubsub.unsubscribe(channel)
            pubsub.close()

    def get_queue_depth(self) -> dict[str, int]:
        """
        Get the current queue depth for each priority level.

        Returns:
            Dictionary mapping stream name to pending message count (0 when
            a stream does not exist or cannot be inspected)
        """
        depths: dict[str, int] = {}
        for priority in ("critical", "high", "normal", "low"):
            stream = f"embedding:{priority}"
            try:
                info = self._redis.xinfo_stream(stream)
                depths[stream] = info.get("length", 0)
            except Exception:
                # Best-effort: a stream that hasn't been created yet (or a
                # transient Redis error) simply reports a depth of 0.
                depths[stream] = 0

        return depths