nucliadb-utils 5.0.0.post787__py3-none-any.whl → 5.0.0.post796__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/nucliadb_utils/aiopynecone/client.py
+++ b/nucliadb_utils/aiopynecone/client.py
@@ -17,11 +17,24 @@
  # You should have received a copy of the GNU Affero General Public License
  # along with this program. If not, see <http://www.gnu.org/licenses/>.
  #
+ import asyncio
+ import json
  import logging
+ import random
+ from collections.abc import AsyncIterable, Iterable
+ from itertools import islice
+ from typing import Any, AsyncGenerator, Optional

  import httpx

  from nucliadb_telemetry.metrics import Observer
+ from nucliadb_utils.aiopynecone.models import (
+     CreateIndexResponse,
+     ListResponse,
+     QueryResponse,
+     UpsertRequest,
+     Vector,
+ )

  logger = logging.getLogger(__name__)

@@ -30,54 +43,465 @@ pinecone_observer = Observer(
      labels={"type": ""},
  )

- BASE_URL = "https://api.pinecone.io/"
+ DEFAULT_TIMEOUT = 30
+ CONTROL_PLANE_BASE_URL = "https://api.pinecone.io/"
+ INDEX_HOST_BASE_URL = "https://{index_host}/"
+ BASE_API_HEADERS = {
+     "Content-Type": "application/json",
+     "Accept": "application/json",
+ }
+ MEGA_BYTE = 1024 * 1024
+ MAX_UPSERT_PAYLOAD_SIZE = 2 * MEGA_BYTE
+ MAX_DELETE_BATCH_SIZE = 1000
+
+
+ class PineconeAPIError(Exception):
+     def __init__(
+         self,
+         http_status_code: int,
+         code: Optional[str] = None,
+         message: Optional[str] = None,
+         details: Optional[Any] = None,
+     ):
+         self.http_status_code = http_status_code
+         self.code = code or ""
+         self.message = message or ""
+         self.details = details or {}
+         exc_message = '[{http_status_code}] message="{message}" code={code} details={details}'.format(
+             http_status_code=http_status_code,
+             message=message,
+             code=code,
+             details=details,
+         )
+         super().__init__(exc_message)
+

+ class ControlPlane:
+     """
+     Client for interacting with the Pinecone control plane API.
+     https://docs.pinecone.io/reference/api/control-plane
+     """

- class PineconeClient:
      def __init__(self, api_key: str, http_session: httpx.AsyncClient):
          self.api_key = api_key
-         self.session = http_session
+         self.http_session = http_session

      @pinecone_observer.wrap({"type": "create_index"})
-     async def create_index(self, name: str, dimension: int) -> str:
+     async def create_index(self, name: str, dimension: int, metric: str = "dotproduct") -> str:
+         """
+         Create a new index in Pinecone. It can only create serverless indexes in the AWS us-east-1 region.
+         Params:
+         - `name`: The name of the index.
+         - `dimension`: The dimension of the vectors in the index.
+         - `metric`: The similarity metric to use. Default is "dotproduct".
+         Returns:
+         - The index host to be used for data plane operations.
+         """
          payload = {
              "name": name,
              "dimension": dimension,
-             "metric": "dotproduct",
+             "metric": metric,
              "spec": {"serverless": {"cloud": "aws", "region": "us-east-1"}},
          }
          headers = {"Api-Key": self.api_key}
-         response = await self.session.post("/indexes", json=payload, headers=headers)
-         response.raise_for_status()
-         response_json = response.json()
-         return response_json["host"]
+         http_response = await self.http_session.post("/indexes", json=payload, headers=headers)
+         raise_for_status(http_response)
+         response = CreateIndexResponse.model_validate(http_response.json())
+         return response.host

      @pinecone_observer.wrap({"type": "delete_index"})
      async def delete_index(self, name: str) -> None:
+         """
+         Delete an index in Pinecone.
+         Params:
+         - `name`: The name of the index to delete.
+         """
          headers = {"Api-Key": self.api_key}
-         response = await self.session.delete(f"/indexes/{name}", headers=headers)
-         if response.status_code == 404:
+         response = await self.http_session.delete(f"/indexes/{name}", headers=headers)
+         if response.status_code == 404:  # pragma: no cover
              logger.warning("Pinecone index not found.", extra={"index_name": name})
              return
-         response.raise_for_status()
+         raise_for_status(response)
+
+
+ class DataPlane:
+     """
+     Client for interacting with the Pinecone data plane API, hosted by an index host.
+     https://docs.pinecone.io/reference/api/data-plane
+     """
+
+     def __init__(
+         self, api_key: str, index_host_session: httpx.AsyncClient, timeout: Optional[float] = None
+     ):
+         """
+         Params:
+         - `api_key`: The Pinecone API key.
+         - `index_host_session`: The http session for the index host.
+         - `timeout`: The default timeout for all requests. If not set, the default timeout from httpx.AsyncClient is used.
+         """
+         self.api_key = api_key
+         self.http_session = index_host_session
+         self.client_timeout = timeout
+         self._upsert_batch_size: Optional[int] = None
+
+     def _get_request_timeout(self, timeout: Optional[float] = None) -> Optional[float]:
+         return timeout or self.client_timeout
+
+     @pinecone_observer.wrap({"type": "upsert"})
+     async def upsert(self, vectors: list[Vector], timeout: Optional[float] = None) -> None:
+         """
+         Upsert vectors into the index.
+         Params:
+         - `vectors`: The vectors to upsert.
+         - `timeout`: to control the request timeout. If not set, the default timeout is used.
+         """
+         headers = {"Api-Key": self.api_key}
+         payload = UpsertRequest(vectors=vectors)
+         post_kwargs: dict[str, Any] = {
+             "headers": headers,
+             "json": payload.model_dump(),
+         }
+         request_timeout = self._get_request_timeout(timeout)
+         if request_timeout is not None:
+             post_kwargs["timeout"] = request_timeout
+         response = await self.http_session.post("/vectors/upsert", **post_kwargs)
+         raise_for_status(response)
+
+     def _estimate_upsert_batch_size(self, vectors: list[Vector]) -> int:
+         """
+         Estimate a batch size so that the upsert payload does not exceed the hard limit.
+         https://docs.pinecone.io/reference/quotas-and-limits#hard-limits
+         """
+         if self._upsert_batch_size is not None:
+             # Return the cached value.
+             return self._upsert_batch_size
+         # Take the dimension of the first vector as the vector dimension.
+         # Assumes all vectors have the same dimension.
+         vector_dimension = len(vectors[0].values)
+         # Estimate the metadata size by taking the average of 20 random vectors.
+         metadata_sizes = []
+         for _ in range(20):
+             metadata_sizes.append(len(json.dumps(random.choice(vectors).metadata)))
+         average_metadata_size = sum(metadata_sizes) / len(metadata_sizes)
+         # Estimate the size of the vector payload. 4 bytes per float.
+         vector_size = 4 * vector_dimension + average_metadata_size
+         # Cache the value.
+         self._upsert_batch_size = max(int(MAX_UPSERT_PAYLOAD_SIZE // vector_size), 1)
+         return self._upsert_batch_size
+
+     @pinecone_observer.wrap({"type": "upsert_in_batches"})
+     async def upsert_in_batches(
+         self,
+         vectors: list[Vector],
+         batch_size: Optional[int] = None,
+         max_parallel_batches: int = 1,
+         batch_timeout: Optional[float] = None,
+     ) -> None:
+         """
+         Upsert vectors in batches.
+         Params:
+         - `vectors`: The vectors to upsert.
+         - `batch_size`: to control the number of vectors in each batch.
+         - `max_parallel_batches`: to control the number of batches sent concurrently.
+         - `batch_timeout`: to control the request timeout for each batch.
+         """
+         if batch_size is None:
+             batch_size = self._estimate_upsert_batch_size(vectors)
+
+         semaphore = asyncio.Semaphore(max_parallel_batches)
+
+         async def _upsert_batch(batch):
+             async with semaphore:
+                 await self.upsert(vectors=batch, timeout=batch_timeout)
+
+         tasks = []
+         for batch in batchify(vectors, batch_size):
+             tasks.append(asyncio.create_task(_upsert_batch(batch)))
+
+         await asyncio.gather(*tasks)
+
+     @pinecone_observer.wrap({"type": "delete"})
+     async def delete(self, ids: list[str], timeout: Optional[float] = None) -> None:
+         """
+         Delete vectors by their ids.
+         Maximum number of ids in a single request is 1000.
+         Params:
+         - `ids`: The ids of the vectors to delete.
+         - `timeout`: to control the request timeout. If not set, the default timeout is used.
+         """
+         if len(ids) > MAX_DELETE_BATCH_SIZE:
+             raise ValueError(f"Maximum number of ids in a single request is {MAX_DELETE_BATCH_SIZE}.")
+
+         headers = {"Api-Key": self.api_key}
+         payload = {"ids": ids}
+         post_kwargs: dict[str, Any] = {
+             "headers": headers,
+             "json": payload,
+         }
+         request_timeout = self._get_request_timeout(timeout)
+         if request_timeout is not None:
+             post_kwargs["timeout"] = request_timeout
+         response = await self.http_session.post("/vectors/delete", **post_kwargs)
+         raise_for_status(response)
+
+     @pinecone_observer.wrap({"type": "list_page"})
+     async def list_page(
+         self,
+         id_prefix: Optional[str] = None,
+         limit: int = 100,
+         pagination_token: Optional[str] = None,
+         timeout: Optional[float] = None,
+     ) -> ListResponse:
+         """
+         List vectors in a paginated manner.
+         Params:
+         - `id_prefix`: to filter vectors by their id prefix.
+         - `limit`: to control the number of vectors fetched in each page.
+         - `pagination_token`: to fetch the next page. The token is provided in the response
+           if there are more pages to fetch.
+         - `timeout`: to control the request timeout. If not set, the default timeout is used.
+         """
+         headers = {"Api-Key": self.api_key}
+         params = {"limit": str(limit)}
+         if id_prefix is not None:
+             params["prefix"] = id_prefix
+         if pagination_token is not None:
+             params["paginationToken"] = pagination_token
+
+         get_kwargs: dict[str, Any] = {
+             "headers": headers,
+             "params": params,
+         }
+         request_timeout = self._get_request_timeout(timeout)
+         if request_timeout is not None:
+             get_kwargs["timeout"] = request_timeout
+         response = await self.http_session.get(
+             "/vectors/list",
+             **get_kwargs,
+         )
+         raise_for_status(response)
+         return ListResponse.model_validate(response.json())
+
+     async def list_all(
+         self, id_prefix: Optional[str] = None, page_size: int = 100, page_timeout: Optional[float] = None
+     ) -> AsyncGenerator[str, None]:
+         """
+         Iterate over all vector ids from the index in a paginated manner.
+         Params:
+         - `id_prefix`: to filter vectors by their id prefix.
+         - `page_size`: to control the number of vectors fetched in each page.
+         - `page_timeout`: to control the request timeout for each page. If not set, the default timeout is used.
+         """
+         pagination_token = None
+         while True:
+             response = await self.list_page(
+                 id_prefix=id_prefix,
+                 limit=page_size,
+                 pagination_token=pagination_token,
+                 timeout=page_timeout,
+             )
+             for vector_id in response.vectors:
+                 yield vector_id.id
+             if response.pagination is None:
+                 break
+             pagination_token = response.pagination.next
+
+     @pinecone_observer.wrap({"type": "delete_all"})
+     async def delete_all(self, timeout: Optional[float] = None):
+         """
+         Delete all vectors in the index.
+         Params:
+         - `timeout`: to control the request timeout. If not set, the default timeout is used.
+         """
+         headers = {"Api-Key": self.api_key}
+         payload = {"deleteAll": True, "ids": [], "namespace": ""}
+         post_kwargs: dict[str, Any] = {
+             "headers": headers,
+             "json": payload,
+         }
+         request_timeout = self._get_request_timeout(timeout)
+         if request_timeout is not None:
+             post_kwargs["timeout"] = request_timeout
+         response = await self.http_session.post("/vectors/delete", **post_kwargs)
+         try:
+             raise_for_status(response)
+         except PineconeAPIError as err:
+             if err.http_status_code == 404 and err.code == 5:  # pragma: no cover
+                 # Namespace not found. No vectors to delete.
+                 return
+             raise
+
+     @pinecone_observer.wrap({"type": "delete_by_id_prefix"})
+     async def delete_by_id_prefix(
+         self,
+         id_prefix: str,
+         batch_size: int = MAX_DELETE_BATCH_SIZE,
+         max_parallel_batches: int = 1,
+         batch_timeout: Optional[float] = None,
+     ) -> None:
+         """
+         Delete vectors by their id prefix. It lists all vectors with the given prefix and deletes them in batches.
+         Params:
+         - `id_prefix`: to filter vectors by their id prefix.
+         - `batch_size`: to control the number of vectors deleted in each batch. Maximum is 1000.
+         - `max_parallel_batches`: to control the number of batches sent concurrently.
+         - `batch_timeout`: to control the request timeout for each batch.
+         """
+         if batch_size > MAX_DELETE_BATCH_SIZE:
+             logger.warning(f"Batch size {batch_size} is too large. Limiting to {MAX_DELETE_BATCH_SIZE}.")
+             batch_size = MAX_DELETE_BATCH_SIZE
+
+         semaphore = asyncio.Semaphore(max_parallel_batches)
+
+         async def _delete_batch(batch):
+             async with semaphore:
+                 await self.delete(ids=batch, timeout=batch_timeout)
+
+         tasks = []
+         async_iterable = self.list_all(
+             id_prefix=id_prefix, page_size=batch_size, page_timeout=batch_timeout
+         )
+         async for batch in async_batchify(async_iterable, batch_size):
+             tasks.append(asyncio.create_task(_delete_batch(batch)))
+
+         await asyncio.gather(*tasks)
+
+     @pinecone_observer.wrap({"type": "query"})
+     async def query(
+         self,
+         vector: list[float],
+         top_k: int = 20,
+         include_values: bool = False,
+         include_metadata: bool = False,
+         filter: Optional[dict[str, Any]] = None,
+         timeout: Optional[float] = None,
+     ) -> QueryResponse:
+         """
+         Query the index for similar vectors to the given vector.
+         Params:
+         - `vector`: The query vector.
+         - `top_k`: to control the number of similar vectors to return.
+         - `include_values`: to include the vector values in the response.
+         - `include_metadata`: to include the vector metadata in the response.
+         - `filter`: to filter the vectors by their metadata. See:
+           https://docs.pinecone.io/guides/data/filter-with-metadata#metadata-query-language
+         - `timeout`: to control the request timeout. If not set, the default timeout is used.
+         """
+         headers = {"Api-Key": self.api_key}
+         payload = {
+             "vector": vector,
+             "topK": top_k,
+             "includeValues": include_values,
+             "includeMetadata": include_metadata,
+         }
+         if filter:
+             payload["filter"] = filter
+         post_kwargs: dict[str, Any] = {
+             "headers": headers,
+             "json": payload,
+         }
+         request_timeout = self._get_request_timeout(timeout)
+         if request_timeout is not None:
+             post_kwargs["timeout"] = request_timeout
+         response = await self.http_session.post("/query", **post_kwargs)
+         raise_for_status(response)
+         return QueryResponse.model_validate(response.json())


  class PineconeSession:
      """
-     Wrapper that manages the singletone session around all Pinecone http api interactions.
+     Wrapper class that manages the sessions around all Pinecone http api interactions.
+     Holds a single control plane session and multiple data plane sessions, one for each index host.
      """

      def __init__(self):
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "application/json",
-         }
-         self.http_session = httpx.AsyncClient(base_url=BASE_URL, headers=self.headers)
+         self.control_plane_session = httpx.AsyncClient(
+             base_url=CONTROL_PLANE_BASE_URL, headers=BASE_API_HEADERS, timeout=DEFAULT_TIMEOUT
+         )
+         self.index_host_sessions = {}
+
+     async def __aenter__(self):
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         await self.finalize()

      async def finalize(self):
-         if self.http_session.is_closed:
-             return
-         await self.http_session.aclose()
+         if not self.control_plane_session.is_closed:
+             await self.control_plane_session.aclose()
+         for session in self.index_host_sessions.values():
+             if not session.is_closed:
+                 await session.aclose()
+         self.index_host_sessions.clear()
+
+     def control_plane(self, api_key: str) -> ControlPlane:
+         return ControlPlane(api_key=api_key, http_session=self.control_plane_session)
+
+     def _get_index_host_session(self, index_host: str) -> httpx.AsyncClient:
+         """
+         Get a session for the given index host.
+         Cache http sessions so that they are reused for the same index host.
+         """
+         session = self.index_host_sessions.get(index_host, None)
+         if session is not None:
+             return session
+
+         session = httpx.AsyncClient(
+             base_url=INDEX_HOST_BASE_URL.format(index_host=index_host),
+             headers=BASE_API_HEADERS,
+             timeout=DEFAULT_TIMEOUT,
+         )
+         self.index_host_sessions[index_host] = session
+         return session
+
+     def data_plane(self, api_key: str, index_host: str, timeout: Optional[float] = None) -> DataPlane:
+         index_host_session = self._get_index_host_session(index_host)
+         return DataPlane(api_key=api_key, index_host_session=index_host_session, timeout=timeout)
+
+
+ def raise_for_status(response: httpx.Response):
+     try:
+         response.raise_for_status()
+     except httpx.HTTPStatusError:
+         code = None
+         message = None
+         details = None
+         try:
+             resp_json = response.json()
+             code = resp_json.get("code")
+             message = resp_json.get("message")
+             details = resp_json.get("details")
+         except Exception:
+             message = response.text
+         raise PineconeAPIError(
+             http_status_code=response.status_code,
+             code=code,
+             message=message,
+             details=details,
+         )
+
+
+ def batchify(iterable: Iterable, batch_size: int):
+     """
+     Split an iterable into batches of batch_size.
+     """
+     iterator = iter(iterable)
+     while True:
+         batch = list(islice(iterator, batch_size))
+         if not batch:
+             break
+         yield batch
+

-     def get_client(self, api_key: str) -> PineconeClient:
-         return PineconeClient(api_key=api_key, http_session=self.http_session)
+ async def async_batchify(async_iterable: AsyncIterable, batch_size: int):
+     """
+     Split an async iterable into batches of batch_size.
+     """
+     batch = []
+     async for item in async_iterable:
+         batch.append(item)
+         if len(batch) == batch_size:
+             yield batch
+             batch = []
+     if batch:
+         yield batch
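Taken together, the hunks above split the old `PineconeClient` into a control-plane client (index lifecycle) and a data-plane client (vector operations), with `PineconeSession` caching one httpx session per index host. For a sense of scale, with 1024-dimensional vectors carrying roughly 1 KB of metadata each, `_estimate_upsert_batch_size` allows about 2097152 / (4 * 1024 + 1024) ≈ 409 vectors per batch. Below is a minimal usage sketch based only on the API shown in this diff; the API key, index name, dimension, and vector values are placeholders:

```python
import asyncio

from nucliadb_utils.aiopynecone.client import PineconeSession
from nucliadb_utils.aiopynecone.models import Vector


async def main():
    # PineconeSession is now an async context manager: finalize() closes the
    # control plane session and every cached index host session on exit.
    async with PineconeSession() as session:
        control = session.control_plane(api_key="PINECONE_API_KEY")  # placeholder key
        # Control plane: create a serverless index and get its host back.
        # (In practice the index may need some time to become ready.)
        index_host = await control.create_index(name="my-index", dimension=3)

        # Data plane: operations go through the cached per-index-host session.
        data = session.data_plane(api_key="PINECONE_API_KEY", index_host=index_host)
        vectors = [Vector(id=f"doc/{i}", values=[0.1, 0.2, 0.3]) for i in range(1000)]
        # Batch size is estimated from the 2 MB payload limit when not given.
        await data.upsert_in_batches(vectors, max_parallel_batches=2)

        results = await data.query(vector=[0.1, 0.2, 0.3], top_k=5, include_metadata=True)
        for match in results.matches:
            print(match.id, match.score)

        # Lists ids by prefix and fans out batched /vectors/delete calls.
        await data.delete_by_id_prefix(id_prefix="doc/")


asyncio.run(main())
```

Note that `upsert_in_batches` and `delete_by_id_prefix` bound concurrency with an `asyncio.Semaphore`, so `max_parallel_batches` caps in-flight requests rather than the number of tasks created.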
--- /dev/null
+++ b/nucliadb_utils/aiopynecone/models.py
@@ -0,0 +1,80 @@
+ # Copyright (C) 2021 Bosutech XXI S.L.
+ #
+ # nucliadb is offered under the AGPL v3.0 and as commercial software.
+ # For commercial licensing, contact us at info@nuclia.com.
+ #
+ # AGPL:
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as
+ # published by the Free Software Foundation, either version 3 of the
+ # License, or (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU Affero General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
+ #
+ import json
+ from typing import Any, Optional
+
+ from pydantic import BaseModel, Field, field_validator
+
+ KILO_BYTE = 1024
+ MAX_METADATA_SIZE = 40 * KILO_BYTE
+
+
+ # Requests
+
+
+ class Vector(BaseModel):
+     id: str = Field(max_length=512)
+     values: list[float]
+     metadata: dict[str, Any] = {}
+
+     @field_validator("metadata", mode="after")
+     @classmethod
+     def validate_metadata_size(cls, value):
+         json_value = json.dumps(value)
+         if len(json_value) > MAX_METADATA_SIZE:
+             raise ValueError("metadata size is too large")
+         return value  # "after" validators must return the value, or the field is set to None
+
+
+ class UpsertRequest(BaseModel):
+     vectors: list[Vector]
+
+
+ # Responses
+
+
+ class CreateIndexResponse(BaseModel):
+     host: str
+
+
+ class VectorId(BaseModel):
+     id: str
+
+
+ class Pagination(BaseModel):
+     next: str
+
+
+ class ListResponse(BaseModel):
+     vectors: list[VectorId]
+     pagination: Optional[Pagination] = None
+
+
+ class VectorMatch(BaseModel):
+     id: str
+     score: float
+     # Only populated if `includeValues` is set to `True`
+     values: Optional[list[float]] = None
+     # Only populated if `includeMetadata` is set to `True`
+     metadata: Optional[dict[str, Any]] = None
+
+
+ class QueryResponse(BaseModel):
+     matches: list[VectorMatch]
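The new models module keeps request validation client-side: `Vector` enforces Pinecone's 512-character id limit and 40 KB metadata limit before any request is sent. A small sketch of that behaviour (the field values are illustrative):

```python
from pydantic import ValidationError

from nucliadb_utils.aiopynecone.models import Vector

# Within the limits: validates fine.
ok = Vector(id="doc/1", values=[0.1, 0.2], metadata={"title": "ok"})

# Metadata larger than 40 KiB once JSON-encoded is rejected at construction.
try:
    Vector(id="doc/2", values=[0.1, 0.2], metadata={"blob": "x" * (41 * 1024)})
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # "Value error, metadata size is too large"
```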
--- a/nucliadb_utils/utilities.py
+++ b/nucliadb_utils/utilities.py
@@ -429,7 +429,7 @@ def get_endecryptor() -> EndecryptorUtility:
      return util


- def get_pinecone_session() -> PineconeSession:
+ def get_pinecone() -> PineconeSession:
      util = get_utility(Utility.PINECONE_SESSION)
      if util is not None:
          return util
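The only change to `utilities.py` is the shorter accessor name; call sites migrate like this (a sketch, assuming the utility has already been registered):

```python
from nucliadb_utils.utilities import get_pinecone

session = get_pinecone()  # previously: get_pinecone_session()
data = session.data_plane(api_key="PINECONE_API_KEY", index_host="example-host.pinecone.io")
```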
--- a/nucliadb_utils-5.0.0.post787.dist-info/METADATA
+++ b/nucliadb_utils-5.0.0.post796.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: nucliadb_utils
- Version: 5.0.0.post787
+ Version: 5.0.0.post796
  Home-page: https://nuclia.com
  License: BSD
  Classifier: Development Status :: 4 - Beta
@@ -23,8 +23,8 @@ Requires-Dist: PyNaCl
  Requires-Dist: pyjwt >=2.4.0
  Requires-Dist: memorylru >=1.1.2
  Requires-Dist: mrflagly
- Requires-Dist: nucliadb-protos >=5.0.0.post787
- Requires-Dist: nucliadb-telemetry >=5.0.0.post787
+ Requires-Dist: nucliadb-protos >=5.0.0.post796
+ Requires-Dist: nucliadb-telemetry >=5.0.0.post796
  Provides-Extra: cache
  Requires-Dist: redis >=4.3.4 ; extra == 'cache'
  Requires-Dist: orjson >=3.6.7 ; extra == 'cache'
--- a/nucliadb_utils-5.0.0.post787.dist-info/RECORD
+++ b/nucliadb_utils-5.0.0.post796.dist-info/RECORD
@@ -16,9 +16,10 @@ nucliadb_utils/settings.py,sha256=AaOtQZVRqRcMnUyN1l1MpR10lANaDT2uPrbhmTyn6uk,76
  nucliadb_utils/signals.py,sha256=JRNv2y9zLtBjOANBf7krGfDGfOc9qcoXZ6N1nKWS2FE,2674
  nucliadb_utils/store.py,sha256=kQ35HemE0v4_Qg6xVqNIJi8vSFAYQtwI3rDtMsNy62Y,890
  nucliadb_utils/transaction.py,sha256=mwcI3aIHAvU5KOGqd_Uz_d1XQzXhk_-NWY8NqU1lfb0,7307
- nucliadb_utils/utilities.py,sha256=Hgca8E_VvjsszzHT4ABcqfzvhuRbtQIK2GCkm50UHI0,15295
+ nucliadb_utils/utilities.py,sha256=oz3tEODG2g3todnyvA-nW1Ou6xXDveL_tMKTDGdWXM4,15287
  nucliadb_utils/aiopynecone/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
- nucliadb_utils/aiopynecone/client.py,sha256=uY6X-CnOAx6HlZHLlA8xOTDlYWwHegib9c9YwUTp2DA,2855
+ nucliadb_utils/aiopynecone/client.py,sha256=kvGLCzSbDaMIWx0LK9WBAL7QsyuPmUjC7cPv5djMdFw,19028
+ nucliadb_utils/aiopynecone/models.py,sha256=DVlCVrinHAaDxuii3fzdciFn4dqS5HTTyBavnoDlR2U,2024
  nucliadb_utils/audit/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
  nucliadb_utils/audit/audit.py,sha256=dn5ZnCVQUlCcvdjzaORghbrjk9QgVGrtkfIftq30Bp8,2819
  nucliadb_utils/audit/basic.py,sha256=NViey6mKbCXqRTLDBX2xNTcCg9I-2e4oB2xkekuhDvM,3392
@@ -62,8 +63,8 @@ nucliadb_utils/tests/indexing.py,sha256=YW2QhkhO9Q_8A4kKWJaWSvXvyQ_AiAwY1VylcfVQ
  nucliadb_utils/tests/local.py,sha256=c3gZJJWmvOftruJkIQIwB3q_hh3uxEhqGIAVWim1Bbk,1343
  nucliadb_utils/tests/nats.py,sha256=Tosonm9A9cusImyji80G4pgdXEHNVPaCLT5TbFK_ra0,7543
  nucliadb_utils/tests/s3.py,sha256=YB8QqDaBXxyhHonEHmeBbRRDmvB7sTOaKBSi8KBGokg,2330
- nucliadb_utils-5.0.0.post787.dist-info/METADATA,sha256=X2wNpoAW0HtG3ZW8YH6o7hEZec5OnwIvvbATRBvdXZU,2073
- nucliadb_utils-5.0.0.post787.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
- nucliadb_utils-5.0.0.post787.dist-info/top_level.txt,sha256=fE3vJtALTfgh7bcAWcNhcfXkNPp_eVVpbKK-2IYua3E,15
- nucliadb_utils-5.0.0.post787.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
- nucliadb_utils-5.0.0.post787.dist-info/RECORD,,
+ nucliadb_utils-5.0.0.post796.dist-info/METADATA,sha256=mwdR7jpGf8oA1T8pa62MXf_BajMORpePKeK7KpiujIM,2073
+ nucliadb_utils-5.0.0.post796.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
+ nucliadb_utils-5.0.0.post796.dist-info/top_level.txt,sha256=fE3vJtALTfgh7bcAWcNhcfXkNPp_eVVpbKK-2IYua3E,15
+ nucliadb_utils-5.0.0.post796.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ nucliadb_utils-5.0.0.post796.dist-info/RECORD,,