nucliadb-utils 4.0.1.post552__py3-none-any.whl → 4.0.1.post553__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nucliadb_utils/storages/gcs.py +25 -16
- nucliadb_utils/storages/local.py +22 -11
- nucliadb_utils/storages/pg.py +20 -6
- nucliadb_utils/storages/s3.py +20 -14
- nucliadb_utils/storages/storage.py +20 -5
- nucliadb_utils/tests/unit/storages/test_pg.py +5 -6
- nucliadb_utils/tests/unit/storages/test_storage.py +3 -2
- {nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/METADATA +3 -3
- {nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/RECORD +12 -12
- {nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/WHEEL +0 -0
- {nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/top_level.txt +0 -0
- {nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/zip-safe +0 -0
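In short: this release replaces the storage layer's untyped dict payloads with two small pydantic models, ObjectInfo and ObjectMetadata, added in nucliadb_utils/storages/storage.py (see the storage.py hunks below). Every backend's exists() now returns Optional[ObjectMetadata] and iterate_objects() yields ObjectInfo. For reference, the models exactly as this diff adds them:

    from pydantic import BaseModel


    class ObjectInfo(BaseModel):
        # The object's key in the backing store (bucket path, table key, ...).
        name: str


    class ObjectMetadata(BaseModel):
        filename: str
        content_type: str
        size: int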
nucliadb_utils/storages/gcs.py
CHANGED
@@ -26,7 +26,7 @@ import socket
 from concurrent.futures import ThreadPoolExecutor
 from copy import deepcopy
 from datetime import datetime
-from typing import …
+from typing import AsyncGenerator, AsyncIterator, Dict, List, Optional
 from urllib.parse import quote_plus
 
 import aiohttp
@@ -47,7 +47,12 @@ from nucliadb_utils.storages.exceptions import (
     InvalidOffset,
     ResumableUploadGone,
 )
-from nucliadb_utils.storages.storage import …
+from nucliadb_utils.storages.storage import (
+    ObjectInfo,
+    ObjectMetadata,
+    Storage,
+    StorageField,
+)
 
 storage_ops_observer = metrics.Observer("gcs_ops", labels={"type": ""})
@@ -411,7 +416,7 @@ class GCSStorageField(StorageField):
         max_tries=MAX_TRIES,
     )
     @storage_ops_observer.wrap({"type": "exists"})
-    async def exists(self) -> Optional[…
+    async def exists(self) -> Optional[ObjectMetadata]:
         """
         Existence can be checked either with a CloudFile data in the field attribute
         or own StorageField key and bucket. Field takes precendece
@@ -438,16 +443,18 @@ class GCSStorageField(StorageField):
         async with self.storage.session.get(url, headers=headers) as api_resp:
             if api_resp.status == 200:
                 data = await api_resp.json()
-                metadata = data.get("metadata")
-                …
+                metadata = data.get("metadata") or {}
+                metadata = {k.lower(): v for k, v in metadata.items()}
+                size = metadata.get("size") or data.get("size") or 0
+                content_type = (
+                    metadata.get("content_type") or data.get("contentType") or ""
+                )
+                filename = metadata.get("filename") or key.split("/")[-1]
+                return ObjectMetadata(
+                    filename=filename,
+                    size=int(size),
+                    content_type=content_type,
+                )
             else:
                 return None
@@ -718,7 +725,9 @@ class GCSStorage(Storage):
             errors.capture_message(msg, "error", scope)
         return deleted, conflict
 
-    async def …
+    async def iterate_objects(
+        self, bucket: str, prefix: str
+    ) -> AsyncGenerator[ObjectInfo, None]:
         if self.session is None:
             raise AttributeError()
         url = "{}/{}/o".format(self.object_base_url, bucket)
@@ -732,7 +741,7 @@ class GCSStorage(Storage):
             data = await resp.json()
             if "items" in data:
                 for item in data["items"]:
-                    yield item
+                    yield ObjectInfo(name=item["name"])
 
         page_token = data.get("nextPageToken")
         while page_token is not None:
@@ -747,5 +756,5 @@ class GCSStorage(Storage):
             if len(items) == 0:
                 break
             for item in items:
-                yield item
+                yield ObjectInfo(name=item["name"])
             page_token = data.get("nextPageToken")
nucliadb_utils/storages/local.py
CHANGED
@@ -24,13 +24,18 @@ import json
 import os
 import shutil
 from datetime import datetime
-from typing import …
+from typing import AsyncGenerator, AsyncIterator, Dict, Optional
 
 import aiofiles
 from nucliadb_protos.resources_pb2 import CloudFile
 
 from nucliadb_utils.storages import CHUNK_SIZE
-from nucliadb_utils.storages.storage import …
+from nucliadb_utils.storages.storage import (
+    ObjectInfo,
+    ObjectMetadata,
+    Storage,
+    StorageField,
+)
 
 
 class LocalStorageField(StorageField):
@@ -142,14 +147,16 @@ class LocalStorageField(StorageField):
 
         init_url = self.storage.get_file_path(self.bucket, upload_uri)
         metadata_init_url = self.metadata_key(init_url)
-        …
+        object_metadata = ObjectMetadata(
+            filename=cf.filename,
+            content_type=cf.content_type,
+            size=cf.size,
         )
-        …
+        raw_metadata = json.dumps(object_metadata.model_dump())
         path_to_create = os.path.dirname(metadata_init_url)
         os.makedirs(path_to_create, exist_ok=True)
         async with aiofiles.open(metadata_init_url, "w+") as resp:
-            await resp.write(…
+            await resp.write(raw_metadata)
 
         self._handler = await aiofiles.threadpool.open(init_url, "wb+")
         field.offset = 0
@@ -190,12 +197,15 @@ class LocalStorageField(StorageField):
         self.field.ClearField("offset")
         self.field.ClearField("upload_uri")
 
-    async def exists(self) -> Optional[…
+    async def exists(self) -> Optional[ObjectMetadata]:
         file_path = self.storage.get_file_path(self.bucket, self.key)
         metadata_path = self.metadata_key(file_path)
         if os.path.exists(metadata_path):
             async with aiofiles.open(metadata_path, "r") as metadata:
-                …
+                raw_metadata = await metadata.read()
+                metadata_dict = json.loads(raw_metadata)
+                metadata_dict = {k.lower(): v for k, v in metadata_dict.items()}
+                return ObjectMetadata.model_validate(metadata_dict)
         return None
 
     async def upload(self, iterator: AsyncIterator, origin: CloudFile) -> CloudFile:
@@ -269,10 +279,11 @@ class LocalStorage(Storage):
             deleted = False
         return deleted
 
-    async def …
+    async def iterate_objects(
+        self, bucket: str, prefix: str
+    ) -> AsyncGenerator[ObjectInfo, None]:
         for key in glob.glob(f"{bucket}/{prefix}*"):
-            …
-            yield item
+            yield ObjectInfo(name=key)
 
     async def download(
         self, bucket_name: str, key: str, headers: Optional[Dict[str, str]] = None
nucliadb_utils/storages/pg.py
CHANGED
@@ -28,7 +28,12 @@ import asyncpg
 from nucliadb_protos.resources_pb2 import CloudFile
 
 from nucliadb_utils.storages import CHUNK_SIZE
-from nucliadb_utils.storages.storage import …
+from nucliadb_utils.storages.storage import (
+    ObjectInfo,
+    ObjectMetadata,
+    Storage,
+    StorageField,
+)
 
 logger = logging.getLogger(__name__)
@@ -288,7 +293,7 @@ where kb_id = $1 and file_id = $2
 
     async def iterate_kb(
         self, bucket: str, prefix: Optional[str] = None
-    ) -> …
+    ) -> AsyncGenerator[FileInfo, None]:
         query = """
 SELECT filename, size, content_type, file_id
 FROM kb_files
@@ -517,10 +522,17 @@ class PostgresStorageField(StorageField):
         self.field.ClearField("offset")
         self.field.ClearField("upload_uri")
 
-    async def exists(self) -> Optional[…
+    async def exists(self) -> Optional[ObjectMetadata]:
         async with self.storage.pool.acquire() as conn:
             dl = PostgresFileDataLayer(conn)
-            …
+            file_info = await dl.get_file_info(self.bucket, self.key)
+            if file_info is None:
+                return None
+            return ObjectMetadata(
+                filename=file_info["filename"],
+                size=file_info["size"],
+                content_type=file_info["content_type"],
+            )
 
     async def upload(self, iterator: AsyncIterator, origin: CloudFile) -> CloudFile:
         self.field = await self.start(origin)
@@ -588,11 +600,13 @@ class PostgresStorage(Storage):
             await self.delete_kb(kbid)
         return True
 
-    async def …
+    async def iterate_objects(
+        self, bucket: str, prefix: str
+    ) -> AsyncGenerator[ObjectInfo, None]:
         async with self.pool.acquire() as conn:
             dl = PostgresFileDataLayer(conn)
             async for file_data in dl.iterate_kb(bucket, prefix):
-                yield …
+                yield ObjectInfo(name=file_data["key"])
 
     async def download(
         self, bucket_name: str, key: str, headers: Optional[dict[str, str]] = None
nucliadb_utils/storages/s3.py
CHANGED
@@ -21,7 +21,7 @@ from __future__ import annotations
 
 from contextlib import AsyncExitStack
 from datetime import datetime
-from typing import …
+from typing import AsyncGenerator, AsyncIterator, Optional
 
 import aiobotocore  # type: ignore
 import aiohttp
@@ -34,7 +34,12 @@ from nucliadb_protos.resources_pb2 import CloudFile
 from nucliadb_telemetry import errors
 from nucliadb_utils import logger
 from nucliadb_utils.storages.exceptions import UnparsableResponse
-from nucliadb_utils.storages.storage import …
+from nucliadb_utils.storages.storage import (
+    ObjectInfo,
+    ObjectMetadata,
+    Storage,
+    StorageField,
+)
 
 MB = 1024 * 1024
 MIN_UPLOAD_SIZE = 5 * MB
@@ -272,7 +277,7 @@ class S3StorageField(StorageField):
             MultipartUpload=part_info,
         )
 
-    async def exists(self):
+    async def exists(self) -> Optional[ObjectMetadata]:
         """
         Existence can be checked either with a CloudFile data in the field attribute
         or own StorageField key and bucket. Field takes precendece
@@ -292,13 +297,15 @@ class S3StorageField(StorageField):
         try:
             obj = await self.storage._s3aioclient.head_object(Bucket=bucket, Key=key)
             if obj is not None:
-                metadata = obj.get("Metadata"…
-                …
+                metadata = obj.get("Metadata") or {}
+                size = metadata.get("size") or obj.get("ContentLength") or 0
+                content_type = (
+                    metadata.get("content_type") or obj.get("ContentType") or ""
+                )
+                filename = metadata.get("filename") or key.split("/")[-1]
+                return ObjectMetadata(
+                    size=int(size), content_type=content_type, filename=filename
+                )
             else:
                 return None
         except botocore.exceptions.ClientError as e:
@@ -424,14 +431,13 @@ class S3Storage(Storage):
         else:
             raise AttributeError("No valid uri")
 
-    async def …
+    async def iterate_objects(
         self, bucket: str, prefix: str = "/"
-    ) -> …
+    ) -> AsyncGenerator[ObjectInfo, None]:
         paginator = self._s3aioclient.get_paginator("list_objects")
         async for result in paginator.paginate(Bucket=bucket, Prefix=prefix):
             for item in result.get("Contents", []):
-                …
-                yield item
+                yield ObjectInfo(name=item["Key"])
 
     async def create_kb(self, kbid: str):
         bucket_name = self.get_bucket_name(kbid)
nucliadb_utils/storages/storage.py
CHANGED
@@ -40,6 +40,7 @@ from nucliadb_protos.noderesources_pb2 import Resource as BrainResource
 from nucliadb_protos.nodewriter_pb2 import IndexMessage
 from nucliadb_protos.resources_pb2 import CloudFile
 from nucliadb_protos.writer_pb2 import BrokerMessage
+from pydantic import BaseModel
 
 from nucliadb_utils import logger
 from nucliadb_utils.helpers import async_gen_lookahead
@@ -60,6 +61,16 @@ INDEXING_KEY = "index/{kb}/{shard}/{resource}/{txid}"
 MESSAGE_KEY = "message/{kbid}/{rid}/{mid}"
 
 
+class ObjectInfo(BaseModel):
+    name: str
+
+
+class ObjectMetadata(BaseModel):
+    filename: str
+    content_type: str
+    size: int
+
+
 class StorageField(abc.ABC, metaclass=abc.ABCMeta):
     storage: Storage
     bucket: str
@@ -99,7 +110,7 @@ class StorageField(abc.ABC, metaclass=abc.ABCMeta):
         return deleted
 
     @abc.abstractmethod
-    async def exists(self) -> Optional[…
+    async def exists(self) -> Optional[ObjectMetadata]: ...
 
     @abc.abstractmethod
     async def copy(
@@ -141,10 +152,10 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         # Delete all keys inside a resource
         bucket = self.get_bucket_name(kbid)
         resource_storage_base_path = STORAGE_RESOURCE.format(kbid=kbid, uuid=uuid)
-        async for …
+        async for object_info in self.iterate_objects(
             bucket, resource_storage_base_path
         ):
-            await self.delete_upload(…
+            await self.delete_upload(object_info.name, bucket)
 
     async def deadletter(
         self, message: BrokerMessage, seq: int, seqid: int, partition: str
@@ -286,7 +297,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         elif file.source == self.source:
             # This is the case for NucliaDB hosted deployment (Nuclia's cloud deployment):
             # The data is already stored in the right place by the processing
-            logger.debug(…
+            logger.debug("[Nuclia hosted]")
             return file
         elif file.source == CloudFile.EXPORT:
             # This is for files coming from an export
@@ -511,7 +522,11 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
     async def finalize(self) -> None: ...
 
     @abc.abstractmethod
-    def …
+    async def iterate_objects(
+        self, bucket: str, prefix: str
+    ) -> AsyncGenerator[ObjectInfo, None]:
+        raise NotImplementedError()
+        yield ObjectInfo(name="")
 
     async def copy(self, file: CloudFile, destination: StorageField) -> None:
         await destination.copy(
nucliadb_utils/tests/unit/storages/test_pg.py
CHANGED
@@ -523,7 +523,7 @@ class TestPostgresStorage:
         await storage.delete_upload("file_id", "kb_id")
         connection.execute.assert_awaited_with(ANY, "kb_id", "file_id")
 
-    async def …
+    async def test_iterate_objects(self, storage: pg.PostgresStorage, connection):
         connection.cursor = MagicMock(
             return_value=iter_result(
                 [
@@ -543,11 +543,10 @@ class TestPostgresStorage:
             )
         )
 
-        …
-        async for …
-            …
-        assert chunks == [{"name": "file_id1"}, {"name": "file_id2"}]
+        object_names = []
+        async for object_info in storage.iterate_objects("kb_id", "file_id"):
+            object_names.append(object_info.name)
+        assert object_names == ["file_id1", "file_id2"]
 
     async def test_download(
         self, storage: pg.PostgresStorage, connection, chunk_info, chunk_data
nucliadb_utils/tests/unit/storages/test_storage.py
CHANGED
@@ -28,6 +28,7 @@ from nucliadb_protos.resources_pb2 import CloudFile
 
 from nucliadb_utils.storages.local import LocalStorageField
 from nucliadb_utils.storages.storage import (
+    ObjectInfo,
     Storage,
     StorageField,
     iter_and_add_size,
@@ -67,8 +68,8 @@ class StorageTest(Storage):
     def get_bucket_name(self, kbid):
         return "bucket"
 
-    async def …
-        yield …
+    async def iterate_objects(self, bucket_name, prefix):
+        yield ObjectInfo(name="uri")
 
     async def download(self, bucket_name, uri):
         br = BrainResource(labels=["label"])
{nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nucliadb_utils
-Version: 4.0.1.post552
+Version: 4.0.1.post553
 Home-page: https://nuclia.com
 License: BSD
 Classifier: Development Status :: 4 - Beta
@@ -23,8 +23,8 @@ Requires-Dist: PyNaCl
 Requires-Dist: pyjwt >=2.4.0
 Requires-Dist: memorylru >=1.1.2
 Requires-Dist: mrflagly
-Requires-Dist: nucliadb-protos >=4.0.1.post552
-Requires-Dist: nucliadb-telemetry >=4.0.1.post552
+Requires-Dist: nucliadb-protos >=4.0.1.post553
+Requires-Dist: nucliadb-telemetry >=4.0.1.post553
 Provides-Extra: cache
 Requires-Dist: redis >=4.3.4 ; extra == 'cache'
 Requires-Dist: orjson >=3.6.7 ; extra == 'cache'
{nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/RECORD
CHANGED
@@ -40,13 +40,13 @@ nucliadb_utils/nuclia_usage/utils/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZ
 nucliadb_utils/nuclia_usage/utils/kb_usage_report.py,sha256=E1eUSFXBVNzQP9Q2rWj9y3koCO5S7iKwckny_AoLKuk,3870
 nucliadb_utils/storages/__init__.py,sha256=5Qc8AUWiJv9_JbGCBpAn88AIJhwDlm0OPQpg2ZdRL4U,872
 nucliadb_utils/storages/exceptions.py,sha256=n6aBOyurWMo8mXd1XY6Psgno4VfXJ9TRbxCy67c08-g,2417
-nucliadb_utils/storages/gcs.py,sha256=…
-nucliadb_utils/storages/local.py,sha256=…
+nucliadb_utils/storages/gcs.py,sha256=krBkNd7wkHhfIn3T-4QvYu1Rw-envYCa6G4G90oOjvM,27303
+nucliadb_utils/storages/local.py,sha256=JewYQ-fes9iUtUjlbHgWXrG1RsQWh16TJDunJnwfbTg,10447
 nucliadb_utils/storages/nuclia.py,sha256=UfvRu92eqG1v-PE-UWH2x8KEJFqDqATMmUGFmEuqSSs,2097
-nucliadb_utils/storages/pg.py,sha256=…
-nucliadb_utils/storages/s3.py,sha256=…
+nucliadb_utils/storages/pg.py,sha256=DxXNwcstAFOTC6kaXlWp-b4WrvR8aSSOfgVJNDQ5oDI,18976
+nucliadb_utils/storages/s3.py,sha256=f2bjgmT6JRlUr5DHy3tRUip4kYSA1MzXfYrLNVUp_Cg,19447
 nucliadb_utils/storages/settings.py,sha256=ugCPy1zxBOmA2KosT-4tsjpvP002kg5iQyi42yCGCJA,1285
-nucliadb_utils/storages/storage.py,sha256=…
+nucliadb_utils/storages/storage.py,sha256=sR2Qvev6eLUvbH1WTXjqXIOnKRy1YMMx6Vsj0wZ2x8A,20585
 nucliadb_utils/tests/__init__.py,sha256=Oo9CAE7B0eW5VHn8sHd6o30SQzOWUhktLPRXdlDOleA,1456
 nucliadb_utils/tests/asyncbenchmark.py,sha256=rN_NNDk4ras0qgFp0QlRyAi9ZU9xITdzxl2s5CigzBo,10698
 nucliadb_utils/tests/conftest.py,sha256=gPYVuVhj_e6Aeanb91wvUerwuxZgaS7d3luIBRQFIU0,1876
@@ -69,10 +69,10 @@ nucliadb_utils/tests/unit/test_utilities.py,sha256=KcHSPp3RZyKAnscJrIwc2M3PCD3l1
 nucliadb_utils/tests/unit/storages/__init__.py,sha256=itSI7dtTwFP55YMX4iK7JzdMHS5CQVUiB1XzQu4UBh8,833
 nucliadb_utils/tests/unit/storages/test_aws.py,sha256=GCsB_jwCUNV3Ogt8TZZEmNKAHvOlR0HGU7blrFbtJqs,1924
 nucliadb_utils/tests/unit/storages/test_gcs.py,sha256=2XzJwgNpfjVGjtE-QdZhu3ayuT1EMEXINdM-_SatPCY,3554
-nucliadb_utils/tests/unit/storages/test_pg.py,sha256=…
-nucliadb_utils/tests/unit/storages/test_storage.py,sha256=…
-nucliadb_utils-4.0.1.post552.dist-info/METADATA,…
-nucliadb_utils-4.0.1.post552.dist-info/WHEEL,…
-nucliadb_utils-4.0.1.post552.dist-info/top_level.txt,…
-nucliadb_utils-4.0.1.post552.dist-info/zip-safe,…
-nucliadb_utils-4.0.1.post552.dist-info/RECORD,,
+nucliadb_utils/tests/unit/storages/test_pg.py,sha256=hOR8WSvnuRJKH_rD2vcSMDOJqs7EndJqaPNrTENBOy8,17016
+nucliadb_utils/tests/unit/storages/test_storage.py,sha256=OT21FUnQTU2oxxaFfVC1LMkJ34jyW7Leit5ZCQgNyZ0,7131
+nucliadb_utils-4.0.1.post553.dist-info/METADATA,sha256=ZtL1febvSlU2s6HaxeRuUpMHxQ7Sn4g7vQjLuaRhSUk,2030
+nucliadb_utils-4.0.1.post553.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+nucliadb_utils-4.0.1.post553.dist-info/top_level.txt,sha256=fE3vJtALTfgh7bcAWcNhcfXkNPp_eVVpbKK-2IYua3E,15
+nucliadb_utils-4.0.1.post553.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+nucliadb_utils-4.0.1.post553.dist-info/RECORD,,
{nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/WHEEL
RENAMED
File without changes
{nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/top_level.txt
RENAMED
File without changes
{nucliadb_utils-4.0.1.post552.dist-info → nucliadb_utils-4.0.1.post553.dist-info}/zip-safe
RENAMED
File without changes