nucliadb-utils 6.1.0.post2520__py3-none-any.whl → 6.1.0.post2533__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -215,6 +215,9 @@ class AzureStorage(Storage):
         async for obj in self.object_store.iterate(bucket, prefix):
             yield obj
 
+    async def insert_object(self, bucket_name: str, key: str, data: bytes) -> None:
+        await self.object_store.insert(bucket_name, key, data)
+
 
 class AzureObjectStore(ObjectStore):
     def __init__(self, account_url: str, connection_string: Optional[str] = None):
@@ -336,6 +339,10 @@ class AzureObjectStore(ObjectStore):
             ),
         )
 
+    async def insert(self, bucket: str, key: str, data: bytes) -> None:
+        container_client = self.service_client.get_container_client(bucket)
+        await container_client.upload_blob(name=key, data=data, length=len(data))
+
     async def download(self, bucket: str, key: str) -> bytes:
         container_client = self.service_client.get_container_client(bucket)
         blob_client = container_client.get_blob_client(key)
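
The Azure backend now exposes insert_object, which delegates to the new AzureObjectStore.insert and writes the blob in a single upload_blob call, without attaching any CloudFile metadata. A minimal usage sketch, assuming an already-initialized AzureStorage instance named `storage` and an existing container (both names are illustrative, not part of this diff):

    async def store_marker(storage, container: str) -> None:
        payload = b'{"status": "ok"}'
        # One round trip: AzureStorage.insert_object -> AzureObjectStore.insert -> upload_blob.
        await storage.insert_object(container, "markers/latest.json", payload)
        # Reading it back goes through the object store's download() coroutine.
        data = await storage.object_store.download(container, "markers/latest.json")
        assert data == payload
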
@@ -260,11 +260,8 @@ class GCSStorageField(StorageField):
             source=CloudFile.GCS,
         )
         upload_uri = self.key
-
-        init_url = "{}&name={}".format(
-            self.storage._upload_url.format(bucket=self.bucket),
-            quote_plus(upload_uri),
-        )
+        bucket_upload_url = self.storage._upload_url.format(bucket=field.bucket_name)
+        init_url = f"{bucket_upload_url}?uploadType=resumable&name={quote_plus(upload_uri)}"
         metadata = json.dumps(
             {
                 "metadata": {
@@ -284,7 +281,6 @@ class GCSStorageField(StorageField):
                 "Content-Length": str(call_size),
             }
         )
-
         async with self.storage.session.post(
             init_url,
             headers=headers,
@@ -425,6 +421,7 @@ class GCSStorageField(StorageField):
         else:
             return None
 
+    @storage_ops_observer.wrap({"type": "upload"})
     async def upload(self, iterator: AsyncIterator, origin: CloudFile) -> CloudFile:
         self.field = await self.start(origin)
         if self.field is None:
@@ -493,7 +490,7 @@ class GCSStorage(Storage):
         # https://cloud.google.com/storage/docs/bucket-locations
         self._bucket_labels = labels or {}
         self._executor = executor
-        self._upload_url = url + "/upload/storage/v1/b/{bucket}/o?uploadType=resumable"  # noqa
+        self._upload_url = url + "/upload/storage/v1/b/{bucket}/o"
         self.object_base_url = url + "/storage/v1/b"
         self._client = None
 
@@ -511,7 +508,6 @@ class GCSStorage(Storage):
         self.session = aiohttp.ClientSession(
             loop=loop, connector=aiohttp.TCPConnector(ttl_dns_cache=60 * 5), timeout=TIMEOUT
         )
-
         try:
             if self.deadletter_bucket is not None and self.deadletter_bucket != "":
                 await self.create_bucket(self.deadletter_bucket)
@@ -689,7 +685,7 @@ class GCSStorage(Storage):
                 logger.info(f"Conflict on deleting bucket {bucket_name}: {details}")
                 conflict = True
             elif resp.status == 404:
-                logger.info(f"Does not exit on deleting: {bucket_name}")
+                logger.info(f"Does not exist on deleting: {bucket_name}")
             else:
                 details = await resp.text()
                 msg = f"Delete KB bucket returned an unexpected status {resp.status}: {details}"
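
With this change, GCSStorage._upload_url no longer hard-codes the `?uploadType=resumable` query string: each caller appends the parameters it needs, so the same template serves both the resumable-upload initiation in GCSStorageField (see the init_url hunk above) and the single-request media upload added below in GCSStorage.insert_object. A sketch of how the two URLs are composed; the base URL, bucket, and key values here are illustrative assumptions:

    from urllib.parse import quote_plus

    url = "https://storage.googleapis.com"  # illustrative endpoint
    upload_url_template = url + "/upload/storage/v1/b/{bucket}/o"
    bucket, key = "nucliadb-kb", "kbid/rid/field"

    bucket_upload_url = upload_url_template.format(bucket=bucket)
    # Resumable upload initiation (GCSStorageField):
    init_url = f"{bucket_upload_url}?uploadType=resumable&name={quote_plus(key)}"
    # Single-request media upload (GCSStorage.insert_object):
    media_url = f"{bucket_upload_url}?uploadType=media&name={quote_plus(key)}"
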
@@ -732,6 +728,27 @@ class GCSStorage(Storage):
                 yield ObjectInfo(name=item["name"])
             page_token = data.get("nextPageToken")
 
+    @storage_ops_observer.wrap({"type": "insert_object"})
+    async def insert_object(self, bucket_name: str, key: str, data: bytes) -> None:
+        """
+        Put an object in the storage without any metadata.
+        """
+        if self.session is None:  # pragma: no cover
+            raise AttributeError()
+        bucket_upload_url = self._upload_url.format(bucket=bucket_name)
+        url = f"{bucket_upload_url}?uploadType=media&name={quote_plus(key)}"
+        headers = await self.get_access_headers()
+        headers.update(
+            {
+                "Content-Length": str(len(data)),
+                "Content-Type": "application/octet-stream",
+            }
+        )
+        async with self.session.post(url, headers=headers, data=data) as resp:
+            if resp.status != 200:
+                text = await resp.text()
+                raise GoogleCloudException(f"{resp.status}: {text}")
+
 
 def parse_object_metadata(object_data: dict[str, Any], key: str) -> ObjectMetadata:
     custom_metadata: dict[str, str] = object_data.get("metadata") or {}
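
GCSStorage.insert_object thus performs a single `uploadType=media` POST, setting Content-Length and Content-Type itself and raising GoogleCloudException on any non-200 response. A hedged usage sketch, assuming an initialized GCSStorage instance `storage` whose paginated listing (shown in the context above) is exposed as iterate_objects; the bucket and key are illustrative:

    async def write_and_list(storage, bucket: str) -> None:
        # Small internal payloads only: the whole body goes out in one request
        # and no CloudFile metadata is attached.
        await storage.insert_object(bucket, "internal/marker", b"\x01")
        names = [obj.name async for obj in storage.iterate_objects(bucket, "internal/")]
        assert "internal/marker" in names
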
@@ -278,8 +278,13 @@ class LocalStorage(Storage):
         return deleted
 
     async def iterate_objects(self, bucket: str, prefix: str) -> AsyncGenerator[ObjectInfo, None]:
-        for key in glob.glob(f"{bucket}/{prefix}*"):
-            yield ObjectInfo(name=key)
+        pathname = f"{self.get_file_path(bucket, prefix)}*"
+        for key in glob.glob(pathname):
+            if key.endswith(".metadata"):
+                # Skip metadata files -- they are internal to the local-storage implementation.
+                continue
+            name = key.split("/")[-1]
+            yield ObjectInfo(name=name)
 
     async def download(self, bucket_name: str, key: str, range: Optional[Range] = None):
         key_path = self.get_file_path(bucket_name, key)
@@ -287,3 +292,9 @@ class LocalStorage(Storage):
             return
         async for chunk in super().download(bucket_name, key, range=range):
             yield chunk
+
+    async def insert_object(self, bucket: str, key: str, data: bytes) -> None:
+        path = self.get_file_path(bucket, key)
+        os.makedirs(os.path.dirname(path), exist_ok=True)
+        with open(path, "wb") as file:
+            file.write(data)
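
LocalStorage gains a filesystem-backed insert_object, and iterate_objects now resolves keys through get_file_path, skips the internal `*.metadata` files, and yields bare object names instead of full paths. A hedged round-trip sketch, assuming an initialized LocalStorage instance `storage`; the bucket and keys are illustrative:

    async def local_round_trip(storage, bucket: str) -> None:
        await storage.insert_object(bucket, "folder/key1", b"hello")
        names = [obj.name async for obj in storage.iterate_objects(bucket, "folder/")]
        # iterate_objects now yields bare names (no absolute paths, no *.metadata
        # entries), so this is expected to be ["key1"].
        assert names == ["key1"]
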
@@ -97,6 +97,18 @@ class ObjectStore(abc.ABC, metaclass=abc.ABCMeta):
         metadata: ObjectMetadata,
     ) -> None: ...
 
+    @abc.abstractmethod
+    async def insert(
+        self,
+        bucket: str,
+        key: str,
+        data: bytes,
+    ) -> None:
+        """
+        Insert data to the object storage without any metadata
+        """
+        ...
+
     @abc.abstractmethod
     async def download(self, bucket: str, key: str) -> bytes: ...
 
@@ -330,6 +330,7 @@ class S3StorageField(StorageField):
         await self.copy(origin_uri, destination_uri, origin_bucket_name, destination_bucket_name)
         await self.storage.delete_upload(origin_uri, origin_bucket_name)
 
+    @s3_ops_observer.wrap({"type": "upload"})
     async def upload(self, iterator: AsyncIterator, origin: CloudFile) -> CloudFile:
         self.field = await self.start(origin)
         await self.append(origin, iterator)
@@ -484,6 +485,15 @@ class S3Storage(Storage):
             deleted = True
         return deleted, conflict
 
+    @s3_ops_observer.wrap({"type": "insert_object"})
+    async def insert_object(self, bucket_name: str, key: str, data: bytes) -> None:
+        await self._s3aioclient.put_object(
+            Bucket=bucket_name,
+            Key=key,
+            Body=data,
+            ContentType="application/octet-stream",
+        )
+
 
 async def bucket_exists(client: AioSession, bucket_name: str) -> bool:
     exists = True
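
ObjectStore.insert (added above in object_store.py) is abstract, so every object-store backend must now provide a single-request, metadata-free write; the Azure and S3 hunks here are the concrete implementations shipped in this release. A minimal in-memory sketch of that contract, purely illustrative and not part of the package (the remaining abstract methods would also need implementations before the class could be instantiated):

    from nucliadb_utils.storages.object_store import ObjectStore  # module path taken from the wheel RECORD

    class InMemoryObjectStore(ObjectStore):
        def __init__(self) -> None:
            self.blobs: dict[tuple[str, str], bytes] = {}

        async def insert(self, bucket: str, key: str, data: bytes) -> None:
            # Contract: store the raw bytes as-is, no object metadata involved.
            self.blobs[(bucket, key)] = data

        async def download(self, bucket: str, key: str) -> bytes:
            return self.blobs[(bucket, key)]
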
@@ -161,7 +161,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
             logger.error("No Deadletter Bucket defined will not store the error")
             return
         key = DEADLETTER.format(seqid=seqid, seq=seq, partition=partition)
-        await self.uploadbytes(self.deadletter_bucket, key, message.SerializeToString())
+        await self.upload_object(self.deadletter_bucket, key, message.SerializeToString())
 
     def get_indexing_storage_key(
         self, *, kb: str, logical_shard: str, resource_uid: str, txid: Union[int, str]
@@ -187,7 +187,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
             resource_uid=message.resource.uuid,
             txid=txid,
         )
-        await self.uploadbytes(self.indexing_bucket, key, message.SerializeToString())
+        await self.upload_object(self.indexing_bucket, key, message.SerializeToString())
 
         return key
 
@@ -199,7 +199,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         kb: str,
         logical_shard: str,
     ) -> str:
-        if self.indexing_bucket is None:
+        if self.indexing_bucket is None:  # pragma: no cover
             raise AttributeError()
         key = self.get_indexing_storage_key(
             kb=kb,
@@ -207,10 +207,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
             resource_uid=message.resource.uuid,
            txid=reindex_id,
         )
-        message_serialized = message.SerializeToString()
-        logger.debug("Starting to upload bytes")
-        await self.uploadbytes(self.indexing_bucket, key, message_serialized)
-        logger.debug("Finished to upload bytes")
+        await self.upload_object(self.indexing_bucket, key, message.SerializeToString())
         return key
 
     async def get_indexing(self, payload: IndexMessage) -> BrainResource:
@@ -259,7 +256,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
             + ".deleted"
         )
 
-        await self.uploadbytes(self.indexing_bucket, key, b"")
+        await self.upload_object(self.indexing_bucket, key, b"")
 
     def needs_move(self, file: CloudFile, kbid: str) -> bool:
         # The cloudfile is valid for our environment
@@ -285,11 +282,9 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         elif file.source == self.source:
             # This is the case for NucliaDB hosted deployment (Nuclia's cloud deployment):
             # The data is already stored in the right place by the processing
-            logger.debug("[Nuclia hosted]")
             return file
         elif file.source == CloudFile.EXPORT:
             # This is for files coming from an export
-            logger.debug(f"[Exported file]: {file.uri}")
             new_cf = CloudFile()
             new_cf.CopyFrom(file)
             new_cf.bucket_name = destination.bucket
@@ -298,18 +293,15 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         elif file.source == CloudFile.FLAPS:
             # NucliaDB On-Prem: the data is stored in NUA, so we need to
             # download it and upload it to NucliaDB's storage
-            logger.debug(f"[NucliaDB OnPrem]: {file.uri}")
             flaps_storage = await get_nuclia_storage()
             iterator = flaps_storage.download(file)
             new_cf = await self.uploaditerator(iterator, destination, file)
         elif file.source == CloudFile.LOCAL:
             # For testing purposes: protobuffer is stored in a file in the local filesystem
-            logger.debug(f"[Local]: {file.uri}")
             local_storage = get_local_storage()
             iterator = local_storage.download(file.bucket_name, file.uri)
             new_cf = await self.uploaditerator(iterator, destination, file)
         elif file.source == CloudFile.EMPTY:
-            logger.warning(f"[Empty file]: {file.uri}")
             new_cf = CloudFile()
             new_cf.CopyFrom(file)
         else:
@@ -379,7 +371,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         cf = await self.uploaditerator(generator, sf, cf)
         return cf
 
-    async def uploadbytes(
+    async def chunked_upload_object(
         self,
         bucket: str,
         key: str,
@@ -387,6 +379,10 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         filename: str = "payload",
         content_type: str = "",
     ):
+        """
+        Upload bytes to the storage in chunks.
+        This is useful for large files that are already loaded in memory.
+        """
         destination = self.field_klass(storage=self, bucket=bucket, fullkey=key)
 
         cf = CloudFile()
@@ -405,9 +401,16 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
         generator = splitter(buffer)
         await self.uploaditerator(generator, destination, cf)
 
+    # For backwards compatibility
+    uploadbytes = chunked_upload_object
+
     async def uploaditerator(
         self, iterator: AsyncIterator, destination: StorageField, origin: CloudFile
     ) -> CloudFile:
+        """
+        Upload bytes to the storage in chunks, but the data is coming from an iterator.
+        This is when we want to upload large files without loading them in memory.
+        """
         safe_iterator = iterate_storage_compatible(iterator, self, origin)  # type: ignore
         return await destination.upload(safe_iterator, origin)
 
@@ -474,7 +477,7 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
             yield data
 
     async def upload_pb(self, sf: StorageField, payload: Any):
-        await self.uploadbytes(sf.bucket, sf.key, payload.SerializeToString())
+        await self.upload_object(sf.bucket, sf.key, payload.SerializeToString())
 
     async def download_pb(self, sf: StorageField, PBKlass: Type):
         payload = await self.downloadbytes(sf.bucket, sf.key)
@@ -520,7 +523,8 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
 
     async def set_stream_message(self, kbid: str, rid: str, data: bytes) -> str:
         key = MESSAGE_KEY.format(kbid=kbid, rid=rid, mid=uuid.uuid4())
-        await self.uploadbytes(cast(str, self.indexing_bucket), key, data)
+        indexing_bucket = cast(str, self.indexing_bucket)
+        await self.upload_object(indexing_bucket, key, data)
         return key
 
     async def get_stream_message(self, key: str) -> bytes:
@@ -532,6 +536,23 @@ class Storage(abc.ABC, metaclass=abc.ABCMeta):
     async def del_stream_message(self, key: str) -> None:
         await self.delete_upload(key, cast(str, self.indexing_bucket))
 
+    @abc.abstractmethod
+    async def insert_object(self, bucket: str, key: str, data: bytes) -> None:
+        """
+        Put some binary data into the object storage without any object metadata.
+        """
+        ...
+
+    async def upload_object(self, bucket: str, key: str, data: bytes) -> None:
+        """
+        Put some binary data into the object storage without any object metadata.
+        The data will be uploaded in a single request or in chunks if the data is too large.
+        """
+        if len(data) > self.chunk_size:
+            await self.chunked_upload_object(bucket, key, data)
+        else:
+            await self.insert_object(bucket, key, data)
+
 
 async def iter_and_add_size(
     stream: AsyncGenerator[bytes, None], cf: CloudFile
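
upload_object is the new entry point that replaces the direct uploadbytes calls above: payloads larger than self.chunk_size keep using the chunked path (chunked_upload_object, the renamed uploadbytes), while smaller ones go through the backend's single-request insert_object. Spelled out, the dispatch in this hunk is equivalent to the following sketch, assuming an initialized Storage subclass instance `storage`:

    async def upload_object_dispatch(storage, bucket: str, key: str, data: bytes) -> None:
        if len(data) > storage.chunk_size:
            # Large payload: chunked upload via the former uploadbytes path.
            await storage.chunked_upload_object(bucket, key, data)
        else:
            # Small payload: one request against the backend (GCS media upload,
            # S3 put_object, Azure upload_blob, or a plain file write locally).
            await storage.insert_object(bucket, key, data)

Indexing messages, deadletter payloads, serialized protobufs (upload_pb) and stream messages now all route through upload_object, while uploadbytes is kept as an alias of chunked_upload_object for backwards compatibility.
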
@@ -75,12 +75,21 @@ class GCS(BaseImage):
 @pytest.fixture(scope="session")
 def gcs():
     container = GCS()
-    _, port = container.run()
-    public_api_url = f"http://172.17.0.1:{port}"
+    host, port = container.run()
+    if running_in_mac_os():
+        public_api_url = f"http://{host}:{port}"
+    else:
+        public_api_url = f"http://172.17.0.1:{port}"
     yield public_api_url
     container.stop()
 
 
+def running_in_mac_os() -> bool:
+    import os
+
+    return os.uname().sysname == "Darwin"
+
+
 @pytest.fixture(scope="function")
 def gcs_storage_settings(gcs) -> dict[str, Any]:
     settings = {
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nucliadb_utils
-Version: 6.1.0.post2520
+Version: 6.1.0.post2533
 Home-page: https://nuclia.com
 License: BSD
 Classifier: Development Status :: 4 - Beta
@@ -24,8 +24,8 @@ Requires-Dist: PyNaCl
 Requires-Dist: pyjwt>=2.4.0
 Requires-Dist: memorylru>=1.1.2
 Requires-Dist: mrflagly>=0.2.9
-Requires-Dist: nucliadb-protos>=6.1.0.post2520
-Requires-Dist: nucliadb-telemetry>=6.1.0.post2520
+Requires-Dist: nucliadb-protos>=6.1.0.post2533
+Requires-Dist: nucliadb-telemetry>=6.1.0.post2533
 Provides-Extra: cache
 Requires-Dist: redis>=4.3.4; extra == "cache"
 Requires-Dist: orjson>=3.6.7; extra == "cache"
@@ -40,27 +40,27 @@ nucliadb_utils/nuclia_usage/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn
 nucliadb_utils/nuclia_usage/utils/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb_utils/nuclia_usage/utils/kb_usage_report.py,sha256=F6dhQWhrWzl3qfAdvrqV08hnFKcvFlU8IWN2lzYX0vg,3628
 nucliadb_utils/storages/__init__.py,sha256=5Qc8AUWiJv9_JbGCBpAn88AIJhwDlm0OPQpg2ZdRL4U,872
-nucliadb_utils/storages/azure.py,sha256=egMDwLNIGSQyVevuySt2AswzFdNAcih05BbRg3-p8IU,16015
+nucliadb_utils/storages/azure.py,sha256=FK4c_v9AUAwagScm_F1uDmJeQQq7P4jZswiD2trwb4A,16394
 nucliadb_utils/storages/exceptions.py,sha256=mm_wX4YRtp7u7enkk_4pMSlX5AQQuFbq4xLmupVDt3Y,2502
-nucliadb_utils/storages/gcs.py,sha256=AmGOLM19WOlHqzIjSoKZDx7aNosRHE18XgQ5lpkWqkI,27853
-nucliadb_utils/storages/local.py,sha256=jP1glJE8TTPO-ZpYJ8aTxcbmbbay7XJBfUoVWe5PE5U,10501
+nucliadb_utils/storages/gcs.py,sha256=5Yso0o7z4eEVEsBCD3afZlLwMJe6yWvKu1pyB3pqics,28810
+nucliadb_utils/storages/local.py,sha256=JxlWNtu49JJ04dq6o7bBAqbpbeYpVyvvBM5jq1sGJ-4,11003
 nucliadb_utils/storages/nuclia.py,sha256=vEv94xAT7QM2g80S25QyrOw2pzvP2BAX-ADgZLtuCVc,2097
-nucliadb_utils/storages/object_store.py,sha256=Tw10GmpYfM5TMqJ3Tk9pLQ9wLMBk1-snL_m6uasiZDQ,4257
-nucliadb_utils/storages/s3.py,sha256=pbuukqpce_kqkmI_3eUTo390KbM5rmI7h8wsYAXtTAo,20377
+nucliadb_utils/storages/object_store.py,sha256=HtKjIKhErSBvuqx1SuCOnL0SkiHqgfyekNMP8o2piZU,4492
+nucliadb_utils/storages/s3.py,sha256=zV0NBA74JE39caxKd2H33NhMsAtNKo_65tEqJIR-I88,20743
 nucliadb_utils/storages/settings.py,sha256=ugCPy1zxBOmA2KosT-4tsjpvP002kg5iQyi42yCGCJA,1285
-nucliadb_utils/storages/storage.py,sha256=onwQJ4at-XewEG7dxcWdOqobfCw4w0PyPC7olvFJgjI,20295
+nucliadb_utils/storages/storage.py,sha256=eAwr8acamaC2iru3tJaGnkcS35WfioTrnWxcfgKDQSk,21058
 nucliadb_utils/storages/utils.py,sha256=8g2rIwJeYIumQLOB47Yw1rx3twlhRB_cJxer65QfZmk,1479
 nucliadb_utils/tests/__init__.py,sha256=Oo9CAE7B0eW5VHn8sHd6o30SQzOWUhktLPRXdlDOleA,1456
 nucliadb_utils/tests/asyncbenchmark.py,sha256=x4be2IwCawle9zWgMOJkmwoUwk5p1tv7cLQGmybkEOg,10587
 nucliadb_utils/tests/azure.py,sha256=Dg-Eb4KVScG-O6P9y-bVQZTAKTNUMQ0i-CKEd9IdrWw,4474
 nucliadb_utils/tests/fixtures.py,sha256=i0sqPqe5a5JlKGFdaIvOlHYkZ3pHZ2hTIgTsaIB3vSM,3472
-nucliadb_utils/tests/gcs.py,sha256=wO4xRhl-_wrddxlGyEktqa_bXHeQarQN-l_A72Tzr9A,4499
+nucliadb_utils/tests/gcs.py,sha256=KW_DLet1WRlssSW55eI-IQ-0d94Jo2Oh7Di4xGv4JCc,4685
 nucliadb_utils/tests/indexing.py,sha256=YW2QhkhO9Q_8A4kKWJaWSvXvyQ_AiAwY1VylcfVQFxk,1513
 nucliadb_utils/tests/local.py,sha256=fXIBasrvdaFJM-sw2wk1_oiFzBcm9O10iCyC-OiXwY8,1914
 nucliadb_utils/tests/nats.py,sha256=xqpww4jZjTKY9oPGlJdDJG67L3FIBQsa9qDHxILR8r8,7687
 nucliadb_utils/tests/s3.py,sha256=pl-RJFjA4MH6iXkqhsh5g8gDuEhrYu1nPZ-laxlrMlE,3704
-nucliadb_utils-6.1.0.post2520.dist-info/METADATA,sha256=4g5NM63DI6a-tpIXMDuQ0u__2YaMb8M0l7bEf--JPyY,2055
-nucliadb_utils-6.1.0.post2520.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-nucliadb_utils-6.1.0.post2520.dist-info/top_level.txt,sha256=fE3vJtALTfgh7bcAWcNhcfXkNPp_eVVpbKK-2IYua3E,15
-nucliadb_utils-6.1.0.post2520.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-nucliadb_utils-6.1.0.post2520.dist-info/RECORD,,
+nucliadb_utils-6.1.0.post2533.dist-info/METADATA,sha256=DeM7PJS1qyI9O3RpgSt8PLyqYG3O8v0dkGnfksl_G88,2055
+nucliadb_utils-6.1.0.post2533.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+nucliadb_utils-6.1.0.post2533.dist-info/top_level.txt,sha256=fE3vJtALTfgh7bcAWcNhcfXkNPp_eVVpbKK-2IYua3E,15
+nucliadb_utils-6.1.0.post2533.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+nucliadb_utils-6.1.0.post2533.dist-info/RECORD,,