nucliadb-dataset 6.10.0.post5647__py3-none-any.whl → 6.10.0.post5767__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nucliadb_dataset/__init__.py CHANGED
@@ -13,12 +13,11 @@
  # limitations under the License.

  from enum import Enum
- from typing import Dict

  from nucliadb_dataset.dataset import NucliaDBDataset, Task, download_all_partitions
  from nucliadb_dataset.nuclia import NucliaDriver

- NUCLIA_GLOBAL: Dict[str, NucliaDriver] = {}
+ NUCLIA_GLOBAL: dict[str, NucliaDriver] = {}

  CLIENT_ID = "CLIENT"

@@ -29,10 +28,10 @@ class ExportType(str, Enum):


  __all__ = (
+     "CLIENT_ID",
+     "NUCLIA_GLOBAL",
+     "ExportType",
      "NucliaDBDataset",
      "Task",
      "download_all_partitions",
-     "NUCLIA_GLOBAL",
-     "CLIENT_ID",
-     "ExportType",
  )
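
The typing changes throughout this diff follow PEP 585 (builtin generics such as dict[str, X]) and PEP 604 (X | None unions), available from Python 3.9 and 3.10 respectively. A minimal before/after sketch with illustrative names, not taken from the package:

    # pre-3.9 style: generics imported from typing
    from typing import Dict, Optional

    registry: Dict[str, int] = {}
    maybe_name: Optional[str] = None

    # modern style: builtin generics (PEP 585) and unions (PEP 604)
    registry: dict[str, int] = {}
    maybe_name: str | None = None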
nucliadb_dataset/api.py CHANGED
@@ -12,7 +12,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from typing import Iterator, List
+ from collections.abc import Iterator

  from nucliadb_dataset import CLIENT_ID, NUCLIA_GLOBAL
  from nucliadb_dataset.nuclia import NucliaDriver
@@ -36,26 +36,22 @@ def get_nuclia_client() -> NucliaDriver:

  def iterate_sentences(kbid: str, labels: bool, entities: bool, text: bool) -> Iterator[TrainSentence]:
      client = get_nuclia_client()
-     for sentence in client.iterate_sentences(kbid, labels, entities, text):
-         yield sentence
+     yield from client.iterate_sentences(kbid, labels, entities, text)


  def iterate_paragraphs(kbid: str, labels: bool, entities: bool, text: bool) -> Iterator[TrainParagraph]:
      client = get_nuclia_client()
-     for sentence in client.iterate_paragraphs(kbid, labels, entities, text):
-         yield sentence
+     yield from client.iterate_paragraphs(kbid, labels, entities, text)


  def iterate_fields(kbid: str, labels: bool, entities: bool, text: bool) -> Iterator[TrainField]:
      client = get_nuclia_client()
-     for sentence in client.iterate_fields(kbid, labels, entities, text):
-         yield sentence
+     yield from client.iterate_fields(kbid, labels, entities, text)


  def iterate_resources(kbid: str, labels: bool, entities: bool, text: bool) -> Iterator[TrainResource]:
      client = get_nuclia_client()
-     for sentence in client.iterate_resources(kbid, labels, entities, text):
-         yield sentence
+     yield from client.iterate_resources(kbid, labels, entities, text)


  def get_labels(kbid: str) -> GetLabelsResponse:
@@ -76,7 +72,7 @@ def get_info(kbid: str) -> TrainInfo:
      return info


- def get_ontology_count(kbid: str, paragraph_labelsets: List[str], resource_labelset: List[str]):
+ def get_ontology_count(kbid: str, paragraph_labelsets: list[str], resource_labelset: list[str]):
      client = get_nuclia_client()
      labels = client.get_ontology_count(kbid, paragraph_labelsets, resource_labelset)
      return labels
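
The yield from rewrites in this file are behavior-preserving: PEP 380 delegation yields each item of the sub-iterable in turn, exactly as the removed explicit loops did. A tiny self-contained illustration with made-up values:

    def numbers_loop():
        for n in (1, 2, 3):
            yield n

    def numbers_delegated():
        yield from (1, 2, 3)

    assert list(numbers_loop()) == list(numbers_delegated()) == [1, 2, 3]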
nucliadb_dataset/dataset.py CHANGED
@@ -14,8 +14,9 @@

  import logging
  import os
+ from collections.abc import Callable, Iterator
  from dataclasses import dataclass, field
- from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
+ from typing import Any

  import pyarrow as pa  # type: ignore

@@ -42,12 +43,12 @@ CHUNK_SIZE = 5 * 1024 * 1024
  @dataclass
  class LabelSetCount:
      count: int
-     labels: Dict[str, int] = field(default_factory=dict)
+     labels: dict[str, int] = field(default_factory=dict)


- class NucliaDataset(object):
-     labels: Optional[KnowledgeBoxLabels]
-     entities: Optional[KnowledgeBoxEntities]
+ class NucliaDataset:
+     labels: KnowledgeBoxLabels | None
+     entities: KnowledgeBoxEntities | None

      def __new__(cls, *args, **kwargs):
          if cls is NucliaDataset:
@@ -56,18 +57,18 @@ class NucliaDataset(object):

      def __init__(
          self,
-         base_path: Optional[str] = None,
+         base_path: str | None = None,
      ):
          if base_path is None:
              base_path = os.getcwd()
          self.base_path = base_path
-         self.mappings: List[Callable] = []
+         self.mappings: list[Callable] = []

          self.labels = None
          self.entities = None
          self.folder = None

-     def iter_all_partitions(self, force=False) -> Iterator[Tuple[str, str]]:
+     def iter_all_partitions(self, force=False) -> Iterator[tuple[str, str]]:
          partitions = self.get_partitions()
          for index, partition in enumerate(partitions):
              logger.info(f"Reading partition {partition} {index}/{len(partitions)}")
@@ -75,7 +76,7 @@ class NucliaDataset(object):
              logger.info(f"Done reading partition {partition}")
              yield partition, filename

-     def read_all_partitions(self, force=False, path: Optional[str] = None) -> List[str]:
+     def read_all_partitions(self, force=False, path: str | None = None) -> list[str]:
          partitions = self.get_partitions()
          result = []
          for index, partition in enumerate(partitions):
@@ -91,9 +92,9 @@ class NucliaDataset(object):
      def read_partition(
          self,
          partition_id: str,
-         filename: Optional[str] = None,
+         filename: str | None = None,
          force: bool = False,
-         path: Optional[str] = None,
+         path: str | None = None,
      ):
          raise NotImplementedError()

@@ -103,12 +104,12 @@ class NucliaDBDataset(NucliaDataset):
          self,
          sdk: NucliaDB,
          kbid: str,
-         task: Optional[Task] = None,
-         labels: Optional[List[str]] = None,
-         trainset: Optional[Union[TrainSetPB, TrainSetModel]] = None,
-         base_path: Optional[str] = None,
-         search_sdk: Optional[NucliaDB] = None,
-         reader_sdk: Optional[NucliaDB] = None,
+         task: Task | None = None,
+         labels: list[str] | None = None,
+         trainset: TrainSetPB | TrainSetModel | None = None,
+         base_path: str | None = None,
+         search_sdk: NucliaDB | None = None,
+         reader_sdk: NucliaDB | None = None,
      ):
          super().__init__(base_path)

@@ -165,13 +166,13 @@ class NucliaDBDataset(NucliaDataset):
          streamer.initialize(partition_id)
          return streamer

-     def _set_mappings(self, funcs: List[Callable[[Any, Any], Tuple[Any, Any]]]):
+     def _set_mappings(self, funcs: list[Callable[[Any, Any], tuple[Any, Any]]]):
          self.mappings = funcs

      def _set_schema(self, schema: pa.Schema):
          self.schema = schema

-     def get_partitions(self) -> List[str]:
+     def get_partitions(self) -> list[str]:
          """
          Get expected number of partitions from a live NucliaDB
          """
@@ -183,9 +184,9 @@ class NucliaDBDataset(NucliaDataset):
      def read_partition(
          self,
          partition_id: str,
-         filename: Optional[str] = None,
+         filename: str | None = None,
          force: bool = False,
-         path: Optional[str] = None,
+         path: str | None = None,
      ):
          """
          Export an arrow partition from a live NucliaDB and store it locally
@@ -220,12 +221,12 @@ class NucliaDBDataset(NucliaDataset):

  def download_all_partitions(
      task: str,
-     slug: Optional[str] = None,
-     kbid: Optional[str] = None,
-     nucliadb_base_url: Optional[str] = "http://localhost:8080",
-     path: Optional[str] = None,
-     sdk: Optional[NucliaDB] = None,
-     labels: Optional[List[str]] = None,
+     slug: str | None = None,
+     kbid: str | None = None,
+     nucliadb_base_url: str | None = "http://localhost:8080",
+     path: str | None = None,
+     sdk: NucliaDB | None = None,
+     labels: list[str] | None = None,
  ):
      if sdk is None:
          sdk = NucliaDB(region="on-prem", url=nucliadb_base_url)
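
The __new__ check visible in the NucliaDataset hunk above (cut off by the diff context) is a common way to keep a base class abstract without using abc. A minimal sketch of the pattern, assuming the guard raises on direct instantiation; the exception type and message here are illustrative, not the package's actual code:

    class Base:
        def __new__(cls, *args, **kwargs):
            # Refuse direct instantiation of the base; subclasses are fine.
            if cls is Base:
                raise TypeError("Base cannot be instantiated directly")
            return super().__new__(cls)

    class Child(Base):
        pass

    Child()   # ok
    # Base()  # would raise TypeError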
nucliadb_dataset/nuclia.py CHANGED
@@ -12,7 +12,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from typing import Iterator, List
+ from collections.abc import Iterator

  import grpc

@@ -54,8 +54,7 @@ class NucliaDriver:
          request.metadata.labels = labels
          request.metadata.entities = entities
          request.metadata.text = text
-         for sentence in self.stub.GetSentences(request):
-             yield sentence
+         yield from self.stub.GetSentences(request)

      def iterate_paragraphs(
          self, kbid: str, labels: bool, entities: bool, text: bool
@@ -65,8 +64,7 @@ class NucliaDriver:
          request.metadata.labels = labels
          request.metadata.entities = entities
          request.metadata.text = text
-         for paragraph in self.stub.GetParagraphs(request):
-             yield paragraph
+         yield from self.stub.GetParagraphs(request)

      def iterate_resources(
          self, kbid: str, labels: bool, entities: bool, text: bool
@@ -76,8 +74,7 @@ class NucliaDriver:
          request.metadata.labels = labels
          request.metadata.entities = entities
          request.metadata.text = text
-         for resource in self.stub.GetResources(request):
-             yield resource
+         yield from self.stub.GetResources(request)

      def iterate_fields(
          self, kbid: str, labels: bool, entities: bool, text: bool
@@ -87,8 +84,7 @@ class NucliaDriver:
          request.metadata.labels = labels
          request.metadata.entities = entities
          request.metadata.text = text
-         for field in self.stub.GetFields(request):
-             yield field
+         yield from self.stub.GetFields(request)

      def get_labels(self, kbid: str) -> GetLabelsResponse:
          request = GetLabelsRequest()
@@ -106,7 +102,7 @@ class NucliaDriver:
          return self.stub.GetInfo(request)

      def get_ontology_count(
-         self, kbid: str, paragraph_labelsets: List[str], resource_labelsets: List[str]
+         self, kbid: str, paragraph_labelsets: list[str], resource_labelsets: list[str]
      ) -> LabelsetsCount:
          request = GetLabelsetsCountRequest()
          request.kb.uuid = kbid
nucliadb_dataset/settings.py CHANGED
@@ -14,7 +14,6 @@
  #

  from pathlib import Path
- from typing import Optional

  import pydantic
  from pydantic_settings import BaseSettings
@@ -35,20 +34,20 @@ class RunningSettings(BaseSettings):
      download_path: str = pydantic.Field(f"{Path.home()}/.nuclia/download", description="Download path")
      url: str = pydantic.Field(description="KnowledgeBox URL")
      type: Task = pydantic.Field(description="Dataset Type")
-     labelset: Optional[str] = pydantic.Field(
+     labelset: str | None = pydantic.Field(
          None, description="For classification which labelset or families"
      )

      datasets_url: str = pydantic.Field(
          "https://europe-1.nuclia.cloud",
-         description="Base url for the Nuclia datasets component (excluding /api/v1)™",  # noqa
+         description="Base url for the Nuclia datasets component (excluding /api/v1)™",
      )

-     apikey: Optional[str] = pydantic.Field(None, description="API key to upload to Nuclia Datasets™")
+     apikey: str | None = pydantic.Field(None, description="API key to upload to Nuclia Datasets™")

      environment: str = pydantic.Field("on-prem", description="region or on-prem")

-     service_token: Optional[str] = pydantic.Field(
+     service_token: str | None = pydantic.Field(
          None, description="Service account key to access Nuclia Cloud"
      )
nucliadb_dataset/streamer.py CHANGED
@@ -13,7 +13,6 @@
  # limitations under the License.

  import logging
- from typing import Dict, Optional, Union

  import requests

@@ -26,12 +25,12 @@ SIZE_BYTES = 4


  class Streamer:
-     resp: Optional[requests.Response]
+     resp: requests.Response | None

      def __init__(
          self,
-         trainset: Union[TrainSetPB, TrainSetModel],
-         reader_headers: Dict[str, str],
+         trainset: TrainSetPB | TrainSetModel,
+         reader_headers: dict[str, str],
          base_url: str,
          kbid: str,
      ):
@@ -75,16 +74,16 @@ class Streamer:
      def __iter__(self):
          return self

-     def read(self) -> Optional[bytes]:
+     def read(self) -> bytes | None:
          assert self.resp is not None, "Streamer not initialized"
          header = self.resp.raw.read(4, decode_content=True)
          if header == b"":
              return None
-         payload_size = int.from_bytes(header, byteorder="big", signed=False)  # noqa
+         payload_size = int.from_bytes(header, byteorder="big", signed=False)
          data = self.resp.raw.read(payload_size)
          return data

-     def __next__(self) -> Optional[bytes]:
+     def __next__(self) -> bytes | None:
          payload = self.read()
          if payload in [None, b""]:
              logger.info("Streamer finished reading")
nucliadb_dataset/tasks.py CHANGED
@@ -12,9 +12,10 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

+ from collections.abc import Callable
  from dataclasses import dataclass
  from enum import Enum
- from typing import TYPE_CHECKING, Any, Callable, Dict, List
+ from typing import TYPE_CHECKING, Any

  import pyarrow as pa  # type: ignore

@@ -64,10 +65,10 @@ class TaskDefinition:
      schema: pa.schema
      proto: Any
      labels: bool
-     mapping: List[Callable]
+     mapping: list[Callable]


- TASK_DEFINITIONS: Dict[Task, TaskDefinition] = {
+ TASK_DEFINITIONS: dict[Task, TaskDefinition] = {
      Task.PARAGRAPH_CLASSIFICATION: TaskDefinition(
          schema=pa.schema(
              [
@@ -190,4 +191,4 @@ TASK_DEFINITIONS: Dict[Task, TaskDefinition] = {
      ),
  }

- TASK_DEFINITIONS_REVERSE = {task.proto: task for task in TASK_DEFINITIONS.values()}  # noqa
+ TASK_DEFINITIONS_REVERSE = {task.proto: task for task in TASK_DEFINITIONS.values()}
nucliadb_dataset/tests/fixtures.py CHANGED
@@ -14,7 +14,8 @@

  import re
  import tempfile
- from typing import TYPE_CHECKING, AsyncIterator, Iterator, Optional
+ from collections.abc import AsyncIterator, Iterator
+ from typing import TYPE_CHECKING

  import docker  # type: ignore
  import grpc
@@ -32,7 +33,7 @@ from nucliadb_protos.writer_pb2_grpc import WriterStub
  from nucliadb_sdk.v2.sdk import NucliaDB

  DOCKER_ENV_GROUPS = re.search(r"//([^:]+)", docker.from_env().api.base_url)
- DOCKER_HOST: Optional[str] = DOCKER_ENV_GROUPS.group(1) if DOCKER_ENV_GROUPS else None
+ DOCKER_HOST: str | None = DOCKER_ENV_GROUPS.group(1) if DOCKER_ENV_GROUPS else None

  if TYPE_CHECKING:
      from nucliadb_protos.writer_pb2_grpc import WriterAsyncStub
nucliadb_dataset-6.10.0.post5767.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nucliadb_dataset
- Version: 6.10.0.post5647
+ Version: 6.10.0.post5767
  Summary: NucliaDB Train Python client
  Author-email: Nuclia <nucliadb@nuclia.com>
  License-Expression: Apache-2.0
@@ -24,9 +24,9 @@ Requires-Dist: aiohttp
  Requires-Dist: argdantic
  Requires-Dist: pydantic-settings>=2.2
  Requires-Dist: pyarrow
- Requires-Dist: nucliadb-protos>=6.10.0.post5647
- Requires-Dist: nucliadb-sdk>=6.10.0.post5647
- Requires-Dist: nucliadb-models>=6.10.0.post5647
+ Requires-Dist: nucliadb-protos>=6.10.0.post5767
+ Requires-Dist: nucliadb-sdk>=6.10.0.post5767
+ Requires-Dist: nucliadb-models>=6.10.0.post5767

  # NUCLIADB TRAIN CLIENT

nucliadb_dataset-6.10.0.post5767.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+ nucliadb_dataset/__init__.py,sha256=1lvjYSji93zdS2zZGnanh9TeunJcaInv-CBEoEfdAL0,1030
+ nucliadb_dataset/api.py,sha256=i8OlwF1ly6lkrEMb9Ffoc0GSXB-7zGaOtXMN51wDyO8,2644
+ nucliadb_dataset/dataset.py,sha256=VeJH71WWNJPFO6UIKRP8iSz5O6FDXwOUaQdS52VuTNQ,7978
+ nucliadb_dataset/export.py,sha256=FJjmg1GA0fhzxEZVgMbrqjbcLTi2v9gw3wI0vhKaDWI,2528
+ nucliadb_dataset/mapping.py,sha256=Ayg-dDiGc4P-ctRj2ddIlbgidziuLlZGKeAZ08aHBZU,6495
+ nucliadb_dataset/nuclia.py,sha256=eBsuDs_HHZTpbyl35H7nqqu3pgEFHzDo8VygRFjTWDg,3593
+ nucliadb_dataset/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nucliadb_dataset/run.py,sha256=Ktqv6m0f5oCs54RNZh9b3MJtYZ_JmKP1Zp2Uo3VEyF4,2806
+ nucliadb_dataset/settings.py,sha256=xRDTuqj4SVyTmliA6RwMRmbvI4aLXTx-CrfUDqMJvbc,1982
+ nucliadb_dataset/streamer.py,sha256=EKoasniFuVHSQIPVxZ6i4wQVKsXxDNLtPjpX_J5CLDk,2996
+ nucliadb_dataset/tasks.py,sha256=e1pCPzY9T2jQP2WgTNHGJnuBKg0JWg0vonuxQWAnJLs,6234
+ nucliadb_dataset/tests/__init__.py,sha256=zG33bUz1rHFPtvqQPWn4rDwBJt3FJodGuQYD45quiQg,583
+ nucliadb_dataset/tests/fixtures.py,sha256=KPMO0mYzX3HR41nzkJ8D0TlF7bUekQGBi8uSaMtAmLU,7524
+ nucliadb_dataset-6.10.0.post5767.dist-info/METADATA,sha256=vMV7P-hHx-mJs-4xBrM_bA1GMartzMHSA-1c3KxcgrY,1218
+ nucliadb_dataset-6.10.0.post5767.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nucliadb_dataset-6.10.0.post5767.dist-info/entry_points.txt,sha256=ORrWnn6AUFfHGY1fWPRPTz99KV_pXXwttZAopyT8qvQ,60
+ nucliadb_dataset-6.10.0.post5767.dist-info/top_level.txt,sha256=aJtDe54tz6060E0uyk1rdTRAU4FPWo5if1fYFQGvdqU,17
+ nucliadb_dataset-6.10.0.post5767.dist-info/RECORD,,
nucliadb_dataset-6.10.0.post5647.dist-info/RECORD DELETED
@@ -1,18 +0,0 @@
- nucliadb_dataset/__init__.py,sha256=I58PAYrrgLvxmkGGHvzKKUwnaZ2ny44hba6AXEYfKOQ,1054
- nucliadb_dataset/api.py,sha256=RDIW23cy12E5_UlwsiOuhdFrr1OHPA4Mj7hZH0BqGgA,2757
- nucliadb_dataset/dataset.py,sha256=11XoRslzMQQHwJA5MrdAQ30eLP8DoNTv5EmuzL2mln0,8061
- nucliadb_dataset/export.py,sha256=FJjmg1GA0fhzxEZVgMbrqjbcLTi2v9gw3wI0vhKaDWI,2528
- nucliadb_dataset/mapping.py,sha256=Ayg-dDiGc4P-ctRj2ddIlbgidziuLlZGKeAZ08aHBZU,6495
- nucliadb_dataset/nuclia.py,sha256=uXiwJS_GBcN6z9l9hFahD3jLbqhkfpGNWpMR6_8K5k8,3718
- nucliadb_dataset/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nucliadb_dataset/run.py,sha256=Ktqv6m0f5oCs54RNZh9b3MJtYZ_JmKP1Zp2Uo3VEyF4,2806
- nucliadb_dataset/settings.py,sha256=9NYeJVgIHbLqCenUFRwj6Iz9S7klOROXQfzaUiBUEl0,2027
- nucliadb_dataset/streamer.py,sha256=aBzYWNQQVWM5qA14f8p0YQdshzC4KpOQFt3J0TYe1uk,3060
- nucliadb_dataset/tasks.py,sha256=198o37vDlzS7OdXrHYhtwI8kz2WHWJnxpholh-rtTPQ,6227
- nucliadb_dataset/tests/__init__.py,sha256=zG33bUz1rHFPtvqQPWn4rDwBJt3FJodGuQYD45quiQg,583
- nucliadb_dataset/tests/fixtures.py,sha256=5Ac20TKYmGe61d74yRT6hj0ENdLr3jT2Gbj2fiSX9LM,7510
- nucliadb_dataset-6.10.0.post5647.dist-info/METADATA,sha256=bzo2_22-tenSkpguiIJFXWls9b6DfHNCEEIZG9eHxIU,1218
- nucliadb_dataset-6.10.0.post5647.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nucliadb_dataset-6.10.0.post5647.dist-info/entry_points.txt,sha256=ORrWnn6AUFfHGY1fWPRPTz99KV_pXXwttZAopyT8qvQ,60
- nucliadb_dataset-6.10.0.post5647.dist-info/top_level.txt,sha256=aJtDe54tz6060E0uyk1rdTRAU4FPWo5if1fYFQGvdqU,17
- nucliadb_dataset-6.10.0.post5647.dist-info/RECORD,,