datachain 0.28.2__py3-none-any.whl → 0.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

--- a/datachain/data_storage/warehouse.py
+++ b/datachain/data_storage/warehouse.py
@@ -21,6 +21,7 @@ from datachain.lib.file import File
 from datachain.lib.signal_schema import SignalSchema
 from datachain.node import DirType, DirTypeGroup, Node, NodeWithPath, get_path
 from datachain.query.batch import RowsOutput
+from datachain.query.schema import ColumnMeta
 from datachain.query.utils import get_query_id_column
 from datachain.sql.functions import path as pathfunc
 from datachain.sql.types import Int, SQLType
@@ -400,7 +401,7 @@ class AbstractWarehouse(ABC, Serializable):
         expressions: tuple[_ColumnsClauseArgument[Any], ...] = (
             sa.func.count(table.c.sys__id),
         )
-        size_column_names = [s.replace(".", "__") + "__size" for s in file_signals]
+        size_column_names = [ColumnMeta.to_db_name(s) + "__size" for s in file_signals]
         size_columns = [c for c in table.columns if c.name in size_column_names]

         if size_columns:
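The change above routes signal-name mangling through the shared `ColumnMeta.to_db_name` helper instead of an inline `str.replace`. A minimal sketch of the intended equivalence, inferred from the helper's usage in this diff:

```py
from datachain.query.schema import ColumnMeta

# Centralizes the dot-to-"__" naming rule previously inlined here:
# "file.size" -> "file__size"
assert ColumnMeta.to_db_name("file.size") == "file.size".replace(".", "__")
```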
--- a/datachain/lib/dc/database.py
+++ b/datachain/lib/dc/database.py
@@ -6,6 +6,10 @@ from typing import TYPE_CHECKING, Any, Optional, Union

 import sqlalchemy

+from datachain.query.schema import ColumnMeta
+
+DEFAULT_DATABASE_BATCH_SIZE = 10_000
+
 if TYPE_CHECKING:
     from collections.abc import Iterator, Mapping, Sequence

@@ -30,7 +34,7 @@ if TYPE_CHECKING:
 @contextlib.contextmanager
 def _connect(
     connection: "ConnectionType",
-) -> "Iterator[Union[sqlalchemy.engine.Connection, sqlalchemy.orm.Session]]":
+) -> "Iterator[sqlalchemy.engine.Connection]":
     import sqlalchemy.orm

     with contextlib.ExitStack() as stack:
@@ -47,27 +51,184 @@ def _connect(
         yield engine.connect()
     elif isinstance(connection, sqlalchemy.Engine):
         yield stack.enter_context(connection.connect())
-    elif isinstance(connection, (sqlalchemy.Connection, sqlalchemy.orm.Session)):
+    elif isinstance(connection, sqlalchemy.Connection):
         # do not close the connection, as it is managed by the caller
         yield connection
+    elif isinstance(connection, sqlalchemy.orm.Session):
+        # For Session objects, get the underlying bind (Engine or Connection).
+        # Sessions don't support DDL operations directly.
+        bind = connection.get_bind()
+        if isinstance(bind, sqlalchemy.Engine):
+            yield stack.enter_context(bind.connect())
+        else:
+            # bind is already a Connection
+            yield bind
     else:
         raise TypeError(f"Unsupported connection type: {type(connection).__name__}")


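`_connect` now unwraps an ORM `Session` to its underlying bind, since DDL such as `Table.create()` needs a real `Connection`. A short sketch of what this enables; `chain` is assumed to be an existing `DataChain`:

```py
import sqlalchemy as sa
from sqlalchemy.orm import Session

engine = sa.create_engine("sqlite:///data.db")
with Session(engine) as session:
    # The Session's bind (the Engine above) carries the CREATE TABLE and
    # INSERT statements; the Session itself never executes DDL.
    chain.to_database("files", session)
```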
-def _infer_schema(
-    result: "sqlalchemy.engine.Result",
-    to_infer: list[str],
-    infer_schema_length: Optional[int] = 100,
-) -> tuple[list["sqlalchemy.Row"], dict[str, "DataType"]]:
-    from datachain.lib.convert.values_to_tuples import values_to_tuples
+def to_database(
+    chain: "DataChain",
+    table_name: str,
+    connection: "ConnectionType",
+    *,
+    batch_rows: int = DEFAULT_DATABASE_BATCH_SIZE,
+    on_conflict: Optional[str] = None,
+    column_mapping: Optional[dict[str, Optional[str]]] = None,
+) -> None:
+    """
+    Implementation function for exporting DataChain to database tables.

-    if not to_infer:
-        return [], {}
+    This is the core implementation that handles the actual database operations.
+    For user-facing documentation, see the DataChain.to_database() method.
+    """
+    from datachain.utils import batched

-    rows = list(itertools.islice(result, infer_schema_length))
-    values = {col: [row._mapping[col] for row in rows] for col in to_infer}
-    _, output_schema, _ = values_to_tuples("", **values)
-    return rows, output_schema
+    if on_conflict and on_conflict not in ("ignore", "update"):
+        raise ValueError(
+            f"on_conflict must be 'ignore' or 'update', got: {on_conflict}"
+        )
+
+    signals_schema = chain.signals_schema.clone_without_sys_signals()
+    all_columns = [
+        sqlalchemy.Column(c.name, c.type)  # type: ignore[union-attr]
+        for c in signals_schema.db_signals(as_columns=True)
+    ]
+
+    column_mapping = column_mapping or {}
+    normalized_column_mapping = _normalize_column_mapping(column_mapping)
+    column_indices_and_names, columns = _prepare_columns(
+        all_columns, normalized_column_mapping
+    )
+
+    with _connect(connection) as conn:
+        metadata = sqlalchemy.MetaData()
+        table = sqlalchemy.Table(table_name, metadata, *columns)
+
+        # Check if table already exists to determine if we should clean up on error.
+        inspector = sqlalchemy.inspect(conn)
+        assert inspector  # to satisfy mypy
+        table_existed_before = table_name in inspector.get_table_names()
+
+        try:
+            table.create(conn, checkfirst=True)
+            rows_iter = chain._leaf_values()
+            for batch in batched(rows_iter, batch_rows):
+                _process_batch(
+                    conn, table, batch, on_conflict, column_indices_and_names
+                )
+            conn.commit()
+        except Exception:
+            if not table_existed_before:
+                try:
+                    table.drop(conn, checkfirst=True)
+                    conn.commit()
+                except sqlalchemy.exc.SQLAlchemyError:
+                    pass
+            raise
+
+
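`to_database` streams rows in `batch_rows`-sized chunks and, if the export fails partway, drops a table it created itself (pre-existing tables are left in place). A rough sketch of the batching contract, assuming `datachain.utils.batched` chunks like `itertools.batched` given its call signature above:

```py
from datachain.utils import batched

# 25,000 rows with a chunk size of 10,000 -> 10,000 / 10,000 / 5,000,
# so at most one chunk is materialized in memory at a time
for chunk in batched(iter(range(25_000)), 10_000):
    print(len(list(chunk)))
```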
+def _normalize_column_mapping(
+    column_mapping: dict[str, Optional[str]],
+) -> dict[str, Optional[str]]:
+    """
+    Convert column mapping keys from DataChain format (dots) to database format
+    (double underscores).
+
+    This allows users to specify column mappings using the intuitive DataChain
+    format, like {"nested_data.value": "data_value"} instead of
+    {"nested_data__value": "data_value"}.
+    """
+    if not column_mapping:
+        return {}
+
+    normalized_mapping: dict[str, Optional[str]] = {}
+    original_keys: dict[str, str] = {}
+    for key, value in column_mapping.items():
+        db_key = ColumnMeta.to_db_name(key)
+        if db_key in normalized_mapping:
+            prev = original_keys[db_key]
+            raise ValueError(
+                "Column mapping collision: multiple keys map to the same "
+                f"database column name '{db_key}': '{prev}' and '{key}'."
+            )
+        normalized_mapping[db_key] = value
+        original_keys[db_key] = key
+
+    # If it's a defaultdict, preserve the default factory
+    if hasattr(column_mapping, "default_factory"):
+        from collections import defaultdict
+
+        default_factory = column_mapping.default_factory
+        result: dict[str, Optional[str]] = defaultdict(default_factory)
+        result.update(normalized_mapping)
+        return result
+
+    return normalized_mapping
+
+
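Keys are normalized before use, so the dotted and double-underscore spellings of the same signal collide deliberately, and a `defaultdict`'s factory survives normalization. A small illustration under the assumptions above:

```py
from collections import defaultdict

# "user.name" and "user__name" normalize to the same database column name,
# so this mapping would raise the ValueError defined above:
# _normalize_column_mapping({"user.name": "n1", "user__name": "n2"})

# Export only user.id; the None-returning factory skips every other column
mapping = defaultdict(lambda: None, {"user.id": "id"})
```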
+def _prepare_columns(all_columns, column_mapping):
+    """Prepare column mapping and column definitions."""
+    column_indices_and_names = []  # List of (index, target_name) tuples
+    columns = []
+    for idx, col in enumerate(all_columns):
+        if col.name in column_mapping or hasattr(column_mapping, "default_factory"):
+            mapped_name = column_mapping[col.name]
+            if mapped_name:
+                columns.append(sqlalchemy.Column(mapped_name, col.type))
+                column_indices_and_names.append((idx, mapped_name))
+        else:
+            columns.append(col)
+            column_indices_and_names.append((idx, col.name))
+    return column_indices_and_names, columns
+
+
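Columns mapped to `None` (explicitly or via a `defaultdict` factory) are dropped from both the `CREATE TABLE` definition and the per-row dicts built later. A worked example against the helper above (column names are illustrative):

```py
import sqlalchemy

all_columns = [
    sqlalchemy.Column("user__id", sqlalchemy.Integer),
    sqlalchemy.Column("user__name", sqlalchemy.String),
    sqlalchemy.Column("user__password", sqlalchemy.String),
]
# Rename user__id, keep user__name as-is, skip user__password
indices, cols = _prepare_columns(
    all_columns, {"user__id": "id", "user__password": None}
)
# indices == [(0, "id"), (1, "user__name")]; cols defines two columns
```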
+def _process_batch(conn, table, batch, on_conflict, column_indices_and_names):
+    """Process a batch of rows with conflict resolution."""
+
+    def prepare_row(row_values):
+        """Convert a row tuple to a dictionary with proper DB column names."""
+        return {
+            target_name: row_values[idx]
+            for idx, target_name in column_indices_and_names
+        }
+
+    rows_to_insert = [prepare_row(row) for row in batch]
+
+    supports_conflict = on_conflict and conn.engine.name in ("postgresql", "sqlite")
+
+    if supports_conflict:
+        # Use dialect-specific insert for conflict resolution
+        if conn.engine.name == "postgresql":
+            from sqlalchemy.dialects.postgresql import insert as pg_insert
+
+            insert_stmt = pg_insert(table)
+        elif conn.engine.name == "sqlite":
+            from sqlalchemy.dialects.sqlite import insert as sqlite_insert
+
+            insert_stmt = sqlite_insert(table)
+    else:
+        insert_stmt = table.insert()
+
+    if supports_conflict:
+        if on_conflict == "ignore":
+            insert_stmt = insert_stmt.on_conflict_do_nothing()
+        elif on_conflict == "update":
+            update_values = {
+                col.name: insert_stmt.excluded[col.name] for col in table.columns
+            }
+            insert_stmt = insert_stmt.on_conflict_do_update(set_=update_values)
+    elif on_conflict:
+        import warnings
+
+        warnings.warn(
+            f"Database does not support conflict resolution. "
+            f"Ignoring on_conflict='{on_conflict}' parameter.",
+            UserWarning,
+            stacklevel=2,
+        )
+
+    conn.execute(insert_stmt, rows_to_insert)


 def read_database(
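On PostgreSQL and SQLite the dialect-specific `insert()` construct provides the `on_conflict_*` methods used above; every other backend falls back to a plain INSERT plus a warning. A standalone sketch of the `"update"` path on SQLite (with the conflict target spelled out explicitly):

```py
import sqlalchemy as sa
from sqlalchemy.dialects.sqlite import insert as sqlite_insert

engine = sa.create_engine("sqlite:///:memory:")
meta = sa.MetaData()
kv = sa.Table(
    "kv",
    meta,
    sa.Column("k", sa.String, primary_key=True),
    sa.Column("v", sa.Integer),
)
meta.create_all(engine)

stmt = sqlite_insert(kv)
# "excluded" refers to the row that hit the conflict, mirroring the
# on_conflict == "update" branch above
stmt = stmt.on_conflict_do_update(index_elements=["k"], set_={"v": stmt.excluded.v})
with engine.begin() as conn:
    conn.execute(stmt, [{"k": "a", "v": 1}])
    conn.execute(stmt, [{"k": "a", "v": 2}])  # row "a" now holds v == 2
```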
@@ -151,3 +312,19 @@ def read_database(
         in_memory=in_memory,
         schema=inferred_schema | output,
     )
+
+
+def _infer_schema(
+    result: "sqlalchemy.engine.Result",
+    to_infer: list[str],
+    infer_schema_length: Optional[int] = 100,
+) -> tuple[list["sqlalchemy.Row"], dict[str, "DataType"]]:
+    from datachain.lib.convert.values_to_tuples import values_to_tuples
+
+    if not to_infer:
+        return [], {}
+
+    rows = list(itertools.islice(result, infer_schema_length))
+    values = {col: [row._mapping[col] for row in rows] for col in to_infer}
+    _, output_schema, _ = values_to_tuples("", **values)
+    return rows, output_schema
--- a/datachain/lib/dc/datachain.py
+++ b/datachain/lib/dc/datachain.py
@@ -58,6 +58,7 @@ from datachain.query.schema import DEFAULT_DELIMITER, Column
 from datachain.sql.functions import path as pathfunc
 from datachain.utils import batched_it, inside_notebook, row_to_nested_dict

+from .database import DEFAULT_DATABASE_BATCH_SIZE
 from .utils import (
     DatasetMergeError,
     DatasetPrepareError,
@@ -77,11 +78,23 @@ UDFObjT = TypeVar("UDFObjT", bound=UDFBase)
 DEFAULT_PARQUET_CHUNK_SIZE = 100_000

 if TYPE_CHECKING:
+    import sqlite3
+
     import pandas as pd
     from typing_extensions import ParamSpec, Self

     P = ParamSpec("P")

+    ConnectionType = Union[
+        str,
+        sqlalchemy.engine.URL,
+        sqlalchemy.engine.interfaces.Connectable,
+        sqlalchemy.engine.Engine,
+        sqlalchemy.engine.Connection,
+        "sqlalchemy.orm.Session",
+        sqlite3.Connection,
+    ]
+

 T = TypeVar("T", bound="DataChain")

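`ConnectionType` accepts URL strings, `Engine`s, `Connection`s, ORM `Session`s, and raw `sqlite3` connections; the DBAPI2 path is sqlite-only. A quick sketch, with `chain` assumed to be an existing `DataChain`:

```py
import sqlite3

conn = sqlite3.connect("data.db")
try:
    # DBAPI2 objects: only sqlite3 connections are supported
    chain.to_database("files", conn)
finally:
    conn.close()  # caller-managed connections are not closed automatically
```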
@@ -2276,6 +2289,97 @@ class DataChain:
         """
         self.to_json(path, fs_kwargs, include_outer_list=False)

+    def to_database(
+        self,
+        table_name: str,
+        connection: "ConnectionType",
+        *,
+        batch_rows: int = DEFAULT_DATABASE_BATCH_SIZE,
+        on_conflict: Optional[str] = None,
+        column_mapping: Optional[dict[str, Optional[str]]] = None,
+    ) -> None:
+        """Save chain to a database table using a given database connection.
+
+        This method exports all DataChain records to a database table, creating
+        the table if it doesn't exist and appending data if it does. The table
+        schema is automatically inferred from the DataChain's signal schema.
+
+        Parameters:
+            table_name: Name of the database table to create/write to.
+            connection: SQLAlchemy connectable, str, or a sqlite3 connection.
+                Using SQLAlchemy makes it possible to use any DB supported by
+                that library. If a DBAPI2 object, only sqlite3 is supported.
+                The user is responsible for engine disposal and connection
+                closure for SQLAlchemy connectables; str connections are closed
+                automatically.
+            batch_rows: Number of rows to insert per batch. Larger batches are
+                faster but use more memory. Default: 10,000.
+            on_conflict: Strategy for handling duplicate rows (requires table
+                constraints):
+                - None: raise `sqlalchemy.exc.IntegrityError` on conflict
+                  (default)
+                - "ignore": skip duplicate rows silently
+                - "update": update existing rows with new values
+            column_mapping: Optional mapping to rename or skip columns:
+                - Dict mapping DataChain column names to database column names.
+                - Set values to None to skip columns entirely, or use a
+                  `defaultdict` to skip all columns except those specified.
+
+        Examples:
+            Basic usage with PostgreSQL:
+            ```py
+            import sqlalchemy as sa
+            import datachain as dc
+
+            chain = dc.read_storage("s3://my-bucket/")
+            engine = sa.create_engine("postgresql://user:pass@localhost/mydb")
+            chain.to_database("files_table", engine)
+            ```
+
+            Using SQLite with a connection string:
+            ```py
+            chain.to_database("my_table", "sqlite:///data.db")
+            ```
+
+            Column mapping and renaming:
+            ```py
+            mapping = {
+                "user.id": "id",
+                "user.name": "name",
+                "user.password": None,  # Skip this column
+            }
+            chain.to_database("users", engine, column_mapping=mapping)
+            ```
+
+            Handling conflicts (requires PRIMARY KEY or UNIQUE constraints):
+            ```py
+            # Skip duplicates
+            chain.to_database("my_table", engine, on_conflict="ignore")
+
+            # Update existing records
+            chain.to_database("my_table", engine, on_conflict="update")
+            ```
+
+            Working with different databases:
+            ```py
+            # MySQL
+            mysql_engine = sa.create_engine("mysql+pymysql://user:pass@host/db")
+            chain.to_database("mysql_table", mysql_engine)
+
+            # SQLite in-memory
+            chain.to_database("temp_table", "sqlite:///:memory:")
+            ```
+        """
+        from .database import to_database
+
+        to_database(
+            self,
+            table_name,
+            connection,
+            batch_rows=batch_rows,
+            on_conflict=on_conflict,
+            column_mapping=column_mapping,
+        )
+
     @classmethod
     def from_records(
         cls,
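Because `to_database` creates missing tables without constraints, `on_conflict` only matters when the target table already exists with a PRIMARY KEY or UNIQUE constraint. A hypothetical end-to-end sketch (dataset, table, and column names are illustrative):

```py
import sqlalchemy as sa
import datachain as dc

engine = sa.create_engine("sqlite:///users.db")

# Pre-create the table with a PRIMARY KEY so conflicts can actually occur
meta = sa.MetaData()
sa.Table(
    "users",
    meta,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String),
)
meta.create_all(engine)

chain = dc.read_dataset("users")  # assumes a dataset with user.id / user.name
chain.to_database(
    "users",
    engine,
    column_mapping={"user.id": "id", "user.name": "name"},
    on_conflict="update",  # re-running refreshes rows instead of failing
)
```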
--- a/datachain/lib/signal_schema.py
+++ b/datachain/lib/signal_schema.py
@@ -34,7 +34,7 @@ from datachain.lib.data_model import DataModel, DataType, DataValue
 from datachain.lib.file import File
 from datachain.lib.model_store import ModelStore
 from datachain.lib.utils import DataChainParamsError
-from datachain.query.schema import DEFAULT_DELIMITER, Column
+from datachain.query.schema import DEFAULT_DELIMITER, Column, ColumnMeta
 from datachain.sql.types import SQLType

 if TYPE_CHECKING:
@@ -590,7 +590,7 @@ class SignalSchema:

         if name:
             if "." in name:
-                name = name.replace(".", "__")
+                name = ColumnMeta.to_db_name(name)

         signals = [
             s
--- datachain-0.28.2.dist-info/METADATA
+++ datachain-0.29.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: datachain
-Version: 0.28.2
+Version: 0.29.0
 Summary: Wrangle unstructured AI data at scale
 Author-email: Dmitry Petrov <support@dvc.org>
 License-Expression: Apache-2.0
--- datachain-0.28.2.dist-info/RECORD
+++ datachain-0.29.0.dist-info/RECORD
@@ -53,7 +53,7 @@ datachain/data_storage/metastore.py,sha256=Qw332arvhgXB4UY0yX-Hu8Vgl3smU12l6bvxr
 datachain/data_storage/schema.py,sha256=o3JbURKXRg3IJyIVA4QjHHkn6byRuz7avbydU2FlvNY,9897
 datachain/data_storage/serializer.py,sha256=6G2YtOFqqDzJf1KbvZraKGXl2XHZyVml2krunWUum5o,927
 datachain/data_storage/sqlite.py,sha256=TTQjdDXUaZSr3MEaxZjDhsVIkIJqxFNA-sD25TO3m_4,30228
-datachain/data_storage/warehouse.py,sha256=nhF8yfpdJpstpXnv_sj7WFzU97JkvSeqetqJQp33cyE,32563
+datachain/data_storage/warehouse.py,sha256=66PETLzfkgSmj-EF604m62xmFMQBXaRZSw8sdKGMam8,32613
 datachain/diff/__init__.py,sha256=-OFZzgOplqO84iWgGY7kfe60NXaWR9JRIh9T-uJboAM,9668
 datachain/fs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 datachain/fs/reference.py,sha256=A8McpXF0CqbXPqanXuvpKu50YLB3a2ZXA3YAPxtBXSM,914
@@ -86,7 +86,7 @@ datachain/lib/namespaces.py,sha256=it52UbbwB8dzhesO2pMs_nThXiPQ1Ph9sD9I3GQkg5s,2
 datachain/lib/projects.py,sha256=8lN0qV8czX1LGtWURCUvRlSJk-RpO9w9Rra_pOZus6g,2595
 datachain/lib/pytorch.py,sha256=S-st2SAczYut13KMf6eSqP_OQ8otWI5TRmzhK5fN3k0,7828
 datachain/lib/settings.py,sha256=n0YYhCVdgCdMkCSLY7kscJF9mUhlQ0a4ENWBsJFynkw,3809
-datachain/lib/signal_schema.py,sha256=JMsL8c4iCRH9PoRumvjimsOLQQslTjm_aDR2jh1zT2Q,38558
+datachain/lib/signal_schema.py,sha256=FmsfEAdRDeAzv1ApQnRXzkkyNeY9fTaXpjMzSMhDh7M,38574
 datachain/lib/tar.py,sha256=MLcVjzIgBqRuJacCNpZ6kwSZNq1i2tLyROc8PVprHsA,999
 datachain/lib/text.py,sha256=UNHm8fhidk7wdrWqacEWaA6I9ykfYqarQ2URby7jc7M,1261
 datachain/lib/udf.py,sha256=IB1IKF5KyA-NiyfhVzmBPpF_aITPS3zSlrt24f_Ofjo,17956
@@ -103,8 +103,8 @@ datachain/lib/convert/unflatten.py,sha256=ysMkstwJzPMWUlnxn-Z-tXJR3wmhjHeSN_P-sD
 datachain/lib/convert/values_to_tuples.py,sha256=j5yZMrVUH6W7b-7yUvdCTGI7JCUAYUOzHUGPoyZXAB0,4360
 datachain/lib/dc/__init__.py,sha256=TFci5HTvYGjBesNUxDAnXaX36PnzPEUSn5a6JxB9o0U,872
 datachain/lib/dc/csv.py,sha256=q6a9BpapGwP6nwy6c5cklxQumep2fUp9l2LAjtTJr6s,4411
-datachain/lib/dc/database.py,sha256=g5M6NjYR1T0vKte-abV-3Ejnm-HqxTIMir5cRi_SziE,6051
-datachain/lib/dc/datachain.py,sha256=T5-b2LLCF0zYhXQjOgtzzr6cm5NfrKVGxcJTWn7tfNU,94164
+datachain/lib/dc/database.py,sha256=MPE-KzwcR2DhWLCEbl1gWFp63dLqjWuiJ1iEfC2BrJI,12443
+datachain/lib/dc/datachain.py,sha256=_C9PZjUHVewpdp94AR2GS3QEI96Svsyx52dLJVM4tm4,98143
 datachain/lib/dc/datasets.py,sha256=P6CIJizD2IYFwOQG5D3VbQRjDmUiRH0ysdtb551Xdm8,15098
 datachain/lib/dc/hf.py,sha256=AP_MUHg6HJWae10PN9hD_beQVjrl0cleZ6Cvhtl1yoI,2901
 datachain/lib/dc/json.py,sha256=dNijfJ-H92vU3soyR7X1IiDrWhm6yZIGG3bSnZkPdAE,2733
@@ -158,9 +158,9 @@ datachain/sql/sqlite/vector.py,sha256=ncW4eu2FlJhrP_CIpsvtkUabZlQdl2D5Lgwy_cbfqR
 datachain/toolkit/__init__.py,sha256=eQ58Q5Yf_Fgv1ZG0IO5dpB4jmP90rk8YxUWmPc1M2Bo,68
 datachain/toolkit/split.py,sha256=ktGWzY4kyzjWyR86dhvzw-Zhl0lVk_LOX3NciTac6qo,2914
 datachain/torch/__init__.py,sha256=gIS74PoEPy4TB3X6vx9nLO0Y3sLJzsA8ckn8pRWihJM,579
-datachain-0.28.2.dist-info/licenses/LICENSE,sha256=8DnqK5yoPI_E50bEg_zsHKZHY2HqPy4rYN338BHQaRA,11344
-datachain-0.28.2.dist-info/METADATA,sha256=dYo2qW8RMNNCyy6KOXztfXOIldyS4_mADxeAlCI9cKw,13766
-datachain-0.28.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-datachain-0.28.2.dist-info/entry_points.txt,sha256=0GMJS6B_KWq0m3VT98vQI2YZodAMkn4uReZ_okga9R4,49
-datachain-0.28.2.dist-info/top_level.txt,sha256=lZPpdU_2jJABLNIg2kvEOBi8PtsYikbN1OdMLHk8bTg,10
-datachain-0.28.2.dist-info/RECORD,,
+datachain-0.29.0.dist-info/licenses/LICENSE,sha256=8DnqK5yoPI_E50bEg_zsHKZHY2HqPy4rYN338BHQaRA,11344
+datachain-0.29.0.dist-info/METADATA,sha256=g5YmnSXxBvUz_ZO1ZoEPHkzRyQGW5ZbPc8a4ZRJqHXE,13766
+datachain-0.29.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+datachain-0.29.0.dist-info/entry_points.txt,sha256=0GMJS6B_KWq0m3VT98vQI2YZodAMkn4uReZ_okga9R4,49
+datachain-0.29.0.dist-info/top_level.txt,sha256=lZPpdU_2jJABLNIg2kvEOBi8PtsYikbN1OdMLHk8bTg,10
+datachain-0.29.0.dist-info/RECORD,,