datachain 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- datachain/catalog/catalog.py +11 -2
- datachain/client/fsspec.py +1 -4
- datachain/client/local.py +2 -7
- datachain/data_storage/schema.py +22 -8
- datachain/data_storage/sqlite.py +5 -0
- datachain/data_storage/warehouse.py +8 -14
- datachain/lib/dc.py +28 -14
- datachain/lib/meta_formats.py +8 -2
- datachain/lib/udf.py +21 -14
- datachain/node.py +1 -1
- datachain/query/batch.py +45 -41
- datachain/query/dataset.py +13 -6
- datachain/query/dispatch.py +53 -68
- datachain/query/queue.py +120 -0
- datachain/query/schema.py +4 -0
- datachain/query/udf.py +23 -8
- datachain/sql/default/base.py +3 -0
- datachain/sql/sqlite/base.py +3 -0
- datachain/sql/types.py +120 -11
- datachain/utils.py +17 -2
- {datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/METADATA +74 -86
- {datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/RECORD +26 -25
- {datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/WHEEL +1 -1
- {datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/LICENSE +0 -0
- {datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/entry_points.txt +0 -0
- {datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/top_level.txt +0 -0
datachain/sql/types.py
CHANGED

@@ -17,6 +17,7 @@ from datetime import datetime
 from types import MappingProxyType
 from typing import Any, Union
 
+import sqlalchemy as sa
 from sqlalchemy import TypeDecorator, types
 
 _registry: dict[str, "TypeConverter"] = {}
@@ -28,6 +29,9 @@ read_converter_registry = MappingProxyType(_read_converter_registry)
 _type_defaults_registry: dict[str, "TypeDefaults"] = {}
 type_defaults_registry = MappingProxyType(_type_defaults_registry)
 
+_db_defaults_registry: dict[str, "DBDefaults"] = {}
+db_defaults_registry = MappingProxyType(_db_defaults_registry)
+
 NullType = types.NullType
 
 
@@ -43,6 +47,10 @@ def register_type_defaults(dialect_name: str, td: "TypeDefaults"):
     _type_defaults_registry[dialect_name] = td
 
 
+def register_db_defaults(dialect_name: str, dbd: "DBDefaults"):
+    _db_defaults_registry[dialect_name] = dbd
+
+
 def converter(dialect) -> "TypeConverter":
     name = dialect.name
     try:
@@ -71,6 +79,14 @@ def type_defaults(dialect) -> "TypeDefaults":
     raise ValueError(f"No type defaults registered for dialect: {name!r}") from None
 
 
+def db_defaults(dialect) -> "DBDefaults":
+    name = dialect.name
+    try:
+        return db_defaults_registry[name]
+    except KeyError:
+        raise ValueError(f"No DB defaults registered for dialect: {name!r}") from None
+
+
 class SQLType(TypeDecorator):
     impl: type[types.TypeEngine[Any]] = types.TypeEngine
     cache_ok = True
@@ -97,6 +113,10 @@ class String(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).string()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).string()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).string(value)
 
@@ -115,6 +135,10 @@ class Boolean(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).boolean()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).boolean()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).boolean(value)
 
@@ -133,6 +157,10 @@ class Int(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).int()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).int()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).int(value)
 
@@ -145,6 +173,10 @@ class Int32(Int):
     def default_value(dialect):
         return type_defaults(dialect).int32()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).int32()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).int32(value)
 
@@ -157,6 +189,10 @@ class Int64(Int):
     def default_value(dialect):
         return type_defaults(dialect).int64()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).int64()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).int64(value)
 
@@ -169,12 +205,16 @@ class UInt64(Int):
     def default_value(dialect):
         return type_defaults(dialect).uint64()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).uint64()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).uint64(value)
 
 
 class Float(SQLType):
-    impl = types.
+    impl = types.FLOAT
 
     @property
     def python_type(self):
@@ -187,6 +227,10 @@ class Float(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).float()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).float()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).float(value)
 
@@ -199,6 +243,10 @@ class Float32(Float):
     def default_value(dialect):
         return type_defaults(dialect).float32()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).float32()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).float32(value)
 
@@ -211,6 +259,10 @@ class Float64(Float):
     def default_value(dialect):
         return type_defaults(dialect).float64()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).float64()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).float64(value)
 
@@ -247,6 +299,10 @@ class Array(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).array()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).array()
+
     def on_read_convert(self, value, dialect):
         r = read_converter(dialect).array(value, self.item_type, dialect)
         if isinstance(self.item_type, JSON):
@@ -268,6 +324,10 @@ class JSON(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).json()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).json()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).json(value)
 
@@ -286,6 +346,10 @@ class DateTime(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).datetime()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).datetime()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).datetime(value)
 
@@ -304,6 +368,10 @@ class Binary(SQLType):
     def default_value(dialect):
         return type_defaults(dialect).binary()
 
+    @staticmethod
+    def db_default_value(dialect):
+        return db_defaults(dialect).binary()
+
     def on_read_convert(self, value, dialect):
         return read_converter(dialect).binary(value)
 
@@ -328,13 +396,17 @@ class TypeReadConverter:
         return value
 
     def float(self, value):
+        if value is None:
+            return float("nan")
+        if isinstance(value, str) and value.lower() == "nan":
+            return float("nan")
         return value
 
     def float32(self, value):
-        return value
+        return self.float(value)
 
     def float64(self, value):
-        return value
+        return self.float(value)
 
     def array(self, value, item_type, dialect):
         if value is None or item_type is None:
@@ -347,10 +419,9 @@ class TypeReadConverter:
     def datetime(self, value):
         return value
 
-    def uuid(self, value):
-        return value
-
     def binary(self, value):
+        if isinstance(value, str):
+            return value.encode()
         return value
 
 
@@ -415,13 +486,13 @@ class TypeDefaults:
         return None
 
     def float(self):
-        return
+        return float("nan")
 
     def float32(self):
-        return
+        return self.float()
 
     def float64(self):
-        return
+        return self.float()
 
     def array(self):
         return None
@@ -432,11 +503,49 @@ class TypeDefaults:
     def datetime(self):
         return None
 
-    def uuid(self):
+    def binary(self):
         return None
 
+
+class DBDefaults:
+    def string(self):
+        return sa.text("''")
+
+    def boolean(self):
+        return sa.text("False")
+
+    def int(self):
+        return sa.text("0")
+
+    def int32(self):
+        return self.int()
+
+    def int64(self):
+        return self.int()
+
+    def uint64(self):
+        return self.int()
+
+    def float(self):
+        return sa.text("NaN")
+
+    def float32(self):
+        return self.float()
+
+    def float64(self):
+        return self.float()
+
+    def array(self):
+        return sa.text("'[]'")
+
+    def json(self):
+        return sa.text("'{}'")
+
+    def datetime(self):
+        return sa.text("'1970-01-01 00:00:00'")
+
     def binary(self):
-        return
+        return sa.text("''")
 
 
 TYPES = [
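The new `DBDefaults` registry mirrors the existing `TypeDefaults` one: each dialect registers a `DBDefaults` instance via `register_db_defaults`, and every column type gains a `db_default_value(dialect)` staticmethod that resolves a server-side default literal through it (the small changes to `datachain/sql/default/base.py` and `datachain/sql/sqlite/base.py` listed above are presumably the registration hooks). A minimal usage sketch, assuming only the names shown in this diff; the `my_dialect` name and the subclass are illustrative:

    import sqlalchemy as sa

    from datachain.sql.types import DBDefaults, Int64, register_db_defaults

    class MyDBDefaults(DBDefaults):
        # Override only what differs, e.g. a backend without a NaN literal.
        def float(self):
            return sa.text("0.0")

    # "my_dialect" is illustrative; datachain registers its real dialects
    # in datachain/sql/default/base.py and datachain/sql/sqlite/base.py.
    register_db_defaults("my_dialect", MyDBDefaults())

    # Any column type can now resolve its server-side default, e.g.
    # Int64.db_default_value(dialect) -> sa.text("0") with the base defaults.

Note the companion change in the read path: missing floats now come back as `float("nan")` rather than `None`, which keeps the Python view consistent with the `NaN` DB default above.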
datachain/utils.py
CHANGED

@@ -10,7 +10,7 @@ import sys
 import time
 from collections.abc import Iterable, Iterator, Sequence
 from datetime import date, datetime, timezone
-from itertools import islice
+from itertools import chain, islice
 from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union
 from uuid import UUID
 
@@ -241,7 +241,7 @@ _T_co = TypeVar("_T_co", covariant=True)
 
 
 def batched(iterable: Iterable[_T_co], n: int) -> Iterator[tuple[_T_co, ...]]:
-    "Batch data into tuples of length n. The last batch may be shorter."
+    """Batch data into tuples of length n. The last batch may be shorter."""
    # Based on: https://docs.python.org/3/library/itertools.html#itertools-recipes
    # batched('ABCDEFG', 3) --> ABC DEF G
    if n < 1:
@@ -251,6 +251,21 @@ def batched(iterable: Iterable[_T_co], n: int) -> Iterator[tuple[_T_co, ...]]:
         yield batch
 
 
+def batched_it(iterable: Iterable[_T_co], n: int) -> Iterator[Iterator[_T_co]]:
+    """Batch data into iterators of length n. The last batch may be shorter."""
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if n < 1:
+        raise ValueError("Batch size must be at least one")
+    it = iter(iterable)
+    while True:
+        chunk_it = islice(it, n)
+        try:
+            first_el = next(chunk_it)
+        except StopIteration:
+            return
+        yield chain((first_el,), chunk_it)
+
+
 def flatten(items):
     for item in items:
         if isinstance(item, list):
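`batched_it` complements `batched` by yielding lazy `islice`-backed iterators instead of materialized tuples, presumably to support the reworked streaming dispatch in `datachain/query/` (see `batch.py`, `dispatch.py`, and the new `queue.py` in the file list above). A small sketch of the difference; note that the chunks share one underlying iterator, so each chunk must be consumed before requesting the next:

    from datachain.utils import batched, batched_it

    # batched materializes each chunk as a tuple.
    assert list(batched("ABCDEFG", 3)) == [
        ("A", "B", "C"), ("D", "E", "F"), ("G",),
    ]

    # batched_it yields lazy iterators over the same chunks.
    for chunk in batched_it("ABCDEFG", 3):
        print("".join(chunk))  # ABC, then DEF, then G
    # Skipping a chunk without draining it would leave its items to be
    # consumed by the next chunk, since all chunks view the same iterator.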
{datachain-0.3.0.dist-info → datachain-0.3.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: datachain
-Version: 0.3.0
+Version: 0.3.2
 Summary: Wrangle unstructured AI data at scale
 Author-email: Dmitry Petrov <support@dvc.org>
 License: Apache-2.0
@@ -55,6 +55,15 @@ Requires-Dist: mkdocs-material >=9.3.1 ; extra == 'docs'
 Requires-Dist: mkdocs-section-index >=0.3.6 ; extra == 'docs'
 Requires-Dist: mkdocstrings-python >=1.6.3 ; extra == 'docs'
 Requires-Dist: mkdocs-literate-nav >=0.6.1 ; extra == 'docs'
+Provides-Extra: examples
+Requires-Dist: datachain[tests] ; extra == 'examples'
+Requires-Dist: numpy <2,>=1 ; extra == 'examples'
+Requires-Dist: defusedxml ; extra == 'examples'
+Requires-Dist: accelerate ; extra == 'examples'
+Requires-Dist: unstructured[pdf] ; extra == 'examples'
+Requires-Dist: pdfplumber ==0.11.3 ; extra == 'examples'
+Requires-Dist: huggingface-hub[hf_transfer] ; extra == 'examples'
+Requires-Dist: nltk ==3.8.1 ; extra == 'examples'
 Provides-Extra: remote
 Requires-Dist: lz4 ; extra == 'remote'
 Requires-Dist: msgpack <2,>=1.0.4 ; extra == 'remote'
@@ -100,102 +109,78 @@ Requires-Dist: usearch ; extra == 'vector'
 AI 🔗 DataChain
 ----------------
 
-DataChain is a data-frame library designed for
-
-
+DataChain is a modern Pythonic data-frame library designed for artificial intelligence.
+It is made to organize your unstructured data into datasets and wrangle it at scale on
+your local machine.
 
-
-
-Local), version and update datasets.
+Key Features
+============
 
-
-
-
+📂 **Storage as a Source of Truth.**
+- Process unstructured data without redundant copies: S3, GCP, Azure, and local
+  file systems.
+- Multimodal data: images, video, text, PDFs, JSONs, CSVs, parquet.
+- Join files and metadata together into persistent, versioned, columnar datasets.
 
-
-
+🐍 **Python-friendly data pipelines.**
+- Operate on Python objects and object fields.
+- Built-in parallelization and out-of-memory compute without a need in SQL or
+  Spark jobs.
 
-
-
-
+🧠 **Data Enrichment and Processing.**
+- Generate metadata columns using local AI models and LLM APIs.
+- Filter, join, and group by AI metadata. Vector similarity search.
+- Pass datasets to Pytorch and Tensorflow, or export back into storage.
 
-
-
-
+🚀 **Efficiency.**
+- Parallelization, out-of-memory workloads and data caching.
+- Vectorized operations on Python object fields: sum, count, avg, etc.
+- Vector search on embeddings.
 
 
+Quick Start
+-----------
+
 .. code:: console
 
    $ pip install datachain
 
 
-
-
-
-DataChain introduces expressive data structures tailored for AI-specific workload:
-
-- **Dataset:** Preserves the file-references and meta-information. Takes care of Python
-  object serialization, dataset versioning and difference. Operations on dataset:
-
-- **Transformations:** traditional data-frame or SQL operations such as filtering,
-  grouping, joining.
-- **Enrichments:** mapping, aggregating and generating using customer’s Python
-  code. This is needed to work with ML inference and LLM calls.
-
-- **Chain** is a sequence of operations on datasets. Chain executes operations in lazy
-  mode - only when needed.
-
-DataChain name comes from these major data structures: dataset and chaining.
+Selecting files using JSON metadata
+======================================
 
-
-
+A storage consists of images of cats and dogs (`dog.1048.jpg`, `cat.1009.jpg`),
+annotated with ground truth and model inferences in the 'json-pairs' format,
+where each image has a matching JSON file like `cat.1009.json`:
 
-
-use-cases and at the same time to fit it into traditional data infrastructure.
+.. code:: json
 
-
-
-
-
-needed for distributed computations.
-- **Resuming data processing** (in development). Introduces idempotent operations,
-  allowing data processing to resume from the last successful process file/record/batch
-  if it fails due to issues like failed LLM calls, ML inference or file download.
+   {
+       "class": "cat", "id": "1009", "num_annotators": 8,
+       "inference": {"class": "dog", "confidence": 0.68}
+   }
 
-
+Example of downloading only high-confidence cat images using JSON metadata:
 
-- **Functional style data processing.** Using a functional/chaining approach to data
-  processing rather than declarative SQL, inspired by R-dplyr and some Python libraries.
-- **Data Versioning.** Treats raw files in cloud storage as the source of truth for data
-  and implements data versioning, extending ideas from DVC (developed by the same team).
 
+.. code:: py
 
-
-======================
-
-- **Not a database** (Postgres, MySQL). Instead, it uses databases under the hood:
-  `SQLite`_ in open-source and ClickHouse and other data warehouses for the commercial
-  version.
-- **Not a data processing tool / data warehouse** (Spark, Snowflake, Big Query) since
-  it delegates heavy data transformations to underlying data warehouses and focuses on
-  AI specific data enrichments and orchestrating all the pieces together.
-
+   from datachain import Column, DataChain
 
-
-
+   meta = DataChain.from_json("gs://datachain-demo/dogs-and-cats/*json", object_name="meta")
+   images = DataChain.from_storage("gs://datachain-demo/dogs-and-cats/*jpg")
 
-
-
+   images_id = images.map(id=lambda file: file.path.split('.')[-2])
+   annotated = images_id.merge(meta, on="id", right_on="meta.id")
 
-
-
-
-Our goal is to identify the successful dialogs.
+   likely_cats = annotated.filter((Column("meta.inference.confidence") > 0.93) \
+                                  & (Column("meta.inference.class_") == "cat"))
+   likely_cats.export_files("high-confidence-cats/", signal="file")
 
-The data used in the examples is `publicly available`_. The sample code is designed to run on a local machine.
 
-
+Data curation with a local AI model
+===================================
+Batch inference with a simple sentiment model using the `transformers` library:
 
 .. code:: shell
 
@@ -246,30 +231,30 @@ LLM judging chatbots
 =============================
 
 LLMs can work as efficient universal classifiers. In the example below,
-we employ a free API from Mistral to judge the chatbot dialogs. Please get a free
+we employ a free API from Mistral to judge the `publicly available`_ chatbot dialogs. Please get a free
 Mistral API key at https://console.mistral.ai
 
+
 .. code:: shell
 
-   $ pip install mistralai
+   $ pip install mistralai (Requires version >=1.0.0)
    $ export MISTRAL_API_KEY=_your_key_
 
 DataChain can parallelize API calls; the free Mistral tier supports up to 4 requests at the same time.
 
 .. code:: py
 
-   from mistralai.client import MistralClient
-   from mistralai.models.chat_completion import ChatMessage
+   from mistralai import Mistral
   from datachain import File, DataChain, Column
 
   PROMPT = "Was this dialog successful? Answer in a single word: Success or Failure."
 
   def eval_dialogue(file: File) -> bool:
-       client = MistralClient()
-       response = client.chat(
+       client = Mistral()
+       response = client.chat.complete(
           model="open-mixtral-8x22b",
-           messages=[
-
+           messages=[{"role": "system", "content": PROMPT},
+                     {"role": "user", "content": file.read()}])
       result = response.choices[0].message.content
       return result.lower().startswith("success")
 
@@ -309,8 +294,8 @@ Instead of extracting this information from the Mistral response data structure
 
 .. code:: py
 
-   from mistralai.client import MistralClient
-   from mistralai.models.chat_completion import ChatCompletionResponse
+   from mistralai import Mistral
+   from mistralai.models import ChatCompletionResponse
   from datachain import File, DataChain, Column
 
   PROMPT = "Was this dialog successful? Answer in a single word: Success or Failure."
@@ -319,8 +304,8 @@ Instead of extracting this information from the Mistral response data structure
       client = MistralClient()
       return client.chat(
          model="open-mixtral-8x22b",
-          messages=[
-
+          messages=[{"role": "system", "content": PROMPT},
+                    {"role": "user", "content": file.read()}])
 
   chain = (
       DataChain.from_storage("gs://datachain-demo/chatbot-KiT/", object_name="file")
@@ -438,7 +423,10 @@ Tutorials
 ---------
 
 * `Getting Started`_
-* `Multimodal <examples/multimodal/clip_fine_tuning.ipynb>`_ (try in `Colab <https://colab.research.google.com/github/iterative/datachain/blob/main/
+* `Multimodal <https://github.com/iterative/datachain-examples/blob/main/multimodal/clip_fine_tuning.ipynb>`_ (try in `Colab <https://colab.research.google.com/github/iterative/datachain-examples/blob/main/multimodal/clip_fine_tuning.ipynb>`__)
+* `LLM evaluations <https://github.com/iterative/datachain-examples/blob/main/llm/llm_chatbot_evaluation.ipynb>`_ (try in `Colab <https://colab.research.google.com/github/iterative/datachain-examples/blob/main/llm/llm_chatbot_evaluation.ipynb>`__)
+* `Reading JSON metadata <https://github.com/iterative/datachain-examples/blob/main/formats/json-metadata-tutorial.ipynb>`_ (try in `Colab <https://colab.research.google.com/github/iterative/datachain-examples/blob/main/formats/json-metadata-tutorial.ipynb>`__)
+
 
 Contributions
 -------------