datachain 0.16.0__py3-none-any.whl → 0.16.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of datachain has been flagged as potentially problematic by the registry diff service.
- datachain/func/aggregate.py +3 -3
- datachain/lib/convert/values_to_tuples.py +6 -8
- datachain/lib/dc/datachain.py +16 -10
- datachain/lib/dc/records.py +16 -10
- datachain/lib/dc/utils.py +2 -2
- datachain/lib/signal_schema.py +1 -10
- datachain/query/dataset.py +13 -6
- datachain/query/schema.py +1 -4
- {datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/METADATA +1 -1
- {datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/RECORD +14 -14
- {datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/WHEEL +1 -1
- {datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/entry_points.txt +0 -0
- {datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/licenses/LICENSE +0 -0
- {datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/top_level.txt +0 -0
datachain/func/aggregate.py
CHANGED

@@ -165,7 +165,7 @@ def any_value(col: str) -> Func:
     Example:
         ```py
         dc.group_by(
-            file_example=func.any_value("file.
+            file_example=func.any_value("file.path"),
             partition_by="signal.category",
         )
         ```
@@ -227,7 +227,7 @@ def concat(col: str, separator="") -> Func:
     Example:
         ```py
         dc.group_by(
-            files=func.concat("file.
+            files=func.concat("file.path", separator=", "),
            partition_by="signal.category",
         )
         ```
@@ -343,7 +343,7 @@ def first(col: str) -> Func:
        ```py
        window = func.window(partition_by="signal.category", order_by="created_at")
        dc.mutate(
-            first_file=func.first("file.
+            first_file=func.first("file.path").over(window),
        )
        ```

datachain/lib/convert/values_to_tuples.py
CHANGED

@@ -1,6 +1,6 @@
 import itertools
 from collections.abc import Sequence
-from typing import Any,
+from typing import Any, Union

 from datachain.lib.data_model import (
     DataType,
@@ -71,14 +71,13 @@ def values_to_tuples(  # noqa: C901, PLR0912
    # If a non-None value appears early, it won't check the remaining items for
    # `None` values.
    try:
-
-        itertools.dropwhile(lambda
+        first_not_none_element = next(
+            itertools.dropwhile(lambda i: i is None, v)
        )
    except StopIteration:
-
-
+        # set default type to `str` if column is empty or all values are `None`
+        typ = str
    else:
-        nullable = pos > 0
        typ = type(first_not_none_element)  # type: ignore[assignment]
        if not is_chain_type(typ):
            raise ValuesToTupleError(
@@ -88,8 +87,7 @@ def values_to_tuples(  # noqa: C901, PLR0912
    )
    if isinstance(first_not_none_element, list):
        typ = list[type(first_not_none_element[0])]  # type: ignore[assignment, misc]
-
-    types_map[k] = Optional[typ] if nullable else typ  # type: ignore[assignment]
+    types_map[k] = typ

    if length < 0:
        length = len_
datachain/lib/dc/datachain.py
CHANGED

@@ -756,7 +756,7 @@ class DataChain:

        Example:
            ```py
-            dc.distinct("file.
+            dc.distinct("file.path")
            ```
        """
        return self._evolve(
@@ -882,7 +882,7 @@ class DataChain:
            ```py
            dc.mutate(
                area=Column("image.height") * Column("image.width"),
-                extension=file_ext(Column("file.
+                extension=file_ext(Column("file.path")),
                dist=cosine_distance(embedding_text, embedding_image)
            )
            ```
@@ -1071,13 +1071,13 @@ class DataChain:

        Iterating over all rows with selected columns:
            ```py
-            for name, size in dc.collect("file.
+            for name, size in dc.collect("file.path", "file.size"):
                print(name, size)
            ```

        Iterating over a single column:
            ```py
-            for file in dc.collect("file.
+            for file in dc.collect("file.path"):
                print(file)
            ```
        """
@@ -1630,7 +1630,7 @@ class DataChain:
            import datachain as dc

            chain = dc.read_storage("s3://mybucket")
-            chain = chain.filter(dc.C("file.
+            chain = chain.filter(dc.C("file.path").glob("*.jsonl"))
            chain = chain.parse_tabular(format="json")
            ```
        """
@@ -2089,25 +2089,31 @@ class DataChain:

        Using glob to match patterns
            ```py
-            dc.filter(C("file.
+            dc.filter(C("file.path").glob("*.jpg"))
+            ```
+
+        Using in to match lists
+            ```py
+            ids = [1,2,3]
+            dc.filter(C("experiment_id").in_(ids))
            ```

        Using `datachain.func`
            ```py
            from datachain.func import string
-            dc.filter(string.length(C("file.
+            dc.filter(string.length(C("file.path")) > 5)
            ```

        Combining filters with "or"
            ```py
-            dc.filter(C("file.
+            dc.filter(C("file.path").glob("cat*") | C("file.path").glob("dog*"))
            ```

        Combining filters with "and"
            ```py
            dc.filter(
-                C("file.
-                (string.length(C("file.
+                C("file.path").glob("*.jpg") &
+                (string.length(C("file.path")) > 5)
            )
            ```
        """
datachain/lib/dc/records.py
CHANGED

@@ -4,12 +4,9 @@ from typing import TYPE_CHECKING, Optional, Union
 import sqlalchemy

 from datachain.lib.data_model import DataType
-from datachain.lib.file import
-    File,
-)
+from datachain.lib.file import File
 from datachain.lib.signal_schema import SignalSchema
 from datachain.query import Session
-from datachain.query.schema import Column

 if TYPE_CHECKING:
     from typing_extensions import ParamSpec
@@ -41,6 +38,9 @@ def read_records(
    single_record = dc.read_records(dc.DEFAULT_FILE_RECORD)
    ```
    """
+    from datachain.query.dataset import adjust_outputs, get_col_types
+    from datachain.sql.types import SQLType
+
    from .datasets import read_dataset

    session = Session.get(session, in_memory=in_memory)
@@ -52,11 +52,10 @@ def read_records(

    if schema:
        signal_schema = SignalSchema(schema)
-        columns = [
-
-
-
-            columns.append(sqlalchemy.Column(c.name, c.type, **kw))
+        columns = [
+            sqlalchemy.Column(c.name, c.type)  # type: ignore[union-attr]
+            for c in signal_schema.db_signals(as_columns=True)
+        ]
    else:
        columns = [
            sqlalchemy.Column(name, typ)
@@ -83,6 +82,13 @@ def read_records(
    warehouse = catalog.warehouse
    dr = warehouse.dataset_rows(dsr)
    table = dr.get_table()
-
+
+    # Optimization: Compute row types once, rather than for every row.
+    col_types = get_col_types(
+        warehouse,
+        {c.name: c.type for c in columns if isinstance(c.type, SQLType)},
+    )
+    records = (adjust_outputs(warehouse, record, col_types) for record in to_insert)
+    warehouse.insert_rows(table, records)
    warehouse.insert_rows_done(table)
    return read_dataset(name=dsr.name, session=session, settings=settings)
datachain/lib/dc/utils.py
CHANGED

@@ -31,8 +31,8 @@ def resolve_columns(
 ) -> "Callable[Concatenate[D, P], D]":
    """Decorator that resolvs input column names to their actual DB names. This is
    specially important for nested columns as user works with them by using dot
-    notation e.g (file.
-    in DB, e.g
+    notation e.g (file.path) but are actually defined with default delimiter
+    in DB, e.g file__path.
    If there are any sql functions in arguments, they will just be transferred as is
    to a method.
    """
datachain/lib/signal_schema.py
CHANGED

@@ -581,11 +581,7 @@ class SignalSchema:
        signals = [
            DEFAULT_DELIMITER.join(path)
            if not as_columns
-            else Column(
-                DEFAULT_DELIMITER.join(path),
-                python_to_sql(_type),
-                nullable=is_optional(_type),
-            )
+            else Column(DEFAULT_DELIMITER.join(path), python_to_sql(_type))
            for path, _type, has_subtree, _ in self.get_flat_tree(
                include_hidden=include_hidden
            )
@@ -994,8 +990,3 @@ class SignalSchema:
        }

        return SignalSchema.deserialize(schema)
-
-
-def is_optional(type_: Any) -> bool:
-    """Check if a type is Optional."""
-    return get_origin(type_) is Union and type(None) in get_args(type_)
datachain/query/dataset.py
CHANGED

@@ -57,6 +57,7 @@ from datachain.query.schema import C, UDFParamSpec, normalize_param
 from datachain.query.session import Session
 from datachain.query.udf import UdfInfo
 from datachain.sql.functions.random import rand
+from datachain.sql.types import SQLType
 from datachain.utils import (
     batched,
     determine_processes,
@@ -67,6 +68,8 @@ from datachain.utils import (
 )

 if TYPE_CHECKING:
+    from collections.abc import Mapping
+
     from sqlalchemy.sql.elements import ClauseElement
     from sqlalchemy.sql.schema import Table
     from sqlalchemy.sql.selectable import GenerativeSelect
@@ -273,7 +276,9 @@ class Subtract(DatasetDiffOperation):


 def adjust_outputs(
-    warehouse: "AbstractWarehouse",
+    warehouse: "AbstractWarehouse",
+    row: dict[str, Any],
+    col_types: list[tuple[str, SQLType, type, str, Any]],
 ) -> dict[str, Any]:
     """
     This function does a couple of things to prepare a row for inserting into the db:
@@ -289,7 +294,7 @@ def adjust_outputs(
        col_python_type,
        col_type_name,
        default_value,
-    ) in
+    ) in col_types:
        row_val = row.get(col_name)

        # Fill None or missing values with defaults (get returns None if not in the row)
@@ -304,8 +309,10 @@ def adjust_outputs(
    return row


-def
-""
+def get_col_types(
+    warehouse: "AbstractWarehouse", output: "Mapping[str, Any]"
+) -> list[tuple]:
+    """Optimization: Precompute column types so these don't have to be computed
    in the convert_type function for each row in a loop."""
    dialect = warehouse.db.dialect
    return [
@@ -317,7 +324,7 @@ def get_udf_col_types(warehouse: "AbstractWarehouse", udf: "UDFAdapter") -> list
            type(col_type_inst).__name__,
            col_type.default_value(dialect),
        )
-        for col_name, col_type in
+        for col_name, col_type in output.items()
    ]

@@ -333,7 +340,7 @@ def process_udf_outputs(

    rows: list[UDFResult] = []
    # Optimization: Compute row types once, rather than for every row.
-    udf_col_types =
+    udf_col_types = get_col_types(warehouse, udf.output)

    for udf_output in udf_results:
        if not udf_output:
datachain/query/schema.py
CHANGED

@@ -40,15 +40,12 @@ class ColumnMeta(type):
 class Column(sa.ColumnClause, metaclass=ColumnMeta):
     inherit_cache: Optional[bool] = True

-    def __init__(
-        self, text, type_=None, is_literal=False, nullable=None, _selectable=None
-    ):
+    def __init__(self, text, type_=None, is_literal=False, _selectable=None):
        """Dataset column."""
        self.name = ColumnMeta.to_db_name(text)
        super().__init__(
            self.name, type_=type_, is_literal=is_literal, _selectable=_selectable
        )
-        self.nullable = nullable

    def __getattr__(self, name: str):
        return Column(self.name + DEFAULT_DELIMITER + name)
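After this change, `Column` no longer accepts or stores a `nullable` argument; nullability is no longer tracked on the query-layer column. A small sketch of constructing columns with the remaining parameters (usage only; the printed names assume the dot-to-`__` translation described in the utils.py docstring above):

```py
from datachain.query.schema import Column

# Dot notation is mapped to the flattened DB name
# (e.g. "file.path" -> "file__path", per the resolve_columns docstring).
col = Column("file.path")
print(col.name)

# __getattr__ (shown in the diff above) builds nested names by joining with the delimiter.
print(Column("file").path.name)
```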
{datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/RECORD
CHANGED

@@ -55,7 +55,7 @@ datachain/fs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 datachain/fs/reference.py,sha256=A8McpXF0CqbXPqanXuvpKu50YLB3a2ZXA3YAPxtBXSM,914
 datachain/fs/utils.py,sha256=s-FkTOCGBk-b6TT3toQH51s9608pofoFjUSTc1yy7oE,825
 datachain/func/__init__.py,sha256=CjNLHfJkepdXdRZ6HjJBjNSIjOeFMuMkwPDaPUrM75g,1270
-datachain/func/aggregate.py,sha256=
+datachain/func/aggregate.py,sha256=UfxENlw56Qv3UEkj2sZ-JZHmr9q8Rnic9io9_63gF-E,10942
 datachain/func/array.py,sha256=O784_uwmaP5CjZX4VSF4RmS8cmpaForQc8zASxHJB6A,6717
 datachain/func/base.py,sha256=wA0sBQAVyN9LPxoo7Ox83peS0zUVnyuKxukwAcjGLfY,534
 datachain/func/conditional.py,sha256=HkNamQr9dLyIMDEbIeO6CZR0emQoDqeaWrZ1fECod4M,8062
@@ -79,7 +79,7 @@ datachain/lib/meta_formats.py,sha256=Epydbdch1g4CojK8wd_ePzmwmljC4fVWlJtZ16jsX-A
 datachain/lib/model_store.py,sha256=DNIv8Y6Jtk1_idNLzIpsThOsdW2BMAudyUCbPUcgcxk,2515
 datachain/lib/pytorch.py,sha256=YS6yR13iVlrAXo5wzJswFFUHwWOql9KTdWIa86DXB-k,7712
 datachain/lib/settings.py,sha256=ZELRCTLbi5vzRPiDX6cQ9LLg9TefJ_A05gIGni0lll8,2535
-datachain/lib/signal_schema.py,sha256=
+datachain/lib/signal_schema.py,sha256=uIBHYXtu_XpLbOUVC-kq-GduEOCfz9hQORi9ZG3JFqo,35820
 datachain/lib/tar.py,sha256=3WIzao6yD5fbLqXLTt9GhPGNonbFIs_fDRu-9vgLgsA,1038
 datachain/lib/text.py,sha256=UNHm8fhidk7wdrWqacEWaA6I9ykfYqarQ2URby7jc7M,1261
 datachain/lib/udf.py,sha256=zCdO5__gLMCgrdHmOvIa0eoWKCDAU1uO-MMAu_EU13o,16228
@@ -93,20 +93,20 @@ datachain/lib/convert/flatten.py,sha256=IZFiUYbgXSxXhPSG5Cqf5IjnJ4ZDZKXMr4o_yCR1
 datachain/lib/convert/python_to_sql.py,sha256=wg-O5FRKX3x3Wh8ZL1b9ntMlgf1zRO4djMP3t8CHJLo,3188
 datachain/lib/convert/sql_to_python.py,sha256=XXCBYDQFUXJIBNWkjEP944cnCfJ8GF2Tji0DLF3A_zQ,315
 datachain/lib/convert/unflatten.py,sha256=ysMkstwJzPMWUlnxn-Z-tXJR3wmhjHeSN_P-sDcLS6s,2010
-datachain/lib/convert/values_to_tuples.py,sha256=
+datachain/lib/convert/values_to_tuples.py,sha256=j5yZMrVUH6W7b-7yUvdCTGI7JCUAYUOzHUGPoyZXAB0,4360
 datachain/lib/dc/__init__.py,sha256=HD0NYrdy44u6kkpvgGjJcvGz-UGTHui2azghcT8ZUg0,838
 datachain/lib/dc/csv.py,sha256=asWPAxhMgIoLAdD2dObDlnGL8CTSD3TAuFuM4ci89bQ,4374
 datachain/lib/dc/database.py,sha256=gYKh1iO5hOWMPFTU1vZC5kOXkJzVse14TYTWE4_1iEA,5940
-datachain/lib/dc/datachain.py,sha256=
+datachain/lib/dc/datachain.py,sha256=36J8QIB04hKKumQgLvHNTC94Pd7G2yE4slZ9RfwI9zw,76980
 datachain/lib/dc/datasets.py,sha256=u6hlz0Eodh_s39TOW6kz0VIL3nGfadqu8FLoWqDxSJs,6890
 datachain/lib/dc/hf.py,sha256=PJl2wiLjdRsMz0SYbLT-6H8b-D5i2WjeH7li8HHOk_0,2145
 datachain/lib/dc/json.py,sha256=ZUThPDAaP2gBFIL5vsQTwKBcuN_dhvC_O44wdDv0jEc,2683
 datachain/lib/dc/listings.py,sha256=2na9v63xO1vPUNaoBSzA-TSN49V7zQAb-4iS1wOPLFE,1029
 datachain/lib/dc/pandas.py,sha256=ObueUXDUFKJGu380GmazdG02ARpKAHPhSaymfmOH13E,1489
 datachain/lib/dc/parquet.py,sha256=zYcSgrWwyEDW9UxGUSVdIVsCu15IGEf0xL8KfWQqK94,1782
-datachain/lib/dc/records.py,sha256=
+datachain/lib/dc/records.py,sha256=Z6EWy6c6hf87cWiDlQduvrDgOHMLwqF22g-XksOnXsU,2884
 datachain/lib/dc/storage.py,sha256=QLf3-xMV2Gmy3AA8qF9WqAsb7R8Rk87l4s5hBoiCH98,5285
-datachain/lib/dc/utils.py,sha256=
+datachain/lib/dc/utils.py,sha256=VawOAlJSvAtZbsMg33s5tJe21TRx1Km3QggI1nN6tnw,3984
 datachain/lib/dc/values.py,sha256=cBQubhmPNEDMJldUXzGh-UKbdim4P6O2B91Gp39roKw,1389
 datachain/model/__init__.py,sha256=R9faX5OHV1xh2EW-g2MPedwbtEqt3LodJRyluB-QylI,189
 datachain/model/bbox.py,sha256=cQNHuQuVsh6bW3n3Hj40F2Cc20cExQ9Lg_q7R2jxUMI,9324
@@ -119,12 +119,12 @@ datachain/model/ultralytics/pose.py,sha256=gXAWfAk4OWZl93hKcQPKZvqJa3nIrECB4RM8K
 datachain/model/ultralytics/segment.py,sha256=koq1HASo29isf0in6oSlzmU4IzsmOXe87F1ajQQVfh4,2911
 datachain/query/__init__.py,sha256=7DhEIjAA8uZJfejruAVMZVcGFmvUpffuZJwgRqNwe-c,263
 datachain/query/batch.py,sha256=6w8gzLTmLeylststu-gT5jIqEfi4-djS7_yTYyeo-fw,4190
-datachain/query/dataset.py,sha256=
+datachain/query/dataset.py,sha256=8O9TFOBLyh_ylqY4gZ7MRLziwAQaU6YdDR_SfBSK65c,58806
 datachain/query/dispatch.py,sha256=5p_jXxKJVCfIA4jLSQ0tAY1IhZUS3oJvyQXUH0Dk3bc,13215
 datachain/query/metrics.py,sha256=r5b0ygYhokbXp8Mg3kCH8iFSRw0jxzyeBe-C-J_bKFc,938
 datachain/query/params.py,sha256=O_j89mjYRLOwWNhYZl-z7mi-rkdP7WyFmaDufsdTryE,863
 datachain/query/queue.py,sha256=waqM_KzavU8C-G95-4211Nd4GXna_u2747Chgwtgz2w,3839
-datachain/query/schema.py,sha256=
+datachain/query/schema.py,sha256=b_KnVy6B26Ol4nYG0LqNNpeQ1QYPk95YRGUjXfdaQWs,6606
 datachain/query/session.py,sha256=wNdOHAi4HrsEihfzdcTlfB5i1xyj0dw6rlUz84StOoU,6512
 datachain/query/udf.py,sha256=ljAYaF-J77t7iS4zc1-g1ssYd4c6Q-ccKGEc3VQQmeM,1322
 datachain/query/utils.py,sha256=u0A_BwG9PNs0DxoDcvSWgWLpj3ByTUv8CqH13CIuGag,1293
@@ -151,9 +151,9 @@ datachain/sql/sqlite/vector.py,sha256=ncW4eu2FlJhrP_CIpsvtkUabZlQdl2D5Lgwy_cbfqR
 datachain/toolkit/__init__.py,sha256=eQ58Q5Yf_Fgv1ZG0IO5dpB4jmP90rk8YxUWmPc1M2Bo,68
 datachain/toolkit/split.py,sha256=ktGWzY4kyzjWyR86dhvzw-Zhl0lVk_LOX3NciTac6qo,2914
 datachain/torch/__init__.py,sha256=gIS74PoEPy4TB3X6vx9nLO0Y3sLJzsA8ckn8pRWihJM,579
-datachain-0.16.
-datachain-0.16.
-datachain-0.16.
-datachain-0.16.
-datachain-0.16.
-datachain-0.16.
+datachain-0.16.1.dist-info/licenses/LICENSE,sha256=8DnqK5yoPI_E50bEg_zsHKZHY2HqPy4rYN338BHQaRA,11344
+datachain-0.16.1.dist-info/METADATA,sha256=9YPqP6Sthuf_fuxFX3miQyp9MEjRq8j2DqubLXvZg0k,11328
+datachain-0.16.1.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+datachain-0.16.1.dist-info/entry_points.txt,sha256=0GMJS6B_KWq0m3VT98vQI2YZodAMkn4uReZ_okga9R4,49
+datachain-0.16.1.dist-info/top_level.txt,sha256=lZPpdU_2jJABLNIg2kvEOBi8PtsYikbN1OdMLHk8bTg,10
+datachain-0.16.1.dist-info/RECORD,,
{datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/entry_points.txt: File without changes
{datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/licenses/LICENSE: File without changes
{datachain-0.16.0.dist-info → datachain-0.16.1.dist-info}/top_level.txt: File without changes