datachain 0.8.4__py3-none-any.whl → 0.8.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of datachain might be problematic.

datachain/asyn.py CHANGED
@@ -8,12 +8,14 @@ from collections.abc import (
     Iterable,
     Iterator,
 )
-from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ThreadPoolExecutor, wait
 from heapq import heappop, heappush
 from typing import Any, Callable, Generic, Optional, TypeVar
 
 from fsspec.asyn import get_loop
 
+from datachain.utils import safe_closing
+
 ASYNC_WORKERS = 20
 
 InputT = TypeVar("InputT", contravariant=True)  # noqa: PLC0105
@@ -56,6 +58,7 @@ class AsyncMapper(Generic[InputT, ResultT]):
         self.pool = ThreadPoolExecutor(workers)
         self._tasks: set[asyncio.Task] = set()
         self._shutdown_producer = threading.Event()
+        self._producer_is_shutdown = threading.Event()
 
     def start_task(self, coro: Coroutine) -> asyncio.Task:
         task = self.loop.create_task(coro)
@@ -64,11 +67,16 @@ class AsyncMapper(Generic[InputT, ResultT]):
         return task
 
     def _produce(self) -> None:
-        for item in self.iterable:
-            if self._shutdown_producer.is_set():
-                return
-            fut = asyncio.run_coroutine_threadsafe(self.work_queue.put(item), self.loop)
-            fut.result()  # wait until the item is in the queue
+        try:
+            with safe_closing(self.iterable):
+                for item in self.iterable:
+                    if self._shutdown_producer.is_set():
+                        return
+                    coro = self.work_queue.put(item)
+                    fut = asyncio.run_coroutine_threadsafe(coro, self.loop)
+                    fut.result()  # wait until the item is in the queue
+        finally:
+            self._producer_is_shutdown.set()
 
     async def produce(self) -> None:
         await self.to_thread(self._produce)
@@ -179,6 +187,8 @@ class AsyncMapper(Generic[InputT, ResultT]):
             self.shutdown_producer()
             if not async_run.done():
                 async_run.cancel()
+                wait([async_run])
+            self._producer_is_shutdown.wait()
 
     def __iter__(self):
         return self.iterate()
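Two details of the `_produce` rewrite are easy to miss in the hunk: `safe_closing(self.iterable)` guarantees the source iterator is closed even when the producer exits early, and the `finally` block sets `_producer_is_shutdown` so the consumer side (which now calls `wait([async_run])` and `self._producer_is_shutdown.wait()`) only returns once the producer thread has actually finished. `safe_closing` lives in `datachain.utils` and is not part of this diff; a minimal sketch of what such a helper plausibly looks like, offered as an assumption rather than the released implementation:

from contextlib import contextmanager

@contextmanager
def safe_closing(thing):
    # Assumed equivalent of datachain.utils.safe_closing: yield the object,
    # then close it on exit if it supports close() (e.g. a generator feeding
    # the producer thread), releasing upstream resources on early shutdown.
    try:
        yield thing
    finally:
        if hasattr(thing, "close"):
            thing.close()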
datachain/cache.py CHANGED
@@ -1,8 +1,12 @@
 import os
+from collections.abc import Iterator
+from contextlib import contextmanager
+from tempfile import mkdtemp
 from typing import TYPE_CHECKING, Optional
 
 from dvc_data.hashfile.db.local import LocalHashFileDB
 from dvc_objects.fs.local import LocalFileSystem
+from dvc_objects.fs.utils import remove
 from fsspec.callbacks import Callback, TqdmCallback
 
 from .progress import Tqdm
@@ -20,6 +24,23 @@ def try_scandir(path):
         pass
 
 
+def get_temp_cache(tmp_dir: str, prefix: Optional[str] = None) -> "DataChainCache":
+    cache_dir = mkdtemp(prefix=prefix, dir=tmp_dir)
+    return DataChainCache(cache_dir, tmp_dir=tmp_dir)
+
+
+@contextmanager
+def temporary_cache(
+    tmp_dir: str, prefix: Optional[str] = None, delete: bool = True
+) -> Iterator["DataChainCache"]:
+    cache = get_temp_cache(tmp_dir, prefix=prefix)
+    try:
+        yield cache
+    finally:
+        if delete:
+            cache.destroy()
+
+
 class DataChainCache:
     def __init__(self, cache_dir: str, tmp_dir: str):
         self.odb = LocalHashFileDB(
@@ -28,6 +49,9 @@ class DataChainCache:
             tmp_dir=tmp_dir,
         )
 
+    def __eq__(self, other) -> bool:
+        return self.odb == other.odb
+
     @property
     def cache_dir(self):
         return self.odb.path
@@ -63,7 +87,7 @@ class DataChainCache:
         if size < 0:
             size = await client.get_size(from_path, version_id=file.version)
         cb = callback or TqdmCallback(
-            tqdm_kwargs={"desc": odb_fs.name(from_path), "bytes": True},
+            tqdm_kwargs={"desc": odb_fs.name(from_path), "bytes": True, "leave": False},
             tqdm_cls=Tqdm,
             size=size,
         )
@@ -82,20 +106,18 @@ class DataChainCache:
             os.unlink(tmp_info)
 
     def store_data(self, file: "File", contents: bytes) -> None:
-        checksum = file.get_hash()
-        dst = self.path_from_checksum(checksum)
-        if not os.path.exists(dst):
-            # Create the file only if it's not already in cache
-            os.makedirs(os.path.dirname(dst), exist_ok=True)
-            with open(dst, mode="wb") as f:
-                f.write(contents)
-
-    def clear(self):
+        self.odb.add_bytes(file.get_hash(), contents)
+
+    def clear(self) -> None:
         """
         Completely clear the cache.
         """
         self.odb.clear()
 
+    def destroy(self) -> None:
+        # `clear` leaves the prefix directory structure intact.
+        remove(self.cache_dir)
+
     def get_total_size(self) -> int:
         total = 0
         for subdir in try_scandir(self.odb.path):
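The new `get_temp_cache`/`temporary_cache` helpers create a disposable cache in a `mkdtemp()` directory under `tmp_dir`, and `destroy()` removes the whole directory tree, whereas `clear()` only empties the object store and leaves the prefix directories behind. A usage sketch (the `/tmp/datachain` path and the prefix are illustrative, not from the release):

from datachain.cache import temporary_cache

with temporary_cache("/tmp/datachain", prefix="job-") as cache:
    # Behaves like any DataChainCache; objects stored through it land in a
    # freshly created directory under /tmp/datachain.
    print(cache.cache_dir)
# On exit, delete=True (the default) calls cache.destroy() and removes the tree.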
@@ -405,6 +405,7 @@ def get_download_bar(bar_format: str, total_size: int):
         unit_scale=True,
         unit_divisor=1000,
         total=total_size,
+        leave=False,
     )
 
 
@@ -429,6 +430,7 @@ def instantiate_node_groups(
             unit_scale=True,
             unit_divisor=1000,
             total=total_files,
+            leave=False,
         )
     )
 
@@ -534,6 +536,12 @@ def find_column_to_str(  # noqa: PLR0911
     return ""
 
 
+def clone_catalog_with_cache(catalog: "Catalog", cache: "DataChainCache") -> "Catalog":
+    clone = catalog.copy()
+    clone.cache = cache
+    return clone
+
+
 class Catalog:
     def __init__(
         self,
@@ -1242,10 +1250,17 @@ class Catalog:
         path: str,
         version_id: Optional[str] = None,
         client_config=None,
+        content_disposition: Optional[str] = None,
+        **kwargs,
     ) -> str:
         client_config = client_config or self.client_config
         client = Client.get_client(source, self.cache, **client_config)
-        return client.url(path, version_id=version_id)
+        return client.url(
+            path,
+            version_id=version_id,
+            content_disposition=content_disposition,
+            **kwargs,
+        )
 
     def export_dataset_table(
         self,
@@ -1437,6 +1452,7 @@ class Catalog:
             unit_scale=True,
             unit_divisor=1000,
             total=ds_stats.num_objects,  # type: ignore [union-attr]
+            leave=False,
        )
 
         schema = DatasetRecord.parse_schema(remote_ds_version.schema)
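The signed-URL method on `Catalog` now accepts `content_disposition` (plus arbitrary `**kwargs`) and forwards it to the client's `url()`, where each backend maps it onto its own presign parameter (see the azure, gcs and s3 hunks below). A hedged sketch of the call path, with placeholder bucket/path values and an import path that is assumed rather than taken from this diff:

from datachain.client import Client  # assumed import path

client = Client.get_client("s3://my-bucket", catalog.cache)  # `catalog` is an existing Catalog
url = client.url(
    "reports/2024.csv",
    version_id=None,
    # A standard HTTP Content-Disposition value; ClientS3 turns this into
    # ResponseContentDisposition, GCSClient into response_disposition, and
    # AzureClient passes it to fs.sign as content_disposition.
    content_disposition='attachment; filename="2024.csv"',
)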
datachain/client/azure.py CHANGED
@@ -31,8 +31,12 @@ class AzureClient(Client):
         Generate a signed URL for the given path.
         """
         version_id = kwargs.pop("version_id", None)
+        content_disposition = kwargs.pop("content_disposition", None)
         result = self.fs.sign(
-            self.get_full_path(path, version_id), expiration=expires, **kwargs
+            self.get_full_path(path, version_id),
+            expiration=expires,
+            content_disposition=content_disposition,
+            **kwargs,
         )
         return result + (f"&versionid={version_id}" if version_id else "")
 
@@ -42,7 +46,7 @@ class AzureClient(Client):
         prefix = prefix.lstrip(DELIMITER) + DELIMITER
         found = False
         try:
-            with tqdm(desc=f"Listing {self.uri}", unit=" objects") as pbar:
+            with tqdm(desc=f"Listing {self.uri}", unit=" objects", leave=False) as pbar:
                 async with self.fs.service_client.get_container_client(
                     container=self.name
                 ) as container_client:
@@ -249,7 +249,7 @@ class Client(ABC):
         await main_task
 
     async def _fetch_nested(self, start_prefix: str, result_queue: ResultQueue) -> None:
-        progress_bar = tqdm(desc=f"Listing {self.uri}", unit=" objects")
+        progress_bar = tqdm(desc=f"Listing {self.uri}", unit=" objects", leave=False)
         loop = get_loop()
 
         queue: asyncio.Queue[str] = asyncio.Queue()
datachain/client/gcs.py CHANGED
@@ -39,11 +39,15 @@ class GCSClient(Client):
         (see https://cloud.google.com/storage/docs/access-public-data#api-link).
         """
         version_id = kwargs.pop("version_id", None)
+        content_disposition = kwargs.pop("content_disposition", None)
         if self.fs.storage_options.get("token") == "anon":
             query = f"?generation={version_id}" if version_id else ""
             return f"https://storage.googleapis.com/{self.name}/{path}{query}"
         return self.fs.sign(
-            self.get_full_path(path, version_id), expiration=expires, **kwargs
+            self.get_full_path(path, version_id),
+            expiration=expires,
+            response_disposition=content_disposition,
+            **kwargs,
         )
 
     @staticmethod
@@ -83,7 +87,7 @@ class GCSClient(Client):
         self, page_queue: PageQueue, result_queue: ResultQueue
     ) -> bool:
         found = False
-        with tqdm(desc=f"Listing {self.uri}", unit=" objects") as pbar:
+        with tqdm(desc=f"Listing {self.uri}", unit=" objects", leave=False) as pbar:
             while (page := await page_queue.get()) is not None:
                 if page:
                     found = True
datachain/client/s3.py CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+import os
 from typing import Any, Optional, cast
 from urllib.parse import parse_qs, urlsplit, urlunsplit
 
@@ -31,9 +32,11 @@ class ClientS3(Client):
         if "aws_token" in kwargs:
             kwargs.setdefault("token", kwargs.pop("aws_token"))
 
-        # caching bucket regions to use the right one in signed urls, otherwise
-        # it tries to randomly guess and creates wrong signature
-        kwargs.setdefault("cache_regions", True)
+        # remove this `if` when https://github.com/fsspec/s3fs/pull/929 lands
+        if not os.environ.get("AWS_REGION") and not os.environ.get("AWS_ENDPOINT_URL"):
+            # caching bucket regions to use the right one in signed urls, otherwise
+            # it tries to randomly guess and creates wrong signature
+            kwargs.setdefault("cache_regions", True)
 
         # We want to use newer v4 signature version since regions added after
         # 2014 are not going to support v2 which is the older one.
@@ -51,6 +54,21 @@ class ClientS3(Client):
 
         return cast(S3FileSystem, super().create_fs(**kwargs))
 
+    def url(self, path: str, expires: int = 3600, **kwargs) -> str:
+        """
+        Generate a signed URL for the given path.
+        """
+        version_id = kwargs.pop("version_id", None)
+        content_disposition = kwargs.pop("content_disposition", None)
+        if content_disposition:
+            kwargs["ResponseContentDisposition"] = content_disposition
+
+        return self.fs.sign(
+            self.get_full_path(path, version_id),
+            expiration=expires,
+            **kwargs,
+        )
+
     async def _fetch_flat(self, start_prefix: str, result_queue: ResultQueue) -> None:
         async def get_pages(it, page_queue):
             try:
@@ -61,7 +79,7 @@ class ClientS3(Client):
 
         async def process_pages(page_queue, result_queue):
             found = False
-            with tqdm(desc=f"Listing {self.uri}", unit=" objects") as pbar:
+            with tqdm(desc=f"Listing {self.uri}", unit=" objects", leave=False) as pbar:
                 while (res := await page_queue.get()) is not None:
                     if res:
                         found = True
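The new `ClientS3.url()` pops `version_id` and `content_disposition` from the kwargs, maps the latter to S3's `ResponseContentDisposition` presign parameter, and defers to `fs.sign()`. Roughly the same result expressed with plain boto3, for illustration only (the client itself goes through s3fs, and the bucket/key here are placeholders):

import boto3

s3 = boto3.client("s3")
url = s3.generate_presigned_url(
    "get_object",
    Params={
        "Bucket": "my-bucket",
        "Key": "reports/2024.csv",
        # This is where the content_disposition kwarg ends up for S3.
        "ResponseContentDisposition": 'attachment; filename="2024.csv"',
    },
    ExpiresIn=3600,
)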
@@ -79,6 +79,15 @@ class DatabaseEngine(ABC, Serializable):
         conn: Optional[Any] = None,
     ) -> Iterator[tuple[Any, ...]]: ...
 
+    def get_table(self, name: str) -> "Table":
+        table = self.metadata.tables.get(name)
+        if table is None:
+            sa.Table(name, self.metadata, autoload_with=self.engine)
+            # ^^^ This table may not be correctly initialised on some dialects
+            # Grab it from metadata instead.
+            table = self.metadata.tables[name]
+        return table
+
     @abstractmethod
     def executemany(
         self, query, params, cursor: Optional[Any] = None
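`get_table` now lives on `DatabaseEngine` (the `DataTable` hunks that follow delegate to it): on first access it reflects the table via SQLAlchemy, then reuses the copy cached in `metadata.tables`. A standalone illustration of that reflect-then-cache pattern in plain SQLAlchemy (not datachain code):

import sqlalchemy as sa

engine = sa.create_engine("sqlite:///:memory:")
metadata = sa.MetaData()

with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE items (id INTEGER PRIMARY KEY, name TEXT)"))

# First access: reflect the table definition from the database into `metadata`.
sa.Table("items", metadata, autoload_with=engine)
# Later accesses: reuse the Table object cached in metadata.tables.
items = metadata.tables["items"]
print(items.columns.keys())  # ['id', 'name']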
@@ -16,7 +16,6 @@ from datachain.sql.functions import path as pathfunc
 from datachain.sql.types import Int, SQLType, UInt64
 
 if TYPE_CHECKING:
-    from sqlalchemy import Engine
     from sqlalchemy.engine.interfaces import Dialect
     from sqlalchemy.sql.base import (
         ColumnCollection,
@@ -25,6 +24,8 @@ if TYPE_CHECKING:
     )
     from sqlalchemy.sql.elements import ColumnElement
 
+    from datachain.data_storage.db_engine import DatabaseEngine
+
 
 DEFAULT_DELIMITER = "__"
 
@@ -150,14 +151,12 @@ class DataTable:
     def __init__(
         self,
         name: str,
-        engine: "Engine",
-        metadata: Optional["sa.MetaData"] = None,
+        engine: "DatabaseEngine",
         column_types: Optional[dict[str, SQLType]] = None,
         object_name: str = "file",
     ):
         self.name: str = name
         self.engine = engine
-        self.metadata: sa.MetaData = metadata if metadata is not None else sa.MetaData()
         self.column_types: dict[str, SQLType] = column_types or {}
         self.object_name = object_name
 
@@ -211,12 +210,7 @@ class DataTable:
         return sa.Table(name, metadata, *columns)
 
     def get_table(self) -> "sa.Table":
-        table = self.metadata.tables.get(self.name)
-        if table is None:
-            sa.Table(self.name, self.metadata, autoload_with=self.engine)
-            # ^^^ This table may not be correctly initialised on some dialects
-            # Grab it from metadata instead.
-            table = self.metadata.tables[self.name]
+        table = self.engine.get_table(self.name)
 
         column_types = self.column_types | {c.name: c.type for c in self.sys_columns()}
         # adjusting types for custom columns to be instances of SQLType if possible
@@ -186,6 +186,12 @@ class SQLiteDatabaseEngine(DatabaseEngine):
         self.db_file = db_file
         self.is_closed = False
 
+    def get_table(self, name: str) -> Table:
+        if self.is_closed:
+            # Reconnect in case of being closed previously.
+            self._reconnect()
+        return super().get_table(name)
+
     @retry_sqlite_locks
     def execute(
         self,
@@ -670,7 +676,7 @@ class SQLiteWarehouse(AbstractWarehouse):
         ]
         table = self.create_udf_table(columns)
 
-        with tqdm(desc="Preparing", unit=" rows") as pbar:
+        with tqdm(desc="Preparing", unit=" rows", leave=False) as pbar:
             self.copy_table(table, query, progress_cb=pbar.update)
 
         return table
@@ -191,8 +191,7 @@ class AbstractWarehouse(ABC, Serializable):
         table_name = self.dataset_table_name(dataset.name, version)
         return self.schema.dataset_row_cls(
             table_name,
-            self.db.engine,
-            self.db.metadata,
+            self.db,
             dataset.get_schema(version),
             object_name=object_name,
         )
@@ -904,8 +903,11 @@ class AbstractWarehouse(ABC, Serializable):
         This should be implemented to ensure that the provided tables
         are cleaned up as soon as they are no longer needed.
         """
-        with tqdm(desc="Cleanup", unit=" tables") as pbar:
-            for name in set(names):
+        to_drop = set(names)
+        with tqdm(
+            desc="Cleanup", unit=" tables", total=len(to_drop), leave=False
+        ) as pbar:
+            for name in to_drop:
                 self.db.drop_table(Table(name, self.db.metadata), if_exists=True)
                 pbar.update(1)
 
@@ -1,6 +1,7 @@
 import random
 import string
 from collections.abc import Sequence
+from enum import Enum
 from typing import TYPE_CHECKING, Optional, Union
 
 import sqlalchemy as sa
@@ -16,7 +17,22 @@ if TYPE_CHECKING:
 C = Column
 
 
-def compare(  # noqa: PLR0912, PLR0915, C901
+def get_status_col_name() -> str:
+    """Returns new unique status col name"""
+    return "diff_" + "".join(
+        random.choice(string.ascii_letters)  # noqa: S311
+        for _ in range(10)
+    )
+
+
+class CompareStatus(str, Enum):
+    ADDED = "A"
+    DELETED = "D"
+    MODIFIED = "M"
+    SAME = "S"
+
+
+def _compare(  # noqa: PLR0912, PLR0915, C901
     left: "DataChain",
     right: "DataChain",
     on: Union[str, Sequence[str]],
@@ -72,13 +88,10 @@ def compare(  # noqa: PLR0912, PLR0915, C901
             "At least one of added, deleted, modified, same flags must be set"
         )
 
-    # we still need status column for internal implementation even if not
-    # needed in output
     need_status_col = bool(status_col)
-    status_col = status_col or "diff_" + "".join(
-        random.choice(string.ascii_letters)  # noqa: S311
-        for _ in range(10)
-    )
+    # we still need status column for internal implementation even if not
+    # needed in the output
+    status_col = status_col or get_status_col_name()
 
     # calculate on and compare column names
     right_on = right_on or on
@@ -112,7 +125,7 @@ def compare(  # noqa: PLR0912, PLR0915, C901
                 for c in [f"{_rprefix(c, rc)}{rc}" for c, rc in zip(on, right_on)]
             ]
         )
-        diff_cond.append((added_cond, "A"))
+        diff_cond.append((added_cond, CompareStatus.ADDED))
     if modified and compare:
         modified_cond = sa.or_(
             *[
@@ -120,7 +133,7 @@ def compare(  # noqa: PLR0912, PLR0915, C901
                 for c, rc in zip(compare, right_compare)  # type: ignore[arg-type]
             ]
         )
-        diff_cond.append((modified_cond, "M"))
+        diff_cond.append((modified_cond, CompareStatus.MODIFIED))
     if same and compare:
         same_cond = sa.and_(
             *[
@@ -128,9 +141,11 @@ def compare(  # noqa: PLR0912, PLR0915, C901
                 for c, rc in zip(compare, right_compare)  # type: ignore[arg-type]
             ]
         )
-        diff_cond.append((same_cond, "S"))
+        diff_cond.append((same_cond, CompareStatus.SAME))
 
-    diff = sa.case(*diff_cond, else_=None if compare else "M").label(status_col)
+    diff = sa.case(*diff_cond, else_=None if compare else CompareStatus.MODIFIED).label(
+        status_col
+    )
     diff.type = String()
 
     left_right_merge = left.merge(
@@ -145,7 +160,7 @@ def compare(  # noqa: PLR0912, PLR0915, C901
         )
     )
 
-    diff_col = sa.literal("D").label(status_col)
+    diff_col = sa.literal(CompareStatus.DELETED).label(status_col)
     diff_col.type = String()
 
     right_left_merge = right.merge(
@@ -195,3 +210,92 @@ def compare(  # noqa: PLR0912, PLR0915, C901
         res = res.select_except(C(status_col))
 
     return left._evolve(query=res, signal_schema=schema)
+
+
+def compare_and_split(
+    left: "DataChain",
+    right: "DataChain",
+    on: Union[str, Sequence[str]],
+    right_on: Optional[Union[str, Sequence[str]]] = None,
+    compare: Optional[Union[str, Sequence[str]]] = None,
+    right_compare: Optional[Union[str, Sequence[str]]] = None,
+    added: bool = True,
+    deleted: bool = True,
+    modified: bool = True,
+    same: bool = False,
+) -> dict[str, "DataChain"]:
+    """Comparing two chains and returning multiple chains, one for each of `added`,
+    `deleted`, `modified` and `same` status. Result is returned in form of
+    dictionary where each item represents one of the statuses and key values
+    are `A`, `D`, `M`, `S` corresponding. Note that status column is not in the
+    resulting chains.
+
+    Parameters:
+        left: Chain to calculate diff on.
+        right: Chain to calculate diff from.
+        on: Column or list of columns to match on. If both chains have the
+            same columns then this column is enough for the match. Otherwise,
+            `right_on` parameter has to specify the columns for the other chain.
+            This value is used to find corresponding row in other dataset. If not
+            found there, row is considered as added (or removed if vice versa), and
+            if found then row can be either modified or same.
+        right_on: Optional column or list of columns
+            for the `other` to match.
+        compare: Column or list of columns to compare on. If both chains have
+            the same columns then this column is enough for the compare. Otherwise,
+            `right_compare` parameter has to specify the columns for the other
+            chain. This value is used to see if row is modified or same. If
+            not set, all columns will be used for comparison
+        right_compare: Optional column or list of columns
+            for the `other` to compare to.
+        added (bool): Whether to return chain containing only added rows.
+        deleted (bool): Whether to return chain containing only deleted rows.
+        modified (bool): Whether to return chain containing only modified rows.
+        same (bool): Whether to return chain containing only same rows.
+
+    Example:
+        ```py
+        chains = compare(
+            persons,
+            new_persons,
+            on=["id"],
+            right_on=["other_id"],
+            compare=["name"],
+            added=True,
+            deleted=True,
+            modified=True,
+            same=True,
+        )
+        ```
+    """
+    status_col = get_status_col_name()
+
+    res = _compare(
+        left,
+        right,
+        on,
+        right_on=right_on,
+        compare=compare,
+        right_compare=right_compare,
+        added=added,
+        deleted=deleted,
+        modified=modified,
+        same=same,
+        status_col=status_col,
+    )
+
+    chains = {}
+
+    def filter_by_status(compare_status) -> "DataChain":
+        return res.filter(C(status_col) == compare_status).select_except(status_col)
+
+    if added:
+        chains[CompareStatus.ADDED.value] = filter_by_status(CompareStatus.ADDED)
+    if deleted:
+        chains[CompareStatus.DELETED.value] = filter_by_status(CompareStatus.DELETED)
+    if modified:
+        chains[CompareStatus.MODIFIED.value] = filter_by_status(CompareStatus.MODIFIED)
+    if same:
+        chains[CompareStatus.SAME.value] = filter_by_status(CompareStatus.SAME)
+
+    return chains
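`compare_and_split` runs the (now private) `_compare` once with a generated status column and then filters the result into one chain per requested status, keyed by the `CompareStatus` values. A usage sketch based on the docstring above (`persons` and `new_persons` are placeholder chains):

chains = compare_and_split(
    persons,
    new_persons,
    on=["id"],
    compare=["name"],
    added=True,
    deleted=True,
    modified=True,
    same=False,
)

added_rows = chains["A"]      # CompareStatus.ADDED.value
deleted_rows = chains["D"]    # CompareStatus.DELETED.value
modified_rows = chains["M"]   # CompareStatus.MODIFIED.value
# chains["S"] exists only when same=True.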
@@ -16,7 +16,7 @@ from .aggregate import (
     sum,
 )
 from .array import cosine_distance, euclidean_distance, length, sip_hash_64
-from .conditional import case, greatest, least
+from .conditional import case, greatest, ifelse, least
 from .numeric import bit_and, bit_hamming_distance, bit_or, bit_xor, int_hash_64
 from .random import rand
 from .string import byte_hamming_distance
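`ifelse` is now re-exported alongside `case`, `greatest` and `least`; the `__all__` hunk below adds it to the public list as well. A hedged example of how it might be used in a chain; the column name is made up and the `ifelse(condition, if_val, else_val)` signature is assumed from the other conditional helpers rather than taken from this diff:

from datachain import C, func  # assumed top-level re-exports

labeled = chain.mutate(  # `chain` is an existing DataChain
    size_class=func.ifelse(C("file.size") > 1_000_000, "large", "small"),
)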
@@ -40,6 +40,7 @@ __all__ = [
     "euclidean_distance",
     "first",
     "greatest",
+    "ifelse",
     "int_hash_64",
     "least",
     "length",