deltacat 0.2.9__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. deltacat/__init__.py +1 -1
  2. deltacat/aws/redshift/__init__.py +4 -0
  3. deltacat/aws/redshift/model/manifest.py +93 -1
  4. deltacat/aws/s3u.py +250 -111
  5. deltacat/catalog/default_catalog_impl/__init__.py +369 -0
  6. deltacat/compute/compactor_v2/compaction_session.py +175 -152
  7. deltacat/compute/compactor_v2/model/hash_bucket_input.py +6 -0
  8. deltacat/compute/compactor_v2/model/merge_file_group.py +213 -0
  9. deltacat/compute/compactor_v2/model/merge_input.py +8 -24
  10. deltacat/compute/compactor_v2/model/merge_result.py +1 -0
  11. deltacat/compute/compactor_v2/steps/hash_bucket.py +4 -56
  12. deltacat/compute/compactor_v2/steps/merge.py +106 -171
  13. deltacat/compute/compactor_v2/utils/delta.py +97 -0
  14. deltacat/compute/compactor_v2/utils/merge.py +126 -0
  15. deltacat/compute/compactor_v2/utils/task_options.py +47 -4
  16. deltacat/compute/merge_on_read/__init__.py +4 -0
  17. deltacat/compute/merge_on_read/daft.py +40 -0
  18. deltacat/compute/merge_on_read/model/__init__.py +0 -0
  19. deltacat/compute/merge_on_read/model/merge_on_read_params.py +66 -0
  20. deltacat/compute/merge_on_read/utils/__init__.py +0 -0
  21. deltacat/compute/merge_on_read/utils/delta.py +42 -0
  22. deltacat/storage/interface.py +10 -2
  23. deltacat/storage/model/types.py +3 -11
  24. deltacat/tests/catalog/__init__.py +0 -0
  25. deltacat/tests/catalog/test_default_catalog_impl.py +98 -0
  26. deltacat/tests/compute/compact_partition_test_cases.py +126 -1
  27. deltacat/tests/compute/test_compact_partition_incremental.py +4 -1
  28. deltacat/tests/compute/test_compact_partition_rebase_then_incremental.py +9 -2
  29. deltacat/tests/local_deltacat_storage/__init__.py +19 -2
  30. deltacat/tests/test_utils/pyarrow.py +33 -14
  31. deltacat/tests/utils/test_daft.py +42 -2
  32. deltacat/types/media.py +5 -0
  33. deltacat/types/tables.py +7 -1
  34. deltacat/utils/daft.py +78 -13
  35. {deltacat-0.2.9.dist-info → deltacat-1.0.0.dist-info}/METADATA +2 -2
  36. {deltacat-0.2.9.dist-info → deltacat-1.0.0.dist-info}/RECORD +39 -27
  37. {deltacat-0.2.9.dist-info → deltacat-1.0.0.dist-info}/LICENSE +0 -0
  38. {deltacat-0.2.9.dist-info → deltacat-1.0.0.dist-info}/WHEEL +0 -0
  39. {deltacat-0.2.9.dist-info → deltacat-1.0.0.dist-info}/top_level.txt +0 -0
deltacat/catalog/default_catalog_impl/__init__.py
@@ -0,0 +1,369 @@
+ from typing import Any, Dict, List, Optional, Set, Union, Tuple
+ import pyarrow as pa
+ import logging
+ from deltacat.catalog.model.table_definition import TableDefinition
+ from deltacat.storage.model.sort_key import SortKey
+ from deltacat.storage.model.list_result import ListResult
+ from deltacat.storage.model.namespace import Namespace
+ from deltacat.storage.model.types import (
+     DistributedDataset,
+     LifecycleState,
+     LocalDataset,
+     LocalTable,
+     SchemaConsistencyType,
+ )
+ from deltacat.storage.model.partition import PartitionLocator, Partition
+ from deltacat.storage.model.table_version import TableVersion
+ from deltacat.compute.merge_on_read.model.merge_on_read_params import MergeOnReadParams
+ from deltacat.storage.model.delta import DeltaType
+ import deltacat.storage.interface as deltacat_storage
+ from deltacat.types.media import ContentType, TableType, DistributedDatasetType
+ from deltacat.types.tables import TableWriteMode
+ from deltacat.compute.merge_on_read import MERGE_FUNC_BY_DISTRIBUTED_DATASET_TYPE
+ from deltacat import logs
+
+ logger = logs.configure_deltacat_logger(logging.getLogger(__name__))
+
+ STORAGE = None
+
+
+ # table functions
+ def write_to_table(
+     data: Union[LocalTable, LocalDataset, DistributedDataset],  # type: ignore
+     table: str,
+     namespace: Optional[str] = None,
+     mode: TableWriteMode = TableWriteMode.AUTO,
+     content_type: ContentType = ContentType.PARQUET,
+     *args,
+     **kwargs,
+ ) -> None:
+     """Write local or distributed data to a table. Raises an error if the
+     table does not exist and the table write mode is not CREATE or AUTO.
+
+     When creating a table, all `create_table` parameters may be optionally
+     specified as additional keyword arguments. When appending to, or replacing,
+     an existing table, all `alter_table` parameters may be optionally specified
+     as additional keyword arguments."""
+     raise NotImplementedError("write_to_table not implemented")
+
+
+ def read_table(
+     table: str,
+     namespace: Optional[str] = None,
+     table_version: Optional[str] = None,
+     table_type: Optional[TableType] = TableType.PYARROW,
+     distributed_dataset_type: Optional[
+         DistributedDatasetType
+     ] = DistributedDatasetType.RAY_DATASET,
+     partition_filter: Optional[List[Union[Partition, PartitionLocator]]] = None,
+     stream_position_range_inclusive: Optional[Tuple[int, int]] = None,
+     merge_on_read: Optional[bool] = False,
+     reader_kwargs: Optional[Dict[Any, Any]] = None,
+     deltacat_storage_kwargs: Optional[Dict[Any, Any]] = None,
+     *args,
+     **kwargs,
+ ) -> DistributedDataset:  # type: ignore
+     """Read a table into a distributed dataset."""
+
+     if reader_kwargs is None:
+         reader_kwargs = {}
+
+     if deltacat_storage_kwargs is None:
+         deltacat_storage_kwargs = {}
+
+     _validate_read_table_args(
+         namespace=namespace,
+         table_type=table_type,
+         distributed_dataset_type=distributed_dataset_type,
+         merge_on_read=merge_on_read,
+     )
+
+     table_version_obj = _get_latest_or_given_table_version(
+         namespace=namespace,
+         table_name=table,
+         table_version=table_version,
+         **deltacat_storage_kwargs,
+     )
+     table_version = table_version_obj.table_version
+
+     if (
+         table_version_obj.content_types is None
+         or len(table_version_obj.content_types) != 1
+     ):
+         raise ValueError(
+             "Expected exactly one content type but "
+             f"found {table_version_obj.content_types}."
+         )
+
+     logger.info(
+         f"Reading metadata for table={namespace}/{table}/{table_version} "
+         f"with partition_filters={partition_filter} and stream position"
+         f" range={stream_position_range_inclusive}"
+     )
+
+     if partition_filter is None:
+         logger.info(
+             f"Reading all partitions metadata in the table={table} "
+             "as partition_filter was None."
+         )
+         partition_filter = STORAGE.list_partitions(
+             table_name=table,
+             namespace=namespace,
+             table_version=table_version,
+             **deltacat_storage_kwargs,
+         ).all_items()
+
+     qualified_deltas = _get_deltas_from_partition_filter(
+         stream_position_range_inclusive=stream_position_range_inclusive,
+         partition_filter=partition_filter,
+         **deltacat_storage_kwargs,
+     )
+
+     logger.info(
+         f"Total qualified deltas={len(qualified_deltas)} "
+         f"from {len(partition_filter)} partitions."
+     )
+
+     merge_on_read_params = MergeOnReadParams.of(
+         {
+             "deltas": qualified_deltas,
+             "deltacat_storage": STORAGE,
+             "deltacat_storage_kwargs": deltacat_storage_kwargs,
+             "reader_kwargs": reader_kwargs,
+         }
+     )
+
+     return MERGE_FUNC_BY_DISTRIBUTED_DATASET_TYPE[distributed_dataset_type.value](
+         params=merge_on_read_params, **kwargs
+     )
+
+
+ def alter_table(
+     table: str,
+     namespace: Optional[str] = None,
+     lifecycle_state: Optional[LifecycleState] = None,
+     schema_updates: Optional[Dict[str, Any]] = None,
+     partition_updates: Optional[Dict[str, Any]] = None,
+     primary_keys: Optional[Set[str]] = None,
+     sort_keys: Optional[List[SortKey]] = None,
+     description: Optional[str] = None,
+     properties: Optional[Dict[str, str]] = None,
+     *args,
+     **kwargs,
+ ) -> None:
+     """Alter table definition."""
+     raise NotImplementedError("alter_table not implemented")
+
+
+ def create_table(
+     table: str,
+     namespace: Optional[str] = None,
+     lifecycle_state: Optional[LifecycleState] = None,
+     schema: Optional[Union[pa.Schema, str, bytes]] = None,
+     schema_consistency: Optional[Dict[str, SchemaConsistencyType]] = None,
+     partition_keys: Optional[List[Dict[str, Any]]] = None,
+     primary_keys: Optional[Set[str]] = None,
+     sort_keys: Optional[List[SortKey]] = None,
+     description: Optional[str] = None,
+     properties: Optional[Dict[str, str]] = None,
+     permissions: Optional[Dict[str, Any]] = None,
+     content_types: Optional[List[ContentType]] = None,
+     replace_existing_table: bool = False,
+     *args,
+     **kwargs,
+ ) -> TableDefinition:
+     """Create an empty table. Raises an error if the table already exists and
+     `replace_existing_table` is False."""
+     raise NotImplementedError("create_table not implemented")
+
+
+ def drop_table(
+     table: str, namespace: Optional[str] = None, purge: bool = False, *args, **kwargs
+ ) -> None:
+     """Drop a table from the catalog and optionally purge it. Raises an error
+     if the table does not exist."""
+     raise NotImplementedError("drop_table not implemented")
+
+
+ def refresh_table(table: str, namespace: Optional[str] = None, *args, **kwargs) -> None:
+     """Refresh metadata cached on the Ray cluster for the given table."""
+     raise NotImplementedError("refresh_table not implemented")
+
+
+ def list_tables(
+     namespace: Optional[str] = None, *args, **kwargs
+ ) -> ListResult[TableDefinition]:
+     """List a page of table definitions. Raises an error if the given namespace
+     does not exist."""
+     raise NotImplementedError("list_tables not implemented")
+
+
+ def get_table(
+     table: str, namespace: Optional[str] = None, *args, **kwargs
+ ) -> Optional[TableDefinition]:
+     """Get table definition metadata. Returns None if the given table does not
+     exist."""
+     raise NotImplementedError("get_table not implemented")
+
+
+ def truncate_table(
+     table: str, namespace: Optional[str] = None, *args, **kwargs
+ ) -> None:
+     """Truncate table data. Raises an error if the table does not exist."""
+     raise NotImplementedError("truncate_table not implemented")
+
+
+ def rename_table(
+     table: str, new_name: str, namespace: Optional[str] = None, *args, **kwargs
+ ) -> None:
+     """Rename a table."""
+     raise NotImplementedError("rename_table not implemented")
+
+
+ def table_exists(table: str, namespace: Optional[str] = None, *args, **kwargs) -> bool:
+     """Returns True if the given table exists, False if not."""
+     raise NotImplementedError("table_exists not implemented")
+
+
+ # namespace functions
+ def list_namespaces(*args, **kwargs) -> ListResult[Namespace]:
+     """List a page of table namespaces."""
+     raise NotImplementedError("list_namespaces not implemented")
+
+
+ def get_namespace(namespace: str, *args, **kwargs) -> Optional[Namespace]:
+     """Gets table namespace metadata for the specified table namespace. Returns
+     None if the given namespace does not exist."""
+     raise NotImplementedError("get_namespace not implemented")
+
+
+ def namespace_exists(namespace: str, *args, **kwargs) -> bool:
+     """Returns True if the given table namespace exists, False if not."""
+     raise NotImplementedError("namespace_exists not implemented")
+
+
+ def create_namespace(
+     namespace: str, permissions: Dict[str, Any], *args, **kwargs
+ ) -> Namespace:
+     """Creates a table namespace with the given name and permissions. Returns
+     the created namespace. Raises an error if the namespace already exists."""
+     raise NotImplementedError("create_namespace not implemented")
+
+
+ def alter_namespace(
+     namespace: str,
+     permissions: Optional[Dict[str, Any]] = None,
+     new_namespace: Optional[str] = None,
+     *args,
+     **kwargs,
+ ) -> None:
+     """Alter table namespace definition."""
+     raise NotImplementedError("alter_namespace not implemented")
+
+
+ def drop_namespace(namespace: str, purge: bool = False, *args, **kwargs) -> None:
+     """Drop the given namespace and all of its tables from the catalog,
+     optionally purging them."""
+     raise NotImplementedError("drop_namespace not implemented")
+
+
+ def default_namespace() -> str:
+     """Returns the default namespace for the catalog."""
+     raise NotImplementedError("default_namespace not implemented")
+
+
+ # catalog functions
+ def initialize(ds: deltacat_storage, *args, **kwargs) -> None:
+     """Initializes the data catalog with the given arguments."""
+     global STORAGE
+     STORAGE = ds
+
+
+ def _validate_read_table_args(
+     namespace: Optional[str] = None,
+     table_type: Optional[TableType] = None,
+     distributed_dataset_type: Optional[DistributedDatasetType] = None,
+     merge_on_read: Optional[bool] = None,
+ ):
+     if STORAGE is None:
+         raise ValueError(
+             "Catalog not initialized. Did you miss calling "
+             "initialize(ds=<deltacat_storage>)?"
+         )
+
+     if merge_on_read:
+         raise ValueError("Merge on read not supported currently.")
+
+     if table_type is not TableType.PYARROW:
+         raise ValueError("Only PYARROW table type is supported as of now")
+
+     if distributed_dataset_type is not DistributedDatasetType.DAFT:
+         raise ValueError("Only DAFT dataset type is supported as of now")
+
+     if namespace is None:
+         raise ValueError(
+             "namespace must be passed to uniquely identify a table in the catalog."
+         )
+
+
+ def _get_latest_or_given_table_version(
+     namespace: str,
+     table_name: str,
+     table_version: Optional[str] = None,
+     *args,
+     **kwargs,
+ ) -> TableVersion:
+     table_version_obj = None
+     if table_version is None:
+         table_version_obj = STORAGE.get_latest_table_version(
+             namespace=namespace, table_name=table_name, *args, **kwargs
+         )
+         table_version = table_version_obj.table_version
+     else:
+         table_version_obj = STORAGE.get_table_version(
+             namespace=namespace,
+             table_name=table_name,
+             table_version=table_version,
+             *args,
+             **kwargs,
+         )
+
+     return table_version_obj
+
+
+ def _get_deltas_from_partition_filter(
+     partition_filter: Optional[List[Union[Partition, PartitionLocator]]] = None,
+     stream_position_range_inclusive: Optional[Tuple[int, int]] = None,
+     *args,
+     **kwargs,
+ ):
+
+     result_deltas = []
+     start_stream_position, end_stream_position = stream_position_range_inclusive or (
+         None,
+         None,
+     )
+     for partition_like in partition_filter:
+         deltas = STORAGE.list_partition_deltas(
+             partition_like=partition_like,
+             ascending_order=True,
+             include_manifest=True,
+             start_stream_position=start_stream_position,
+             last_stream_position=end_stream_position,
+             *args,
+             **kwargs,
+         ).all_items()
+
+         for delta in deltas:
+             if (
+                 start_stream_position is None
+                 or delta.stream_position >= start_stream_position
+             ) and (
+                 end_stream_position is None
+                 or delta.stream_position <= end_stream_position
+             ):
+                 if delta.type == DeltaType.DELETE:
+                     raise ValueError("DELETE type deltas are not supported")
+                 result_deltas.append(delta)
+
+     return result_deltas
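
The new default_catalog_impl module above is a thin catalog facade over a pluggable storage backend: initialize() registers a storage module in the global STORAGE, and read_table() resolves the table version, gathers the qualified deltas for each partition, and dispatches to the merge-on-read function registered for the requested distributed dataset type. Below is a minimal usage sketch, not taken from the package itself: it assumes the local test storage module shipped in this release as the backend, and the table and namespace names are hypothetical.

import deltacat.tests.local_deltacat_storage as ds
from deltacat.catalog import default_catalog_impl as catalog
from deltacat.types.media import TableType, DistributedDatasetType

# Register a storage backend first; read_table() raises a ValueError
# until initialize() has been called.
catalog.initialize(ds=ds)

# Read every partition of a (hypothetical) table into a Daft dataframe.
# As of 1.0.0, validation requires a namespace and accepts only
# TableType.PYARROW with DistributedDatasetType.DAFT, so the dataset type
# must be passed explicitly (the signature's default is RAY_DATASET).
df = catalog.read_table(
    table="my_table",          # hypothetical table name
    namespace="my_namespace",  # hypothetical namespace; required by validation
    table_type=TableType.PYARROW,
    distributed_dataset_type=DistributedDatasetType.DAFT,
    deltacat_storage_kwargs={},  # backend-specific settings, if any
)

Per _get_deltas_from_partition_filter above, deltas are filtered inclusively by stream_position_range_inclusive, and any DELETE-type delta in range raises a ValueError rather than being merged.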