dlt-utils-lib 0.4.tar.gz → 0.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
PKG-INFO
@@ -1,5 +1,5 @@
  Metadata-Version: 2.1
  Name: dlt_utils_lib
- Version: 0.4
+ Version: 0.6
  Requires-Dist: pyspark
  Requires-Dist: delta-spark
@@ -0,0 +1,5 @@
+ Metadata-Version: 2.1
+ Name: dlt_utils_lib
+ Version: 0.6
+ Requires-Dist: pyspark
+ Requires-Dist: delta-spark
dlt_utils_lib.egg-info/SOURCES.txt
@@ -1,9 +1,9 @@
  setup.py
  dlt_utils/__init__.py
  dlt_utils/dlt_transformation.py
- dlt_utils/main_dlt_utils.py
  dlt_utils_lib.egg-info/PKG-INFO
  dlt_utils_lib.egg-info/SOURCES.txt
  dlt_utils_lib.egg-info/dependency_links.txt
  dlt_utils_lib.egg-info/requires.txt
+ dlt_utils_lib.egg-info/tmpz2kz6zic
  dlt_utils_lib.egg-info/top_level.txt
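Two things stand out in this release: dlt_utils/main_dlt_utils.py is gone from the sources, and a file named dlt_utils_lib.egg-info/tmpz2kz6zic has been added. The latter has the random-suffix shape of a tempfile scratch file, and the new five-line file in the second hunk above carries exactly the PKG-INFO metadata, so it appears a temporary copy of the metadata leaked into the sdist during the build. A minimal sketch for checking a downloaded sdist for this kind of stray file (the archive filename is an assumption):

import tarfile

# Hypothetical check: list the sdist contents and flag names that look like
# leaked tempfile scratch files (tempfile names start with "tmp" by default).
with tarfile.open("dlt_utils_lib-0.6.tar.gz") as sdist:
    for name in sdist.getnames():
        basename = name.rsplit("/", 1)[-1]
        marker = "  <-- possible leaked temp file" if basename.startswith("tmp") else ""
        print(name + marker)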
setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
      name='dlt_utils_lib',
-     version='0.4',
+     version='0.6',
      packages=find_packages(),
      install_requires=[
          'pyspark',
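The only change to setup.py is the version bump; the dependency list is untouched. As a quick sanity check after upgrading, the installed version can be read back from the package metadata (a minimal sketch using only the standard library):

from importlib.metadata import version

# Should print "0.6" once the new release is installed.
print(version("dlt_utils_lib"))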
dlt_utils/main_dlt_utils.py (file removed)
@@ -1,59 +0,0 @@
- import dlt
- from pyspark.sql.functions import col, expr, greatest, when, datediff, abs as spark_abs
- from dlt_transformations import update_cdc_timestamp, apply_partitions
- from pyspark.sql.types import TimestampType, DateType
- from pyspark.sql import DataFrame
- from pyspark.sql import SparkSession
-
- # Assuming SparkSession is already created in your Databricks notebook
- spark = SparkSession.builder.appName("DLT Pipeline").getOrCreate()
- spark.conf.set("pipelines.tableManagedByMultiplePipelinesCheck.enabled", "false")
-
-
- # Define the transformation function dynamically
- def create_bronze_table_definition(table_name: str, files_path: str, file_format: str, partitions: dict, schema_exclude_columns: list):
-     @dlt.table(
-         name=f"bronze_{table_name}",
-         comment="This is the bronze table.",
-         temporary=False
-     )
-     def transform_cdc_to_bronze():
-         df = spark.read.format(file_format).load(files_path)
-         fields = [field for field in df.schema.fields if field.name not in schema_exclude_columns]
-         schema_string = ', '.join([f"{field.name} {field.dataType.simpleString()}" for field in fields])
-         return spark \
-             .readStream \
-             .format('cloudFiles') \
-             .option("cloudFiles.format", file_format) \
-             .option("cloudFiles.schemaHints", schema_string) \
-             .load(files_path) \
-             .withColumn('cdc_timestamp', col('cdc_timestamp').cast('timestamp')) \
-             .transform(update_cdc_timestamp) \
-             .transform(lambda df: apply_partitions(df, partitions))
-
-     return transform_cdc_to_bronze
-
-
-
- # Create silver streaming data
- def silver_streaming_process(table_name: str, keys: list, partitions: dict, exclude_columns: list):
-     dlt.create_streaming_table(
-         name=table_name,
-         table_properties={
-             "delta.autoOptimize.optimizeWrite": "true",
-             "delta.autoOptimize.autoCompact": "true"
-         },
-         comment = "This is the silver table with source in",
-         partition_cols=partitions if partitions else None
-     )
-
-     dlt.apply_changes(
-         target=table_name,
-         source=f"bronze_{table_name}",
-         keys=keys,
-         sequence_by=col("cdc_timestamp"),
-         apply_as_deletes=expr("Op = 'D'"),
-         apply_as_truncates=expr("Op = 'T'"),
-         except_column_list=["Op", "_rescued_data"] + exclude_columns,
-         stored_as_scd_type=1
-     )
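The removed main_dlt_utils.py bundled two factory helpers: create_bronze_table_definition, which registers a bronze DLT table fed by Auto Loader (cloudFiles) with schema hints derived from a one-off batch read, and silver_streaming_process, which creates a silver streaming table and applies CDC changes as SCD type 1 via dlt.apply_changes. Note that the module imports from dlt_transformations, while the packaged module is dlt_utils/dlt_transformation.py; that mismatch would break the import at load time and may be why the file was dropped in 0.6. For reference, a hypothetical sketch of how a 0.4 user might have wired the two helpers in a Databricks DLT pipeline notebook (table name, path, format, and keys are all invented for illustration):

# Hypothetical 0.4 usage; all concrete values below are invented.
from dlt_utils.main_dlt_utils import (
    create_bronze_table_definition,
    silver_streaming_process,
)

# Register bronze_orders: Auto Loader ingestion with CDC timestamp handling.
# The expected shape of the partitions dict is defined by apply_partitions,
# which is not shown in this diff, so None is passed here.
create_bronze_table_definition(
    table_name="orders",
    files_path="/mnt/landing/orders",  # assumed landing path
    file_format="parquet",             # assumed CDC file format
    partitions=None,
    schema_exclude_columns=[],         # no columns excluded from schema hints
)

# Build the silver "orders" table from bronze_orders as SCD type 1.
silver_streaming_process(
    table_name="orders",
    keys=["order_id"],  # assumed primary key
    partitions=None,
    exclude_columns=[],
)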