dlt-utils-lib 0.2.2__tar.gz → 0.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
 Metadata-Version: 2.1
 Name: dlt_utils_lib
-Version: 0.2.2
+Version: 0.2.3
 Provides-Extra: dev
@@ -47,3 +47,12 @@ def update_cdc_timestamp(df: DataFrame, time_diff_threshold: int) -> DataFrame:
     return df
 
 
+def add_default_value_for_removed_col(df: DataFrame, default_value_for_removed_col: dict) -> DataFrame:
+    if default_value_for_removed_col:
+        if default_value_for_removed_col['name'] in df.columns:
+            df = df.withColumn(default_value_for_removed_col['name'], when(col(default_value_for_removed_col['name']).isNull(), expr(default_value_for_removed_col['expr'])).otherwise(col(default_value_for_removed_col['name'])))
+        else:
+            df = df.withColumn(default_value_for_removed_col['name'], expr(default_value_for_removed_col['expr']))
+    return df
+
+
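For orientation, a minimal sketch of how the new helper can be called on its own; the SparkSession setup and sample values below are illustrative, not taken from the package:

    from pyspark.sql import SparkSession
    from dlt_utils.dlt_transformations import add_default_value_for_removed_col

    spark = SparkSession.builder.getOrCreate()

    # A frame where 'raw_data_index' arrives with NULLs after the column was removed upstream.
    df = spark.createDataFrame([(1, None), (2, 2)], ["id", "raw_data_index"])

    # Backfills NULLs in an existing column, or creates the column from the SQL expression.
    df = add_default_value_for_removed_col(df, {"name": "raw_data_index", "expr": "cast(0 as int)"})
    df.show()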
@@ -1,5 +1,5 @@
 from pyspark.sql.functions import col, expr, coalesce, lit, when
-from .dlt_transformations import update_cdc_timestamp, apply_partitions
+from .dlt_transformations import update_cdc_timestamp, apply_partitions, add_default_value_for_removed_col
 
 # Base code pipeline: streaming process
 def base_cdc_replication_process(dlt,
@@ -53,14 +53,7 @@ def create_bronze_table_definition(spark, dlt, table_name: str, files_path: str,
         temporary=False
     )
     def transform_cdc_to_bronze():
-        df = spark.read.format(file_format).load(files_path)
-
-        if default_value_for_removed_col:
-            if default_value_for_removed_col['name'] in df.columns:
-                df = df.withColumn(default_value_for_removed_col['name'], when(col(default_value_for_removed_col['name']).isNull(), expr(default_value_for_removed_col['expr'])).otherwise(col(default_value_for_removed_col['name'])))
-            else:
-                df = df.withColumn(default_value_for_removed_col['name'], expr(default_value_for_removed_col['expr']))
-
+        df = spark.read.format(file_format).load(files_path)
         fields = [field for field in df.schema.fields if field.name not in schema_exclude_columns]
         schema_string = ', '.join([f"{field.name} {field.dataType.simpleString()}" for field in fields])
         return spark \
@@ -73,10 +66,12 @@ def create_bronze_table_definition(spark, dlt, table_name: str, files_path: str,
             .withColumn('ar_h_change_seq', col('ar_h_change_seq').cast('string')) \
             .transform(lambda df: update_cdc_timestamp(df, time_diff_history_cdc_timestamp)) \
-            .transform(lambda df: apply_partitions(df, partitions))
+            .transform(lambda df: apply_partitions(df, partitions)) \
+            .transform(lambda df: add_default_value_for_removed_col(df, default_value_for_removed_col))
 
     return transform_cdc_to_bronze
 
 
+
 # Create silver streaming data
 def silver_streaming_process(dlt, table_name: str, keys: list, partitions: dict, exclude_columns: list):
     dlt.create_streaming_table(
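The refactor moves the default-value logic out of the read path and into the DataFrame.transform chain, so each step stays a named, reusable function. A self-contained sketch of that chaining pattern, with placeholder transforms rather than the package's own:

    from pyspark.sql import SparkSession, DataFrame
    from pyspark.sql.functions import col

    spark = SparkSession.builder.getOrCreate()

    def add_flag(df: DataFrame) -> DataFrame:
        # Placeholder transform standing in for helpers like apply_partitions.
        return df.withColumn("flag", col("id") > 1)

    df = spark.createDataFrame([(1,), (2,)], ["id"])

    # transform() keeps the pipeline readable top to bottom, one helper per step.
    result = df \
        .transform(add_flag) \
        .transform(lambda d: d.withColumnRenamed("flag", "is_large"))
    result.show()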
@@ -1,4 +1,4 @@
 Metadata-Version: 2.1
 Name: dlt-utils-lib
-Version: 0.2.2
+Version: 0.2.3
 Provides-Extra: dev
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='dlt_utils_lib',
-    version='0.2.2',
+    version='0.2.3',
     packages=find_packages(),
     install_requires=[
         'pyspark',
@@ -11,7 +11,7 @@ import warnings
 
 # import for Databricks execution, which does not support a normal PYTHONPATH :(
 sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "../dlt_utils_lib")))
-from dlt_utils.dlt_transformations import update_cdc_timestamp, apply_partitions
+from dlt_utils.dlt_transformations import update_cdc_timestamp, apply_partitions, add_default_value_for_removed_col
 
 @pytest.fixture(scope="session")
 def spark() -> SparkSession:
@@ -63,4 +63,34 @@ def test_update_cdc_timestamp(spark):
     assert result[2]['cdc_timestamp'] == datetime(2024, 1, 12)
 
     # The second row should retain its original cdc_timestamp because the difference is less than 5 days
-    assert result[1]['cdc_timestamp'] == datetime(2024, 1, 1)
+    assert result[1]['cdc_timestamp'] == datetime(2024, 1, 1)
+
+
+
+
+def test_add_default_value_for_removed_col(spark):
+    data = [
+        (1, None),
+        (2, 2),
+        (3, None)
+    ]
+    columns = ["id", "raw_data_index"]
+    df = spark.createDataFrame(data, columns)
+
+    default_value_for_removed_col = {
+        'name': 'raw_data_index',
+        'expr': 'cast(0 as int)'
+    }
+
+    result_df = add_default_value_for_removed_col(df, default_value_for_removed_col)
+
+    expected_data = [
+        (1, 0),
+        (2, 2),
+        (3, 0)
+    ]
+    expected_df = spark.createDataFrame(expected_data, columns)
+    assert result_df.collect() == expected_df.collect()
+
+
+
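The new test exercises only the NULL-backfill branch, where the column already exists. A hypothetical companion test for the other branch, where the column is absent and must be created, might look like this (a sketch against the same helper, not part of the package):

    def test_add_default_value_for_missing_col(spark):
        df = spark.createDataFrame([(1,), (2,)], ["id"])
        conf = {'name': 'raw_data_index', 'expr': 'cast(0 as int)'}
        result_df = add_default_value_for_removed_col(df, conf)
        # The helper's else-branch should append the column, filled from the expression.
        assert result_df.columns == ["id", "raw_data_index"]
        assert all(row['raw_data_index'] == 0 for row in result_df.collect())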