sws-spark-dissemination-helper 0.0.107__tar.gz → 0.0.109__tar.gz

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (16)
  1. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +8 -4
  4. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +74 -0
  5. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/.gitignore +0 -0
  6. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/LICENSE +0 -0
  7. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/README.md +0 -0
  8. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/old_requirements.txt +0 -0
  9. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/requirements.txt +0 -0
  10. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/constants.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/src/sws_spark_dissemination_helper/utils.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/tests/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.107 → sws_spark_dissemination_helper-0.0.109}/tests/test.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.107
+Version: 0.0.109
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://bitbucket.org/cioapps/sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "sws-spark-dissemination-helper"
-version = "0.0.107"
+version = "0.0.109"
 dependencies = [
     "annotated-types==0.7.0",
     "boto3==1.36.18",
src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py
@@ -291,13 +291,17 @@ class SWSPostgresSparkReader:
         self,
         domain_code: str,
     ) -> DataFrame:
-        return self.read_pg_table_and_check_duplicates_for_domain(
+        df = self.read_pg_table(
             pg_table=DatasetDatatables.MAPPING_CODE_CORRECTION.id,
-            table_name=DatasetDatatables.MAPPING_CODE_CORRECTION.name,
             custom_schema=DatasetDatatables.MAPPING_CODE_CORRECTION.schema,
-            domain_code=domain_code,
-            unique_columns=["old_code"],
         )
+        df.filter(
+            col("mapping_type").isNull() | (col("mapping_type") == lit(""))
+        ).transform(
+            correct_domain_filter, domain=domain_code, unique_columns=["old_code"]
+        )
+
+        return df

     def get_domain_code_source_datasets_ids_dest_dataset_id(
         self, dataset_id: str, domain_code: str = None
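The new read path defers domain filtering and the duplicate check to `DataFrame.transform`, which forwards extra keyword arguments to the supplied function (supported since Spark 3.3). A minimal sketch of a transform matching the call shape in the diff; the name `correct_domain_filter` appears in the diff, but its body below is an assumption for illustration only:

```python
from pyspark.sql import DataFrame
from pyspark.sql.functions import col


def correct_domain_filter(
    df: DataFrame, domain: str, unique_columns: list[str]
) -> DataFrame:
    # Keep only rows for the requested domain; the "domain_code"
    # column name is assumed for this sketch.
    filtered = df.filter(col("domain_code") == domain)
    # De-duplicate on the key columns (e.g. ["old_code"]); a real
    # helper might instead raise when duplicates are found.
    return filtered.dropDuplicates(unique_columns)


# Usage, mirroring the call in the diff:
# df.transform(correct_domain_filter, domain=domain_code, unique_columns=["old_code"])
```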
src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py
@@ -163,6 +163,80 @@ class SWSSilverIcebergSparkHelper:

         logging.info(f"Checking time validity for {col_name} of type {col_type}")

+        if col_type == "area":
+            df_start_year_correction = self.df_mapping_code_correction.filter(
+                col("var_type") == lit("start_year")
+            )
+            df_end_year_correction = self.df_mapping_code_correction.filter(
+                col("var_type") == lit("end_year")
+            )
+
+            original_col_order = df.columns
+
+            df = (
+                df.alias("d")
+                .join(
+                    F.broadcast(df_start_year_correction).alias("sy"),
+                    on=col(f"d.{col_name}") == col("sy.mapping_type"),
+                    how="left",
+                )
+                .join(
+                    F.broadcast(df_end_year_correction).alias("ey"),
+                    on=col(f"d.{col_name}") == col("ey.mapping_type"),
+                    how="left",
+                )
+                .withColumn("valid_new_start_year", col("sy.new_code").isNotNull())
+                .withColumn("valid_new_end_year", col("ey.new_code").isNotNull())
+                .withColumn(
+                    "note",
+                    F.when(
+                        col("valid_new_start_year"),
+                        F.array_append(
+                            col("d.note"),
+                            F.concat(
+                                col("sy.note"),
+                                lit(" from "),
+                                col("sy.old_code"),
+                                lit(" to "),
+                                col("sy.new_code"),
+                            ),
+                        ),
+                    ).otherwise(col("note")),
+                )
+                .withColumn(
+                    "note",
+                    F.when(
+                        col("valid_new_end_year"),
+                        F.array_append(
+                            col("note"),
+                            F.concat(
+                                col("ey.note"),
+                                lit(" from "),
+                                col("ey.old_code"),
+                                lit(" to "),
+                                col("ey.new_code"),
+                            ),
+                        ),
+                    ).otherwise(col("note")),
+                )
+                .withColumn(
+                    f"{col_name}_start_date",
+                    F.when(
+                        col("valid_new_start_year"), F.to_date(col("sy.new_code"))
+                    ).otherwise(col(f"d.{col_name}_start_date")),
+                )
+                .withColumn(
+                    f"{col_name}_end_date",
+                    F.when(
+                        col("valid_new_end_year"), F.to_date(col("ey.new_code"))
+                    ).otherwise(col(f"d.{col_name}_end_date")),
+                )
+                .selectExpr(
+                    "d.*", "note", f"{col_name}_start_date", f"{col_name}_end_date"
+                )
+                .select(*original_col_order)
+            )
+
         # Iterate through columns and build conditions dynamically
         start_date_condition = col(f"{col_name}_start_date").isNull() | (
             col(f"{col_name}_start_date") <= col(f"{self.time_column}_start_date")