sws-spark-dissemination-helper 0.0.166__tar.gz → 0.0.176__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (16)
  1. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/PKG-INFO +10 -10
  2. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/pyproject.toml +10 -10
  3. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +62 -33
  4. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +110 -1
  5. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +24 -24
  6. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/constants.py +16 -16
  7. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/utils.py +9 -9
  8. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/.gitignore +0 -0
  9. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/LICENSE +0 -0
  10. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/README.md +0 -0
  11. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/tests/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.166 → sws_spark_dissemination_helper-0.0.176}/tests/test.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sws-spark-dissemination-helper
- Version: 0.0.166
+ Version: 0.0.176
  Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
  Project-URL: Repository, https://github.com/un-fao/fao-sws-it-python-spark-dissemination-helper
  Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -31,27 +31,27 @@ Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.9
  Requires-Dist: annotated-types==0.7.0
- Requires-Dist: boto3>=1.36.18
- Requires-Dist: botocore>=1.36.18
+ Requires-Dist: boto3>=1.40.0
+ Requires-Dist: botocore>=1.40.0
  Requires-Dist: certifi==2025.1.31
  Requires-Dist: charset-normalizer==3.4.1
- Requires-Dist: idna==3.10
+ Requires-Dist: idna>=3.10
  Requires-Dist: jmespath==1.0.1
  Requires-Dist: numpy==2.0.2
- Requires-Dist: pandas==2.2.3
+ Requires-Dist: pandas==2.3.3
  Requires-Dist: py4j==0.10.9.7
  Requires-Dist: pydantic-core==2.27.2
  Requires-Dist: pydantic==2.10.6
  Requires-Dist: pyspark==3.5.4
  Requires-Dist: python-dateutil==2.9.0.post0
  Requires-Dist: python-dotenv==0.19.2
- Requires-Dist: pytz==2025.1
+ Requires-Dist: pytz==2025.2
  Requires-Dist: requests==2.32.3
- Requires-Dist: s3transfer==0.11.2
+ Requires-Dist: s3transfer>=0.11.2
  Requires-Dist: six==1.17.0
- Requires-Dist: sws-api-client==1.5.3
- Requires-Dist: typing-extensions==4.12.2
- Requires-Dist: tzdata==2025.1
+ Requires-Dist: sws-api-client==2.3.0
+ Requires-Dist: typing-extensions>=4.12.2
+ Requires-Dist: tzdata==2025.2
  Requires-Dist: urllib3==1.26.20
  Description-Content-Type: text/markdown
pyproject.toml

@@ -4,30 +4,30 @@ build-backend = "hatchling.build"

  [project]
  name = "sws-spark-dissemination-helper"
- version = "0.0.166"
+ version = "0.0.176"
  dependencies = [
  "annotated-types==0.7.0",
- "boto3>=1.36.18",
- "botocore>=1.36.18",
+ "boto3>=1.40.0",
+ "botocore>=1.40.0",
  "certifi==2025.1.31",
  "charset-normalizer==3.4.1",
- "idna==3.10",
+ "idna>=3.10",
  "jmespath==1.0.1",
  "numpy==2.0.2",
- "pandas==2.2.3",
+ "pandas==2.3.3",
  "py4j==0.10.9.7",
  "pydantic==2.10.6",
  "pydantic_core==2.27.2",
  "pyspark==3.5.4",
  "python-dateutil==2.9.0.post0",
  "python-dotenv==0.19.2",
- "pytz==2025.1",
+ "pytz==2025.2",
  "requests==2.32.3",
- "s3transfer==0.11.2",
+ "s3transfer>=0.11.2",
  "six==1.17.0",
- "sws_api_client==1.5.3",
- "typing_extensions==4.12.2",
- "tzdata==2025.1",
+ "sws_api_client==2.3.0",
+ "typing_extensions>=4.12.2",
+ "tzdata==2025.2",
  "urllib3==1.26.20"
  ]
  requires-python = ">=3.9"
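The release mainly relaxes several exact pins (boto3, botocore, idna, s3transfer, typing-extensions) into minimum-version constraints and bumps pandas, pytz, tzdata, and sws-api-client. A small sketch for checking what actually got resolved in a given environment, using only the standard library; the package list is taken from the dependency changes above:

from importlib.metadata import version, PackageNotFoundError

# Report the resolved versions of the dependencies whose pins changed in 0.0.176
for pkg in ["boto3", "botocore", "idna", "pandas", "pytz", "s3transfer",
            "sws-api-client", "typing-extensions", "tzdata"]:
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} not installed")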
src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py

@@ -158,7 +158,7 @@ class SWSBronzeIcebergSparkHelper:

  return dfs_dimension

- def _prepare_element_uom(self) -> DataFrame:
+ def _prepare_element_uom(self) -> Union[DataFrame, None]:
  """Prepare the element and unit of measure join."""

  # Get the element DataFrame
@@ -170,23 +170,24 @@ class SWSBronzeIcebergSparkHelper:
  if dimension_column == self.element_column
  )

- # Join the element and the unit_of_measure
- df_element_uom = (
- df_element.alias("e")
- .join(
- self.df_unit_of_measure.alias("u"),
- col("e.unit_of_measure") == col("u.id"),
- )
- .select(
- col("e.code").alias("element_code"),
- col("u.code").alias("unit_of_measure"),
- col("u.symbol").alias("unit_of_measure_symbol"),
- col("u.base_unit").alias("unit_of_measure_base_unit"),
- col("u.multiplier").alias("unit_of_measure_multiplier"),
+ if any("unit_of_measure" == column.lower() for column in df_element.columns):
+ # Join the element and the unit_of_measure
+ df_element_uom = (
+ df_element.alias("e")
+ .join(
+ self.df_unit_of_measure.alias("u"),
+ col("e.unit_of_measure") == col("u.id"),
+ )
+ .select(
+ col("e.code").alias("element_code"),
+ col("u.code").alias("unit_of_measure"),
+ col("u.symbol").alias("unit_of_measure_symbol"),
+ col("u.base_unit").alias("unit_of_measure_base_unit"),
+ col("u.multiplier").alias("unit_of_measure_multiplier"),
+ )
  )
- )

- return df_element_uom
+ return df_element_uom

  def _gen_denormalized_observation(self) -> DataFrame:
  """Original query upon which the below computation is based
@@ -278,15 +279,16 @@ class SWSBronzeIcebergSparkHelper:
  .withColumnRenamed("code", dimension_column)
  )

- df_intermediate = (
- df_intermediate.alias("d")
- .join(
- F.broadcast(df_element_uom).alias("e"),
- col(f"d.{self.element_column}") == col("e.element_code"),
- "left",
+ if df_element_uom is not None:
+ df_intermediate = (
+ df_intermediate.alias("d")
+ .join(
+ F.broadcast(df_element_uom).alias("e"),
+ col(f"d.{self.element_column}") == col("e.element_code"),
+ "left",
+ )
+ .drop("element_code")
  )
- .drop("element_code")
- )

  df_obs_denorm = df_intermediate

@@ -364,16 +366,17 @@ class SWSBronzeIcebergSparkHelper:
  )
  logging.debug(f"After join count: {df_obs_denorm.count()}")

- df_obs_denorm = (
- df_obs_denorm.alias("d")
- .join(
- F.broadcast(df_element_uom).alias("e"),
- col(f"d.{self.element_column}") == col("e.element_code"),
- "left",
+ if df_element_uom is not None:
+ df_obs_denorm = (
+ df_obs_denorm.alias("d")
+ .join(
+ F.broadcast(df_element_uom).alias("e"),
+ col(f"d.{self.element_column}") == col("e.element_code"),
+ "left",
+ )
+ .drop("element_code")
  )
- .drop("element_code")
- )
- logging.debug(f"After uom count: {df_obs_denorm.count()}")
+ logging.debug(f"After uom count: {df_obs_denorm.count()}")

  return df_obs_denorm

@@ -766,3 +769,29 @@ class SWSBronzeIcebergSparkHelper:
  logging.debug(f"Tag with Added csv Table: {tag}")

  logging.info("Bronze Disseminated tag with selection successfully written")
+
+
+ 1
+ frozenset({"8", "4", "2", "5", "9", "1", "7", "6", "0", "3"})
+ 1
+ 1
+ 2
+ frozenset({"8", "4", "2", "5", "9", "1", "7", "6", "0", "3"})
+ 2
+ 1
+ 1
+ frozenset({"8", "4", "2", "5", "9", "1", "7", "6", "0", "3"})
+ 1
+ 1
+ 2
+ frozenset({"8", "4", "2", "5", "9", "1", "7", "6", "0", "3"})
+ 2
+ 1
+ 1
+ frozenset({"8", "4", "2", "5", "9", "1", "7", "6", "0", "3"})
+ 1
+ 1
+ 1
+ frozenset({"8", "4", "2", "5", "9", "1", "7", "6", "0", "3"})
+ 1
+ 1
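The functional change above makes the unit-of-measure join optional: _prepare_element_uom now returns None when the element dimension has no unit_of_measure column, and both denormalization paths skip the broadcast join in that case. A minimal local sketch of the same guard, assuming pyspark 3.5.x and toy DataFrames; all names and values here are illustrative, not the helper's real inputs:

from typing import Optional

from pyspark.sql import DataFrame, SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import col

spark = SparkSession.builder.master("local[*]").getOrCreate()

# Toy stand-ins for the element dimension and the unit_of_measure reference table
df_element = spark.createDataFrame([("5510", 1)], ["code", "unit_of_measure"])
df_unit_of_measure = spark.createDataFrame(
    [(1, "t", "t", True, 1.0)],
    ["id", "code", "symbol", "base_unit", "multiplier"],
)

def prepare_element_uom(df_element: DataFrame) -> Optional[DataFrame]:
    # Mirror the new guard: only build the join when a unit_of_measure column exists
    if not any(c.lower() == "unit_of_measure" for c in df_element.columns):
        return None
    return (
        df_element.alias("e")
        .join(df_unit_of_measure.alias("u"), col("e.unit_of_measure") == col("u.id"))
        .select(
            col("e.code").alias("element_code"),
            col("u.code").alias("unit_of_measure"),
            col("u.symbol").alias("unit_of_measure_symbol"),
        )
    )

df_element_uom = prepare_element_uom(df_element)
df_obs = spark.createDataFrame([("5510", 100.0)], ["element", "value"])

# Apply the broadcast join only when the unit-of-measure frame could be built
if df_element_uom is not None:
    df_obs = (
        df_obs.alias("d")
        .join(
            F.broadcast(df_element_uom).alias("e"),
            col("d.element") == col("e.element_code"),
            "left",
        )
        .drop("element_code")
    )
df_obs.show()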
src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py

@@ -87,7 +87,12 @@ class SWSGoldIcebergSparkHelper:
  return df.filter(col("diss_flag"))

  def keep_dim_val_attr_columns(self, df: DataFrame):
- return df.select(*self.cols_to_keep_sws)
+ cols_to_keep_sws = self.cols_to_keep_sws
+ if "note" in df.columns:
+ cols_to_keep_sws = cols_to_keep_sws + ["note"]
+ if "unit_of_measure_symbol" in df.columns:
+ cols_to_keep_sws = cols_to_keep_sws + ["unit_of_measure_symbol"]
+ return df.select(*cols_to_keep_sws)

  def read_bronze_data(self) -> DataFrame:
  return self.spark.read.option("tag", self.tag_name).table(
@@ -296,6 +301,35 @@ class SWSGoldIcebergSparkHelper:

  return df

+ def write_gold_faostat_unfiltered_data_to_iceberg_and_csv(
+ self, df: DataFrame
+ ) -> DataFrame:
+ """The expected input to this function is the output of the sws disseminated function"""
+ df.writeTo(
+ self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.iceberg_id
+ ).createOrReplace()
+
+ logging.info(
+ f"Gold FAOSTAT unfiltered table written to {self.iceberg_tables.GOLD_FAOSTAT.iceberg_id}"
+ )
+
+ self.spark.sql(
+ f"ALTER TABLE {self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.iceberg_id} CREATE OR REPLACE TAG `{self.tag_name}`"
+ )
+
+ logging.info(f"gold FAOSTAT unfiltered tag '{self.tag_name}' created")
+
+ df_1 = df.coalesce(1)
+
+ save_cache_csv(
+ df=df_1,
+ bucket=self.bucket,
+ prefix=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.csv_prefix,
+ tag_name=self.tag_name,
+ )
+
+ return df
+
  def write_gold_sws_validated_sws_dissemination_tag(
  self, df: DataFrame, tags: Tags
  ) -> DataFrame:
@@ -589,3 +623,78 @@ class SWSGoldIcebergSparkHelper:
  logging.debug(f"Tag with Added csv Table: {tag}")

  return df
+
+ def write_gold_faostat_unfiltered_dissemination_tag(
+ self, df: DataFrame, tags: Tags
+ ) -> DataFrame:
+ # Get or create a new tag
+ tag = get_or_create_tag(tags, self.dataset_id, self.tag_name, self.tag_name)
+ logging.debug(f"Tag: {tag}")
+
+ new_iceberg_table = BaseDisseminatedTagTable(
+ id=f"{self.domain_code.lower()}_gold_faostat_unfiltered_iceberg",
+ name=f"{self.domain_code} gold FAOSTAT unfiltered Iceberg",
+ description="Gold table containing all the tag data in FAOSTAT format",
+ layer=TableLayer.GOLD,
+ private=True,
+ type=TableType.ICEBERG,
+ database=IcebergDatabases.GOLD_DATABASE,
+ table=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.table,
+ path=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.path,
+ structure={"columns": df.schema.jsonValue()["fields"]},
+ )
+ tag = upsert_disseminated_table(
+ sws_tags=tags,
+ tag=tag,
+ dataset_id=self.dataset_id,
+ tag_name=self.tag_name,
+ table=new_iceberg_table,
+ )
+ logging.debug(f"Tag with Added Iceberg Table: {tag}")
+
+ new_diss_table = BaseDisseminatedTagTable(
+ id=f"{self.domain_code.lower()}_gold_faostat_unfiltered_csv",
+ name=f"{self.domain_code} gold FAOSTAT unfiltered csv",
+ description="Gold table containing the tag data in FAOSTAT format in csv",
+ layer=TableLayer.GOLD,
+ private=True,
+ type=TableType.CSV,
+ path=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.csv_path,
+ structure={"columns": df.schema.jsonValue()["fields"]},
+ )
+ tag = upsert_disseminated_table(
+ sws_tags=tags,
+ tag=tag,
+ dataset_id=self.dataset_id,
+ tag_name=self.tag_name,
+ table=new_diss_table,
+ )
+ logging.debug(f"Tag with Added csv Table: {tag}")
+
+ return df
+
+
+ 1
+ frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
+ 1
+ 1
+ 2
+ frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
+ 2
+ 1
+ 1
+ frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
+ 1
+ 1
+ 2
+ frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
+ 2
+ 1
+ 1
+ frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
+ 1
+ 1
+ 1
+ frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
+ 1
+ 1
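The new write_gold_faostat_unfiltered_data_to_iceberg_and_csv method follows the same write-then-tag pattern as the existing gold outputs: replace the Iceberg table, pin its current state under a named tag, and export a single-part CSV copy. A stripped-down sketch of that pattern with plain Spark, assuming a SparkSession configured with an Iceberg catalog and its SQL extensions; the table name, tag, and bucket path are illustrative, and the CSV write stands in for the helper's save_cache_csv call:

from pyspark.sql import DataFrame, SparkSession

def write_unfiltered(
    spark: SparkSession,
    df: DataFrame,
    table: str = "gold.demo_faostat_unfiltered",  # illustrative table name
    tag_name: str = "2024.1",                     # illustrative tag name
) -> DataFrame:
    # Replace the Iceberg table with the current DataFrame contents
    df.writeTo(table).createOrReplace()
    # Pin this state of the table under a named Iceberg tag
    spark.sql(f"ALTER TABLE {table} CREATE OR REPLACE TAG `{tag_name}`")
    # Single-part CSV export next to the table data
    df.coalesce(1).write.mode("overwrite").option("header", True).csv(
        f"s3://example-bucket/csv/{table}/{tag_name}"
    )
    return df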
src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py

@@ -103,7 +103,7 @@ class SWSSilverIcebergSparkHelper:
  # The diss_flag column is needed to initialize the condition expression
  # The note column will contain the eventual reasons why diss_flag has been set to false
  return df.withColumn("diss_flag", lit(True)).withColumn(
- "note", lit([]).cast(ArrayType(StringType()))
+ "diss_note", lit([]).cast(ArrayType(StringType()))
  )

  def read_bronze_data(self) -> DataFrame:
@@ -182,7 +182,7 @@ class SWSSilverIcebergSparkHelper:
  for column in cols_to_select
  if column.lower()
  not in (
- "note",
+ "diss_note",
  f"{col_name_lower}_start_date",
  f"{col_name_lower}_end_date",
  )
@@ -203,36 +203,36 @@ class SWSSilverIcebergSparkHelper:
  .withColumn("valid_new_start_year", col("sy.new_code").isNotNull())
  .withColumn("valid_new_end_year", col("ey.new_code").isNotNull())
  .withColumn(
- "new_note",
+ "new_diss_note",
  F.when(
  col("valid_new_start_year"),
  F.array_append(
- col("d.note"),
+ col("d.diss_note"),
  F.concat(
- col("sy.note"),
+ col("sy.diss_note"),
  lit(" from "),
  col("sy.old_code"),
  lit(" to "),
  col("sy.new_code"),
  ),
  ),
- ).otherwise(col("d.note")),
+ ).otherwise(col("d.diss_note")),
  )
  .withColumn(
- "new_note",
+ "new_diss_note",
  F.when(
  col("valid_new_end_year"),
  F.array_append(
- col("new_note"),
+ col("new_diss_note"),
  F.concat(
- col("ey.note"),
+ col("ey.diss_note"),
  lit(" from "),
  col("ey.old_code"),
  lit(" to "),
  col("ey.new_code"),
  ),
  ),
- ).otherwise(col("new_note")),
+ ).otherwise(col("new_diss_note")),
  )
  .withColumn(
  f"new_{col_name}_start_date",
@@ -249,7 +249,7 @@ class SWSSilverIcebergSparkHelper:
  )
  .select(
  *cols_to_select,
- col("new_note").alias("note"),
+ col("new_diss_note").alias("diss_note"),
  col(f"new_{col_name}_start_date").alias(f"{col_name}_start_date"),
  col(f"new_{col_name}_end_date").alias(f"{col_name}_end_date"),
  )
@@ -270,15 +270,15 @@ class SWSSilverIcebergSparkHelper:
  start_date_condition & end_date_condition,
  )
  .withColumn("diss_flag", col("diss_flag") & col("condition_result"))
- # In case the condition is satisfied update diss_flag accordingly and append a note indicating the reason for the observation exclusion from the dissemination
+ # In case the condition is satisfied update diss_flag accordingly and append a diss_note indicating the reason for the observation exclusion from the dissemination
  .withColumn(
- "note",
+ "diss_note",
  F.when(
  ~col("condition_result"),
  F.array_append(
- col("note"), lit(f"{col_type} out of time validity range")
+ col("diss_note"), lit(f"{col_type} out of time validity range")
  ),
- ).otherwise(col("note")),
+ ).otherwise(col("diss_note")),
  )
  .drop("condition_result")
  )
@@ -390,7 +390,7 @@ class SWSSilverIcebergSparkHelper:
  col_name (str): The DataFrame column name on which to apply the filter

  Returns:
- DataFrame: The DataFrame with updated `diss_flag` and `note` columns based on the check outcome
+ DataFrame: The DataFrame with updated `diss_flag` and `diss_note` columns based on the check outcome
  """

  # Remove the duplicates that may be in the tables
@@ -428,14 +428,14 @@ class SWSSilverIcebergSparkHelper:
  col("diss_flag") & col("condition_result"),
  )
  .withColumn(
- "note",
+ "diss_note",
  F.when(
  ~col("condition_result"),
  F.array_append(
- col("note"),
+ col("diss_note"),
  lit(f"{col_type} not disseminated for this domain"),
  ),
- ).otherwise(col("note")),
+ ).otherwise(col("diss_note")),
  )
  .drop("condition_result")
  )
@@ -522,16 +522,16 @@ class SWSSilverIcebergSparkHelper:
  col("diss_flag") & col("condition_result"),
  )
  .withColumn(
- "note",
+ "diss_note",
  F.when(
  ~col("condition_result"),
  F.array_append(
- col("note"),
+ col("diss_note"),
  lit(
  f"not disseminated according to exception with note: {row_exception['note']}"
  ),
  ),
- ).otherwise(col("note")),
+ ).otherwise(col("diss_note")),
  )
  .drop("condition_result")
  )
@@ -616,7 +616,7 @@ class SWSSilverIcebergSparkHelper:

  df = (
  df.withColumn("metadata", F.to_json(col("metadata")))
- .withColumn("note", F.to_json(col("note")))
+ .withColumn("diss_note", F.to_json(col("diss_note")))
  .coalesce(1)
  )

@@ -650,7 +650,7 @@ class SWSSilverIcebergSparkHelper:
  "value",
  *self.flag_columns,
  "diss_flag",
- "note",
+ "diss_note",
  ],
  )
  tag = upsert_disseminated_table(
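Throughout the silver layer the working note column is renamed to diss_note, which keeps it distinct from any pre-existing note column (the gold helper above now also preserves such a column when present). A minimal local sketch of the diss_flag / diss_note bookkeeping used in these checks, assuming pyspark 3.5.x as pinned; the year column and the 2000 cut-off are illustrative, not the helper's real validity rule:

from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import col, lit
from pyspark.sql.types import ArrayType, StringType

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1995, 10.0), (2010, 20.0)], ["year", "value"])

# Start every observation as disseminated, with an empty list of exclusion reasons
df = df.withColumn("diss_flag", lit(True)).withColumn(
    "diss_note", lit([]).cast(ArrayType(StringType()))
)

condition = col("year") >= lit(2000)  # illustrative check
df = (
    df.withColumn("condition_result", condition)
    .withColumn("diss_flag", col("diss_flag") & col("condition_result"))
    # Append the exclusion reason only when the check fails, as in the helper
    .withColumn(
        "diss_note",
        F.when(
            ~col("condition_result"),
            F.array_append(col("diss_note"), lit("year out of time validity range")),
        ).otherwise(col("diss_note")),
    )
    .drop("condition_result")
)
df.show(truncate=False)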
src/sws_spark_dissemination_helper/constants.py

@@ -254,37 +254,37 @@ class IcebergTables:
  self.__tag_name = tag_name

  # TODO Fix later with a more appropriate DATABASE
- self.DENORMALIZED_OBSERVATION = self._create_iceberg_table("BRONZE", suffix="denormalized_observation")
- self.DENORMALIZED_METADATA = self._create_iceberg_table("BRONZE", suffix="denormalized_metadata")
- self.GROUPED_METADATA = self._create_iceberg_table("BRONZE", suffix="grouped_metadata")
- self.TABLE = self._create_iceberg_table("BRONZE")
- self.TABLE_FILTERED = self._create_iceberg_table("BRONZE", suffix="filtered")
- self.BRONZE = self._create_iceberg_table("BRONZE")
- self.BRONZE_DISS_TAG = self._create_iceberg_table("BRONZE", suffix="diss_tag")
- self.SILVER = self._create_iceberg_table("SILVER", prefix=domain)
+ self.DENORMALIZED_OBSERVATION = self.create_iceberg_table("BRONZE", suffix="denormalized_observation")
+ self.DENORMALIZED_METADATA = self.create_iceberg_table("BRONZE", suffix="denormalized_metadata")
+ self.GROUPED_METADATA = self.create_iceberg_table("BRONZE", suffix="grouped_metadata")
+ self.TABLE = self.create_iceberg_table("BRONZE")
+ self.TABLE_FILTERED = self.create_iceberg_table("BRONZE", suffix="filtered")
+ self.BRONZE = self.create_iceberg_table("BRONZE")
+ self.BRONZE_DISS_TAG = self.create_iceberg_table("BRONZE", suffix="diss_tag")
+ self.SILVER = self.create_iceberg_table("SILVER", prefix=domain)

  # GOLD tables with specific suffixes
- self.GOLD_SWS = self._create_iceberg_table("GOLD", prefix=domain, suffix="sws")
- self.GOLD_SDMX = self._create_iceberg_table(
+ self.GOLD_SWS = self.create_iceberg_table("GOLD", prefix=domain, suffix="sws")
+ self.GOLD_SDMX = self.create_iceberg_table(
  "GOLD", prefix=domain, suffix="sdmx_disseminated"
  )
- self.GOLD_SWS_VALIDATED = self._create_iceberg_table(
+ self.GOLD_SWS_VALIDATED = self.create_iceberg_table(
  "GOLD", prefix=domain, suffix="sws_validated"
  )
- self.GOLD_SWS_DISSEMINATED = self._create_iceberg_table(
+ self.GOLD_SWS_DISSEMINATED = self.create_iceberg_table(
  "GOLD", prefix=domain, suffix="sws_disseminated"
  )
- self.GOLD_PRE_SDMX = self._create_iceberg_table(
+ self.GOLD_PRE_SDMX = self.create_iceberg_table(
  "GOLD", prefix=domain, suffix="pre_sdmx"
  )
- self.GOLD_FAOSTAT = self._create_iceberg_table(
+ self.GOLD_FAOSTAT = self.create_iceberg_table(
  "GOLD", prefix=domain, suffix="faostat"
  )
- self.GOLD_FAOSTAT_UNFILTERED = self._create_iceberg_table(
+ self.GOLD_FAOSTAT_UNFILTERED = self.create_iceberg_table(
  "GOLD", prefix=domain, suffix="faostat_unfiltered"
  )

- def _create_iceberg_table(
+ def create_iceberg_table(
  self, level: str, prefix: str = "", suffix: str = ""
  ) -> IcebergTable:
  database = getattr(IcebergDatabases, f"{level}_DATABASE")
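Renaming _create_iceberg_table to create_iceberg_table makes the table factory part of the public surface of IcebergTables, so code outside the class can mint extra IcebergTable handles with the same level/prefix/suffix convention. A hypothetical call, assuming tables is an already-constructed IcebergTables instance; the prefix and suffix values are illustrative, not defined by the package:

# `tables` is assumed to be an existing IcebergTables instance; "qa_checks" is an
# illustrative suffix, not one the package defines.
extra = tables.create_iceberg_table("GOLD", prefix="aproduction", suffix="qa_checks")
print(extra.iceberg_id)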
src/sws_spark_dissemination_helper/utils.py

@@ -363,26 +363,26 @@ def map_codes_and_remove_null_duplicates(
  "diss_flag", F.when(col("delete"), lit(False)).otherwise(col("diss_flag"))
  )
  .withColumn(
- "note",
+ "diss_note",
  F.when(
  col("delete"),
  F.array_append(
- col("note"),
+ col("diss_note"),
  lit(
  f"The observation is not disseminated according to the Mapping - Code correction table"
  ),
  ),
- ).otherwise(col("note")),
+ ).otherwise(col("diss_note")),
  )
  # Add mapping message to notes
  .withColumn(
- "note",
+ "diss_note",
  F.when(
  ~col("is_duplicate")
  & col("new_dim_code").isNotNull()
  & (col("new_dim_code") != lit("")),
  F.array_append(
- col("note"),
+ col("diss_note"),
  F.concat(
  lit(f"Dimension {col_name} code was changed from "),
  col(col_name),
@@ -390,7 +390,7 @@ def map_codes_and_remove_null_duplicates(
  col("new_dim_code"),
  ),
  ),
- ).otherwise(col("note")),
+ ).otherwise(col("diss_note")),
  )
  .withColumn(
  col_name,
@@ -409,18 +409,18 @@ def map_codes_and_remove_null_duplicates(
  ).otherwise(col("diss_flag")),
  )
  .withColumn(
- "note",
+ "diss_note",
  F.when(
  col("is_duplicate")
  & col("new_dim_code").isNotNull()
  & (col("new_dim_code") != lit("")),
  F.array_append(
- col("note"),
+ col("diss_note"),
  lit(
  f"The code correction was not applied to avoid observation duplications"
  ),
  ),
- ).otherwise(col("note")),
+ ).otherwise(col("diss_note")),
  )
  # Check the domain specific multiplier first and then the standard multiplier
  .withColumn("value", col("value") * F.coalesce(col("multiplier"), lit(1)))