sws-spark-dissemination-helper 0.0.173__tar.gz → 0.0.180__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (16)
  1. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +64 -7
  4. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +42 -0
  5. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +24 -24
  6. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/constants.py +22 -3
  7. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/.gitignore +0 -0
  8. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/LICENSE +0 -0
  9. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/README.md +0 -0
  10. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/src/sws_spark_dissemination_helper/utils.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/tests/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.173 → sws_spark_dissemination_helper-0.0.180}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.173
+Version: 0.0.180
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://github.com/un-fao/fao-sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "sws-spark-dissemination-helper"
-version = "0.0.173"
+version = "0.0.180"
 dependencies = [
     "annotated-types==0.7.0",
     "boto3>=1.40.0",
@@ -8,13 +8,9 @@ from pyspark.sql.functions import col, lit
 from sws_api_client import Tags
 from sws_api_client.tags import BaseDisseminatedTagTable, TableLayer, TableType
 
-from .constants import IcebergDatabases, IcebergTables
+from .constants import IcebergDatabases, IcebergTables, DatasetDatatables
 from .SWSPostgresSparkReader import SWSPostgresSparkReader
-from .utils import (
-    get_or_create_tag,
-    save_cache_csv,
-    upsert_disseminated_table,
-)
+from .utils import get_or_create_tag, save_cache_csv, upsert_disseminated_table
 
 
 class SWSGoldIcebergSparkHelper:
@@ -66,6 +62,12 @@ class SWSGoldIcebergSparkHelper:
             if col_name in self.dim_columns
         }
 
+        self.display_decimals = (
+            self.sws_postgres_spark_reader.get_display_decimals_datatable(
+                domain_code=domain_code
+            )
+        )
+
     def _get_dim_time_flag_columns(self) -> Tuple[List[str], List[str], str, List[str]]:
         """Extract the dimension columns with time, without time, the time column and the flag columns names."""
         dim_columns_w_time = [
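
The `display_decimals` attribute loaded above feeds the `round_to_display_decimals` method added in the next hunk. A hedged sketch of how the Gold helper's new hooks might chain together (the call order and the pre-built `helper` instance are assumptions for illustration, not taken from the package):

    # Hypothetical wiring of the new SWSGoldIcebergSparkHelper hooks;
    # `helper` is an already-constructed instance and the order is assumed.
    df = helper.read_bronze_data()             # read the tagged bronze table
    df = helper.keep_dim_val_attr_columns(df)  # keep dims/values plus optional note columns
    df = helper.round_to_display_decimals(df)  # round "value" per the datatable config
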
@@ -87,7 +89,62 @@ class SWSGoldIcebergSparkHelper:
         return df.filter(col("diss_flag"))
 
     def keep_dim_val_attr_columns(self, df: DataFrame):
-        return df.select(*self.cols_to_keep_sws)
+        cols_to_keep_sws = self.cols_to_keep_sws
+        if "note" in df.columns:
+            cols_to_keep_sws = cols_to_keep_sws + ["note"]
+        if "unit_of_measure_symbol" in df.columns:
+            cols_to_keep_sws = cols_to_keep_sws + ["unit_of_measure_symbol"]
+        return df.select(*cols_to_keep_sws)
+
+    def round_to_display_decimals(self, df: DataFrame):
+        col1_name, col2_name = (
+            self.display_decimals.select("column_1_name", "column_2_name")
+            .distinct()
+            .collect()[0]
+        )
+        if col1_name.lower() not in [column.lower() for column in df.columns]:
+            raise ValueError(
+                f"{col1_name} is not part of the columns available for this dataset ({df.columns})"
+            )
+        if col2_name.lower() not in [column.lower() for column in df.columns]:
+            raise ValueError(
+                f"{col2_name} is not part of the columns available for this dataset ({df.columns})"
+            )
+
+        df = (
+            df.alias("d")
+            .join(
+                self.display_decimals.alias("dd"),
+                on=(col(f"d.{col1_name}") == col("dd.column_1_value"))
+                & (col(f"d.{col2_name}") == col("dd.column_2_value")),
+                how="left",
+            )
+            .select("d.*", "dd.display_decimals")
+        )
+
+        df.filter(col("display_decimals").isNull()).select(
+            col1_name, col2_name
+        ).distinct()
+        logging.warning(
+            f"The following combinations of {col1_name} and {col2_name} are not available in the table {DatasetDatatables.DISPLAY_DECIMALS.name} and will be assigned to 0"
+        )
+
+        df = df.withColumn(
+            "display_decimals",
+            F.coalesce(col("display_decimals"), lit("0")).cast("INT"),
+        ).withColumn(
+            "value",
+            F.round(
+                F.col("value").cast("FLOAT") * F.pow(10, F.col("display_decimals")), 0
+            )
+            / F.pow(10, F.col("display_decimals")).cast("STRING"),
+        )
+
+        # F.round(
+        #     col("value").cast("FLOAT"), col("display_decimals").cast("INT")
+        # ).cast("STRING"),
+
+        return df
 
     def read_bronze_data(self) -> DataFrame:
         return self.spark.read.option("tag", self.tag_name).table(
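
The new `round_to_display_decimals` rounds each row to its own decimal count with a scale-and-divide trick: `F.round(col, scale)` takes its scale as a Python int rather than a per-row Column, so the value is multiplied by 10^d, rounded to zero decimals, and divided back (round(v, d) == round(v * 10^d) / 10^d). A minimal self-contained sketch of that idea, assuming a local Spark session and applying the final string cast to the whole quotient:

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.master("local[1]").getOrCreate()
    df = spark.createDataFrame(
        [("3.14159", 2), ("2.71828", 3), ("1234.5", 0)],
        ["value", "display_decimals"],
    )
    # round(v, d) == round(v * 10^d) / 10^d, with d varying per row
    df = df.withColumn(
        "value",
        (
            F.round(F.col("value").cast("float") * F.pow(10, F.col("display_decimals")), 0)
            / F.pow(10, F.col("display_decimals"))
        ).cast("string"),
    )
    df.show()  # -> 3.14, 2.718, 1235.0
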
@@ -497,3 +497,45 @@ class SWSPostgresSparkReader:
                 "aggregation",
             ],
         )
+
+    def get_display_decimals_datatable(
+        self,
+        domain_code: str,
+    ) -> DataFrame:
+        df = self.read_pg_table(
+            pg_table=DatasetDatatables.DISPLAY_DECIMALS.id,
+            custom_schema=DatasetDatatables.DISPLAY_DECIMALS.schema,
+        ).filter(col("domain") == lit(domain_code))
+
+        pairs = df.select("column_1_name", "column_2_name").distinct().collect()
+
+        # If no config exists for this domain, fail early
+        if not pairs:
+            msg = (
+                f'No display-decimals configuration found for domain "{domain_code}". '
+                f'Please add an entry in table "{DatasetDatatables.DISPLAY_DECIMALS.id}".'
+            )
+            logging.error(msg)
+            # raise ValueError(msg)
+
+        # If more than one mapping exists, it's invalid
+        if len(pairs) > 1:
+            formatted_pairs = [(p["column_1_name"], p["column_2_name"]) for p in pairs]
+
+            msg = (
+                f'Invalid configuration for domain "{domain_code}". '
+                f"Expected exactly one (column_1_name, column_2_name) pair, but found {len(pairs)}: "
+                f"{formatted_pairs}. "
+                f'Please correct the table "{DatasetDatatables.DISPLAY_DECIMALS.id}".'
+            )
+
+            logging.error(
+                "Multiple display-decimals column pairs detected",
+                extra={
+                    "domain": domain_code,
+                    "pairs_found": formatted_pairs,
+                },
+            )
+            raise ValueError(msg)
+
+        return df
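
`get_display_decimals_datatable` enforces that a domain maps to exactly one `(column_1_name, column_2_name)` pair: zero pairs only logs an error (the `raise` is commented out), while multiple pairs raise. A small sketch of that invariant on made-up rows (the domain and column names below are hypothetical, not from the package):

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").getOrCreate()
    rows = [  # hypothetical display-decimals configuration for one domain
        ("QCL", "element", "5510", "item", "0111", "2"),
        ("QCL", "element", "5312", "item", "0112", "0"),
    ]
    df = spark.createDataFrame(
        rows,
        "domain STRING, column_1_name STRING, column_1_value STRING, "
        "column_2_name STRING, column_2_value STRING, display_decimals STRING",
    )
    pairs = df.select("column_1_name", "column_2_name").distinct().collect()
    assert len(pairs) == 1  # a second distinct pair would trigger the ValueError above
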
@@ -103,7 +103,7 @@ class SWSSilverIcebergSparkHelper:
         # The diss_flag column is needed to initialize the condition expression
         # The note column will contain the eventual reasons why diss_flag has been set to false
         return df.withColumn("diss_flag", lit(True)).withColumn(
-            "note", lit([]).cast(ArrayType(StringType()))
+            "diss_note", lit([]).cast(ArrayType(StringType()))
         )
 
     def read_bronze_data(self) -> DataFrame:
@@ -182,7 +182,7 @@ class SWSSilverIcebergSparkHelper:
             for column in cols_to_select
             if column.lower()
             not in (
-                "note",
+                "diss_note",
                 f"{col_name_lower}_start_date",
                 f"{col_name_lower}_end_date",
             )
@@ -203,36 +203,36 @@ class SWSSilverIcebergSparkHelper:
             .withColumn("valid_new_start_year", col("sy.new_code").isNotNull())
             .withColumn("valid_new_end_year", col("ey.new_code").isNotNull())
             .withColumn(
-                "new_note",
+                "new_diss_note",
                 F.when(
                     col("valid_new_start_year"),
                     F.array_append(
-                        col("d.note"),
+                        col("d.diss_note"),
                         F.concat(
-                            col("sy.note"),
+                            col("sy.diss_note"),
                             lit(" from "),
                             col("sy.old_code"),
                             lit(" to "),
                             col("sy.new_code"),
                         ),
                     ),
-                ).otherwise(col("d.note")),
+                ).otherwise(col("d.diss_note")),
             )
             .withColumn(
-                "new_note",
+                "new_diss_note",
                 F.when(
                     col("valid_new_end_year"),
                     F.array_append(
-                        col("new_note"),
+                        col("new_diss_note"),
                         F.concat(
-                            col("ey.note"),
+                            col("ey.diss_note"),
                             lit(" from "),
                             col("ey.old_code"),
                             lit(" to "),
                             col("ey.new_code"),
                         ),
                     ),
-                ).otherwise(col("new_note")),
+                ).otherwise(col("new_diss_note")),
             )
             .withColumn(
                 f"new_{col_name}_start_date",
@@ -249,7 +249,7 @@ class SWSSilverIcebergSparkHelper:
             )
             .select(
                 *cols_to_select,
-                col("new_note").alias("note"),
+                col("new_diss_note").alias("diss_note"),
                 col(f"new_{col_name}_start_date").alias(f"{col_name}_start_date"),
                 col(f"new_{col_name}_end_date").alias(f"{col_name}_end_date"),
             )
@@ -270,15 +270,15 @@ class SWSSilverIcebergSparkHelper:
                 start_date_condition & end_date_condition,
             )
             .withColumn("diss_flag", col("diss_flag") & col("condition_result"))
-            # In case the condition is satisfied update diss_flag accordingly and append a note indicating the reason for the observation exclusion from the dissemination
+            # In case the condition is satisfied update diss_flag accordingly and append a diss_note indicating the reason for the observation exclusion from the dissemination
             .withColumn(
-                "note",
+                "diss_note",
                 F.when(
                     ~col("condition_result"),
                     F.array_append(
-                        col("note"), lit(f"{col_type} out of time validity range")
+                        col("diss_note"), lit(f"{col_type} out of time validity range")
                     ),
-                ).otherwise(col("note")),
+                ).otherwise(col("diss_note")),
             )
             .drop("condition_result")
         )
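
Throughout SWSSilverIcebergSparkHelper.py this release renames the `note` column to `diss_note`: an array of strings accumulating one entry per failed dissemination check, as in the hunk above. A toy sketch of the accumulation pattern (`F.array_append` and list-valued `F.lit` require Spark 3.4+; the year rule here is invented for illustration):

    from pyspark.sql import SparkSession, functions as F
    from pyspark.sql.types import ArrayType, StringType

    spark = SparkSession.builder.master("local[1]").getOrCreate()
    df = spark.createDataFrame([("a", 1999), ("b", 2030)], ["code", "year"])

    df = (
        df.withColumn("diss_flag", F.lit(True))
        .withColumn("diss_note", F.lit([]).cast(ArrayType(StringType())))
        .withColumn("condition_result", F.col("year") <= 2025)  # invented check
        .withColumn("diss_flag", F.col("diss_flag") & F.col("condition_result"))
        .withColumn(
            "diss_note",
            F.when(
                ~F.col("condition_result"),
                F.array_append(F.col("diss_note"), F.lit("year out of time validity range")),
            ).otherwise(F.col("diss_note")),
        )
        .drop("condition_result")
    )
    df.show(truncate=False)  # row "b": diss_flag=false, one diss_note entry
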
@@ -390,7 +390,7 @@ class SWSSilverIcebergSparkHelper:
             col_name (str): The DataFrame column name on which to apply the filter
 
         Returns:
-            DataFrame: The DataFrame with updated `diss_flag` and `note` columns based on the check outcome
+            DataFrame: The DataFrame with updated `diss_flag` and `diss_note` columns based on the check outcome
         """
 
         # Remove the duplicates that may be in the tables
@@ -428,14 +428,14 @@ class SWSSilverIcebergSparkHelper:
                 col("diss_flag") & col("condition_result"),
             )
             .withColumn(
-                "note",
+                "diss_note",
                 F.when(
                     ~col("condition_result"),
                     F.array_append(
-                        col("note"),
+                        col("diss_note"),
                         lit(f"{col_type} not disseminated for this domain"),
                     ),
-                ).otherwise(col("note")),
+                ).otherwise(col("diss_note")),
             )
             .drop("condition_result")
         )
@@ -522,16 +522,16 @@ class SWSSilverIcebergSparkHelper:
                 col("diss_flag") & col("condition_result"),
             )
             .withColumn(
-                "note",
+                "diss_note",
                 F.when(
                     ~col("condition_result"),
                     F.array_append(
-                        col("note"),
+                        col("diss_note"),
                         lit(
                             f"not disseminated according to exception with note: {row_exception['note']}"
                         ),
                     ),
-                ).otherwise(col("note")),
+                ).otherwise(col("diss_note")),
             )
             .drop("condition_result")
         )
@@ -616,7 +616,7 @@ class SWSSilverIcebergSparkHelper:
 
         df = (
             df.withColumn("metadata", F.to_json(col("metadata")))
-            .withColumn("note", F.to_json(col("note")))
+            .withColumn("diss_note", F.to_json(col("diss_note")))
             .coalesce(1)
         )
 
@@ -650,7 +650,7 @@ class SWSSilverIcebergSparkHelper:
                 "value",
                 *self.flag_columns,
                 "diss_flag",
-                "note",
+                "diss_note",
             ],
         )
         tag = upsert_disseminated_table(
@@ -81,6 +81,19 @@ class DatasetDatatables:
             " method_flag",
         ],
     )
+    DISPLAY_DECIMALS = __SWSDatatable(
+        id="datatables.display_decimals",
+        name="Dissemination - Display Decimals",
+        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, column_1_name STRING, column_1_value STRING, column_2_name STRING, column_2_value STRING, display_decimals STRING",
+        join_columns=[
+            "domain",
+            "column_1_name",
+            "column_1_value",
+            "column_2_name",
+            "column_2_value",
+            "display_decimals",
+        ],
+    )
     # TODO Deprecate
     DISSEMINATION_ITEM_LIST_FAOSTAT = __SWSDatatable(
         id="datatables.dissemination_item_list_faostat",
@@ -254,9 +267,15 @@ class IcebergTables:
         self.__tag_name = tag_name
 
         # TODO Fix later with a more appropriate DATABASE
-        self.DENORMALIZED_OBSERVATION = self.create_iceberg_table("BRONZE", suffix="denormalized_observation")
-        self.DENORMALIZED_METADATA = self.create_iceberg_table("BRONZE", suffix="denormalized_metadata")
-        self.GROUPED_METADATA = self.create_iceberg_table("BRONZE", suffix="grouped_metadata")
+        self.DENORMALIZED_OBSERVATION = self.create_iceberg_table(
+            "BRONZE", suffix="denormalized_observation"
+        )
+        self.DENORMALIZED_METADATA = self.create_iceberg_table(
+            "BRONZE", suffix="denormalized_metadata"
+        )
+        self.GROUPED_METADATA = self.create_iceberg_table(
+            "BRONZE", suffix="grouped_metadata"
+        )
         self.TABLE = self.create_iceberg_table("BRONZE")
         self.TABLE_FILTERED = self.create_iceberg_table("BRONZE", suffix="filtered")
         self.BRONZE = self.create_iceberg_table("BRONZE")