sws-spark-dissemination-helper 0.0.176__tar.gz → 0.0.185__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (16)
  1. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/PKG-INFO +2 -2
  2. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/pyproject.toml +2 -2
  3. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +78 -41
  4. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +42 -0
  5. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +2 -2
  6. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/constants.py +22 -3
  7. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/.gitignore +0 -0
  8. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/LICENSE +0 -0
  9. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/README.md +0 -0
  10. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/src/sws_spark_dissemination_helper/utils.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/tests/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.176 → sws_spark_dissemination_helper-0.0.185}/tests/test.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.176
+Version: 0.0.185
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://github.com/un-fao/fao-sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -49,7 +49,7 @@ Requires-Dist: pytz==2025.2
 Requires-Dist: requests==2.32.3
 Requires-Dist: s3transfer>=0.11.2
 Requires-Dist: six==1.17.0
-Requires-Dist: sws-api-client==2.3.0
+Requires-Dist: sws-api-client==2.7.3
 Requires-Dist: typing-extensions>=4.12.2
 Requires-Dist: tzdata==2025.2
 Requires-Dist: urllib3==1.26.20
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "sws-spark-dissemination-helper"
-version = "0.0.176"
+version = "0.0.185"
 dependencies = [
     "annotated-types==0.7.0",
     "boto3>=1.40.0",
@@ -25,7 +25,7 @@ dependencies = [
     "requests==2.32.3",
     "s3transfer>=0.11.2",
     "six==1.17.0",
-    "sws_api_client==2.3.0",
+    "sws_api_client==2.7.3",
     "typing_extensions>=4.12.2",
     "tzdata==2025.2",
     "urllib3==1.26.20"
src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py
@@ -8,13 +8,9 @@ from pyspark.sql.functions import col, lit
 from sws_api_client import Tags
 from sws_api_client.tags import BaseDisseminatedTagTable, TableLayer, TableType

-from .constants import IcebergDatabases, IcebergTables
+from .constants import IcebergDatabases, IcebergTables, DatasetDatatables
 from .SWSPostgresSparkReader import SWSPostgresSparkReader
-from .utils import (
-    get_or_create_tag,
-    save_cache_csv,
-    upsert_disseminated_table,
-)
+from .utils import get_or_create_tag, save_cache_csv, upsert_disseminated_table


 class SWSGoldIcebergSparkHelper:
@@ -66,6 +62,12 @@ class SWSGoldIcebergSparkHelper:
             if col_name in self.dim_columns
         }

+        self.display_decimals = (
+            self.sws_postgres_spark_reader.get_display_decimals_datatable(
+                domain_code=domain_code
+            )
+        )
+
     def _get_dim_time_flag_columns(self) -> Tuple[List[str], List[str], str, List[str]]:
         """Extract the dimension columns with time, without time, the time column and the flag columns names."""
         dim_columns_w_time = [
@@ -86,14 +88,67 @@
     def apply_diss_flag_filter(self, df: DataFrame) -> DataFrame:
         return df.filter(col("diss_flag"))

-    def keep_dim_val_attr_columns(self, df: DataFrame):
+    def keep_dim_val_attr_columns(
+        self, df: DataFrame, additional_columns: List[str] = []
+    ):
         cols_to_keep_sws = self.cols_to_keep_sws
-        if "note" in df.columns:
-            cols_to_keep_sws = cols_to_keep_sws + ["note"]
+        for additional_column in additional_columns:
+            if additional_column in df.columns:
+                cols_to_keep_sws = cols_to_keep_sws + [additional_column]
         if "unit_of_measure_symbol" in df.columns:
             cols_to_keep_sws = cols_to_keep_sws + ["unit_of_measure_symbol"]
         return df.select(*cols_to_keep_sws)

+    def round_to_display_decimals(self, df: DataFrame):
+        col1_name, col2_name = (
+            self.display_decimals.select("column_1_name", "column_2_name")
+            .distinct()
+            .collect()[0]
+        )
+        if col1_name.lower() not in [column.lower() for column in df.columns]:
+            raise ValueError(
+                f"{col1_name} is not part of the columns available for this dataset ({df.columns})"
+            )
+        if col2_name.lower() not in [column.lower() for column in df.columns]:
+            raise ValueError(
+                f"{col2_name} is not part of the columns available for this dataset ({df.columns})"
+            )
+
+        df = (
+            df.alias("d")
+            .join(
+                self.display_decimals.alias("dd"),
+                on=(col(f"d.{col1_name}") == col("dd.column_1_value"))
+                & (col(f"d.{col2_name}") == col("dd.column_2_value")),
+                how="left",
+            )
+            .select("d.*", "dd.display_decimals")
+        )
+
+        df.filter(col("display_decimals").isNull()).select(
+            col1_name, col2_name
+        ).distinct()
+        logging.warning(
+            f"The following combinations of {col1_name} and {col2_name} are not available in the table {DatasetDatatables.DISPLAY_DECIMALS.name} and will be assigned to 0"
+        )
+
+        df = df.withColumn(
+            "display_decimals",
+            F.coalesce(col("display_decimals"), lit("0")).cast("INT"),
+        ).withColumn(
+            "value",
+            F.round(
+                F.col("value").cast("FLOAT") * F.pow(10, F.col("display_decimals")), 0
+            )
+            / F.pow(10, F.col("display_decimals")).cast("STRING"),
+        )
+
+        # F.round(
+        #     col("value").cast("FLOAT"), col("display_decimals").cast("INT")
+        # ).cast("STRING"),
+
+        return df
+
     def read_bronze_data(self) -> DataFrame:
         return self.spark.read.option("tag", self.tag_name).table(
             self.iceberg_tables.BRONZE_DISS_TAG.iceberg_id
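The new round_to_display_decimals method rounds "value" to a per-row number of decimals. Spark's round() only accepts a constant scale, which is presumably why the released code scales by a power of ten instead: round(value * 10^d, 0) / 10^d. Two quirks in the hunk above are worth noting: the filter/select/distinct chain that computes the missing combinations discards its result, so the warning cannot list them, and .cast("STRING") binds to the pow divisor rather than the quotient. A minimal self-contained sketch of the scaling trick, with illustrative column names that are not part of the package API:

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(123.4567, 2), (0.98765, 3)], ["value", "display_decimals"]
)
# round(value * 10^d, 0) / 10^d emulates a per-row rounding scale,
# which F.round cannot take as a column argument.
df.withColumn(
    "value_rounded",
    F.round(F.col("value") * F.pow(F.lit(10), F.col("display_decimals")), 0)
    / F.pow(F.lit(10), F.col("display_decimals")),
).show()  # 123.46 for d=2, 0.988 for d=3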
@@ -104,18 +159,26 @@
             self.iceberg_tables.SILVER.iceberg_id
         )

-    def gen_gold_sws_disseminated_data(self) -> DataFrame:
+    def gen_gold_sws_disseminated_data(
+        self, additional_columns: List[str] = []
+    ) -> DataFrame:
         return (
             self.read_silver_data()
             .transform(self.apply_diss_flag_filter)
-            .transform(self.keep_dim_val_attr_columns)
+            .transform(self.keep_dim_val_attr_columns, additional_columns)
         )

-    def gen_gold_sws_data(self) -> DataFrame:
-        return self.read_bronze_data().transform(self.keep_dim_val_attr_columns)
+    def gen_gold_sws_data(self, additional_columns: List[str] = []) -> DataFrame:
+        return self.read_bronze_data().transform(
+            self.keep_dim_val_attr_columns, additional_columns
+        )

-    def gen_gold_sws_validated_data(self) -> DataFrame:
-        return self.read_silver_data().transform(self.keep_dim_val_attr_columns)
+    def gen_gold_sws_validated_data(
+        self, additional_columns: List[str] = []
+    ) -> DataFrame:
+        return self.read_silver_data().transform(
+            self.keep_dim_val_attr_columns, additional_columns
+        )

     def write_gold_sws_validated_data_to_iceberg_and_csv(
         self, df: DataFrame
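The three gen_gold_* entry points now take an additional_columns parameter instead of keep_dim_val_attr_columns hard-coding "note"; names absent from the source DataFrame are silently skipped. A hedged usage sketch, where helper stands for an already-constructed SWSGoldIcebergSparkHelper (construction details omitted):

# Passing ["note"] reproduces the previous hard-coded behaviour.
disseminated = helper.gen_gold_sws_disseminated_data(additional_columns=["note"])
# Extra names are tolerated; a column is kept only if the data actually has it.
validated = helper.gen_gold_sws_validated_data(additional_columns=["note", "diss_note"])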
@@ -672,29 +735,3 @@ class SWSGoldIcebergSparkHelper:
         logging.debug(f"Tag with Added csv Table: {tag}")

         return df
-
-
-1
-frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
-1
-1
-2
-frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
-2
-1
-1
-frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
-1
-1
-2
-frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
-2
-1
-1
-frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
-1
-1
-1
-frozenset({"9", "1", "0", "4", "7", "3", "2", "6", "8", "5"})
-1
-1
src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py
@@ -497,3 +497,45 @@ class SWSPostgresSparkReader:
             "aggregation",
         ],
     )
+
+    def get_display_decimals_datatable(
+        self,
+        domain_code: str,
+    ) -> DataFrame:
+        df = self.read_pg_table(
+            pg_table=DatasetDatatables.DISPLAY_DECIMALS.id,
+            custom_schema=DatasetDatatables.DISPLAY_DECIMALS.schema,
+        ).filter(col("domain") == lit(domain_code))
+
+        pairs = df.select("column_1_name", "column_2_name").distinct().collect()
+
+        # If no config exists for this domain, fail early
+        if not pairs:
+            msg = (
+                f'No display-decimals configuration found for domain "{domain_code}". '
+                f'Please add an entry in table "{DatasetDatatables.DISPLAY_DECIMALS.id}".'
+            )
+            logging.error(msg)
+            # raise ValueError(msg)
+
+        # If more than one mapping exists, it's invalid
+        if len(pairs) > 1:
+            formatted_pairs = [(p["column_1_name"], p["column_2_name"]) for p in pairs]
+
+            msg = (
+                f'Invalid configuration for domain "{domain_code}". '
+                f"Expected exactly one (column_1_name, column_2_name) pair, but found {len(pairs)}: "
+                f"{formatted_pairs}. "
+                f'Please correct the table "{DatasetDatatables.DISPLAY_DECIMALS.id}".'
+            )
+
+            logging.error(
+                "Multiple display-decimals column pairs detected",
+                extra={
+                    "domain": domain_code,
+                    "pairs_found": formatted_pairs,
+                },
+            )
+            raise ValueError(msg)
+
+        return df
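get_display_decimals_datatable requires every row for a domain to share a single (column_1_name, column_2_name) pair; note that the empty-configuration branch only logs, since the raise is commented out, so an empty DataFrame falls through to the return. A self-contained sketch of the uniqueness contract against the schema declared in constants.py, using made-up values:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
# One (element, item) pair shared by both rows: a valid configuration.
dd = spark.createDataFrame(
    [
        ("qcl", "element", "5510", "item", "0111", "2"),
        ("qcl", "element", "5312", "item", "0112", "0"),
    ],
    ["domain", "column_1_name", "column_1_value",
     "column_2_name", "column_2_value", "display_decimals"],
)
pairs = dd.select("column_1_name", "column_2_name").distinct().collect()
assert len(pairs) == 1  # a second pair, e.g. ("indicator", "area"), would be rejected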
src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py
@@ -209,7 +209,7 @@ class SWSSilverIcebergSparkHelper:
                 F.array_append(
                     col("d.diss_note"),
                     F.concat(
-                        col("sy.diss_note"),
+                        col("sy.note"),
                         lit(" from "),
                         col("sy.old_code"),
                         lit(" to "),
@@ -225,7 +225,7 @@
                 F.array_append(
                     col("new_diss_note"),
                     F.concat(
-                        col("ey.diss_note"),
+                        col("ey.note"),
                         lit(" from "),
                         col("ey.old_code"),
                         lit(" to "),
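Both hunks read the remap note from the year-mapping alias's note column instead of its diss_note column when building the string appended to diss_note. A hedged sketch of the resulting expression (array_append needs Spark 3.4+; the "new_code" tail is an assumption, since the end of the concat falls outside the hunk context):

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
d = spark.createDataFrame(
    [(["earlier note"], "Start year moved", "2009", "2010")],
    ["diss_note", "note", "old_code", "new_code"],
)
d.select(
    F.array_append(
        F.col("diss_note"),
        F.concat(F.col("note"), F.lit(" from "), F.col("old_code"),
                 F.lit(" to "), F.col("new_code")),
    ).alias("diss_note")
).show(truncate=False)  # [earlier note, Start year moved from 2009 to 2010]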
src/sws_spark_dissemination_helper/constants.py
@@ -81,6 +81,19 @@ class DatasetDatatables:
            " method_flag",
        ],
    )
+    DISPLAY_DECIMALS = __SWSDatatable(
+        id="datatables.display_decimals",
+        name="Dissemination - Display Decimals",
+        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, column_1_name STRING, column_1_value STRING, column_2_name STRING, column_2_value STRING, display_decimals STRING",
+        join_columns=[
+            "domain",
+            "column_1_name",
+            "column_1_value",
+            "column_2_name",
+            "column_2_value",
+            "display_decimals",
+        ],
+    )
     # TODO Deprecate
     DISSEMINATION_ITEM_LIST_FAOSTAT = __SWSDatatable(
         id="datatables.dissemination_item_list_faostat",
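The new constant is consumed twice elsewhere in this diff: .id and .schema by SWSPostgresSparkReader.get_display_decimals_datatable, and .name in the gold helper's warning message. Illustrative access, assuming the package layout shown in the file list above:

from sws_spark_dissemination_helper.constants import DatasetDatatables

table = DatasetDatatables.DISPLAY_DECIMALS
print(table.id)    # datatables.display_decimals
print(table.name)  # Dissemination - Display Decimals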
@@ -254,9 +267,15 @@ class IcebergTables:
         self.__tag_name = tag_name

         # TODO Fix later with a more appropriate DATABASE
-        self.DENORMALIZED_OBSERVATION = self.create_iceberg_table("BRONZE", suffix="denormalized_observation")
-        self.DENORMALIZED_METADATA = self.create_iceberg_table("BRONZE", suffix="denormalized_metadata")
-        self.GROUPED_METADATA = self.create_iceberg_table("BRONZE", suffix="grouped_metadata")
+        self.DENORMALIZED_OBSERVATION = self.create_iceberg_table(
+            "BRONZE", suffix="denormalized_observation"
+        )
+        self.DENORMALIZED_METADATA = self.create_iceberg_table(
+            "BRONZE", suffix="denormalized_metadata"
+        )
+        self.GROUPED_METADATA = self.create_iceberg_table(
+            "BRONZE", suffix="grouped_metadata"
+        )
         self.TABLE = self.create_iceberg_table("BRONZE")
         self.TABLE_FILTERED = self.create_iceberg_table("BRONZE", suffix="filtered")
         self.BRONZE = self.create_iceberg_table("BRONZE")