sws-spark-dissemination-helper 0.0.79__py3-none-any.whl → 0.0.183__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,14 +8,9 @@ from pyspark.sql.functions import col, lit
 from sws_api_client import Tags
 from sws_api_client.tags import BaseDisseminatedTagTable, TableLayer, TableType
 
-from .constants import IcebergDatabases, IcebergTables
+from .constants import IcebergDatabases, IcebergTables, DatasetDatatables
 from .SWSPostgresSparkReader import SWSPostgresSparkReader
-from .utils import (
-    col_is_null_or_empty,
-    get_or_create_tag,
-    save_cache_csv,
-    upsert_disseminated_table,
-)
+from .utils import get_or_create_tag, save_cache_csv, upsert_disseminated_table
 
 
 class SWSGoldIcebergSparkHelper:
@@ -47,11 +42,6 @@ class SWSGoldIcebergSparkHelper:
             self.flag_columns,
         ) = self._get_dim_time_flag_columns()
 
-        self.cols_to_keep_sdmx = (
-            self.dim_columns_w_time
-            + ["unit_of_measure", "unit_of_measure_multiplier", "value"]
-            + self.flag_columns
-        )
         self.cols_to_keep_sws = (
             self.dim_columns_w_time + ["value"] + self.flag_columns
         )
@@ -72,41 +62,11 @@ class SWSGoldIcebergSparkHelper:
             if col_name in self.dim_columns
         }
 
-        (
-            self.df_mapping_sdmx_codes,
-            self.df_mapping_sdmx_uom,
-            self.df_mapping_sdmx_col_names,
-        ) = sws_postgres_spark_reader.import_sdmx_mapping_datatables(
-            self.domain_code
-        )
-
-        self._check_column_mappings(self.df_mapping_sdmx_col_names)
-
-    def _check_column_mappings(
-        self,
-        df_mapping_sdmx_col_names: DataFrame,
-    ) -> DataFrame:
-        cols_to_keep_set = set(self.cols_to_keep_sdmx)
-        mapping_sdmx_col_names_internal_set = {
-            row[0]
-            for row in df_mapping_sdmx_col_names.filter(
-                col("internal_name").isNotNull() & (col("internal_name") != lit(""))
-            )
-            .select("internal_name")
-            .collect()
-        }
-
-        if not (cols_to_keep_set <= mapping_sdmx_col_names_internal_set):
-            missing_mappings = cols_to_keep_set - mapping_sdmx_col_names_internal_set
-
-            message = 'The mappings in the table "Mapping - SDMX columns names" are not correct'
-
-            if len(missing_mappings) > 0:
-                message += (
-                    f"\nThe following column mappings are missing: {missing_mappings}"
-                )
-
-            raise ValueError(message)
+        self.display_decimals = (
+            self.sws_postgres_spark_reader.get_display_decimals_datatable(
+                domain_code=domain_code
+            )
+        )
 
     def _get_dim_time_flag_columns(self) -> Tuple[List[str], List[str], str, List[str]]:
         """Extract the dimension columns with time, without time, the time column and the flag columns names."""
@@ -128,318 +88,96 @@ class SWSGoldIcebergSparkHelper:
     def apply_diss_flag_filter(self, df: DataFrame) -> DataFrame:
         return df.filter(col("diss_flag"))
 
-    # TODO implement the delete flag
-    def apply_uom_mapping(
-        self,
-        df: DataFrame,
-    ) -> DataFrame:
-        logging.info("mapping unit of measure for dissemination")
-
-        df = df.withColumn(
-            "official_sws_uom",
-            F.when(
-                col_is_null_or_empty("unit_of_measure_base_unit"),
-                col("unit_of_measure"),
-            ).otherwise(col("unit_of_measure_base_unit")),
-        ).withColumn(
-            "official_sws_multiplier",
-            F.coalesce(F.log10(col("unit_of_measure_multiplier")), lit(0)).cast("int"),
-        )
-
-        delete_df_uom_mapping = self.df_mapping_sdmx_uom.filter(
-            col("delete")
-            & col_is_null_or_empty("sdmx_code")
-            & col("sdmx_multiplier").isNull()
-            & col("value_multiplier").isNull()
-        )
-
-        generic_df_uom_mapping = self.df_mapping_sdmx_uom.filter(
-            ~col("delete")
-            & col("sws_multiplier").isNull()
-            & col("sdmx_multiplier").isNull()
-            & (col("value_multiplier") == lit(0))
-        )
-
-        specific_df_uom_mapping = self.df_mapping_sdmx_uom.filter(
-            ~col("delete")
-            & col("sws_multiplier").isNotNull()
-            & col("sdmx_multiplier").isNotNull()
-        )
-
-        # Apply generic uom mapping
-        df = (
-            df.alias("d")
-            .join(
-                generic_df_uom_mapping.alias("m"),
-                col("d.official_sws_uom") == col("m.sws_code"),
-                "left",
+    def keep_dim_val_attr_columns(
+        self, df: DataFrame, additional_columns: List[str] = []
+    ):
+        cols_to_keep_sws = self.cols_to_keep_sws
+        for additional_column in additional_columns:
+            if additional_column in df.columns:
+                cols_to_keep_sws = cols_to_keep_sws + [additional_column]
+        if "unit_of_measure_symbol" in df.columns:
+            cols_to_keep_sws = cols_to_keep_sws + ["unit_of_measure_symbol"]
+        return df.select(*cols_to_keep_sws)
+
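+    # Rounds "value" to the number of decimals configured per code combination
+    # in the display-decimals datatable; combinations without a match default
+    # to 0 decimals.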
+    def round_to_display_decimals(self, df: DataFrame):
+        col1_name, col2_name = (
+            self.display_decimals.select("column_1_name", "column_2_name")
+            .distinct()
+            .collect()[0]
+        )
+        if col1_name.lower() not in [column.lower() for column in df.columns]:
+            raise ValueError(
+                f"{col1_name} is not part of the columns available for this dataset ({df.columns})"
+            )
+        if col2_name.lower() not in [column.lower() for column in df.columns]:
+            raise ValueError(
+                f"{col2_name} is not part of the columns available for this dataset ({df.columns})"
             )
-            .select("d.*", col("sdmx_code").alias("generic_sdmx_uom"))
-        )
 
-        # Apply specific uom mapping
         df = (
             df.alias("d")
             .join(
-                specific_df_uom_mapping.alias("m"),
-                (col("d.official_sws_uom") == col("m.sws_code"))
-                & (col("d.official_sws_multiplier") == col("m.sws_multiplier")),
-                "left",
-            )
-            .select(
-                "d.*",
-                col("sdmx_code").alias("specific_sdmx_uom"),
-                col("sdmx_multiplier").alias("specific_sdmx_multiplier"),
-                (col("value") * F.pow(lit(10), col("value_multiplier"))).alias(
-                    "specific_sdmx_value"
-                ),
+                self.display_decimals.alias("dd"),
+                on=(col(f"d.{col1_name}") == col("dd.column_1_value"))
+                & (col(f"d.{col2_name}") == col("dd.column_2_value")),
+                how="left",
             )
+            .select("d.*", "dd.display_decimals")
         )
 
-        # Select the official values according to descending specificity
-        df = (
-            df.withColumn(
-                "unit_of_measure",
-                F.coalesce(
-                    col("specific_sdmx_uom"),
-                    col("generic_sdmx_uom"),
-                    col("official_sws_uom"),
-                ),
-            )
-            .withColumn(
-                "unit_of_measure_multiplier",
-                F.coalesce(
-                    col("specific_sdmx_multiplier"), col("official_sws_multiplier")
-                ),
-            )
-            .withColumn(
-                "value",
-                F.coalesce(col("specific_sdmx_value"), col("value")),
-            )
-            # Remove the columns that were not in the original dataset
-            .drop(
-                col("specific_sdmx_uom"),
-                col("specific_sdmx_multiplier"),
-                col("specific_sdmx_value"),
-                col("generic_sdmx_uom"),
-                col("official_sws_uom"),
-                col("official_sws_multiplier"),
-            )
+        df_missing_decimals = df.filter(col("display_decimals").isNull()).select(
+            col1_name, col2_name
+        ).distinct()
+        logging.warning(
+            f"The following combinations of {col1_name} and {col2_name} are not available in the table {DatasetDatatables.DISPLAY_DECIMALS.name} and will be assigned to 0: {df_missing_decimals.collect()}"
         )
 
-        return df
-
-    def keep_dim_uom_val_attr_columns(self, df: DataFrame):
-        return df.select(*self.cols_to_keep_sdmx)
-
-    def keep_dim_val_attr_columns(self, df: DataFrame):
-        return df.select(*self.cols_to_keep_sws)
-
-    def _apply_sdmx_dimension_codes_mapping_single(
-        self,
-        df: DataFrame,
-        dimension_name: str,
-        dimension_type: str,
-    ) -> DataFrame:
-        logging.info(
-            f"mapping column {dimension_name} of type {dimension_type} for dissemination"
-        )
-        return (
-            df.alias("d")
-            # Join the data with the standard mapping for the specific dimension
-            .join(
-                F.broadcast(
-                    self.df_mapping_sdmx_codes.filter(
-                        (col("domain").isNull() | (col("domain") == lit("")))
-                        & (col("var_type") == lit(dimension_type))
-                        & (
-                            col("mapping_type").isNull()
-                            | (col("mapping_type") == lit(""))
-                        )
-                    )
-                ).alias("m_standard"),
-                col(f"d.{dimension_name}") == col("m_standard.internal_code"),
-                "left",
-            )
-            # Join the data with the domain specific mapping for the specific dimension
-            .join(
-                F.broadcast(
-                    self.df_mapping_sdmx_codes.filter(
-                        (col("domain") == lit(self.domain_code))
-                        & (col("var_type") == lit(dimension_type))
-                        & (
-                            col("mapping_type").isNull()
-                            | (col("mapping_type") == lit(""))
-                        )
-                    )
-                ).alias("m_domain"),
-                col(f"d.{dimension_name}") == col("m_domain.internal_code"),
-                "left",
-            )
-            # Select only the columns we are interested in (this step is optional but recommended for debugging)
-            .select(
-                "d.*",
-                col("m_standard.external_code").alias("standard_external_code"),
-                col("m_standard.delete").alias("standard_delete"),
-                col("m_standard.multiplier").alias("standard_multiplier"),
-                col("m_domain.external_code").alias("domain_specific_external_code"),
-                col("m_domain.delete").alias("domain_specific_delete"),
-                col("m_domain.multiplier").alias("domain_specific_multiplier"),
-            )
-            # Filter out records to delete
-            .filter(
-                # Evaluate first the domain specific flag
-                F.when(
-                    col("domain_specific_delete").isNotNull(),
-                    ~col("domain_specific_delete"),
-                )
-                # Then evaluate the general flag
-                .when(
-                    col("standard_delete").isNotNull(), ~col("standard_delete")
-                ).otherwise(lit(True))
-            )
-            .withColumn(
-                dimension_name,
-                # Evaluate first the domain specific mapping
-                F.when(
-                    col("domain_specific_external_code").isNotNull(),
-                    col("domain_specific_external_code"),
-                )
-                # Then evaluate the general mapping
-                .when(
-                    col("standard_external_code").isNotNull(),
-                    col("standard_external_code"),
-                ).otherwise(col(dimension_name)),
-            )
-            .withColumn(
-                "value",
-                # Multiply first by the domain specific multiplier
-                F.when(
-                    col("domain_specific_multiplier").isNotNull(),
-                    col("value") * col("domain_specific_multiplier"),
-                )
-                # Then multiply by the general multiplier
-                .when(
-                    col("standard_external_code").isNotNull(),
-                    col("value") * col("standard_multiplier"),
-                ).otherwise(col("value")),
-            )
-            # Remove the columns that were not in the original dataset
-            .drop(
-                "standard_external_code",
-                "standard_delete",
-                "standard_multiplier",
-                "domain_specific_external_code",
-                "domain_specific_delete",
-                "domain_specific_multiplier",
+        df = df.withColumn(
+            "display_decimals",
+            F.coalesce(col("display_decimals"), lit("0")).cast("INT"),
+        ).withColumn(
+            "value",
+            F.round(
+                F.col("value").cast("FLOAT") * F.pow(10, F.col("display_decimals")), 0
            )
+            / F.pow(10, F.col("display_decimals")).cast("STRING"),
         )
 
-    def apply_sdmx_dimension_codes_mapping(self, df: DataFrame) -> DataFrame:
-        logging.info("Mapping codes to comply with SDMX standard")
-        for dimension_name, dimension_type in self.codelist_type_mapping.items():
-            df = df.transform(
-                self._apply_sdmx_dimension_codes_mapping_single,
-                dimension_name=dimension_name,
-                dimension_type=dimension_type,
-            )
-
-        return df
-
-    def drop_non_sdmx_columns(self, df: DataFrame) -> DataFrame:
-        cols_to_drop = [
-            row["internal_name"]
-            for row in self.df_mapping_sdmx_col_names.collect()
-            if row["delete"] is True
-        ]
-        logging.info(f"Dropping non-SDMX columns: {cols_to_drop}")
-        return df.drop(*cols_to_drop)
-
-    def apply_sdmx_column_names_mapping(self, df: DataFrame) -> DataFrame:
-        logging.info("Renaming columns to comply with SDMX standard")
-
-        mapping_sws_col_sdmx_col = {
-            row["internal_name"]: row["external_name"]
-            for row in self.df_mapping_sdmx_col_names.filter(
-                col("internal_name").isNotNull()
-                & (col("internal_name") != lit(""))
-                & ~col("delete")
-            ).collect()
-        }
-
-        logging.info(f"Column names mappings: {mapping_sws_col_sdmx_col}")
-
-        return df.withColumnsRenamed(mapping_sws_col_sdmx_col)
-
-    def add_sdmx_default_columns(self, df: DataFrame) -> DataFrame:
-        col_w_default_value = {
-            row["external_name"]: row["default_value"]
-            for row in self.df_mapping_sdmx_col_names.collect()
-            if row["add"] is True
-        }
-
-        logging.info("Adding SDMX columns with default values")
-
-        for name, default_value in col_w_default_value.items():
-            logging.info(
-                f"Adding SDMX column {name} with default value {default_value}"
-            )
-            df = df.withColumn(name, lit(default_value))
+        # F.round(
+        #     col("value").cast("FLOAT"), col("display_decimals").cast("INT")
+        # ).cast("STRING"),
 
         return df
 
-    def rearrange_sdmx_columns(self, df: DataFrame) -> DataFrame:
-        logging.info(
-            "Rearranging the columns to have the following order: Dimensions, TimeDimension, PrimaryMeasure, Attributes"
+    def read_bronze_data(self) -> DataFrame:
+        return self.spark.read.option("tag", self.tag_name).table(
+            self.iceberg_tables.BRONZE_DISS_TAG.iceberg_id
         )
 
-        get_columns_for_type = lambda df, type: [
-            row[0]
-            for row in df.filter(col("type") == lit(type))
-            .select("external_name")
-            .collect()
-        ]
-
-        df_mapping_sdmx_no_del = self.df_mapping_sdmx_col_names.filter(~col("delete"))
-
-        dimensions = get_columns_for_type(df_mapping_sdmx_no_del, "Dimension")
-        time_dimensions = get_columns_for_type(df_mapping_sdmx_no_del, "TimeDimension")
-        primary_measure = get_columns_for_type(df_mapping_sdmx_no_del, "PrimaryMeasure")
-        attributes = get_columns_for_type(df_mapping_sdmx_no_del, "Attribute")
-
-        logging.info(f"Dimensions: {dimensions}")
-        logging.info(f"Time Dimensions: {time_dimensions}")
-        logging.info(f"Primary Measure: {primary_measure}")
-        logging.info(f"Attributes: {attributes}")
-
-        return df.select(*dimensions, *time_dimensions, *primary_measure, *attributes)
+    def read_silver_data(self) -> DataFrame:
+        return self.spark.read.option("tag", self.tag_name).table(
+            self.iceberg_tables.SILVER.iceberg_id
+        )
 
-    def gen_gold_sws_disseminated_data(self) -> DataFrame:
+    def gen_gold_sws_disseminated_data(
+        self, additional_columns: List[str] = []
+    ) -> DataFrame:
         return (
-            self.spark.read.option("tag", self.tag_name)
-            .table(self.iceberg_tables.SILVER.iceberg_id)
+            self.read_silver_data()
             .transform(self.apply_diss_flag_filter)
-            .transform(self.keep_dim_val_attr_columns)
+            .transform(self.keep_dim_val_attr_columns, additional_columns)
         )
 
-    def gen_gold_sws_validated_data(self) -> DataFrame:
-        return (
-            self.spark.read.option("tag", self.tag_name)
-            .table(self.iceberg_tables.BRONZE.iceberg_id)
-            .transform(self.keep_dim_val_attr_columns)
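+    # Reads the raw bronze tag data, in contrast to the validated and
+    # disseminated variants, which read the silver table.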
+    def gen_gold_sws_data(self, additional_columns: List[str] = []) -> DataFrame:
+        return self.read_bronze_data().transform(
+            self.keep_dim_val_attr_columns, additional_columns
         )
 
-    def gen_gold_sdmx_data(self) -> DataFrame:
-        return (
-            self.spark.read.option("tag", self.tag_name)
-            .table(self.iceberg_tables.SILVER.iceberg_id)
-            .transform(self.apply_diss_flag_filter)
-            .transform(self.apply_uom_mapping)
-            .transform(self.keep_dim_uom_val_attr_columns)
-            .transform(self.apply_sdmx_dimension_codes_mapping)
-            .transform(self.apply_sdmx_column_names_mapping)
-            .transform(self.add_sdmx_default_columns)
-            .transform(self.rearrange_sdmx_columns)
+    def gen_gold_sws_validated_data(
+        self, additional_columns: List[str] = []
+    ) -> DataFrame:
+        return self.read_silver_data().transform(
+            self.keep_dim_val_attr_columns, additional_columns
         )
 
     def write_gold_sws_validated_data_to_iceberg_and_csv(
@@ -468,6 +206,37 @@ class SWSGoldIcebergSparkHelper:
 
         return df
 
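+    # Writes the raw gold SWS dataframe to Iceberg, tags the new snapshot with
+    # the dissemination tag, and caches a single-file csv copy in the bucket.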
+    def write_gold_sws_data_to_iceberg_and_csv(self, df: DataFrame) -> DataFrame:
+        df.writeTo(self.iceberg_tables.GOLD_SWS.iceberg_id).createOrReplace()
+
+        logging.info(
+            f"Gold SWS table written to {self.iceberg_tables.GOLD_SWS.iceberg_id}"
+        )
+
+        self.spark.sql(
+            f"ALTER TABLE {self.iceberg_tables.GOLD_SWS.iceberg_id} CREATE OR REPLACE TAG `{self.tag_name}`"
+        )
+
+        logging.info(f"gold SWS tag '{self.tag_name}' created")
+
+        df_1 = df.coalesce(1)
+
+        save_cache_csv(
+            df=df_1,
+            bucket=self.bucket,
+            prefix=self.iceberg_tables.GOLD_SWS.csv_prefix,
+            tag_name=self.tag_name,
+        )
+
+        return df
+
+    def gen_and_write_gold_sws_data_to_iceberg_and_csv(self) -> DataFrame:
+        self.df_gold_sws = self.gen_gold_sws_data()
+
+        self.write_gold_sws_data_to_iceberg_and_csv(self.df_gold_sws)
+
+        return self.df_gold_sws
+
     def gen_and_write_gold_sws_validated_data_to_iceberg_and_csv(self) -> DataFrame:
         self.df_gold_sws_validated = self.gen_gold_sws_validated_data()
 
@@ -542,7 +311,7 @@ class SWSGoldIcebergSparkHelper:
         """The expected input to this function is the output of the sws disseminated function"""
         for column in self.dim_columns:
             df = df.withColumn(
-                column, F.regexp_replace(col(column), lit("."), lit("_"))
+                column, F.regexp_replace(col(column), lit(r"\."), lit("_"))
             )
         df = df.withColumnRenamed("value", "OBS_VALUE").withColumnsRenamed(
             {column: column.upper() for column in df.columns}
@@ -550,14 +319,14 @@ class SWSGoldIcebergSparkHelper:
         df.writeTo(self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id).createOrReplace()
 
         logging.info(
-            f"Gold SDMX table written to {self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id}"
+            f"Gold pre-SDMX table written to {self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id}"
         )
 
         self.spark.sql(
            f"ALTER TABLE {self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id} CREATE OR REPLACE TAG `{self.tag_name}`"
         )
 
-        logging.info(f"gold SDMX tag '{self.tag_name}' created")
+        logging.info(f"gold pre-SDMX tag '{self.tag_name}' created")
 
         df_1 = df.coalesce(1)
 
@@ -570,12 +339,59 @@ class SWSGoldIcebergSparkHelper:
 
         return df
 
-    def gen_and_write_gold_sdmx_data_to_iceberg_and_csv(self) -> DataFrame:
-        self.df_gold_sdmx = self.gen_gold_sdmx_data()
+    def write_gold_faostat_data_to_iceberg_and_csv(self, df: DataFrame) -> DataFrame:
+        """The expected input to this function is the output of the sws disseminated function"""
+        df.writeTo(self.iceberg_tables.GOLD_FAOSTAT.iceberg_id).createOrReplace()
 
-        self.write_gold_sdmx_data_to_iceberg_and_csv(self.df_gold_sdmx)
+        logging.info(
+            f"Gold FAOSTAT table written to {self.iceberg_tables.GOLD_FAOSTAT.iceberg_id}"
+        )
+
+        self.spark.sql(
+            f"ALTER TABLE {self.iceberg_tables.GOLD_FAOSTAT.iceberg_id} CREATE OR REPLACE TAG `{self.tag_name}`"
+        )
+
+        logging.info(f"gold FAOSTAT tag '{self.tag_name}' created")
+
+        df_1 = df.coalesce(1)
 
-        return self.df_gold_sdmx
+        save_cache_csv(
+            df=df_1,
+            bucket=self.bucket,
+            prefix=self.iceberg_tables.GOLD_FAOSTAT.csv_prefix,
+            tag_name=self.tag_name,
+        )
+
+        return df
+
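+    # Same as write_gold_faostat_data_to_iceberg_and_csv, but targeting the
+    # GOLD_FAOSTAT_UNFILTERED Iceberg table and csv prefix.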
+    def write_gold_faostat_unfiltered_data_to_iceberg_and_csv(
+        self, df: DataFrame
+    ) -> DataFrame:
+        """The expected input to this function is the output of the sws disseminated function"""
+        df.writeTo(
+            self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.iceberg_id
+        ).createOrReplace()
+
+        logging.info(
+            f"Gold FAOSTAT unfiltered table written to {self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.iceberg_id}"
+        )
+
+        self.spark.sql(
+            f"ALTER TABLE {self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.iceberg_id} CREATE OR REPLACE TAG `{self.tag_name}`"
+        )
+
+        logging.info(f"gold FAOSTAT unfiltered tag '{self.tag_name}' created")
+
+        df_1 = df.coalesce(1)
+
+        save_cache_csv(
+            df=df_1,
+            bucket=self.bucket,
+            prefix=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.csv_prefix,
+            tag_name=self.tag_name,
+        )
+
+        return df
 
     def write_gold_sws_validated_sws_dissemination_tag(
         self, df: DataFrame, tags: Tags
@@ -587,7 +403,7 @@ class SWSGoldIcebergSparkHelper:
         new_iceberg_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_sws_validated_iceberg",
             name=f"{self.domain_code} gold SWS validated Iceberg",
-            description="Gold table containing all the data unmapped and unfiltered in SWS compatible format",
+            description="Gold table containing all the unfiltered tag data, with code correction applied, in SWS compatible format",
             layer=TableLayer.GOLD,
             private=True,
             type=TableType.ICEBERG,
@@ -608,7 +424,7 @@ class SWSGoldIcebergSparkHelper:
         new_diss_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_sws_validated_csv",
             name=f"{self.domain_code} gold SWS validated csv",
-            description="Gold table containing all the data unmapped and unfiltered in SWS compatible format cached in csv",
+            description="Gold table containing all the unfiltered tag data, with code correction applied, in SWS compatible format, cached in csv",
             layer=TableLayer.GOLD,
             private=True,
             type=TableType.CSV,
@@ -636,7 +452,7 @@ class SWSGoldIcebergSparkHelper:
         new_iceberg_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_sws_disseminated_iceberg",
             name=f"{self.domain_code} gold SWS disseminated Iceberg",
-            description="Gold table containing all the data mapped and filtered in SWS compatible format",
+            description="Gold table containing only the filtered tag data, with code correction applied, in SWS compatible format",
             layer=TableLayer.GOLD,
             private=True,
             type=TableType.ICEBERG,
@@ -657,7 +473,7 @@ class SWSGoldIcebergSparkHelper:
         new_diss_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_sws_disseminated_csv",
             name=f"{self.domain_code} gold SWS disseminated csv",
-            description="Gold table containing all the data mapped and filtered in SWS compatible format format cached in csv",
+            description="Gold table containing only the filtered tag data, with code correction applied, in SWS compatible format, cached in csv",
             layer=TableLayer.GOLD,
             private=True,
             type=TableType.CSV,
@@ -733,10 +549,11 @@ class SWSGoldIcebergSparkHelper:
 
         new_iceberg_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_pre_sdmx_iceberg",
-            name=f"{self.domain_code} gold PRE SDMX Iceberg",
+            name=f"{self.domain_code} gold pre-SDMX Iceberg",
             description="Gold table containing all the cleaned data in SDMX compatible format, ready to be mapped using FMR",
             layer=TableLayer.GOLD,
             private=True,
+            debug=True,
             type=TableType.ICEBERG,
             database=IcebergDatabases.GOLD_DATABASE,
             table=self.iceberg_tables.GOLD_PRE_SDMX.table,
@@ -754,10 +571,11 @@ class SWSGoldIcebergSparkHelper:
 
         new_diss_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_pre_sdmx_csv",
-            name=f"{self.domain_code} gold SDMX csv",
-            description="Gold table containing all the cleaned data in SDMX compatible format cached in csv",
+            name=f"{self.domain_code} gold pre-SDMX csv",
+            description="Gold table containing all the cleaned data in SDMX compatible format, ready to be mapped using FMR and cached in csv",
             layer=TableLayer.GOLD,
             private=True,
+            debug=True,
             type=TableType.CSV,
             path=self.iceberg_tables.GOLD_PRE_SDMX.csv_path,
             structure={"columns": df.schema.jsonValue()["fields"]},
@@ -772,3 +590,148 @@ class SWSGoldIcebergSparkHelper:
         logging.debug(f"Tag with Added csv Table: {tag}")
 
         return df
+
+    def write_gold_sws_dissemination_tag(self, df: DataFrame, tags: Tags) -> DataFrame:
+        # Get or create a new tag
+        tag = get_or_create_tag(tags, self.dataset_id, self.tag_name, self.tag_name)
+        logging.debug(f"Tag: {tag}")
+
+        new_iceberg_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_sws_iceberg",
+            name=f"{self.domain_code} gold SWS Iceberg",
+            description="Gold table containing the tag data without any processing",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.ICEBERG,
+            database=IcebergDatabases.GOLD_DATABASE,
+            table=self.iceberg_tables.GOLD_SWS.table,
+            path=self.iceberg_tables.GOLD_SWS.path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_iceberg_table,
+        )
+        logging.debug(f"Tag with Added Iceberg Table: {tag}")
+
+        new_diss_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_sws_csv",
+            name=f"{self.domain_code} gold SWS csv",
+            description="Gold table containing the tag data without any processing, cached in csv",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.CSV,
+            path=self.iceberg_tables.GOLD_SWS.csv_path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_diss_table,
+        )
+        logging.debug(f"Tag with Added csv Table: {tag}")
+
+        return df
+
+    def write_gold_faostat_dissemination_tag(
+        self, df: DataFrame, tags: Tags
+    ) -> DataFrame:
+        # Get or create a new tag
+        tag = get_or_create_tag(tags, self.dataset_id, self.tag_name, self.tag_name)
+        logging.debug(f"Tag: {tag}")
+
+        new_iceberg_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_faostat_iceberg",
+            name=f"{self.domain_code} gold FAOSTAT Iceberg",
+            description="Gold table containing the tag data in FAOSTAT format",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.ICEBERG,
+            database=IcebergDatabases.GOLD_DATABASE,
+            table=self.iceberg_tables.GOLD_FAOSTAT.table,
+            path=self.iceberg_tables.GOLD_FAOSTAT.path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_iceberg_table,
+        )
+        logging.debug(f"Tag with Added Iceberg Table: {tag}")
+
+        new_diss_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_faostat_csv",
+            name=f"{self.domain_code} gold FAOSTAT csv",
+            description="Gold table containing the tag data in FAOSTAT format, cached in csv",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.CSV,
+            path=self.iceberg_tables.GOLD_FAOSTAT.csv_path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_diss_table,
+        )
+        logging.debug(f"Tag with Added csv Table: {tag}")
+
+        return df
+
+    def write_gold_faostat_unfiltered_dissemination_tag(
+        self, df: DataFrame, tags: Tags
+    ) -> DataFrame:
+        # Get or create a new tag
+        tag = get_or_create_tag(tags, self.dataset_id, self.tag_name, self.tag_name)
+        logging.debug(f"Tag: {tag}")
+
+        new_iceberg_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_faostat_unfiltered_iceberg",
+            name=f"{self.domain_code} gold FAOSTAT unfiltered Iceberg",
+            description="Gold table containing all the tag data in FAOSTAT format",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.ICEBERG,
+            database=IcebergDatabases.GOLD_DATABASE,
+            table=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.table,
+            path=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_iceberg_table,
+        )
+        logging.debug(f"Tag with Added Iceberg Table: {tag}")
+
+        new_diss_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_faostat_unfiltered_csv",
+            name=f"{self.domain_code} gold FAOSTAT unfiltered csv",
+            description="Gold table containing all the tag data in FAOSTAT format, cached in csv",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.CSV,
+            path=self.iceberg_tables.GOLD_FAOSTAT_UNFILTERED.csv_path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_diss_table,
+        )
+        logging.debug(f"Tag with Added csv Table: {tag}")
+
+        return df
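
A minimal usage sketch of the new gold SWS path added in this release (assuming an already configured SWSGoldIcebergSparkHelper instance named helper and a sws_api_client Tags client named tags; both names are illustrative):

    # Generate the unprocessed gold SWS table from the bronze dissemination-tag
    # snapshot, write it to Iceberg, tag the snapshot, and cache a csv copy.
    df_gold_sws = helper.gen_and_write_gold_sws_data_to_iceberg_and_csv()

    # Register the Iceberg table and its csv cache on the dissemination tag.
    helper.write_gold_sws_dissemination_tag(df_gold_sws, tags)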