sws-spark-dissemination-helper 0.0.60__py3-none-any.whl → 0.0.171__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +380 -28
- sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
- sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +723 -0
- sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +331 -353
- sws_spark_dissemination_helper/SWSPostgresSparkReader.py +110 -31
- sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +138 -23
- sws_spark_dissemination_helper/__init__.py +1 -0
- sws_spark_dissemination_helper/constants.py +76 -24
- sws_spark_dissemination_helper/utils.py +133 -68
- {sws_spark_dissemination_helper-0.0.60.dist-info → sws_spark_dissemination_helper-0.0.171.dist-info}/METADATA +21 -17
- sws_spark_dissemination_helper-0.0.171.dist-info/RECORD +13 -0
- sws_spark_dissemination_helper-0.0.60.dist-info/RECORD +0 -11
- {sws_spark_dissemination_helper-0.0.60.dist-info → sws_spark_dissemination_helper-0.0.171.dist-info}/WHEEL +0 -0
- {sws_spark_dissemination_helper-0.0.60.dist-info → sws_spark_dissemination_helper-0.0.171.dist-info}/licenses/LICENSE +0 -0
sws_spark_dissemination_helper/SWSPostgresSparkReader.py

```diff
@@ -94,25 +94,37 @@ class SWSPostgresSparkReader:

         logging.info(f"{pg_table} read start")

-        … (19 lines removed; content not shown in the source diff)
+            if min_id is None or max_id is None:
+                df = (
+                    self.spark.read.format("jdbc")
+                    .option("customSchema", custom_schema)
+                    .option("dbtable", pg_table)
+                    .option("fetchsize", "1000")
+                    .option("url", self.jdbc_url)
+                    .option("user", self.jdbc_conn_properties["user"])
+                    .option("password", self.jdbc_conn_properties["password"])
+                    .option("driver", SPARK_POSTGRES_DRIVER)
+                    .load()
+                )
+            else:
+                df = (
+                    self.spark.read.format("jdbc")
+                    .option("customSchema", custom_schema)
+                    .option("dbtable", pg_table)
+                    .option("partitionColumn", partition_column)
+                    .option("lowerBound", min_id)
+                    .option("upperBound", max_id)
+                    .option("numPartitions", num_partitions)
+                    .option("fetchsize", "1000")
+                    .option("url", self.jdbc_url)
+                    .option("user", self.jdbc_conn_properties["user"])
+                    .option("password", self.jdbc_conn_properties["password"])
+                    .option("driver", SPARK_POSTGRES_DRIVER)
+                    .load()
+                    # .repartition(1024, partition_column)
+                    # .sortWithinPartitions(partition_column)
+                    # .cache()
+                )
         else:
             df = (
                 self.spark.read.format("jdbc")
```
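The reworked read logic above falls back to a single JDBC query when no `min_id`/`max_id` bounds are available and otherwise splits the read into `numPartitions` parallel range queries on `partition_column`. A minimal sketch of the same Spark JDBC partitioning options, with hypothetical connection details (the helper itself takes them from `self.jdbc_url` and `self.jdbc_conn_properties`):

```python
# Minimal sketch of Spark's range-partitioned JDBC read; the connection
# values below are assumptions, not values from the package.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = (
    spark.read.format("jdbc")
    .option("url", "jdbc:postgresql://localhost:5432/sws")  # assumed URL
    .option("dbtable", "observation")                        # assumed table
    .option("user", "sws_user")                              # assumed credentials
    .option("password", "***")
    .option("driver", "org.postgresql.Driver")
    # Each of the 10 partitions issues its own query over a slice of the id
    # range, so the table is read in parallel instead of via one JDBC cursor.
    .option("partitionColumn", "id")
    .option("lowerBound", 1)
    .option("upperBound", 1_000_000)
    .option("numPartitions", 10)
    .option("fetchsize", "1000")
    .load()
)
```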
```diff
@@ -195,6 +207,7 @@ class SWSPostgresSparkReader:
             (dataset_tables.OBSERVATION_COORDINATE, "id", 10),
             (dataset_tables.METADATA, "id", 10),
             (dataset_tables.METADATA_ELEMENT, "metadata", 10),
+            (dataset_tables.TAG_OBSERVATION, "tag", 10),
         ]
         return self._import_tables(data_tables)

```
```diff
@@ -209,25 +222,30 @@ class SWSPostgresSparkReader:
             dataset_tables.METADATA_ELEMENT_TYPE,
             dataset_tables.LANGUAGE,
             dataset_tables.UNIT_OF_MEASURE,
+            dataset_tables.DATASET,
             *dataset_tables.CODELISTS,
         ]
+        logging.info(
+            f"Importing reference data tables: {[(table.postgres_id, table.iceberg_id) for table in reference_data_tables]}"
+        )
         return self._import_tables(
             [(table, None, 1) for table in reference_data_tables]
         )

     def import_operational_data_tables(
         self, dataset_tables: DatasetTables
-    ) -> DataFrame:
+    ) -> List[DataFrame]:
         # Define and import operational data table without partitioning
         operational_data_tables = [
             (dataset_tables.USER, None, 1),
+            (dataset_tables.TAG, None, 1),
         ]
-        return self._import_tables(operational_data_tables)
+        return self._import_tables(operational_data_tables)

     def import_data_reference_data_operational_data(
         self, dataset_tables: DatasetTables
     ) -> Tuple[
-        Tuple[DataFrame, DataFrame, DataFrame, DataFrame],
+        Tuple[DataFrame, DataFrame, DataFrame, DataFrame, DataFrame],
         Tuple[
             DataFrame,
             DataFrame,
```
```diff
@@ -235,22 +253,23 @@ class SWSPostgresSparkReader:
             DataFrame,
             DataFrame,
             DataFrame,
+            DataFrame,
             List[DataFrame],
         ],
-        DataFrame,
+        Tuple[DataFrame, DataFrame],
     ]:
         # Import and organize DataFrames into the desired output structure
         data_dfs = self.import_data_tables(dataset_tables)
         reference_data_dfs = self.import_reference_data_tables(dataset_tables)
-
+        operational_data_dfs = self.import_operational_data_tables(dataset_tables)

         return (
             tuple(data_dfs),
             (
-                *reference_data_dfs[:
-                reference_data_dfs[
+                *reference_data_dfs[:7],
+                reference_data_dfs[7:],
             ),
-
+            tuple(operational_data_dfs),
         )

     def get_codelist_type_mapping(
```
```diff
@@ -291,13 +310,73 @@ class SWSPostgresSparkReader:
         self,
         domain_code: str,
     ) -> DataFrame:
-
+        df = self.read_pg_table(
             pg_table=DatasetDatatables.MAPPING_CODE_CORRECTION.id,
-            table_name=DatasetDatatables.MAPPING_CODE_CORRECTION.name,
             custom_schema=DatasetDatatables.MAPPING_CODE_CORRECTION.schema,
-            domain_code=domain_code,
-            unique_columns=["old_code"],
         )
+        df.filter(
+            col("mapping_type").isNull() | (col("mapping_type") == lit(""))
+        ).transform(
+            correct_domain_filter, domain=domain_code, unique_columns=["old_code"]
+        )
+
+        return df
+
+    def get_domain_code_source_datasets_ids_dest_dataset_id(
+        self, dataset_id: str, domain_code: str = None
+    ) -> Tuple[str, List[str], str]:
+        mapping_domains_id_df = self.read_pg_table(
+            pg_table=DatasetDatatables.MAPPING_DOMAINS_ID.id,
+            custom_schema=DatasetDatatables.MAPPING_DOMAINS_ID.schema,
+        )
+
+        if domain_code is None:
+            domain_code_df = mapping_domains_id_df.filter(
+                col("sws_source_id") == lit(dataset_id)
+            ).select("domain")
+
+            if domain_code_df.count() == 0:
+                raise ValueError(
+                    f'There is no row connecting the current source dataset id ({dataset_id}) to any domain in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}"'
+                )
+
+            if domain_code_df.count() > 1:
+                raise ValueError(
+                    f'There is more than one domain referencing the current source dataset id ({dataset_id}) in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}", please specify the domain code you want to process in the parameters'
+                )
+
+            domain_code = domain_code_df.collect()[0][0]
+
+        source_datasets_ids = [
+            row[0]
+            for row in (
+                mapping_domains_id_df.filter(col("domain") == lit(domain_code))
+                .select("sws_source_id")
+                .collect()
+            )
+        ]
+        dest_datasets_id_df = (
+            mapping_domains_id_df.filter(col("domain") == lit(domain_code))
+            .select("sws_destination_id")
+            .distinct()
+        )
+
+        if dest_datasets_id_df.count() == 0:
+            raise ValueError(
+                f'There is no row connecting the current source dataset id and domain pair ({dataset_id}, {domain_code}) to any destination dataset id in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}"'
+            )
+        if dest_datasets_id_df.count() > 1:
+            raise ValueError(
+                f'The source dataset id and domain pair ({dataset_id}, {domain_code}) must point only to one destination dataset in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}"'
+            )
+
+        dest_datasets_id = dest_datasets_id_df.collect()[0][0]
+
+        logging.info(f"domain code: {domain_code}")
+        logging.info(f"source datasets ids: {source_datasets_ids}")
+        logging.info(f"dest datasets ids: {dest_datasets_id}")
+
+        return (domain_code, source_datasets_ids, dest_datasets_id)

     def get_dest_dataset_id(self, domain_code: str, dataset_id: str) -> Tuple[str, str]:

```
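The added `get_domain_code_source_datasets_ids_dest_dataset_id` helper resolves the domain for a source dataset from the "Mapping - Domains ID" datatable, collects every source dataset id sharing that domain, and enforces exactly one destination dataset, raising `ValueError` otherwise. A hedged usage sketch; the reader construction and identifiers below are assumptions, not values from the package:

```python
# Hypothetical usage of the new lookup; constructor arguments and the
# dataset id are placeholders.
from sws_spark_dissemination_helper import SWSPostgresSparkReader

reader: SWSPostgresSparkReader = ...  # built elsewhere with Spark + JDBC settings

domain_code, source_dataset_ids, dest_dataset_id = (
    reader.get_domain_code_source_datasets_ids_dest_dataset_id(
        dataset_id="agriculture_production",  # assumed SWS dataset id
        domain_code=None,  # let the mapping datatable resolve the domain
    )
)

# domain_code        -> the single domain mapped to the source dataset
# source_dataset_ids -> every sws_source_id that shares that domain
# dest_dataset_id    -> the one destination dataset allowed for the domain
```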
sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py

```diff
@@ -10,11 +10,11 @@ from pyspark.sql.window import Window
 from sws_api_client import Tags
 from sws_api_client.tags import BaseDisseminatedTagTable, TableLayer, TableType

-from .constants import IcebergDatabases, IcebergTables
+from .constants import IcebergDatabases, IcebergTables, DatasetDatatables
 from .SWSPostgresSparkReader import SWSPostgresSparkReader
 from .utils import (
     get_or_create_tag,
-
+    map_codes_and_remove_null_duplicates,
     save_cache_csv,
     upsert_disseminated_table,
 )
```
```diff
@@ -111,6 +111,11 @@ class SWSSilverIcebergSparkHelper:
             self.iceberg_tables.BRONZE.iceberg_id
         )

+    def read_bronze_diss_tag_data(self) -> DataFrame:
+        return self.spark.read.option("tag", self.tag_name).table(
+            self.iceberg_tables.BRONZE_DISS_TAG.iceberg_id
+        )
+
     def _get_dim_time_flag_columns(self) -> Tuple[List[str], List[str], str, List[str]]:
         """Extract the dimension columns with time, without time, the time column and the flag columns names."""
         dim_columns_w_time = [
```
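The new `read_bronze_diss_tag_data` reads the bronze dissemination table through a `tag` read option, which matches Iceberg's named-tag time travel. A small sketch of that read pattern, assuming the table is an Iceberg table that already has a named tag (catalog, table, and tag names are made up):

```python
# Sketch of reading an Iceberg table at a named tag via the "tag" read
# option; identifiers below are assumptions.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df_tagged = (
    spark.read.option("tag", "dissemination_2024_01")    # assumed tag name
    .table("iceberg_catalog.bronze.aquastat_diss_tag")   # assumed identifier
)
df_tagged.show(5)
```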
```diff
@@ -158,6 +163,99 @@ class SWSSilverIcebergSparkHelper:

         logging.info(f"Checking time validity for {col_name} of type {col_type}")

+        if col_type == "area":
+            logging.info(
+                f'Changing start and end year according to "{DatasetDatatables.MAPPING_CODE_CORRECTION.name}"'
+            )
+            df_start_year_correction = self.df_mapping_code_correction.filter(
+                col("var_type") == lit("start_year")
+            )
+            df_end_year_correction = self.df_mapping_code_correction.filter(
+                col("var_type") == lit("end_year")
+            )
+
+            original_col_order = df.columns
+            cols_to_select = df.columns
+            col_name_lower = col_name.lower()
+            cols_to_select = [
+                column
+                for column in cols_to_select
+                if column.lower()
+                not in (
+                    "note",
+                    f"{col_name_lower}_start_date",
+                    f"{col_name_lower}_end_date",
+                )
+            ]
+
+            df = (
+                df.alias("d")
+                .join(
+                    F.broadcast(df_start_year_correction).alias("sy"),
+                    on=col(f"d.{col_name}") == col("sy.mapping_type"),
+                    how="left",
+                )
+                .join(
+                    F.broadcast(df_end_year_correction).alias("ey"),
+                    on=col(f"d.{col_name}") == col("ey.mapping_type"),
+                    how="left",
+                )
+                .withColumn("valid_new_start_year", col("sy.new_code").isNotNull())
+                .withColumn("valid_new_end_year", col("ey.new_code").isNotNull())
+                .withColumn(
+                    "new_note",
+                    F.when(
+                        col("valid_new_start_year"),
+                        F.array_append(
+                            col("d.note"),
+                            F.concat(
+                                col("sy.note"),
+                                lit(" from "),
+                                col("sy.old_code"),
+                                lit(" to "),
+                                col("sy.new_code"),
+                            ),
+                        ),
+                    ).otherwise(col("d.note")),
+                )
+                .withColumn(
+                    "new_note",
+                    F.when(
+                        col("valid_new_end_year"),
+                        F.array_append(
+                            col("new_note"),
+                            F.concat(
+                                col("ey.note"),
+                                lit(" from "),
+                                col("ey.old_code"),
+                                lit(" to "),
+                                col("ey.new_code"),
+                            ),
+                        ),
+                    ).otherwise(col("new_note")),
+                )
+                .withColumn(
+                    f"new_{col_name}_start_date",
+                    F.when(
+                        col("valid_new_start_year"), F.to_date(col("sy.new_code"))
+                    ).otherwise(col(f"d.{col_name}_start_date")),
+                )
+                .withColumn(
+                    f"new_{col_name}_end_date",
+                    F.when(
+                        col("valid_new_end_year"),
+                        F.to_date(F.concat(col("ey.new_code"), lit("-12-31"))),
+                    ).otherwise(col(f"d.{col_name}_end_date")),
+                )
+                .select(
+                    *cols_to_select,
+                    col("new_note").alias("note"),
+                    col(f"new_{col_name}_start_date").alias(f"{col_name}_start_date"),
+                    col(f"new_{col_name}_end_date").alias(f"{col_name}_end_date"),
+                )
+                .select(*original_col_order)
+            )
+
         # Iterate through columns and build conditions dynamically
         start_date_condition = col(f"{col_name}_start_date").isNull() | (
             col(f"{col_name}_start_date") <= col(f"{self.time_column}_start_date")
```
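The block above broadcast-joins the code-correction rows onto the data twice (start-year and end-year corrections) and then uses `when`/`otherwise` to overwrite the validity dates, appending a note only where a correction matched. A toy sketch of that join-and-override pattern, with illustrative column names rather than the helper's own:

```python
# Toy sketch of the correction pattern: broadcast-join a small correction
# table and override a date column only where a correction row matched.
from pyspark.sql import SparkSession, functions as F
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

data = spark.createDataFrame(
    [("276", "2012-01-01"), ("736", "1970-01-01")],
    ["area", "area_start_date"],
)
corrections = spark.createDataFrame(
    [("736", "2011")],  # e.g. start year corrected to 2011 for one area code
    ["mapping_type", "new_code"],
)

corrected = (
    data.alias("d")
    .join(
        F.broadcast(corrections).alias("sy"),
        on=col("d.area") == col("sy.mapping_type"),
        how="left",
    )
    .select(
        col("d.area").alias("area"),
        # Keep the original start date unless a correction row supplied a new year.
        F.when(
            col("sy.new_code").isNotNull(), F.to_date(col("sy.new_code"), "yyyy")
        )
        .otherwise(F.to_date(col("d.area_start_date")))
        .alias("area_start_date"),
    )
)
corrected.show()
```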
```diff
@@ -204,7 +302,7 @@ class SWSSilverIcebergSparkHelper:
     ) -> DataFrame:
         logging.info(f"Correcting codes for column {col_name} of type {col_type}")

-        return
+        return map_codes_and_remove_null_duplicates(
             df,
             self.df_mapping_code_correction,
             self.domain_code,
```
```diff
@@ -212,6 +310,12 @@ class SWSSilverIcebergSparkHelper:
             col_type,
             src_column="old_code",
             dest_column="new_code",
+            dimension_columns=[
+                column
+                for column in self.dim_columns_w_time
+                if column not in self.flag_columns
+            ],
+            flag_columns=self.flag_columns,
         )

     def apply_code_correction(self, df: DataFrame) -> DataFrame:
```
```diff
@@ -434,7 +538,9 @@ class SWSSilverIcebergSparkHelper:

         return df

-    def
+    def check_duplicates(
+        self, df: DataFrame, partition_columns: List[str] = None
+    ) -> DataFrame:
         """
         Removes rows from the DataFrame where the combination of specified dimension columns
         (e.g., 'area', 'element', 'product') is duplicated, and the 'value' column is null.
```
```diff
@@ -468,31 +574,33 @@ class SWSSilverIcebergSparkHelper:
         - The intermediate 'count' column is dropped from the resulting DataFrame.
         """
         # Step 1: Define a window specification based on area, element, and product
-        window_spec = Window.partitionBy(
-        … (5 lines removed; content not shown in the source diff)
+        window_spec = Window.partitionBy(
+            *(
+                partition_columns
+                or [
+                    col
+                    for col in self.dim_columns_w_time
+                    if col not in self.flag_columns
+                ]
+            )
         )

-        # Step
-        … (5 lines removed; content not shown in the source diff)
-        warning_rows = [row[0] for row in df_warning.select("id").collect()]
-        blocking_warning_rows = df_warning.filter(col("diss_flag")).collect()
-        logging.warning(
-            f"The rows with the following ids are duplicates but they are not disseminated: {str(warning_rows)}"
+        # Step 2: Count the occurrences of each combination of dimensions that is disseminated
+        df_duplicates = (
+            df.filter(col("diss_flag"))
+            .withColumn("count", F.count(lit(1)).over(window_spec))
+            .filter(col("count") > lit(1))
+            .drop("count")
         )
-        if
+        if df_duplicates.count() > 0:
+            df_duplicates.writeTo(
+                f"{self.iceberg_tables.SILVER.iceberg_id}_duplicates"
+            ).createOrReplace()
             raise RuntimeError(
-                f"There are some duplicates in the data that are flagged for dissemination
+                f"There are some duplicates in the data that are flagged for dissemination they can be checked in the {self.iceberg_tables.SILVER.iceberg_id}_duplicates table"
             )

-        return
+        return df

     def write_silver_data_to_iceberg_and_csv(self, df: DataFrame) -> DataFrame:

```
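The reworked `check_duplicates` counts rows per dimension combination over a window, restricted to rows flagged for dissemination, and persists any offending rows to a `<silver>_duplicates` Iceberg table before raising. The core window-count pattern, on toy data with illustrative column names:

```python
# Toy sketch of the duplicate check: count rows per dimension combination
# over a window and keep only combinations that occur more than once.
from pyspark.sql import SparkSession, functions as F
from pyspark.sql.functions import col, lit
from pyspark.sql.window import Window

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame(
    [
        ("4", "5510", "2020", 10.0, True),
        ("4", "5510", "2020", 12.0, True),   # duplicate of the row above
        ("4", "5510", "2021", 11.0, True),
    ],
    ["area", "element", "year", "value", "diss_flag"],
)

window_spec = Window.partitionBy("area", "element", "year")

duplicates = (
    df.filter(col("diss_flag"))
    .withColumn("count", F.count(lit(1)).over(window_spec))
    .filter(col("count") > lit(1))
    .drop("count")
)

# The two rows sharing (area, element, year) = ("4", "5510", "2020") are returned.
duplicates.show()
```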
```diff
@@ -537,6 +645,13 @@ class SWSSilverIcebergSparkHelper:
             table=self.iceberg_tables.SILVER.table,
             path=self.iceberg_tables.SILVER.path,
             structure={"columns": df.schema.jsonValue()["fields"]},
+            pinned_columns=[
+                *self.dim_columns_w_time,
+                "value",
+                *self.flag_columns,
+                "diss_flag",
+                "note",
+            ],
         )
         tag = upsert_disseminated_table(
             sws_tags=tags,
```
sws_spark_dissemination_helper/__init__.py

```diff
@@ -2,3 +2,4 @@ from .SWSPostgresSparkReader import SWSPostgresSparkReader
 from .SWSBronzeIcebergSparkHelper import SWSBronzeIcebergSparkHelper
 from .SWSSilverIcebergSparkHelper import SWSSilverIcebergSparkHelper
 from .SWSGoldIcebergSparkHelper import SWSGoldIcebergSparkHelper
+from .SWSEasyIcebergSparkHelper import SWSEasyIcebergSparkHelper
```
sws_spark_dissemination_helper/constants.py

```diff
@@ -1,3 +1,5 @@
+from typing import List
+
 from pyspark.sql.functions import col, lit

 SPARK_POSTGRES_DRIVER = "org.postgresql.Driver"
```
```diff
@@ -34,26 +36,57 @@ class DomainFilters:
 class DatasetDatatables:

     class __SWSDatatable:
-        def __init__(
+        def __init__(
+            self, id: str, name: str, schema: str, join_columns: List[str] = []
+        ):
             self.id = id
+            self.iceberg_id = f"{IcebergDatabases.BRONZE_DATABASE}.{id.split('.')[1]}"
             self.name = name
             self.schema = schema
+            self.join_columns = join_columns
+
+    # Aggregation Tables
+    AGGREGATES_COMPOSITION = __SWSDatatable(
+        id="datatables.aggregates_composition",
+        name="Aggregation - Composition",
+        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, aggregation_type STRING, group_code STRING, child_code STRING, group_name STRING, child_name STRING, link_code STRING, factor STRING",
+    )
+    AGGREGATES_ELEMENTS = __SWSDatatable(
+        id="datatables.aggregates_elements",
+        name="Aggregation - Aggregates per elements",
+        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, element STRING, aggregation_type STRING, code STRING",
+    )

     # Dissemination Tables
     DISSEMINATION_TYPE_LIST = __SWSDatatable(
         id="datatables.dissemination_{type}_list",
         name="Dissemination - {type} list",
         schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, code STRING, name STRING, aggregation_type STRING, dissemination BOOLEAN, aggregation BOOLEAN",
+        join_columns=["domain", "code"],
     )
     DISSEMINATION_EXCEPTIONS = __SWSDatatable(
         id="datatables.dissemination_exception",
         name="Dissemination - Exceptions",
         schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, dim1_code STRING, dim2_code STRING, dim3_code STRING, dim4_code STRING, dim5_code STRING, dim6_code STRING, dim7_code STRING, status_flag STRING, method_flag STRING, dissemination BOOLEAN, aggregation BOOLEAN, note STRING",
+        join_columns=[
+            "domain",
+            " dim1_code",
+            " dim2_code",
+            " dim3_code",
+            " dim4_code",
+            " dim5_code",
+            " dim6_code",
+            " dim7_code",
+            " status_flag",
+            " method_flag",
+        ],
     )
+    # TODO Deprecate
     DISSEMINATION_ITEM_LIST_FAOSTAT = __SWSDatatable(
         id="datatables.dissemination_item_list_faostat",
         name="Dissemination - Item list - FAOSTAT",
         schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, code STRING, name STRING, aggregation_type STRING, dissemination BOOLEAN, aggregation BOOLEAN",
+        join_columns=["domain", "code"],
     )

     # Mapping Tables
```
```diff
@@ -61,34 +94,23 @@ class DatasetDatatables:
         id="datatables.aggregates_mapping_domains_id",
         name="Mapping - Domains ID",
         schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, domain_name STRING, sws_source_id STRING, sws_destination_id STRING",
+        join_columns=["domain", "sws_source_id"],
     )
     MAPPING_CODELIST_TYPE = __SWSDatatable(
         id="datatables.mapping_codelist_type",
         name="Mapping Codelist type",
         schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, col_name STRING, col_type STRING",
+        join_columns=["domain", "col_name"],
     )
     MAPPING_CODE_CORRECTION = __SWSDatatable(
         id="datatables.aggregates_mapping_code_correction",
         name="Mapping - Code correction",
         schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, old_code STRING, new_code STRING, var_type STRING, delete BOOLEAN, multiplier FLOAT, mapping_type STRING",
-
-    MAPPING_SDMX_COLUMN_NAMES = __SWSDatatable(
-        id="datatables.mapping_sdmx_col_names",
-        name="Mapping - SDMX column names",
-        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, internal_name STRING, external_name STRING, delete BOOLEAN, add BOOLEAN, default_value STRING",
-    )
-    MAPPING_SDMX_CODES = __SWSDatatable(
-        id="datatables.mapping_pre_dissemination",
-        name="Mapping - Pre dissemination",
-        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, internal_code STRING, external_code STRING, var_type STRING, delete BOOLEAN, multiplier FLOAT, mapping_type STRING",
-    )
-    MAPPING_UNITS_OF_MEASURE = __SWSDatatable(
-        id="datatables.mapping_units_of_measure",
-        name="Mapping - Units of measure",
-        schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, sws_code STRING, sws_multiplier INT, sdmx_code STRING, sdmx_multiplier INT, value_multiplier INT, delete BOOLEAN, mapping_type STRING",
+        join_columns=["domain", "old_code", "var_type", "mapping_type"],
     )

     # Non-SWS Sources Tables
+    # TODO To deprecate
     FAOSTAT_CODE_MAPPING = __SWSDatatable(
         id="datatables.faostat_code_mapping",
         name="FAOSTAT Code Mapping",
```
```diff
@@ -150,6 +172,11 @@ class DatasetTables:
             iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.{self.__dataset_id}_metadata_element",
             schema="id BIGINT, metadata INT, metadata_element_type INT, value STRING",
         )
+        self.TAG_OBSERVATION = self.__SWSTable(
+            postgres_id=f"{self.__dataset_id}.tag_observation",
+            iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.{self.__dataset_id}_tag_observation",
+            schema="tag BIGINT, observation INT",
+        )

         # Reference data
         self.CODELISTS = [
```
```diff
@@ -181,18 +208,21 @@ class DatasetTables:
             iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.metadata_element_type",
             schema="id INT, metadata_type INT, code STRING, description STRING, mandatory BOOLEAN, repeatable BOOLEAN, private BOOLEAN",
         )
-
         LANGUAGE = __SWSTable(
             postgres_id="reference_data.language",
             iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.language",
             schema="id INT, country_code STRING, description STRING",
         )
-
         UNIT_OF_MEASURE = __SWSTable(
             postgres_id="reference_data.unit_of_measure",
             iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.unit_of_measure",
             schema="id INT, code STRING, sdmx_code STRING, metric BOOLEAN, description STRING, symbol STRING, base_unit STRING, multiplier DECIMAL",
         )
+        DATASET = __SWSTable(
+            postgres_id="reference_data.dataset",
+            iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.dataset",
+            schema="id INT, xml_name STRING",
+        )

         # Operational data
         USER = __SWSTable(
```
```diff
@@ -200,6 +230,11 @@ class DatasetTables:
             iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.user",
             schema="id INT, username STRING, preferences INT, email STRING, active BOOLEAN, settings STRING",
         )
+        TAG = __SWSTable(
+            postgres_id="operational_data.tag",
+            iceberg_id=f"{IcebergDatabases.STAGING_DATABASE}.tag",
+            schema="id INT, name STRING, reference_date DATE, dataset INT, type STRING, released_ON DATE, released_by INT, properties STRING",
+        )


 class IcebergTable:
```
```diff
@@ -218,21 +253,38 @@ class IcebergTables:
         self.__dataset_id = dataset_id
         self.__tag_name = tag_name

-
-        self.
+        # TODO Fix later with a more appropriate DATABASE
+        self.DENORMALIZED_OBSERVATION = self.create_iceberg_table("BRONZE", suffix="denormalized_observation")
+        self.DENORMALIZED_METADATA = self.create_iceberg_table("BRONZE", suffix="denormalized_metadata")
+        self.GROUPED_METADATA = self.create_iceberg_table("BRONZE", suffix="grouped_metadata")
+        self.TABLE = self.create_iceberg_table("BRONZE")
+        self.TABLE_FILTERED = self.create_iceberg_table("BRONZE", suffix="filtered")
+        self.BRONZE = self.create_iceberg_table("BRONZE")
+        self.BRONZE_DISS_TAG = self.create_iceberg_table("BRONZE", suffix="diss_tag")
+        self.SILVER = self.create_iceberg_table("SILVER", prefix=domain)

         # GOLD tables with specific suffixes
-        self.
+        self.GOLD_SWS = self.create_iceberg_table("GOLD", prefix=domain, suffix="sws")
+        self.GOLD_SDMX = self.create_iceberg_table(
             "GOLD", prefix=domain, suffix="sdmx_disseminated"
         )
-        self.GOLD_SWS_VALIDATED = self.
+        self.GOLD_SWS_VALIDATED = self.create_iceberg_table(
             "GOLD", prefix=domain, suffix="sws_validated"
         )
-        self.GOLD_SWS_DISSEMINATED = self.
+        self.GOLD_SWS_DISSEMINATED = self.create_iceberg_table(
             "GOLD", prefix=domain, suffix="sws_disseminated"
         )
+        self.GOLD_PRE_SDMX = self.create_iceberg_table(
+            "GOLD", prefix=domain, suffix="pre_sdmx"
+        )
+        self.GOLD_FAOSTAT = self.create_iceberg_table(
+            "GOLD", prefix=domain, suffix="faostat"
+        )
+        self.GOLD_FAOSTAT_UNFILTERED = self.create_iceberg_table(
+            "GOLD", prefix=domain, suffix="faostat_unfiltered"
+        )

-    def
+    def create_iceberg_table(
         self, level: str, prefix: str = "", suffix: str = ""
     ) -> IcebergTable:
         database = getattr(IcebergDatabases, f"{level}_DATABASE")
```
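The refactored constructor derives every table from a single `create_iceberg_table` factory, which resolves the target database with `getattr(IcebergDatabases, f"{level}_DATABASE")` and combines an optional domain prefix with a suffix. A hypothetical sketch of that naming pattern; the real `IcebergTable` object and the package's exact name format may differ:

```python
# Hypothetical illustration of the getattr-based database lookup and the
# prefix/suffix name composition; classes and format are assumptions.
class IcebergDatabasesSketch:
    BRONZE_DATABASE = "iceberg_catalog.bronze"
    SILVER_DATABASE = "iceberg_catalog.silver"
    GOLD_DATABASE = "iceberg_catalog.gold"


def create_iceberg_table_sketch(
    dataset_id: str, level: str, prefix: str = "", suffix: str = ""
) -> str:
    # Pick the database for the requested medallion level.
    database = getattr(IcebergDatabasesSketch, f"{level}_DATABASE")
    # Join the non-empty parts into a table name.
    parts = [part for part in (prefix, dataset_id, suffix) if part]
    return f"{database}.{'_'.join(parts)}"


# e.g. 'iceberg_catalog.gold.aquastat_my_dataset_sws_disseminated'
print(
    create_iceberg_table_sketch(
        "my_dataset", "GOLD", prefix="aquastat", suffix="sws_disseminated"
    )
)
```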