sws-spark-dissemination-helper 0.0.159__tar.gz → 0.0.161__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sws-spark-dissemination-helper might be problematic.
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/PKG-INFO +1 -1
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/pyproject.toml +1 -1
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +77 -29
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +51 -2
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/constants.py +9 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/.gitignore +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/LICENSE +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/README.md +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/__init__.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/utils.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/tests/__init__.py +0 -0
- {sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/tests/test.py +0 -0
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/PKG-INFO +1 -1

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.159
+Version: 0.0.161
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://github.com/un-fao/fao-sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +77 -29

@@ -276,11 +276,17 @@ class SWSEasyIcebergSparkHelper:
         if not self.keep_history:
             final_query += "\nWHERE o.replaced_on IS NULL"
 
-        logging.info("Final query for merging observation and
+        logging.info("Final query for merging observation and observation_coordinates")
         logging.info(final_query)
 
         df_obs_denorm = self.spark.sql(final_query)
 
+        df_obs_denorm.writeTo(
+            self.iceberg_tables.DENORMALIZED_OBSERVATION.iceberg_id
+        ).createOrReplace()
+
+        logging.info(f"{self.iceberg_tables.DENORMALIZED_OBSERVATION.table} write")
+
         return df_obs_denorm
 
     def _gen_denormalized_observation_sql_from_tag(self) -> DataFrame:
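Note: `DataFrame.writeTo(...).createOrReplace()` used above is the standard Spark DataFrameWriterV2 API for materializing a DataFrame as a table (here an Iceberg table). A minimal sketch of the same pattern, assuming a Spark session with an Iceberg catalog named `demo` already configured; the catalog, namespace, and table names below are placeholders, not the package's real identifiers:

```python
from pyspark.sql import SparkSession

# Assumes spark-defaults already register an Iceberg catalog called "demo".
spark = SparkSession.builder.appName("iceberg-write-sketch").getOrCreate()

df = spark.createDataFrame(
    [(1, "2019", 42.0), (2, "2020", 7.5)],
    ["observation_id", "year", "value"],
)

# DataFrameWriterV2: create the table if missing, otherwise replace its contents.
df.writeTo("demo.bronze.denormalized_observation_sketch").createOrReplace()

# Read it back through the same fully qualified identifier.
spark.table("demo.bronze.denormalized_observation_sketch").show()
```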
@@ -418,7 +424,13 @@ class SWSEasyIcebergSparkHelper:
 
         df_meta_denorm = self.spark.sql(
             f"""
-            select
+            select
+                /*+
+                    BROADCAST({self.dataset_tables.METADATA_ELEMENT_TYPE.iceberg_id}),
+                    BROADCAST({self.dataset_tables.METADATA_TYPE.iceberg_id}),
+                    BROADCAST({self.dataset_tables.LANGUAGE.iceberg_id}),
+                */
+                m.observation as observation_id,
                 mt.code as type,
                 met.code as element_type,
                 l.country_code as language,
@@ -431,7 +443,11 @@ class SWSEasyIcebergSparkHelper:
             """
         )
 
-
+        df_meta_denorm.writeTo(
+            self.iceberg_tables.DENORMALIZED_METADATA.iceberg_id
+        ).createOrReplace()
+
+        logging.info(f"{self.iceberg_tables.DENORMALIZED_METADATA.table} write")
 
         return df_meta_denorm
 
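The `/*+ BROADCAST(...) */` comment added to the metadata query is a standard Spark SQL join hint: it asks the optimizer to broadcast the small dimension tables (metadata type, element type, language) to every executor instead of shuffling the large observation side. A self-contained sketch of the same hint with made-up table names (not the package's identifiers):

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("broadcast-hint-sketch").getOrCreate()

# Two toy tables: a larger fact table and a small dimension table.
spark.createDataFrame(
    [(1, "EN"), (2, "FR")], ["obs_id", "lang_code"]
).createOrReplaceTempView("fact")
spark.createDataFrame(
    [("EN", "English"), ("FR", "French")], ["code", "label"]
).createOrReplaceTempView("dim_language")

# The hint asks Spark to broadcast dim_language rather than shuffle both sides.
df = spark.sql("""
    SELECT /*+ BROADCAST(dim_language) */
        f.obs_id,
        d.label AS language
    FROM fact f
    JOIN dim_language d ON f.lang_code = d.code
""")
df.explain()  # the physical plan should show a BroadcastHashJoin
```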
@@ -456,25 +472,31 @@ class SWSEasyIcebergSparkHelper:
         )
 
     def _gen_grouped_metadata_sql(self) -> DataFrame:
-        …
-            .groupby("observation_id")
-            .agg(F.collect_list("metadata").alias("metadata"))
+        df_meta_grouped = self.spark.sql(
+            f"""
+            SELECT
+                observation_id,
+                collect_list(
+                    map(
+                        'type', type,
+                        'element_type', element_type,
+                        'language', language,
+                        'value', value
+                    )
+                ) AS metadata
+            FROM {self.iceberg_tables.DENORMALIZED_METADATA.iceberg_id}
+            GROUP BY observation_id
+            """
         )
 
+        df_meta_grouped.writeTo(
+            self.iceberg_tables.GROUPED_METADATA.iceberg_id
+        ).createOrReplace()
+
+        logging.info(f"{self.iceberg_tables.GROUPED_METADATA.table} write")
+
+        return df_meta_grouped
+
     def _gen_denormalied_data(self) -> DataFrame:
         return (
             self._gen_denormalized_observation()
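The rewritten `_gen_grouped_metadata_sql` collapses the per-observation metadata rows into a single array column with `collect_list(map(...))`, replacing the previous `groupby(...).agg(F.collect_list(...))` DataFrame code. A small standalone sketch of that aggregation, using toy data rather than the package's schema:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("collect-list-map-sketch").getOrCreate()

spark.createDataFrame(
    [
        (1, "NOTE", "GENERAL", "en", "estimated"),
        (1, "FLAG", "SOURCE", "en", "official"),
        (2, "NOTE", "GENERAL", "fr", "provisoire"),
    ],
    ["observation_id", "type", "element_type", "language", "value"],
).createOrReplaceTempView("denormalized_metadata")

# One row per observation_id, with all metadata packed into an array of maps.
grouped = spark.sql("""
    SELECT
        observation_id,
        collect_list(
            map(
                'type', type,
                'element_type', element_type,
                'language', language,
                'value', value
            )
        ) AS metadata
    FROM denormalized_metadata
    GROUP BY observation_id
""")
grouped.show(truncate=False)
```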
@@ -488,15 +510,15 @@ class SWSEasyIcebergSparkHelper:
         )
 
     def _gen_denormalied_data_sql(self) -> DataFrame:
-        return (
-            …
+        return self.spark.sql(
+            f"""
+            SELECT
+                o.*,
+                m.metadata
+            FROM {self.iceberg_tables.GROUPED_METADATA.iceberg_id} AS o
+            LEFT JOIN {self.iceberg_tables.GROUPED_METADATA.iceberg_id} AS m
+                ON o.id = m.observation_id
+            """
         )
 
     def _gen_denormalied_data_sql_from_tag(self) -> DataFrame:
@@ -669,3 +691,29 @@ class SWSEasyIcebergSparkHelper:
         logging.debug(f"Tag with Added csv Table: {tag}")
 
         logging.info("Filtered data tags successfully written")
+        …
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +51 -2

@@ -496,8 +496,8 @@ class SWSGoldIcebergSparkHelper:
         logging.debug(f"Tag with Added Iceberg Table: {tag}")
 
         new_diss_table = BaseDisseminatedTagTable(
-            id=f"{self.domain_code.lower()}
-            name=f"{self.domain_code} gold
+            id=f"{self.domain_code.lower()}_gold_sws_csv",
+            name=f"{self.domain_code} gold SWS csv",
             description="Gold table containing the tag data without any processing cached in csv",
             layer=TableLayer.GOLD,
             private=True,
@@ -515,3 +515,52 @@ class SWSGoldIcebergSparkHelper:
         logging.debug(f"Tag with Added csv Table: {tag}")
 
         return df
+
+    def write_gold_faostat_dissemination_tag(
+        self, df: DataFrame, tags: Tags
+    ) -> DataFrame:
+        # Get or create a new tag
+        tag = get_or_create_tag(tags, self.dataset_id, self.tag_name, self.tag_name)
+        logging.debug(f"Tag: {tag}")
+
+        new_iceberg_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_faostat_iceberg",
+            name=f"{self.domain_code} gold FAOSTAT Iceberg",
+            description="Gold table containing the tag data in FAOSTAT format",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.ICEBERG,
+            database=IcebergDatabases.GOLD_DATABASE,
+            table=self.iceberg_tables.GOLD_FAOSTAT.table,
+            path=self.iceberg_tables.GOLD_FAOSTAT.path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_iceberg_table,
+        )
+        logging.debug(f"Tag with Added Iceberg Table: {tag}")
+
+        new_diss_table = BaseDisseminatedTagTable(
+            id=f"{self.domain_code.lower()}_gold_faosta_csv",
+            name=f"{self.domain_code} gold FAOSTAT csv",
+            description="Gold table containing the tag data in FAOSTAT format in csv",
+            layer=TableLayer.GOLD,
+            private=True,
+            type=TableType.CSV,
+            path=self.iceberg_tables.GOLD_FAOSTAT.csv_path,
+            structure={"columns": df.schema.jsonValue()["fields"]},
+        )
+        tag = upsert_disseminated_table(
+            sws_tags=tags,
+            tag=tag,
+            dataset_id=self.dataset_id,
+            tag_name=self.tag_name,
+            table=new_diss_table,
+        )
+        logging.debug(f"Tag with Added csv Table: {tag}")
+
+        return df
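Both new table registrations describe the disseminated table with `structure={"columns": df.schema.jsonValue()["fields"]}`. `StructType.jsonValue()` is standard PySpark: it returns the schema as a plain dict whose `"fields"` entry is a list of per-column dictionaries (name, type, nullable, metadata). A quick illustration with a toy DataFrame (column names are invented for the example):

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("schema-json-sketch").getOrCreate()

df = spark.createDataFrame([(1, "wheat", 12.5)], ["area_code", "item", "value"])

# jsonValue() returns the schema as a plain dict; "fields" lists each column.
columns = df.schema.jsonValue()["fields"]
print(columns)
# [{'name': 'area_code', 'type': 'long', 'nullable': True, 'metadata': {}},
#  {'name': 'item', 'type': 'string', 'nullable': True, 'metadata': {}},
#  {'name': 'value', 'type': 'double', 'nullable': True, 'metadata': {}}]
```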
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/constants.py +9 -0

@@ -254,6 +254,9 @@ class IcebergTables:
         self.__tag_name = tag_name
 
         # TODO Fix later with a more appropriate DATABASE
+        self.DENORMALIZED_OBSERVATION = self._create_iceberg_table("BRONZE", suffix="denormalized_observation")
+        self.DENORMALIZED_METADATA = self._create_iceberg_table("BRONZE", suffix="denormalized_metadata")
+        self.GROUPED_METADATA = self._create_iceberg_table("BRONZE", suffix="grouped_metadata")
         self.TABLE = self._create_iceberg_table("BRONZE")
         self.TABLE_FILTERED = self._create_iceberg_table("BRONZE", suffix="filtered")
         self.BRONZE = self._create_iceberg_table("BRONZE")
@@ -274,6 +277,12 @@ class IcebergTables:
         self.GOLD_PRE_SDMX = self._create_iceberg_table(
             "GOLD", prefix=domain, suffix="pre_sdmx"
         )
+        self.GOLD_FAOSTAT = self._create_iceberg_table(
+            "GOLD", prefix=domain, suffix="faostat"
+        )
+        self.GOLD_FAOSTAT_UNFILTERED = self._create_iceberg_table(
+            "GOLD", prefix=domain, suffix="faostat_unfiltered"
+        )
 
     def _create_iceberg_table(
         self, level: str, prefix: str = "", suffix: str = ""
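Elsewhere in this diff these registered entries are consumed through their `iceberg_id` (a fully qualified identifier passed to `spark.sql` and `writeTo`) and their `table`/`path`/`csv_path` attributes. The exact string `_create_iceberg_table` builds is not visible in this diff, so the identifier in the sketch below is a placeholder; it only illustrates reading such a table back with the standard `spark.table` API:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("read-gold-sketch").getOrCreate()

# Placeholder identifier: the real value would come from
# IcebergTables.GOLD_FAOSTAT.iceberg_id, whose construction is not shown here.
gold_faostat_id = "iceberg_catalog.gold.domain_faostat"

# Same identifier style used by writeTo(...).createOrReplace() in the hunks above.
df = spark.table(gold_faostat_id)
df.printSchema()
```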
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/.gitignore RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/LICENSE RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/README.md RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/__init__.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/src/sws_spark_dissemination_helper/utils.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/tests/__init__.py RENAMED
File without changes
{sws_spark_dissemination_helper-0.0.159 → sws_spark_dissemination_helper-0.0.161}/tests/test.py RENAMED
File without changes