sws-spark-dissemination-helper 0.0.140__tar.gz → 0.0.142__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +18 -9
  4. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/constants.py +38 -19
  5. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/.gitignore +0 -0
  6. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/LICENSE +0 -0
  7. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/README.md +0 -0
  8. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/old_requirements.txt +0 -0
  9. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/requirements.txt +0 -0
  10. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/src/sws_spark_dissemination_helper/utils.py +0 -0
  17. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/tests/__init__.py +0 -0
  18. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.142}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sws-spark-dissemination-helper
3
- Version: 0.0.140
3
+ Version: 0.0.142
4
4
  Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
5
5
  Project-URL: Repository, https://bitbucket.org/cioapps/sws-it-python-spark-dissemination-helper
6
6
  Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "sws-spark-dissemination-helper"
7
- version = "0.0.140"
7
+ version = "0.0.142"
8
8
  dependencies = [
9
9
  "annotated-types==0.7.0",
10
10
  "boto3==1.36.18",
@@ -235,7 +235,7 @@ class SWSEasyIcebergSparkHelper:
235
235
  # ----------------
236
236
 
237
237
  select_statement = """
238
- select o.id,
238
+ o.id,
239
239
  o.value,
240
240
  u.email,
241
241
  o.created_on,
@@ -243,24 +243,33 @@ class SWSEasyIcebergSparkHelper:
243
243
  o.version"""
244
244
 
245
245
  from_statement = f"""
246
- from {self.dataset_tables.OBSERVATION.iceberg_id} o
247
- join {self.dataset_tables.USER.iceberg_id} u ON u.id = o.created_by
248
- left join {self.dataset_tables.OBSERVATION_COORDINATE.iceberg_id} as oc on oc.id = o.observation_coordinates"""
246
+ FROM {self.dataset_tables.OBSERVATION.iceberg_id} o
247
+ JOIN {self.dataset_tables.USER.iceberg_id} u ON u.id = o.created_by
248
+ LEFT JOIN {self.dataset_tables.OBSERVATION_COORDINATE.iceberg_id} AS oc ON oc.id = o.observation_coordinates"""
249
+
250
+ hint_statement = ""
249
251
 
250
252
  id_to_flag_col_mapping = {v: k for k, v in self.flag_col_to_id_mapping.items()}
251
253
  for flag_col in self.flag_columns:
252
- select_statement += f",\no.{id_to_flag_col_mapping[flag_col]} as {flag_col}"
254
+ select_statement += f",\no.{id_to_flag_col_mapping[flag_col]} AS {flag_col}"
253
255
 
254
256
  id_to_dim_col_mapping = {v: k for k, v in self.dim_col_to_id_mapping.items()}
255
257
  for i, (dim_col, cl) in enumerate(
256
258
  zip(self.dim_columns_w_time, self.dataset_tables.CODELISTS)
257
259
  ):
258
- select_statement += f",\nd{i}.code as {dim_col}"
259
- from_statement += f"\nleft join {cl.iceberg_id} d{i} on d{i}.id = oc.{id_to_dim_col_mapping[dim_col]}"
260
+ select_statement += f",\nd{i}.code AS {dim_col}"
261
+ from_statement += f"\nLEFT JOIN {cl.iceberg_id} d{i} ON d{i}.id = oc.{id_to_dim_col_mapping[dim_col]}"
262
+ hint_statement = (
263
+ hint_statement + f", BROADCAST({cl.iceberg_id})"
264
+ if hint_statement
265
+ else f"BROADCAST({cl.iceberg_id})"
266
+ )
267
+
268
+ hint_statement = "/*+ " + hint_statement + " */"
260
269
 
261
- final_query = select_statement + from_statement
270
+ final_query = "SELECT " + hint_statement + select_statement + from_statement
262
271
  if not self.keep_history:
263
- final_query += "\nwhere o.replaced_on is null"
272
+ final_query += "\nWHERE o.replaced_on IS NULL"
264
273
 
265
274
  logging.info("Final query for merging observation and observation_coordinares")
266
275
  logging.info(final_query)
@@ -1,3 +1,5 @@
1
+ from typing import List
2
+
1
3
  from pyspark.sql.functions import col, lit
2
4
 
3
5
  SPARK_POSTGRES_DRIVER = "org.postgresql.Driver"
@@ -34,26 +36,56 @@ class DomainFilters:
34
36
  class DatasetDatatables:
35
37
 
36
38
  class __SWSDatatable:
37
- def __init__(self, id: str, name: str, schema: str):
39
+ def __init__(
40
+ self, id: str, name: str, schema: str, join_columns: List[str] = []
41
+ ):
38
42
  self.id = id
39
43
  self.name = name
40
44
  self.schema = schema
45
+ self.join_columns = join_columns
46
+
47
+ # Aggregation Tables
48
+ AGGREGATES_COMPOSITION = __SWSDatatable(
49
+ id="datatables.aggregates_composition",
50
+ name="Aggregation - Composition",
51
+ schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, aggregation_type STRING, group_code STRING, child_code STRING, group_name STRING, child_name STRING, link_code STRING, factor STRING",
52
+ )
53
+ AGGREGATES_ELEMENTS = __SWSDatatable(
54
+ id="datatables.aggregates_elements",
55
+ name="Aggregation - Aggregates per elements",
56
+ schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, element STRING, aggregation_type STRING, code STRING",
57
+ )
41
58
 
42
59
  # Dissemination Tables
43
60
  DISSEMINATION_TYPE_LIST = __SWSDatatable(
44
61
  id="datatables.dissemination_{type}_list",
45
62
  name="Dissemination - {type} list",
46
63
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, code STRING, name STRING, aggregation_type STRING, dissemination BOOLEAN, aggregation BOOLEAN",
64
+ join_columns=["domain", "code"],
47
65
  )
48
66
  DISSEMINATION_EXCEPTIONS = __SWSDatatable(
49
67
  id="datatables.dissemination_exception",
50
68
  name="Dissemination - Exceptions",
51
69
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, dim1_code STRING, dim2_code STRING, dim3_code STRING, dim4_code STRING, dim5_code STRING, dim6_code STRING, dim7_code STRING, status_flag STRING, method_flag STRING, dissemination BOOLEAN, aggregation BOOLEAN, note STRING",
70
+ join_columns=[
71
+ "domain",
72
+ " dim1_code",
73
+ " dim2_code",
74
+ " dim3_code",
75
+ " dim4_code",
76
+ " dim5_code",
77
+ " dim6_code",
78
+ " dim7_code",
79
+ " status_flag",
80
+ " method_flag",
81
+ ],
52
82
  )
83
+ # TODO Deprecate
53
84
  DISSEMINATION_ITEM_LIST_FAOSTAT = __SWSDatatable(
54
85
  id="datatables.dissemination_item_list_faostat",
55
86
  name="Dissemination - Item list - FAOSTAT",
56
87
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, code STRING, name STRING, aggregation_type STRING, dissemination BOOLEAN, aggregation BOOLEAN",
88
+ join_columns=["domain", "code"],
57
89
  )
58
90
 
59
91
  # Mapping Tables
@@ -61,34 +93,23 @@ class DatasetDatatables:
61
93
  id="datatables.aggregates_mapping_domains_id",
62
94
  name="Mapping - Domains ID",
63
95
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, domain_name STRING, sws_source_id STRING, sws_destination_id STRING",
96
+ join_columns=["domain", "sws_source_id"],
64
97
  )
65
98
  MAPPING_CODELIST_TYPE = __SWSDatatable(
66
99
  id="datatables.mapping_codelist_type",
67
100
  name="Mapping Codelist type",
68
101
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, col_name STRING, col_type STRING",
102
+ join_columns=["domain", "col_name"],
69
103
  )
70
104
  MAPPING_CODE_CORRECTION = __SWSDatatable(
71
105
  id="datatables.aggregates_mapping_code_correction",
72
106
  name="Mapping - Code correction",
73
107
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, old_code STRING, new_code STRING, var_type STRING, delete BOOLEAN, multiplier FLOAT, mapping_type STRING",
74
- )
75
- MAPPING_SDMX_COLUMN_NAMES = __SWSDatatable(
76
- id="datatables.mapping_sdmx_col_names",
77
- name="Mapping - SDMX column names",
78
- schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, internal_name STRING, external_name STRING, delete BOOLEAN, add BOOLEAN, default_value STRING",
79
- )
80
- MAPPING_SDMX_CODES = __SWSDatatable(
81
- id="datatables.mapping_pre_dissemination",
82
- name="Mapping - Pre dissemination",
83
- schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, internal_code STRING, external_code STRING, var_type STRING, delete BOOLEAN, multiplier FLOAT, mapping_type STRING",
84
- )
85
- MAPPING_UNITS_OF_MEASURE = __SWSDatatable(
86
- id="datatables.mapping_units_of_measure",
87
- name="Mapping - Units of measure",
88
- schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, sws_code STRING, sws_multiplier INT, sdmx_code STRING, sdmx_multiplier INT, value_multiplier INT, delete BOOLEAN, mapping_type STRING",
108
+ join_columns=["domain", "old_code", "var_type", "mapping_type"],
89
109
  )
90
110
 
91
111
  # Non-SWS Sources Tables
112
+ # TODO To deprecate
92
113
  FAOSTAT_CODE_MAPPING = __SWSDatatable(
93
114
  id="datatables.faostat_code_mapping",
94
115
  name="FAOSTAT Code Mapping",
@@ -226,9 +247,7 @@ class IcebergTables:
226
247
  self.SILVER = self._create_iceberg_table("SILVER", prefix=domain)
227
248
 
228
249
  # GOLD tables with specific suffixes
229
- self.GOLD_SWS = self._create_iceberg_table(
230
- "GOLD", prefix=domain, suffix="sws"
231
- )
250
+ self.GOLD_SWS = self._create_iceberg_table("GOLD", prefix=domain, suffix="sws")
232
251
  self.GOLD_SDMX = self._create_iceberg_table(
233
252
  "GOLD", prefix=domain, suffix="sdmx_disseminated"
234
253
  )