sws-spark-dissemination-helper 0.0.140__tar.gz → 0.0.141__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +18 -9
  4. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/constants.py +12 -15
  5. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/.gitignore +0 -0
  6. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/LICENSE +0 -0
  7. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/README.md +0 -0
  8. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/old_requirements.txt +0 -0
  9. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/requirements.txt +0 -0
  10. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/src/sws_spark_dissemination_helper/utils.py +0 -0
  17. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/tests/__init__.py +0 -0
  18. {sws_spark_dissemination_helper-0.0.140 → sws_spark_dissemination_helper-0.0.141}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sws-spark-dissemination-helper
3
- Version: 0.0.140
3
+ Version: 0.0.141
4
4
  Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
5
5
  Project-URL: Repository, https://bitbucket.org/cioapps/sws-it-python-spark-dissemination-helper
6
6
  Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "sws-spark-dissemination-helper"
7
- version = "0.0.140"
7
+ version = "0.0.141"
8
8
  dependencies = [
9
9
  "annotated-types==0.7.0",
10
10
  "boto3==1.36.18",
@@ -235,7 +235,7 @@ class SWSEasyIcebergSparkHelper:
235
235
  # ----------------
236
236
 
237
237
  select_statement = """
238
- select o.id,
238
+ o.id,
239
239
  o.value,
240
240
  u.email,
241
241
  o.created_on,
@@ -243,24 +243,33 @@ class SWSEasyIcebergSparkHelper:
243
243
  o.version"""
244
244
 
245
245
  from_statement = f"""
246
- from {self.dataset_tables.OBSERVATION.iceberg_id} o
247
- join {self.dataset_tables.USER.iceberg_id} u ON u.id = o.created_by
248
- left join {self.dataset_tables.OBSERVATION_COORDINATE.iceberg_id} as oc on oc.id = o.observation_coordinates"""
246
+ FROM {self.dataset_tables.OBSERVATION.iceberg_id} o
247
+ JOIN {self.dataset_tables.USER.iceberg_id} u ON u.id = o.created_by
248
+ LEFT JOIN {self.dataset_tables.OBSERVATION_COORDINATE.iceberg_id} AS oc ON oc.id = o.observation_coordinates"""
249
+
250
+ hint_statement = ""
249
251
 
250
252
  id_to_flag_col_mapping = {v: k for k, v in self.flag_col_to_id_mapping.items()}
251
253
  for flag_col in self.flag_columns:
252
- select_statement += f",\no.{id_to_flag_col_mapping[flag_col]} as {flag_col}"
254
+ select_statement += f",\no.{id_to_flag_col_mapping[flag_col]} AS {flag_col}"
253
255
 
254
256
  id_to_dim_col_mapping = {v: k for k, v in self.dim_col_to_id_mapping.items()}
255
257
  for i, (dim_col, cl) in enumerate(
256
258
  zip(self.dim_columns_w_time, self.dataset_tables.CODELISTS)
257
259
  ):
258
- select_statement += f",\nd{i}.code as {dim_col}"
259
- from_statement += f"\nleft join {cl.iceberg_id} d{i} on d{i}.id = oc.{id_to_dim_col_mapping[dim_col]}"
260
+ select_statement += f",\nd{i}.code AS {dim_col}"
261
+ from_statement += f"\nLEFT JOIN {cl.iceberg_id} d{i} ON d{i}.id = oc.{id_to_dim_col_mapping[dim_col]}"
262
+ hint_statement = (
263
+ hint_statement + f", BROADCAST({cl.iceberg_id})"
264
+ if hint_statement
265
+ else f"BROADCAST({cl.iceberg_id})"
266
+ )
267
+
268
+ hint_statement = "/*+ " + hint_statement + " */"
260
269
 
261
- final_query = select_statement + from_statement
270
+ final_query = "SELECT " + hint_statement + select_statement + from_statement
262
271
  if not self.keep_history:
263
- final_query += "\nwhere o.replaced_on is null"
272
+ final_query += "\nWHERE o.replaced_on IS NULL"
264
273
 
265
274
  logging.info("Final query for merging observation and observation_coordinares")
266
275
  logging.info(final_query)
@@ -39,6 +39,18 @@ class DatasetDatatables:
39
39
  self.name = name
40
40
  self.schema = schema
41
41
 
42
+ # Aggregation Tables
43
+ AGGREGATES_COMPOSITION = __SWSDatatable(
44
+ id="datatables.aggregates_composition",
45
+ name="Aggregation - Composition",
46
+ schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, aggregation_type STRING, group_code STRING, child_code STRING, group_name STRING, child_name STRING, link_code STRING, factor STRING",
47
+ )
48
+ AGGREGATES_ELEMENTS = __SWSDatatable(
49
+ id="datatables.aggregates_elements",
50
+ name="Aggregation - Aggregates per elements",
51
+ schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, element STRING, aggregation_type STRING, code STRING",
52
+ )
53
+
42
54
  # Dissemination Tables
43
55
  DISSEMINATION_TYPE_LIST = __SWSDatatable(
44
56
  id="datatables.dissemination_{type}_list",
@@ -72,21 +84,6 @@ class DatasetDatatables:
72
84
  name="Mapping - Code correction",
73
85
  schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, old_code STRING, new_code STRING, var_type STRING, delete BOOLEAN, multiplier FLOAT, mapping_type STRING",
74
86
  )
75
- MAPPING_SDMX_COLUMN_NAMES = __SWSDatatable(
76
- id="datatables.mapping_sdmx_col_names",
77
- name="Mapping - SDMX column names",
78
- schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, internal_name STRING, external_name STRING, delete BOOLEAN, add BOOLEAN, default_value STRING",
79
- )
80
- MAPPING_SDMX_CODES = __SWSDatatable(
81
- id="datatables.mapping_pre_dissemination",
82
- name="Mapping - Pre dissemination",
83
- schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, internal_code STRING, external_code STRING, var_type STRING, delete BOOLEAN, multiplier FLOAT, mapping_type STRING",
84
- )
85
- MAPPING_UNITS_OF_MEASURE = __SWSDatatable(
86
- id="datatables.mapping_units_of_measure",
87
- name="Mapping - Units of measure",
88
- schema=f"{DATATABLE_COLUMNS_SCHEMA}, domain STRING, sws_code STRING, sws_multiplier INT, sdmx_code STRING, sdmx_multiplier INT, value_multiplier INT, delete BOOLEAN, mapping_type STRING",
89
- )
90
87
 
91
88
  # Non-SWS Sources Tables
92
89
  FAOSTAT_CODE_MAPPING = __SWSDatatable(