sws-spark-dissemination-helper 0.0.131__tar.gz → 0.0.133__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +3 -2
  4. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/.gitignore +0 -0
  5. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/LICENSE +0 -0
  6. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/README.md +0 -0
  7. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/old_requirements.txt +0 -0
  8. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/requirements.txt +0 -0
  9. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  10. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/constants.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/src/sws_spark_dissemination_helper/utils.py +0 -0
  17. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/tests/__init__.py +0 -0
  18. {sws_spark_dissemination_helper-0.0.131 → sws_spark_dissemination_helper-0.0.133}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sws-spark-dissemination-helper
3
- Version: 0.0.131
3
+ Version: 0.0.133
4
4
  Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
5
5
  Project-URL: Repository, https://bitbucket.org/cioapps/sws-it-python-spark-dissemination-helper
6
6
  Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "sws-spark-dissemination-helper"
7
- version = "0.0.131"
7
+ version = "0.0.133"
8
8
  dependencies = [
9
9
  "annotated-types==0.7.0",
10
10
  "boto3==1.36.18",
@@ -249,11 +249,12 @@ class SWSEasyIcebergSparkHelper:
249
249
  join {self.dataset_tables.USER.iceberg_id} u ON u.id = o.created_by
250
250
  left join {self.dataset_tables.OBSERVATION_COORDINATE.iceberg_id} as oc on oc.id = o.observation_coordinates"""
251
251
 
252
+ id_to_dim_col_mapping = {v: k for k, v in self.dim_col_to_id_mapping.items()}
252
253
  for i, (dim, cl) in enumerate(
253
254
  zip(self.dim_columns_w_time, self.dataset_tables.CODELISTS)
254
255
  ):
255
- select_statement += f"",\nd{i}.code as {dim}"
256
- from_statement += f"\nleft join {cl.iceberg_id} d{i} on d{i}.id = oc.{dim}"
256
+ select_statement += f",\nd{i}.code as {dim}"
257
+ from_statement += f"\nleft join {cl.iceberg_id} d{i} on d{i}.id = oc.{id_to_dim_col_mapping[dim]}"
257
258
 
258
259
  final_query = select_statement + from_statement
259
260
  if not self.keep_history: