sws-spark-dissemination-helper 0.0.133__tar.gz → 0.0.134__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18) hide show
  1. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +9 -6
  4. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/.gitignore +0 -0
  5. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/LICENSE +0 -0
  6. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/README.md +0 -0
  7. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/old_requirements.txt +0 -0
  8. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/requirements.txt +0 -0
  9. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  10. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/constants.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/src/sws_spark_dissemination_helper/utils.py +0 -0
  17. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/tests/__init__.py +0 -0
  18. {sws_spark_dissemination_helper-0.0.133 → sws_spark_dissemination_helper-0.0.134}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.133
+Version: 0.0.134
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://bitbucket.org/cioapps/sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "sws-spark-dissemination-helper"
-version = "0.0.133"
+version = "0.0.134"
 dependencies = [
     "annotated-types==0.7.0",
     "boto3==1.36.18",
@@ -240,21 +240,24 @@ class SWSEasyIcebergSparkHelper:
             u.email,
             o.created_on,
             o.replaced_on,
-            o.version,
-            o.flag_obs_status,
-            o.flag_method"""
+            o.version"""
 
         from_statement = f"""
         from {self.dataset_tables.OBSERVATION.iceberg_id} o
         join {self.dataset_tables.USER.iceberg_id} u ON u.id = o.created_by
         left join {self.dataset_tables.OBSERVATION_COORDINATE.iceberg_id} as oc on oc.id = o.observation_coordinates"""
 
+        for flag_col in self.flag_columns:
+            select_statement += (
+                f",\no.{self.flag_col_to_id_mapping[flag_col]} as {flag_col}"
+            )
+
         id_to_dim_col_mapping = {v: k for k, v in self.dim_col_to_id_mapping.items()}
-        for i, (dim, cl) in enumerate(
+        for i, (dim_col, cl) in enumerate(
             zip(self.dim_columns_w_time, self.dataset_tables.CODELISTS)
         ):
-            select_statement += f",\nd{i}.code as {dim}"
-            from_statement += f"\nleft join {cl.iceberg_id} d{i} on d{i}.id = oc.{id_to_dim_col_mapping[dim]}"
+            select_statement += f",\nd{i}.code as {dim_col}"
+            from_statement += f"\nleft join {cl.iceberg_id} d{i} on d{i}.id = oc.{id_to_dim_col_mapping[dim_col]}"
 
         final_query = select_statement + from_statement
         if not self.keep_history: