sws-spark-dissemination-helper 0.0.157__tar.gz → 0.0.158__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (16)
  1. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/PKG-INFO +1 -1
  2. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/pyproject.toml +1 -1
  3. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +31 -19
  4. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/.gitignore +0 -0
  5. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/LICENSE +0 -0
  6. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/README.md +0 -0
  7. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  8. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/SWSDatatablesExportHelper.py +0 -0
  9. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/SWSEasyIcebergSparkHelper.py +0 -0
  10. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +0 -0
  11. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
  12. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/constants.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/src/sws_spark_dissemination_helper/utils.py +0 -0
  15. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/tests/__init__.py +0 -0
  16. {sws_spark_dissemination_helper-0.0.157 → sws_spark_dissemination_helper-0.0.158}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.157
+Version: 0.0.158
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://github.com/un-fao/fao-sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "sws-spark-dissemination-helper"
-version = "0.0.157"
+version = "0.0.158"
 dependencies = [
     "annotated-types==0.7.0",
     "boto3>=1.36.18",
@@ -94,25 +94,37 @@ class SWSPostgresSparkReader:
 
             logging.info(f"{pg_table} read start")
 
-            # Read observations from the PostgreSQL table into a DataFrame
-            df = (
-                self.spark.read.format("jdbc")
-                .option("customSchema", custom_schema)
-                .option("dbtable", pg_table)
-                .option("partitionColumn", partition_column)
-                .option("lowerBound", min_id)
-                .option("upperBound", max_id)
-                .option("numPartitions", num_partitions)
-                .option("fetchsize", "1000")
-                .option("url", self.jdbc_url)
-                .option("user", self.jdbc_conn_properties["user"])
-                .option("password", self.jdbc_conn_properties["password"])
-                .option("driver", SPARK_POSTGRES_DRIVER)
-                .load()
-                # .repartition(1024, partition_column)
-                # .sortWithinPartitions(partition_column)
-                # .cache()
-            )
+            if min_id is None or max_id is None:
+                df = (
+                    self.spark.read.format("jdbc")
+                    .option("customSchema", custom_schema)
+                    .option("dbtable", pg_table)
+                    .option("fetchsize", "1000")
+                    .option("url", self.jdbc_url)
+                    .option("user", self.jdbc_conn_properties["user"])
+                    .option("password", self.jdbc_conn_properties["password"])
+                    .option("driver", SPARK_POSTGRES_DRIVER)
+                    .load()
+                )
+            else:
+                df = (
+                    self.spark.read.format("jdbc")
+                    .option("customSchema", custom_schema)
+                    .option("dbtable", pg_table)
+                    .option("partitionColumn", partition_column)
+                    .option("lowerBound", min_id)
+                    .option("upperBound", max_id)
+                    .option("numPartitions", num_partitions)
+                    .option("fetchsize", "1000")
+                    .option("url", self.jdbc_url)
+                    .option("user", self.jdbc_conn_properties["user"])
+                    .option("password", self.jdbc_conn_properties["password"])
+                    .option("driver", SPARK_POSTGRES_DRIVER)
+                    .load()
+                    # .repartition(1024, partition_column)
+                    # .sortWithinPartitions(partition_column)
+                    # .cache()
+                )
         else:
             df = (
                 self.spark.read.format("jdbc")