sws-spark-dissemination-helper 0.0.84__tar.gz → 0.0.85__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/PKG-INFO +2 -2
  2. sws_spark_dissemination_helper-0.0.85/old_requirements.txt +23 -0
  3. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/pyproject.toml +2 -2
  4. sws_spark_dissemination_helper-0.0.85/requirements.txt +23 -0
  5. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/SWSGoldIcebergSparkHelper.py +4 -349
  6. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/SWSPostgresSparkReader.py +56 -0
  7. sws_spark_dissemination_helper-0.0.84/requirements.txt +0 -23
  8. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/.gitignore +0 -0
  9. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/.pipeline/bitbucket-pipelines.yml +0 -0
  10. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/.pipeline/pyproject.toml +0 -0
  11. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/.python-version +0 -0
  12. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/.wip/SWSBaseIcebergSparkHelper.py +0 -0
  13. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/.wip/model.py +0 -0
  14. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/LICENSE +0 -0
  15. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/README.md +0 -0
  16. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/SWSBronzeIcebergSparkHelper.py +0 -0
  17. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/SWSSilverIcebergSparkHelper.py +0 -0
  18. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/__init__.py +0 -0
  19. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/constants.py +0 -0
  20. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/src/sws_spark_dissemination_helper/utils.py +0 -0
  21. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/tests/__init__.py +0 -0
  22. {sws_spark_dissemination_helper-0.0.84 → sws_spark_dissemination_helper-0.0.85}/tests/test.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sws-spark-dissemination-helper
-Version: 0.0.84
+Version: 0.0.85
 Summary: A Python helper package providing streamlined Spark functions for efficient data dissemination processes
 Project-URL: Repository, https://bitbucket.org/cioapps/sws-it-python-spark-dissemination-helper
 Author-email: Daniele Mansillo <danielemansillo@gmail.com>
@@ -49,7 +49,7 @@ Requires-Dist: pytz==2025.1
 Requires-Dist: requests==2.32.3
 Requires-Dist: s3transfer==0.11.2
 Requires-Dist: six==1.17.0
-Requires-Dist: sws-api-client==1.4.4
+Requires-Dist: sws-api-client==1.4.5
 Requires-Dist: typing-extensions==4.12.2
 Requires-Dist: tzdata==2025.1
 Requires-Dist: urllib3==1.26.20
@@ -0,0 +1,23 @@
+annotated-types==0.7.0
+boto3==1.36.18
+botocore==1.36.18
+certifi==2025.1.31
+charset-normalizer==3.4.1
+idna==3.10
+jmespath==1.0.1
+numpy==2.0.2
+pandas==2.2.3
+py4j==0.10.9.7
+pydantic==2.10.6
+pydantic_core==2.27.2
+pyspark==3.5.4
+python-dateutil==2.9.0.post0
+python-dotenv==0.19.2
+pytz==2025.1
+requests==2.32.3
+s3transfer==0.11.2
+six==1.17.0
+sws_api_client==1.4.4
+typing_extensions==4.12.2
+tzdata==2025.1
+urllib3==1.26.20
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "sws-spark-dissemination-helper"
-version = "0.0.84"
+version = "0.0.85"
 dependencies = [
     "annotated-types==0.7.0",
     "boto3==1.36.18",
@@ -25,7 +25,7 @@ dependencies = [
     "requests==2.32.3",
     "s3transfer==0.11.2",
     "six==1.17.0",
-    "sws_api_client==1.4.4",
+    "sws_api_client==1.4.5",
     "typing_extensions==4.12.2",
     "tzdata==2025.1",
     "urllib3==1.26.20"
@@ -0,0 +1,23 @@
+annotated-types==0.7.0
+boto3==1.36.18
+botocore==1.36.18
+certifi==2025.1.31
+charset-normalizer==3.4.1
+idna==3.10
+jmespath==1.0.1
+numpy==2.0.2
+pandas==2.2.3
+py4j==0.10.9.7
+pydantic==2.10.6
+pydantic_core==2.27.2
+pyspark==3.5.4
+python-dateutil==2.9.0.post0
+python-dotenv==0.19.2
+pytz==2025.1
+requests==2.32.3
+s3transfer==0.11.2
+six==1.17.0
+sws_api_client==1.4.5
+typing_extensions==4.12.2
+tzdata==2025.1
+urllib3==1.26.20
@@ -11,7 +11,6 @@ from sws_api_client.tags import BaseDisseminatedTagTable, TableLayer, TableType
 from .constants import IcebergDatabases, IcebergTables
 from .SWSPostgresSparkReader import SWSPostgresSparkReader
 from .utils import (
-    col_is_null_or_empty,
     get_or_create_tag,
     save_cache_csv,
     upsert_disseminated_table,
@@ -47,11 +46,6 @@ class SWSGoldIcebergSparkHelper:
             self.flag_columns,
         ) = self._get_dim_time_flag_columns()
 
-        self.cols_to_keep_sdmx = (
-            self.dim_columns_w_time
-            + ["unit_of_measure", "unit_of_measure_multiplier", "value"]
-            + self.flag_columns
-        )
         self.cols_to_keep_sws = (
             self.dim_columns_w_time + ["value"] + self.flag_columns
         )
@@ -72,42 +66,6 @@ class SWSGoldIcebergSparkHelper:
             if col_name in self.dim_columns
         }
 
-        (
-            self.df_mapping_sdmx_codes,
-            self.df_mapping_sdmx_uom,
-            self.df_mapping_sdmx_col_names,
-        ) = sws_postgres_spark_reader.import_sdmx_mapping_datatables(
-            self.domain_code
-        )
-
-        self._check_column_mappings(self.df_mapping_sdmx_col_names)
-
-    def _check_column_mappings(
-        self,
-        df_mapping_sdmx_col_names: DataFrame,
-    ) -> DataFrame:
-        cols_to_keep_set = set(self.cols_to_keep_sdmx)
-        mapping_sdmx_col_names_internal_set = {
-            row[0]
-            for row in df_mapping_sdmx_col_names.filter(
-                col("internal_name").isNotNull() & (col("internal_name") != lit(""))
-            )
-            .select("internal_name")
-            .collect()
-        }
-
-        if not (cols_to_keep_set <= mapping_sdmx_col_names_internal_set):
-            missing_mappings = cols_to_keep_set - mapping_sdmx_col_names_internal_set
-
-            message = 'The mappings in the table "Mapping - SDMX columns names" are not correct'
-
-            if len(missing_mappings) > 0:
-                message += (
-                    f"\nThe following column mappings are missing: {missing_mappings}"
-                )
-
-            raise ValueError(message)
-
     def _get_dim_time_flag_columns(self) -> Tuple[List[str], List[str], str, List[str]]:
         """Extract the dimension columns with time, without time, the time column and the flag columns names."""
         dim_columns_w_time = [
@@ -128,292 +86,9 @@ class SWSGoldIcebergSparkHelper:
     def apply_diss_flag_filter(self, df: DataFrame) -> DataFrame:
         return df.filter(col("diss_flag"))
 
-    # TODO implement the delete flag
-    def apply_uom_mapping(
-        self,
-        df: DataFrame,
-    ) -> DataFrame:
-        logging.info("mapping unit of measure for dissemination")
-
-        df = df.withColumn(
-            "official_sws_uom",
-            F.when(
-                col_is_null_or_empty("unit_of_measure_base_unit"),
-                col("unit_of_measure"),
-            ).otherwise(col("unit_of_measure_base_unit")),
-        ).withColumn(
-            "official_sws_multiplier",
-            F.coalesce(F.log10(col("unit_of_measure_multiplier")), lit(0)).cast("int"),
-        )
-
-        delete_df_uom_mapping = self.df_mapping_sdmx_uom.filter(
-            col("delete")
-            & col_is_null_or_empty("sdmx_code")
-            & col("sdmx_multiplier").isNull()
-            & col("value_multiplier").isNull()
-        )
-
-        generic_df_uom_mapping = self.df_mapping_sdmx_uom.filter(
-            ~col("delete")
-            & col("sws_multiplier").isNull()
-            & col("sdmx_multiplier").isNull()
-            & (col("value_multiplier") == lit(0))
-        )
-
-        specific_df_uom_mapping = self.df_mapping_sdmx_uom.filter(
-            ~col("delete")
-            & col("sws_multiplier").isNotNull()
-            & col("sdmx_multiplier").isNotNull()
-        )
-
-        # Apply generic uom mapping
-        df = (
-            df.alias("d")
-            .join(
-                generic_df_uom_mapping.alias("m"),
-                col("d.official_sws_uom") == col("m.sws_code"),
-                "left",
-            )
-            .select("d.*", col("sdmx_code").alias("generic_sdmx_uom"))
-        )
-
-        # Apply specific uom mapping
-        df = (
-            df.alias("d")
-            .join(
-                specific_df_uom_mapping.alias("m"),
-                (col("d.official_sws_uom") == col("m.sws_code"))
-                & (col("d.official_sws_multiplier") == col("m.sws_multiplier")),
-                "left",
-            )
-            .select(
-                "d.*",
-                col("sdmx_code").alias("specific_sdmx_uom"),
-                col("sdmx_multiplier").alias("specific_sdmx_multiplier"),
-                (col("value") * F.pow(lit(10), col("value_multiplier"))).alias(
-                    "specific_sdmx_value"
-                ),
-            )
-        )
-
-        # Select the official values according to descending specificity
-        df = (
-            df.withColumn(
-                "unit_of_measure",
-                F.coalesce(
-                    col("specific_sdmx_uom"),
-                    col("generic_sdmx_uom"),
-                    col("official_sws_uom"),
-                ),
-            )
-            .withColumn(
-                "unit_of_measure_multiplier",
-                F.coalesce(
-                    col("specific_sdmx_multiplier"), col("official_sws_multiplier")
-                ),
-            )
-            .withColumn(
-                "value",
-                F.coalesce(col("specific_sdmx_value"), col("value")),
-            )
-            # Remove the columns that were not in the original dataset
-            .drop(
-                col("specific_sdmx_uom"),
-                col("specific_sdmx_multiplier"),
-                col("specific_sdmx_value"),
-                col("generic_sdmx_uom"),
-                col("official_sws_uom"),
-                col("official_sws_multiplier"),
-            )
-        )
-
-        return df
-
-    def keep_dim_uom_val_attr_columns(self, df: DataFrame):
-        return df.select(*self.cols_to_keep_sdmx)
-
     def keep_dim_val_attr_columns(self, df: DataFrame):
         return df.select(*self.cols_to_keep_sws)
 
-    def _apply_sdmx_dimension_codes_mapping_single(
-        self,
-        df: DataFrame,
-        dimension_name: str,
-        dimension_type: str,
-    ) -> DataFrame:
-        logging.info(
-            f"mapping column {dimension_name} of type {dimension_type} for dissemination"
-        )
-        return (
-            df.alias("d")
-            # Join the data with the standard mapping for the specific dimension
-            .join(
-                F.broadcast(
-                    self.df_mapping_sdmx_codes.filter(
-                        (col("domain").isNull() | (col("domain") == lit("")))
-                        & (col("var_type") == lit(dimension_type))
-                        & (
-                            col("mapping_type").isNull()
-                            | (col("mapping_type") == lit(""))
-                        )
-                    )
-                ).alias("m_standard"),
-                col(f"d.{dimension_name}") == col("m_standard.internal_code"),
-                "left",
-            )
-            # Join the data with the domain specific mapping for the specific dimension
-            .join(
-                F.broadcast(
-                    self.df_mapping_sdmx_codes.filter(
-                        (col("domain") == lit(self.domain_code))
-                        & (col("var_type") == lit(dimension_type))
-                        & (
-                            col("mapping_type").isNull()
-                            | (col("mapping_type") == lit(""))
-                        )
-                    )
-                ).alias("m_domain"),
-                col(f"d.{dimension_name}") == col("m_domain.internal_code"),
-                "left",
-            )
-            # Select only the columns we are interested in (this step is optional but recommended for debugging)
-            .select(
-                "d.*",
-                col("m_standard.external_code").alias("standard_external_code"),
-                col("m_standard.delete").alias("standard_delete"),
-                col("m_standard.multiplier").alias("standard_multiplier"),
-                col("m_domain.external_code").alias("domain_specific_external_code"),
-                col("m_domain.delete").alias("domain_specific_delete"),
-                col("m_domain.multiplier").alias("domain_specific_multiplier"),
-            )
-            # Filter out records to delete
-            .filter(
-                # Evaluate first the domain specific flag
-                F.when(
-                    col("domain_specific_delete").isNotNull(),
-                    ~col("domain_specific_delete"),
-                )
-                # Then evaluate the general flag
-                .when(
-                    col("standard_delete").isNotNull(), ~col("standard_delete")
-                ).otherwise(lit(True))
-            )
-            .withColumn(
-                dimension_name,
-                # Evaluate first the domain specific mapping
-                F.when(
-                    col("domain_specific_external_code").isNotNull(),
-                    col("domain_specific_external_code"),
-                )
-                # Then evaluate the general mapping
-                .when(
-                    col("standard_external_code").isNotNull(),
-                    col("standard_external_code"),
-                ).otherwise(col(dimension_name)),
-            )
-            .withColumn(
-                "value",
-                # Multiply first by the domain specific multiplier
-                F.when(
-                    col("domain_specific_multiplier").isNotNull(),
-                    col("value") * col("domain_specific_multiplier"),
-                )
-                # Then multiply by the general multiplier
-                .when(
-                    col("standard_external_code").isNotNull(),
-                    col("value") * col("standard_multiplier"),
-                ).otherwise(col("value")),
-            )
-            # Remove the columns that were not in the original dataset
-            .drop(
-                "standard_external_code",
-                "standard_delete",
-                "standard_multiplier",
-                "domain_specific_external_code",
-                "domain_specific_delete",
-                "domain_specific_multiplier",
-            )
-        )
-
-    def apply_sdmx_dimension_codes_mapping(self, df: DataFrame) -> DataFrame:
-        logging.info("Mapping codes to comply with SDMX standard")
-        for dimension_name, dimension_type in self.codelist_type_mapping.items():
-            df = df.transform(
-                self._apply_sdmx_dimension_codes_mapping_single,
-                dimension_name=dimension_name,
-                dimension_type=dimension_type,
-            )
-
-        return df
-
-    def drop_non_sdmx_columns(self, df: DataFrame) -> DataFrame:
-        cols_to_drop = [
-            row["internal_name"]
-            for row in self.df_mapping_sdmx_col_names.collect()
-            if row["delete"] is True
-        ]
-        logging.info(f"Dropping non-SDMX columns: {cols_to_drop}")
-        return df.drop(*cols_to_drop)
-
-    def apply_sdmx_column_names_mapping(self, df: DataFrame) -> DataFrame:
-        logging.info("Renaming columns to comply with SDMX standard")
-
-        mapping_sws_col_sdmx_col = {
-            row["internal_name"]: row["external_name"]
-            for row in self.df_mapping_sdmx_col_names.filter(
-                col("internal_name").isNotNull()
-                & (col("internal_name") != lit(""))
-                & ~col("delete")
-            ).collect()
-        }
-
-        logging.info(f"Column names mappings: {mapping_sws_col_sdmx_col}")
-
-        return df.withColumnsRenamed(mapping_sws_col_sdmx_col)
-
-    def add_sdmx_default_columns(self, df: DataFrame) -> DataFrame:
-        col_w_default_value = {
-            row["external_name"]: row["default_value"]
-            for row in self.df_mapping_sdmx_col_names.collect()
-            if row["add"] is True
-        }
-
-        logging.info("Adding SDMX columns with default values")
-
-        for name, default_value in col_w_default_value.items():
-            logging.info(
-                f"Adding SDMX column {name} with default value {default_value}"
-            )
-            df = df.withColumn(name, lit(default_value))
-
-        return df
-
-    def rearrange_sdmx_columns(self, df: DataFrame) -> DataFrame:
-        logging.info(
-            "Rearranging the columns to have the following order: Dimensions, TimeDimension, PrimaryMeasure, Attributes"
-        )
-
-        get_columns_for_type = lambda df, type: [
-            row[0]
-            for row in df.filter(col("type") == lit(type))
-            .select("external_name")
-            .collect()
-        ]
-
-        df_mapping_sdmx_no_del = self.df_mapping_sdmx_col_names.filter(~col("delete"))
-
-        dimensions = get_columns_for_type(df_mapping_sdmx_no_del, "Dimension")
-        time_dimensions = get_columns_for_type(df_mapping_sdmx_no_del, "TimeDimension")
-        primary_measure = get_columns_for_type(df_mapping_sdmx_no_del, "PrimaryMeasure")
-        attributes = get_columns_for_type(df_mapping_sdmx_no_del, "Attribute")
-
-        logging.info(f"Dimensions: {dimensions}")
-        logging.info(f"Time Dimensions: {time_dimensions}")
-        logging.info(f"Primary Measure: {primary_measure}")
-        logging.info(f"Attributes: {attributes}")
-
-        return df.select(*dimensions, *time_dimensions, *primary_measure, *attributes)
-
     def gen_gold_sws_disseminated_data(self) -> DataFrame:
         return (
             self.spark.read.option("tag", self.tag_name)
@@ -429,19 +104,6 @@ class SWSGoldIcebergSparkHelper:
             .transform(self.keep_dim_val_attr_columns)
         )
 
-    def gen_gold_sdmx_data(self) -> DataFrame:
-        return (
-            self.spark.read.option("tag", self.tag_name)
-            .table(self.iceberg_tables.SILVER.iceberg_id)
-            .transform(self.apply_diss_flag_filter)
-            .transform(self.apply_uom_mapping)
-            .transform(self.keep_dim_uom_val_attr_columns)
-            .transform(self.apply_sdmx_dimension_codes_mapping)
-            .transform(self.apply_sdmx_column_names_mapping)
-            .transform(self.add_sdmx_default_columns)
-            .transform(self.rearrange_sdmx_columns)
-        )
-
     def write_gold_sws_validated_data_to_iceberg_and_csv(
         self, df: DataFrame
     ) -> DataFrame:
@@ -550,14 +212,14 @@ class SWSGoldIcebergSparkHelper:
         df.writeTo(self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id).createOrReplace()
 
         logging.info(
-            f"Gold SDMX table written to {self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id}"
+            f"Gold pre-SDMX table written to {self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id}"
         )
 
         self.spark.sql(
             f"ALTER TABLE {self.iceberg_tables.GOLD_PRE_SDMX.iceberg_id} CREATE OR REPLACE TAG `{self.tag_name}`"
         )
 
-        logging.info(f"gold PRE SDMX tag '{self.tag_name}' created")
+        logging.info(f"gold pre-SDMX tag '{self.tag_name}' created")
 
         df_1 = df.coalesce(1)
 
@@ -570,13 +232,6 @@ class SWSGoldIcebergSparkHelper:
 
         return df
 
-    def gen_and_write_gold_sdmx_data_to_iceberg_and_csv(self) -> DataFrame:
-        self.df_gold_sdmx = self.gen_gold_sdmx_data()
-
-        self.write_gold_sdmx_data_to_iceberg_and_csv(self.df_gold_sdmx)
-
-        return self.df_gold_sdmx
-
     def write_gold_sws_validated_sws_dissemination_tag(
         self, df: DataFrame, tags: Tags
     ) -> DataFrame:
@@ -733,7 +388,7 @@ class SWSGoldIcebergSparkHelper:
 
         new_iceberg_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_pre_sdmx_iceberg",
-            name=f"{self.domain_code} gold PRE SDMX Iceberg",
+            name=f"{self.domain_code} gold pre-SDMX Iceberg",
             description="Gold table containing all the cleaned data in SDMX compatible format, ready to be mapped using FMR",
             layer=TableLayer.GOLD,
             private=True,
@@ -754,7 +409,7 @@ class SWSGoldIcebergSparkHelper:
 
         new_diss_table = BaseDisseminatedTagTable(
             id=f"{self.domain_code.lower()}_gold_pre_sdmx_csv",
-            name=f"{self.domain_code} gold PRE SDMX csv",
+            name=f"{self.domain_code} gold pre-SDMX csv",
             description="Gold table containing all the cleaned data in SDMX compatible format, ready to be mapped using FMR and cached in csv",
             layer=TableLayer.GOLD,
             private=True,
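Both the retained gen_gold_sws_disseminated_data and the removed gen_gold_sdmx_data build the gold layer as a chain of DataFrame.transform calls, each step receiving the previous step's DataFrame. The following self-contained PySpark sketch illustrates that chaining pattern on its own; the data, column names, and helper functions below are invented for the example and are not part of the package.

# Illustrative sketch of the DataFrame.transform chaining pattern used by the
# gold helper; the data and column names here are made up for the example.
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

def apply_diss_flag_filter(df: DataFrame) -> DataFrame:
    # Keep only rows flagged for dissemination, like the helper's filter step.
    return df.filter(col("diss_flag"))

def keep_columns(df: DataFrame, columns: list) -> DataFrame:
    # Project the DataFrame down to the requested columns.
    return df.select(*columns)

df = spark.createDataFrame(
    [("AFG", 2020, 1.0, True), ("AFG", 2021, 2.0, False)],
    ["geographic_area", "time", "value", "diss_flag"],
)

# Each .transform feeds its result into the next step, mirroring
# df.transform(self.apply_diss_flag_filter).transform(...) in the helper;
# passing keyword arguments through transform requires pyspark >= 3.3
# (the package pins pyspark==3.5.4).
df_gold = df.transform(apply_diss_flag_filter).transform(
    keep_columns, columns=["geographic_area", "time", "value"]
)
df_gold.show()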
@@ -299,6 +299,62 @@ class SWSPostgresSparkReader:
         unique_columns=["old_code"],
     )
 
+    def get_domain_code_source_datasets_ids_dest_dataset_id(
+        self, dataset_id: str, domain_code: str = None
+    ) -> Tuple[str, List[str], str]:
+        mapping_domains_id_df = self.read_pg_table(
+            pg_table=DatasetDatatables.MAPPING_DOMAINS_ID.id,
+            custom_schema=DatasetDatatables.MAPPING_DOMAINS_ID.schema,
+        )
+
+        if domain_code is None:
+            domain_code_df = mapping_domains_id_df.filter(
+                col("sws_source_id") == lit(dataset_id)
+            ).select("domain")
+
+            if domain_code_df.count() == 0:
+                raise ValueError(
+                    f'There is no row connecting the current source dataset id ({dataset_id}) to any domain in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}"'
+                )
+
+            if domain_code_df.count() > 1:
+                raise ValueError(
+                    f'There is more than one domain referencing the current source dataset id ({dataset_id}) in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}", please specify the domain code you want to process in the parameters'
+                )
+
+            domain_code = domain_code_df.collect()[0][0]
+
+        source_datasets_ids = [
+            row[0]
+            for row in (
+                mapping_domains_id_df.filter(col("domain") == lit(domain_code))
+                .select("sws_source_id")
+                .collect()
+            )
+        ]
+        dest_datasets_id_df = (
+            mapping_domains_id_df.filter(col("domain") == lit(domain_code))
+            .select("sws_source_id")
+            .distinct()
+        )
+
+        if dest_datasets_id_df.count() == 0:
+            raise ValueError(
+                f'There is no row connecting the current source dataset id and domain pair ({dataset_id}, {domain_code}) to any destination dataset id in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}"'
+            )
+        if dest_datasets_id_df.count() > 1:
+            raise ValueError(
+                f'The source dataset id and domain pair ({dataset_id}, {domain_code}) must point only to one destination dataset in the table "{DatasetDatatables.MAPPING_DOMAINS_ID.name}"'
+            )
+
+        dest_datasets_id = dest_datasets_id_df.collect()[0][0]
+
+        logging.info(f"domain code: {domain_code}")
+        logging.info(f"source datasets ids: {source_datasets_ids}")
+        logging.info(f"dest datasets ids: {dest_datasets_id}")
+
+        return (domain_code, source_datasets_ids, dest_datasets_id)
+
     def get_dest_dataset_id(self, domain_code: str, dataset_id: str) -> Tuple[str, str]:
 
         df = self.read_pg_table(
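The method added above reads the MAPPING_DOMAINS_ID datatable to resolve the domain code for a source dataset, the source dataset ids registered for that domain, and a single destination dataset id, raising ValueError when the mapping is missing or ambiguous. A hedged usage sketch follows; the SWSPostgresSparkReader constructor arguments and the dataset id are placeholders, since neither appears in this diff.

# Hypothetical usage of the method added in 0.0.85; the constructor arguments
# and the dataset id are placeholders, not values taken from this diff.
from sws_spark_dissemination_helper.SWSPostgresSparkReader import SWSPostgresSparkReader

pg_reader = SWSPostgresSparkReader(...)  # connection and Spark settings omitted

# With domain_code omitted, the domain is looked up from the source dataset id;
# a ValueError is raised if it maps to zero domains or to more than one.
domain_code, source_dataset_ids, dest_dataset_id = (
    pg_reader.get_domain_code_source_datasets_ids_dest_dataset_id(
        dataset_id="example_source_dataset",  # placeholder id
    )
)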
@@ -1,23 +0,0 @@
-"annotated-types==0.7.0",
-"boto3==1.36.18",
-"botocore==1.36.18",
-"certifi==2025.1.31",
-"charset-normalizer==3.4.1",
-"idna==3.10",
-"jmespath==1.0.1",
-"numpy==2.0.2",
-"pandas==2.2.3",
-"py4j==0.10.9.7",
-"pydantic==2.10.6",
-"pydantic_core==2.27.2",
-"pyspark==3.5.4",
-"python-dateutil==2.9.0.post0",
-"python-dotenv==0.19.2",
-"pytz==2025.1",
-"requests==2.32.3",
-"s3transfer==0.11.2",
-"six==1.17.0",
-"sws_api_client==1.4.3",
-"typing_extensions==4.12.2",
-"tzdata==2025.1",
-"urllib3==1.26.20"