acryl-datahub 1.2.0.7rc4__py3-none-any.whl → 1.2.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of acryl-datahub might be problematic.

Files changed (44)
  1. {acryl_datahub-1.2.0.7rc4.dist-info → acryl_datahub-1.2.0.8.dist-info}/METADATA +2557 -2557
  2. {acryl_datahub-1.2.0.7rc4.dist-info → acryl_datahub-1.2.0.8.dist-info}/RECORD +44 -42
  3. datahub/_version.py +1 -1
  4. datahub/cli/delete_cli.py +1 -0
  5. datahub/ingestion/api/report.py +18 -0
  6. datahub/ingestion/api/sink.py +0 -3
  7. datahub/ingestion/api/source.py +4 -4
  8. datahub/ingestion/autogenerated/capability_summary.json +1 -1
  9. datahub/ingestion/graph/client.py +8 -1
  10. datahub/ingestion/run/pipeline.py +19 -4
  11. datahub/ingestion/sink/datahub_rest.py +0 -12
  12. datahub/ingestion/source/cassandra/cassandra_api.py +17 -1
  13. datahub/ingestion/source/cassandra/cassandra_config.py +5 -0
  14. datahub/ingestion/source/datahub/config.py +4 -0
  15. datahub/ingestion/source/datahub/datahub_database_reader.py +6 -1
  16. datahub/ingestion/source/fivetran/config.py +1 -1
  17. datahub/ingestion/source/iceberg/iceberg.py +76 -34
  18. datahub/ingestion/source/metadata/lineage.py +8 -8
  19. datahub/ingestion/source/redshift/redshift.py +1 -1
  20. datahub/ingestion/source/sql/athena.py +95 -18
  21. datahub/ingestion/source/sql/athena_properties_extractor.py +43 -25
  22. datahub/ingestion/source/superset.py +3 -2
  23. datahub/ingestion/source/tableau/tableau.py +8 -5
  24. datahub/metadata/_internal_schema_classes.py +207 -12
  25. datahub/metadata/_urns/urn_defs.py +4 -0
  26. datahub/metadata/com/linkedin/pegasus2avro/settings/asset/__init__.py +19 -0
  27. datahub/metadata/com/linkedin/pegasus2avro/template/__init__.py +6 -0
  28. datahub/metadata/schema.avsc +160 -12
  29. datahub/metadata/schemas/AssetSettings.avsc +63 -0
  30. datahub/metadata/schemas/DataHubPageModuleProperties.avsc +9 -1
  31. datahub/metadata/schemas/DataHubPageTemplateProperties.avsc +77 -1
  32. datahub/metadata/schemas/DataProductKey.avsc +2 -1
  33. datahub/metadata/schemas/DomainKey.avsc +2 -1
  34. datahub/metadata/schemas/GlossaryNodeKey.avsc +2 -1
  35. datahub/metadata/schemas/GlossaryTermKey.avsc +2 -1
  36. datahub/metadata/schemas/IncidentInfo.avsc +3 -3
  37. datahub/metadata/schemas/StructuredPropertyDefinition.avsc +0 -3
  38. datahub/sql_parsing/sqlglot_lineage.py +121 -28
  39. datahub/sql_parsing/sqlglot_utils.py +12 -1
  40. datahub/utilities/urns/urn.py +41 -2
  41. {acryl_datahub-1.2.0.7rc4.dist-info → acryl_datahub-1.2.0.8.dist-info}/WHEEL +0 -0
  42. {acryl_datahub-1.2.0.7rc4.dist-info → acryl_datahub-1.2.0.8.dist-info}/entry_points.txt +0 -0
  43. {acryl_datahub-1.2.0.7rc4.dist-info → acryl_datahub-1.2.0.8.dist-info}/licenses/LICENSE +0 -0
  44. {acryl_datahub-1.2.0.7rc4.dist-info → acryl_datahub-1.2.0.8.dist-info}/top_level.txt +0 -0
datahub/ingestion/source/iceberg/iceberg.py
@@ -12,7 +12,7 @@ from pyiceberg.exceptions import (
     NoSuchNamespaceError,
     NoSuchPropertyException,
     NoSuchTableError,
-    ServerError,
+    RESTError,
 )
 from pyiceberg.schema import Schema, SchemaVisitorPerPrimitiveType, visit
 from pyiceberg.table import Table
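
Note on the ServerError -> RESTError change in this hunk (and in the matching handlers below): in pyiceberg's exceptions module, ServerError is a subclass of RESTError, so catching the base class keeps the old 5xx handling while also covering other REST catalog failures. A minimal sketch, assuming that hierarchy (verify against the pyiceberg version the package pins):

    from pyiceberg.exceptions import RESTError, ServerError

    try:
        raise ServerError("simulated HTTP 500 from the REST catalog")
    except RESTError as e:
        # ServerError is still caught here; so are other RESTError subclasses
        # such as BadRequestError, which the old handler would have missed.
        print(f"handled and skipped: {e}")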
@@ -154,6 +154,10 @@ class IcebergSource(StatefulIngestionSourceBase):
         self.report: IcebergSourceReport = IcebergSourceReport()
         self.config: IcebergSourceConfig = config
         self.ctx: PipelineContext = ctx
+        self.stamping_processor = AutoSystemMetadata(
+            self.ctx
+        )  # single instance used only when processing namespaces
+        self.namespaces: List[Tuple[Identifier, str]] = []
 
     @classmethod
     def create(cls, config_dict: Dict, ctx: PipelineContext) -> "IcebergSource":
@@ -196,9 +200,9 @@ class IcebergSource(StatefulIngestionSourceBase):
             auto_lowercase_dataset_urns,
             auto_materialize_referenced_tags_terms,
             partial(
-                auto_fix_duplicate_schema_field_paths, platform=self._infer_platform()
+                auto_fix_duplicate_schema_field_paths, platform=self.infer_platform()
             ),
-            partial(auto_fix_empty_field_paths, platform=self._infer_platform()),
+            partial(auto_fix_empty_field_paths, platform=self.infer_platform()),
             partial(auto_workunit_reporter, self.get_report()),
             auto_patch_last_modified,
             EnsureAspectSizeProcessor(self.get_report()).ensure_aspect_size,
@@ -246,6 +250,13 @@ class IcebergSource(StatefulIngestionSourceBase):
                 context=str(namespace),
                 exc=e,
             )
+        except RESTError as e:
+            self.report.warning(
+                title="Iceberg REST Server Error",
+                message="Iceberg REST Server returned error status when trying to list tables for a namespace, skipping it.",
+                context=str(namespace),
+                exc=e,
+            )
         except Exception as e:
             self.report.report_failure(
                 title="Error when processing a namespace",
@@ -322,10 +333,10 @@ class IcebergSource(StatefulIngestionSourceBase):
                 context=dataset_name,
                 exc=e,
             )
-        except ServerError as e:
+        except RESTError as e:
             self.report.warning(
                 title="Iceberg REST Server Error",
-                message="Iceberg returned 500 HTTP status when trying to process a table, skipping it.",
+                message="Iceberg REST Server returned error status when trying to process a table, skipping it.",
                 context=dataset_name,
                 exc=e,
             )
@@ -365,7 +376,7 @@ class IcebergSource(StatefulIngestionSourceBase):
             )
 
         try:
-            catalog = self.config.get_catalog()
+            self.catalog = self.config.get_catalog()
         except Exception as e:
             self.report.report_failure(
                 title="Failed to initialize catalog object",
@@ -375,33 +386,7 @@ class IcebergSource(StatefulIngestionSourceBase):
             return
 
         try:
-            stamping_processor = AutoSystemMetadata(self.ctx)
-            namespace_ids = self._get_namespaces(catalog)
-            namespaces: List[Tuple[Identifier, str]] = []
-            for namespace in namespace_ids:
-                namespace_repr = ".".join(namespace)
-                LOGGER.debug(f"Processing namespace {namespace_repr}")
-                namespace_urn = make_container_urn(
-                    NamespaceKey(
-                        namespace=namespace_repr,
-                        platform=self.platform,
-                        instance=self.config.platform_instance,
-                        env=self.config.env,
-                    )
-                )
-                namespace_properties: Properties = catalog.load_namespace_properties(
-                    namespace
-                )
-                namespaces.append((namespace, namespace_urn))
-                for aspect in self._create_iceberg_namespace_aspects(
-                    namespace, namespace_properties
-                ):
-                    yield stamping_processor.stamp_wu(
-                        MetadataChangeProposalWrapper(
-                            entityUrn=namespace_urn, aspect=aspect
-                        ).as_workunit()
-                    )
-            LOGGER.debug("Namespaces ingestion completed")
+            yield from self._process_namespaces()
         except Exception as e:
             self.report.report_failure(
                 title="Failed to list namespaces",
@@ -415,13 +400,70 @@ class IcebergSource(StatefulIngestionSourceBase):
             args_list=[
                 (dataset_path, namespace_urn)
                 for dataset_path, namespace_urn in self._get_datasets(
-                    catalog, namespaces
+                    self.catalog, self.namespaces
                 )
             ],
             max_workers=self.config.processing_threads,
         ):
             yield wu
 
+    def _try_processing_namespace(
+        self, namespace: Identifier
+    ) -> Iterable[MetadataWorkUnit]:
+        namespace_repr = ".".join(namespace)
+        try:
+            LOGGER.debug(f"Processing namespace {namespace_repr}")
+            namespace_urn = make_container_urn(
+                NamespaceKey(
+                    namespace=namespace_repr,
+                    platform=self.platform,
+                    instance=self.config.platform_instance,
+                    env=self.config.env,
+                )
+            )
+
+            namespace_properties: Properties = self.catalog.load_namespace_properties(
+                namespace
+            )
+            for aspect in self._create_iceberg_namespace_aspects(
+                namespace, namespace_properties
+            ):
+                yield self.stamping_processor.stamp_wu(
+                    MetadataChangeProposalWrapper(
+                        entityUrn=namespace_urn, aspect=aspect
+                    ).as_workunit()
+                )
+            self.namespaces.append((namespace, namespace_urn))
+        except NoSuchNamespaceError as e:
+            self.report.report_warning(
+                title="Failed to retrieve namespace properties",
+                message="Couldn't find the namespace, was it deleted during the ingestion?",
+                context=namespace_repr,
+                exc=e,
+            )
+            return
+        except RESTError as e:
+            self.report.warning(
+                title="Iceberg REST Server Error",
+                message="Iceberg REST Server returned error status when trying to retrieve namespace properties, skipping it.",
+                context=str(namespace),
+                exc=e,
+            )
+        except Exception as e:
+            self.report.report_failure(
+                title="Failed to process namespace",
+                message="Unhandled exception happened during processing of the namespace",
+                context=namespace_repr,
+                exc=e,
+            )
+
+    def _process_namespaces(self) -> Iterable[MetadataWorkUnit]:
+        namespace_ids = self._get_namespaces(self.catalog)
+        for namespace in namespace_ids:
+            yield from self._try_processing_namespace(namespace)
+
+        LOGGER.debug("Namespaces ingestion completed")
+
     def _create_iceberg_table_aspects(
         self, dataset_name: str, table: Table, namespace_urn: str
     ) -> Iterable[_Aspect]:
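
The refactor above moves the namespace loop into _process_namespaces, with each namespace wrapped in its own try/except inside _try_processing_namespace. The effect is per-item error isolation: a failing namespace is reported and skipped while the generator keeps yielding work units for the rest. A minimal standalone sketch of the pattern (hypothetical names, not the DataHub API):

    from typing import Iterable, List

    def try_process(item: str) -> Iterable[str]:
        try:
            if item == "bad":
                raise RuntimeError("simulated per-item failure")
            yield f"workunit:{item}"
        except RuntimeError as e:
            print(f"skipping {item}: {e}")  # report, then continue with the next item

    def process_all(items: List[str]) -> Iterable[str]:
        for item in items:
            yield from try_process(item)

    print(list(process_all(["a", "bad", "b"])))  # ['workunit:a', 'workunit:b']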
datahub/ingestion/source/metadata/lineage.py
@@ -37,9 +37,9 @@ from datahub.ingestion.api.source_helpers import (
 from datahub.ingestion.api.workunit import MetadataWorkUnit
 from datahub.ingestion.graph.client import get_default_graph
 from datahub.ingestion.graph.config import ClientMode
-from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
-    FineGrainedLineageDownstreamType,
-    FineGrainedLineageUpstreamType,
+from datahub.metadata.schema_classes import (
+    FineGrainedLineageDownstreamTypeClass,
+    FineGrainedLineageUpstreamTypeClass,
 )
 
 logger = logging.getLogger(__name__)
@@ -80,9 +80,9 @@ class FineGrainedLineageConfig(ConfigModel):
     @validator("upstreamType")
     def upstream_type_must_be_supported(cls, v: str) -> str:
         allowed_types = [
-            FineGrainedLineageUpstreamType.FIELD_SET,
-            FineGrainedLineageUpstreamType.DATASET,
-            FineGrainedLineageUpstreamType.NONE,
+            FineGrainedLineageUpstreamTypeClass.FIELD_SET,
+            FineGrainedLineageUpstreamTypeClass.DATASET,
+            FineGrainedLineageUpstreamTypeClass.NONE,
         ]
         if v not in allowed_types:
             raise ValueError(
@@ -93,8 +93,8 @@
     @validator("downstreamType")
     def downstream_type_must_be_supported(cls, v: str) -> str:
         allowed_types = [
-            FineGrainedLineageDownstreamType.FIELD_SET,
-            FineGrainedLineageDownstreamType.FIELD,
+            FineGrainedLineageDownstreamTypeClass.FIELD_SET,
+            FineGrainedLineageDownstreamTypeClass.FIELD,
         ]
         if v not in allowed_types:
             raise ValueError(
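
The import swap in the three hunks above is behavior-preserving because the *Class enum-like types in datahub.metadata.schema_classes expose the same string constants as the pegasus2avro aliases they replace; the validators still compare plain strings. A quick check, assuming the generated constants equal their names (which is how DataHub's codegen emits them):

    from datahub.metadata.schema_classes import FineGrainedLineageUpstreamTypeClass

    assert FineGrainedLineageUpstreamTypeClass.FIELD_SET == "FIELD_SET"
    allowed = [
        FineGrainedLineageUpstreamTypeClass.FIELD_SET,
        FineGrainedLineageUpstreamTypeClass.DATASET,
        FineGrainedLineageUpstreamTypeClass.NONE,
    ]
    print("FIELD_SET" in allowed)  # True: config values are validated as strings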
datahub/ingestion/source/redshift/redshift.py
@@ -143,7 +143,7 @@ logger: logging.Logger = logging.getLogger(__name__)
 @capability(SourceCapability.SCHEMA_METADATA, "Enabled by default")
 @capability(
     SourceCapability.USAGE_STATS,
-    "Enabled by default, can be disabled via configuration `include_usage_statistics`",
+    "Optionally enabled via `include_usage_statistics`",
 )
 @capability(
     SourceCapability.DELETION_DETECTION, "Enabled by default via stateful ingestion"
datahub/ingestion/source/sql/athena.py
@@ -73,6 +73,11 @@ except ImportError:
 
 logger = logging.getLogger(__name__)
 
+# Precompiled regex for SQL identifier validation
+# Athena identifiers can only contain lowercase letters, numbers, underscore, and period (for complex types)
+# Note: Athena automatically converts uppercase to lowercase, but we're being strict for security
+_IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z0-9_.]+$")
+
 assert STRUCT, "required type modules are not available"
 register_custom_type(STRUCT, RecordTypeClass)
 register_custom_type(MapType, MapTypeClass)
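
The new _IDENTIFIER_PATTERN is the allowlist behind the sanitization helpers added below. Note that despite the comment's "lowercase" wording, the character class also admits uppercase letters; the security property comes from rejecting quoting and statement metacharacters. Behavior sketch (pattern copied from the hunk above):

    import re

    _IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z0-9_.]+$")

    print(bool(_IDENTIFIER_PATTERN.match("year")))               # True
    print(bool(_IDENTIFIER_PATTERN.match("event.partition")))    # True: period allowed for complex types
    print(bool(_IDENTIFIER_PATTERN.match('dt"; drop table t')))  # False: quotes, semicolons, spaces rejected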
@@ -510,20 +515,76 @@ class AthenaSource(SQLAlchemySource):
             return [schema for schema in schemas if schema == athena_config.database]
         return schemas
 
+    @classmethod
+    def _sanitize_identifier(cls, identifier: str) -> str:
+        """Sanitize SQL identifiers to prevent injection attacks.
+
+        Args:
+            identifier: The SQL identifier to sanitize
+
+        Returns:
+            Sanitized identifier safe for SQL queries
+
+        Raises:
+            ValueError: If identifier contains unsafe characters
+        """
+        if not identifier:
+            raise ValueError("Identifier cannot be empty")
+
+        # Allow only alphanumeric characters, underscores, and periods for identifiers
+        # This matches Athena's identifier naming rules
+        if not _IDENTIFIER_PATTERN.match(identifier):
+            raise ValueError(
+                f"Identifier '{identifier}' contains unsafe characters. Only alphanumeric characters, underscores, and periods are allowed."
+            )
+
+        return identifier
+
     @classmethod
     def _casted_partition_key(cls, key: str) -> str:
         # We need to cast the partition keys to a VARCHAR, since otherwise
         # Athena may throw an error during concatenation / comparison.
-        return f"CAST({key} as VARCHAR)"
+        sanitized_key = cls._sanitize_identifier(key)
+        return f"CAST({sanitized_key} as VARCHAR)"
+
+    @classmethod
+    def _build_max_partition_query(
+        cls, schema: str, table: str, partitions: List[str]
+    ) -> str:
+        """Build SQL query to find the row with maximum partition values.
+
+        Args:
+            schema: Database schema name
+            table: Table name
+            partitions: List of partition column names
+
+        Returns:
+            SQL query string to find the maximum partition
+
+        Raises:
+            ValueError: If any identifier contains unsafe characters
+        """
+        # Sanitize all identifiers to prevent SQL injection
+        sanitized_schema = cls._sanitize_identifier(schema)
+        sanitized_table = cls._sanitize_identifier(table)
+        sanitized_partitions = [
+            cls._sanitize_identifier(partition) for partition in partitions
+        ]
+
+        casted_keys = [cls._casted_partition_key(key) for key in partitions]
+        if len(casted_keys) == 1:
+            part_concat = casted_keys[0]
+        else:
+            separator = "CAST('-' AS VARCHAR)"
+            part_concat = f"CONCAT({f', {separator}, '.join(casted_keys)})"
+
+        return f'select {",".join(sanitized_partitions)} from "{sanitized_schema}"."{sanitized_table}$partitions" where {part_concat} = (select max({part_concat}) from "{sanitized_schema}"."{sanitized_table}$partitions")'
 
     @override
     def get_partitions(
         self, inspector: Inspector, schema: str, table: str
     ) -> Optional[List[str]]:
-        if (
-            not self.config.extract_partitions
-            and not self.config.extract_partitions_using_create_statements
-        ):
+        if not self.config.extract_partitions:
             return None
 
         if not self.cursor:
@@ -557,11 +618,9 @@ class AthenaSource(SQLAlchemySource):
                 context=f"{schema}.{table}",
                 level=StructuredLogLevel.WARN,
             ):
-                # We create an artifical concatenated partition key to be able to query max partition easier
-                part_concat = " || '-' || ".join(
-                    self._casted_partition_key(key) for key in partitions
+                max_partition_query = self._build_max_partition_query(
+                    schema, table, partitions
                 )
-                max_partition_query = f'select {",".join(partitions)} from "{schema}"."{table}$partitions" where {part_concat} = (select max({part_concat}) from "{schema}"."{table}$partitions")'
                 ret = self.cursor.execute(max_partition_query)
                 max_partition: Dict[str, str] = {}
                 if ret:
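
For reference, this is the SQL the new _build_max_partition_query helper produces for a two-column partition; the inline reimplementation below mirrors the hunk above (the names db and tbl are illustrative):

    partitions = ["year", "month"]
    casted_keys = [f"CAST({key} as VARCHAR)" for key in partitions]
    separator = "CAST('-' AS VARCHAR)"
    part_concat = f"CONCAT({f', {separator}, '.join(casted_keys)})"
    query = (
        f'select {",".join(partitions)} from "db"."tbl$partitions" '
        f'where {part_concat} = (select max({part_concat}) from "db"."tbl$partitions")'
    )
    print(query)
    # select year,month from "db"."tbl$partitions" where
    # CONCAT(CAST(year as VARCHAR), CAST('-' AS VARCHAR), CAST(month as VARCHAR)) = (...)

For a single partition column the helper skips the CONCAT and compares the lone casted key directly.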
@@ -678,16 +737,34 @@
             ).get(table, None)
 
             if partition and partition.max_partition:
-                max_partition_filters = []
-                for key, value in partition.max_partition.items():
-                    max_partition_filters.append(
-                        f"{self._casted_partition_key(key)} = '{value}'"
+                try:
+                    # Sanitize identifiers to prevent SQL injection
+                    sanitized_schema = self._sanitize_identifier(schema)
+                    sanitized_table = self._sanitize_identifier(table)
+
+                    max_partition_filters = []
+                    for key, value in partition.max_partition.items():
+                        # Sanitize partition key and properly escape the value
+                        sanitized_key = self._sanitize_identifier(key)
+                        # Escape single quotes in the value to prevent injection
+                        escaped_value = value.replace("'", "''") if value else ""
+                        max_partition_filters.append(
+                            f"{self._casted_partition_key(sanitized_key)} = '{escaped_value}'"
+                        )
+                    max_partition = str(partition.max_partition)
+                    return (
+                        max_partition,
+                        f'SELECT * FROM "{sanitized_schema}"."{sanitized_table}" WHERE {" AND ".join(max_partition_filters)}',
                     )
-                max_partition = str(partition.max_partition)
-                return (
-                    max_partition,
-                    f'SELECT * FROM "{schema}"."{table}" WHERE {" AND ".join(max_partition_filters)}',
-                )
+                except ValueError as e:
+                    # If sanitization fails due to malicious identifiers,
+                    # return None to disable partition profiling for this table
+                    # rather than crashing the entire ingestion
+                    logger.warning(
+                        f"Failed to generate partition profiler query for {schema}.{table} due to unsafe identifiers: {e}. "
+                        f"Partition profiling disabled for this table."
+                    )
+                    return None, None
             return None, None
 
     def close(self):
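
The filter construction above pairs identifier sanitization with standard SQL string-literal escaping for the values: single quotes are doubled, so a value can no longer terminate the literal. A quick check:

    value = "O'Brien"
    escaped_value = value.replace("'", "''") if value else ""
    print(f"CAST(name as VARCHAR) = '{escaped_value}'")
    # CAST(name as VARCHAR) = 'O''Brien'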
datahub/ingestion/source/sql/athena_properties_extractor.py
@@ -174,20 +174,16 @@ class AthenaPropertiesExtractor:
         def format_column_definition(line):
             # Use regex to parse the line more accurately
             # Pattern: column_name data_type [COMMENT comment_text] [,]
-            # Use greedy match for comment to capture everything until trailing comma
-            pattern = r"^\s*(.+?)\s+([\s,\w<>\[\]]+)((\s+COMMENT\s+(.+?)(,?))|(,?)\s*)?$"
-            match = re.match(pattern, line, re.IGNORECASE)
+            # Improved pattern to better separate column name, data type, and comment
+            pattern = r"^\s*([`\w']+)\s+([\w<>\[\](),\s]+?)(\s+COMMENT\s+(.+?))?(,?)\s*$"
+            match = re.match(pattern, line.strip(), re.IGNORECASE)
 
             if not match:
                 return line
-            column_name = match.group(1)
-            data_type = match.group(2)
-            comment_part = match.group(5)  # COMMENT part
-            # there are different number of match groups depending on whether comment exists
-            if comment_part:
-                trailing_comma = match.group(6) if match.group(6) else ""
-            else:
-                trailing_comma = match.group(7) if match.group(7) else ""
+            column_name = match.group(1).strip()
+            data_type = match.group(2).strip()
+            comment_part = match.group(4)  # COMMENT part
+            trailing_comma = match.group(5) if match.group(5) else ""
 
             # Add backticks to column name if not already present
             if not (column_name.startswith("`") and column_name.endswith("`")):
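
Tracing the improved pattern against a typical column line shows the fixed group layout (name, type, optional comment, trailing comma) that replaces the old variable group numbering:

    import re

    pattern = r"^\s*([`\w']+)\s+([\w<>\[\](),\s]+?)(\s+COMMENT\s+(.+?))?(,?)\s*$"
    m = re.match(pattern, "user_id bigint COMMENT 'primary key',".strip(), re.IGNORECASE)
    assert m is not None
    print(m.group(1))  # user_id
    print(m.group(2))  # bigint
    print(m.group(4))  # 'primary key'
    print(m.group(5))  # ,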
@@ -201,17 +197,19 @@
 
             # Handle comment quoting and escaping
             if comment_part.startswith("'") and comment_part.endswith("'"):
-                # Already properly single quoted - keep as is
-                formatted_comment = comment_part
+                # Already single quoted - but check for proper escaping
+                inner_content = comment_part[1:-1]
+                # Re-escape any single quotes that aren't properly escaped
+                escaped_content = inner_content.replace("'", "''")
+                formatted_comment = f"'{escaped_content}'"
             elif comment_part.startswith('"') and comment_part.endswith('"'):
                 # Double quoted - convert to single quotes and escape internal single quotes
                 inner_content = comment_part[1:-1]
                 escaped_content = inner_content.replace("'", "''")
                 formatted_comment = f"'{escaped_content}'"
             else:
-                # Not quoted - add quotes and escape any single quotes
-                escaped_content = comment_part.replace("'", "''")
-                formatted_comment = f"'{escaped_content}'"
+                # Not quoted - use double quotes to avoid escaping issues with single quotes
+                formatted_comment = f'"{comment_part}"'
 
             result_parts.extend(["COMMENT", formatted_comment])
 
@@ -240,19 +238,39 @@
                 formatted_lines.append(line)
                 continue
 
-            # Check if we're exiting column definitions (closing parenthesis before PARTITIONED BY or end)
-            if in_column_definition and ")" in line:
-                in_column_definition = False
+            # Skip processing PARTITIONED BY clauses as column definitions
+            if in_column_definition and "PARTITIONED BY" in line.upper():
                 formatted_lines.append(line)
                 continue
 
-            # Process only column definitions (not PARTITIONED BY or other sections)
+            # Process column definitions first, then check for exit condition
             if in_column_definition and stripped_line:
-                # Match column definition pattern and format it
-                formatted_line = AthenaPropertiesExtractor.format_column_definition(
-                    line
-                )
-                formatted_lines.append(formatted_line)
+                # Check if this line contains a column definition (before the closing paren)
+                if ")" in line:
+                    # Split the line at the closing parenthesis
+                    paren_index = line.find(")")
+                    column_part = line[:paren_index].strip()
+                    closing_part = line[paren_index:]
+
+                    if column_part:
+                        # Format the column part
+                        formatted_column = (
+                            AthenaPropertiesExtractor.format_column_definition(
+                                column_part
+                            )
+                        )
+                        # Reconstruct the line
+                        formatted_line = formatted_column.rstrip() + closing_part
+                        formatted_lines.append(formatted_line)
+                    else:
+                        formatted_lines.append(line)
+                    in_column_definition = False
+                else:
+                    # Regular column definition line
+                    formatted_line = AthenaPropertiesExtractor.format_column_definition(
+                        line
+                    )
+                    formatted_lines.append(formatted_line)
             else:
                 # For all other lines, keep as-is
                 formatted_lines.append(line)
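
A minimal trace of the new closing-parenthesis branch, for a definition that shares its line with the ) that ends the column list. Note that line.find(")") locates the first parenthesis, so a parenthesized type such as decimal(10,2) on that final line would split inside the type; the simple case looks like this:

    line = "  id bigint)"
    paren_index = line.find(")")
    column_part = line[:paren_index].strip()  # "id bigint" -> formatted as a column
    closing_part = line[paren_index:]         # ")" reattached after formatting
    print(column_part, "|", closing_part)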
datahub/ingestion/source/superset.py
@@ -154,6 +154,7 @@ class SupersetDataset(BaseModel):
     table_name: str
     changed_on_utc: Optional[str] = None
     explore_url: Optional[str] = ""
+    description: Optional[str] = ""
 
     @property
     def modified_dt(self) -> Optional[datetime]:
@@ -1062,7 +1063,7 @@
                 fieldPath=col.get("column_name", ""),
                 type=SchemaFieldDataType(data_type),
                 nativeDataType="",
-                description=col.get("column_name", ""),
+                description=col.get("description") or col.get("column_name", ""),
                 nullable=True,
             )
             schema_fields.append(field)
@@ -1283,7 +1284,7 @@
 
         dataset_info = DatasetPropertiesClass(
             name=dataset.table_name,
-            description="",
+            description=dataset.description or "",
             externalUrl=dataset_url,
             lastModified=TimeStamp(time=modified_ts),
         )
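
Both Superset changes use the same fallback rule: prefer a non-empty description, otherwise keep the previous behavior. In miniature:

    col = {"column_name": "user_id", "description": None}
    print(col.get("description") or col.get("column_name", ""))  # user_id
    col["description"] = "Unique user identifier"
    print(col.get("description") or col.get("column_name", ""))  # Unique user identifier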
datahub/ingestion/source/tableau/tableau.py
@@ -1561,12 +1561,15 @@ class TableauSiteSource:
                 }}""",
             )
         else:
-            # As of Tableau Server 2024.2, the metadata API sporadically returns a 30-second
-            # timeout error.
-            # It doesn't reliably happen, so retrying a couple of times makes sense.
             if all(
+                # As of Tableau Server 2024.2, the metadata API sporadically returns a 30-second
+                # timeout error.
+                # It doesn't reliably happen, so retrying a couple of times makes sense.
                 error.get("message")
                 == "Execution canceled because timeout of 30000 millis was reached"
+                # The Metadata API sometimes returns an 'unexpected error' message when querying
+                # embeddedDatasourcesConnection. Try retrying a couple of times.
+                or error.get("message") == "Unexpected error occurred"
                 for error in errors
             ):
                 # If it was only a timeout error, we can retry.
@@ -1578,8 +1581,8 @@
                 (self.config.max_retries - retries_remaining + 1) ** 2, 60
             )
             logger.info(
-                f"Query {connection_type} received a 30 second timeout error - will retry in {backoff_time} seconds. "
-                f"Retries remaining: {retries_remaining}"
+                f"Query {connection_type} received a retryable error with {retries_remaining} retries remaining, "
+                f"will retry in {backoff_time} seconds: {errors}"
             )
             time.sleep(backoff_time)
             return self.get_connection_object_page(
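
The backoff in the retry path above grows quadratically with the attempt number and is capped at 60 seconds. A quick table of the schedule (max_retries = 5 is illustrative; the real value comes from config):

    max_retries = 5
    for retries_remaining in range(max_retries, 0, -1):
        backoff_time = min((max_retries - retries_remaining + 1) ** 2, 60)
        print(f"retries_remaining={retries_remaining} -> sleep {backoff_time}s")
    # sleeps of 1, 4, 9, 16, 25 seconds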