acryl-datahub 1.2.0.7rc3__py3-none-any.whl → 1.2.0.8rc1__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of acryl-datahub might be problematic.

Files changed (43)
  1. {acryl_datahub-1.2.0.7rc3.dist-info → acryl_datahub-1.2.0.8rc1.dist-info}/METADATA +2634 -2633
  2. {acryl_datahub-1.2.0.7rc3.dist-info → acryl_datahub-1.2.0.8rc1.dist-info}/RECORD +43 -40
  3. datahub/_version.py +1 -1
  4. datahub/cli/delete_cli.py +1 -0
  5. datahub/ingestion/api/report.py +4 -0
  6. datahub/ingestion/autogenerated/capability_summary.json +2 -2
  7. datahub/ingestion/graph/client.py +8 -1
  8. datahub/ingestion/source/bigquery_v2/bigquery_connection.py +12 -1
  9. datahub/ingestion/source/datahub/config.py +4 -0
  10. datahub/ingestion/source/datahub/datahub_database_reader.py +6 -1
  11. datahub/ingestion/source/metadata/lineage.py +8 -8
  12. datahub/ingestion/source/qlik_sense/qlik_sense.py +1 -1
  13. datahub/ingestion/source/redshift/redshift.py +1 -1
  14. datahub/ingestion/source/sql/athena.py +95 -18
  15. datahub/ingestion/source/sql/athena_properties_extractor.py +43 -25
  16. datahub/ingestion/source/superset.py +3 -2
  17. datahub/ingestion/source/tableau/tableau.py +8 -5
  18. datahub/ingestion/source/unity/config.py +65 -11
  19. datahub/ingestion/source/unity/proxy.py +90 -5
  20. datahub/ingestion/source/unity/proxy_patch.py +321 -0
  21. datahub/ingestion/source/unity/source.py +12 -0
  22. datahub/ingestion/source/usage/usage_common.py +1 -0
  23. datahub/metadata/_internal_schema_classes.py +207 -12
  24. datahub/metadata/com/linkedin/pegasus2avro/settings/asset/__init__.py +19 -0
  25. datahub/metadata/com/linkedin/pegasus2avro/template/__init__.py +6 -0
  26. datahub/metadata/schema.avsc +160 -12
  27. datahub/metadata/schemas/AssetSettings.avsc +63 -0
  28. datahub/metadata/schemas/DataHubPageModuleProperties.avsc +9 -1
  29. datahub/metadata/schemas/DataHubPageTemplateProperties.avsc +77 -1
  30. datahub/metadata/schemas/DataProductKey.avsc +2 -1
  31. datahub/metadata/schemas/DomainKey.avsc +2 -1
  32. datahub/metadata/schemas/GlossaryNodeKey.avsc +2 -1
  33. datahub/metadata/schemas/GlossaryTermKey.avsc +2 -1
  34. datahub/metadata/schemas/IncidentInfo.avsc +3 -3
  35. datahub/metadata/schemas/StructuredPropertyDefinition.avsc +0 -3
  36. datahub/sdk/chart.py +36 -22
  37. datahub/sdk/dashboard.py +38 -62
  38. datahub/sql_parsing/sqlglot_lineage.py +121 -28
  39. datahub/sql_parsing/sqlglot_utils.py +12 -1
  40. {acryl_datahub-1.2.0.7rc3.dist-info → acryl_datahub-1.2.0.8rc1.dist-info}/WHEEL +0 -0
  41. {acryl_datahub-1.2.0.7rc3.dist-info → acryl_datahub-1.2.0.8rc1.dist-info}/entry_points.txt +0 -0
  42. {acryl_datahub-1.2.0.7rc3.dist-info → acryl_datahub-1.2.0.8rc1.dist-info}/licenses/LICENSE +0 -0
  43. {acryl_datahub-1.2.0.7rc3.dist-info → acryl_datahub-1.2.0.8rc1.dist-info}/top_level.txt +0 -0
datahub/ingestion/source/sql/athena.py
@@ -73,6 +73,11 @@ except ImportError:
 
 logger = logging.getLogger(__name__)
 
+# Precompiled regex for SQL identifier validation
+# Athena identifiers can only contain lowercase letters, numbers, underscore, and period (for complex types)
+# Note: Athena automatically converts uppercase to lowercase, but we're being strict for security
+_IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z0-9_.]+$")
+
 assert STRUCT, "required type modules are not available"
 register_custom_type(STRUCT, RecordTypeClass)
 register_custom_type(MapType, MapTypeClass)
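
A quick, self-contained illustration of what the new identifier check accepts and rejects (the pattern is copied here for the example rather than imported; the sample identifiers are made up):

import re

# Same character class as the new _IDENTIFIER_PATTERN (copied for illustration).
identifier_pattern = re.compile(r"^[a-zA-Z0-9_.]+$")

# Hypothetical identifiers: plain columns and dotted struct fields pass,
# anything with quotes, spaces, or dashes is rejected.
for candidate in ["year", "event_date", "payload.user.id", "col-name", "x'; DROP TABLE t; --"]:
    print(candidate, "->", bool(identifier_pattern.match(candidate)))
# year -> True, event_date -> True, payload.user.id -> True,
# col-name -> False, x'; DROP TABLE t; -- -> False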
@@ -510,20 +515,76 @@ class AthenaSource(SQLAlchemySource):
             return [schema for schema in schemas if schema == athena_config.database]
         return schemas
 
+    @classmethod
+    def _sanitize_identifier(cls, identifier: str) -> str:
+        """Sanitize SQL identifiers to prevent injection attacks.
+
+        Args:
+            identifier: The SQL identifier to sanitize
+
+        Returns:
+            Sanitized identifier safe for SQL queries
+
+        Raises:
+            ValueError: If identifier contains unsafe characters
+        """
+        if not identifier:
+            raise ValueError("Identifier cannot be empty")
+
+        # Allow only alphanumeric characters, underscores, and periods for identifiers
+        # This matches Athena's identifier naming rules
+        if not _IDENTIFIER_PATTERN.match(identifier):
+            raise ValueError(
+                f"Identifier '{identifier}' contains unsafe characters. Only alphanumeric characters, underscores, and periods are allowed."
+            )
+
+        return identifier
+
     @classmethod
     def _casted_partition_key(cls, key: str) -> str:
         # We need to cast the partition keys to a VARCHAR, since otherwise
         # Athena may throw an error during concatenation / comparison.
-        return f"CAST({key} as VARCHAR)"
+        sanitized_key = cls._sanitize_identifier(key)
+        return f"CAST({sanitized_key} as VARCHAR)"
+
+    @classmethod
+    def _build_max_partition_query(
+        cls, schema: str, table: str, partitions: List[str]
+    ) -> str:
+        """Build SQL query to find the row with maximum partition values.
+
+        Args:
+            schema: Database schema name
+            table: Table name
+            partitions: List of partition column names
+
+        Returns:
+            SQL query string to find the maximum partition
+
+        Raises:
+            ValueError: If any identifier contains unsafe characters
+        """
+        # Sanitize all identifiers to prevent SQL injection
+        sanitized_schema = cls._sanitize_identifier(schema)
+        sanitized_table = cls._sanitize_identifier(table)
+        sanitized_partitions = [
+            cls._sanitize_identifier(partition) for partition in partitions
+        ]
+
+        casted_keys = [cls._casted_partition_key(key) for key in partitions]
+        if len(casted_keys) == 1:
+            part_concat = casted_keys[0]
+        else:
+            separator = "CAST('-' AS VARCHAR)"
+            part_concat = f"CONCAT({f', {separator}, '.join(casted_keys)})"
+
+        return f'select {",".join(sanitized_partitions)} from "{sanitized_schema}"."{sanitized_table}$partitions" where {part_concat} = (select max({part_concat}) from "{sanitized_schema}"."{sanitized_table}$partitions")'
 
     @override
     def get_partitions(
         self, inspector: Inspector, schema: str, table: str
     ) -> Optional[List[str]]:
-        if (
-            not self.config.extract_partitions
-            and not self.config.extract_partitions_using_create_statements
-        ):
+        if not self.config.extract_partitions:
            return None
 
        if not self.cursor:
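
To see the shape of the query the new `_build_max_partition_query` helper emits, here is a standalone sketch that mirrors the concatenation logic above without importing the Athena source; the schema, table, and partition names are hypothetical:

# Hypothetical inputs mirroring the helper above.
schema, table, partitions = "sales", "orders", ["year", "month"]

casted_keys = [f"CAST({p} as VARCHAR)" for p in partitions]
separator = "CAST('-' AS VARCHAR)"
part_concat = (
    casted_keys[0]
    if len(casted_keys) == 1
    else f"CONCAT({f', {separator}, '.join(casted_keys)})"
)

query = (
    f'select {",".join(partitions)} from "{schema}"."{table}$partitions" '
    f'where {part_concat} = (select max({part_concat}) from "{schema}"."{table}$partitions")'
)
print(query)
# select year,month from "sales"."orders$partitions"
# where CONCAT(CAST(year as VARCHAR), CAST('-' AS VARCHAR), CAST(month as VARCHAR))
#   = (select max(<same CONCAT>) from "sales"."orders$partitions")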
@@ -557,11 +618,9 @@ class AthenaSource(SQLAlchemySource):
                 context=f"{schema}.{table}",
                 level=StructuredLogLevel.WARN,
             ):
-                # We create an artifical concatenated partition key to be able to query max partition easier
-                part_concat = " || '-' || ".join(
-                    self._casted_partition_key(key) for key in partitions
+                max_partition_query = self._build_max_partition_query(
+                    schema, table, partitions
                 )
-                max_partition_query = f'select {",".join(partitions)} from "{schema}"."{table}$partitions" where {part_concat} = (select max({part_concat}) from "{schema}"."{table}$partitions")'
                 ret = self.cursor.execute(max_partition_query)
                 max_partition: Dict[str, str] = {}
                 if ret:
@@ -678,16 +737,34 @@ class AthenaSource(SQLAlchemySource):
         ).get(table, None)
 
         if partition and partition.max_partition:
-            max_partition_filters = []
-            for key, value in partition.max_partition.items():
-                max_partition_filters.append(
-                    f"{self._casted_partition_key(key)} = '{value}'"
+            try:
+                # Sanitize identifiers to prevent SQL injection
+                sanitized_schema = self._sanitize_identifier(schema)
+                sanitized_table = self._sanitize_identifier(table)
+
+                max_partition_filters = []
+                for key, value in partition.max_partition.items():
+                    # Sanitize partition key and properly escape the value
+                    sanitized_key = self._sanitize_identifier(key)
+                    # Escape single quotes in the value to prevent injection
+                    escaped_value = value.replace("'", "''") if value else ""
+                    max_partition_filters.append(
+                        f"{self._casted_partition_key(sanitized_key)} = '{escaped_value}'"
+                    )
+                max_partition = str(partition.max_partition)
+                return (
+                    max_partition,
+                    f'SELECT * FROM "{sanitized_schema}"."{sanitized_table}" WHERE {" AND ".join(max_partition_filters)}',
                 )
-            max_partition = str(partition.max_partition)
-            return (
-                max_partition,
-                f'SELECT * FROM "{schema}"."{table}" WHERE {" AND ".join(max_partition_filters)}',
-            )
+            except ValueError as e:
+                # If sanitization fails due to malicious identifiers,
+                # return None to disable partition profiling for this table
+                # rather than crashing the entire ingestion
+                logger.warning(
+                    f"Failed to generate partition profiler query for {schema}.{table} due to unsafe identifiers: {e}. "
+                    f"Partition profiling disabled for this table."
+                )
+                return None, None
         return None, None
 
     def close(self):
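
The value escaping added above can be shown in isolation; a small sketch with a hypothetical partition value that contains a single quote:

# Hypothetical max-partition entry whose value contains a single quote.
key, value = "region", "cote d'ivoire"

# Mirrors the escaping added above: double any single quote in the value.
escaped_value = value.replace("'", "''") if value else ""
print(f"CAST({key} as VARCHAR) = '{escaped_value}'")
# CAST(region as VARCHAR) = 'cote d''ivoire'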
datahub/ingestion/source/sql/athena_properties_extractor.py
@@ -174,20 +174,16 @@ class AthenaPropertiesExtractor:
         def format_column_definition(line):
             # Use regex to parse the line more accurately
             # Pattern: column_name data_type [COMMENT comment_text] [,]
-            # Use greedy match for comment to capture everything until trailing comma
-            pattern = r"^\s*(.+?)\s+([\s,\w<>\[\]]+)((\s+COMMENT\s+(.+?)(,?))|(,?)\s*)?$"
-            match = re.match(pattern, line, re.IGNORECASE)
+            # Improved pattern to better separate column name, data type, and comment
+            pattern = r"^\s*([`\w']+)\s+([\w<>\[\](),\s]+?)(\s+COMMENT\s+(.+?))?(,?)\s*$"
+            match = re.match(pattern, line.strip(), re.IGNORECASE)
 
             if not match:
                 return line
-            column_name = match.group(1)
-            data_type = match.group(2)
-            comment_part = match.group(5)  # COMMENT part
-            # there are different number of match groups depending on whether comment exists
-            if comment_part:
-                trailing_comma = match.group(6) if match.group(6) else ""
-            else:
-                trailing_comma = match.group(7) if match.group(7) else ""
+            column_name = match.group(1).strip()
+            data_type = match.group(2).strip()
+            comment_part = match.group(4)  # COMMENT part
+            trailing_comma = match.group(5) if match.group(5) else ""
 
             # Add backticks to column name if not already present
             if not (column_name.startswith("`") and column_name.endswith("`")):
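
To show what the revised pattern captures, a short sketch applies it to a hypothetical CREATE TABLE column line (the regex is copied from the change above):

import re

# Pattern copied from the change above, applied to a hypothetical DDL line.
pattern = r"^\s*([`\w']+)\s+([\w<>\[\](),\s]+?)(\s+COMMENT\s+(.+?))?(,?)\s*$"
line = "  user_id bigint COMMENT 'primary key',"

match = re.match(pattern, line.strip(), re.IGNORECASE)
if match:
    column_name = match.group(1).strip()   # user_id
    data_type = match.group(2).strip()     # bigint
    comment_part = match.group(4)          # 'primary key'
    trailing_comma = match.group(5) or ""  # ,
    print(column_name, data_type, comment_part, trailing_comma)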
@@ -201,17 +197,19 @@ class AthenaPropertiesExtractor:
 
                 # Handle comment quoting and escaping
                 if comment_part.startswith("'") and comment_part.endswith("'"):
-                    # Already properly single quoted - keep as is
-                    formatted_comment = comment_part
+                    # Already single quoted - but check for proper escaping
+                    inner_content = comment_part[1:-1]
+                    # Re-escape any single quotes that aren't properly escaped
+                    escaped_content = inner_content.replace("'", "''")
+                    formatted_comment = f"'{escaped_content}'"
                 elif comment_part.startswith('"') and comment_part.endswith('"'):
                     # Double quoted - convert to single quotes and escape internal single quotes
                     inner_content = comment_part[1:-1]
                     escaped_content = inner_content.replace("'", "''")
                     formatted_comment = f"'{escaped_content}'"
                 else:
-                    # Not quoted - add quotes and escape any single quotes
-                    escaped_content = comment_part.replace("'", "''")
-                    formatted_comment = f"'{escaped_content}'"
+                    # Not quoted - use double quotes to avoid escaping issues with single quotes
+                    formatted_comment = f'"{comment_part}"'
 
                 result_parts.extend(["COMMENT", formatted_comment])
 
@@ -240,19 +238,39 @@ class AthenaPropertiesExtractor:
                 formatted_lines.append(line)
                 continue
 
-            # Check if we're exiting column definitions (closing parenthesis before PARTITIONED BY or end)
-            if in_column_definition and ")" in line:
-                in_column_definition = False
+            # Skip processing PARTITIONED BY clauses as column definitions
+            if in_column_definition and "PARTITIONED BY" in line.upper():
                 formatted_lines.append(line)
                 continue
 
-            # Process only column definitions (not PARTITIONED BY or other sections)
+            # Process column definitions first, then check for exit condition
            if in_column_definition and stripped_line:
-                # Match column definition pattern and format it
-                formatted_line = AthenaPropertiesExtractor.format_column_definition(
-                    line
-                )
-                formatted_lines.append(formatted_line)
+                # Check if this line contains a column definition (before the closing paren)
+                if ")" in line:
+                    # Split the line at the closing parenthesis
+                    paren_index = line.find(")")
+                    column_part = line[:paren_index].strip()
+                    closing_part = line[paren_index:]
+
+                    if column_part:
+                        # Format the column part
+                        formatted_column = (
+                            AthenaPropertiesExtractor.format_column_definition(
+                                column_part
+                            )
+                        )
+                        # Reconstruct the line
+                        formatted_line = formatted_column.rstrip() + closing_part
+                        formatted_lines.append(formatted_line)
+                    else:
+                        formatted_lines.append(line)
+                    in_column_definition = False
+                else:
+                    # Regular column definition line
+                    formatted_line = AthenaPropertiesExtractor.format_column_definition(
+                        line
+                    )
+                    formatted_lines.append(formatted_line)
             else:
                 # For all other lines, keep as-is
                 formatted_lines.append(line)
datahub/ingestion/source/superset.py
@@ -154,6 +154,7 @@ class SupersetDataset(BaseModel):
     table_name: str
     changed_on_utc: Optional[str] = None
     explore_url: Optional[str] = ""
+    description: Optional[str] = ""
 
     @property
     def modified_dt(self) -> Optional[datetime]:
@@ -1062,7 +1063,7 @@ class SupersetSource(StatefulIngestionSourceBase):
                 fieldPath=col.get("column_name", ""),
                 type=SchemaFieldDataType(data_type),
                 nativeDataType="",
-                description=col.get("column_name", ""),
+                description=col.get("description") or col.get("column_name", ""),
                 nullable=True,
             )
             schema_fields.append(field)
@@ -1283,7 +1284,7 @@ class SupersetSource(StatefulIngestionSourceBase):
 
         dataset_info = DatasetPropertiesClass(
             name=dataset.table_name,
-            description="",
+            description=dataset.description or "",
             externalUrl=dataset_url,
             lastModified=TimeStamp(time=modified_ts),
         )
datahub/ingestion/source/tableau/tableau.py
@@ -1561,12 +1561,15 @@ class TableauSiteSource:
                 }}""",
             )
         else:
-            # As of Tableau Server 2024.2, the metadata API sporadically returns a 30-second
-            # timeout error.
-            # It doesn't reliably happen, so retrying a couple of times makes sense.
             if all(
+                # As of Tableau Server 2024.2, the metadata API sporadically returns a 30-second
+                # timeout error.
+                # It doesn't reliably happen, so retrying a couple of times makes sense.
                 error.get("message")
                 == "Execution canceled because timeout of 30000 millis was reached"
+                # The Metadata API sometimes returns an 'unexpected error' message when querying
+                # embeddedDatasourcesConnection. Try retrying a couple of times.
+                or error.get("message") == "Unexpected error occurred"
                 for error in errors
             ):
                 # If it was only a timeout error, we can retry.
@@ -1578,8 +1581,8 @@ class TableauSiteSource:
                     (self.config.max_retries - retries_remaining + 1) ** 2, 60
                 )
                 logger.info(
-                    f"Query {connection_type} received a 30 second timeout error - will retry in {backoff_time} seconds. "
-                    f"Retries remaining: {retries_remaining}"
+                    f"Query {connection_type} received a retryable error with {retries_remaining} retries remaining, "
+                    f"will retry in {backoff_time} seconds: {errors}"
                 )
                 time.sleep(backoff_time)
                 return self.get_connection_object_page(
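
For reference, the backoff used by this retry path grows quadratically with the attempt number and is capped at 60 seconds; a quick sketch assuming a hypothetical max_retries of 5:

# Hypothetical max_retries value; the real value comes from the Tableau source config.
max_retries = 5

for retries_remaining in range(max_retries, 0, -1):
    backoff_time = min((max_retries - retries_remaining + 1) ** 2, 60)
    print(f"retries_remaining={retries_remaining} -> sleep {backoff_time}s")
# 1s, 4s, 9s, 16s, 25s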
datahub/ingestion/source/unity/config.py
@@ -35,6 +35,10 @@ from datahub.utilities.global_warning_util import add_global_warning
 
 logger = logging.getLogger(__name__)
 
+# Configuration default constants
+INCLUDE_TAGS_DEFAULT = True
+INCLUDE_HIVE_METASTORE_DEFAULT = True
+
 
 
 class LineageDataSource(ConfigEnum):
@@ -137,10 +141,18 @@ class UnityCatalogSourceConfig(
     )
     warehouse_id: Optional[str] = pydantic.Field(
         default=None,
-        description="SQL Warehouse id, for running queries. If not set, will use the default warehouse.",
+        description=(
+            "SQL Warehouse id, for running queries. Must be explicitly provided to enable SQL-based features. "
+            "Required for the following features that need SQL access: "
+            "1) Tag extraction (include_tags=True) - queries system.information_schema.tags "
+            "2) Hive Metastore catalog (include_hive_metastore=True) - queries legacy hive_metastore catalog "
+            "3) System table lineage (lineage_data_source=SYSTEM_TABLES) - queries system.access.table_lineage/column_lineage "
+            "4) Data profiling (profiling.enabled=True) - runs SELECT/ANALYZE queries on tables. "
+            "When warehouse_id is missing, these features will be automatically disabled (with warnings) to allow ingestion to continue."
+        ),
     )
     include_hive_metastore: bool = pydantic.Field(
-        default=True,
+        default=INCLUDE_HIVE_METASTORE_DEFAULT,
         description="Whether to ingest legacy `hive_metastore` catalog. This requires executing queries on SQL warehouse.",
     )
     workspace_name: Optional[str] = pydantic.Field(
@@ -236,8 +248,12 @@ class UnityCatalogSourceConfig(
     )
 
     include_tags: bool = pydantic.Field(
-        default=True,
-        description="Option to enable/disable column/table tag extraction.",
+        default=INCLUDE_TAGS_DEFAULT,
+        description=(
+            "Option to enable/disable column/table tag extraction. "
+            "Requires warehouse_id to be set since tag extraction needs to query system.information_schema.tags. "
+            "If warehouse_id is not provided, this will be automatically disabled to allow ingestion to continue."
+        ),
     )
 
     _rename_table_ownership = pydantic_renamed_field(
@@ -310,8 +326,6 @@ class UnityCatalogSourceConfig(
         description="Details about the delta lake, incase to emit siblings",
     )
 
-    scheme: str = DATABRICKS
-
     include_ml_model_aliases: bool = pydantic.Field(
         default=False,
         description="Whether to include ML model aliases in the ingestion.",
@@ -323,6 +337,51 @@ class UnityCatalogSourceConfig(
         description="Maximum number of ML models to ingest.",
     )
 
+    _forced_disable_tag_extraction: bool = pydantic.PrivateAttr(default=False)
+    _forced_disable_hive_metastore_extraction = pydantic.PrivateAttr(default=False)
+
+    scheme: str = DATABRICKS
+
+    def __init__(self, **data):
+        # First, let the parent handle the root validators and field processing
+        super().__init__(**data)
+
+        # After model creation, check if we need to auto-disable features
+        # based on the final warehouse_id value (which may have been set by root validators)
+        include_tags_original = data.get("include_tags", INCLUDE_TAGS_DEFAULT)
+        include_hive_metastore_original = data.get(
+            "include_hive_metastore", INCLUDE_HIVE_METASTORE_DEFAULT
+        )
+
+        # Track what we're force-disabling
+        forced_disable_tag_extraction = False
+        forced_disable_hive_metastore_extraction = False
+
+        # Check if features should be auto-disabled based on final warehouse_id
+        if include_tags_original and not self.warehouse_id:
+            forced_disable_tag_extraction = True
+            self.include_tags = False  # Modify the model attribute directly
+            logger.warning(
+                "warehouse_id is not set but include_tags=True. "
+                "Automatically disabling tag extraction since it requires SQL queries. "
+                "Set warehouse_id to enable tag extraction."
+            )
+
+        if include_hive_metastore_original and not self.warehouse_id:
+            forced_disable_hive_metastore_extraction = True
+            self.include_hive_metastore = False  # Modify the model attribute directly
+            logger.warning(
+                "warehouse_id is not set but include_hive_metastore=True. "
+                "Automatically disabling hive metastore extraction since it requires SQL queries. "
+                "Set warehouse_id to enable hive metastore extraction."
+            )
+
+        # Set private attributes
+        self._forced_disable_tag_extraction = forced_disable_tag_extraction
+        self._forced_disable_hive_metastore_extraction = (
+            forced_disable_hive_metastore_extraction
+        )
+
     def get_sql_alchemy_url(self, database: Optional[str] = None) -> str:
         uri_opts = {"http_path": f"/sql/1.0/warehouses/{self.warehouse_id}"}
         if database:
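
The practical effect of the new `__init__` is that a recipe without `warehouse_id` no longer fails validation; the SQL-dependent flags are switched off instead. A rough sketch of that behavior (the connection fields used here, workspace_url and token, are assumptions for illustration and may not match the exact required fields):

from datahub.ingestion.source.unity.config import UnityCatalogSourceConfig

# Hypothetical recipe values; only warehouse_id / include_tags / include_hive_metastore
# come from the diff above, the connection fields are assumptions.
config = UnityCatalogSourceConfig.parse_obj(
    {
        "workspace_url": "https://example.cloud.databricks.com",
        "token": "dapi-example",
        "include_tags": True,
        "include_hive_metastore": True,
        # no warehouse_id
    }
)

# Both flags are force-disabled with a warning rather than raising a ValueError.
print(config.include_tags, config.include_hive_metastore)  # False False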
@@ -392,11 +451,6 @@ class UnityCatalogSourceConfig(
                 "When `warehouse_id` is set, it must match the `warehouse_id` in `profiling`."
             )
 
-        if values.get("include_hive_metastore") and not values.get("warehouse_id"):
-            raise ValueError(
-                "When `include_hive_metastore` is set, `warehouse_id` must be set."
-            )
-
         if values.get("warehouse_id") and profiling and not profiling.warehouse_id:
             profiling.warehouse_id = values["warehouse_id"]
 
datahub/ingestion/source/unity/proxy.py
@@ -4,6 +4,7 @@ Manage the communication with DataBricks Server and provide equivalent dataclass
 
 import dataclasses
 import logging
+import os
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime
 from typing import Any, Dict, Iterable, List, Optional, Sequence, Union, cast
@@ -71,6 +72,23 @@ logger: logging.Logger = logging.getLogger(__name__)
 _MAX_CONCURRENT_CATALOGS = 1
 
 
+# Import and apply the proxy patch from separate module
+try:
+    from datahub.ingestion.source.unity.proxy_patch import (
+        apply_databricks_proxy_fix,
+        mask_proxy_credentials,
+    )
+
+    # Apply the fix when the module is imported
+    apply_databricks_proxy_fix()
+except ImportError as e:
+    logger.debug(f"Could not import proxy patch module: {e}")
+
+    # Fallback function for masking credentials
+    def mask_proxy_credentials(url: Optional[str]) -> str:
+        return "***MASKED***" if url else "None"
+
+
 @dataclasses.dataclass
 class TableInfoWithGeneration(TableInfo):
     generation: Optional[int] = None
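
The `proxy_patch` module itself is new in this release (321 added lines) and is not part of this hunk; only the fallback `mask_proxy_credentials` above is visible. As a hypothetical sketch of what masking credentials in a proxy URL can look like (this is not the actual proxy_patch implementation):

from typing import Optional
from urllib.parse import urlsplit, urlunsplit


def mask_proxy_credentials_sketch(url: Optional[str]) -> str:
    # Hypothetical stand-in for proxy_patch.mask_proxy_credentials.
    if not url:
        return "None"
    parts = urlsplit(url)
    if parts.username or parts.password:
        host = parts.hostname or ""
        netloc = f"***:***@{host}" + (f":{parts.port}" if parts.port else "")
        return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))
    return url


print(mask_proxy_credentials_sketch("http://alice:s3cret@proxy.internal:8080"))
# http://***:***@proxy.internal:8080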
@@ -411,7 +429,7 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
         query = f"""
             SELECT
                 entity_type, entity_id,
-                source_table_full_name, source_type,
+                source_table_full_name, source_type, source_path,
                 target_table_full_name, target_type,
                 max(event_time) as last_updated
             FROM system.access.table_lineage
@@ -420,7 +438,7 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
             {additional_where}
             GROUP BY
                 entity_type, entity_id,
-                source_table_full_name, source_type,
+                source_table_full_name, source_type, source_path,
                 target_table_full_name, target_type
         """
         rows = self._execute_sql_query(query, [catalog, catalog])
@@ -432,6 +450,7 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
             source_full_name = row["source_table_full_name"]
             target_full_name = row["target_table_full_name"]
             source_type = row["source_type"]
+            source_path = row["source_path"]
             last_updated = row["last_updated"]
 
             # Initialize TableLineageInfo for both source and target tables if they're in our catalog
@@ -460,7 +479,7 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
             # Handle external upstreams (PATH type)
             elif source_type == "PATH":
                 external_upstream = ExternalUpstream(
-                    path=source_full_name,
+                    path=source_path,
                     source_type=source_type,
                     last_updated=last_updated,
                 )
@@ -973,16 +992,82 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
 
     def _execute_sql_query(self, query: str, params: Sequence[Any] = ()) -> List[Row]:
         """Execute SQL query using databricks-sql connector for better performance"""
+        logger.debug(f"Executing SQL query with {len(params)} parameters")
+        if logger.isEnabledFor(logging.DEBUG):
+            # Only log full query in debug mode to avoid performance overhead
+            logger.debug(f"Full SQL query: {query}")
+            if params:
+                logger.debug(f"Query parameters: {params}")
+
+        # Check if warehouse_id is available for SQL operations
+        if not self.warehouse_id:
+            self.report.report_warning(
+                "Cannot execute SQL query",
+                "warehouse_id is not configured. SQL operations require a valid warehouse_id to be set in the Unity Catalog configuration",
+            )
+            logger.warning(
+                "Cannot execute SQL query: warehouse_id is not configured. "
+                "SQL operations require a valid warehouse_id to be set in the Unity Catalog configuration."
+            )
+            return []
+
+        # Log connection parameters (with masked token)
+        masked_params = {**self._sql_connection_params}
+        if "access_token" in masked_params:
+            masked_params["access_token"] = "***MASKED***"
+        logger.debug(f"Using connection parameters: {masked_params}")
+
+        # Log proxy environment variables that affect SQL connections
+        proxy_env_debug = {}
+        for var in ["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"]:
+            value = os.environ.get(var)
+            if value:
+                proxy_env_debug[var] = mask_proxy_credentials(value)
+
+        if proxy_env_debug:
+            logger.debug(
+                f"SQL connection will use proxy environment variables: {proxy_env_debug}"
+            )
+        else:
+            logger.debug("No proxy environment variables detected for SQL connection")
+
         try:
             with (
                 connect(**self._sql_connection_params) as connection,
                 connection.cursor() as cursor,
             ):
                 cursor.execute(query, list(params))
-                return cursor.fetchall()
+                rows = cursor.fetchall()
+                logger.debug(
+                    f"SQL query executed successfully, returned {len(rows)} rows"
+                )
+                return rows
 
         except Exception as e:
-            logger.warning(f"Failed to execute SQL query: {e}")
+            logger.warning(f"Failed to execute SQL query: {e}", exc_info=True)
+            if logger.isEnabledFor(logging.DEBUG):
+                # Only log failed query details in debug mode for security
+                logger.debug(f"SQL query that failed: {query}")
+                logger.debug(f"SQL query parameters: {params}")
+
+            # Check if this might be a proxy-related error
+            error_str = str(e).lower()
+            if any(
+                proxy_keyword in error_str
+                for proxy_keyword in [
+                    "proxy",
+                    "407",
+                    "authentication required",
+                    "tunnel",
+                    "connect",
+                ]
+            ):
+                logger.error(
+                    "SQL query failure appears to be proxy-related. "
+                    "Please check proxy configuration and authentication. "
+                    f"Proxy environment variables detected: {list(proxy_env_debug.keys())}"
+                )
+
             return []
 
     @cached(cachetools.FIFOCache(maxsize=_MAX_CONCURRENT_CATALOGS))