acryl-datahub 1.2.0.6__py3-none-any.whl → 1.2.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (84)
  1. {acryl_datahub-1.2.0.6.dist-info → acryl_datahub-1.2.0.7.dist-info}/METADATA +2629 -2543
  2. {acryl_datahub-1.2.0.6.dist-info → acryl_datahub-1.2.0.7.dist-info}/RECORD +83 -75
  3. {acryl_datahub-1.2.0.6.dist-info → acryl_datahub-1.2.0.7.dist-info}/entry_points.txt +1 -0
  4. datahub/_version.py +1 -1
  5. datahub/api/graphql/operation.py +1 -1
  6. datahub/ingestion/autogenerated/capability_summary.json +46 -6
  7. datahub/ingestion/autogenerated/lineage.json +3 -2
  8. datahub/ingestion/run/pipeline.py +1 -0
  9. datahub/ingestion/source/aws/s3_boto_utils.py +97 -5
  10. datahub/ingestion/source/bigquery_v2/bigquery_connection.py +12 -1
  11. datahub/ingestion/source/common/subtypes.py +3 -0
  12. datahub/ingestion/source/data_lake_common/path_spec.py +1 -1
  13. datahub/ingestion/source/datahub/datahub_database_reader.py +19 -8
  14. datahub/ingestion/source/dbt/dbt_common.py +74 -0
  15. datahub/ingestion/source/dremio/dremio_aspects.py +3 -2
  16. datahub/ingestion/source/dremio/dremio_source.py +4 -0
  17. datahub/ingestion/source/dynamodb/dynamodb.py +10 -7
  18. datahub/ingestion/source/excel/__init__.py +0 -0
  19. datahub/ingestion/source/excel/config.py +92 -0
  20. datahub/ingestion/source/excel/excel_file.py +539 -0
  21. datahub/ingestion/source/excel/profiling.py +308 -0
  22. datahub/ingestion/source/excel/report.py +49 -0
  23. datahub/ingestion/source/excel/source.py +662 -0
  24. datahub/ingestion/source/excel/util.py +18 -0
  25. datahub/ingestion/source/fivetran/fivetran_query.py +8 -1
  26. datahub/ingestion/source/openapi.py +1 -1
  27. datahub/ingestion/source/powerbi/config.py +33 -0
  28. datahub/ingestion/source/powerbi/m_query/data_classes.py +1 -0
  29. datahub/ingestion/source/powerbi/m_query/pattern_handler.py +100 -10
  30. datahub/ingestion/source/powerbi/powerbi.py +5 -0
  31. datahub/ingestion/source/qlik_sense/qlik_sense.py +1 -1
  32. datahub/ingestion/source/redshift/config.py +9 -6
  33. datahub/ingestion/source/redshift/lineage.py +386 -687
  34. datahub/ingestion/source/redshift/redshift.py +19 -106
  35. datahub/ingestion/source/s3/source.py +65 -59
  36. datahub/ingestion/source/snowflake/constants.py +2 -0
  37. datahub/ingestion/source/snowflake/snowflake_config.py +10 -0
  38. datahub/ingestion/source/snowflake/snowflake_connection.py +16 -5
  39. datahub/ingestion/source/snowflake/snowflake_query.py +27 -0
  40. datahub/ingestion/source/snowflake/snowflake_report.py +1 -0
  41. datahub/ingestion/source/snowflake/snowflake_schema.py +179 -7
  42. datahub/ingestion/source/snowflake/snowflake_schema_gen.py +25 -7
  43. datahub/ingestion/source/snowflake/snowflake_summary.py +1 -0
  44. datahub/ingestion/source/snowflake/snowflake_utils.py +18 -5
  45. datahub/ingestion/source/snowflake/snowflake_v2.py +6 -1
  46. datahub/ingestion/source/sql/hive_metastore.py +1 -0
  47. datahub/ingestion/source/sql/mssql/job_models.py +3 -1
  48. datahub/ingestion/source/sql/mssql/source.py +62 -3
  49. datahub/ingestion/source/sql_queries.py +24 -2
  50. datahub/ingestion/source/state/checkpoint.py +3 -28
  51. datahub/ingestion/source/unity/config.py +74 -9
  52. datahub/ingestion/source/unity/proxy.py +167 -5
  53. datahub/ingestion/source/unity/proxy_patch.py +321 -0
  54. datahub/ingestion/source/unity/proxy_types.py +24 -0
  55. datahub/ingestion/source/unity/report.py +5 -0
  56. datahub/ingestion/source/unity/source.py +111 -1
  57. datahub/ingestion/source/usage/usage_common.py +1 -0
  58. datahub/metadata/_internal_schema_classes.py +573 -517
  59. datahub/metadata/_urns/urn_defs.py +1748 -1748
  60. datahub/metadata/schema.avsc +18564 -18484
  61. datahub/metadata/schemas/ChartInfo.avsc +2 -1
  62. datahub/metadata/schemas/DataHubPageModuleProperties.avsc +9 -0
  63. datahub/metadata/schemas/InstitutionalMemory.avsc +9 -0
  64. datahub/metadata/schemas/LogicalParent.avsc +104 -100
  65. datahub/metadata/schemas/MetadataChangeEvent.avsc +81 -45
  66. datahub/metadata/schemas/Ownership.avsc +69 -0
  67. datahub/metadata/schemas/SchemaFieldKey.avsc +3 -1
  68. datahub/metadata/schemas/StructuredProperties.avsc +69 -0
  69. datahub/metadata/schemas/StructuredPropertyDefinition.avsc +3 -0
  70. datahub/metadata/schemas/__init__.py +3 -3
  71. datahub/sdk/chart.py +36 -22
  72. datahub/sdk/dashboard.py +38 -62
  73. datahub/sdk/lineage_client.py +6 -26
  74. datahub/sdk/main_client.py +7 -3
  75. datahub/sdk/search_filters.py +16 -0
  76. datahub/specific/aspect_helpers/siblings.py +73 -0
  77. datahub/specific/dataset.py +2 -0
  78. datahub/sql_parsing/sql_parsing_aggregator.py +3 -0
  79. datahub/sql_parsing/tool_meta_extractor.py +1 -3
  80. datahub/upgrade/upgrade.py +14 -2
  81. datahub/ingestion/source/redshift/lineage_v2.py +0 -466
  82. {acryl_datahub-1.2.0.6.dist-info → acryl_datahub-1.2.0.7.dist-info}/WHEEL +0 -0
  83. {acryl_datahub-1.2.0.6.dist-info → acryl_datahub-1.2.0.7.dist-info}/licenses/LICENSE +0 -0
  84. {acryl_datahub-1.2.0.6.dist-info → acryl_datahub-1.2.0.7.dist-info}/top_level.txt +0 -0
datahub/ingestion/source/redshift/redshift.py

@@ -1,5 +1,4 @@
  import functools
- import itertools
  import logging
  from collections import defaultdict
  from typing import Dict, Iterable, List, Optional, Type, Union
@@ -52,8 +51,7 @@ from datahub.ingestion.source.common.subtypes import (
  from datahub.ingestion.source.redshift.config import RedshiftConfig
  from datahub.ingestion.source.redshift.datashares import RedshiftDatasharesHelper
  from datahub.ingestion.source.redshift.exception import handle_redshift_exceptions_yield
- from datahub.ingestion.source.redshift.lineage import RedshiftLineageExtractor
- from datahub.ingestion.source.redshift.lineage_v2 import RedshiftSqlLineageV2
+ from datahub.ingestion.source.redshift.lineage import RedshiftSqlLineage
  from datahub.ingestion.source.redshift.profile import RedshiftProfiler
  from datahub.ingestion.source.redshift.redshift_data_reader import RedshiftDataReader
  from datahub.ingestion.source.redshift.redshift_schema import (
@@ -72,7 +70,6 @@ from datahub.ingestion.source.sql.sql_utils import (
  add_table_to_schema_container,
  gen_database_container,
  gen_database_key,
- gen_lineage,
  gen_schema_container,
  gen_schema_key,
  get_dataplatform_instance_aspect,
@@ -116,7 +113,6 @@ from datahub.metadata.com.linkedin.pegasus2avro.schema import (
  )
  from datahub.metadata.schema_classes import GlobalTagsClass, TagAssociationClass
  from datahub.utilities import memory_footprint
- from datahub.utilities.dedup_list import deduplicate_list
  from datahub.utilities.mapping import Constants
  from datahub.utilities.perf_timer import PerfTimer
  from datahub.utilities.registries.domain_registry import DomainRegistry
@@ -423,40 +419,25 @@ class RedshiftSource(StatefulIngestionSourceBase, TestableSource):
  memory_footprint.total_size(self.db_views)
  )

- if self.config.use_lineage_v2:
- with RedshiftSqlLineageV2(
- config=self.config,
- report=self.report,
- context=self.ctx,
- database=database,
- redundant_run_skip_handler=self.redundant_lineage_run_skip_handler,
- ) as lineage_extractor:
- yield from lineage_extractor.aggregator.register_schemas_from_stream(
- self.process_schemas(connection, database)
- )
-
- with self.report.new_stage(LINEAGE_EXTRACTION):
- yield from self.extract_lineage_v2(
- connection=connection,
- database=database,
- lineage_extractor=lineage_extractor,
- )
-
- all_tables = self.get_all_tables()
- else:
- yield from self.process_schemas(connection, database)
+ with RedshiftSqlLineage(
+ config=self.config,
+ report=self.report,
+ context=self.ctx,
+ database=database,
+ redundant_run_skip_handler=self.redundant_lineage_run_skip_handler,
+ ) as lineage_extractor:
+ yield from lineage_extractor.aggregator.register_schemas_from_stream(
+ self.process_schemas(connection, database)
+ )

- all_tables = self.get_all_tables()
+ with self.report.new_stage(LINEAGE_EXTRACTION):
+ yield from self.extract_lineage_v2(
+ connection=connection,
+ database=database,
+ lineage_extractor=lineage_extractor,
+ )

- if (
- self.config.include_table_lineage
- or self.config.include_view_lineage
- or self.config.include_copy_lineage
- ):
- with self.report.new_stage(LINEAGE_EXTRACTION):
- yield from self.extract_lineage(
- connection=connection, all_tables=all_tables, database=database
- )
+ all_tables = self.get_all_tables()

  if self.config.include_usage_statistics:
  with self.report.new_stage(USAGE_EXTRACTION_INGESTION):
@@ -968,45 +949,11 @@ class RedshiftSource(StatefulIngestionSourceBase, TestableSource):

  self.report.usage_extraction_sec[database] = timer.elapsed_seconds(digits=2)

- def extract_lineage(
- self,
- connection: redshift_connector.Connection,
- database: str,
- all_tables: Dict[str, Dict[str, List[Union[RedshiftView, RedshiftTable]]]],
- ) -> Iterable[MetadataWorkUnit]:
- if not self._should_ingest_lineage():
- return
-
- lineage_extractor = RedshiftLineageExtractor(
- config=self.config,
- report=self.report,
- context=self.ctx,
- redundant_run_skip_handler=self.redundant_lineage_run_skip_handler,
- )
-
- with PerfTimer() as timer:
- lineage_extractor.populate_lineage(
- database=database, connection=connection, all_tables=all_tables
- )
-
- self.report.lineage_extraction_sec[f"{database}"] = timer.elapsed_seconds(
- digits=2
- )
- yield from self.generate_lineage(
- database, lineage_extractor=lineage_extractor
- )
-
- if self.redundant_lineage_run_skip_handler:
- # Update the checkpoint state for this run.
- self.redundant_lineage_run_skip_handler.update_state(
- self.config.start_time, self.config.end_time
- )
-
  def extract_lineage_v2(
  self,
  connection: redshift_connector.Connection,
  database: str,
- lineage_extractor: RedshiftSqlLineageV2,
+ lineage_extractor: RedshiftSqlLineage,
  ) -> Iterable[MetadataWorkUnit]:
  if self.config.include_share_lineage:
  outbound_shares = self.data_dictionary.get_outbound_datashares(connection)
@@ -1069,40 +1016,6 @@ class RedshiftSource(StatefulIngestionSourceBase, TestableSource):

  return True

- def generate_lineage(
- self, database: str, lineage_extractor: RedshiftLineageExtractor
- ) -> Iterable[MetadataWorkUnit]:
- logger.info(f"Generate lineage for {database}")
- for schema in deduplicate_list(
- itertools.chain(self.db_tables[database], self.db_views[database])
- ):
- if (
- database not in self.db_schemas
- or schema not in self.db_schemas[database]
- ):
- logger.warning(
- f"Either database {database} or {schema} exists in the lineage but was not discovered earlier. Something went wrong."
- )
- continue
-
- table_or_view: Union[RedshiftTable, RedshiftView]
- for table_or_view in (
- []
- + self.db_tables[database].get(schema, [])
- + self.db_views[database].get(schema, [])
- ):
- datahub_dataset_name = f"{database}.{schema}.{table_or_view.name}"
- dataset_urn = self.gen_dataset_urn(datahub_dataset_name)
-
- lineage_info = lineage_extractor.get_lineage(
- table_or_view,
- dataset_urn,
- self.db_schemas[database][schema],
- )
- if lineage_info:
- # incremental lineage generation is taken care by auto_incremental_lineage
- yield from gen_lineage(dataset_urn, lineage_info)
-
  def add_config_to_report(self):
  self.report.stateful_lineage_ingestion_enabled = (
  self.config.enable_stateful_lineage_ingestion
datahub/ingestion/source/s3/source.py

@@ -34,7 +34,13 @@ from datahub.ingestion.api.decorators import (
  )
  from datahub.ingestion.api.source import MetadataWorkUnitProcessor
  from datahub.ingestion.api.workunit import MetadataWorkUnit
- from datahub.ingestion.source.aws.s3_boto_utils import get_s3_tags, list_folders
+ from datahub.ingestion.source.aws.s3_boto_utils import (
+ get_s3_tags,
+ list_folders,
+ list_folders_path,
+ list_objects_recursive,
+ list_objects_recursive_path,
+ )
  from datahub.ingestion.source.aws.s3_util import (
  get_bucket_name,
  get_bucket_relative_path,
@@ -84,8 +90,6 @@ if TYPE_CHECKING:
  logging.getLogger("py4j").setLevel(logging.ERROR)
  logger: logging.Logger = logging.getLogger(__name__)

- PAGE_SIZE = 1000
-
  # Hack to support the .gzip extension with smart_open.
  so_compression.register_compressor(".gzip", so_compression._COMPRESSOR_REGISTRY[".gz"])

@@ -384,7 +388,10 @@

  def read_file_spark(self, file: str, ext: str) -> Optional[DataFrame]:
  logger.debug(f"Opening file {file} for profiling in spark")
- file = file.replace("s3://", "s3a://")
+ if "s3://" in file:
+ # replace s3:// with s3a://, and make sure standalone bucket names always end with a slash.
+ # Spark will fail if given a path like `s3a://mybucket`, and requires it to be `s3a://mybucket/`.
+ file = f"s3a://{get_bucket_name(file)}/{get_bucket_relative_path(file)}"

  telemetry.telemetry_instance.ping("data_lake_file", {"extension": ext})
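The rewrite above can be traced with a small standalone sketch (a hypothetical helper, not part of the release; it assumes get_bucket_name and get_bucket_relative_path behave as the inline comments describe):

    from datahub.ingestion.source.aws.s3_util import get_bucket_name, get_bucket_relative_path

    def to_spark_path(file: str) -> str:
        # Hypothetical mirror of the hunk above: rewrite s3:// to s3a:// and make sure a
        # bare bucket name ends with a slash, since Spark rejects "s3a://mybucket".
        if "s3://" in file:
            return f"s3a://{get_bucket_name(file)}/{get_bucket_relative_path(file)}"
        return file

    # "s3://mybucket"           -> "s3a://mybucket/"
    # "s3://mybucket/dir/a.csv" -> "s3a://mybucket/dir/a.csv"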
 
@@ -836,29 +843,31 @@
  content_type=browse_path.content_type,
  )

- def resolve_templated_folders(self, bucket_name: str, prefix: str) -> Iterable[str]:
+ def resolve_templated_folders(self, prefix: str) -> Iterable[str]:
  folder_split: List[str] = prefix.split("*", 1)
  # If the len of split is 1 it means we don't have * in the prefix
  if len(folder_split) == 1:
  yield prefix
  return

- folders: Iterable[str] = list_folders(
- bucket_name, folder_split[0], self.source_config.aws_config
+ basename_startswith = folder_split[0].split("/")[-1]
+ dirname = folder_split[0].removesuffix(basename_startswith)
+
+ folders = list_folders_path(
+ dirname,
+ startswith=basename_startswith,
+ aws_config=self.source_config.aws_config,
  )
  for folder in folders:
- # Ensure proper path joining - folder already includes trailing slash from list_folders
- # but we need to handle the case where folder_split[1] might start with a slash
+ # Ensure proper path joining - folders from list_folders path never include a
+ # trailing slash, but we need to handle the case where folder_split[1] might
+ # start with a slash
  remaining_pattern = folder_split[1]
  if remaining_pattern.startswith("/"):
  remaining_pattern = remaining_pattern[1:]

- # Ensure folder ends with slash for proper path construction
- if not folder.endswith("/"):
- folder = folder + "/"
-
  yield from self.resolve_templated_folders(
- bucket_name, f"{folder}{remaining_pattern}"
+ f"{folder.path}/{remaining_pattern}"
  )

  def get_dir_to_process(
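The new dirname/startswith split can be followed with illustrative values (the folder objects returned by list_folders_path are assumed, per this hunk, to expose a .path attribute without a trailing slash):

    prefix = "s3://my-bucket/data/2023*/logs/"                    # hypothetical templated prefix
    folder_split = prefix.split("*", 1)                           # ["s3://my-bucket/data/2023", "/logs/"]
    basename_startswith = folder_split[0].split("/")[-1]          # "2023"
    dirname = folder_split[0].removesuffix(basename_startswith)   # "s3://my-bucket/data/"
    # list_folders_path(dirname, startswith="2023", aws_config=...) is then expected to yield
    # folders whose .path looks like "s3://my-bucket/data/2023-01"; the remaining pattern
    # "/logs/" is appended and the combined prefix is resolved recursively.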
@@ -942,7 +951,9 @@
  # Instead of loading all objects into memory, we'll accumulate folder data incrementally
  folder_data: Dict[str, FolderInfo] = {} # dirname -> FolderInfo

- for obj in bucket.objects.filter(Prefix=prefix).page_size(PAGE_SIZE):
+ for obj in list_objects_recursive(
+ bucket.name, prefix, self.source_config.aws_config
+ ):
  s3_path = self.create_s3_path(obj.bucket_name, obj.key)

  if not _is_allowed_path(path_spec, s3_path):
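list_objects_recursive itself lives in the expanded s3_boto_utils module (+97 lines in the file list) and is not shown in this excerpt. As a rough, hypothetical sketch under stated assumptions (aws_config exposes get_s3_resource, and callers only need .bucket_name and .key on each yielded item), a helper with this call signature could be little more than a paginated boto3 listing:

    from typing import Any, Iterable

    def list_objects_recursive_sketch(
        bucket_name: str, prefix: str, aws_config: Any
    ) -> Iterable[Any]:
        # Hypothetical stand-in for the real helper, not the actual implementation.
        if aws_config is None:
            raise ValueError("aws_config not set")
        bucket = aws_config.get_s3_resource().Bucket(bucket_name)
        # boto3 pages through ListObjectsV2 transparently; page_size only bounds each call.
        yield from bucket.objects.filter(Prefix=prefix).page_size(1000)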
@@ -1016,13 +1027,6 @@
  if self.source_config.aws_config is None:
  raise ValueError("aws_config not set. Cannot browse s3")

- s3 = self.source_config.aws_config.get_s3_resource(
- self.source_config.verify_ssl
- )
- bucket_name = get_bucket_name(path_spec.include)
- bucket = s3.Bucket(bucket_name)
-
- logger.debug(f"Scanning bucket: {bucket_name}")
  logger.info(f"Processing path spec: {path_spec.include}")

  # Check if we have {table} template in the path
@@ -1034,16 +1038,14 @@
  logger.info("Using templated path processing")
  # Always use templated processing when {table} is present
  # This groups files under table-level datasets
- yield from self._process_templated_path(path_spec, bucket, bucket_name)
+ yield from self._process_templated_path(path_spec)
  else:
  logger.info("Using simple path processing")
  # Only use simple processing for non-templated paths
  # This creates individual file-level datasets
- yield from self._process_simple_path(path_spec, bucket, bucket_name)
+ yield from self._process_simple_path(path_spec)

- def _process_templated_path(
- self, path_spec: PathSpec, bucket: "Bucket", bucket_name: str
- ) -> Iterable[BrowsePath]:
+ def _process_templated_path(self, path_spec: PathSpec) -> Iterable[BrowsePath]: # noqa: C901
  """
  Process S3 paths containing {table} templates to create table-level datasets.

@@ -1057,12 +1059,17 @@

  Args:
  path_spec: Path specification with {table} template
- bucket: S3 bucket resource
- bucket_name: Name of the S3 bucket

  Yields:
  BrowsePath: One per table (not per file), containing aggregated metadata
  """
+
+ if self.source_config.aws_config is None:
+ raise ValueError("aws_config not set. Cannot browse s3")
+ s3 = self.source_config.aws_config.get_s3_resource(
+ self.source_config.verify_ssl
+ )
+
  # Find the part before {table}
  table_marker = "{table}"
  if table_marker not in path_spec.include:
@@ -1097,20 +1104,13 @@

  # Split the path at {table} to get the prefix that needs wildcard resolution
  prefix_before_table = include.split(table_marker)[0]
- # Remove the s3:// and bucket name to get the relative path
- relative_path = get_bucket_relative_path(prefix_before_table)
-
  logger.info(f"Prefix before table: {prefix_before_table}")
- logger.info(f"Relative path for resolution: {relative_path}")

  try:
  # STEP 2: Resolve ALL wildcards in the path up to {table}
- # This converts patterns like "data/*/logs/" to actual paths like ["data/2023/logs/", "data/2024/logs/"]
- table_index = include.find(table_marker)
- folder_prefix = get_bucket_relative_path(include[:table_index])
-
+ # This converts patterns like "s3://data/*/logs/" to actual paths like ["s3://data/2023/logs/", "s3://data/2024/logs/"]
  resolved_prefixes = list(
- self.resolve_templated_folders(bucket_name, folder_prefix)
+ self.resolve_templated_folders(prefix_before_table)
  )
  logger.info(f"Resolved prefixes: {resolved_prefixes}")

@@ -1121,20 +1121,22 @@
  # Get all folders that could be tables under this resolved prefix
  # These are the actual table names (e.g., "users", "events", "logs")
  table_folders = list(
- list_folders(
- bucket_name, resolved_prefix, self.source_config.aws_config
+ list_folders_path(
+ resolved_prefix, aws_config=self.source_config.aws_config
  )
  )
  logger.debug(
- f"Found table folders under {resolved_prefix}: {table_folders}"
+ f"Found table folders under {resolved_prefix}: {[folder.name for folder in table_folders]}"
  )

  # STEP 4: Process each table folder to create a table-level dataset
- for table_folder in table_folders:
+ for folder in table_folders:
+ bucket_name = get_bucket_name(folder.path)
+ table_folder = get_bucket_relative_path(folder.path)
+ bucket = s3.Bucket(bucket_name)
+
  # Create the full S3 path for this table
- table_s3_path = self.create_s3_path(
- bucket_name, table_folder.rstrip("/")
- )
+ table_s3_path = self.create_s3_path(bucket_name, table_folder)
  logger.info(
  f"Processing table folder: {table_folder} -> {table_s3_path}"
  )
@@ -1269,17 +1271,16 @@
  )

  except Exception as e:
- if "NoSuchBucket" in repr(e):
+ if isinstance(e, s3.meta.client.exceptions.NoSuchBucket):
  self.get_report().report_warning(
- "Missing bucket", f"No bucket found {bucket_name}"
+ "Missing bucket",
+ f"No bucket found {e.response['Error'].get('BucketName')}",
  )
  return
  logger.error(f"Error in _process_templated_path: {e}")
  raise e

- def _process_simple_path(
- self, path_spec: PathSpec, bucket: "Bucket", bucket_name: str
- ) -> Iterable[BrowsePath]:
+ def _process_simple_path(self, path_spec: PathSpec) -> Iterable[BrowsePath]:
  """
  Process simple S3 paths without {table} templates to create file-level datasets.

@@ -1295,8 +1296,6 @@

  Args:
  path_spec: Path specification without {table} template
- bucket: S3 bucket resource
- bucket_name: Name of the S3 bucket

  Yields:
  BrowsePath: One per file, containing individual file metadata
@@ -1305,20 +1304,27 @@
  - BrowsePath(file="data/file1.csv", size=1000, partitions=[])
  - BrowsePath(file="data/file2.csv", size=2000, partitions=[])
  """
- assert self.source_config.aws_config is not None, "aws_config not set"
+
+ if self.source_config.aws_config is None:
+ raise ValueError("aws_config not set")
+ s3 = self.source_config.aws_config.get_s3_resource(
+ self.source_config.verify_ssl
+ )

  path_spec.sample_files = False # Disable sampling for simple paths

  # Extract the prefix from the path spec (stops at first wildcard)
- prefix = self.get_prefix(get_bucket_relative_path(path_spec.include))
+ prefix = self.get_prefix(path_spec.include)

- # Get s3 resource for content type checking
- s3 = self.source_config.aws_config.get_s3_resource(
- self.source_config.verify_ssl
- )
+ basename_startswith = prefix.split("/")[-1]
+ dirname = prefix.removesuffix(basename_startswith)

  # Iterate through all objects in the bucket matching the prefix
- for obj in bucket.objects.filter(Prefix=prefix).page_size(PAGE_SIZE):
+ for obj in list_objects_recursive_path(
+ dirname,
+ startswith=basename_startswith,
+ aws_config=self.source_config.aws_config,
+ ):
  s3_path = self.create_s3_path(obj.bucket_name, obj.key)

  # Get content type if configured
datahub/ingestion/source/snowflake/constants.py

@@ -9,6 +9,8 @@ class SnowflakeCloudProvider(StrEnum):

  SNOWFLAKE_DEFAULT_CLOUD = SnowflakeCloudProvider.AWS

+ DEFAULT_SNOWFLAKE_DOMAIN = "snowflakecomputing.com"
+

  class SnowflakeEdition(StrEnum):
  STANDARD = "Standard"
datahub/ingestion/source/snowflake/snowflake_config.py

@@ -216,6 +216,16 @@ class SnowflakeV2Config(
  description="If enabled, populates the ingested views' definitions.",
  )

+ fetch_views_from_information_schema: bool = Field(
+ default=False,
+ description="If enabled, uses information_schema.views to fetch view definitions instead of SHOW VIEWS command. "
+ "This alternative method can be more reliable for databases with large numbers of views (> 10K views), as the "
+ "SHOW VIEWS approach has proven unreliable and can lead to missing views in such scenarios. However, this method "
+ "requires OWNERSHIP privileges on views to retrieve their definitions. For views without ownership permissions "
+ "(where VIEW_DEFINITION is null/empty), the system will automatically fall back to using batched SHOW VIEWS queries "
+ "to populate the missing definitions.",
+ )
+
  include_technical_schema: bool = Field(
  default=True,
  description="If enabled, populates the snowflake technical schema and descriptions.",
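The snowflake_schema.py code that consumes this flag is not reproduced in this excerpt. As a hedged sketch of the fallback described above, built on the information_schema query added further down in this diff and assuming the connection object exposes a query() helper as used elsewhere in the source, the flow might look roughly like:

    from datahub.ingestion.source.snowflake.snowflake_query import SnowflakeQuery

    def get_views_with_fallback(connection, db_name: str, schema_name: str):
        # Hypothetical sketch only. Query information_schema first, then fall back to
        # batched SHOW VIEWS for views whose VIEW_DEFINITION came back NULL/empty
        # (i.e. views the ingesting role does not own).
        rows = list(connection.query(SnowflakeQuery.get_views_for_schema(db_name, schema_name)))
        missing = [r["VIEW_NAME"] for r in rows if not r.get("VIEW_DEFINITION")]
        if missing:
            ...  # issue SHOW VIEWS in batches and merge the recovered definitions back in
        return rows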
datahub/ingestion/source/snowflake/snowflake_connection.py

@@ -22,6 +22,7 @@ from datahub.ingestion.api.closeable import Closeable
  from datahub.ingestion.source.snowflake.constants import (
  CLIENT_PREFETCH_THREADS,
  CLIENT_SESSION_KEEP_ALIVE,
+ DEFAULT_SNOWFLAKE_DOMAIN,
  )
  from datahub.ingestion.source.snowflake.oauth_config import (
  OAuthConfiguration,
@@ -47,8 +48,6 @@ _VALID_AUTH_TYPES: Dict[str, str] = {
  "OAUTH_AUTHENTICATOR_TOKEN": OAUTH_AUTHENTICATOR,
  }

- _SNOWFLAKE_HOST_SUFFIX = ".snowflakecomputing.com"
-

  class SnowflakePermissionError(MetaError):
  """A permission error has happened"""
@@ -110,6 +109,10 @@ class SnowflakeConnectionConfig(ConfigModel):
  default=None,
  description="OAuth token from external identity provider. Not recommended for most use cases because it will not be able to refresh once expired.",
  )
+ snowflake_domain: str = pydantic.Field(
+ default=DEFAULT_SNOWFLAKE_DOMAIN,
+ description="Snowflake domain. Use 'snowflakecomputing.com' for most regions or 'snowflakecomputing.cn' for China (cn-northwest-1) region.",
+ )

  def get_account(self) -> str:
  assert self.account_id
@@ -118,10 +121,13 @@
  rename_host_port_to_account_id = pydantic_renamed_field("host_port", "account_id")

  @pydantic.validator("account_id")
- def validate_account_id(cls, account_id: str) -> str:
+ def validate_account_id(cls, account_id: str, values: Dict) -> str:
  account_id = remove_protocol(account_id)
  account_id = remove_trailing_slashes(account_id)
- account_id = remove_suffix(account_id, _SNOWFLAKE_HOST_SUFFIX)
+ # Get the domain from config, fallback to default
+ domain = values.get("snowflake_domain", DEFAULT_SNOWFLAKE_DOMAIN)
+ snowflake_host_suffix = f".{domain}"
+ account_id = remove_suffix(account_id, snowflake_host_suffix)
  return account_id

  @pydantic.validator("authentication_type", always=True)
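To see what the validator change does for a non-default domain, here is a small standalone walkthrough with illustrative values (remove_suffix simply strips the suffix when present):

    account_id = "myorg-myaccount.snowflakecomputing.cn"   # hypothetical China-region account
    domain = "snowflakecomputing.cn"                        # taken from the new snowflake_domain setting
    suffix = f".{domain}"
    if account_id.endswith(suffix):                         # what remove_suffix(account_id, suffix) does
        account_id = account_id[: -len(suffix)]
    assert account_id == "myorg-myaccount"
    # The connector is then pointed at host=f"{account_id}.{domain}" explicitly,
    # as the connection hunks below show.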
@@ -311,6 +317,7 @@
  warehouse=self.warehouse,
  authenticator=_VALID_AUTH_TYPES.get(self.authentication_type),
  application=_APPLICATION_NAME,
+ host=f"{self.account_id}.{self.snowflake_domain}",
  **connect_args,
  )

@@ -324,6 +331,7 @@
  role=self.role,
  authenticator=_VALID_AUTH_TYPES.get(self.authentication_type),
  application=_APPLICATION_NAME,
+ host=f"{self.account_id}.{self.snowflake_domain}",
  **connect_args,
  )

@@ -337,6 +345,7 @@
  warehouse=self.warehouse,
  role=self.role,
  application=_APPLICATION_NAME,
+ host=f"{self.account_id}.{self.snowflake_domain}",
  **connect_args,
  )
  elif self.authentication_type == "OAUTH_AUTHENTICATOR_TOKEN":
@@ -348,6 +357,7 @@
  warehouse=self.warehouse,
  role=self.role,
  application=_APPLICATION_NAME,
+ host=f"{self.account_id}.{self.snowflake_domain}",
  **connect_args,
  )
  elif self.authentication_type == "OAUTH_AUTHENTICATOR":
@@ -363,6 +373,7 @@
  role=self.role,
  authenticator=_VALID_AUTH_TYPES.get(self.authentication_type),
  application=_APPLICATION_NAME,
+ host=f"{self.account_id}.{self.snowflake_domain}",
  **connect_args,
  )
  else:
@@ -408,7 +419,7 @@
  # We often run multiple queries in parallel across multiple threads,
  # so we need to number them to help with log readability.
  query_num = self.get_query_no()
- logger.info(f"Query #{query_num}: {query}", stacklevel=2)
+ logger.info(f"Query #{query_num}: {query.rstrip()}", stacklevel=2)
  resp = self._connection.cursor(DictCursor).execute(query)
  if resp is not None and resp.rowcount is not None:
  logger.info(
datahub/ingestion/source/snowflake/snowflake_query.py

@@ -266,6 +266,33 @@ SHOW VIEWS IN DATABASE "{db_name}"
  LIMIT {limit} {from_clause};
  """

+ @staticmethod
+ def get_views_for_database(db_name: str) -> str:
+ # We've seen some issues with the `SHOW VIEWS` query,
+ # particularly when it requires pagination.
+ # This is an experimental alternative query that might be more reliable.
+ return f"""\
+ SELECT
+ TABLE_CATALOG as "VIEW_CATALOG",
+ TABLE_SCHEMA as "VIEW_SCHEMA",
+ TABLE_NAME as "VIEW_NAME",
+ COMMENT,
+ VIEW_DEFINITION,
+ CREATED,
+ LAST_ALTERED,
+ IS_SECURE
+ FROM "{db_name}".information_schema.views
+ WHERE TABLE_CATALOG = '{db_name}'
+ AND TABLE_SCHEMA != 'INFORMATION_SCHEMA'
+ """
+
+ @staticmethod
+ def get_views_for_schema(db_name: str, schema_name: str) -> str:
+ return f"""\
+ {SnowflakeQuery.get_views_for_database(db_name).rstrip()}
+ AND TABLE_SCHEMA = '{schema_name}'
+ """
+
  @staticmethod
  def get_secure_view_definitions() -> str:
  # https://docs.snowflake.com/en/sql-reference/account-usage/views
datahub/ingestion/source/snowflake/snowflake_report.py

@@ -128,6 +128,7 @@
  # "Information schema query returned too much data. Please repeat query with more selective predicates.""
  # This will result in overall increase in time complexity
  num_get_tables_for_schema_queries: int = 0
+ num_get_views_for_schema_queries: int = 0

  # these will be non-zero if the user choses to enable the extract_tags = "with_lineage" option, which requires
  # individual queries per object (database, schema, table) and an extra query per table to get the tags on the columns.