acryl-datahub 0.15.0rc15__py3-none-any.whl → 0.15.0rc17__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of acryl-datahub might be problematic.

Files changed (52)
  1. {acryl_datahub-0.15.0rc15.dist-info → acryl_datahub-0.15.0rc17.dist-info}/METADATA +2485 -2501
  2. {acryl_datahub-0.15.0rc15.dist-info → acryl_datahub-0.15.0rc17.dist-info}/RECORD +49 -49
  3. datahub/__init__.py +1 -1
  4. datahub/api/entities/structuredproperties/structuredproperties.py +7 -5
  5. datahub/cli/cli_utils.py +2 -0
  6. datahub/cli/delete_cli.py +66 -20
  7. datahub/configuration/common.py +3 -3
  8. datahub/ingestion/api/incremental_properties_helper.py +69 -0
  9. datahub/ingestion/api/source.py +5 -1
  10. datahub/ingestion/api/source_helpers.py +3 -1
  11. datahub/ingestion/reporting/datahub_ingestion_run_summary_provider.py +2 -2
  12. datahub/ingestion/run/pipeline.py +1 -1
  13. datahub/ingestion/run/pipeline_config.py +6 -0
  14. datahub/ingestion/sink/datahub_rest.py +3 -3
  15. datahub/ingestion/source/abs/source.py +4 -0
  16. datahub/ingestion/source/gc/datahub_gc.py +5 -5
  17. datahub/ingestion/source/gc/soft_deleted_entity_cleanup.py +1 -1
  18. datahub/ingestion/source/kafka/kafka.py +18 -11
  19. datahub/ingestion/source/looker/lookml_concept_context.py +1 -2
  20. datahub/ingestion/source/looker/view_upstream.py +65 -30
  21. datahub/ingestion/source/mode.py +0 -23
  22. datahub/ingestion/source/redash.py +13 -63
  23. datahub/ingestion/source/redshift/config.py +1 -0
  24. datahub/ingestion/source/redshift/redshift.py +2 -0
  25. datahub/ingestion/source/snowflake/snowflake_config.py +4 -0
  26. datahub/ingestion/source/snowflake/snowflake_query.py +6 -2
  27. datahub/ingestion/source/snowflake/snowflake_report.py +1 -0
  28. datahub/ingestion/source/snowflake/snowflake_schema.py +12 -0
  29. datahub/ingestion/source/snowflake/snowflake_schema_gen.py +17 -2
  30. datahub/ingestion/source/snowflake/snowflake_utils.py +45 -5
  31. datahub/ingestion/source/snowflake/snowflake_v2.py +6 -0
  32. datahub/ingestion/source/state/redundant_run_skip_handler.py +1 -1
  33. datahub/ingestion/source/tableau/tableau.py +35 -16
  34. datahub/ingestion/source/tableau/tableau_common.py +0 -1
  35. datahub/ingestion/source/unity/source.py +2 -0
  36. datahub/ingestion/source/unity/usage.py +20 -11
  37. datahub/metadata/_schema_classes.py +122 -2
  38. datahub/metadata/com/linkedin/pegasus2avro/structured/__init__.py +2 -0
  39. datahub/metadata/schema.avsc +73 -1
  40. datahub/metadata/schemas/StructuredPropertyDefinition.avsc +1 -1
  41. datahub/metadata/schemas/StructuredPropertyKey.avsc +1 -0
  42. datahub/metadata/schemas/StructuredPropertySettings.avsc +114 -0
  43. datahub/sql_parsing/schema_resolver.py +23 -0
  44. datahub/sql_parsing/sqlglot_lineage.py +48 -13
  45. datahub/testing/doctest.py +12 -0
  46. datahub/utilities/partition_executor.py +1 -1
  47. datahub/utilities/sql_lineage_parser_impl.py +0 -160
  48. datahub/utilities/sql_parser.py +0 -94
  49. datahub/utilities/sql_parser_base.py +0 -21
  50. {acryl_datahub-0.15.0rc15.dist-info → acryl_datahub-0.15.0rc17.dist-info}/WHEEL +0 -0
  51. {acryl_datahub-0.15.0rc15.dist-info → acryl_datahub-0.15.0rc17.dist-info}/entry_points.txt +0 -0
  52. {acryl_datahub-0.15.0rc15.dist-info → acryl_datahub-0.15.0rc17.dist-info}/top_level.txt +0 -0

datahub/metadata/schemas/StructuredPropertySettings.avsc
@@ -0,0 +1,114 @@
+{
+  "type": "record",
+  "Aspect": {
+    "name": "structuredPropertySettings"
+  },
+  "name": "StructuredPropertySettings",
+  "namespace": "com.linkedin.pegasus2avro.structured",
+  "fields": [
+    {
+      "Searchable": {
+        "fieldType": "BOOLEAN"
+      },
+      "type": "boolean",
+      "name": "isHidden",
+      "default": false,
+      "doc": "Whether or not this asset should be hidden in the main application"
+    },
+    {
+      "Searchable": {
+        "fieldType": "BOOLEAN"
+      },
+      "type": "boolean",
+      "name": "showInSearchFilters",
+      "default": false,
+      "doc": "Whether or not this asset should be displayed as a search filter"
+    },
+    {
+      "Searchable": {
+        "fieldType": "BOOLEAN"
+      },
+      "type": "boolean",
+      "name": "showInAssetSummary",
+      "default": false,
+      "doc": "Whether or not this asset should be displayed in the asset sidebar"
+    },
+    {
+      "Searchable": {
+        "fieldType": "BOOLEAN"
+      },
+      "type": "boolean",
+      "name": "showAsAssetBadge",
+      "default": false,
+      "doc": "Whether or not this asset should be displayed as an asset badge on other\nasset's headers"
+    },
+    {
+      "Searchable": {
+        "fieldType": "BOOLEAN"
+      },
+      "type": "boolean",
+      "name": "showInColumnsTable",
+      "default": false,
+      "doc": "Whether or not this asset should be displayed as a column in the schema field table\nin a Dataset's \"Columns\" tab."
+    },
+    {
+      "Searchable": {
+        "/time": {
+          "fieldName": "lastModifiedSettings",
+          "fieldType": "DATETIME"
+        }
+      },
+      "type": [
+        "null",
+        {
+          "type": "record",
+          "name": "AuditStamp",
+          "namespace": "com.linkedin.pegasus2avro.common",
+          "fields": [
+            {
+              "type": "long",
+              "name": "time",
+              "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
+            },
+            {
+              "java": {
+                "class": "com.linkedin.pegasus2avro.common.urn.Urn"
+              },
+              "type": "string",
+              "name": "actor",
+              "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change.",
+              "Urn": "Urn"
+            },
+            {
+              "java": {
+                "class": "com.linkedin.pegasus2avro.common.urn.Urn"
+              },
+              "type": [
+                "null",
+                "string"
+              ],
+              "name": "impersonator",
+              "default": null,
+              "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor.",
+              "Urn": "Urn"
+            },
+            {
+              "type": [
+                "null",
+                "string"
+              ],
+              "name": "message",
+              "default": null,
+              "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
+            }
+          ],
+          "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
+        }
+      ],
+      "name": "lastModified",
+      "default": null,
+      "doc": "Last Modified Audit stamp"
+    }
+  ],
+  "doc": "Settings specific to a structured property entity"
+}
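
The new StructuredPropertySettings aspect controls how a structured property is surfaced in the DataHub UI (hidden, search filter, asset summary, badge, columns table). A minimal Python sketch of emitting this aspect is shown below; it assumes the generated StructuredPropertySettingsClass added to datahub/metadata/_schema_classes.py in this release, and the property URN and GMS address are illustrative, not taken from the diff.

from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.schema_classes import StructuredPropertySettingsClass  # assumed class name

# Hypothetical structured property URN and DataHub GMS endpoint.
property_urn = "urn:li:structuredProperty:io.acryl.privacy.retentionTime"

settings = StructuredPropertySettingsClass(
    isHidden=False,
    showInSearchFilters=True,
    showInAssetSummary=True,
    showAsAssetBadge=False,
    showInColumnsTable=False,
)

emitter = DatahubRestEmitter(gms_server="http://localhost:8080")
emitter.emit(
    MetadataChangeProposalWrapper(entityUrn=property_urn, aspect=settings)
)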

datahub/sql_parsing/schema_resolver.py
@@ -123,6 +123,13 @@ class SchemaResolver(Closeable, SchemaResolverInterface):
         )
         return urn
 
+    def resolve_urn(self, urn: str) -> Tuple[str, Optional[SchemaInfo]]:
+        schema_info = self._resolve_schema_info(urn)
+        if schema_info:
+            return urn, schema_info
+
+        return urn, None
+
     def resolve_table(self, table: _TableName) -> Tuple[str, Optional[SchemaInfo]]:
         urn = self.get_urn_for_table(table)
 
@@ -293,3 +300,19 @@ def _convert_schema_field_list_to_info(
 
 def _convert_schema_aspect_to_info(schema_metadata: SchemaMetadataClass) -> SchemaInfo:
     return _convert_schema_field_list_to_info(schema_metadata.fields)
+
+
+def match_columns_to_schema(
+    schema_info: SchemaInfo, input_columns: List[str]
+) -> List[str]:
+    column_from_gms: List[str] = list(schema_info.keys())  # list() to silent lint
+
+    gms_column_map: Dict[str, str] = {
+        column.lower(): column for column in column_from_gms
+    }
+
+    output_columns: List[str] = [
+        gms_column_map.get(column.lower(), column) for column in input_columns
+    ]
+
+    return output_columns
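
For context: resolve_urn looks up schema metadata for an already-constructed dataset URN, and match_columns_to_schema re-cases parser-reported column names to the casing stored in GMS, leaving unknown columns untouched. A small illustrative sketch (the schema contents and column list below are made up):

from datahub.sql_parsing.schema_resolver import match_columns_to_schema

# SchemaInfo maps column name -> native type; the values here are illustrative.
schema_info = {"Id": "NUMBER", "OrderDate": "DATE", "CustomerName": "VARCHAR"}

# Columns as reported by a SQL parser, in whatever casing the query used.
parsed_columns = ["id", "ORDERDATE", "customername", "unknown_col"]

print(match_columns_to_schema(schema_info, parsed_columns))
# ['Id', 'OrderDate', 'CustomerName', 'unknown_col'] -- unmatched names pass through unchanged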

datahub/sql_parsing/sqlglot_lineage.py
@@ -1181,6 +1181,45 @@ def sqlglot_lineage(
     )
 
 
+@functools.lru_cache(maxsize=128)
+def create_and_cache_schema_resolver(
+    platform: str,
+    env: str,
+    graph: Optional[DataHubGraph] = None,
+    platform_instance: Optional[str] = None,
+    schema_aware: bool = True,
+) -> SchemaResolver:
+    return create_schema_resolver(
+        platform=platform,
+        env=env,
+        graph=graph,
+        platform_instance=platform_instance,
+        schema_aware=schema_aware,
+    )
+
+
+def create_schema_resolver(
+    platform: str,
+    env: str,
+    graph: Optional[DataHubGraph] = None,
+    platform_instance: Optional[str] = None,
+    schema_aware: bool = True,
+) -> SchemaResolver:
+    if graph and schema_aware:
+        return graph._make_schema_resolver(
+            platform=platform,
+            platform_instance=platform_instance,
+            env=env,
+        )
+
+    return SchemaResolver(
+        platform=platform,
+        platform_instance=platform_instance,
+        env=env,
+        graph=None,
+    )
+
+
 def create_lineage_sql_parsed_result(
     query: str,
     default_db: Optional[str],
@@ -1191,21 +1230,17 @@ def create_lineage_sql_parsed_result(
     graph: Optional[DataHubGraph] = None,
     schema_aware: bool = True,
 ) -> SqlParsingResult:
+    schema_resolver = create_schema_resolver(
+        platform=platform,
+        platform_instance=platform_instance,
+        env=env,
+        schema_aware=schema_aware,
+        graph=graph,
+    )
+
+    needs_close: bool = True
     if graph and schema_aware:
         needs_close = False
-        schema_resolver = graph._make_schema_resolver(
-            platform=platform,
-            platform_instance=platform_instance,
-            env=env,
-        )
-    else:
-        needs_close = True
-        schema_resolver = SchemaResolver(
-            platform=platform,
-            platform_instance=platform_instance,
-            env=env,
-            graph=None,
-        )
 
     try:
         return sqlglot_lineage(
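
The new create_schema_resolver helper centralizes the choice between a graph-backed and an offline resolver, and create_and_cache_schema_resolver memoizes it with functools.lru_cache so repeated calls with identical arguments share one resolver instance. A small sketch, assuming offline use (no DataHubGraph passed):

from datahub.sql_parsing.sqlglot_lineage import create_and_cache_schema_resolver

# With graph=None the helper builds an offline SchemaResolver; identical arguments
# hit the lru_cache and return the same object.
r1 = create_and_cache_schema_resolver(platform="snowflake", env="PROD")
r2 = create_and_cache_schema_resolver(platform="snowflake", env="PROD")
assert r1 is r2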

datahub/testing/doctest.py
@@ -0,0 +1,12 @@
+import doctest
+from types import ModuleType
+
+
+def assert_doctest(module: ModuleType) -> None:
+    result = doctest.testmod(
+        module,
+        raise_on_error=True,
+        verbose=True,
+    )
+    if result.attempted == 0:
+        raise ValueError(f"No doctests found in {module.__name__}")
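
A sketch of how the new helper might be used from a test; the module under test (my_package.urn_utils) is hypothetical and stands in for any module that carries doctest examples:

import my_package.urn_utils  # hypothetical module containing doctests

from datahub.testing.doctest import assert_doctest


def test_urn_utils_doctests() -> None:
    # Fails if any doctest fails (raise_on_error=True) or if no doctests were found.
    assert_doctest(my_package.urn_utils)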

datahub/utilities/partition_executor.py
@@ -268,7 +268,7 @@ class BatchPartitionExecutor(Closeable):
         self.process_batch = process_batch
         self.min_process_interval = min_process_interval
         self.read_from_pending_interval = read_from_pending_interval
-        assert self.max_workers > 1
+        assert self.max_workers >= 1
 
         self._state_lock = threading.Lock()
         self._executor = ThreadPoolExecutor(
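
The relaxed assertion now accepts a single-worker executor. A rough sketch of what that permits; apart from max_workers, the constructor arguments shown (max_pending, process_batch) are assumptions inferred from the attributes visible in the snippet above and may not match the exact signature:

from datahub.utilities.partition_executor import BatchPartitionExecutor


def process_batch(batch):
    # Placeholder batch handler for the sketch.
    print(f"processing {len(batch)} records")


# max_workers=1 was previously rejected by `assert self.max_workers > 1`.
executor = BatchPartitionExecutor(
    max_workers=1,
    max_pending=10,
    process_batch=process_batch,
)
executor.close()  # BatchPartitionExecutor extends Closeable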

datahub/utilities/sql_lineage_parser_impl.py
@@ -1,160 +0,0 @@
-import contextlib
-import logging
-import re
-import unittest
-import unittest.mock
-from typing import Dict, List, Optional, Set
-
-from sqllineage.core.holders import Column, SQLLineageHolder
-from sqllineage.exceptions import SQLLineageException
-
-from datahub.utilities.sql_parser_base import SQLParser, SqlParserException
-
-with contextlib.suppress(ImportError):
-    import sqlparse
-    from networkx import DiGraph
-    from sqllineage.core import LineageAnalyzer
-
-    import datahub.utilities.sqllineage_patch
-logger = logging.getLogger(__name__)
-
-
-class SqlLineageSQLParserImpl(SQLParser):
-    _DATE_SWAP_TOKEN = "__d_a_t_e"
-    _HOUR_SWAP_TOKEN = "__h_o_u_r"
-    _TIMESTAMP_SWAP_TOKEN = "__t_i_m_e_s_t_a_m_p"
-    _DATA_SWAP_TOKEN = "__d_a_t_a"
-    _ADMIN_SWAP_TOKEN = "__a_d_m_i_n"
-    _MYVIEW_SQL_TABLE_NAME_TOKEN = "__my_view__.__sql_table_name__"
-    _MYVIEW_LOOKER_TOKEN = "my_view.SQL_TABLE_NAME"
-
-    def __init__(self, sql_query: str, use_raw_names: bool = False) -> None:
-        super().__init__(sql_query)
-        original_sql_query = sql_query
-        self._use_raw_names = use_raw_names
-
-        # SqlLineageParser makes mistakes on lateral flatten queries, use the prefix
-        if "lateral flatten" in sql_query:
-            sql_query = sql_query[: sql_query.find("lateral flatten")]
-
-        # Replace reserved words that break SqlLineageParser
-        self.token_to_original: Dict[str, str] = {
-            self._DATE_SWAP_TOKEN: "date",
-            self._HOUR_SWAP_TOKEN: "hour",
-            self._TIMESTAMP_SWAP_TOKEN: "timestamp",
-            self._DATA_SWAP_TOKEN: "data",
-            self._ADMIN_SWAP_TOKEN: "admin",
-        }
-        for replacement, original in self.token_to_original.items():
-            # Replace original tokens with replacement. Since table and column name can contain a hyphen('-'),
-            # also prevent original tokens appearing as part of these names with a hyphen from getting substituted.
-            sql_query = re.sub(
-                rf"((?<!-)\b{original}\b)(?!-)",
-                rf"{replacement}",
-                sql_query,
-                flags=re.IGNORECASE,
-            )
-
-        # SqlLineageParser lowercarese tablenames and we need to replace Looker specific token which should be uppercased
-        sql_query = re.sub(
-            rf"(\${{{self._MYVIEW_LOOKER_TOKEN}}})",
-            rf"{self._MYVIEW_SQL_TABLE_NAME_TOKEN}",
-            sql_query,
-        )
-
-        # SqlLineageParser does not handle "encode" directives well. Remove them
-        sql_query = re.sub(r"\sencode [a-zA-Z]*", "", sql_query, flags=re.IGNORECASE)
-
-        # Replace lookml templates with the variable otherwise sqlparse can't parse ${
-        sql_query = re.sub(r"(\${)(.+)(})", r"\2", sql_query)
-        if sql_query != original_sql_query:
-            logger.debug(f"Rewrote original query {original_sql_query} as {sql_query}")
-
-        self._sql = sql_query
-        self._stmt_holders: Optional[List[LineageAnalyzer]] = None
-        self._sql_holder: Optional[SQLLineageHolder] = None
-        try:
-            self._stmt = [
-                s
-                for s in sqlparse.parse(
-                    # first apply sqlparser formatting just to get rid of comments, which cause
-                    # inconsistencies in parsing output
-                    sqlparse.format(
-                        self._sql.strip(),
-                        strip_comments=True,
-                        use_space_around_operators=True,
-                    ),
-                )
-                if s.token_first(skip_cm=True)
-            ]
-
-            with unittest.mock.patch(
-                "sqllineage.core.handlers.source.SourceHandler.end_of_query_cleanup",
-                datahub.utilities.sqllineage_patch.end_of_query_cleanup_patch,
-            ):
-                with unittest.mock.patch(
-                    "sqllineage.core.holders.SubQueryLineageHolder.add_column_lineage",
-                    datahub.utilities.sqllineage_patch.add_column_lineage_patch,
-                ):
-                    self._stmt_holders = [
-                        LineageAnalyzer().analyze(stmt) for stmt in self._stmt
-                    ]
-                    self._sql_holder = SQLLineageHolder.of(*self._stmt_holders)
-        except SQLLineageException as e:
-            raise SqlParserException(
-                f"SQL lineage analyzer error '{e}' for query: '{self._sql}"
-            ) from e
-
-    def get_tables(self) -> List[str]:
-        result: List[str] = []
-        if self._sql_holder is None:
-            logger.error("sql holder not present so cannot get tables")
-            return result
-        for table in self._sql_holder.source_tables:
-            table_normalized = re.sub(
-                r"^<default>.",
-                "",
-                (
-                    str(table)
-                    if not self._use_raw_names
-                    else f"{table.schema.raw_name}.{table.raw_name}"
-                ),
-            )
-            result.append(str(table_normalized))
-
-        # We need to revert TOKEN replacements
-        for token, replacement in self.token_to_original.items():
-            result = [replacement if c == token else c for c in result]
-        result = [
-            self._MYVIEW_LOOKER_TOKEN if c == self._MYVIEW_SQL_TABLE_NAME_TOKEN else c
-            for c in result
-        ]
-
-        # Sort tables to make the list deterministic
-        result.sort()
-
-        return result
-
-    def get_columns(self) -> List[str]:
-        if self._sql_holder is None:
-            raise SqlParserException("sql holder not present so cannot get columns")
-        graph: DiGraph = self._sql_holder.graph  # For mypy attribute checking
-        column_nodes = [n for n in graph.nodes if isinstance(n, Column)]
-        column_graph = graph.subgraph(column_nodes)
-
-        target_columns = {column for column, deg in column_graph.out_degree if deg == 0}
-
-        result: Set[str] = set()
-        for column in target_columns:
-            # Let's drop all the count(*) and similard columns which are expression actually if it does not have an alias
-            if not any(ele in column.raw_name for ele in ["*", "(", ")"]):
-                result.add(str(column.raw_name))
-
-        # Reverting back all the previously renamed words which confuses the parser
-        result = {"date" if c == self._DATE_SWAP_TOKEN else c for c in result}
-        result = {
-            "timestamp" if c == self._TIMESTAMP_SWAP_TOKEN else c for c in list(result)
-        }
-
-        # swap back renamed date column
-        return list(result)

datahub/utilities/sql_parser.py
@@ -1,94 +0,0 @@
-import logging
-import multiprocessing
-import traceback
-from multiprocessing import Process, Queue
-from typing import Any, List, Optional, Tuple
-
-from datahub.utilities.sql_lineage_parser_impl import SqlLineageSQLParserImpl
-from datahub.utilities.sql_parser_base import SQLParser
-
-logger = logging.getLogger(__name__)
-
-
-def sql_lineage_parser_impl_func_wrapper(
-    queue: Optional[multiprocessing.Queue], sql_query: str, use_raw_names: bool = False
-) -> Optional[Tuple[List[str], List[str], Any]]:
-    """
-    The wrapper function that computes the tables and columns using the SqlLineageSQLParserImpl
-    and puts the results on the shared IPC queue. This is used to isolate SqlLineageSQLParserImpl
-    functionality in a separate process, and hence protect our sources from memory leaks originating in
-    the sqllineage module.
-    :param queue: The shared IPC queue on to which the results will be put.
-    :param sql_query: The SQL query to extract the tables & columns from.
-    :param use_raw_names: Parameter used to ignore sqllineage's default lowercasing.
-    :return: None.
-    """
-    exception_details: Optional[Tuple[BaseException, str]] = None
-    tables: List[str] = []
-    columns: List[str] = []
-    try:
-        parser = SqlLineageSQLParserImpl(sql_query, use_raw_names)
-        tables = parser.get_tables()
-        columns = parser.get_columns()
-    except BaseException as e:
-        exc_msg = traceback.format_exc()
-        exception_details = (e, exc_msg)
-        logger.debug(exc_msg)
-
-    if queue is not None:
-        queue.put((tables, columns, exception_details))
-        return None
-    else:
-        return (tables, columns, exception_details)
-
-
-class SqlLineageSQLParser(SQLParser):
-    def __init__(
-        self,
-        sql_query: str,
-        use_external_process: bool = False,
-        use_raw_names: bool = False,
-    ) -> None:
-        super().__init__(sql_query, use_external_process)
-        if use_external_process:
-            self.tables, self.columns = self._get_tables_columns_process_wrapped(
-                sql_query, use_raw_names
-            )
-        else:
-            return_tuple = sql_lineage_parser_impl_func_wrapper(
-                None, sql_query, use_raw_names
-            )
-            if return_tuple is not None:
-                (
-                    self.tables,
-                    self.columns,
-                    some_exception,
-                ) = return_tuple
-
-    @staticmethod
-    def _get_tables_columns_process_wrapped(
-        sql_query: str, use_raw_names: bool = False
-    ) -> Tuple[List[str], List[str]]:
-        # Invoke sql_lineage_parser_impl_func_wrapper in a separate process to avoid
-        # memory leaks from sqllineage module used by SqlLineageSQLParserImpl. This will help
-        # shield our sources like lookml & redash, that need to parse a large number of SQL statements,
-        # from causing significant memory leaks in the datahub cli during ingestion.
-        queue: multiprocessing.Queue = Queue()
-        process: multiprocessing.Process = Process(
-            target=sql_lineage_parser_impl_func_wrapper,
-            args=(queue, sql_query, use_raw_names),
-        )
-        process.start()
-        tables, columns, exception_details = queue.get(block=True)
-        if exception_details is not None:
-            raise exception_details[0](f"Sub-process exception: {exception_details[1]}")
-        return tables, columns
-
-    def get_tables(self) -> List[str]:
-        return self.tables
-
-    def get_columns(self) -> List[str]:
-        return self.columns
-
-
-DefaultSQLParser = SqlLineageSQLParser

datahub/utilities/sql_parser_base.py
@@ -1,21 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from typing import List
-
-
-class SqlParserException(Exception):
-    """Raised when sql parser fails"""
-
-    pass
-
-
-class SQLParser(metaclass=ABCMeta):
-    def __init__(self, sql_query: str, use_external_process: bool = True) -> None:
-        self._sql_query = sql_query
-
-    @abstractmethod
-    def get_tables(self) -> List[str]:
-        pass
-
-    @abstractmethod
-    def get_columns(self) -> List[str]:
-        pass
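
These three modules made up the legacy sqllineage-based parser (SqlLineageSQLParserImpl, SqlLineageSQLParser/DefaultSQLParser, and the SQLParser base class). With them removed, table and column extraction goes through the sqlglot-based parser in datahub.sql_parsing instead. A rough offline sketch of that replacement path; the query and platform values are illustrative, not taken from the diff:

from datahub.sql_parsing.sqlglot_lineage import create_lineage_sql_parsed_result

result = create_lineage_sql_parsed_result(
    query="SELECT id, name FROM public.customers",
    default_db="sales",
    platform="postgres",
    platform_instance=None,
    env="PROD",
    graph=None,          # offline: no DataHub connection, so no schema-aware resolution
    schema_aware=False,
)

print(result.in_tables)        # upstream table URNs found in the query
print(result.column_lineage)   # column-level lineage, when it can be resolved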