acryl-datahub 1.1.0rc3__py3-none-any.whl → 1.1.0.1rc6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of acryl-datahub might be problematic.

Files changed (87)
  1. {acryl_datahub-1.1.0rc3.dist-info → acryl_datahub-1.1.0.1rc6.dist-info}/METADATA +2515 -2513
  2. {acryl_datahub-1.1.0rc3.dist-info → acryl_datahub-1.1.0.1rc6.dist-info}/RECORD +87 -70
  3. {acryl_datahub-1.1.0rc3.dist-info → acryl_datahub-1.1.0.1rc6.dist-info}/WHEEL +1 -1
  4. datahub/_version.py +1 -1
  5. datahub/api/entities/dataset/dataset.py +9 -8
  6. datahub/api/entities/external/__init__.py +0 -0
  7. datahub/api/entities/external/external_entities.py +239 -0
  8. datahub/api/entities/external/external_tag.py +145 -0
  9. datahub/api/entities/external/restricted_text.py +247 -0
  10. datahub/api/entities/external/unity_catalog_external_entites.py +170 -0
  11. datahub/api/entities/structuredproperties/structuredproperties.py +2 -2
  12. datahub/cli/delete_cli.py +4 -4
  13. datahub/cli/ingest_cli.py +9 -1
  14. datahub/emitter/mce_builder.py +3 -1
  15. datahub/emitter/response_helper.py +86 -1
  16. datahub/emitter/rest_emitter.py +1 -1
  17. datahub/ingestion/graph/client.py +3 -3
  18. datahub/ingestion/source/apply/datahub_apply.py +4 -4
  19. datahub/ingestion/source/data_lake_common/data_lake_utils.py +22 -10
  20. datahub/ingestion/source/data_lake_common/object_store.py +644 -0
  21. datahub/ingestion/source/datahub/config.py +11 -0
  22. datahub/ingestion/source/datahub/datahub_database_reader.py +186 -33
  23. datahub/ingestion/source/datahub/datahub_source.py +1 -1
  24. datahub/ingestion/source/dbt/dbt_common.py +30 -11
  25. datahub/ingestion/source/gcs/gcs_source.py +22 -7
  26. datahub/ingestion/source/gcs/gcs_utils.py +36 -9
  27. datahub/ingestion/source/hex/query_fetcher.py +9 -3
  28. datahub/ingestion/source/openapi.py +12 -0
  29. datahub/ingestion/source/openapi_parser.py +56 -37
  30. datahub/ingestion/source/s3/source.py +65 -6
  31. datahub/ingestion/source/snowflake/snowflake_config.py +13 -0
  32. datahub/ingestion/source/snowflake/snowflake_queries.py +44 -21
  33. datahub/ingestion/source/snowflake/snowflake_query.py +0 -7
  34. datahub/ingestion/source/snowflake/snowflake_v2.py +17 -6
  35. datahub/ingestion/source/sql/athena.py +1 -0
  36. datahub/ingestion/source/sql/hive.py +2 -3
  37. datahub/ingestion/source/sql/sql_common.py +98 -34
  38. datahub/ingestion/source/sql/sql_types.py +5 -2
  39. datahub/ingestion/source/unity/config.py +5 -0
  40. datahub/ingestion/source/unity/proxy.py +117 -0
  41. datahub/ingestion/source/unity/source.py +167 -15
  42. datahub/ingestion/source/unity/tag_entities.py +295 -0
  43. datahub/metadata/_internal_schema_classes.py +667 -522
  44. datahub/metadata/_urns/urn_defs.py +1804 -1748
  45. datahub/metadata/com/linkedin/pegasus2avro/application/__init__.py +19 -0
  46. datahub/metadata/schema.avsc +17358 -17584
  47. datahub/metadata/schemas/ApplicationKey.avsc +31 -0
  48. datahub/metadata/schemas/ApplicationProperties.avsc +72 -0
  49. datahub/metadata/schemas/Applications.avsc +38 -0
  50. datahub/metadata/schemas/ChartKey.avsc +1 -0
  51. datahub/metadata/schemas/ContainerKey.avsc +1 -0
  52. datahub/metadata/schemas/DashboardKey.avsc +1 -0
  53. datahub/metadata/schemas/DataFlowKey.avsc +1 -0
  54. datahub/metadata/schemas/DataHubIngestionSourceKey.avsc +2 -1
  55. datahub/metadata/schemas/DataJobKey.avsc +1 -0
  56. datahub/metadata/schemas/DataProductKey.avsc +1 -0
  57. datahub/metadata/schemas/DataProductProperties.avsc +1 -1
  58. datahub/metadata/schemas/DatasetKey.avsc +1 -0
  59. datahub/metadata/schemas/ExecutionRequestInput.avsc +5 -0
  60. datahub/metadata/schemas/GlossaryTermKey.avsc +1 -0
  61. datahub/metadata/schemas/MLFeatureKey.avsc +1 -0
  62. datahub/metadata/schemas/MLFeatureTableKey.avsc +1 -0
  63. datahub/metadata/schemas/MLModelGroupKey.avsc +1 -0
  64. datahub/metadata/schemas/MLModelKey.avsc +1 -0
  65. datahub/metadata/schemas/MLPrimaryKeyKey.avsc +1 -0
  66. datahub/metadata/schemas/NotebookKey.avsc +1 -0
  67. datahub/metadata/schemas/__init__.py +3 -3
  68. datahub/sdk/__init__.py +6 -0
  69. datahub/sdk/_all_entities.py +11 -0
  70. datahub/sdk/_shared.py +118 -1
  71. datahub/sdk/chart.py +315 -0
  72. datahub/sdk/container.py +7 -0
  73. datahub/sdk/dashboard.py +432 -0
  74. datahub/sdk/dataflow.py +309 -0
  75. datahub/sdk/datajob.py +342 -0
  76. datahub/sdk/dataset.py +8 -2
  77. datahub/sdk/entity_client.py +90 -2
  78. datahub/sdk/lineage_client.py +681 -82
  79. datahub/sdk/main_client.py +27 -8
  80. datahub/sdk/mlmodel.py +101 -38
  81. datahub/sdk/mlmodelgroup.py +7 -0
  82. datahub/sql_parsing/sql_parsing_aggregator.py +1 -1
  83. datahub/testing/mce_helpers.py +421 -0
  84. datahub/testing/sdk_v2_helpers.py +18 -0
  85. {acryl_datahub-1.1.0rc3.dist-info → acryl_datahub-1.1.0.1rc6.dist-info}/entry_points.txt +0 -0
  86. {acryl_datahub-1.1.0rc3.dist-info → acryl_datahub-1.1.0.1rc6.dist-info}/licenses/LICENSE +0 -0
  87. {acryl_datahub-1.1.0rc3.dist-info → acryl_datahub-1.1.0.1rc6.dist-info}/top_level.txt +0 -0
@@ -2,17 +2,38 @@ from __future__ import annotations
 
 import difflib
 import logging
-from typing import TYPE_CHECKING, List, Literal, Optional, Set, Union
+from dataclasses import dataclass
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Literal,
+    Optional,
+    Set,
+    Union,
+    overload,
+)
 
-from typing_extensions import assert_never
+from typing_extensions import assert_never, deprecated
 
 import datahub.metadata.schema_classes as models
 from datahub.emitter.mcp import MetadataChangeProposalWrapper
 from datahub.errors import SdkUsageError
-from datahub.metadata.urns import DataJobUrn, DatasetUrn, QueryUrn
-from datahub.sdk._shared import DatajobUrnOrStr, DatasetUrnOrStr
+from datahub.metadata.urns import DataJobUrn, DatasetUrn, QueryUrn, SchemaFieldUrn, Urn
+from datahub.sdk._shared import (
+    ChartUrnOrStr,
+    DashboardUrnOrStr,
+    DatajobUrnOrStr,
+    DatasetUrnOrStr,
+)
 from datahub.sdk._utils import DEFAULT_ACTOR_URN
 from datahub.sdk.dataset import ColumnLineageMapping, parse_cll_mapping
+from datahub.sdk.search_client import compile_filters
+from datahub.sdk.search_filters import Filter, FilterDsl
+from datahub.specific.chart import ChartPatchBuilder
+from datahub.specific.dashboard import DashboardPatchBuilder
 from datahub.specific.datajob import DataJobPatchBuilder
 from datahub.specific.dataset import DatasetPatchBuilder
 from datahub.sql_parsing.fingerprint_utils import generate_hash
@@ -32,9 +53,29 @@ _empty_audit_stamp = models.AuditStampClass(
 logger = logging.getLogger(__name__)
 
 
+@dataclass
+class LineagePath:
+    urn: str
+    entity_name: str
+    column_name: Optional[str] = None
+
+
+@dataclass
+class LineageResult:
+    urn: str
+    type: str
+    hops: int
+    direction: Literal["upstream", "downstream"]
+    platform: Optional[str] = None
+    name: Optional[str] = None
+    description: Optional[str] = None
+    paths: Optional[List[LineagePath]] = None
+
+
 class LineageClient:
     def __init__(self, client: DataHubClient):
         self._client = client
+        self._graph = client._graph
 
     def _get_fields_from_dataset_urn(self, dataset_urn: DatasetUrn) -> Set[str]:
         schema_metadata = self._client._graph.get_aspect(
@@ -113,6 +154,396 @@ class LineageClient:
 
         return fuzzy_column_lineage
 
+    @overload
+    def add_lineage(
+        self,
+        *,
+        upstream: DatasetUrnOrStr,
+        downstream: DatasetUrnOrStr,
+        column_lineage: Union[
+            bool, ColumnLineageMapping, Literal["auto_fuzzy", "auto_strict"]
+        ] = False,
+        transformation_text: Optional[str] = None,
+    ) -> None:
+        ...
+
+        """
+        Add dataset-to-dataset lineage with column-level mapping.
+        """
+
+    @overload
+    def add_lineage(
+        self,
+        *,
+        upstream: Union[DatajobUrnOrStr],
+        downstream: DatasetUrnOrStr,
+    ) -> None:
+        ...
+
+        """
+        Add dataset-to-datajob or dataset-to-mlmodel lineage.
+        """
+
+    @overload
+    def add_lineage(
+        self,
+        *,
+        upstream: Union[DatasetUrnOrStr, DatajobUrnOrStr],
+        downstream: DatajobUrnOrStr,
+    ) -> None:
+        ...
+
+        """
+        Add datajob-to-dataset or datajob-to-datajob lineage.
+        """
+
+    @overload
+    def add_lineage(
+        self,
+        *,
+        upstream: Union[DashboardUrnOrStr, DatasetUrnOrStr, ChartUrnOrStr],
+        downstream: DashboardUrnOrStr,
+    ) -> None:
+        ...
+
+        """
+        Add dashboard-to-dashboard or dashboard-to-dataset lineage.
+        """
+
+    @overload
+    def add_lineage(
+        self,
+        *,
+        upstream: DatasetUrnOrStr,
+        downstream: ChartUrnOrStr,
+    ) -> None:
+        ...
+        """
+        Add dataset-to-chart lineage.
+        """
+
+    # The actual implementation that handles all overloaded cases
+    def add_lineage(
+        self,
+        *,
+        upstream: Union[
+            DatasetUrnOrStr, DatajobUrnOrStr, DashboardUrnOrStr, ChartUrnOrStr
+        ],
+        downstream: Union[
+            DatasetUrnOrStr, DatajobUrnOrStr, DashboardUrnOrStr, ChartUrnOrStr
+        ],
+        column_lineage: Union[
+            bool, ColumnLineageMapping, Literal["auto_fuzzy", "auto_strict"]
+        ] = False,
+        transformation_text: Optional[str] = None,
+    ) -> None:
+        """
+        Add lineage between two entities.
+
+        This flexible method handles different combinations of entity types:
+        - dataset to dataset
+        - dataset to datajob
+        - datajob to dataset
+        - datajob to datajob
+        - dashboard to dataset
+        - dashboard to chart
+        - dashboard to dashboard
+        - chart to dataset
+
+        Args:
+            upstream: URN of the upstream entity (dataset or datajob)
+            downstream: URN of the downstream entity (dataset or datajob)
+            column_lineage: Optional boolean to indicate if column-level lineage should be added or a lineage mapping type (auto_fuzzy, auto_strict, or a mapping of column-level lineage)
+            transformation_text: Optional SQL query text that defines the transformation
+                (only applicable for dataset-to-dataset lineage)
+
+        Raises:
+            InvalidUrnError: If the URNs provided are invalid
+            SdkUsageError: If certain parameter combinations are not supported
+        """
+        # Validate parameter combinations
+        upstream_entity_type = Urn.from_string(upstream).entity_type
+        downstream_entity_type = Urn.from_string(downstream).entity_type
+
+        key = (upstream_entity_type, downstream_entity_type)
+
+        # if it's not dataset-dataset lineage but provided with column_lineage or transformation_text, raise an error
+        if key != ("dataset", "dataset") and (column_lineage or transformation_text):
+            raise SdkUsageError(
+                "Column lineage and query text are only applicable for dataset-to-dataset lineage"
+            )
+
+        lineage_handlers: dict[tuple[str, str], Callable] = {
+            ("dataset", "dataset"): self._add_dataset_lineage,
+            ("dataset", "dashboard"): self._add_dashboard_lineage,
+            ("chart", "dashboard"): self._add_dashboard_lineage,
+            ("dashboard", "dashboard"): self._add_dashboard_lineage,
+            ("dataset", "dataJob"): self._add_datajob_lineage,
+            ("dataJob", "dataJob"): self._add_datajob_lineage,
+            ("dataJob", "dataset"): self._add_datajob_output,
+            ("dataset", "chart"): self._add_chart_lineage,
+        }
+
+        try:
+            lineage_handler = lineage_handlers[key]
+            lineage_handler(
+                upstream=upstream,
+                downstream=downstream,
+                upstream_type=upstream_entity_type,
+                column_lineage=column_lineage,
+                transformation_text=transformation_text,
+            )
+        except KeyError:
+            raise SdkUsageError(
+                f"Unsupported entity type combination: {upstream_entity_type} -> {downstream_entity_type}"
+            ) from None
+
+    def _add_dataset_lineage(
+        self,
+        *,
+        upstream,
+        downstream,
+        column_lineage,
+        transformation_text,
+        **_,
+    ):
+        upstream_urn = DatasetUrn.from_string(upstream)
+        downstream_urn = DatasetUrn.from_string(downstream)
+
+        if column_lineage:
+            column_lineage = (
+                "auto_fuzzy" if column_lineage is True else column_lineage
+            )  # if column_lineage is True, set it to auto_fuzzy
+            cll = self._process_column_lineage(
+                column_lineage, upstream_urn, downstream_urn
+            )
+        else:
+            cll = None
+
+        if transformation_text:
+            self._process_transformation_lineage(
+                transformation_text, upstream_urn, downstream_urn, cll
+            )
+        else:
+            updater = DatasetPatchBuilder(str(downstream_urn))
+            updater.add_upstream_lineage(
+                models.UpstreamClass(
+                    dataset=str(upstream_urn),
+                    type=models.DatasetLineageTypeClass.COPY,
+                )
+            )
+            for cl in cll or []:
+                updater.add_fine_grained_upstream_lineage(cl)
+            self._client.entities.update(updater)
+
+    def _add_dashboard_lineage(self, *, upstream, downstream, upstream_type, **_):
+        patch = DashboardPatchBuilder(str(downstream))
+        if upstream_type == "dataset":
+            patch.add_dataset_edge(upstream)
+        elif upstream_type == "chart":
+            patch.add_chart_edge(upstream)
+        elif upstream_type == "dashboard":
+            patch.add_dashboard(upstream)
+        else:
+            raise SdkUsageError(
+                f"Unsupported entity type combination: {upstream_type} -> dashboard"
+            )
+        self._client.entities.update(patch)
+
+    def _add_datajob_lineage(self, *, upstream, downstream, upstream_type, **_):
+        patch = DataJobPatchBuilder(str(downstream))
+        if upstream_type == "dataset":
+            patch.add_input_dataset(upstream)
+        elif upstream_type == "dataJob":
+            patch.add_input_datajob(upstream)
+        else:
+            raise SdkUsageError(
+                f"Unsupported entity type combination: {upstream_type} -> dataJob"
+            )
+        self._client.entities.update(patch)
+
+    def _add_datajob_output(self, *, upstream, downstream, **_):
+        patch = DataJobPatchBuilder(str(upstream))
+        patch.add_output_dataset(downstream)
+        self._client.entities.update(patch)
+
+    def _add_chart_lineage(self, *, upstream, downstream, **_):
+        patch = ChartPatchBuilder(str(downstream))
+        patch.add_input_edge(upstream)
+        self._client.entities.update(patch)
+
+    def _process_column_lineage(self, column_lineage, upstream_urn, downstream_urn):
+        cll = None
+        if column_lineage:
+            # Auto column lineage generation
+            if column_lineage == "auto_fuzzy" or column_lineage == "auto_strict":
+                upstream_schema = self._get_fields_from_dataset_urn(upstream_urn)
+                downstream_schema = self._get_fields_from_dataset_urn(downstream_urn)
+
+                # Choose matching strategy
+                mapping = (
+                    self._get_fuzzy_column_lineage(upstream_schema, downstream_schema)
+                    if column_lineage == "auto_fuzzy"
+                    else self._get_strict_column_lineage(
+                        upstream_schema, downstream_schema
+                    )
+                )
+                cll = parse_cll_mapping(
+                    upstream=upstream_urn,
+                    downstream=downstream_urn,
+                    cll_mapping=mapping,
+                )
+            # Explicit column lineage
+            elif isinstance(column_lineage, dict):
+                cll = parse_cll_mapping(
+                    upstream=upstream_urn,
+                    downstream=downstream_urn,
+                    cll_mapping=column_lineage,
+                )
+            else:
+                assert_never(column_lineage)
+        return cll
+
+    def _process_transformation_lineage(
+        self, transformation_text, upstream_urn, downstream_urn, cll
+    ):
+        fields_involved = OrderedSet([str(upstream_urn), str(downstream_urn)])
+        if cll is not None:
+            for c in cll:
+                for field in c.upstreams or []:
+                    fields_involved.add(field)
+                for field in c.downstreams or []:
+                    fields_involved.add(field)
+
+        # Create query URN and entity
+        query_urn = QueryUrn(generate_hash(transformation_text)).urn()
+        from datahub.sql_parsing.sql_parsing_aggregator import (
+            make_query_subjects,
+        )
+
+        query_entity = MetadataChangeProposalWrapper.construct_many(
+            query_urn,
+            aspects=[
+                models.QueryPropertiesClass(
+                    statement=models.QueryStatementClass(
+                        value=transformation_text,
+                        language=models.QueryLanguageClass.SQL,
+                    ),
+                    source=models.QuerySourceClass.SYSTEM,
+                    created=_empty_audit_stamp,
+                    lastModified=_empty_audit_stamp,
+                ),
+                make_query_subjects(list(fields_involved)),
+            ],
+        )
+
+        # Build dataset update
+        updater = DatasetPatchBuilder(str(downstream_urn))
+        updater.add_upstream_lineage(
+            models.UpstreamClass(
+                dataset=str(upstream_urn),
+                type=models.DatasetLineageTypeClass.TRANSFORMED,
+                query=query_urn,
+            )
+        )
+
+        # Add fine-grained lineage
+        for cl in cll or []:
+            cl.query = query_urn
+            updater.add_fine_grained_upstream_lineage(cl)
+
+        # Check dataset existence
+        if not self._client._graph.exists(updater.urn):
+            raise SdkUsageError(
+                f"Dataset {updater.urn} does not exist, and hence cannot be updated."
+            )
+
+        # Emit metadata change proposals
+        mcps: List[
+            Union[
+                MetadataChangeProposalWrapper,
+                models.MetadataChangeProposalClass,
+            ]
+        ] = list(updater.build())
+        if query_entity:
+            mcps.extend(query_entity)
+        self._client._graph.emit_mcps(mcps)
+
+    def infer_lineage_from_sql(
+        self,
+        *,
+        query_text: str,
+        platform: str,
+        platform_instance: Optional[str] = None,
+        env: str = "PROD",
+        default_db: Optional[str] = None,
+        default_schema: Optional[str] = None,
+    ) -> None:
+        """Add lineage by parsing a SQL query."""
+        from datahub.sql_parsing.sqlglot_lineage import (
+            create_lineage_sql_parsed_result,
+        )
+
+        # Parse the SQL query to extract lineage information
+        parsed_result = create_lineage_sql_parsed_result(
+            query=query_text,
+            default_db=default_db,
+            default_schema=default_schema,
+            platform=platform,
+            platform_instance=platform_instance,
+            env=env,
+            graph=self._client._graph,
+        )
+
+        if parsed_result.debug_info.table_error:
+            raise SdkUsageError(
+                f"Failed to parse SQL query: {parsed_result.debug_info.error}"
+            )
+        elif parsed_result.debug_info.column_error:
+            logger.warning(
+                f"Failed to parse SQL query: {parsed_result.debug_info.error}",
+            )
+
+        if not parsed_result.out_tables:
+            raise SdkUsageError(
+                "No output tables found in the query. Cannot establish lineage."
+            )
+
+        # Use the first output table as the downstream
+        downstream_urn = parsed_result.out_tables[0]
+
+        # Process all upstream tables found in the query
+        for upstream_table in parsed_result.in_tables:
+            # Skip self-lineage
+            if upstream_table == downstream_urn:
+                continue
+
+            # Extract column-level lineage for this specific upstream table
+            column_mapping = {}
+            if parsed_result.column_lineage:
+                for col_lineage in parsed_result.column_lineage:
+                    if not (col_lineage.downstream and col_lineage.downstream.column):
+                        continue
+
+                    # Filter upstreams to only include columns from current upstream table
+                    upstream_cols = [
+                        ref.column
+                        for ref in col_lineage.upstreams
+                        if ref.table == upstream_table and ref.column
+                    ]
+
+                    if upstream_cols:
+                        column_mapping[col_lineage.downstream.column] = upstream_cols
+
+            # Add lineage, including query text
+            self.add_lineage(
+                upstream=upstream_table,
+                downstream=downstream_urn,
+                column_lineage=column_mapping,
+                transformation_text=query_text,
+            )
+
+    @deprecated("Use add_lineage instead")
     def add_dataset_copy_lineage(
         self,
         *,
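
Note on the hunk above: it introduces a single overloaded `add_lineage` entry point that dispatches on the (upstream, downstream) entity-type pair via the `lineage_handlers` table, plus `infer_lineage_from_sql`, which parses a query and feeds each table pair back into `add_lineage`. A minimal usage sketch follows; the `DataHubClient.from_env()` constructor and the `client.lineage` accessor are assumed from the surrounding SDK (not shown in this diff), and the URNs are hypothetical placeholders.

    from datahub.sdk import DataHubClient

    client = DataHubClient.from_env()  # assumes server/token config is available in the environment

    # Dataset-to-dataset lineage; column_lineage=True is normalized to
    # "auto_fuzzy" by _add_dataset_lineage above.
    client.lineage.add_lineage(
        upstream="urn:li:dataset:(urn:li:dataPlatform:snowflake,db.schema.orders,PROD)",
        downstream="urn:li:dataset:(urn:li:dataPlatform:snowflake,db.schema.orders_clean,PROD)",
        column_lineage=True,
    )

    # Dataset-to-chart lineage dispatches to _add_chart_lineage; passing
    # column_lineage or transformation_text here raises SdkUsageError.
    client.lineage.add_lineage(
        upstream="urn:li:dataset:(urn:li:dataPlatform:snowflake,db.schema.orders_clean,PROD)",
        downstream="urn:li:chart:(looker,orders_by_region)",  # hypothetical chart URN
    )
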
@@ -164,13 +595,14 @@ class LineageClient:
 
         self._client.entities.update(updater)
 
+    @deprecated("Use add_lineage instead")
     def add_dataset_transform_lineage(
         self,
         *,
         upstream: DatasetUrnOrStr,
         downstream: DatasetUrnOrStr,
         column_lineage: Optional[ColumnLineageMapping] = None,
-        query_text: Optional[str] = None,
+        transformation_text: Optional[str] = None,
    ) -> None:
        upstream = DatasetUrn.from_string(upstream)
        downstream = DatasetUrn.from_string(downstream)
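
Besides the deprecation, note the keyword rename in this hunk: `query_text` becomes `transformation_text`, which is a breaking change for keyword callers of this method. A hedged before/after sketch, reusing the client object from the earlier example with placeholder URNs:

    # 1.1.0rc3:
    # client.lineage.add_dataset_transform_lineage(..., query_text=sql)

    # 1.1.0.1rc6 (still available, but marked deprecated; prefer add_lineage):
    client.lineage.add_dataset_transform_lineage(
        upstream="urn:li:dataset:(urn:li:dataPlatform:snowflake,db.schema.orders,PROD)",
        downstream="urn:li:dataset:(urn:li:dataPlatform:snowflake,db.schema.orders_clean,PROD)",
        transformation_text="SELECT id, amount FROM db.schema.orders",  # was query_text
    )
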
@@ -193,9 +625,9 @@ class LineageClient:
 
         query_urn = None
         query_entity = None
-        if query_text:
+        if transformation_text:
             # Eventually we might want to use our regex-based fingerprinting instead.
-            fingerprint = generate_hash(query_text)
+            fingerprint = generate_hash(transformation_text)
             query_urn = QueryUrn(fingerprint).urn()
 
             from datahub.sql_parsing.sql_parsing_aggregator import make_query_subjects
@@ -205,7 +637,8 @@ class LineageClient:
                 aspects=[
                     models.QueryPropertiesClass(
                         statement=models.QueryStatementClass(
-                            value=query_text, language=models.QueryLanguageClass.SQL
+                            value=transformation_text,
+                            language=models.QueryLanguageClass.SQL,
                         ),
                         source=models.QuerySourceClass.SYSTEM,
                         created=_empty_audit_stamp,
@@ -242,80 +675,7 @@ class LineageClient:
             mcps.extend(query_entity)
         self._client._graph.emit_mcps(mcps)
 
-    def add_dataset_lineage_from_sql(
-        self,
-        *,
-        query_text: str,
-        platform: str,
-        platform_instance: Optional[str] = None,
-        env: str = "PROD",
-        default_db: Optional[str] = None,
-        default_schema: Optional[str] = None,
-    ) -> None:
-        """Add lineage by parsing a SQL query."""
-        from datahub.sql_parsing.sqlglot_lineage import (
-            create_lineage_sql_parsed_result,
-        )
-
-        # Parse the SQL query to extract lineage information
-        parsed_result = create_lineage_sql_parsed_result(
-            query=query_text,
-            default_db=default_db,
-            default_schema=default_schema,
-            platform=platform,
-            platform_instance=platform_instance,
-            env=env,
-            graph=self._client._graph,
-        )
-
-        if parsed_result.debug_info.table_error:
-            raise SdkUsageError(
-                f"Failed to parse SQL query: {parsed_result.debug_info.error}"
-            )
-        elif parsed_result.debug_info.column_error:
-            logger.warning(
-                f"Failed to parse SQL query: {parsed_result.debug_info.error}",
-            )
-
-        if not parsed_result.out_tables:
-            raise SdkUsageError(
-                "No output tables found in the query. Cannot establish lineage."
-            )
-
-        # Use the first output table as the downstream
-        downstream_urn = parsed_result.out_tables[0]
-
-        # Process all upstream tables found in the query
-        for upstream_table in parsed_result.in_tables:
-            # Skip self-lineage
-            if upstream_table == downstream_urn:
-                continue
-
-            # Extract column-level lineage for this specific upstream table
-            column_mapping = {}
-            if parsed_result.column_lineage:
-                for col_lineage in parsed_result.column_lineage:
-                    if not (col_lineage.downstream and col_lineage.downstream.column):
-                        continue
-
-                    # Filter upstreams to only include columns from current upstream table
-                    upstream_cols = [
-                        ref.column
-                        for ref in col_lineage.upstreams
-                        if ref.table == upstream_table and ref.column
-                    ]
-
-                    if upstream_cols:
-                        column_mapping[col_lineage.downstream.column] = upstream_cols
-
-            # Add lineage, including query text
-            self.add_dataset_transform_lineage(
-                upstream=upstream_table,
-                downstream=downstream_urn,
-                column_lineage=column_mapping or None,
-                query_text=query_text,
-            )
-
+    @deprecated("Use add_lineage instead")
     def add_datajob_lineage(
         self,
         *,
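
The removal above is effectively a rename: the same body now lives in `infer_lineage_from_sql` (added in the earlier hunk), with the final call switched from `add_dataset_transform_lineage(..., query_text=...)` to `add_lineage(..., transformation_text=...)`. A migration sketch, assuming the same client object as above and a hypothetical CTAS query:

    sql = (
        "CREATE TABLE db.schema.orders_clean AS "
        "SELECT id, amount FROM db.schema.orders WHERE amount > 0"
    )
    client.lineage.infer_lineage_from_sql(
        query_text=sql,
        platform="snowflake",
        default_db="db",  # hypothetical defaults for resolving unqualified table names
        default_schema="schema",
    )
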
@@ -360,3 +720,242 @@ class LineageClient:
 
         # Apply the changes to the entity
         self._client.entities.update(patch_builder)
+
+    def get_lineage(
+        self,
+        *,
+        source_urn: Union[str, Urn],
+        source_column: Optional[str] = None,
+        direction: Literal["upstream", "downstream"] = "upstream",
+        max_hops: int = 1,
+        filter: Optional[Filter] = None,
+        count: int = 500,
+    ) -> List[LineageResult]:
+        """
+        Retrieve lineage entities connected to a source entity.
+        Args:
+            source_urn: Source URN for the lineage search
+            source_column: Source column for the lineage search
+            direction: Direction of lineage traversal
+            max_hops: Maximum number of hops to traverse
+            filter: Filters to apply to the lineage search
+            count: Maximum number of results to return
+
+        Returns:
+            List of lineage results
+
+        Raises:
+            SdkUsageError for invalid filter values
+        """
+        # Validate and convert input URN
+        source_urn = Urn.from_string(source_urn)
+        # Prepare GraphQL query variables with a separate method
+        variables = self._process_input_variables(
+            source_urn, source_column, filter, direction, max_hops, count
+        )
+
+        return self._execute_lineage_query(variables, direction)
+
+    def _process_input_variables(
+        self,
+        source_urn: Urn,
+        source_column: Optional[str] = None,
+        filters: Optional[Filter] = None,
+        direction: Literal["upstream", "downstream"] = "upstream",
+        max_hops: int = 1,
+        count: int = 500,
+    ) -> Dict[str, Any]:
+        """
+        Process filters and prepare GraphQL query variables for lineage search.
+
+        Args:
+            source_urn: Source URN for the lineage search
+            source_column: Source column for the lineage search
+            filters: Optional filters to apply
+            direction: Direction of lineage traversal
+            max_hops: Maximum number of hops to traverse
+            count: Maximum number of results to return
+
+        Returns:
+            Dictionary of GraphQL query variables
+
+        Raises:
+            SdkUsageError for invalid filter values
+        """
+
+        # print warning if max_hops is greater than 2
+        if max_hops > 2:
+            logger.warning(
+                """If `max_hops` is more than 2, the search will try to find the full lineage graph.
+                By default, only 500 results are shown.
+                You can change the `count` to get more or fewer results.
+                """
+            )
+
+        # Determine hop values
+        max_hop_values = (
+            [str(hop) for hop in range(1, max_hops + 1)]
+            if max_hops <= 2
+            else ["1", "2", "3+"]
+        )
+
+        degree_filter = FilterDsl.custom_filter(
+            field="degree",
+            condition="EQUAL",
+            values=max_hop_values,
+        )
+
+        filters_with_max_hops = (
+            FilterDsl.and_(degree_filter, filters)
+            if filters is not None
+            else degree_filter
+        )
+
+        types, compiled_filters = compile_filters(filters_with_max_hops)
+
+        # Prepare base variables
+        variables: Dict[str, Any] = {
+            "input": {
+                "urn": str(source_urn),
+                "direction": direction.upper(),
+                "count": count,
+                "types": types,
+                "orFilters": compiled_filters,
+            }
+        }
+
+        # if column is provided, update the variables to include the schema field urn
+        if isinstance(source_urn, SchemaFieldUrn) or source_column:
+            variables["input"]["searchFlags"] = {
+                "groupingSpec": {
+                    "groupingCriteria": {
+                        "baseEntityType": "SCHEMA_FIELD",
+                        "groupingEntityType": "SCHEMA_FIELD",
+                    }
+                }
+            }
+            if isinstance(source_urn, SchemaFieldUrn):
+                variables["input"]["urn"] = str(source_urn)
+            elif source_column:
+                variables["input"]["urn"] = str(SchemaFieldUrn(source_urn, source_column))
+
+        return variables
+
+    def _execute_lineage_query(
+        self,
+        variables: Dict[str, Any],
+        direction: Literal["upstream", "downstream"],
+    ) -> List[LineageResult]:
+        """Execute GraphQL query and process results."""
+        # Construct GraphQL query with dynamic path query
+        graphql_query = """
+        query scrollAcrossLineage($input: ScrollAcrossLineageInput!) {
+            scrollAcrossLineage(input: $input) {
+                nextScrollId
+                searchResults {
+                    degree
+                    entity {
+                        urn
+                        type
+                        ... on Dataset {
+                            name
+                            platform {
+                                name
+                            }
+                            properties {
+                                description
+                            }
+                        }
+                        ... on DataJob {
+                            jobId
+                            dataPlatformInstance {
+                                platform {
+                                    name
+                                }
+                            }
+                            properties {
+                                name
+                                description
+                            }
+                        }
+                    }
+                    paths {
+                        path {
+                            urn
+                            type
+                        }
+                    }
+                }
+            }
+        }
+        """
+
+        results: List[LineageResult] = []
+
+        first_iter = True
+        scroll_id: Optional[str] = None
+
+        while first_iter or scroll_id:
+            first_iter = False
+
+            # Update scroll ID if applicable
+            if scroll_id:
+                variables["input"]["scrollId"] = scroll_id
+
+            # Execute GraphQL query
+            response = self._graph.execute_graphql(graphql_query, variables=variables)
+            data = response["scrollAcrossLineage"]
+            scroll_id = data.get("nextScrollId")
+
+            # Process search results
+            for entry in data["searchResults"]:
+                entity = entry["entity"]
+
+                result = self._create_lineage_result(entity, entry, direction)
+                results.append(result)
+
+        return results
+
+    def _create_lineage_result(
+        self,
+        entity: Dict[str, Any],
+        entry: Dict[str, Any],
+        direction: Literal["upstream", "downstream"],
+    ) -> LineageResult:
+        """Create a LineageResult from entity and entry data."""
+        platform = entity.get("platform", {}).get("name") or entity.get(
+            "dataPlatformInstance", {}
+        ).get("platform", {}).get("name")
+
+        result = LineageResult(
+            urn=entity["urn"],
+            type=entity["type"],
+            hops=entry["degree"],
+            direction=direction,
+            platform=platform,
+        )
+
+        properties = entity.get("properties", {})
+        if properties:
+            result.name = properties.get("name", "")
+            result.description = properties.get("description", "")
+
+        result.paths = []
+        if "paths" in entry:
+            # Process each path in the lineage graph
+            for path in entry["paths"]:
+                for path_entry in path["path"]:
+                    # Only include schema fields in the path (exclude other types like Query)
+                    if path_entry["type"] == "SCHEMA_FIELD":
+                        schema_field_urn = SchemaFieldUrn.from_string(path_entry["urn"])
+                        result.paths.append(
+                            LineagePath(
+                                urn=path_entry["urn"],
+                                entity_name=DatasetUrn.from_string(
+                                    schema_field_urn.parent
+                                ).name,
+                                column_name=schema_field_urn.field_path,
+                            )
+                        )
+
+        return result
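
For the new read path, `get_lineage` compiles a degree filter from `max_hops`, pages through the `scrollAcrossLineage` GraphQL endpoint, and returns `LineageResult` dataclasses (with per-column `LineagePath` entries when the search is grouped by schema field). A minimal sketch, again assuming the `client.lineage` accessor from the earlier example; the URN and column name are placeholders:

    results = client.lineage.get_lineage(
        source_urn="urn:li:dataset:(urn:li:dataPlatform:snowflake,db.schema.orders,PROD)",
        source_column="amount",  # triggers SCHEMA_FIELD grouping in _process_input_variables
        direction="downstream",
        max_hops=2,  # values above 2 traverse the full graph and log a warning
        count=100,
    )
    for r in results:
        print(r.urn, r.type, r.hops, r.platform, r.name)
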