acryl-datahub 1.2.0.2rc2__py3-none-any.whl → 1.2.0.3rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of acryl-datahub has been flagged as potentially problematic.

Files changed (45)
  1. {acryl_datahub-1.2.0.2rc2.dist-info → acryl_datahub-1.2.0.3rc1.dist-info}/METADATA +2620 -2618
  2. {acryl_datahub-1.2.0.2rc2.dist-info → acryl_datahub-1.2.0.3rc1.dist-info}/RECORD +45 -37
  3. datahub/_version.py +1 -1
  4. datahub/api/entities/dataset/dataset.py +13 -1
  5. datahub/ingestion/autogenerated/capability_summary.json +97 -6
  6. datahub/ingestion/source/aws/glue.py +8 -0
  7. datahub/ingestion/source/cassandra/cassandra.py +5 -7
  8. datahub/ingestion/source/common/subtypes.py +2 -0
  9. datahub/ingestion/source/datahub/datahub_source.py +3 -0
  10. datahub/ingestion/source/delta_lake/source.py +1 -0
  11. datahub/ingestion/source/grafana/entity_mcp_builder.py +272 -0
  12. datahub/ingestion/source/grafana/field_utils.py +307 -0
  13. datahub/ingestion/source/grafana/grafana_api.py +142 -0
  14. datahub/ingestion/source/grafana/grafana_config.py +104 -0
  15. datahub/ingestion/source/grafana/grafana_source.py +522 -84
  16. datahub/ingestion/source/grafana/lineage.py +202 -0
  17. datahub/ingestion/source/grafana/models.py +120 -0
  18. datahub/ingestion/source/grafana/report.py +91 -0
  19. datahub/ingestion/source/grafana/types.py +16 -0
  20. datahub/ingestion/source/hex/hex.py +8 -0
  21. datahub/ingestion/source/looker/looker_source.py +9 -0
  22. datahub/ingestion/source/looker/lookml_source.py +8 -0
  23. datahub/ingestion/source/mongodb.py +11 -1
  24. datahub/ingestion/source/redshift/redshift.py +8 -1
  25. datahub/ingestion/source/s3/source.py +9 -1
  26. datahub/ingestion/source/sql/athena.py +8 -2
  27. datahub/ingestion/source/sql/clickhouse.py +9 -0
  28. datahub/ingestion/source/sql/vertica.py +3 -0
  29. datahub/ingestion/source/sql_queries.py +88 -46
  30. datahub/ingestion/source/unity/proxy.py +112 -22
  31. datahub/ingestion/source/unity/source.py +7 -10
  32. datahub/metadata/_internal_schema_classes.py +18 -3
  33. datahub/metadata/schema.avsc +19 -1
  34. datahub/metadata/schemas/DataHubPageModuleProperties.avsc +10 -1
  35. datahub/metadata/schemas/DataJobInputOutput.avsc +8 -0
  36. datahub/metadata/schemas/MetadataChangeEvent.avsc +9 -0
  37. datahub/metadata/schemas/UpstreamLineage.avsc +9 -0
  38. datahub/sdk/dataset.py +44 -0
  39. datahub/sdk/search_filters.py +34 -14
  40. datahub/sql_parsing/sql_parsing_aggregator.py +5 -0
  41. datahub/telemetry/telemetry.py +4 -1
  42. {acryl_datahub-1.2.0.2rc2.dist-info → acryl_datahub-1.2.0.3rc1.dist-info}/WHEEL +0 -0
  43. {acryl_datahub-1.2.0.2rc2.dist-info → acryl_datahub-1.2.0.3rc1.dist-info}/entry_points.txt +0 -0
  44. {acryl_datahub-1.2.0.2rc2.dist-info → acryl_datahub-1.2.0.3rc1.dist-info}/licenses/LICENSE +0 -0
  45. {acryl_datahub-1.2.0.2rc2.dist-info → acryl_datahub-1.2.0.3rc1.dist-info}/top_level.txt +0 -0
datahub/ingestion/source/sql_queries.py CHANGED

@@ -2,12 +2,13 @@ import json
 import logging
 import os
 from dataclasses import dataclass
-from datetime import datetime, timezone
+from datetime import datetime
 from functools import partial
-from typing import Iterable, List, Optional, Union
+from typing import ClassVar, Iterable, List, Optional, Union

-from pydantic import Field
+from pydantic import BaseModel, Field, validator

+from datahub.configuration.datetimes import parse_user_datetime
 from datahub.configuration.source_common import (
     EnvConfigMixin,
     PlatformInstanceConfigMixin,
@@ -35,7 +36,7 @@ from datahub.ingestion.api.workunit import MetadataWorkUnit
 from datahub.ingestion.graph.client import DataHubGraph
 from datahub.ingestion.source.usage.usage_common import BaseUsageConfig
 from datahub.ingestion.source_report.ingestion_stage import IngestionStageReport
-from datahub.metadata.urns import CorpUserUrn
+from datahub.metadata.urns import CorpUserUrn, DatasetUrn
 from datahub.sql_parsing.schema_resolver import SchemaResolver
 from datahub.sql_parsing.sql_parsing_aggregator import (
     KnownQueryLineageInfo,
@@ -73,9 +74,8 @@ class SqlQueriesSourceConfig(PlatformInstanceConfigMixin, EnvConfigMixin):
         default=None,
     )
     override_dialect: Optional[str] = Field(
-        description="DEPRECATED: This field is ignored. SQL dialect detection is now handled automatically by the SQL parsing aggregator based on the platform.",
+        description="The SQL dialect to use when parsing queries. Overrides automatic dialect detection.",
         default=None,
-        hidden_from_docs=True,
     )


@@ -209,19 +209,40 @@ class SqlQueriesSource(Source):
     def _add_query_to_aggregator(self, query_entry: "QueryEntry") -> None:
         """Add a query to the SQL parsing aggregator."""
         try:
-            # If we have explicit lineage, use it directly
-            if query_entry.upstream_tables or query_entry.downstream_tables:
+            # If we have both upstream and downstream tables, use explicit lineage
+            if query_entry.upstream_tables and query_entry.downstream_tables:
                 logger.debug("Using explicit lineage from query file")
                 for downstream_table in query_entry.downstream_tables:
                     known_lineage = KnownQueryLineageInfo(
                         query_text=query_entry.query,
-                        downstream=downstream_table,
-                        upstreams=query_entry.upstream_tables,
+                        downstream=str(downstream_table),
+                        upstreams=[str(urn) for urn in query_entry.upstream_tables],
                         timestamp=query_entry.timestamp,
                         session_id=query_entry.session_id,
                     )
                     self.aggregator.add_known_query_lineage(known_lineage)
             else:
+                # Warn if only partial lineage information is provided
+                # XOR: true if exactly one of upstream_tables or downstream_tables is provided
+                if bool(query_entry.upstream_tables) ^ bool(
+                    query_entry.downstream_tables
+                ):
+                    query_preview = (
+                        query_entry.query[:150] + "..."
+                        if len(query_entry.query) > 150
+                        else query_entry.query
+                    )
+                    missing_upstream = (
+                        "Missing upstream. " if not query_entry.upstream_tables else ""
+                    )
+                    missing_downstream = (
+                        "Missing downstream. "
+                        if not query_entry.downstream_tables
+                        else ""
+                    )
+                    logger.info(
+                        f"Only partial lineage information provided, falling back to SQL parsing for complete lineage detection. {missing_upstream}{missing_downstream}Query: {query_preview}"
+                    )
                 # No explicit lineage, rely on parsing
                 observed_query = ObservedQuery(
                     query=query_entry.query,
@@ -230,6 +251,7 @@ class SqlQueriesSource(Source):
                     session_id=query_entry.session_id,
                     default_db=self.config.default_db,
                     default_schema=self.config.default_schema,
+                    override_dialect=self.config.override_dialect,
                 )
                 self.aggregator.add_observed_query(observed_query)

@@ -243,46 +265,66 @@ class SqlQueriesSource(Source):
         )


-@dataclass
-class QueryEntry:
+class QueryEntry(BaseModel):
     query: str
-    timestamp: Optional[datetime]
-    user: Optional[CorpUserUrn]
-    operation_type: Optional[str]
-    downstream_tables: List[str]
-    upstream_tables: List[str]
+    timestamp: Optional[datetime] = None
+    user: Optional[CorpUserUrn] = None
+    operation_type: Optional[str] = None
+    downstream_tables: List[DatasetUrn] = Field(default_factory=list)
+    upstream_tables: List[DatasetUrn] = Field(default_factory=list)
     session_id: Optional[str] = None

+    # Validation context for URN creation
+    _validation_context: ClassVar[Optional[SqlQueriesSourceConfig]] = None
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    @validator("timestamp", pre=True)
+    def parse_timestamp(cls, v):
+        return None if v is None else parse_user_datetime(str(v))
+
+    @validator("user", pre=True)
+    def parse_user(cls, v):
+        if v is None:
+            return None
+
+        return v if isinstance(v, CorpUserUrn) else CorpUserUrn(v)
+
+    @validator("downstream_tables", "upstream_tables", pre=True)
+    def parse_tables(cls, v):
+        if not v:
+            return []
+
+        result = []
+        for item in v:
+            if isinstance(item, DatasetUrn):
+                result.append(item)
+            elif isinstance(item, str):
+                # Skip empty/whitespace-only strings
+                if item and item.strip():
+                    # Convert to URN using validation context
+                    assert cls._validation_context, (
+                        "Validation context must be set for URN creation"
+                    )
+                    urn_string = make_dataset_urn_with_platform_instance(
+                        name=item,
+                        platform=cls._validation_context.platform,
+                        platform_instance=cls._validation_context.platform_instance,
+                        env=cls._validation_context.env,
+                    )
+                    result.append(DatasetUrn.from_string(urn_string))
+
+        return result
+
     @classmethod
     def create(
         cls, entry_dict: dict, *, config: SqlQueriesSourceConfig
     ) -> "QueryEntry":
-        return cls(
-            query=entry_dict["query"],
-            timestamp=(
-                datetime.fromtimestamp(entry_dict["timestamp"], tz=timezone.utc)
-                if "timestamp" in entry_dict
-                else None
-            ),
-            user=CorpUserUrn(entry_dict["user"]) if "user" in entry_dict else None,
-            operation_type=entry_dict.get("operation_type"),
-            downstream_tables=[
-                make_dataset_urn_with_platform_instance(
-                    name=table,
-                    platform=config.platform,
-                    platform_instance=config.platform_instance,
-                    env=config.env,
-                )
-                for table in entry_dict.get("downstream_tables", [])
-            ],
-            upstream_tables=[
-                make_dataset_urn_with_platform_instance(
-                    name=table,
-                    platform=config.platform,
-                    platform_instance=config.platform_instance,
-                    env=config.env,
-                )
-                for table in entry_dict.get("upstream_tables", [])
-            ],
-            session_id=entry_dict.get("session_id"),
-        )
+        """Create QueryEntry from dict with config context."""
+        # Set validation context for URN creation
+        cls._validation_context = config
+        try:
+            return cls.parse_obj(entry_dict)
+        finally:
+            cls._validation_context = None
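For orientation, a minimal sketch of how the new pydantic-based QueryEntry consumes a query-file entry. Only QueryEntry.create, SqlQueriesSourceConfig, and the field names come from the diff above; the file path, platform, table names, and which config fields are required are illustrative assumptions.

    from datahub.ingestion.source.sql_queries import QueryEntry, SqlQueriesSourceConfig

    # Assumed config values; treating query_file/platform as the required fields is an assumption.
    config = SqlQueriesSourceConfig(query_file="queries.json", platform="snowflake")

    entry = QueryEntry.create(
        {
            "query": "INSERT INTO sales.daily SELECT * FROM sales.raw",
            "timestamp": "2024-06-01 00:00:00",
            "operation_type": "INSERT",
            "upstream_tables": ["sales.raw"],
            "downstream_tables": ["sales.daily"],
            "session_id": "session-1",
        },
        config=config,
    )
    # The pre-validators convert the table names into DatasetUrn objects using the
    # config's platform / platform_instance / env, and parse the timestamp with
    # parse_user_datetime.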
datahub/ingestion/source/unity/proxy.py CHANGED

@@ -4,8 +4,9 @@ Manage the communication with DataBricks Server and provide equivalent dataclass

 import dataclasses
 import logging
+from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime
-from typing import Any, Dict, Iterable, List, Optional, Union, cast
+from typing import Any, Dict, Iterable, List, Optional, Sequence, Union, cast
 from unittest.mock import patch

 import cachetools
@@ -28,6 +29,7 @@ from databricks.sdk.service.sql import (
 )
 from databricks.sdk.service.workspace import ObjectType
 from databricks.sql import connect
+from databricks.sql.types import Row

 from datahub._version import nice_version_name
 from datahub.api.entities.external.unity_catalog_external_entites import UnityCatalogTag
@@ -291,10 +293,59 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
                 method, path, body={**body, "page_token": response["next_page_token"]}
             )

+    @cached(cachetools.FIFOCache(maxsize=100))
+    def get_catalog_column_lineage(self, catalog: str) -> Dict[str, Dict[str, dict]]:
+        """Get column lineage for all tables in a catalog."""
+        logger.info(f"Fetching column lineage for catalog: {catalog}")
+        try:
+            query = """
+                SELECT
+                    source_table_catalog, source_table_schema, source_table_name, source_column_name, source_type,
+                    target_table_schema, target_table_name, target_column_name,
+                    max(event_time)
+                FROM system.access.column_lineage
+                WHERE
+                    target_table_catalog = %s
+                    AND target_table_schema IS NOT NULL
+                    AND target_table_name IS NOT NULL
+                    AND target_column_name IS NOT NULL
+                    AND source_table_catalog IS NOT NULL
+                    AND source_table_schema IS NOT NULL
+                    AND source_table_name IS NOT NULL
+                    AND source_column_name IS NOT NULL
+                GROUP BY
+                    source_table_catalog, source_table_schema, source_table_name, source_column_name, source_type,
+                    target_table_schema, target_table_name, target_column_name
+            """
+            rows = self._execute_sql_query(query, (catalog,))
+
+            result_dict: Dict[str, Dict[str, dict]] = {}
+            for row in rows:
+                result_dict.setdefault(row["target_table_schema"], {}).setdefault(
+                    row["target_table_name"], {}
+                ).setdefault(row["target_column_name"], []).append(
+                    # make fields look like the response from the older HTTP API
+                    {
+                        "catalog_name": row["source_table_catalog"],
+                        "schema_name": row["source_table_schema"],
+                        "table_name": row["source_table_name"],
+                        "name": row["source_column_name"],
+                    }
+                )
+
+            return result_dict
+        except Exception as e:
+            logger.warning(
+                f"Error getting column lineage for catalog {catalog}: {e}",
+                exc_info=True,
+            )
+            return {}
+
     def list_lineages_by_table(
         self, table_name: str, include_entity_lineage: bool
     ) -> dict:
         """List table lineage by table name."""
+        logger.debug(f"Getting table lineage for {table_name}")
         return self._workspace_client.api_client.do(  # type: ignore
             method="GET",
             path="/api/2.0/lineage-tracking/table-lineage",
@@ -304,13 +355,24 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
             },
         )

-    def list_lineages_by_column(self, table_name: str, column_name: str) -> dict:
+    def list_lineages_by_column(self, table_name: str, column_name: str) -> list:
         """List column lineage by table name and column name."""
-        return self._workspace_client.api_client.do(  # type: ignore
-            "GET",
-            "/api/2.0/lineage-tracking/column-lineage",
-            body={"table_name": table_name, "column_name": column_name},
-        )
+        logger.debug(f"Getting column lineage for {table_name}.{column_name}")
+        try:
+            return (
+                self._workspace_client.api_client.do(  # type: ignore
+                    "GET",
+                    "/api/2.0/lineage-tracking/column-lineage",
+                    body={"table_name": table_name, "column_name": column_name},
+                ).get("upstream_cols")
+                or []
+            )
+        except Exception as e:
+            logger.warning(
+                f"Error getting column lineage on table {table_name}, column {column_name}: {e}",
+                exc_info=True,
+            )
+            return []

     def table_lineage(self, table: Table, include_entity_lineage: bool) -> None:
         if table.schema.catalog.type == CustomCatalogType.HIVE_METASTORE_CATALOG:
@@ -348,23 +410,51 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
                 f"Error getting lineage on table {table.ref}: {e}", exc_info=True
             )

-    def get_column_lineage(self, table: Table, column_name: str) -> None:
+    def get_column_lineage(
+        self,
+        table: Table,
+        column_names: List[str],
+        *,
+        max_workers: Optional[int] = None,
+    ) -> None:
         try:
-            response: dict = self.list_lineages_by_column(
-                table_name=table.ref.qualified_table_name,
-                column_name=column_name,
-            )
-            for item in response.get("upstream_cols") or []:
-                table_ref = TableReference.create_from_lineage(
-                    item, table.schema.catalog.metastore
+            # use the newer system tables if we have a SQL warehouse, otherwise fall back
+            # and use the older (and much slower) HTTP API.
+            if self.warehouse_id:
+                lineage = (
+                    self.get_catalog_column_lineage(table.ref.catalog)
+                    .get(table.ref.schema, {})
+                    .get(table.ref.table, {})
                 )
-                if table_ref:
-                    table.upstreams.setdefault(table_ref, {}).setdefault(
-                        column_name, []
-                    ).append(item["name"])
+            else:
+                with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                    futures = [
+                        executor.submit(
+                            self.list_lineages_by_column,
+                            table.ref.qualified_table_name,
+                            column_name,
+                        )
+                        for column_name in column_names
+                    ]
+                    lineage = {
+                        column_name: future.result()
+                        for column_name, future in zip(column_names, futures)
+                    }
+
+            for column_name in column_names:
+                for item in lineage.get(column_name) or []:
+                    table_ref = TableReference.create_from_lineage(
+                        item,
+                        table.schema.catalog.metastore,
+                    )
+                    if table_ref:
+                        table.upstreams.setdefault(table_ref, {}).setdefault(
+                            column_name, []
+                        ).append(item["name"])
+
         except Exception as e:
             logger.warning(
-                f"Error getting column lineage on table {table.ref}, column {column_name}: {e}",
+                f"Error getting column lineage on table {table.ref}: {e}",
                 exc_info=True,
             )

@@ -504,14 +594,14 @@ class UnityCatalogApiProxy(UnityCatalogProxyProfilingMixin):
             executed_as_user_name=info.executed_as_user_name,
         )

-    def _execute_sql_query(self, query: str) -> List[List[str]]:
+    def _execute_sql_query(self, query: str, params: Sequence[Any] = ()) -> List[Row]:
         """Execute SQL query using databricks-sql connector for better performance"""
         try:
             with (
                 connect(**self._sql_connection_params) as connection,
                 connection.cursor() as cursor,
             ):
-                cursor.execute(query)
+                cursor.execute(query, list(params))
                 return cursor.fetchall()

         except Exception as e:
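To make the new code path easier to follow, here is a small illustrative sketch of the lookup structure that get_catalog_column_lineage builds from system.access.column_lineage; all concrete names are invented, while the dict keys mirror the ones in the diff above.

    # schema -> table -> target column -> upstream-column records, shaped like the
    # older /column-lineage HTTP API response.
    lineage_by_schema = {
        "analytics": {
            "orders_daily": {
                "order_total": [
                    {
                        "catalog_name": "main",
                        "schema_name": "raw",
                        "table_name": "orders",
                        "name": "amount",
                    }
                ]
            }
        }
    }

    # When a SQL warehouse is configured, get_column_lineage() resolves each column
    # with dict lookups like this instead of one HTTP round-trip per column.
    table_lineage = lineage_by_schema.get("analytics", {}).get("orders_daily", {})
    upstream_cols = table_lineage.get("order_total") or []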
datahub/ingestion/source/unity/source.py CHANGED

@@ -1,7 +1,6 @@
 import logging
 import re
 import time
-from concurrent.futures import ThreadPoolExecutor
 from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
 from urllib.parse import urljoin

@@ -657,15 +656,13 @@ class UnityCatalogSource(StatefulIngestionSourceBase, TestableSource):
         if len(table.columns) > self.config.column_lineage_column_limit:
             self.report.num_column_lineage_skipped_column_count += 1

-        with ThreadPoolExecutor(
-            max_workers=self.config.lineage_max_workers
-        ) as executor:
-            for column in table.columns[: self.config.column_lineage_column_limit]:
-                executor.submit(
-                    self.unity_catalog_api_proxy.get_column_lineage,
-                    table,
-                    column.name,
-                )
+        column_names = [
+            column.name
+            for column in table.columns[: self.config.column_lineage_column_limit]
+        ]
+        self.unity_catalog_api_proxy.get_column_lineage(
+            table, column_names, max_workers=self.config.lineage_max_workers
+        )

         return self._generate_lineage_aspect(self.gen_dataset_urn(table.ref), table)

datahub/metadata/_internal_schema_classes.py CHANGED

@@ -20163,23 +20163,24 @@ class DataHubPageModuleVisibilityClass(DictWrapper):


 class HierarchyModuleParamsClass(DictWrapper):
-    """The params required if the module is type HIERARCHY_VIEW
-    TODO: add filters
-    relatedEntitiesFilter: optional Filter"""
+    """The params required if the module is type HIERARCHY_VIEW"""

     RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.module.HierarchyModuleParams")
     def __init__(self,
         showRelatedEntities: bool,
         assetUrns: Union[None, List[str]]=None,
+        relatedEntitiesFilterJson: Union[None, str]=None,
     ):
         super().__init__()

         self.assetUrns = assetUrns
         self.showRelatedEntities = showRelatedEntities
+        self.relatedEntitiesFilterJson = relatedEntitiesFilterJson

     def _restore_defaults(self) -> None:
         self.assetUrns = self.RECORD_SCHEMA.fields_dict["assetUrns"].default
         self.showRelatedEntities = bool()
+        self.relatedEntitiesFilterJson = self.RECORD_SCHEMA.fields_dict["relatedEntitiesFilterJson"].default


     @property
@@ -20202,6 +20203,20 @@ class HierarchyModuleParamsClass(DictWrapper):
         self._inner_dict['showRelatedEntities'] = value


+    @property
+    def relatedEntitiesFilterJson(self) -> Union[None, str]:
+        """Optional filters to filter relatedEntities (assetUrns) out
+
+        The stringified json representing the logical predicate built in the UI to select assets.
+        This predicate is turned into orFilters to send through graphql since graphql doesn't support
+        arbitrary nesting. This string is used to restore the UI for this logical predicate."""
+        return self._inner_dict.get('relatedEntitiesFilterJson')  # type: ignore
+
+    @relatedEntitiesFilterJson.setter
+    def relatedEntitiesFilterJson(self, value: Union[None, str]) -> None:
+        self._inner_dict['relatedEntitiesFilterJson'] = value
+
+
 class LinkModuleParamsClass(DictWrapper):
     # No docs available.

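A hedged sketch of constructing the updated class with the new optional field; the filter payload below is invented, since the schema only describes it as the stringified JSON of the UI's logical predicate, and in practice the class is usually reached through the generated datahub.metadata.schema_classes re-exports.

    import json

    from datahub.metadata._internal_schema_classes import HierarchyModuleParamsClass

    params = HierarchyModuleParamsClass(
        showRelatedEntities=True,
        assetUrns=["urn:li:container:example"],  # illustrative URN
        # Illustrative predicate; the real payload is whatever the UI serializes.
        relatedEntitiesFilterJson=json.dumps({"operator": "AND", "parameters": []}),
    )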
datahub/metadata/schema.avsc CHANGED

@@ -4319,6 +4319,14 @@
       "doc": "The type of upstream entity"
     },
     {
+      "Searchable": {
+        "/*": {
+          "fieldName": "fineGrainedUpstreams",
+          "fieldType": "URN",
+          "hasValuesFieldName": "hasFineGrainedUpstreams",
+          "queryByDefault": false
+        }
+      },
       "Urn": "Urn",
       "urn_is_array": true,
       "type": [
@@ -12875,6 +12883,7 @@
       "Searchable": {
         "fieldName": "upstreams",
         "fieldType": "URN",
+        "hasValuesFieldName": "hasUpstreams",
         "queryByDefault": false
       },
       "java": {
@@ -17844,9 +17853,18 @@
           {
             "type": "boolean",
             "name": "showRelatedEntities"
+          },
+          {
+            "type": [
+              "null",
+              "string"
+            ],
+            "name": "relatedEntitiesFilterJson",
+            "default": null,
+            "doc": "Optional filters to filter relatedEntities (assetUrns) out\n\nThe stringified json representing the logical predicate built in the UI to select assets.\nThis predicate is turned into orFilters to send through graphql since graphql doesn't support\narbitrary nesting. This string is used to restore the UI for this logical predicate."
           }
         ],
-        "doc": "The params required if the module is type HIERARCHY_VIEW\nTODO: add filters\nrelatedEntitiesFilter: optional Filter"
+        "doc": "The params required if the module is type HIERARCHY_VIEW"
       }
     ],
     "name": "hierarchyViewParams",
datahub/metadata/schemas/DataHubPageModuleProperties.avsc CHANGED

@@ -181,9 +181,18 @@
           {
             "type": "boolean",
             "name": "showRelatedEntities"
+          },
+          {
+            "type": [
+              "null",
+              "string"
+            ],
+            "name": "relatedEntitiesFilterJson",
+            "default": null,
+            "doc": "Optional filters to filter relatedEntities (assetUrns) out\n\nThe stringified json representing the logical predicate built in the UI to select assets.\nThis predicate is turned into orFilters to send through graphql since graphql doesn't support\narbitrary nesting. This string is used to restore the UI for this logical predicate."
           }
         ],
-        "doc": "The params required if the module is type HIERARCHY_VIEW\nTODO: add filters\nrelatedEntitiesFilter: optional Filter"
+        "doc": "The params required if the module is type HIERARCHY_VIEW"
       }
     ],
     "name": "hierarchyViewParams",
datahub/metadata/schemas/DataJobInputOutput.avsc CHANGED

@@ -375,6 +375,14 @@
       "doc": "The type of upstream entity"
     },
     {
+      "Searchable": {
+        "/*": {
+          "fieldName": "fineGrainedUpstreams",
+          "fieldType": "URN",
+          "hasValuesFieldName": "hasFineGrainedUpstreams",
+          "queryByDefault": false
+        }
+      },
       "type": [
         "null",
         {
datahub/metadata/schemas/MetadataChangeEvent.avsc CHANGED

@@ -3070,6 +3070,14 @@
       "doc": "The type of upstream entity"
     },
     {
+      "Searchable": {
+        "/*": {
+          "fieldName": "fineGrainedUpstreams",
+          "fieldType": "URN",
+          "hasValuesFieldName": "hasFineGrainedUpstreams",
+          "queryByDefault": false
+        }
+      },
       "type": [
         "null",
         {
@@ -3691,6 +3699,7 @@
       "Searchable": {
         "fieldName": "upstreams",
         "fieldType": "URN",
+        "hasValuesFieldName": "hasUpstreams",
         "queryByDefault": false
       },
       "java": {
datahub/metadata/schemas/UpstreamLineage.avsc CHANGED

@@ -94,6 +94,7 @@
       "Searchable": {
         "fieldName": "upstreams",
         "fieldType": "URN",
+        "hasValuesFieldName": "hasUpstreams",
         "queryByDefault": false
       },
       "java": {
@@ -199,6 +200,14 @@
       "doc": "The type of upstream entity"
     },
     {
+      "Searchable": {
+        "/*": {
+          "fieldName": "fineGrainedUpstreams",
+          "fieldType": "URN",
+          "hasValuesFieldName": "hasFineGrainedUpstreams",
+          "queryByDefault": false
+        }
+      },
       "type": [
         "null",
         {
datahub/sdk/dataset.py CHANGED
@@ -72,6 +72,11 @@ UpstreamLineageInputType: TypeAlias = Union[
     Dict[DatasetUrnOrStr, ColumnLineageMapping],
 ]

+ViewDefinitionInputType: TypeAlias = Union[
+    str,
+    models.ViewPropertiesClass,
+]
+

 def _parse_upstream_input(
     upstream_input: UpstreamInputType,
@@ -467,6 +472,7 @@ class Dataset(
         custom_properties: Optional[Dict[str, str]] = None,
         created: Optional[datetime] = None,
         last_modified: Optional[datetime] = None,
+        view_definition: Optional[ViewDefinitionInputType] = None,
         # Standard aspects.
         parent_container: ParentContainerInputType | Unset = unset,
         subtype: Optional[str] = None,
@@ -495,6 +501,7 @@ class Dataset(
             custom_properties: Optional dictionary of custom properties.
             created: Optional creation timestamp.
             last_modified: Optional last modification timestamp.
+            view_definition: Optional view definition for the dataset.
             parent_container: Optional parent container for this dataset.
             subtype: Optional subtype of the dataset.
             owners: Optional list of owners.
@@ -536,6 +543,8 @@ class Dataset(
             self.set_created(created)
         if last_modified is not None:
             self.set_last_modified(last_modified)
+        if view_definition is not None:
+            self.set_view_definition(view_definition)

         if parent_container is not unset:
             self._set_container(parent_container)
@@ -717,6 +726,41 @@ class Dataset(
     def set_last_modified(self, last_modified: datetime) -> None:
         self._ensure_dataset_props().lastModified = make_time_stamp(last_modified)

+    @property
+    def view_definition(self) -> Optional[models.ViewPropertiesClass]:
+        """Get the view definition of the dataset.
+
+        Under typical usage, this will be present if the subtype is "View".
+
+        Returns:
+            The view definition if set, None otherwise.
+        """
+        return self._get_aspect(models.ViewPropertiesClass)
+
+    def set_view_definition(self, view_definition: ViewDefinitionInputType) -> None:
+        """Set the view definition of the dataset.
+
+        If you're setting a view definition, subtype should typically be set to "view".
+
+        If a string is provided, it will be treated as a SQL view definition. To set
+        a custom language or other properties, provide a ViewPropertiesClass object.
+
+        Args:
+            view_definition: The view definition to set.
+        """
+        if isinstance(view_definition, models.ViewPropertiesClass):
+            self._set_aspect(view_definition)
+        elif isinstance(view_definition, str):
+            self._set_aspect(
+                models.ViewPropertiesClass(
+                    materialized=False,
+                    viewLogic=view_definition,
+                    viewLanguage="SQL",
+                )
+            )
+        else:
+            assert_never(view_definition)
+
     def _schema_dict(self) -> Dict[str, models.SchemaFieldClass]:
         schema_metadata = self._get_aspect(models.SchemaMetadataClass)
         if schema_metadata is None:
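Taken together, the new constructor parameter and setter can be used roughly as follows; the platform, dataset name, and SQL text are illustrative, while Dataset, subtype, view_definition, and the string-to-ViewPropertiesClass fallback come from the diff above.

    from datahub.sdk.dataset import Dataset

    dataset = Dataset(
        platform="snowflake",  # illustrative platform
        name="analytics.public.active_users",
        subtype="View",
        view_definition="SELECT id, last_login FROM analytics.public.users WHERE active",
    )

    # A plain string is stored as ViewPropertiesClass(materialized=False, viewLanguage="SQL");
    # pass a models.ViewPropertiesClass directly to customize language or materialization.
    assert dataset.view_definition is not None
    assert dataset.view_definition.viewLanguage == "SQL"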