castor-extractor 0.22.0__py3-none-any.whl → 0.22.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of castor-extractor might be problematic.

Files changed (38)
  1. CHANGELOG.md +20 -0
  2. castor_extractor/utils/__init__.py +1 -0
  3. castor_extractor/utils/time.py +4 -0
  4. castor_extractor/utils/time_test.py +8 -1
  5. castor_extractor/visualization/looker_studio/__init__.py +6 -0
  6. castor_extractor/visualization/looker_studio/assets.py +6 -0
  7. castor_extractor/visualization/looker_studio/client/__init__.py +3 -0
  8. castor_extractor/visualization/looker_studio/client/admin_sdk_client.py +90 -0
  9. castor_extractor/visualization/looker_studio/client/client.py +37 -0
  10. castor_extractor/visualization/looker_studio/client/credentials.py +20 -0
  11. castor_extractor/visualization/looker_studio/client/endpoints.py +18 -0
  12. castor_extractor/visualization/looker_studio/client/enums.py +8 -0
  13. castor_extractor/visualization/looker_studio/client/looker_studio_api_client.py +102 -0
  14. castor_extractor/visualization/looker_studio/client/pagination.py +31 -0
  15. castor_extractor/visualization/looker_studio/client/scopes.py +6 -0
  16. castor_extractor/visualization/sigma/client/client.py +64 -10
  17. castor_extractor/visualization/thoughtspot/assets.py +3 -1
  18. castor_extractor/visualization/thoughtspot/client/client.py +67 -14
  19. castor_extractor/visualization/thoughtspot/client/utils.py +10 -4
  20. castor_extractor/visualization/thoughtspot/client/utils_test.py +22 -4
  21. castor_extractor/warehouse/databricks/api_client.py +2 -60
  22. castor_extractor/warehouse/databricks/client.py +4 -47
  23. castor_extractor/warehouse/databricks/client_test.py +1 -35
  24. castor_extractor/warehouse/databricks/credentials.py +4 -6
  25. castor_extractor/warehouse/databricks/enums.py +15 -0
  26. castor_extractor/warehouse/databricks/extract.py +13 -11
  27. castor_extractor/warehouse/databricks/lineage.py +47 -119
  28. castor_extractor/warehouse/databricks/lineage_test.py +86 -31
  29. castor_extractor/warehouse/databricks/sql_client.py +23 -8
  30. castor_extractor/warehouse/databricks/types.py +0 -7
  31. castor_extractor/warehouse/salesforce/format.py +12 -5
  32. castor_extractor/warehouse/salesforce/format_test.py +22 -6
  33. {castor_extractor-0.22.0.dist-info → castor_extractor-0.22.5.dist-info}/METADATA +23 -1
  34. {castor_extractor-0.22.0.dist-info → castor_extractor-0.22.5.dist-info}/RECORD +37 -26
  35. castor_extractor/warehouse/databricks/test_constants.py +0 -79
  36. {castor_extractor-0.22.0.dist-info → castor_extractor-0.22.5.dist-info}/LICENCE +0 -0
  37. {castor_extractor-0.22.0.dist-info → castor_extractor-0.22.5.dist-info}/WHEEL +0 -0
  38. {castor_extractor-0.22.0.dist-info → castor_extractor-0.22.5.dist-info}/entry_points.txt +0 -0
castor_extractor/visualization/thoughtspot/client/utils.py

@@ -1,13 +1,17 @@
 import csv
+import re
 from collections.abc import Iterator
 from io import StringIO
 
+_END_OF_GENERATED_TEXT = r'^""$'
+
 
 def usage_liveboard_reader(usage_liveboard_csv: str) -> Iterator[dict]:
     """
     Converts a CSV string into an iterator of dictionaries after
-    ignoring the first 6 lines, using the 7th line as the header.
-    First 6 lines looks like the following:
+    ignoring the generated text that precedes the actual CSV header row.
+    The generated block ends with a row containing only two double quotes.
+    Here is an example:
 
     "Data extract produced by Castor on 09/19/2024 06:54"
     "Filters applied on data :"
@@ -15,11 +19,13 @@ def usage_liveboard_reader(usage_liveboard_csv: str) -> Iterator[dict]:
     "Pinboard NOT IN [mlm - availability pinboard,null]"
     "Timestamp >= 20240820 00:00:00 < 20240919 00:00:00"
     "Timestamp >= 20240919 00:00:00 < 20240920 00:00:00"
+    ""
 
     """
     csv_file = StringIO(usage_liveboard_csv)
 
-    for _ in range(7):
-        next(csv_file)
+    line = next(csv_file)
+    while not re.match(_END_OF_GENERATED_TEXT, line.strip()):
+        line = next(csv_file)
 
     yield from csv.DictReader(csv_file)
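
The rewrite above drops the brittle assumption that the generated preamble is always six lines long: the reader now scans for the `""` sentinel row that closes the preamble, whatever its length. A minimal sketch of the new behavior (the sample CSV is invented for illustration):

from castor_extractor.visualization.thoughtspot.client.utils import (
    usage_liveboard_reader,
)

# Invented export with a 3-line preamble, which the old
# `for _ in range(7)` skip would have broken on.
sample = '''"Data extract produced by Castor on 01/07/2025 16:07"
"Filters applied on data :"
"Timestamp >= 20241208 00:00:00 < 20250107 00:00:00"
""
"Answer name","Number of unique users"
"Monthly sales","42"'''

rows = list(usage_liveboard_reader(sample))
assert rows == [{"Answer name": "Monthly sales", "Number of unique users": "42"}]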
castor_extractor/visualization/thoughtspot/client/utils_test.py

@@ -2,7 +2,7 @@ from .utils import (
     usage_liveboard_reader,
 )
 
-VALID_CSV = '''"Data extract produced by Castor on 09/19/2024 06:54"
+VALID_CSV_1 = '''"Data extract produced by Castor on 09/19/2024 06:54"
 "Filters applied on data :"
 "User Action IN [pinboard_embed_view,pinboard_tspublic_no_runtime_filter,pinboard_tspublic_runtime_filter,pinboard_view]"
 "Pinboard NOT IN [mlm - availability pinboard,null]"
@@ -16,6 +16,13 @@ VALID_CSV = '''"Data extract produced by Castor on 09/19/2024 06:54"
 "September test","25","2"'''
 
 
+VALID_CSV_2 = '''"Data extract produced by Castor on 01/07/2025 16:07"
+"Filters applied on data :"
+"Timestamp >= 20241208 00:00:00 < 20250107 00:00:00"
+""
+"Answer name","User name","Number of unique users","Count of object interactions"
+"toto","tata","1","666"'''
+
 # Invalid CSV input (missing data rows)
 INVALID_CSV = '''"Data extract produced by Castor on 09/19/2024 06:54"
 "Filters applied on data :"
@@ -27,7 +34,7 @@ INVALID_CSV = '''"Data extract produced by Castor on 09/19/2024 06:54"
 
 
 def test_usage_liveboard_reader():
-    expected_output = [
+    expected_output_1 = [
         {
             "Pinboard": "Market Report",
             "Pinboard Views": "559",
@@ -49,9 +56,20 @@ def test_usage_liveboard_reader():
             "Unique Number of User": "2",
         },
     ]
+    expected_output_2 = [
+        {
+            "Answer name": "toto",
+            "User name": "tata",
+            "Number of unique users": "1",
+            "Count of object interactions": "666",
+        }
+    ]
+
+    result = list(usage_liveboard_reader(VALID_CSV_1))
+    assert result == expected_output_1
 
-    result = list(usage_liveboard_reader(VALID_CSV))
-    assert result == expected_output
+    result = list(usage_liveboard_reader(VALID_CSV_2))
+    assert result == expected_output_2
 
     result = list(usage_liveboard_reader(INVALID_CSV))
     assert result == []  # Expect an empty result since there is no data
castor_extractor/warehouse/databricks/api_client.py

@@ -1,8 +1,6 @@
 import logging
-from collections.abc import Iterator
 from functools import partial
-from http import HTTPStatus
-from typing import Optional
+from typing import Iterator, Optional
 
 import requests
 
@@ -14,16 +12,14 @@ from ...utils import (
     fetch_all_pages,
     handle_response,
     retry,
-    retry_request,
     safe_mode,
 )
 from ..abstract import TimeFilter
 from .credentials import DatabricksCredentials
 from .endpoints import DatabricksEndpointFactory
 from .format import DatabricksFormatter, TagMapping
-from .lineage import single_column_lineage_links, single_table_lineage_links
 from .pagination import DATABRICKS_PAGE_SIZE, DatabricksPagination
-from .types import TablesColumns, TimestampedLink
+from .types import TablesColumns
 from .utils import hourly_time_filters
 
 logger = logging.getLogger(__name__)
@@ -132,60 +128,6 @@ class DatabricksAPIClient(APIClient):
             column_tags=column_tags,
         )
 
-    @safe_mode(safe_lineage_params, lambda: [])
-    @retry(
-        exceptions=_RETRY_EXCEPTIONS,
-        max_retries=_RETRY_ATTEMPTS,
-        base_ms=_RETRY_BASE_MS,
-    )
-    @retry_request(
-        status_codes=(HTTPStatus.TOO_MANY_REQUESTS,),
-        max_retries=_RETRY_ATTEMPTS,
-    )
-    def get_single_column_lineage(
-        self,
-        names: tuple[str, str],
-    ) -> list[TimestampedLink]:
-        """
-        Helper function used in get_lineage_links.
-        Call data lineage API and return the content of the result
-
-        eg table_path: broward_prd.bronze.account_adjustments
-        FYI: Maximum rate of 10 requests per SECOND
-        """
-        table_path, column_name = names
-        payload = {
-            "table_name": table_path,
-            "column_name": column_name,
-            "include_entity_lineage": True,
-        }
-        content = self._get(
-            DatabricksEndpointFactory.column_lineage(), params=payload
-        )
-        column_path = f"{table_path}.{column_name}"
-        return single_column_lineage_links(column_path, content)
-
-    @safe_mode(safe_lineage_params, lambda: [])
-    @retry(
-        exceptions=_RETRY_EXCEPTIONS,
-        max_retries=_RETRY_ATTEMPTS,
-        base_ms=_RETRY_BASE_MS,
-    )
-    def get_single_table_lineage(
-        self, table_path: str
-    ) -> list[TimestampedLink]:
-        """
-        Helper function used in get_lineage_links.
-        Call data lineage API and return the content of the result
-        eg table_path: broward_prd.bronze.account_adjustments
-        FYI: Maximum rate of 50 requests per SECOND
-        """
-        payload = {"table_name": table_path, "include_entity_lineage": True}
-        content = self._get(
-            DatabricksEndpointFactory.table_lineage(), params=payload
-        )
-        return single_table_lineage_links(table_path, content)
-
     @safe_mode(safe_query_params, lambda: [])
     @retry(
         exceptions=_RETRY_EXCEPTIONS,
castor_extractor/warehouse/databricks/client.py

@@ -1,17 +1,14 @@
 import logging
-from concurrent.futures import ThreadPoolExecutor
 from typing import Optional
 
-from ...utils import (
-    mapping_from_rows,
-)
+from ...utils import mapping_from_rows
 from ..abstract import TimeFilter
 from .api_client import DatabricksAPIClient
 from .credentials import DatabricksCredentials
+from .enums import TagEntity
 from .format import DatabricksFormatter
-from .lineage import deduplicate_lineage, paths_for_column_lineage
-from .sql_client import DatabricksSQLClient, TagEntity
-from .types import TablesColumns, TimestampedLink
+from .sql_client import DatabricksSQLClient
+from .types import TablesColumns
 
 logger = logging.getLogger(__name__)
 
@@ -95,46 +92,6 @@ class DatabricksClient:
         columns.extend(c_to_add)
         return tables, columns
 
-    def table_lineage(self, tables: list[dict]) -> list[dict]:
-        """
-        Wrapper function that retrieves all table lineage
-        """
-        # retrieve table lineage
-        with ThreadPoolExecutor(max_workers=_THREADS_TABLE_LINEAGE) as executor:
-            table_paths = [
-                ".".join([table["schema_id"], table["table_name"]])
-                for table in tables
-            ]
-            results = executor.map(
-                self.api_client.get_single_table_lineage, table_paths
-            )
-        lineages = [link for links in results for link in links]
-        deduplicated = deduplicate_lineage(lineages)
-        return self.formatter.format_lineage(deduplicated)
-
-    def column_lineage(
-        self, tables: list[dict], columns: list[dict], table_lineage: list[dict]
-    ) -> list[dict]:
-        """
-        Wrapper function that retrieves all column lineage
-        we only try to retrieve column lineage if we found table lineage
-        """
-        candidate_paths = paths_for_column_lineage(
-            tables, columns, table_lineage
-        )
-        # retrieve column lineage
-        with ThreadPoolExecutor(
-            max_workers=_THREADS_COLUMN_LINEAGE
-        ) as executor:
-            results = executor.map(
-                self.api_client.get_single_column_lineage, candidate_paths
-            )
-        lineages: list[TimestampedLink] = [
-            link for links in results for link in links
-        ]
-        deduplicated = deduplicate_lineage(lineages)
-        return self.formatter.format_lineage(deduplicated)
-
     def queries(self, time_filter: Optional[TimeFilter] = None) -> list[dict]:
         return self.api_client.queries(time_filter)
 
castor_extractor/warehouse/databricks/client_test.py

@@ -1,14 +1,4 @@
-from unittest.mock import Mock, patch
-
-from .client import (
-    DatabricksClient,
-)
-from .test_constants import (
-    CLOSER_DATE,
-    MOCK_TABLES_FOR_TABLE_LINEAGE,
-    OLDER_DATE,
-    TABLE_LINEAGE_SIDE_EFFECT,
-)
+from .client import DatabricksClient
 
 
 class MockDatabricksClient(DatabricksClient):
@@ -48,27 +38,3 @@ def test_DatabricksClient__match_table_with_user():
     table_without_owner = {"id": 1, "owner_email": None}
     actual = client._match_table_with_user(table_without_owner, user_mapping)
     assert actual == table_without_owner
-
-
-@patch(
-    "source.packages.extractor.castor_extractor.warehouse.databricks.client.DatabricksAPIClient._get",
-    side_effect=TABLE_LINEAGE_SIDE_EFFECT,
-)
-def test_DatabricksClient_table_lineage(mock_get):
-    client = DatabricksClient(Mock())
-
-    lineage = client.table_lineage(MOCK_TABLES_FOR_TABLE_LINEAGE)
-    assert len(lineage) == 2
-
-    expected_link_1 = {
-        "parent_path": "dev.silver.pre_analytics",
-        "child_path": "dev.silver.analytics",
-        "timestamp": OLDER_DATE,
-    }
-    expected_link_2 = {
-        "parent_path": "dev.bronze.analytics",
-        "child_path": "dev.silver.analytics",
-        "timestamp": CLOSER_DATE,
-    }
-    assert expected_link_1 in lineage
-    assert expected_link_2 in lineage
castor_extractor/warehouse/databricks/credentials.py

@@ -1,24 +1,22 @@
 from dataclasses import field
-from typing import Optional
 
-from pydantic.dataclasses import dataclass
-from pydantic_settings import SettingsConfigDict
+from pydantic_settings import BaseSettings, SettingsConfigDict
 
 DATABRICKS_ENV_PREFIX = "CASTOR_DATABRICKS_"
 
 
-@dataclass
-class DatabricksCredentials:
+class DatabricksCredentials(BaseSettings):
     """
     Credentials needed by Databricks client
     Requires:
     - host
+    - http_path
     - token
     """
 
     host: str
+    http_path: str
     token: str = field(metadata={"sensitive": True})
-    http_path: Optional[str] = field(default=None)
 
     model_config = SettingsConfigDict(
         env_prefix=DATABRICKS_ENV_PREFIX,
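
Since `DatabricksCredentials` now extends `BaseSettings`, its three required fields can be passed explicitly or resolved from `CASTOR_DATABRICKS_`-prefixed environment variables; note that `http_path` becomes mandatory rather than optional. A minimal sketch, assuming the module path from the file list above and using placeholder values:

import os

from castor_extractor.warehouse.databricks.credentials import (
    DatabricksCredentials,
)

# Placeholder values; pydantic-settings resolves CASTOR_DATABRICKS_<FIELD>
# thanks to the env_prefix declared in model_config.
os.environ["CASTOR_DATABRICKS_HOST"] = "adb-123456789.0.azuredatabricks.net"
os.environ["CASTOR_DATABRICKS_HTTP_PATH"] = "/sql/1.0/warehouses/abc123"
os.environ["CASTOR_DATABRICKS_TOKEN"] = "dapi-placeholder-token"

creds = DatabricksCredentials()  # validation fails if any field is missing
print(creds.host, creds.http_path)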
castor_extractor/warehouse/databricks/enums.py (new file)

@@ -0,0 +1,15 @@
+from enum import Enum
+
+
+class LineageEntity(Enum):
+    """Entities that can be linked in Databricks lineage"""
+
+    COLUMN = "COLUMN"
+    TABLE = "TABLE"
+
+
+class TagEntity(Enum):
+    """Entities that can be tagged in Databricks"""
+
+    COLUMN = "COLUMN"
+    TABLE = "TABLE"
castor_extractor/warehouse/databricks/extract.py

@@ -1,4 +1,5 @@
 import logging
+from datetime import date
 from typing import Optional
 
 from ...utils import AbstractStorage, LocalStorage, write_summary
@@ -16,6 +17,7 @@ from ..abstract import (
 )
 from .client import DatabricksClient
 from .credentials import DatabricksCredentials
+from .enums import LineageEntity
 
 DATABRICKS_ASSETS: SupportedAssets = {
     WarehouseAssetGroup.ADDITIONAL_LINEAGE: ADDITIONAL_LINEAGE_ASSETS,
@@ -32,6 +34,12 @@ OTimeFilter = Optional[TimeFilter]
 Paths = dict[str, str]
 
 
+def _day(time_filter: OTimeFilter) -> date:
+    if not time_filter:
+        return TimeFilter.default().day
+    return time_filter.day
+
+
 class DatabricksExtractionProcessor:
     """Databricks' API-based extraction management"""
 
@@ -96,22 +104,18 @@ class DatabricksExtractionProcessor:
         logger.info(f"Extracted {len(columns)} columns to {location}")
         return catalog_locations
 
-    def extract_lineage(self) -> Paths:
+    def extract_lineage(self, time_filter: OTimeFilter = None) -> Paths:
         if self._should_not_reextract(WarehouseAssetGroup.ADDITIONAL_LINEAGE):
             return self._existing_group_paths(
                 WarehouseAssetGroup.ADDITIONAL_LINEAGE
             )
         lineage_locations: dict[str, str] = dict()
 
-        # extract catalog
-        databases = self._client.databases()
-        schemas = self._client.schemas(databases)
-        users = self._client.users()
-        tables, columns = self._client.tables_and_columns(schemas, users)
-        logger.info("Extracted pre-requisite catalog. Next comes lineage")
+        day = _day(time_filter)
+        client = self._client.sql_client
 
         # extract table lineage
-        table_lineage = self._client.table_lineage(tables)
+        table_lineage = client.get_lineage(LineageEntity.TABLE, day)
         table_lineage_key = WarehouseAsset.ADDITIONAL_TABLE_LINEAGE.value
         location = self._storage.put(table_lineage_key, table_lineage)
         lineage_locations[table_lineage_key] = location
@@ -119,9 +123,7 @@ class DatabricksExtractionProcessor:
         logger.info(msg)
 
         # extract column lineage
-        column_lineage = self._client.column_lineage(
-            tables, columns, table_lineage
-        )
+        column_lineage = client.get_lineage(LineageEntity.COLUMN, day)
         column_lineage_key = WarehouseAsset.ADDITIONAL_COLUMN_LINEAGE.value
         location = self._storage.put(column_lineage_key, column_lineage)
         lineage_locations[column_lineage_key] = location
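
`extract_lineage` no longer crawls the catalog and fans out per-table API calls; it now asks the SQL client for one day of lineage events. The `sql_client.py` hunk is not shown on this page, but the field names consumed by `lineage.py` below (`source_table_full_name`, `target_table_full_name`, `event_time`) suggest it reads the Databricks `system.access.table_lineage` and `system.access.column_lineage` system tables. A hypothetical sketch of such a query builder (the names and SQL here are assumptions, not the package's actual code):

from datetime import date

# Assumed mapping from LineageEntity values to Databricks system tables.
_LINEAGE_TABLES = {
    "TABLE": "system.access.table_lineage",
    "COLUMN": "system.access.column_lineage",
}


def lineage_query(entity: str, day: date) -> str:
    """Build a query for all lineage events recorded on the given day."""
    table = _LINEAGE_TABLES[entity]
    return f"SELECT * FROM {table} WHERE event_date = DATE'{day.isoformat()}'"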
castor_extractor/warehouse/databricks/lineage.py

@@ -1,141 +1,69 @@
-from typing import cast
+from typing import Iterable, Optional
 
-from .types import Link, Ostr, OTimestampedLink, TimestampedLink
+from .enums import LineageEntity
 
 
-class LineageLinks:
+class LineageProcessor:
     """
     helper class that handles lineage deduplication and filtering
     """
 
-    def __init__(self):
-        self.lineage: dict[Link, Ostr] = dict()
+    def __init__(self, lineage_entity: LineageEntity):
+        self.lineage_entity = lineage_entity
 
-    def add(self, timestamped_link: TimestampedLink) -> None:
-        """
-        keep the most recent lineage link, adding to `self.lineage`
-        """
-        parent, child, timestamp = timestamped_link
-        link = (parent, child)
-        if not self.lineage.get(link):
-            self.lineage[link] = timestamp
-            return
-
-        if not timestamp:
-            return
-        # keep most recent link; cast for mypy
-        recent = max(cast(str, self.lineage[link]), cast(str, timestamp))
-        self.lineage[link] = recent
+        self.lineage: dict[tuple[str, str], dict] = dict()
 
+    def _parent_path(self, link) -> Optional[str]:
+        if self.lineage_entity == LineageEntity.TABLE:
+            return link["source_table_full_name"]
 
-def _to_table_path(table: dict) -> Ostr:
-    if table.get("name"):
-        return f"{table['catalog_name']}.{table['schema_name']}.{table['name']}"
-    return None
+        source_table = link["source_table_full_name"]
+        source_column = link["source_column_name"]
+        if not (source_table and source_column):
+            return None
 
+        return f"{source_table}.{source_column}"
 
-def _to_column_path(column: dict) -> Ostr:
-    if column.get("name"):
-        return f"{column['catalog_name']}.{column['schema_name']}.{column['table_name']}.{column['name']}"
-    return None
+    def _child_path(self, link) -> Optional[str]:
+        if self.lineage_entity == LineageEntity.TABLE:
+            return link["target_table_full_name"]
 
+        target_table = link["target_table_full_name"]
+        target_column = link["target_column_name"]
+        if not (target_table and target_column):
+            return None
 
-def _link(path_from: Ostr, path_to: Ostr, timestamp: Ostr) -> OTimestampedLink:
-    """exclude missing path and self-lineage"""
-    if (not path_from) or (not path_to):
-        return None
-    is_self_lineage = path_from.lower() == path_to.lower()
-    if is_self_lineage:
-        return None
-    return path_from, path_to, timestamp
+        return f"{target_table}.{target_column}"
 
+    def add(self, link: dict) -> None:
+        """
+        If the parent and child paths are valid, keeps the most recent lineage
+        link in the `self.lineage` map.
+        """
+        parent = self._parent_path(link)
+        child = self._child_path(link)
+        timestamp = link["event_time"]
 
-def single_table_lineage_links(
-    table_path: str, single_table_lineage: dict
-) -> list[TimestampedLink]:
-    """
-    process databricks lineage API response for a given table
-    returns a list of (parent, child, timestamp)
-
-    Note: in `upstreams` or `downstreams` we only care about `tableInfo`,
-    we could also have `notebookInfos` or `fileInfo`
-    """
-    links: list[OTimestampedLink] = []
-    # add parent:
-    for link in single_table_lineage.get("upstreams", []):
-        parent = link.get("tableInfo", {})
-        parent_path = _to_table_path(parent)
-        timestamp: Ostr = parent.get("lineage_timestamp")
-        links.append(_link(parent_path, table_path, timestamp))
-
-    # add children:
-    for link in single_table_lineage.get("downstreams", []):
-        child = link.get("tableInfo", {})
-        child_path = _to_table_path(child)
-        timestamp = child.get("lineage_timestamp")
-        links.append(_link(table_path, child_path, timestamp))
-
-    return list(filter(None, links))
-
-
-def single_column_lineage_links(
-    column_path: str, single_column_lineage: dict
-) -> list[TimestampedLink]:
-    """
-    process databricks lineage API response for a given table
-    returns a list of (parent, child, timestamp)
-
-    Note: in `upstreams` or `downstreams` we only care about `tableInfo`,
-    we could also have `notebookInfos` or `fileInfo`
-    """
-    links: list[OTimestampedLink] = []
-    # add parent:
-    for link in single_column_lineage.get("upstream_cols", []):
-        parent_path = _to_column_path(link)
-        timestamp: Ostr = link.get("lineage_timestamp")
-        links.append(_link(parent_path, column_path, timestamp))
+        if not (parent and child and parent != child):
+            return
 
-    # add children:
-    for link in single_column_lineage.get("downstream_cols", []):
-        child_path = _to_column_path(link)
-        timestamp = link.get("lineage_timestamp")
-        links.append(_link(column_path, child_path, timestamp))
+        key = (parent, child)
+        if key in self.lineage and self.lineage[key]["event_time"] > timestamp:
+            return
 
-    return list(filter(None, links))
+        self.lineage[key] = link
 
 
-def paths_for_column_lineage(
-    tables: list[dict], columns: list[dict], table_lineage: list[dict]
-) -> list[tuple[str, str]]:
+def valid_lineage(
+    lineage: Iterable[dict], lineage_entity: LineageEntity
+) -> list[dict]:
     """
-    helper providing a list of candidate columns to look lineage for:
-    we only look for column lineage where there is table lineage
+    Filters out self-lineage or lineage with a missing source or target path,
+    then deduplicates by picking the link with the most recent event timestamp.
     """
-    # mapping between table id and its path db.schema.table
-    # table["schema_id"] follows the pattern `db.schema`
-    mapping = {
-        table["id"]: ".".join([table["schema_id"], table["table_name"]])
-        for table in tables
-    }
-
-    tables_with_lineage: set[str] = set()
-    for t in table_lineage:
-        tables_with_lineage.add(t["parent_path"])
-        tables_with_lineage.add(t["child_path"])
-
-    paths_to_return: list[tuple[str, str]] = []
-    for column in columns:
-        table_path = mapping[column["table_id"]]
-        if table_path not in tables_with_lineage:
-            continue
-        column_ = (table_path, column["column_name"])
-        paths_to_return.append(column_)
-
-    return paths_to_return
-
-
-def deduplicate_lineage(lineages: list[TimestampedLink]) -> dict:
-    deduplicated_lineage = LineageLinks()
-    for timestamped_link in lineages:
-        deduplicated_lineage.add(timestamped_link)
-    return deduplicated_lineage.lineage
+    deduplicated_lineage = LineageProcessor(lineage_entity)
+
+    for link in lineage:
+        deduplicated_lineage.add(link)
+
+    return list(deduplicated_lineage.lineage.values())
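
For illustration, here is how the new `valid_lineage` helper behaves on hand-written table-lineage rows (sample data, not real extractor output): links with a missing endpoint and self-links are dropped, and duplicate edges keep the row with the most recent `event_time`.

from castor_extractor.warehouse.databricks.enums import LineageEntity
from castor_extractor.warehouse.databricks.lineage import valid_lineage

edge = {
    "source_table_full_name": "dev.bronze.a",
    "target_table_full_name": "dev.silver.a",
}
rows = [
    # duplicate edge: only the most recent event survives
    {**edge, "event_time": "2024-12-01T00:00:00"},
    {**edge, "event_time": "2025-01-05T00:00:00"},
    # self-lineage: dropped
    {
        "source_table_full_name": "dev.silver.a",
        "target_table_full_name": "dev.silver.a",
        "event_time": "2025-01-05T00:00:00",
    },
    # missing source path: dropped
    {
        "source_table_full_name": None,
        "target_table_full_name": "dev.silver.a",
        "event_time": "2025-01-05T00:00:00",
    },
]

links = valid_lineage(rows, LineageEntity.TABLE)
assert len(links) == 1
assert links[0]["event_time"] == "2025-01-05T00:00:00"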