castor-extractor 0.24.32__py3-none-any.whl → 0.24.34__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of castor-extractor might be problematic.
- CHANGELOG.md +10 -2
- castor_extractor/commands/extract_sqlserver.py +12 -0
- castor_extractor/commands/extract_tableau.py +15 -0
- castor_extractor/utils/__init__.py +7 -1
- castor_extractor/utils/collection.py +26 -0
- castor_extractor/utils/collection_test.py +31 -1
- castor_extractor/visualization/looker_studio/client/queries/query.sql +1 -0
- castor_extractor/visualization/tableau/client/client.py +9 -1
- castor_extractor/visualization/tableau/client/client_metadata_api.py +49 -11
- castor_extractor/visualization/tableau/extract.py +4 -0
- castor_extractor/warehouse/sqlserver/client.py +15 -0
- castor_extractor/warehouse/sqlserver/extract.py +8 -2
- castor_extractor/warehouse/sqlserver/queries/column.sql +10 -10
- castor_extractor/warehouse/sqlserver/queries/database.sql +1 -1
- castor_extractor/warehouse/sqlserver/queries/schema.sql +5 -6
- castor_extractor/warehouse/sqlserver/queries/table.sql +12 -14
- castor_extractor/warehouse/sqlserver/query.py +30 -1
- {castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/METADATA +11 -3
- {castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/RECORD +22 -22
- {castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/LICENCE +0 -0
- {castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/WHEEL +0 -0
- {castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/entry_points.txt +0 -0
CHANGELOG.md
CHANGED
@@ -1,12 +1,20 @@
 # Changelog
 
+## 0.24.34 - 2025-07-02
+
+* SQLServer: multiple databases
+
+## 0.24.33 - 2025-07-10
+
+* Tableau - Add an option to skip fields ingestion
+
 ## 0.24.32 - 2025-07-02
 
 * Salesforce reporting - extract report's metadata
-
+
 ## 0.24.31 - 2025-07-02
 
-* Looker Studio: add option to list users via a provided JSON file
+* Looker Studio: add an option to list users via a provided JSON file
 
 ## 0.24.30 - 2025-06-26
 
castor_extractor/commands/extract_sqlserver.py
CHANGED

@@ -23,6 +23,16 @@ def main():
         action="store_true",
         help="Skips files already extracted instead of replacing them",
     )
+    parser.add_argument(
+        "--db-allowed",
+        nargs="*",
+        help="List of databases that should be extracted",
+    )
+    parser.add_argument(
+        "--db-blocked",
+        nargs="*",
+        help="List of databases that should not be extracted",
+    )
     parser.set_defaults(skip_existing=False)
 
     args = parser.parse_args()

@@ -35,4 +45,6 @@ def main():
         password=args.password,
         output_directory=args.output,
         skip_existing=args.skip_existing,
+        db_allowed=args.db_allowed,
+        db_blocked=args.db_blocked,
     )
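The two new options are plain argparse flags taking zero or more database names. A minimal, self-contained sketch of the filtering they enable (the variable names below are illustrative and not the package's CLI entry point):

    # Hypothetical illustration: argparse collects the names, and the extractor
    # later keeps or drops databases accordingly (see filter_items further down).
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("--db-allowed", nargs="*")
    parser.add_argument("--db-blocked", nargs="*")

    args = parser.parse_args(["--db-blocked", "staging", "tempdb"])
    databases = ["prod", "staging", "tempdb", "analytics"]

    blocked = set(args.db_blocked or [])
    print([db for db in databases if db not in blocked])  # ['prod', 'analytics']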
castor_extractor/commands/extract_tableau.py
CHANGED

@@ -28,6 +28,13 @@ def main():
         help="Option to avoid extracting Tableau columns, default to False",
     )
 
+    parser.add_argument(
+        "--skip-fields",
+        dest="skip_fields",
+        action="store_true",
+        help="Option to avoid extracting Tableau fields, default to False",
+    )
+
     parser.add_argument(
         "--with-pulse",
         dest="with_pulse",

@@ -41,6 +48,14 @@ def main():
         required=False,
     )
 
+    parser.add_argument(
+        "-ie",
+        "--ignore-errors",
+        action="store_true",
+        dest="ignore_errors",
+        help="Allow partial extraction of Fields and Columns: skip batch in case of Timeout errors",
+    )
+
     parser.add_argument("-o", "--output", help="Directory to write to")
 
     tableau.extract_all(**parse_filled_arguments(parser))
castor_extractor/utils/__init__.py
CHANGED

@@ -19,7 +19,13 @@ from .client import (
     handle_response,
     uri_encode,
 )
-from .collection import
+from .collection import (
+    deduplicate,
+    empty_iterator,
+    filter_items,
+    group_by,
+    mapping_from_rows,
+)
 from .constants import OUTPUT_DIR
 from .deprecate import deprecate_python
 from .env import from_env
castor_extractor/utils/collection.py
CHANGED

@@ -2,6 +2,8 @@ from collections import defaultdict
 from collections.abc import Iterable, Sequence
 from typing import (
     Any,
+    List,
+    Optional,
     TypeVar,
 )
 

@@ -80,3 +82,27 @@ def deduplicate(
         deduplicated.append(element)
 
     return deduplicated
+
+
+def filter_items(
+    items: Iterable[T],
+    allowed: Optional[Iterable[T]] = None,
+    blocked: Optional[Iterable[T]] = None,
+) -> List[T]:
+    """
+    Filters `items` by excluding any in `blocked` or including only those in `allowed`.
+    If both `allowed` and `blocked` are None, returns all items.
+    If both are provided, raise an error.
+    """
+    items = list(items)
+
+    if allowed and blocked:
+        raise AttributeError(
+            "Only one of `allowed` and `blocked` can be provided"
+        )
+    if blocked:
+        return [item for item in items if item not in blocked]
+    if allowed:
+        return [item for item in items if item in allowed]
+
+    return items
castor_extractor/utils/collection_test.py
CHANGED

@@ -1,4 +1,6 @@
-
+import pytest
+
+from .collection import deduplicate, filter_items, mapping_from_rows
 
 
 def test__mapping_from_rows__basic_mapping():

@@ -72,3 +74,31 @@ def test_deduplicate():
         {"id": "2", "name": "duplicate"},
     ]
     assert deduplicate("id", elements) == [e1, e2, e3]
+
+
+def test_sqlserver_databases():
+    databases = [
+        "prod",
+        "staging",
+        "test",
+    ]
+
+    # 1. No allowed or blocked: should return all
+    result1 = filter_items(databases)
+    assert result1 == [
+        "prod",
+        "staging",
+        "test",
+    ]
+
+    # 2. Block "prod": only staging and test should remain
+    result2 = filter_items(databases, blocked=["prod"])
+    assert result2 == ["staging", "test"]
+
+    # 3. Only allow "staging" and "test"
+    result3 = filter_items(databases, allowed=["staging", "test"])
+    assert result3 == ["staging", "test"]
+
+    # 4. allowed and blocked, should raise
+    with pytest.raises(AttributeError):
+        filter_items(databases, blocked=["prod"], allowed=["staging", "test"])
castor_extractor/visualization/tableau/client/client.py
CHANGED

@@ -122,13 +122,17 @@ class TableauClient:
         credentials: TableauCredentials,
         timeout_sec: int = DEFAULT_TIMEOUT_SECONDS,
         with_columns: bool = True,
+        with_fields: bool = True,
         with_pulse: bool = False,
         override_page_size: Optional[int] = None,
+        ignore_errors: bool = False,
     ):
         self._credentials = credentials
         self._server = _server(credentials.server_url, timeout_sec)
         self._with_columns = with_columns
+        self._with_fields = with_fields
         self._with_pulse = with_pulse
+        self._ignore_errors = ignore_errors
 
         self._client_metadata = TableauClientMetadataApi(
             server=self._server,

@@ -221,6 +225,10 @@ class TableauClient:
             logger.info(f"Skipping asset {asset} - deactivated columns")
             return []
 
+        if asset == TableauAsset.FIELD and not self._with_fields:
+            logger.info(f"Skipping asset {asset} - deactivated fields")
+            return []
+
         logger.info(f"Extracting {asset.name}...")
 
         if asset == TableauAsset.DATASOURCE:

@@ -240,4 +248,4 @@ class TableauClient:
             return self._client_rest.fetch(asset)
 
         # other assets can be extracted via Metadata API
-        return self._client_metadata.fetch(asset)
+        return self._client_metadata.fetch(asset, self._ignore_errors)
castor_extractor/visualization/tableau/client/client_metadata_api.py
CHANGED

@@ -2,6 +2,7 @@ import logging
 from collections.abc import Iterator
 from typing import Optional
 
+import requests
 import tableauserverclient as TSC  # type: ignore
 
 from ....utils import SerializedAsset, retry

@@ -12,6 +13,13 @@ from .gql_queries import FIELDS_QUERIES, GQL_QUERIES, QUERY_TEMPLATE
 
 logger = logging.getLogger(__name__)
 
+# These assets are known to be error-prone, so it's acceptable if a few are missed.
+# If errors occur, skip the current batch.
+_SAFE_MODE_ASSETS = (
+    TableauAsset.COLUMN,
+    TableauAsset.FIELD,
+)
+
 # increase the value when extraction is too slow
 # decrease the value when timeouts arise
 _CUSTOM_PAGE_SIZE: dict[TableauAsset, int] = {

@@ -92,6 +100,7 @@ def gql_query_scroll(
     resource: str,
     fields: str,
     page_size: int,
+    skip_batch: bool,
 ) -> Iterator[SerializedAsset]:
     """
     Iterate over GQL query results, handling pagination and cursor

@@ -119,15 +128,22 @@ def gql_query_scroll(
 
     current_offset = 0
     while True:
-
-
+        try:
+            payload = _call(first=page_size, offset=current_offset)
+            yield payload["nodes"]
+
+            current_offset += len(payload["nodes"])
+            total = payload["totalCount"]
+            logger.info(f"Extracted {current_offset}/{total} {resource}")
 
-
-
-
+            if not payload["pageInfo"]["hasNextPage"]:
+                break
+        except requests.exceptions.ReadTimeout:
+            if not skip_batch:
+                raise
 
-
-
+            logger.warning("Skipping batch because of TableauServer Timeout")
+            current_offset += page_size
 
 
 def _deduplicate(result_pages: Iterator[SerializedAsset]) -> SerializedAsset:

@@ -177,12 +193,14 @@ class TableauClientMetadataApi:
         resource: str,
         fields: str,
         page_size: int = DEFAULT_PAGE_SIZE,
+        skip_batch: bool = False,
     ) -> SerializedAsset:
         result_pages = gql_query_scroll(
             self._server,
             resource=resource,
             fields=fields,
             page_size=page_size,
+            skip_batch=skip_batch,
         )
         return _deduplicate(result_pages)
 

@@ -193,21 +211,41 @@ class TableauClientMetadataApi:
             or DEFAULT_PAGE_SIZE
         )
 
-    def _fetch_fields(self) -> SerializedAsset:
+    def _fetch_fields(self, skip_batch: bool = False) -> SerializedAsset:
         result: SerializedAsset = []
         page_size = self._page_size(TableauAsset.FIELD)
         for resource, fields in FIELDS_QUERIES:
-            current = self._call(
+            current = self._call(
+                resource,
+                fields,
+                page_size,
+                skip_batch=skip_batch,
+            )
             result.extend(current)
         return result
 
+    @staticmethod
+    def _should_skip_batch_with_timeout(
+        asset: TableauAsset,
+        ignore_metadata_errors: bool = False,
+    ) -> bool:
+        return asset in _SAFE_MODE_ASSETS and ignore_metadata_errors
+
     def fetch(
         self,
         asset: TableauAsset,
+        ignore_errors: bool = False,
     ) -> SerializedAsset:
+        skip_batch = self._should_skip_batch_with_timeout(asset, ignore_errors)
+
         if asset == TableauAsset.FIELD:
-            return self._fetch_fields()
+            return self._fetch_fields(skip_batch=skip_batch)
 
         page_size = self._page_size(asset)
         resource, fields = GQL_QUERIES[asset]
-        return self._call(
+        return self._call(
+            resource=resource,
+            fields=fields,
+            page_size=page_size,
+            skip_batch=skip_batch,
+        )
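The net effect of the `skip_batch` flag is a softer failure mode for the pagination loop: on a read timeout the offset is advanced by a full page so extraction continues past the problematic batch instead of aborting. A self-contained sketch of that pattern, assuming a stand-in `fetch_page` callable and a local exception class rather than the package's actual API:

    import logging
    from typing import Callable, Iterator

    logger = logging.getLogger(__name__)


    class ReadTimeout(Exception):
        """Stand-in for requests.exceptions.ReadTimeout."""


    def scroll(
        fetch_page: Callable[[int, int], dict],
        page_size: int,
        skip_batch: bool,
    ) -> Iterator[list]:
        # Paginate until the server reports no next page; optionally skip a
        # batch that times out instead of failing the whole extraction.
        offset = 0
        while True:
            try:
                payload = fetch_page(page_size, offset)
                yield payload["nodes"]
                offset += len(payload["nodes"])
                if not payload["pageInfo"]["hasNextPage"]:
                    break
            except ReadTimeout:
                if not skip_batch:
                    raise
                logger.warning("Skipping batch because of timeout")
                offset += page_size  # jump over the failing window and keep going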
castor_extractor/visualization/tableau/extract.py
CHANGED

@@ -33,16 +33,20 @@ def extract_all(**kwargs) -> None:
     """
     output_directory = kwargs.get("output") or from_env(OUTPUT_DIR)
     with_columns = not kwargs.get("skip_columns")
+    with_fields = not kwargs.get("skip_fields")
     with_pulse = kwargs.get("with_pulse") or False
     page_size = kwargs.get("page_size")
+    ignore_errors = kwargs.get("ignore_errors") or False
     timestamp = current_timestamp()
 
     credentials = TableauCredentials(**kwargs)
     client = TableauClient(
         credentials,
         with_columns=with_columns,
+        with_fields=with_fields,
         with_pulse=with_pulse,
         override_page_size=page_size,
+        ignore_errors=ignore_errors,
     )
     client.login()
 
castor_extractor/warehouse/sqlserver/client.py
CHANGED

@@ -1,15 +1,20 @@
+import logging
 from collections.abc import Iterator
 
 from sqlalchemy import text
 
 from ...utils import ExtractionQuery, SqlalchemyClient, uri_encode
 
+logger = logging.getLogger(__name__)
+
 SERVER_URI = "{user}:{password}@{host}:{port}/{database}"
 MSSQL_URI = f"mssql+pymssql://{SERVER_URI}"
 DEFAULT_PORT = 1433
 
 _KEYS = ("user", "password", "host", "port", "database")
 
+_SYSTEM_DATABASES = ("master", "model", "msdb", "tempdb", "DBAdmin")
+
 
 def _check_key(credentials: dict) -> None:
     for key in _KEYS:

@@ -51,3 +56,13 @@ class MSSQLClient(SqlalchemyClient):
             yield from results
         finally:
             self.close()
+
+    def get_databases(self) -> list[str]:
+        result = self.execute(
+            ExtractionQuery("SELECT name FROM sys.databases", {})
+        )
+        return [
+            row["name"]
+            for row in result
+            if row["name"] not in _SYSTEM_DATABASES
+        ]
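The system catalogs are excluded up front, so only user databases flow into the per-database extraction. In isolation the filtering is just a membership test; a rough illustration with hypothetical row data:

    _SYSTEM_DATABASES = ("master", "model", "msdb", "tempdb", "DBAdmin")

    def user_databases(rows: list[dict]) -> list[str]:
        # rows shaped like the output of `SELECT name FROM sys.databases`
        return [row["name"] for row in rows if row["name"] not in _SYSTEM_DATABASES]

    print(user_databases([{"name": "master"}, {"name": "sales"}, {"name": "tempdb"}]))
    # ['sales']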
castor_extractor/warehouse/sqlserver/extract.py
CHANGED

@@ -1,6 +1,6 @@
 import logging
 
-from ...utils import LocalStorage, from_env, write_summary
+from ...utils import LocalStorage, filter_items, from_env, write_summary
 from ..abstract import (
     CATALOG_ASSETS,
     EXTERNAL_LINEAGE_ASSETS,

@@ -51,7 +51,13 @@ def extract_all(**kwargs) -> None:
 
     client = MSSQLClient(credentials=_credentials(kwargs))
 
-
+    databases = filter_items(
+        client.get_databases(), kwargs.get("allowed"), kwargs.get("blocked")
+    )
+
+    query_builder = MSSQLQueryBuilder(
+        databases=databases,
+    )
 
     storage = LocalStorage(directory=output_directory)
 
castor_extractor/warehouse/sqlserver/queries/column.sql
CHANGED

@@ -11,7 +11,7 @@ WITH extended_tables AS (
         table_owner_id = principal_id,
         schema_id
     FROM
-        sys.tables
+        {database}.sys.tables
 
     UNION
 

@@ -21,7 +21,7 @@ WITH extended_tables AS (
         table_owner_id = principal_id,
         schema_id
     FROM
-        sys.views
+        {database}.sys.views
 
     UNION
 

@@ -31,7 +31,7 @@ WITH extended_tables AS (
         table_owner_id = principal_id,
         schema_id
     FROM
-        sys.external_tables
+        {database}.sys.external_tables
 ),
 /*
 `sys.columns` contains, among others:

@@ -54,11 +54,11 @@ column_ids AS (
         schema_name = ss.name,
         schema_id = ss.schema_id,
         comment = CONVERT(varchar(1024), ep.value)
-    FROM sys.columns AS sc
+    FROM {database}.sys.columns AS sc
     LEFT JOIN extended_tables AS et ON sc.object_id = et.table_id
-    LEFT JOIN sys.schemas AS ss ON et.schema_id = ss.schema_id
-    LEFT JOIN sys.databases AS sd ON sd.name =
-    LEFT JOIN sys.extended_properties AS ep
+    LEFT JOIN {database}.sys.schemas AS ss ON et.schema_id = ss.schema_id
+    LEFT JOIN {database}.sys.databases AS sd ON sd.name = '{database}'
+    LEFT JOIN {database}.sys.extended_properties AS ep
         ON
             sc.object_id = ep.major_id
             AND sc.column_id = ep.minor_id

@@ -70,9 +70,9 @@ columns AS (
         i.database_name,
         i.database_id,
         schema_name = c.table_schema,
-        i.schema_id,
+        schema_id = CAST(i.database_id AS VARCHAR(10)) + '_' + CAST(i.schema_id AS VARCHAR(10)),
         table_name = c.table_name,
-        i.table_id,
+        table_id = CAST(i.database_id AS VARCHAR(10)) + '_' + CAST(i.schema_id AS VARCHAR(10)) + '_' + CAST(i.table_id AS VARCHAR(10)),
         c.column_name,
         c.data_type,
         c.ordinal_position,

@@ -87,7 +87,7 @@ columns AS (
         i.comment,
         column_id = CONCAT(i.table_id, '.', c.column_name)
     FROM
-        information_schema.columns AS c
+        {database}.information_schema.columns AS c
     LEFT JOIN column_ids AS i
         ON
             (
castor_extractor/warehouse/sqlserver/queries/schema.sql
CHANGED

@@ -3,21 +3,20 @@ WITH ids AS (
     SELECT DISTINCT
         table_catalog,
         table_schema
-    FROM information_schema.tables
-    WHERE table_catalog = DB_NAME()
+    FROM {database}.information_schema.tables
 )
 
 SELECT
     d.database_id,
     database_name = i.table_catalog,
     schema_name = s.name,
-    s.schema_id,
+    schema_id = CAST(d.database_id AS VARCHAR(10)) + '_' + CAST(s.schema_id AS VARCHAR(10)),
     schema_owner = u.name,
     schema_owner_id = u.uid
-FROM sys.schemas AS s
+FROM {database}.sys.schemas AS s
 INNER JOIN ids AS i
     ON s.name = i.table_schema
-LEFT JOIN sys.sysusers AS u
+LEFT JOIN {database}.sys.sysusers AS u
     ON s.principal_id = u.uid
-LEFT JOIN sys.databases AS d
+LEFT JOIN {database}.sys.databases AS d
     ON i.table_catalog = d.name
castor_extractor/warehouse/sqlserver/queries/table.sql
CHANGED

@@ -11,7 +11,7 @@ WITH extended_tables AS (
         table_owner_id = principal_id,
         schema_id
     FROM
-        sys.tables
+        {database}.sys.tables
 
     UNION
 

@@ -21,7 +21,7 @@ WITH extended_tables AS (
         table_owner_id = principal_id,
         schema_id
     FROM
-        sys.views
+        {database}.sys.views
 
     UNION
 

@@ -31,14 +31,14 @@ WITH extended_tables AS (
         table_owner_id = principal_id,
         schema_id
     FROM
-        sys.external_tables
+        {database}.sys.external_tables
 ),
 -- Get the row count per table
 partitions AS (
     SELECT
         object_id,
         row_count = SUM(rows)
-    FROM sys.partitions
+    FROM {database}.sys.partitions
     GROUP BY object_id
 ),
 -- Append row count to table properties

@@ -69,13 +69,12 @@ table_ids AS (
         table_owner = u.name,
         row_count,
         comment = CONVERT(varchar(1024), ep.value)
-    FROM
-
-    LEFT JOIN sys.schemas AS ss
+    FROM extended_tables_with_row_count AS et
+    LEFT JOIN {database}.sys.schemas AS ss
         ON et.schema_id = ss.schema_id
-    LEFT JOIN sys.sysusers AS u
+    LEFT JOIN {database}.sys.sysusers AS u
         ON et.table_owner_id = u.uid
-    LEFT JOIN sys.extended_properties AS ep
+    LEFT JOIN {database}.sys.extended_properties AS ep
         ON (
             et.table_id = ep.major_id
             AND ep.minor_id = 0

@@ -91,19 +90,18 @@ meta AS (
         t.table_name,
         t.table_type
     FROM
-        information_schema.tables AS t
-    LEFT JOIN sys.databases AS db
+        {database}.information_schema.tables AS t
+    LEFT JOIN {database}.sys.databases AS db
         ON t.table_catalog = db.name
-    WHERE t.table_catalog = db_name()
 )
 
 SELECT
     m.database_name,
     m.database_id,
     m.schema_name,
-    i.schema_id,
+    schema_id = CAST(m.database_id AS VARCHAR(10)) + '_' + CAST(i.schema_id AS VARCHAR(10)),
     m.table_name,
-    i.table_id,
+    table_id = CAST(m.database_id AS VARCHAR(10)) + '_' + CAST(i.schema_id AS VARCHAR(10)) + '_' + CAST(i.table_id AS VARCHAR(10)),
     m.table_type,
     i.table_owner,
     i.table_owner_id,
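Because schema_id and table_id values from the system catalogs are only unique within a single database, the queries above now derive composite identifiers by concatenating the database id into them. The new keys are roughly equivalent to this Python helper (names here are illustrative):

    def composite_ids(database_id: int, schema_id: int, table_id: int) -> tuple[str, str]:
        # Mirrors the CAST(... AS VARCHAR) + '_' concatenation used in
        # schema.sql, table.sql and column.sql above.
        new_schema_id = f"{database_id}_{schema_id}"
        new_table_id = f"{database_id}_{schema_id}_{table_id}"
        return new_schema_id, new_table_id

    print(composite_ids(5, 1, 245575913))  # ('5_1', '5_1_245575913')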
castor_extractor/warehouse/sqlserver/query.py
CHANGED

@@ -1,3 +1,4 @@
+import logging
 from typing import Optional
 
 from ..abstract import (

@@ -7,6 +8,15 @@ from ..abstract import (
     WarehouseAsset,
 )
 
+logger = logging.getLogger(__name__)
+
+
+_DATABASE_REQUIRED = (
+    WarehouseAsset.SCHEMA,
+    WarehouseAsset.TABLE,
+    WarehouseAsset.COLUMN,
+)
+
 
 class MSSQLQueryBuilder(AbstractQueryBuilder):
     """

@@ -15,10 +25,29 @@ class MSSQLQueryBuilder(AbstractQueryBuilder):
 
     def __init__(
         self,
+        databases: list[str],
         time_filter: Optional[TimeFilter] = None,
     ):
         super().__init__(time_filter=time_filter)
+        self._databases = databases
+
+    @staticmethod
+    def _format(query: ExtractionQuery, values: dict) -> ExtractionQuery:
+        return ExtractionQuery(
+            statement=query.statement.format(**values),
+            params=query.params,
+        )
 
     def build(self, asset: WarehouseAsset) -> list[ExtractionQuery]:
         query = self.build_default(asset)
-
+
+        if asset not in _DATABASE_REQUIRED:
+            return [query]
+
+        logger.info(
+            f"\tWill run queries with following database params: {self._databases}",
+        )
+        return [
+            self._format(query, {"database": database})
+            for database in self._databases
+        ]
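The per-database fan-out works by string-formatting the `{database}` placeholder that the SQL files now carry, producing one query per database for schema, table and column assets. A rough sketch of the mechanism, using a simplified stand-in for ExtractionQuery:

    from dataclasses import dataclass


    @dataclass
    class Query:  # simplified stand-in for ExtractionQuery
        statement: str
        params: dict


    template = Query("SELECT name FROM {database}.sys.schemas", {})
    databases = ["sales", "finance"]

    # One formatted statement per database, mirroring MSSQLQueryBuilder.build
    queries = [
        Query(template.statement.format(database=db), template.params)
        for db in databases
    ]
    for q in queries:
        print(q.statement)
    # SELECT name FROM sales.sys.schemas
    # SELECT name FROM finance.sys.schemas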
{castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: castor-extractor
-Version: 0.24.32
+Version: 0.24.34
 Summary: Extract your metadata assets.
 Home-page: https://www.castordoc.com/
 License: EULA

@@ -215,13 +215,21 @@ For any questions or bug report, contact us at [support@coalesce.io](mailto:support@coalesce.io)
 
 # Changelog
 
+## 0.24.34 - 2025-07-02
+
+* SQLServer: multiple databases
+
+## 0.24.33 - 2025-07-10
+
+* Tableau - Add an option to skip fields ingestion
+
 ## 0.24.32 - 2025-07-02
 
 * Salesforce reporting - extract report's metadata
-
+
 ## 0.24.31 - 2025-07-02
 
-* Looker Studio: add option to list users via a provided JSON file
+* Looker Studio: add an option to list users via a provided JSON file
 
 ## 0.24.30 - 2025-06-26
 
{castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-CHANGELOG.md,sha256=
+CHANGELOG.md,sha256=4PQMZjH-5BKSERREUHivWM7KKl_PpIDieFYH2PeRmGQ,18840
 Dockerfile,sha256=xQ05-CFfGShT3oUqaiumaldwA288dj9Yb_pxofQpufg,301
 DockerfileUsage.md,sha256=2hkJQF-5JuuzfPZ7IOxgM6QgIQW7l-9oRMFVwyXC4gE,998
 LICENCE,sha256=sL-IGa4hweyya1HgzMskrRdybbIa2cktzxb5qmUgDg8,8254

@@ -24,9 +24,9 @@ castor_extractor/commands/extract_salesforce.py,sha256=3j3YTmMkPAwocR-B1ozJQai0U
 castor_extractor/commands/extract_salesforce_reporting.py,sha256=FdANTNiLkIPdm80XMYxWReHjdycLsIa61pyeCD-sUDk,962
 castor_extractor/commands/extract_sigma.py,sha256=sxewHcZ1Doq35V2qnpX_zCKKXkrb1_9bYjUMg7BOW-k,643
 castor_extractor/commands/extract_snowflake.py,sha256=GwlrRxwEBjHqGs_3bs5vM9fzmv61_iwvBr1KcIgFgWM,2161
-castor_extractor/commands/extract_sqlserver.py,sha256
+castor_extractor/commands/extract_sqlserver.py,sha256=-20AlQbJ4W3oQytHLKdN8GX__UkrrQukOgSzy2l1WZY,1483
 castor_extractor/commands/extract_strategy.py,sha256=Q-pUymatPrBFGXobhyUPzFph0-t774-XOpjdCFF1dYo,821
-castor_extractor/commands/extract_tableau.py,sha256=
+castor_extractor/commands/extract_tableau.py,sha256=LNtI29LbVk1vp4RNrn89GmdW6R_7QBYunRmkowDhbco,1982
 castor_extractor/commands/extract_thoughtspot.py,sha256=caAYJlH-vK7u5IUB6OKXxcaWfLgc7d_XqnFDWK6YNS4,639
 castor_extractor/commands/file_check.py,sha256=TJx76Ymd0QCECmq35zRJMkPE8DJtSInB28MuSXWk8Ao,2644
 castor_extractor/commands/upload.py,sha256=rLXp7gQ8zb1kLbho4FT87q8eJd8Gvo_TkyIynAaQ-4s,1342

@@ -96,7 +96,7 @@ castor_extractor/uploader/settings.py,sha256=3MvOX-UFRqrLZoiT7wYn9jUGro7NX4RCafY
 castor_extractor/uploader/upload.py,sha256=PSQfkO_7LSE0WBo9Tm_hlS2ONepKeB0cBFdJXySnues,4310
 castor_extractor/uploader/upload_test.py,sha256=7fwstdQe7FjuwGilsCdFpEQr1qLoR2WTRUzyy93fISw,402
 castor_extractor/uploader/utils.py,sha256=otAaySj5aeem6f0CTd0Te6ioJ6uP2J1p348j-SdIwDI,802
-castor_extractor/utils/__init__.py,sha256=
+castor_extractor/utils/__init__.py,sha256=z_BdKTUyuug3I5AzCuSGrAVskfLax4_olfORIjhZw_M,1691
 castor_extractor/utils/argument_parser.py,sha256=S4EcIh3wNDjs3fOrQnttCcPsAmG8m_Txl7xvEh0Q37s,283
 castor_extractor/utils/argument_parser_test.py,sha256=wnyLFJ74iEiPxxLSbwFtckR7FIHxsFOVU38ljs9gqRA,633
 castor_extractor/utils/batch.py,sha256=SFlLmJgVjV2nVhIrjVIEp8wJ9du4dKKHq8YVYubnwQQ,448

@@ -118,8 +118,8 @@ castor_extractor/utils/client/postgres.py,sha256=n6ulaT222WWPY0_6qAZ0MHF0m91HtI9
 castor_extractor/utils/client/query.py,sha256=O6D5EjD1KmBlwa786Uw4D4kzxx97_HH50xIIeSWt0B8,205
 castor_extractor/utils/client/uri.py,sha256=jmP9hY-6PRqdc3-vAOdtll_U6q9VCqSqmBAN6QRs3ZI,150
 castor_extractor/utils/client/uri_test.py,sha256=1XKF6qSseCeD4G4ckaNO07JXfGbt7XUVinOZdpEYrDQ,259
-castor_extractor/utils/collection.py,sha256=
-castor_extractor/utils/collection_test.py,sha256=
+castor_extractor/utils/collection.py,sha256=g2HmB0ievvYHWaZ8iEzkcPPkrBFsh6R6b_liBqcsMjc,3044
+castor_extractor/utils/collection_test.py,sha256=mlw33u4VidazQwWxJMvaFeYX3VB5CAj6rqRG-cRsLrw,2884
 castor_extractor/utils/constants.py,sha256=qBQprS9U66mS-RIBXiLujdTSV3WvGv40Bc0khP4Abdk,39
 castor_extractor/utils/deprecate.py,sha256=aBIN2QqZUx5CBNZMFfOUhi8QqtPqRcJtmrN6xqfm-y8,805
 castor_extractor/utils/env.py,sha256=TqdtB50U8LE0993WhhEhpy89TJrHbjtIKjvg6KQ-5q0,596

@@ -197,7 +197,7 @@ castor_extractor/visualization/looker_studio/client/endpoints.py,sha256=5eY-ffqN
 castor_extractor/visualization/looker_studio/client/enums.py,sha256=fHgemTaQpnwee8cw1YQVDsVnH--vTyFwT4Px8aVYYHQ,167
 castor_extractor/visualization/looker_studio/client/looker_studio_api_client.py,sha256=Phq378VEaFLD-nyP2_A1wge6HUP45jSthhlNjD7aqSg,4085
 castor_extractor/visualization/looker_studio/client/pagination.py,sha256=9HQ3Rkdiz2VB6AvYtZ0F-WouiD0pMmdZyAmkv-3wh08,783
-castor_extractor/visualization/looker_studio/client/queries/query.sql,sha256=
+castor_extractor/visualization/looker_studio/client/queries/query.sql,sha256=p7fiXu5--BlY1FKnoW2CAQF7kPKjcN1tYf_SwvCZus4,1474
 castor_extractor/visualization/looker_studio/extract.py,sha256=NU48xQ83UtRW3jXKJcvofzqgEM2lHGjtTzjbKOSB50A,4059
 castor_extractor/visualization/looker_studio/extract_test.py,sha256=ZckAxUMuoEjJ9RWkfRvt9M8SxblkQvsq-Grb8GSs-y0,492
 castor_extractor/visualization/metabase/__init__.py,sha256=3E36cmkMyEgBB6Ot5rWk-N75i0G-7k24QTlc-Iol4pM,193

@@ -289,8 +289,8 @@ castor_extractor/visualization/strategy/extract.py,sha256=2fBuvS2xiOGXRpxXnZsE_C
 castor_extractor/visualization/tableau/__init__.py,sha256=eFI_1hjdkxyUiAYiy3szwyuwn3yJ5C_KbpBU0ySJDcQ,138
 castor_extractor/visualization/tableau/assets.py,sha256=HbCRd8VCj1WBEeqg9jwnygnT7xOFJ6PQD7Lq7sV-XR0,635
 castor_extractor/visualization/tableau/client/__init__.py,sha256=P8RKFKOC63WkH5hdEytJOwHS9vzQ8GXreLfXZetmMP8,78
-castor_extractor/visualization/tableau/client/client.py,sha256=
-castor_extractor/visualization/tableau/client/client_metadata_api.py,sha256=
+castor_extractor/visualization/tableau/client/client.py,sha256=QV-GFS4nEq976JLji57pIfsw2ZZaGTvfCFqy6_HOWMg,8204
+castor_extractor/visualization/tableau/client/client_metadata_api.py,sha256=eAq9rjrB_2ZCQy9NwREHBOTXZffWdkwtwhzswm1pEfk,7449
 castor_extractor/visualization/tableau/client/client_metadata_api_test.py,sha256=rikyQKDLFYHLJhHJTF3LwWhKJ80svtTsYp5n7n9oTU8,2665
 castor_extractor/visualization/tableau/client/client_rest_api.py,sha256=x4dNw4PPJdalTlGowwkANwqiS2ZhGxzpQytkHq3KbpY,3988
 castor_extractor/visualization/tableau/client/client_tsc.py,sha256=VI_PJyd1ty3HSYXHHQjshmG2ziowIbrwJRonRPCHbks,1820

@@ -299,7 +299,7 @@ castor_extractor/visualization/tableau/client/errors.py,sha256=ecT8Tit5VtzrOBB9y
 castor_extractor/visualization/tableau/client/gql_queries.py,sha256=XJAfhpMZ5S7-AhfpOaoHMHCAdil-l5e5xB-CH4NC38M,2177
 castor_extractor/visualization/tableau/client/rest_fields.py,sha256=ZKYYuMxg9PXhczVXaD4rXNk7dYyWJ1_bVM8FLEXju7s,888
 castor_extractor/visualization/tableau/constants.py,sha256=lHGB50FgVNO2nXeIhkvQKivD8ZFBIjDrflgD5cTXKJw,104
-castor_extractor/visualization/tableau/extract.py,sha256=
+castor_extractor/visualization/tableau/extract.py,sha256=9mSHFJ2DGlW-cDYiRZlJafAgj4_ObACxO0l9vBBfjUw,1683
 castor_extractor/visualization/thoughtspot/__init__.py,sha256=NhTGUk5Kdt54oCjHYoAt0cLBmVLys5lFYiRANL6wCmI,150
 castor_extractor/visualization/thoughtspot/assets.py,sha256=SAQWPKaD2NTSDg7-GSkcRSSEkKSws0MJfOVcHkdeTSg,276
 castor_extractor/visualization/thoughtspot/client/__init__.py,sha256=svrE2rMxR-OXctjPeAHMEPePlfcra-9KDevTMcHunAA,86

@@ -420,18 +420,18 @@ castor_extractor/warehouse/snowflake/queries/user.sql,sha256=88V8eRj1NDaD_ufclsK
 castor_extractor/warehouse/snowflake/queries/view_ddl.sql,sha256=eWsci_50cxiYIv3N7BKkbXVM3RoIzqSDtohqRnE5kg4,673
 castor_extractor/warehouse/snowflake/query.py,sha256=C2LTdPwBzMQ_zMncg0Kq4_WkoY7K9as5tvxBDrIOlwI,1763
 castor_extractor/warehouse/sqlserver/__init__.py,sha256=PdOuYznmvKAbfWAm8UdN47MfEsd9jqPi_dDi3WEo1KY,116
-castor_extractor/warehouse/sqlserver/client.py,sha256=
-castor_extractor/warehouse/sqlserver/extract.py,sha256
+castor_extractor/warehouse/sqlserver/client.py,sha256=Bjfpw96IKAQfWPiU5SZYEDfetwfkqZrnKbQYoStcnZc,2007
+castor_extractor/warehouse/sqlserver/extract.py,sha256=-LoHY5wAGJk4vutrO3N0_PaRqts7rkEn7pADRHzoxiI,2249
 castor_extractor/warehouse/sqlserver/queries/.sqlfluff,sha256=yy0KQdz8I_67vnXyX8eeWwOWkxTXvHyVKSVwhURktd8,48
-castor_extractor/warehouse/sqlserver/queries/column.sql,sha256=
-castor_extractor/warehouse/sqlserver/queries/database.sql,sha256=
-castor_extractor/warehouse/sqlserver/queries/schema.sql,sha256=
-castor_extractor/warehouse/sqlserver/queries/table.sql,sha256=
+castor_extractor/warehouse/sqlserver/queries/column.sql,sha256=_K5OS63N7fM7kGPudnnjJEnIyaxR1xE2hoZgnJ_A3p8,2763
+castor_extractor/warehouse/sqlserver/queries/database.sql,sha256=4dPeBCn85MEOXr1f-DPXxiI3RvvoE_1n8lsbTs26E0I,150
+castor_extractor/warehouse/sqlserver/queries/schema.sql,sha256=UR3eTiYw7Iq5-GukelnNg_uq6haZ_dwg_SedZfOWUoA,619
+castor_extractor/warehouse/sqlserver/queries/table.sql,sha256=4RgeSkHDWTWRyU2iLxaBR0KuSwIBvb3GbQGdkJYXbn0,2787
 castor_extractor/warehouse/sqlserver/queries/user.sql,sha256=gOrZsMVypusR2dc4vwVs4E1a-CliRsr_UjnD2EbXs-A,94
-castor_extractor/warehouse/sqlserver/query.py,sha256=
+castor_extractor/warehouse/sqlserver/query.py,sha256=7sW8cK3JzxPt6faTJ7e4lk9tE4fo_AeCymI-LqsSols,1276
 castor_extractor/warehouse/synapse/queries/column.sql,sha256=lNcFoIW3Y0PFOqoOzJEXmPvZvfAsY0AP63Mu2LuPzPo,1351
-castor_extractor-0.24.
-castor_extractor-0.24.
-castor_extractor-0.24.
-castor_extractor-0.24.
-castor_extractor-0.24.
+castor_extractor-0.24.34.dist-info/LICENCE,sha256=sL-IGa4hweyya1HgzMskrRdybbIa2cktzxb5qmUgDg8,8254
+castor_extractor-0.24.34.dist-info/METADATA,sha256=-xB8vdjxDHFkDYbyAlL8L-nEbQMqs44GVzN5wgvKfjs,26293
+castor_extractor-0.24.34.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+castor_extractor-0.24.34.dist-info/entry_points.txt,sha256=_F-qeZCybjoMkNb9ErEhnyqXuG6afHIFQhakdBHZsr4,1803
+castor_extractor-0.24.34.dist-info/RECORD,,
{castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/LICENCE
File without changes

{castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/WHEEL
File without changes

{castor_extractor-0.24.32.dist-info → castor_extractor-0.24.34.dist-info}/entry_points.txt
File without changes