semantic-link-labs 0.9.5__py3-none-any.whl → 0.9.7__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- {semantic_link_labs-0.9.5.dist-info → semantic_link_labs-0.9.7.dist-info}/METADATA +8 -5
- {semantic_link_labs-0.9.5.dist-info → semantic_link_labs-0.9.7.dist-info}/RECORD +65 -61
- {semantic_link_labs-0.9.5.dist-info → semantic_link_labs-0.9.7.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +19 -1
- sempy_labs/_ai.py +3 -1
- sempy_labs/_capacities.py +37 -2
- sempy_labs/_capacity_migration.py +11 -14
- sempy_labs/_connections.py +2 -4
- sempy_labs/_dataflows.py +2 -2
- sempy_labs/_dax_query_view.py +57 -0
- sempy_labs/_delta_analyzer.py +16 -14
- sempy_labs/_delta_analyzer_history.py +298 -0
- sempy_labs/_environments.py +8 -1
- sempy_labs/_eventhouses.py +5 -1
- sempy_labs/_external_data_shares.py +4 -10
- sempy_labs/_generate_semantic_model.py +2 -1
- sempy_labs/_graphQL.py +5 -1
- sempy_labs/_helper_functions.py +440 -63
- sempy_labs/_icons.py +6 -6
- sempy_labs/_kql_databases.py +5 -1
- sempy_labs/_list_functions.py +8 -38
- sempy_labs/_managed_private_endpoints.py +9 -2
- sempy_labs/_mirrored_databases.py +3 -1
- sempy_labs/_ml_experiments.py +1 -1
- sempy_labs/_model_bpa.py +2 -11
- sempy_labs/_model_bpa_bulk.py +33 -38
- sempy_labs/_model_bpa_rules.py +1 -1
- sempy_labs/_one_lake_integration.py +2 -1
- sempy_labs/_semantic_models.py +20 -0
- sempy_labs/_sql.py +6 -2
- sempy_labs/_sqldatabase.py +61 -100
- sempy_labs/_vertipaq.py +8 -11
- sempy_labs/_warehouses.py +14 -3
- sempy_labs/_workspace_identity.py +6 -0
- sempy_labs/_workspaces.py +42 -2
- sempy_labs/admin/_basic_functions.py +29 -2
- sempy_labs/admin/_reports.py +1 -1
- sempy_labs/admin/_scanner.py +2 -4
- sempy_labs/admin/_tenant.py +8 -3
- sempy_labs/directlake/_directlake_schema_compare.py +2 -1
- sempy_labs/directlake/_directlake_schema_sync.py +65 -19
- sempy_labs/directlake/_dl_helper.py +0 -6
- sempy_labs/directlake/_generate_shared_expression.py +19 -12
- sempy_labs/directlake/_guardrails.py +2 -1
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +90 -57
- sempy_labs/directlake/_update_directlake_partition_entity.py +5 -2
- sempy_labs/graph/_groups.py +6 -0
- sempy_labs/graph/_teams.py +2 -0
- sempy_labs/graph/_users.py +4 -0
- sempy_labs/lakehouse/__init__.py +12 -3
- sempy_labs/lakehouse/_blobs.py +231 -0
- sempy_labs/lakehouse/_shortcuts.py +29 -8
- sempy_labs/migration/_direct_lake_to_import.py +47 -10
- sempy_labs/migration/_migration_validation.py +0 -4
- sempy_labs/report/__init__.py +4 -0
- sempy_labs/report/_download_report.py +4 -6
- sempy_labs/report/_generate_report.py +6 -6
- sempy_labs/report/_report_functions.py +5 -4
- sempy_labs/report/_report_helper.py +17 -5
- sempy_labs/report/_report_rebind.py +8 -6
- sempy_labs/report/_reportwrapper.py +17 -8
- sempy_labs/report/_save_report.py +147 -0
- sempy_labs/tom/_model.py +154 -23
- {semantic_link_labs-0.9.5.dist-info → semantic_link_labs-0.9.7.dist-info/licenses}/LICENSE +0 -0
- {semantic_link_labs-0.9.5.dist-info → semantic_link_labs-0.9.7.dist-info}/top_level.txt +0 -0
sempy_labs/_icons.py
CHANGED
@@ -1,6 +1,6 @@
-green_dot = "🟢"
-yellow_dot = "🟡"
-red_dot = "🔴"
+green_dot = "\U0001f7e2"
+yellow_dot = "\U0001f7e1"
+red_dot = "\U0001f534"
 in_progress = "⌛"
 checked = "\u2611"
 unchecked = "\u2610"
@@ -8,11 +8,11 @@ start_bold = "\033[1m"
 end_bold = "\033[0m"
 bullet = "\u2022"
 warning = "⚠️"
-error = "❌"
+error = "\u274c"
 info = "ℹ️"
 measure_icon = "\u2211"
-table_icon = "⊞"
-column_icon = "⊟"
+table_icon = "\u229e"
+column_icon = "\u229f"
 model_bpa_name = "ModelBPA"
 report_bpa_name = "ReportBPA"
 severity_mapping = {warning: "Warning", error: "Error", info: "Info"}

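The only change here is swapping literal emoji glyphs for their Unicode escape sequences, which keeps the source file ASCII-safe without changing any rendered value. A quick check in plain Python:

    # The escape sequences decode to the exact glyphs the module used before.
    assert "\U0001f7e2" == "🟢"   # green_dot
    assert "\u274c" == "❌"       # error
    assert "\u229e" == "⊞"        # table_icon
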
sempy_labs/_kql_databases.py
CHANGED
@@ -17,6 +17,8 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
 
     This is a wrapper function for the following API: `Items - List KQL Databases <https://learn.microsoft.com/rest/api/fabric/kqldatabase/items/list-kql-databases>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     workspace : str | uuid.UUID, default=None
@@ -44,7 +46,9 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     responses = _base_api(
-        request=f"v1/workspaces/{workspace_id}/kqlDatabases",
+        request=f"v1/workspaces/{workspace_id}/kqlDatabases",
+        uses_pagination=True,
+        client="fabric_sp",
     )
 
     for r in responses:

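Both new arguments to `_base_api` matter here: `uses_pagination=True` makes the helper follow the Fabric API's continuation links and return one response per page (hence the `for r in responses:` loop), and `client="fabric_sp"` routes the call through the service-principal-capable client. `_base_api` itself is internal to the package; as a rough sketch of the underlying pagination pattern, with an assumed `get` callable standing in for the authenticated client:

    def get_all_pages(get, url):
        """Follow Fabric-style continuation links and yield each page's JSON."""
        while url:
            page = get(url).json()
            yield page
            # Fabric list APIs include a continuationUri while more pages remain.
            url = page.get("continuationUri")
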
sempy_labs/_list_functions.py
CHANGED
@@ -605,6 +605,8 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the lakehouses within a workspace.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     workspace : str | uuid.UUID, default=None
@@ -633,7 +635,9 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     responses = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/lakehouses",
+        request=f"/v1/workspaces/{workspace_id}/lakehouses",
+        uses_pagination=True,
+        client="fabric_sp",
     )
 
     for r in responses:
@@ -1172,42 +1176,6 @@ def list_shortcuts(
     return list_shortcuts(lakehouse=lakehouse, workspace=workspace, path=path)
 
 
-def list_capacities() -> pd.DataFrame:
-    """
-    Shows the capacities and their properties.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the capacities and their properties
-    """
-
-    columns = {
-        "Id": "string",
-        "Display Name": "string",
-        "Sku": "string",
-        "Region": "string",
-        "State": "string",
-        "Admins": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    response = _base_api(request="/v1.0/myorg/capacities")
-
-    for i in response.json().get("value", []):
-        new_data = {
-            "Id": i.get("id").lower(),
-            "Display Name": i.get("displayName"),
-            "Sku": i.get("sku"),
-            "Region": i.get("region"),
-            "State": i.get("state"),
-            "Admins": [i.get("admins", [])],
-        }
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
 def list_reports_using_semantic_model(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -1508,7 +1476,9 @@ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing a list of the server properties.
     """
 
-    tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
+    tom_server = fabric.create_tom_server(
+        dataset=None, readonly=True, workspace=workspace
+    )
 
     rows = [
         {

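`list_capacities` disappears from this module; judging by the +37 lines landing in `sempy_labs/_capacities.py` in the file list above, it was most likely relocated there rather than dropped (an inference from the file list, not something this diff shows directly). Code that goes through the package namespace should be unaffected if the export is preserved:

    import sempy_labs as labs

    # Assumes list_capacities is still exported at the package level after the move.
    df = labs.list_capacities()
    print(df[["Display Name", "Sku", "Region", "State"]])
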
sempy_labs/_managed_private_endpoints.py
CHANGED

@@ -23,6 +23,8 @@ def create_managed_private_endpoint(
 
     This is a wrapper function for the following API: `Managed Private Endpoints - Create Workspace Managed Private Endpoint <https://learn.microsoft.com/rest/api/fabric/core/managed-private-endpoints/create-workspace-managed-private-endpoint>`.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     name: str
@@ -60,6 +62,7 @@ def create_managed_private_endpoint(
         status_codes=[201, 202],
         payload=request_body,
         lro_return_status_code=True,
+        client="fabric_sp",
     )
     _print_success(
         item_name=name,
@@ -77,6 +80,8 @@ def list_managed_private_endpoints(
 
     This is a wrapper function for the following API: `Managed Private Endpoints - List Workspace Managed Private Endpoints <https://learn.microsoft.com/rest/api/fabric/core/managed-private-endpoints/list-workspace-managed-private-endpoints>`.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     workspace : str | uuid.UUID, default=None
@@ -106,7 +111,7 @@ def list_managed_private_endpoints(
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/managedPrivateEndpoints",
         uses_pagination=True,
-        …
+        client="fabric_sp",
     )
 
     for r in responses:
@@ -134,6 +139,8 @@ def delete_managed_private_endpoint(
 
     This is a wrapper function for the following API: `Managed Private Endpoints - Delete Workspace Managed Private Endpoint <https://learn.microsoft.com/rest/api/fabric/core/managed-private-endpoints/delete-workspace-managed-private-endpoint>`.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     managed_private_endpoint: str | uuid.UUID
@@ -162,7 +169,7 @@ def delete_managed_private_endpoint(
     _base_api(
         request=f"/v1/workspaces/{workspace_id}/managedPrivateEndpoints/{item_id}",
         method="delete",
-        …
+        client="fabric_sp",
    )
 
     _print_success(

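The `client="fabric_sp"` argument appearing throughout these hunks tells the internal API helper to authenticate as a service principal rather than the notebook user, matching the new docstring notes. The package wires this up internally; purely as an illustration, acquiring such a token with the real `azure-identity` API looks like this:

    from azure.identity import ClientSecretCredential

    # Placeholder service principal details -- supply your own.
    credential = ClientSecretCredential(
        tenant_id="<tenant-id>",
        client_id="<app-client-id>",
        client_secret="<app-secret>",
    )
    # Token scoped to the Fabric REST API.
    token = credential.get_token("https://api.fabric.microsoft.com/.default").token
    headers = {"Authorization": f"Bearer {token}"}
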
sempy_labs/_mirrored_databases.py
CHANGED

@@ -22,6 +22,8 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
 
     This is a wrapper function for the following API: `Items - List Mirrored Databases <https://learn.microsoft.com/rest/api/fabric/mirroredwarehouse/items/list-mirrored-databases>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     workspace : str | uuid.UUID, default=None
@@ -50,8 +52,8 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mirroredDatabases",
-        status_codes=200,
         uses_pagination=True,
+        client="fabric_sp",
     )
 
     for r in responses:

sempy_labs/_ml_experiments.py
CHANGED
@@ -73,7 +73,7 @@ def create_ml_experiment(
     name: str
         Name of the ML experiment.
     description : str, default=None
-        A description of the
+        A description of the ML experiment.
     workspace : str | uuid.UUID, default=None
         The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse

sempy_labs/_model_bpa.py
CHANGED
@@ -6,7 +6,6 @@ from IPython.display import display, HTML
 from sempy_labs._model_dependencies import get_model_calc_dependencies
 from sempy_labs._helper_functions import (
     format_dax_object_name,
-    resolve_lakehouse_name,
     create_relationship_name,
     save_as_delta_table,
     resolve_workspace_capacity,
@@ -389,13 +388,7 @@ def run_model_bpa(
     dfExport = finalDF.copy()
     delta_table_name = "modelbparesults"
 
-
-    lake_workspace = fabric.get_workspace_id()
-    lakehouse = resolve_lakehouse_name(
-        lakehouse_id=lakehouse_id, workspace=lake_workspace
-    )
-
-    lakeT = get_lakehouse_tables(lakehouse=lakehouse, workspace=lake_workspace)
+    lakeT = get_lakehouse_tables()
     lakeT_filt = lakeT[lakeT["Table Name"] == delta_table_name]
 
     dfExport["Severity"].replace(icons.severity_mapping, inplace=True)
@@ -403,9 +396,7 @@ def run_model_bpa(
     if len(lakeT_filt) == 0:
         runId = 1
     else:
-        max_run_id = _get_column_aggregate(
-            lakehouse=lakehouse, table_name=delta_table_name
-        )
+        max_run_id = _get_column_aggregate(table_name=delta_table_name)
         runId = max_run_id + 1
 
     now = datetime.datetime.now()

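With `get_lakehouse_tables()` and `_get_column_aggregate()` now called without lakehouse arguments, exported BPA results always go to the lakehouse attached to the notebook. Assuming the `export` flag keeps its earlier meaning, the implication for callers is:

    import sempy_labs as labs

    # Results land in the 'modelbparesults' delta table of the attached
    # default lakehouse; without an attached lakehouse the export would fail.
    labs.run_model_bpa(dataset="Sales Model", export=True)
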
sempy_labs/_model_bpa_bulk.py
CHANGED
@@ -2,11 +2,12 @@ import sempy.fabric as fabric
 import pandas as pd
 import datetime
 from sempy_labs._helper_functions import (
-    resolve_lakehouse_name,
     save_as_delta_table,
     resolve_workspace_capacity,
     retry,
     _get_column_aggregate,
+    resolve_workspace_id,
+    resolve_lakehouse_name_and_id,
 )
 from sempy_labs.lakehouse import (
     get_lakehouse_tables,
@@ -16,6 +17,7 @@ from sempy_labs._model_bpa import run_model_bpa
 from typing import Optional, List
 from sempy._utils._log import log
 import sempy_labs._icons as icons
+from uuid import UUID
 
 
 @log
@@ -23,7 +25,7 @@ def run_model_bpa_bulk(
     rules: Optional[pd.DataFrame] = None,
     extended: bool = False,
     language: Optional[str] = None,
-    workspace: Optional[str | List[str]] = None,
+    workspace: Optional[str | UUID | List[str | UUID]] = None,
     skip_models: Optional[str | List[str]] = ["ModelBPA", "Fabric Capacity Metrics"],
     skip_models_in_workspace: Optional[dict] = None,
 ):
@@ -41,8 +43,8 @@ def run_model_bpa_bulk(
     language : str, default=None
         The language (code) in which the rules will appear. For example, specifying 'it-IT' will show the Rule Name, Category and Description in Italian.
         Defaults to None which resolves to English.
-    workspace : str | List[str], default=None
-        The workspace or list of workspaces to scan.
+    workspace : str | uuid.UUID | List[str | uuid.UUID], default=None
+        The workspace or list of workspaces to scan. Supports both the workspace name and the workspace id.
         Defaults to None which scans all accessible workspaces.
     skip_models : str | List[str], default=['ModelBPA', 'Fabric Capacity Metrics']
         The semantic models to always skip when running this analysis.
@@ -66,17 +68,12 @@ def run_model_bpa_bulk(
 
     now = datetime.datetime.now()
     output_table = "modelbparesults"
-
-    lakehouse_id = fabric.get_lakehouse_id()
-    lakehouse = resolve_lakehouse_name(
-        lakehouse_id=lakehouse_id, workspace=lakehouse_workspace
-    )
-    lakeT = get_lakehouse_tables(lakehouse=lakehouse, workspace=lakehouse_workspace)
+    lakeT = get_lakehouse_tables()
     lakeT_filt = lakeT[lakeT["Table Name"] == output_table]
-    if len(lakeT_filt) == 0:
+    if lakeT_filt.empty:
         runId = 1
     else:
-        max_run_id = _get_column_aggregate(lakehouse=lakehouse, table_name=output_table)
+        max_run_id = _get_column_aggregate(table_name=output_table)
         runId = max_run_id + 1
 
     if isinstance(workspace, str):
@@ -86,14 +83,14 @@ def run_model_bpa_bulk(
     if workspace is None:
         dfW_filt = dfW.copy()
     else:
-        dfW_filt = dfW[dfW["Name"].isin(workspace)]
+        dfW_filt = dfW[(dfW["Name"].isin(workspace)) | (dfW["Id"].isin(workspace))]
 
-    if len(dfW_filt) == 0:
+    if dfW_filt.empty:
         raise ValueError(
             f"{icons.red_dot} There are no valid workspaces to assess. This is likely due to not having proper permissions to the workspace(s) entered in the 'workspace' parameter."
         )
 
-    for i, r in dfW_filt.iterrows():
+    for _, r in dfW_filt.iterrows():
         wksp = r["Name"]
         wksp_id = r["Id"]
         capacity_id, capacity_name = resolve_workspace_capacity(workspace=wksp)
@@ -108,7 +105,7 @@ def run_model_bpa_bulk(
         dfD = dfD[~dfD["Dataset Name"].isin(skip_models_wkspc)]
 
         # Exclude default semantic models
-        if len(dfD) > 0:
+        if not dfD.empty:
             dfI = fabric.list_items(workspace=wksp)
             filtered_df = dfI.groupby("Display Name").filter(
                 lambda x: set(["Warehouse", "SemanticModel"]).issubset(set(x["Type"]))
@@ -118,7 +115,7 @@ def run_model_bpa_bulk(
             skip_models.extend(default_semantic_models)
             dfD_filt = dfD[~dfD["Dataset Name"].isin(skip_models)]
 
-            if len(dfD_filt) > 0:
+            if not dfD_filt.empty:
                 for _, r2 in dfD_filt.iterrows():
                     dataset_id = r2["Dataset Id"]
                     dataset_name = r2["Dataset Name"]
@@ -161,7 +158,7 @@ def run_model_bpa_bulk(
                 )
                 print(e)
 
-        if len(df) == 0:
+        if df.empty:
             print(
                 f"{icons.yellow_dot} No BPA results to save for the '{wksp}' workspace."
             )
@@ -170,7 +167,7 @@ def run_model_bpa_bulk(
 
         # Append save results individually for each workspace (so as not to create a giant dataframe)
         print(
-            f"{icons.in_progress} Saving the Model BPA results of the '{wksp}' workspace to the '{output_table}' within the …"
+            f"{icons.in_progress} Saving the Model BPA results of the '{wksp}' workspace to the '{output_table}' within the lakehouse attached to this notebook..."
        )
 
        schema = {
@@ -195,8 +192,8 @@ def run_model_bpa_bulk(
 @log
 def create_model_bpa_semantic_model(
     dataset: Optional[str] = icons.model_bpa_name,
-    lakehouse: Optional[str] = None,
-    lakehouse_workspace: Optional[str] = None,
+    lakehouse: Optional[str | UUID] = None,
+    lakehouse_workspace: Optional[str | UUID] = None,
 ):
     """
     Dynamically generates a Direct Lake semantic model based on the 'modelbparesults' delta table which contains the Best Practice Analyzer results.
@@ -209,16 +206,15 @@ def create_model_bpa_semantic_model(
     ----------
     dataset : str, default='ModelBPA'
         Name of the semantic model to be created.
-    lakehouse : str, default=None
+    lakehouse : str | uuid.UUID, default=None
         Name of the Fabric lakehouse which contains the 'modelbparesults' delta table.
         Defaults to None which resolves to the default lakehouse attached to the notebook.
-    lakehouse_workspace : str, default=None
+    lakehouse_workspace : str | uuid.UUID, default=None
         The workspace in which the lakehouse resides.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    from sempy_labs._helper_functions import resolve_lakehouse_name
     from sempy_labs.directlake import (
         generate_shared_expression,
         add_table_to_direct_lake_semantic_model,
@@ -226,22 +222,21 @@ def create_model_bpa_semantic_model(
     from sempy_labs import create_blank_semantic_model, refresh_semantic_model
     from sempy_labs.tom import connect_semantic_model
 
-    …
-    …
-    …
-    …
-    lakehouse = resolve_lakehouse_name(
-        lakehouse_id=lakehouse_id, workspace=lakehouse_workspace
-    )
+    lakehouse_workspace_id = resolve_workspace_id(workspace=lakehouse_workspace)
+    (lakehouse_id, lakehouse_name) = resolve_lakehouse_name_and_id(
+        lakehouse=lakehouse, workspace=lakehouse_workspace_id
+    )
 
     # Generate the shared expression based on the lakehouse and lakehouse workspace
     expr = generate_shared_expression(
-        item_name=…
+        item_name=lakehouse_name,
+        item_type="Lakehouse",
+        workspace=lakehouse_workspace_id,
     )
 
     # Create blank model
     create_blank_semantic_model(
-        dataset=dataset, workspace=lakehouse_workspace, overwrite=True
+        dataset=dataset, workspace=lakehouse_workspace_id, overwrite=True
     )
@@ -250,7 +245,7 @@ def create_model_bpa_semantic_model(
     )
     def dyn_connect():
         with connect_semantic_model(
-            dataset=dataset, readonly=True, workspace=lakehouse_workspace
+            dataset=dataset, readonly=True, workspace=lakehouse_workspace_id
         ) as tom:
 
             tom.model
@@ -259,7 +254,7 @@ def create_model_bpa_semantic_model(
     icons.sll_tags.append("ModelBPABulk")
     table_exists = False
     with connect_semantic_model(
-        dataset=dataset, readonly=False, workspace=lakehouse_workspace
+        dataset=dataset, readonly=False, workspace=lakehouse_workspace_id
     ) as tom:
         t_name = "BPAResults"
         t_name_full = f"'{t_name}'"
@@ -274,11 +269,11 @@ def create_model_bpa_semantic_model(
         dataset=dataset,
         table_name=t_name,
         lakehouse_table_name="modelbparesults",
-        workspace=lakehouse_workspace,
+        workspace=lakehouse_workspace_id,
         refresh=False,
     )
     with connect_semantic_model(
-        dataset=dataset, readonly=False, workspace=lakehouse_workspace
+        dataset=dataset, readonly=False, workspace=lakehouse_workspace_id
     ) as tom:
         # Fix column names
         for c in tom.all_columns():
@@ -377,4 +372,4 @@ def create_model_bpa_semantic_model(
     # tom.add_measure(table_name=t_name, measure_name='Rules Followed', expression="[Rules] - [Rules Violated]")
 
     # Refresh the model
-    refresh_semantic_model(dataset=dataset, workspace=lakehouse_workspace)
+    refresh_semantic_model(dataset=dataset, workspace=lakehouse_workspace_id)

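Several guards in this module switch from `len(df) == 0` comparisons to pandas' `.empty` property; the two are equivalent for DataFrames, so this is purely an idiom cleanup:

    import pandas as pd

    df = pd.DataFrame(columns=["Name", "Id"])
    assert df.empty == (len(df) == 0)  # holds for empty and populated frames alike
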
sempy_labs/_model_bpa_rules.py
CHANGED
@@ -556,7 +556,7 @@ def model_bpa_rules(
         "Warning",
         "Use the DIVIDE function for division",
         lambda obj, tom: re.search(
-            r"\]\s*\/(?!\/)(?!\*)",
+            r"\]\s*\/(?!\/)(?!\*)|\)\s*\/(?!\/)(?!\*)",
             obj.Expression,
             flags=re.IGNORECASE,
         ),

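The rule's pattern gains a second alternative so bare division is flagged after a closing parenthesis as well as after a closing bracket. Exercising both branches:

    import re

    pattern = r"\]\s*\/(?!\/)(?!\*)|\)\s*\/(?!\/)(?!\*)"
    assert re.search(pattern, "[Sales] / [Qty]")             # ']' then '/': caught before
    assert re.search(pattern, "SUM(Sales[Amt]) / 2")         # ')' then '/': newly caught
    assert not re.search(pattern, "DIVIDE([Sales], [Qty])")  # no bare division
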
sempy_labs/_one_lake_integration.py
CHANGED

@@ -5,6 +5,7 @@ from sempy._utils._log import log
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
+    resolve_workspace_id,
 )
 import sempy_labs._icons as icons
 from uuid import UUID
@@ -43,7 +44,7 @@ def export_model_to_onelake(
         destination_workspace = workspace_name
         destination_workspace_id = workspace_id
     else:
-        destination_workspace_id = …
+        destination_workspace_id = resolve_workspace_id(workspace=destination_workspace)
 
     tmsl = f"""
     {{

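`export_model_to_onelake` now resolves the destination through the package's own `resolve_workspace_id` helper instead of an inline lookup. The helper's implementation is not part of this diff; a typical shape for such a resolver, purely as a sketch, is:

    from uuid import UUID

    def resolve_workspace_id_sketch(workspace, lookup_by_name):
        """Accept a workspace name or UUID; always return the ID as a string."""
        try:
            return str(UUID(str(workspace)))   # already an ID -> normalize and return
        except ValueError:
            return lookup_by_name(workspace)   # otherwise resolve the name to an ID
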
sempy_labs/_semantic_models.py
CHANGED
@@ -7,6 +7,7 @@ from sempy_labs._helper_functions import (
     _update_dataframe_datatypes,
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
+    delete_item,
 )
 import sempy_labs._icons as icons
 
@@ -115,3 +116,22 @@ def enable_semantic_model_scheduled_refresh(
     print(
         f"{icons.green_dot} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace has been enabled."
     )
+
+
+def delete_semantic_model(dataset: str | UUID, workspace: Optional[str | UUID] = None):
+    """
+    Deletes a semantic model.
+
+    This is a wrapper function for the following API: `Items - Delete Semantic Model <https://learn.microsoft.com/rest/api/fabric/semanticmodel/items/delete-semantic-model>`_.
+
+    Parameters
+    ----------
+    dataset: str | uuid.UUID
+        Name or ID of the semantic model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    delete_item(item=dataset, type="SemanticModel", workspace=workspace)

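The new `delete_semantic_model` is a thin wrapper over the generic `delete_item` helper. Assuming it is re-exported from the package root like its siblings (the `__init__.py` changes in the file list suggest new exports, though that diff is not shown here), usage would be:

    import sempy_labs as labs

    # Deletes by name; a UUID (or its string form) works as well.
    labs.delete_semantic_model(dataset="Sales Model", workspace="Sales")
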
sempy_labs/_sql.py
CHANGED
@@ -60,14 +60,18 @@ class ConnectBase:
             item=item, workspace=workspace_id, type=endpoint_type.capitalize()
         )
 
+        endpoint_for_url = (
+            "sqlDatabases" if endpoint_type == "sqldatabase" else f"{endpoint_type}s"
+        )
+
         # Get the TDS endpoint
         response = _base_api(
-            request=f"v1/workspaces/{workspace_id}/{endpoint_type}s/{resource_id}"
+            request=f"v1/workspaces/{workspace_id}/{endpoint_for_url}/{resource_id}"
        )
 
         if endpoint_type == "warehouse":
             tds_endpoint = response.json().get("properties", {}).get("connectionString")
-        …
+        elif endpoint_type == "sqldatabase":
             tds_endpoint = response.json().get("properties", {}).get("serverFqdn")
         else:
             tds_endpoint = (