semantic-link-labs 0.9.10__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of semantic-link-labs might be problematic; see the registry's advisory page for more details.
- {semantic_link_labs-0.9.10.dist-info → semantic_link_labs-0.10.0.dist-info}/METADATA +28 -21
- {semantic_link_labs-0.9.10.dist-info → semantic_link_labs-0.10.0.dist-info}/RECORD +38 -31
- {semantic_link_labs-0.9.10.dist-info → semantic_link_labs-0.10.0.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +26 -1
- sempy_labs/_delta_analyzer.py +9 -8
- sempy_labs/_dictionary_diffs.py +221 -0
- sempy_labs/_environments.py +19 -1
- sempy_labs/_generate_semantic_model.py +1 -1
- sempy_labs/_helper_functions.py +358 -134
- sempy_labs/_kusto.py +25 -23
- sempy_labs/_list_functions.py +13 -35
- sempy_labs/_model_bpa_rules.py +13 -3
- sempy_labs/_notebooks.py +44 -11
- sempy_labs/_semantic_models.py +93 -1
- sempy_labs/_sql.py +4 -3
- sempy_labs/_tags.py +194 -0
- sempy_labs/_user_delegation_key.py +42 -0
- sempy_labs/_variable_libraries.py +89 -0
- sempy_labs/_vpax.py +388 -0
- sempy_labs/admin/__init__.py +8 -0
- sempy_labs/admin/_tags.py +126 -0
- sempy_labs/directlake/_generate_shared_expression.py +5 -1
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +55 -5
- sempy_labs/dotnet_lib/dotnet.runtime.config.json +10 -0
- sempy_labs/lakehouse/__init__.py +14 -0
- sempy_labs/lakehouse/_blobs.py +100 -85
- sempy_labs/lakehouse/_get_lakehouse_tables.py +1 -13
- sempy_labs/lakehouse/_helper.py +211 -0
- sempy_labs/lakehouse/_lakehouse.py +1 -1
- sempy_labs/lakehouse/_livy_sessions.py +137 -0
- sempy_labs/report/__init__.py +2 -0
- sempy_labs/report/_download_report.py +1 -1
- sempy_labs/report/_generate_report.py +5 -1
- sempy_labs/report/_report_helper.py +27 -128
- sempy_labs/report/_reportwrapper.py +1903 -1165
- sempy_labs/tom/_model.py +83 -21
- sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +0 -9
- sempy_labs/report/_bpareporttemplate/.platform +0 -11
- {semantic_link_labs-0.9.10.dist-info → semantic_link_labs-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {semantic_link_labs-0.9.10.dist-info → semantic_link_labs-0.10.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
from sempy_labs._helper_functions import (
    resolve_workspace_id,
    resolve_lakehouse_id,
    _base_api,
    _create_dataframe,
    _update_dataframe_datatypes,
)
import pandas as pd
from typing import Optional
from uuid import UUID


def list_livy_sessions(
    lakehouse: Optional[str | UUID] = None, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows a list of livy sessions from the specified item identifier.

    This is a wrapper function for the following API: `Livy Sessions - List Livy Sessions <https://learn.microsoft.com/rest/api/fabric/lakehouse/livy-sessions/list-livy-sessions>`_.

    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).

    Parameters
    ----------
    lakehouse : str | uuid.UUID, default=None
        The Fabric lakehouse name or ID.
        Defaults to None which resolves to the lakehouse attached to the notebook.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of livy sessions from the specified item identifier.
    """

    # Output schema; the dtype labels feed _update_dataframe_datatypes.
    columns = {
        "Spark Application Id": "string",
        # Fixed: key was "State:" with a stray trailing colon (typo in the column name).
        "State": "string",
        "Livy Id": "string",
        "Origin": "string",
        "Attempt Number": "int",
        "Max Number Of Attempts": "int",
        "Livy Name": "string",
        "Submitter Id": "string",
        "Submitter Type": "string",
        "Item Workspace Id": "string",
        "Item Id": "string",
        "Item Reference Type": "string",
        "Item Name": "string",
        "Item Type": "string",
        "Job Type": "string",
        # Fixed: these two were "str" while every other text column uses "string".
        "Submitted Date Time": "string",
        "Start Date Time": "string",
        "End Date Time": "string",
        "Queued Duration Value": "int",
        "Queued Duration Time Unit": "string",
        "Running Duration Value": "int",
        "Running Duration Time Unit": "string",
        "Total Duration Value": "int",
        "Total Duration Time Unit": "string",
        "Job Instance Id": "string",
        "Creator Item Workspace Id": "string",
        "Creator Item Id": "string",
        "Creator Item Reference Type": "string",
        "Creator Item Name": "string",
        "Creator Item Type": "string",
        "Cancellation Reason": "string",
        "Capacity Id": "string",
        "Operation Name": "string",
        "Runtime Version": "string",
        "Livy Session Item Resource Uri": "string",
    }
    df = _create_dataframe(columns=columns)

    workspace_id = resolve_workspace_id(workspace)
    lakehouse_id = resolve_lakehouse_id(lakehouse, workspace_id)

    responses = _base_api(
        request=f"/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/livySessions",
        uses_pagination=True,
        client="fabric_sp",
    )

    rows = []

    for r in responses:
        for v in r.get("value", []):
            # Nested objects may be absent in a response entry; default them to {}
            # so the .get() lookups below are safe. The original indexed
            # v["submitter"], v["item"] and v["creatorItem"] directly, which would
            # raise KeyError — inconsistent with the durations, which already use
            # .get(..., {}).
            submitter = v.get("submitter", {})
            item = v.get("item", {})
            creator_item = v.get("creatorItem", {})
            queued_duration = v.get("queuedDuration", {})
            running_duration = v.get("runningDuration", {})
            total_duration = v.get("totalDuration", {})
            rows.append(
                {
                    "Spark Application Id": v.get("sparkApplicationId"),
                    "State": v.get("state"),
                    "Livy Id": v.get("livyId"),
                    "Origin": v.get("origin"),
                    "Attempt Number": v.get("attemptNumber"),
                    "Max Number Of Attempts": v.get("maxNumberOfAttempts"),
                    "Livy Name": v.get("livyName"),
                    "Submitter Id": submitter.get("id"),
                    "Submitter Type": submitter.get("type"),
                    "Item Workspace Id": item.get("workspaceId"),
                    "Item Id": item.get("itemId"),
                    "Item Reference Type": item.get("referenceType"),
                    "Item Name": v.get("itemName"),
                    "Item Type": v.get("itemType"),
                    "Job Type": v.get("jobType"),
                    "Submitted Date Time": v.get("submittedDateTime"),
                    "Start Date Time": v.get("startDateTime"),
                    "End Date Time": v.get("endDateTime"),
                    "Queued Duration Value": queued_duration.get("value"),
                    "Queued Duration Time Unit": queued_duration.get("timeUnit"),
                    "Running Duration Value": running_duration.get("value"),
                    "Running Duration Time Unit": running_duration.get("timeUnit"),
                    "Total Duration Value": total_duration.get("value"),
                    "Total Duration Time Unit": total_duration.get("timeUnit"),
                    "Job Instance Id": v.get("jobInstanceId"),
                    "Creator Item Workspace Id": creator_item.get("workspaceId"),
                    "Creator Item Id": creator_item.get("itemId"),
                    "Creator Item Reference Type": creator_item.get("referenceType"),
                    "Creator Item Name": v.get("creatorItemName"),
                    "Creator Item Type": v.get("creatorItemType"),
                    "Cancellation Reason": v.get("cancellationReason"),
                    "Capacity Id": v.get("capacityId"),
                    "Operation Name": v.get("operationName"),
                    "Runtime Version": v.get("runtimeVersion"),
                    "Livy Session Item Resource Uri": v.get("livySessionItemResourceUri"),
                }
            )

    if rows:
        # Build one DataFrame from all rows at once — cheaper than creating a
        # single-row DataFrame per session and concatenating them.
        df = pd.DataFrame(rows, columns=list(columns))
        _update_dataframe_datatypes(dataframe=df, column_map=columns)

    return df
|
sempy_labs/report/__init__.py
CHANGED
|
@@ -3,6 +3,7 @@ from sempy_labs.report._save_report import (
|
|
|
3
3
|
)
|
|
4
4
|
from sempy_labs.report._reportwrapper import (
|
|
5
5
|
ReportWrapper,
|
|
6
|
+
connect_report,
|
|
6
7
|
)
|
|
7
8
|
from sempy_labs.report._paginated import (
|
|
8
9
|
get_report_datasources,
|
|
@@ -50,4 +51,5 @@ __all__ = [
|
|
|
50
51
|
"get_report_datasources",
|
|
51
52
|
"download_report",
|
|
52
53
|
"save_report_as_pbip",
|
|
54
|
+
"connect_report",
|
|
53
55
|
]
|
|
@@ -22,7 +22,7 @@ def download_report(
|
|
|
22
22
|
"""
|
|
23
23
|
Downloads the specified report from the specified workspace to a Power BI .pbix file.
|
|
24
24
|
|
|
25
|
-
This is a wrapper function for the following API: `Reports - Export Report In Group <https://learn.microsoft.com/rest/api/power-bi/reports/export-report-in-group
|
|
25
|
+
This is a wrapper function for the following API: `Reports - Export Report In Group <https://learn.microsoft.com/rest/api/power-bi/reports/export-report-in-group>`_.
|
|
26
26
|
|
|
27
27
|
Parameters
|
|
28
28
|
----------
|
|
@@ -178,6 +178,7 @@ def update_report_from_reportjson(
|
|
|
178
178
|
)
|
|
179
179
|
|
|
180
180
|
|
|
181
|
+
@log
|
|
181
182
|
def get_report_definition(
|
|
182
183
|
report: str | UUID,
|
|
183
184
|
workspace: Optional[str | UUID] = None,
|
|
@@ -206,7 +207,10 @@ def get_report_definition(
|
|
|
206
207
|
"""
|
|
207
208
|
|
|
208
209
|
return get_item_definition(
|
|
209
|
-
item=report,
|
|
210
|
+
item=report,
|
|
211
|
+
type="Report",
|
|
212
|
+
workspace=workspace,
|
|
213
|
+
return_dataframe=return_dataframe,
|
|
210
214
|
)
|
|
211
215
|
|
|
212
216
|
|
|
@@ -1,14 +1,5 @@
|
|
|
1
|
-
import sempy.fabric as fabric
|
|
2
|
-
from typing import Tuple, Optional
|
|
3
|
-
import sempy_labs._icons as icons
|
|
4
|
-
import re
|
|
5
|
-
import base64
|
|
6
|
-
import json
|
|
7
1
|
import requests
|
|
8
|
-
|
|
9
|
-
from sempy_labs._helper_functions import (
|
|
10
|
-
resolve_workspace_name_and_id,
|
|
11
|
-
)
|
|
2
|
+
import sempy_labs._icons as icons
|
|
12
3
|
|
|
13
4
|
|
|
14
5
|
vis_type_mapping = {
|
|
@@ -47,10 +38,9 @@ vis_type_mapping = {
|
|
|
47
38
|
"decompositionTreeVisual": "Decomposition tree",
|
|
48
39
|
"qnaVisual": "Q&A",
|
|
49
40
|
"aiNarratives": "Narrative",
|
|
50
|
-
"scorecard": "
|
|
41
|
+
"scorecard": "Goals (Preview)",
|
|
51
42
|
"rdlVisual": "Paginated report",
|
|
52
43
|
"cardVisual": "Card (new)",
|
|
53
|
-
"advancedSlicerVisual": "Slicer (new)",
|
|
54
44
|
"actionButton": "Button",
|
|
55
45
|
"bookmarkNavigator": "Bookmark navigator",
|
|
56
46
|
"image": "Image",
|
|
@@ -58,8 +48,33 @@ vis_type_mapping = {
|
|
|
58
48
|
"pageNavigator": "Page navigator",
|
|
59
49
|
"shape": "Shape",
|
|
60
50
|
"Group": "Group",
|
|
51
|
+
"listSlicer": "List Slicer",
|
|
52
|
+
"advancedSlicerVisual": "Button Slicer",
|
|
53
|
+
"FlowVisual_C29F1DCC_81F5_4973_94AD_0517D44CC06A": "Power Automate for Power BI",
|
|
61
54
|
}
|
|
62
55
|
|
|
56
|
+
|
|
57
|
+
def generate_visual_file_path(page_file_path: str, visual_id: str) -> str:
    """Derive a visual's definition file path from its page's file path.

    Truncates ``page_file_path`` at the ``/page.json`` suffix and appends
    ``/visuals/<visual_id>.json`` to the remaining page folder.
    """
    page_folder, _, _ = page_file_path.partition("/page.json")
    return f"{page_folder}/visuals/{visual_id}.json"
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def resolve_visual_type(visual_type: str) -> str:
    """Translate a visual type between internal id and display name.

    The lookup is case-insensitive and bidirectional: an internal id (a key of
    ``vis_type_mapping``) resolves to its display name, and a display name
    resolves back to its internal id. Raises ``ValueError`` when the value
    matches neither direction.
    """
    needle = visual_type.lower()

    # Forward direction: internal id -> display name.
    by_id = {key.lower(): value for key, value in vis_type_mapping.items()}
    if needle in by_id:
        return by_id[needle]

    # Reverse direction: display name -> internal id.
    by_display = {value.lower(): key for key, value in vis_type_mapping.items()}
    if needle in by_display:
        return by_display[needle]

    raise ValueError(f"{icons.red_dot} Unknown visual type: {visual_type}")
|
|
76
|
+
|
|
77
|
+
|
|
63
78
|
page_type_mapping = {
|
|
64
79
|
(320, 240): "Tooltip",
|
|
65
80
|
(816, 1056): "Letter",
|
|
@@ -70,22 +85,6 @@ page_type_mapping = {
|
|
|
70
85
|
page_types = ["Tooltip", "Letter", "4:3", "16:9"]
|
|
71
86
|
|
|
72
87
|
|
|
73
|
-
def get_web_url(report: str, workspace: Optional[str | UUID] = None):
|
|
74
|
-
|
|
75
|
-
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
|
|
76
|
-
|
|
77
|
-
dfR = fabric.list_reports(workspace=workspace_id)
|
|
78
|
-
dfR_filt = dfR[dfR["Name"] == report]
|
|
79
|
-
|
|
80
|
-
if len(dfR_filt) == 0:
|
|
81
|
-
raise ValueError(
|
|
82
|
-
f"{icons.red_dot} The '{report}' report does not exist within the '{workspace_name}' workspace."
|
|
83
|
-
)
|
|
84
|
-
web_url = dfR_filt["Web Url"].iloc[0]
|
|
85
|
-
|
|
86
|
-
return web_url
|
|
87
|
-
|
|
88
|
-
|
|
89
88
|
def populate_custom_visual_display_names():
|
|
90
89
|
|
|
91
90
|
url = "https://catalogapi.azure.com/offers?api-version=2018-08-01-beta&storefront=appsource&$filter=offerType+eq+%27PowerBIVisuals%27"
|
|
@@ -128,106 +127,6 @@ def populate_custom_visual_display_names():
|
|
|
128
127
|
vis_type_mapping[vizId] = displayName
|
|
129
128
|
|
|
130
129
|
|
|
131
|
-
def resolve_page_name(self, page_name: str) -> Tuple[str, str, str]:
|
|
132
|
-
|
|
133
|
-
dfP = self.list_pages()
|
|
134
|
-
if any(r["Page Name"] == page_name for _, r in dfP.iterrows()):
|
|
135
|
-
valid_page_name = page_name
|
|
136
|
-
dfP_filt = dfP[dfP["Page Name"] == page_name]
|
|
137
|
-
valid_display_name = dfP_filt["Page Display Name"].iloc[0]
|
|
138
|
-
file_path = dfP_filt["File Path"].iloc[0]
|
|
139
|
-
elif any(r["Page Display Name"] == page_name for _, r in dfP.iterrows()):
|
|
140
|
-
valid_display_name = page_name
|
|
141
|
-
dfP_filt = dfP[dfP["Page Display Name"] == page_name]
|
|
142
|
-
valid_page_name = dfP_filt["Page Name"].iloc[0]
|
|
143
|
-
file_path = dfP_filt["File Path"].iloc[0]
|
|
144
|
-
else:
|
|
145
|
-
raise ValueError(
|
|
146
|
-
f"{icons.red_dot} Invalid page name. The '{page_name}' page does not exist in the '{self._report}' report within the '{self._workspace}' workspace."
|
|
147
|
-
)
|
|
148
|
-
|
|
149
|
-
return valid_page_name, valid_display_name, file_path
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
def visual_page_mapping(self) -> Tuple[dict, dict]:
|
|
153
|
-
|
|
154
|
-
page_mapping = {}
|
|
155
|
-
visual_mapping = {}
|
|
156
|
-
rd = self.rdef
|
|
157
|
-
for _, r in rd.iterrows():
|
|
158
|
-
file_path = r["path"]
|
|
159
|
-
payload = r["payload"]
|
|
160
|
-
if file_path.endswith("/page.json"):
|
|
161
|
-
pattern_page = r"/pages/(.*?)/page.json"
|
|
162
|
-
page_name = re.search(pattern_page, file_path).group(1)
|
|
163
|
-
obj_file = base64.b64decode(payload).decode("utf-8")
|
|
164
|
-
obj_json = json.loads(obj_file)
|
|
165
|
-
page_id = obj_json.get("name")
|
|
166
|
-
page_display = obj_json.get("displayName")
|
|
167
|
-
page_mapping[page_name] = (page_id, page_display)
|
|
168
|
-
for _, r in rd.iterrows():
|
|
169
|
-
file_path = r["path"]
|
|
170
|
-
payload = r["payload"]
|
|
171
|
-
if file_path.endswith("/visual.json"):
|
|
172
|
-
pattern_page = r"/pages/(.*?)/visuals/"
|
|
173
|
-
page_name = re.search(pattern_page, file_path).group(1)
|
|
174
|
-
visual_mapping[file_path] = (
|
|
175
|
-
page_mapping.get(page_name)[0],
|
|
176
|
-
page_mapping.get(page_name)[1],
|
|
177
|
-
)
|
|
178
|
-
|
|
179
|
-
return page_mapping, visual_mapping
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
def resolve_visual_name(
|
|
183
|
-
self, page_name: str, visual_name: str
|
|
184
|
-
) -> Tuple[str, str, str, str]:
|
|
185
|
-
"""
|
|
186
|
-
Obtains the page name, page display name, and the file path for a given page in a report.
|
|
187
|
-
|
|
188
|
-
Parameters
|
|
189
|
-
----------
|
|
190
|
-
page_name : str
|
|
191
|
-
The name of the page of the report - either the page name (GUID) or the page display name.
|
|
192
|
-
visual_name : str
|
|
193
|
-
The name of the visual of the report.
|
|
194
|
-
|
|
195
|
-
Returns
|
|
196
|
-
-------
|
|
197
|
-
Tuple[str, str, str, str] Page name, page display name, visual name, file path from the report definition.
|
|
198
|
-
|
|
199
|
-
"""
|
|
200
|
-
|
|
201
|
-
dfV = self.list_visuals()
|
|
202
|
-
if any(
|
|
203
|
-
(r["Page Name"] == page_name) & (r["Visual Name"] == visual_name)
|
|
204
|
-
for _, r in dfV.iterrows()
|
|
205
|
-
):
|
|
206
|
-
valid_page_name = page_name
|
|
207
|
-
dfV_filt = dfV[
|
|
208
|
-
(dfV["Page Name"] == page_name) & (dfV["Visual Name"] == visual_name)
|
|
209
|
-
]
|
|
210
|
-
file_path = dfV_filt["File Path"].iloc[0]
|
|
211
|
-
valid_display_name = dfV_filt["Page Display Name"].iloc[0]
|
|
212
|
-
elif any(
|
|
213
|
-
(r["Page Display Name"] == page_name) & (r["Visual Name"] == visual_name)
|
|
214
|
-
for _, r in dfV.iterrows()
|
|
215
|
-
):
|
|
216
|
-
valid_display_name = page_name
|
|
217
|
-
dfV_filt = dfV[
|
|
218
|
-
(dfV["Page Display Name"] == page_name)
|
|
219
|
-
& (dfV["Visual Name"] == visual_name)
|
|
220
|
-
]
|
|
221
|
-
file_path = dfV_filt["File Path"].iloc[0]
|
|
222
|
-
valid_page_name = dfV_filt["Page Name"].iloc[0]
|
|
223
|
-
else:
|
|
224
|
-
raise ValueError(
|
|
225
|
-
f"{icons.red_dot} Invalid page/visual name. The '{visual_name}' visual on the '{page_name}' page does not exist in the '{self._report}' report within the '{self._workspace}' workspace."
|
|
226
|
-
)
|
|
227
|
-
|
|
228
|
-
return valid_page_name, valid_display_name, visual_name, file_path
|
|
229
|
-
|
|
230
|
-
|
|
231
130
|
def find_entity_property_pairs(data, result=None, keys_path=None):
|
|
232
131
|
|
|
233
132
|
if result is None:
|