semantic-link-labs 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
Potentially problematic release: this version of semantic-link-labs might be problematic.
- {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/METADATA +7 -6
- semantic_link_labs-0.11.3.dist-info/RECORD +212 -0
- sempy_labs/__init__.py +65 -71
- sempy_labs/_a_lib_info.py +1 -1
- sempy_labs/_ai.py +1 -1
- sempy_labs/_capacities.py +2 -2
- sempy_labs/_capacity_migration.py +5 -5
- sempy_labs/_clear_cache.py +1 -1
- sempy_labs/_connections.py +2 -2
- sempy_labs/_dashboards.py +16 -16
- sempy_labs/_data_pipelines.py +1 -1
- sempy_labs/_dataflows.py +101 -26
- sempy_labs/_dax.py +3 -3
- sempy_labs/_delta_analyzer.py +4 -4
- sempy_labs/_delta_analyzer_history.py +1 -1
- sempy_labs/_deployment_pipelines.py +1 -1
- sempy_labs/_environments.py +22 -21
- sempy_labs/_eventhouses.py +12 -11
- sempy_labs/_eventstreams.py +12 -11
- sempy_labs/_external_data_shares.py +78 -23
- sempy_labs/_gateways.py +47 -45
- sempy_labs/_generate_semantic_model.py +3 -3
- sempy_labs/_git.py +1 -1
- sempy_labs/_graphQL.py +12 -11
- sempy_labs/_helper_functions.py +169 -5
- sempy_labs/_job_scheduler.py +56 -54
- sempy_labs/_kql_databases.py +16 -17
- sempy_labs/_kql_querysets.py +12 -11
- sempy_labs/_kusto.py +2 -2
- sempy_labs/_labels.py +126 -0
- sempy_labs/_list_functions.py +2 -2
- sempy_labs/_managed_private_endpoints.py +18 -15
- sempy_labs/_mirrored_databases.py +16 -15
- sempy_labs/_mirrored_warehouses.py +12 -11
- sempy_labs/_ml_experiments.py +11 -10
- sempy_labs/_model_auto_build.py +3 -3
- sempy_labs/_model_bpa.py +5 -5
- sempy_labs/_model_bpa_bulk.py +3 -3
- sempy_labs/_model_dependencies.py +1 -1
- sempy_labs/_mounted_data_factories.py +12 -12
- sempy_labs/_notebooks.py +151 -2
- sempy_labs/_one_lake_integration.py +1 -1
- sempy_labs/_query_scale_out.py +1 -1
- sempy_labs/_refresh_semantic_model.py +1 -1
- sempy_labs/_semantic_models.py +30 -28
- sempy_labs/_spark.py +1 -1
- sempy_labs/_sql.py +1 -1
- sempy_labs/_sql_endpoints.py +12 -11
- sempy_labs/_sqldatabase.py +15 -15
- sempy_labs/_tags.py +11 -10
- sempy_labs/_translations.py +1 -1
- sempy_labs/_user_delegation_key.py +2 -2
- sempy_labs/_vertipaq.py +3 -3
- sempy_labs/_vpax.py +1 -1
- sempy_labs/_warehouses.py +15 -14
- sempy_labs/_workloads.py +1 -1
- sempy_labs/_workspace_identity.py +1 -1
- sempy_labs/_workspaces.py +14 -13
- sempy_labs/admin/__init__.py +18 -18
- sempy_labs/admin/_activities.py +46 -46
- sempy_labs/admin/_apps.py +28 -26
- sempy_labs/admin/_artifacts.py +15 -15
- sempy_labs/admin/_basic_functions.py +1 -2
- sempy_labs/admin/_capacities.py +84 -82
- sempy_labs/admin/_dataflows.py +2 -2
- sempy_labs/admin/_datasets.py +50 -48
- sempy_labs/admin/_domains.py +25 -19
- sempy_labs/admin/_external_data_share.py +24 -22
- sempy_labs/admin/_git.py +17 -17
- sempy_labs/admin/_items.py +47 -45
- sempy_labs/admin/_reports.py +61 -58
- sempy_labs/admin/_scanner.py +2 -2
- sempy_labs/admin/_shared.py +18 -18
- sempy_labs/admin/_tags.py +2 -2
- sempy_labs/admin/_tenant.py +57 -51
- sempy_labs/admin/_users.py +16 -15
- sempy_labs/admin/_workspaces.py +2 -2
- sempy_labs/directlake/__init__.py +12 -12
- sempy_labs/directlake/_directlake_schema_compare.py +3 -3
- sempy_labs/directlake/_directlake_schema_sync.py +9 -7
- sempy_labs/directlake/_dl_helper.py +5 -2
- sempy_labs/directlake/_generate_shared_expression.py +1 -1
- sempy_labs/directlake/_get_directlake_lakehouse.py +1 -1
- sempy_labs/directlake/_guardrails.py +1 -1
- sempy_labs/directlake/_list_directlake_model_calc_tables.py +3 -3
- sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -1
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +3 -3
- sempy_labs/directlake/_update_directlake_partition_entity.py +4 -4
- sempy_labs/directlake/_warm_cache.py +3 -3
- sempy_labs/graph/__init__.py +3 -3
- sempy_labs/graph/_groups.py +81 -78
- sempy_labs/graph/_teams.py +21 -21
- sempy_labs/graph/_users.py +109 -10
- sempy_labs/lakehouse/__init__.py +7 -7
- sempy_labs/lakehouse/_blobs.py +30 -30
- sempy_labs/lakehouse/_get_lakehouse_columns.py +2 -2
- sempy_labs/lakehouse/_get_lakehouse_tables.py +29 -27
- sempy_labs/lakehouse/_helper.py +38 -1
- sempy_labs/lakehouse/_lakehouse.py +16 -7
- sempy_labs/lakehouse/_livy_sessions.py +47 -42
- sempy_labs/lakehouse/_shortcuts.py +22 -21
- sempy_labs/migration/__init__.py +8 -8
- sempy_labs/migration/_create_pqt_file.py +2 -2
- sempy_labs/migration/_migrate_calctables_to_lakehouse.py +35 -44
- sempy_labs/migration/_migrate_calctables_to_semantic_model.py +9 -20
- sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +5 -9
- sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +11 -20
- sempy_labs/migration/_migration_validation.py +1 -2
- sempy_labs/migration/_refresh_calc_tables.py +2 -2
- sempy_labs/mirrored_azure_databricks_catalog/__init__.py +2 -2
- sempy_labs/mirrored_azure_databricks_catalog/_discover.py +40 -40
- sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +1 -1
- sempy_labs/ml_model/__init__.py +23 -0
- sempy_labs/ml_model/_functions.py +427 -0
- sempy_labs/report/__init__.py +10 -10
- sempy_labs/report/_download_report.py +2 -2
- sempy_labs/report/_export_report.py +2 -2
- sempy_labs/report/_generate_report.py +1 -1
- sempy_labs/report/_paginated.py +1 -1
- sempy_labs/report/_report_bpa.py +4 -3
- sempy_labs/report/_report_functions.py +3 -3
- sempy_labs/report/_report_list_functions.py +3 -3
- sempy_labs/report/_report_rebind.py +1 -1
- sempy_labs/report/_reportwrapper.py +248 -250
- sempy_labs/report/_save_report.py +3 -3
- sempy_labs/theme/_org_themes.py +19 -6
- sempy_labs/tom/__init__.py +1 -1
- sempy_labs/tom/_model.py +13 -8
- sempy_labs/variable_library/__init__.py +19 -0
- sempy_labs/variable_library/_functions.py +403 -0
- semantic_link_labs-0.11.1.dist-info/RECORD +0 -210
- sempy_labs/_dax_query_view.py +0 -57
- sempy_labs/_ml_models.py +0 -110
- sempy_labs/_variable_libraries.py +0 -91
- {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/WHEEL +0 -0
- {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/licenses/LICENSE +0 -0
- {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/top_level.txt +0 -0
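The most visible structural change in the file list above is the replacement of the flat sempy_labs/_ml_models.py and sempy_labs/_variable_libraries.py modules with the new sempy_labs/ml_model/ and sempy_labs/variable_library/ subpackages. The sketch below is a hypothetical before/after import comparison, assuming the new subpackage __init__.py files re-export the functions defined in their _functions.py modules; the old import path and the re-export behavior are assumptions, not taken from the diff itself.

    # Hypothetical import comparison; verify against the installed 0.11.3
    # package before relying on these paths.

    # 0.11.1 (flat private module, removed in this diff) -- assumed old path:
    # from sempy_labs._ml_models import list_ml_models

    # 0.11.3 (new subpackage layout added in this diff) -- assumes __init__.py re-exports:
    from sempy_labs.ml_model import list_ml_models

    df = list_ml_models(workspace="My Workspace")  # workspace name or UUID
    print(df)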
sempy_labs/ml_model/_functions.py
ADDED

@@ -0,0 +1,427 @@
+import pandas as pd
+from typing import Any, Optional, List
+from .._helper_functions import (
+    _update_dataframe_datatypes,
+    resolve_item_id,
+    resolve_item_name_and_id,
+    resolve_workspace_id,
+    _base_api,
+    delete_item,
+    _create_dataframe,
+    create_item,
+    resolve_workspace_name_and_id,
+)
+from uuid import UUID
+from sempy._utils._log import log
+import sempy_labs._icons as icons
+
+
+@log
+def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+    """
+    Shows the ML models within a workspace.
+
+    This is a wrapper function for the following API: `Items - List ML Models <https://learn.microsoft.com/rest/api/fabric/mlmodel/items/list-ml-models>`_.
+
+    Parameters
+    ----------
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the ML models within a workspace.
+    """
+
+    columns = {
+        "ML Model Name": "string",
+        "ML Model Id": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlModels",
+        status_codes=200,
+        uses_pagination=True,
+    )
+
+    rows = []
+    for r in responses:
+        for v in r.get("value", []):
+            model_id = v.get("id")
+            modelName = v.get("displayName")
+            desc = v.get("description")
+
+            rows.append(
+                {
+                    "ML Model Name": modelName,
+                    "ML Model Id": model_id,
+                    "Description": desc,
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
+
+    return df
+
+
+@log
+def create_ml_model(
+    name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
+):
+    """
+    Creates a Fabric ML model.
+
+    This is a wrapper function for the following API: `Items - Create ML Model <https://learn.microsoft.com/rest/api/fabric/mlmodel/items/create-ml-model>`_.
+
+    Parameters
+    ----------
+    name: str
+        Name of the ML model.
+    description : str, default=None
+        A description of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    create_item(name=name, description=description, type="MLModel", workspace=workspace)
+
+
+@log
+def delete_ml_model(ml_model: str | UUID, workspace: Optional[str | UUID] = None):
+    """
+    Deletes a Fabric ML model.
+
+    This is a wrapper function for the following API: `Items - Delete ML Model <https://learn.microsoft.com/rest/api/fabric/mlmodel/items/delete-ml-model>`_.
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    delete_item(item=ml_model, type="MLModel", workspace=workspace)
+
+
+@log
+def activate_ml_model_endpoint_version(
+    ml_model: str | UUID, name: str, workspace: Optional[str | UUID] = None
+):
+    """
+    Activates the specified model version endpoint.
+
+    This is a wrapper function for the following API: `Endpoint - Activate ML Model Endpoint Version <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/activate-ml-model-endpoint-version>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    name: str
+        The ML model version name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (model_name, model_id) = resolve_item_name_and_id(
+        item=ml_model, type="MLModel", workspace=workspace
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/{name}/activate",
+        method="post",
+        client="fabric_sp",
+        lro_return_status_code=True,
+        status_codes=[200, 202],
+    )
+
+    print(
+        f"{icons.green_dot} The {model_name} model version {name} has been activated in the {workspace_name} workspace."
+    )
+
+
+@log
+def deactivate_ml_model_endpoint_version(
+    ml_model: str | UUID, name: str, workspace: Optional[str | UUID] = None
+):
+    """
+    Deactivates the specified model version endpoint.
+
+    This is a wrapper function for the following API: `Endpoint - Deactivate ML Model Endpoint Version <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/deactivate-ml-model-endpoint-version>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    name: str
+        The ML model version name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (model_name, model_id) = resolve_item_name_and_id(
+        item=ml_model, type="MLModel", workspace=workspace
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/{name}/deactivate",
+        method="post",
+        client="fabric_sp",
+        lro_return_status_code=True,
+        status_codes=[200, 202],
+    )
+
+    print(
+        f"{icons.green_dot} The {model_name} model version {name} has been deactivated in the {workspace_name} workspace."
+    )
+
+
+@log
+def deactivate_all_ml_model_endpoint_versions(
+    ml_model: str | UUID, workspace: Optional[str | UUID] = None
+):
+    """
+    Deactivates the specified machine learning model and its version's endpoints.
+
+    This is a wrapper function for the following API: `Endpoint - Deactivate All ML Model Endpoint Versions <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/deactivate-all-ml-model-endpoint-versions>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (model_name, model_id) = resolve_item_name_and_id(
+        item=ml_model, type="MLModel", workspace=workspace
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/deactivateAll",
+        method="post",
+        client="fabric_sp",
+        lro_return_status_code=True,
+        status_codes=[200, 202],
+    )
+
+    print(
+        f"{icons.green_dot} All endpoint versions of the {model_name} model within the {workspace_name} workspace have been deactivated."
+    )
+
+
+@log
+def list_ml_model_endpoint_versions(
+    ml_model: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
+    """
+    Lists all machine learning model endpoint versions.
+
+    This is a wrapper function for the following API: `Endpoint - List ML Model Endpoint Versions <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/list-ml-model-endpoint-versions>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the ML model endpoint versions within a workspace.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    model_id = resolve_item_id(item=ml_model, type="MLModel", workspace=workspace)
+
+    columns = {
+        "Version Name": "string",
+        "Status": "string",
+        "Type": "string",
+        "Name": "string",
+        "Required": "bool",
+        "Scale Rule": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions",
+        client="fabric_sp",
+        uses_pagination=True,
+    )
+
+    rows = []
+    for r in responses:
+        for version in r.get("value", []):
+            base = {
+                "Version Name": version.get("versionName"),
+                "Status": version.get("status"),
+                "Scale Rule": version.get("scaleRule"),
+            }
+            for sig_type in ["inputSignature", "outputSignature"]:
+                for entry in version.get(sig_type, []):
+                    rows.append(
+                        {
+                            **base,
+                            "Signature Type": (
+                                "Input" if sig_type == "inputSignature" else "Output"
+                            ),
+                            "Name": entry.get("name"),
+                            "Type": entry.get("type"),
+                            "Required": entry.get("required"),
+                        }
+                    )
+            # Handle versions with no signatures
+            if "inputSignature" not in version and "outputSignature" not in version:
+                rows.append(base)
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+@log
+def score_ml_model_endpoint(
+    ml_model: str | UUID,
+    inputs: List[List[Any]],
+    orientation: str = "values",
+    workspace: Optional[str | UUID] = None,
+) -> dict:
+    """
+    Scores input data using the default version of the endpoint and returns results.
+
+    This is a wrapper function for the following API: `Endpoint - Score ML Model Endpoint <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    inputs: List[List[Any]]
+        Machine learning inputs to score in the form of Pandas dataset arrays that can include strings, numbers, integers and booleans.
+    orientation: str, default='values'
+        `Orientation <https://learn.microsoft.com/en-us/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint?tabs=HTTP#orientation>`_ of the input data.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    model_id = resolve_item_id(item=ml_model, type="MLModel", workspace=workspace)
+
+    orientation = _validate_orientation(orientation)
+    payload = {
+        "formatType": "dataframe",
+        "orientation": orientation,
+        "inputs": inputs,
+    }
+
+    result = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/score",
+        method="post",
+        client="fabric_sp",
+        payload=payload,
+        lro_return_json=True,
+        status_codes=[200, 202],
+    )
+
+    return result
+
+
+@log
+def score_ml_model_endpoint_version(
+    ml_model: str | UUID,
+    name: str,
+    inputs: List[List[Any]],
+    orientation: str = "values",
+    workspace: Optional[str | UUID] = None,
+) -> dict:
+    """
+    Scores input data using the default version of the endpoint and returns results.
+
+    This is a wrapper function for the following API: `Endpoint - Score ML Model Endpoint Version <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint-version>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    name: str
+        The ML model version name.
+    inputs: List[List[Any]]
+        Machine learning inputs to score in the form of Pandas dataset arrays that can include strings, numbers, integers and booleans.
+    orientation: str, default='values'
+        `Orientation <https://learn.microsoft.com/en-us/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint?tabs=HTTP#orientation>`_ of the input data.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    model_id = resolve_item_id(item=ml_model, type="MLModel", workspace=workspace)
+
+    orientation = _validate_orientation(orientation)
+    payload = {
+        "formatType": "dataframe",
+        "orientation": orientation,
+        "inputs": inputs,
+    }
+
+    result = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/{name}/score",
+        method="post",
+        client="fabric_sp",
+        payload=payload,
+        lro_return_json=True,
+        status_codes=[200, 202],
+    )
+
+    return result
+
+
+def _validate_orientation(orientation: str):
+
+    orientation = orientation.lower()
+    if orientation not in ["split", "values", "record", "index", "table"]:
+        raise ValueError(
+            f"Invalid orientation '{orientation}'. Must be one of 'split', 'values', 'record', 'index', or 'table'."
+        )
+    return orientation
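As a quick orientation to the new module above, here is a minimal, hypothetical usage sketch of the endpoint wrappers added in sempy_labs/ml_model/_functions.py. The function names and parameters are taken from the diff; the workspace, model, version names and feature rows are placeholders, and running it requires a Microsoft Fabric notebook environment with semantic-link-labs 0.11.3 installed.

    # Import directly from the module path shown in the diff above.
    from sempy_labs.ml_model._functions import (
        list_ml_model_endpoint_versions,
        activate_ml_model_endpoint_version,
        score_ml_model_endpoint,
    )

    workspace = "Sales Workspace"   # placeholder workspace name or UUID
    model = "ChurnModel"            # placeholder ML model name or UUID

    # Inspect which endpoint versions exist and their status.
    versions = list_ml_model_endpoint_versions(ml_model=model, workspace=workspace)
    print(versions)

    # Activate a specific version's endpoint (placeholder version name).
    activate_ml_model_endpoint_version(ml_model=model, name="1", workspace=workspace)

    # Score two rows against the default endpoint version; "values" is one of the
    # orientations accepted by _validate_orientation in the diff above.
    result = score_ml_model_endpoint(
        ml_model=model,
        inputs=[[34, "EU", 3], [51, "US", 7]],  # placeholder feature rows
        orientation="values",
        workspace=workspace,
    )
    print(result)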
sempy_labs/report/__init__.py
CHANGED
@@ -1,34 +1,34 @@
-from
+from ._save_report import (
     save_report_as_pbip,
 )
-from
+from ._reportwrapper import (
     ReportWrapper,
     connect_report,
 )
-from
+from ._paginated import (
     get_report_datasources,
 )
-from
+from ._generate_report import (
     create_report_from_reportjson,
     get_report_definition,
     update_report_from_reportjson,
     create_model_bpa_report,
 )
-from
-from
+from ._download_report import download_report
+from ._report_functions import (
     get_report_json,
     # report_dependency_tree,
     clone_report,
     launch_report,
     # translate_report_titles
 )
-from
+from ._report_rebind import (
     report_rebind,
     report_rebind_all,
 )
-from
-from
-from
+from ._report_bpa_rules import report_bpa_rules
+from ._report_bpa import run_report_bpa
+from ._export_report import (
     export_report,
 )

@@ -1,6 +1,6 @@
 import sempy_labs._icons as icons
 from typing import Optional
-from
+from .._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_lakehouse_name_and_id,
     _base_api,

@@ -8,7 +8,7 @@ from sempy_labs._helper_functions import (
     _mount,
     resolve_workspace_name,
 )
-from
+from ..lakehouse._lakehouse import lakehouse_attached
 from uuid import UUID
 from sempy._utils._log import log

@@ -2,7 +2,7 @@ import sempy.fabric as fabric
 import json
 import os
 import time
-from
+from .._helper_functions import (
     generate_embedded_filter,
     resolve_workspace_name_and_id,
     _base_api,

@@ -12,7 +12,7 @@ from typing import Optional
 from sempy._utils._log import log
 import sempy_labs._icons as icons
 from uuid import UUID
-from
+from ._report_functions import (
     list_report_visuals,
     list_report_pages,
 )
sempy_labs/report/_paginated.py
CHANGED
sempy_labs/report/_report_bpa.py
CHANGED
@@ -2,8 +2,9 @@ from typing import Optional
 import pandas as pd
 import datetime
 from sempy._utils._log import log
-from
-from
+from ._reportwrapper import connect_report
+from ._report_bpa_rules import report_bpa_rules
+from .._helper_functions import (
     format_dax_object_name,
     save_as_delta_table,
     resolve_item_name_and_id,

@@ -11,7 +12,7 @@ from sempy_labs._helper_functions import (
     _get_column_aggregate,
     resolve_workspace_name_and_id,
 )
-from
+from ..lakehouse import get_lakehouse_tables, lakehouse_attached
 import sempy_labs._icons as icons
 from IPython.display import display, HTML
 from uuid import UUID

@@ -6,9 +6,9 @@ import copy
 from anytree import Node, RenderTree
 from powerbiclient import Report
 from pyspark.sql.functions import col, flatten
-from
-from
-from
+from ._generate_report import update_report_from_reportjson
+from ..lakehouse._lakehouse import lakehouse_attached
+from .._helper_functions import (
     resolve_report_id,
     language_validate,
     resolve_workspace_name_and_id,

@@ -1,13 +1,13 @@
 import sempy.fabric as fabric
 from typing import Optional
 import pandas as pd
-from
+from .._helper_functions import (
     format_dax_object_name,
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
 )
-from
-from
+from ._reportwrapper import ReportWrapper
+from .._list_functions import list_reports_using_semantic_model
 from uuid import UUID