semantic-link-labs 0.11.2__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (93)
  1. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/METADATA +7 -6
  2. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/RECORD +90 -84
  3. sempy_labs/__init__.py +18 -18
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_authentication.py +81 -32
  6. sempy_labs/_capacities.py +2 -2
  7. sempy_labs/_capacity_migration.py +4 -4
  8. sempy_labs/_clear_cache.py +1 -1
  9. sempy_labs/_connections.py +107 -70
  10. sempy_labs/_dashboards.py +6 -2
  11. sempy_labs/_data_pipelines.py +1 -1
  12. sempy_labs/_dataflows.py +1 -1
  13. sempy_labs/_dax.py +3 -3
  14. sempy_labs/_delta_analyzer.py +4 -4
  15. sempy_labs/_delta_analyzer_history.py +1 -1
  16. sempy_labs/_deployment_pipelines.py +1 -1
  17. sempy_labs/_environments.py +1 -1
  18. sempy_labs/_eventhouses.py +9 -3
  19. sempy_labs/_eventstreams.py +1 -1
  20. sempy_labs/_external_data_shares.py +56 -2
  21. sempy_labs/_gateways.py +14 -7
  22. sempy_labs/_generate_semantic_model.py +7 -12
  23. sempy_labs/_git.py +1 -1
  24. sempy_labs/_graphQL.py +1 -1
  25. sempy_labs/_helper_functions.py +293 -22
  26. sempy_labs/_job_scheduler.py +12 -1
  27. sempy_labs/_kql_databases.py +1 -1
  28. sempy_labs/_kql_querysets.py +10 -2
  29. sempy_labs/_kusto.py +2 -2
  30. sempy_labs/_labels.py +126 -0
  31. sempy_labs/_list_functions.py +2 -2
  32. sempy_labs/_managed_private_endpoints.py +1 -1
  33. sempy_labs/_mirrored_databases.py +40 -16
  34. sempy_labs/_mirrored_warehouses.py +1 -1
  35. sempy_labs/_ml_experiments.py +1 -1
  36. sempy_labs/_model_bpa.py +6 -6
  37. sempy_labs/_model_bpa_bulk.py +3 -3
  38. sempy_labs/_model_dependencies.py +1 -1
  39. sempy_labs/_mounted_data_factories.py +3 -3
  40. sempy_labs/_notebooks.py +153 -3
  41. sempy_labs/_query_scale_out.py +2 -2
  42. sempy_labs/_refresh_semantic_model.py +1 -1
  43. sempy_labs/_semantic_models.py +15 -3
  44. sempy_labs/_spark.py +1 -1
  45. sempy_labs/_sql.py +3 -3
  46. sempy_labs/_sql_endpoints.py +5 -3
  47. sempy_labs/_sqldatabase.py +5 -1
  48. sempy_labs/_tags.py +3 -1
  49. sempy_labs/_translations.py +7 -360
  50. sempy_labs/_user_delegation_key.py +2 -2
  51. sempy_labs/_utils.py +27 -0
  52. sempy_labs/_vertipaq.py +3 -3
  53. sempy_labs/_vpax.py +1 -1
  54. sempy_labs/_warehouses.py +5 -0
  55. sempy_labs/_workloads.py +1 -1
  56. sempy_labs/_workspace_identity.py +1 -1
  57. sempy_labs/_workspaces.py +145 -11
  58. sempy_labs/admin/__init__.py +6 -0
  59. sempy_labs/admin/_capacities.py +34 -11
  60. sempy_labs/admin/_items.py +2 -2
  61. sempy_labs/admin/_tenant_keys.py +89 -0
  62. sempy_labs/directlake/_dl_helper.py +5 -2
  63. sempy_labs/graph/_users.py +3 -5
  64. sempy_labs/lakehouse/__init__.py +4 -0
  65. sempy_labs/lakehouse/_helper.py +18 -9
  66. sempy_labs/lakehouse/_lakehouse.py +18 -9
  67. sempy_labs/lakehouse/_materialized_lake_views.py +76 -0
  68. sempy_labs/lakehouse/_shortcuts.py +8 -2
  69. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +38 -47
  70. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +12 -22
  71. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +7 -11
  72. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +14 -23
  73. sempy_labs/ml_model/__init__.py +23 -0
  74. sempy_labs/ml_model/_functions.py +427 -0
  75. sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +9 -0
  76. sempy_labs/report/_bpareporttemplate/.platform +11 -0
  77. sempy_labs/report/_download_report.py +4 -1
  78. sempy_labs/report/_export_report.py +12 -5
  79. sempy_labs/report/_generate_report.py +11 -3
  80. sempy_labs/report/_paginated.py +21 -15
  81. sempy_labs/report/_report_functions.py +19 -11
  82. sempy_labs/report/_report_rebind.py +21 -10
  83. sempy_labs/report/_reportwrapper.py +1 -1
  84. sempy_labs/theme/_org_themes.py +5 -6
  85. sempy_labs/tom/_model.py +13 -19
  86. sempy_labs/variable_library/__init__.py +19 -0
  87. sempy_labs/variable_library/_functions.py +403 -0
  88. sempy_labs/_dax_query_view.py +0 -57
  89. sempy_labs/_ml_models.py +0 -111
  90. sempy_labs/_variable_libraries.py +0 -92
  91. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/WHEEL +0 -0
  92. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/licenses/LICENSE +0 -0
  93. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/top_level.txt +0 -0
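
The list above also shows a reorganization: the flat modules sempy_labs/_ml_models.py and sempy_labs/_variable_libraries.py are removed and replaced by the new sempy_labs/ml_model and sempy_labs/variable_library subpackages, each with its own __init__.py and _functions.py. A minimal migration sketch, assuming the new __init__.py files re-export the functions shown in the diffs below; the exact exported names and the pre-0.12.0 import path are not confirmed by this diff:

    # Hypothetical pre-0.12.0 import path (the old flat module is removed in this release):
    # from sempy_labs import list_ml_models

    # 0.12.0: the ML model helpers live in the new subpackage (assumed re-export).
    from sempy_labs.ml_model import list_ml_models

    df = list_ml_models(workspace="My Workspace")  # workspace name or UUID
    print(df)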

sempy_labs/ml_model/_functions.py (new file)
@@ -0,0 +1,427 @@
+import pandas as pd
+from typing import Any, Optional, List
+from .._helper_functions import (
+    _update_dataframe_datatypes,
+    resolve_item_id,
+    resolve_item_name_and_id,
+    resolve_workspace_id,
+    _base_api,
+    delete_item,
+    _create_dataframe,
+    create_item,
+    resolve_workspace_name_and_id,
+)
+from uuid import UUID
+from sempy._utils._log import log
+import sempy_labs._icons as icons
+
+
+@log
+def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+    """
+    Shows the ML models within a workspace.
+
+    This is a wrapper function for the following API: `Items - List ML Models <https://learn.microsoft.com/rest/api/fabric/mlmodel/items/list-ml-models>`_.
+
+    Parameters
+    ----------
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the ML models within a workspace.
+    """
+
+    columns = {
+        "ML Model Name": "string",
+        "ML Model Id": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlModels",
+        status_codes=200,
+        uses_pagination=True,
+    )
+
+    rows = []
+    for r in responses:
+        for v in r.get("value", []):
+            model_id = v.get("id")
+            modelName = v.get("displayName")
+            desc = v.get("description")
+
+            rows.append(
+                {
+                    "ML Model Name": modelName,
+                    "ML Model Id": model_id,
+                    "Description": desc,
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
+
+    return df
+
+
+@log
+def create_ml_model(
+    name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
+):
+    """
+    Creates a Fabric ML model.
+
+    This is a wrapper function for the following API: `Items - Create ML Model <https://learn.microsoft.com/rest/api/fabric/mlmodel/items/create-ml-model>`_.
+
+    Parameters
+    ----------
+    name: str
+        Name of the ML model.
+    description : str, default=None
+        A description of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    create_item(name=name, description=description, type="MLModel", workspace=workspace)
+
+
+@log
+def delete_ml_model(ml_model: str | UUID, workspace: Optional[str | UUID] = None):
+    """
+    Deletes a Fabric ML model.
+
+    This is a wrapper function for the following API: `Items - Delete ML Model <https://learn.microsoft.com/rest/api/fabric/mlmodel/items/delete-ml-model>`_.
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    delete_item(item=ml_model, type="MLModel", workspace=workspace)
+
+
+@log
+def activate_ml_model_endpoint_version(
+    ml_model: str | UUID, name: str, workspace: Optional[str | UUID] = None
+):
+    """
+    Activates the specified model version endpoint.
+
+    This is a wrapper function for the following API: `Endpoint - Activate ML Model Endpoint Version <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/activate-ml-model-endpoint-version>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    name: str
+        The ML model version name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (model_name, model_id) = resolve_item_name_and_id(
+        item=ml_model, type="MLModel", workspace=workspace
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/{name}/activate",
+        method="post",
+        client="fabric_sp",
+        lro_return_status_code=True,
+        status_codes=[200, 202],
+    )
+
+    print(
+        f"{icons.green_dot} The {model_name} model version {name} has been activated in the {workspace_name} workspace."
+    )
+
+
+@log
+def deactivate_ml_model_endpoint_version(
+    ml_model: str | UUID, name: str, workspace: Optional[str | UUID] = None
+):
+    """
+    Deactivates the specified model version endpoint.
+
+    This is a wrapper function for the following API: `Endpoint - Deactivate ML Model Endpoint Version <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/deactivate-ml-model-endpoint-version>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    name: str
+        The ML model version name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (model_name, model_id) = resolve_item_name_and_id(
+        item=ml_model, type="MLModel", workspace=workspace
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/{name}/deactivate",
+        method="post",
+        client="fabric_sp",
+        lro_return_status_code=True,
+        status_codes=[200, 202],
+    )
+
+    print(
+        f"{icons.green_dot} The {model_name} model version {name} has been deactivated in the {workspace_name} workspace."
+    )
+
+
+@log
+def deactivate_all_ml_model_endpoint_versions(
+    ml_model: str | UUID, workspace: Optional[str | UUID] = None
+):
+    """
+    Deactivates the specified machine learning model and its version's endpoints.
+
+    This is a wrapper function for the following API: `Endpoint - Deactivate All ML Model Endpoint Versions <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/deactivate-all-ml-model-endpoint-versions>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (model_name, model_id) = resolve_item_name_and_id(
+        item=ml_model, type="MLModel", workspace=workspace
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/deactivateAll",
+        method="post",
+        client="fabric_sp",
+        lro_return_status_code=True,
+        status_codes=[200, 202],
+    )
+
+    print(
+        f"{icons.green_dot} All endpoint versions of the {model_name} model within the {workspace_name} workspace have been deactivated."
+    )
+
+
+@log
+def list_ml_model_endpoint_versions(
+    ml_model: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
+    """
+    Lists all machine learning model endpoint versions.
+
+    This is a wrapper function for the following API: `Endpoint - List ML Model Endpoint Versions <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/list-ml-model-endpoint-versions>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the ML model endpoint versions within a workspace.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    model_id = resolve_item_id(item=ml_model, type="MLModel", workspace=workspace)
+
+    columns = {
+        "Version Name": "string",
+        "Status": "string",
+        "Type": "string",
+        "Name": "string",
+        "Required": "bool",
+        "Scale Rule": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions",
+        client="fabric_sp",
+        uses_pagination=True,
+    )
+
+    rows = []
+    for r in responses:
+        for version in r.get("value", []):
+            base = {
+                "Version Name": version.get("versionName"),
+                "Status": version.get("status"),
+                "Scale Rule": version.get("scaleRule"),
+            }
+            for sig_type in ["inputSignature", "outputSignature"]:
+                for entry in version.get(sig_type, []):
+                    rows.append(
+                        {
+                            **base,
+                            "Signature Type": (
+                                "Input" if sig_type == "inputSignature" else "Output"
+                            ),
+                            "Name": entry.get("name"),
+                            "Type": entry.get("type"),
+                            "Required": entry.get("required"),
+                        }
+                    )
+            # Handle versions with no signatures
+            if "inputSignature" not in version and "outputSignature" not in version:
+                rows.append(base)
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+@log
+def score_ml_model_endpoint(
+    ml_model: str | UUID,
+    inputs: List[List[Any]],
+    orientation: str = "values",
+    workspace: Optional[str | UUID] = None,
+) -> dict:
+    """
+    Scores input data using the default version of the endpoint and returns results.
+
+    This is a wrapper function for the following API: `Endpoint - Score ML Model Endpoint <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    inputs: List[List[Any]]
+        Machine learning inputs to score in the form of Pandas dataset arrays that can include strings, numbers, integers and booleans.
+    orientation: str, default='values'
+        `Orientation <https://learn.microsoft.com/en-us/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint?tabs=HTTP#orientation>`_ of the input data.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    model_id = resolve_item_id(item=ml_model, type="MLModel", workspace=workspace)
+
+    orientation = _validate_orientation(orientation)
+    payload = {
+        "formatType": "dataframe",
+        "orientation": orientation,
+        "inputs": inputs,
+    }
+
+    result = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/score",
+        method="post",
+        client="fabric_sp",
+        payload=payload,
+        lro_return_json=True,
+        status_codes=[200, 202],
+    )
+
+    return result
+
+
+@log
+def score_ml_model_endpoint_version(
+    ml_model: str | UUID,
+    name: str,
+    inputs: List[List[Any]],
+    orientation: str = "values",
+    workspace: Optional[str | UUID] = None,
+) -> dict:
+    """
+    Scores input data using the default version of the endpoint and returns results.
+
+    This is a wrapper function for the following API: `Endpoint - Score ML Model Endpoint Version <https://learn.microsoft.com/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint-version>`_.
+
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+    Parameters
+    ----------
+    ml_model: str | uuid.UUID
+        Name or ID of the ML model.
+    name: str
+        The ML model version name.
+    inputs: List[List[Any]]
+        Machine learning inputs to score in the form of Pandas dataset arrays that can include strings, numbers, integers and booleans.
+    orientation: str, default='values'
+        `Orientation <https://learn.microsoft.com/en-us/rest/api/fabric/mlmodel/endpoint/score-ml-model-endpoint?tabs=HTTP#orientation>`_ of the input data.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    model_id = resolve_item_id(item=ml_model, type="MLModel", workspace=workspace)
+
+    orientation = _validate_orientation(orientation)
+    payload = {
+        "formatType": "dataframe",
+        "orientation": orientation,
+        "inputs": inputs,
+    }
+
+    result = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mlmodels/{model_id}/endpoint/versions/{name}/score",
+        method="post",
+        client="fabric_sp",
+        payload=payload,
+        lro_return_json=True,
+        status_codes=[200, 202],
+    )
+
+    return result
+
+
+def _validate_orientation(orientation: str):
+
+    orientation = orientation.lower()
+    if orientation not in ["split", "values", "record", "index", "table"]:
+        raise ValueError(
+            f"Invalid orientation '{orientation}'. Must be one of 'split', 'values', 'record', 'index', or 'table'."
+        )
+    return orientation
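
Taken together, the new sempy_labs/ml_model/_functions.py wraps the Fabric ML model item and endpoint REST APIs. A short usage sketch built only from the signatures above; the workspace and model names are placeholders, and importing the functions from sempy_labs.ml_model assumes the new subpackage's __init__.py re-exports them (its contents are not shown in this diff):

    from sempy_labs.ml_model import (
        create_ml_model,
        list_ml_model_endpoint_versions,
        activate_ml_model_endpoint_version,
        score_ml_model_endpoint,
    )

    workspace = "Sales Analytics"  # placeholder workspace name

    # Create the item, inspect its endpoint versions, then activate one by name.
    create_ml_model(name="ChurnModel", description="Churn scoring", workspace=workspace)
    versions = list_ml_model_endpoint_versions(ml_model="ChurnModel", workspace=workspace)
    activate_ml_model_endpoint_version(ml_model="ChurnModel", name="1", workspace=workspace)

    # Score two rows; the default 'values' orientation sends plain row arrays.
    result = score_ml_model_endpoint(
        ml_model="ChurnModel",
        inputs=[[34, "F", 2], [51, "M", 7]],
        orientation="values",
        workspace=workspace,
    )
    print(result)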

sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json (new file)
@@ -0,0 +1,9 @@
+{
+  "version": "1.0",
+  "remoteArtifacts": [
+    {
+      "reportId": "6a91c344-dba8-4ebf-bedb-e07134f2a204"
+    }
+  ],
+  "securityBindingsSignature": "AQAAANCMnd8BFdERjHoAwE/Cl+sBAAAAMAVu7l76YU6Sl11KOiJdgQAAAAACAAAAAAAQZgAAAAEAACAAAAD1Ty+c7tZLT9/Sjguxhn/5ivfLWfUMNtgudyJ3BKzzZgAAAAAOgAAAAAIAACAAAABAyGf+iKMwvmNtcoRczjgebeIm0nlc9SFYTBYv3N7yvVADAADQN3JsIsvJUcKKc9WMo2EhiE21odezpd35tb+yudHwA/RYhypMA3fwiCTwArLefBZQ3vZ7KYh4MjihXS07i9o1XVqxAmDoli83Yhs/Wei+0HIfYOT5HOVHLUEul5x41Yx/7Bdfhc881SK6IoaJogBdwsiJVxPne+niMYqJQA6qLEPyJ33g6ucUxLA40lwdbN2cMWFzRn6tymmicDPwH0hcGPDMWwseAU+OuUeidkneRWhUGs6lkiiXLiO6kmY5RKq+S4FdtR19/e1B6EjAd94zSw+M5jQzYxn4eCZzWYiB+8Zd/jy07lfyLoGwagNqiQzbcNONqQd5w0n+8/+n4zGkBi2UojfRXoGaYDirQeZMTbt3pfPx2PArxsJ8dF0iT634pHiCF1ZFdtY+79JaFLUUG+Yf7JJv8IxuuuF74tAp4NYmuOij4hTDaf8Jafa5IoRVh7ICkwrjJyVQ8dG7I3tr0VvR+toBPG3Zlbm9BijcaBxhh1AINhnRAIkENOnPFQVH7l3Ml7B60H8Tst6ic3ihCCMYjtmN+NNWqFrJKT2trilh5TAxN+ei4H5fPwM9S7zb2bH5jhExcYTtoe7iCzxOvBsoYoFM+7FMjn9R2FATNICktYdbKDo1Of+u4oZ1+RsvBHQBVaMhSCoZ7+K5T5pZayNK3V2UID3wOuLOYvouxXXr4NVFsdgiV2oMuxTWeqmd/4bLxeqe3uTkGFmQU4mumF2YVsNbdO3IcRXhhrCCZ27ffzXBsH+lE3EhusD37Z0dsVbVVlG8AHXCh7Atgd8n73/eSI5mvj36DCOSRBVauItIATIa2FXueKA7vU6lRDYBSX8FCC2qkeN6dWpMoN5uXXEBsb5Yot1Fgrovcyl5lk7rh772Xon4FaIYFHZpklsY3JK5EXp3bF8UOE6ByN1ZucmkGgYRcTT/up/Uc86TLN6env9XXL4FQYPlReiOGWKBLVi9OoXGRLDshspniULtV3EwQ6WsjF2AyQ+WdLj3bbWKzG5Mg9jvANLrjycZAGWskh4X5JDGiv4TiJmnYQ/xPZAKKiowpVIHikLeG76uXFI+bxtpihV9+DaEJy4UxisHQxwuvUsQs38u3SHgpJmT8CNssZl41+T/IJdoQwJFLUAAAACnUQZGV9DvcOyrj8HBpXBVB5PuOQDxLB4HZOevHqCB5dc5z787E93B51QmN7I15fF6GCdWwN5f94gv1er2dtN3"
+}

sempy_labs/report/_bpareporttemplate/.platform (new file)
@@ -0,0 +1,11 @@
+{
+  "$schema": "https://developer.microsoft.com/json-schemas/fabric/gitIntegration/platformProperties/2.0.0/schema.json",
+  "metadata": {
+    "type": "Report",
+    "displayName": "BPAReport"
+  },
+  "config": {
+    "version": "2.0",
+    "logicalId": "a201f2cd-fd25-465f-bfbc-33b151e38b31"
+  }
+}

sempy_labs/report/_download_report.py
@@ -25,6 +25,8 @@ def download_report(

     This is a wrapper function for the following API: `Reports - Export Report In Group <https://learn.microsoft.com/rest/api/power-bi/reports/export-report-in-group>`_.

+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     report: str | uuid.UUID
@@ -59,7 +61,8 @@ def download_report(
     report_id = resolve_item_id(item=report, type="Report", workspace=workspace)

     response = _base_api(
-        request=f"v1.0/myorg/groups/{workspace_id}/reports/{report_id}/Export?downloadType={download_type}"
+        request=f"v1.0/myorg/groups/{workspace_id}/reports/{report_id}/Export?downloadType={download_type}",
+        client="fabric_sp",
     )

     # Save file to the attached lakehouse

sempy_labs/report/_export_report.py
@@ -2,7 +2,7 @@ import sempy.fabric as fabric
 import json
 import os
 import time
-from .._helper_functions import (
+from sempy_labs._helper_functions import (
     generate_embedded_filter,
     resolve_workspace_name_and_id,
     _base_api,
@@ -12,7 +12,7 @@ from typing import Optional
 from sempy._utils._log import log
 import sempy_labs._icons as icons
 from uuid import UUID
-from ._report_functions import (
+from sempy_labs.report._report_functions import (
     list_report_visuals,
     list_report_pages,
 )
@@ -36,6 +36,8 @@ def export_report(

     This is a wrapper function for the following APIs: `Reports - Export To File In Group <https://learn.microsoft.com/rest/api/power-bi/reports/export-to-file-in-group>`_, `Reports - Get Export To File Status In Group <https://learn.microsoft.com/rest/api/power-bi/reports/get-export-to-file-status-in-group>`_, `Reports - Get File Of Export To File In Group <https://learn.microsoft.com/rest/api/power-bi/reports/get-file-of-export-to-file-in-group>`_.

+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     report : str
@@ -241,22 +243,27 @@ def export_report(
         method="post",
         payload=request_body,
         status_codes=202,
+        client="fabric_sp",
     )
     export_id = json.loads(response.content).get("id")

     get_status_url = f"{base_url}/exports/{export_id}"
-    response = _base_api(request=get_status_url, status_codes=[200, 202])
+    response = _base_api(
+        request=get_status_url, status_codes=[200, 202], client="fabric_sp"
+    )
     response_body = json.loads(response.content)
     while response_body["status"] not in ["Succeeded", "Failed"]:
         time.sleep(3)
-        response = _base_api(request=get_status_url, status_codes=[200, 202])
+        response = _base_api(
+            request=get_status_url, status_codes=[200, 202], client="fabric_sp"
+        )
         response_body = json.loads(response.content)
     if response_body["status"] == "Failed":
         raise ValueError(
             f"{icons.red_dot} The export for the '{report}' report within the '{workspace_name}' workspace in the '{export_format}' format has failed."
         )
     else:
-        response = _base_api(request=f"{get_status_url}/file")
+        response = _base_api(request=f"{get_status_url}/file", client="fabric_sp")
         print(
             f"{icons.in_progress} Saving the '{export_format}' export for the '{report}' report within the '{workspace_name}' workspace to the lakehouse..."
         )
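
The export_report changes above route every request through the fabric_sp client but keep the same poll-until-terminal loop around the export status endpoint. A stripped-down sketch of that polling pattern, with a hypothetical fetch_status callable standing in for the _base_api status request:

    import time

    def wait_for_export(fetch_status, interval_seconds=3):
        # fetch_status() is assumed to return a dict with a "status" key,
        # mirroring the parsed response body used in export_report.
        body = fetch_status()
        while body["status"] not in ["Succeeded", "Failed"]:
            time.sleep(interval_seconds)
            body = fetch_status()
        if body["status"] == "Failed":
            raise ValueError("The export failed.")
        return body

    # Example with a canned sequence of responses.
    responses = iter([{"status": "Running"}, {"status": "Succeeded"}])
    print(wait_for_export(lambda: next(responses), interval_seconds=0))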

sempy_labs/report/_generate_report.py
@@ -3,7 +3,7 @@ import pandas as pd
 import json
 import os
 from typing import Optional
-from .._helper_functions import (
+from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _conv_b64,
     resolve_dataset_name_and_id,
@@ -11,7 +11,7 @@ from .._helper_functions import (
     _update_dataframe_datatypes,
     _base_api,
     resolve_item_id,
-    get_item_definition,
+    _get_item_definition,
 )
 import sempy_labs._icons as icons
 from sempy._utils._log import log
@@ -31,6 +31,8 @@ def create_report_from_reportjson(

     This is a wrapper function for the following API: `Items - Create Report <https://learn.microsoft.com/rest/api/fabric/report/items/create-report>`_.

+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     report : str
@@ -113,6 +115,7 @@ def create_report_from_reportjson(
         payload=request_body,
         lro_return_status_code=True,
         status_codes=[201, 202],
+        client="fabric_sp",
     )

     print(
@@ -129,6 +132,8 @@ def update_report_from_reportjson(

     This is a wrapper function for the following API: `Items - Update Report Definition <https://learn.microsoft.com/rest/api/fabric/report/items/update-report-definition>`_.

+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     report : str | uuid.UUID
@@ -173,6 +178,7 @@ def update_report_from_reportjson(
         payload=payload,
         lro_return_status_code=True,
         status_codes=None,
+        client="fabric_sp",
     )

     print(
@@ -191,6 +197,8 @@ def get_report_definition(

     This is a wrapper function for the following API: `Items - Get Report Definition <https://learn.microsoft.com/rest/api/fabric/report/items/get-report-definition>`_.

+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     report : str | uuid.UUID
@@ -208,7 +216,7 @@ def get_report_definition(
         The collection of report definition files within a pandas dataframe.
     """

-    return get_item_definition(
+    return _get_item_definition(
         item=report,
         type="Report",
         workspace=workspace,

sempy_labs/report/_paginated.py
@@ -1,8 +1,8 @@
 from typing import Optional
 import pandas as pd
 from uuid import UUID
-from .._helper_functions import (
-    resolve_workspace_name_and_id,
+from sempy_labs._helper_functions import (
+    resolve_workspace_id,
     _base_api,
     resolve_item_id,
     _create_dataframe,
@@ -18,6 +18,8 @@ def get_report_datasources(
     """
     Returns a list of data sources for the specified paginated report (RDL) from the specified workspace.

+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     report : str | uuid.UUID
@@ -44,27 +46,31 @@ def get_report_datasources(
     }
     df = _create_dataframe(columns=columns)

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     report_id = resolve_item_id(
         item=report, type="PaginatedReport", workspace=workspace
     )

     response = _base_api(
-        request=f"v1.0/myorg/groups/{workspace_id}/reports/{report_id}/datasources"
+        request=f"v1.0/myorg/groups/{workspace_id}/reports/{report_id}/datasources",
+        client="fabric_sp",
     )

+    rows = []
     for i in response.json().get("value", []):
         conn = i.get("connectionDetails", {})
-        new_data = {
-            "Report Name": report,
-            "Report Id": report_id,
-            "Datasource Id": i.get("datasourceId"),
-            "Datasource Type": i.get("datasourceType"),
-            "Gateway Id": i.get("gatewayId"),
-            "Server": conn.get("server") if conn else None,
-            "Database": conn.get("database") if conn else None,
-        }
-
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+        rows.append(
+            {
+                "Report Name": report,
+                "Report Id": report_id,
+                "Datasource Id": i.get("datasourceId"),
+                "Datasource Type": i.get("datasourceType"),
+                "Gateway Id": i.get("gatewayId"),
+                "Server": conn.get("server") if conn else None,
+                "Database": conn.get("database") if conn else None,
+            }
+        )
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df
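
The get_report_datasources rewrite follows the same pattern applied across this release: accumulate plain dicts in a list and build the DataFrame once at the end, rather than calling pd.concat on every iteration. A generic sketch of the pattern with illustrative names only:

    import pandas as pd

    columns = {"Name": "string", "Id": "string"}

    def collect(items):
        # Building the frame once avoids pd.concat inside the loop, which
        # copies the whole frame on each iteration.
        rows = [{"Name": i.get("displayName"), "Id": i.get("id")} for i in items]
        if rows:
            return pd.DataFrame(rows, columns=list(columns.keys()))
        return pd.DataFrame(columns=list(columns.keys()))

    print(collect([{"displayName": "Sales", "id": "123"}]))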