semantic-link-labs 0.10.1__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of semantic-link-labs might be problematic. Click here for more details.

Files changed (94)
  1. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/METADATA +6 -5
  2. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/RECORD +94 -92
  3. sempy_labs/__init__.py +4 -0
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_capacities.py +2 -0
  6. sempy_labs/_connections.py +11 -0
  7. sempy_labs/_dashboards.py +9 -4
  8. sempy_labs/_data_pipelines.py +5 -0
  9. sempy_labs/_dataflows.py +284 -17
  10. sempy_labs/_daxformatter.py +2 -0
  11. sempy_labs/_delta_analyzer_history.py +4 -1
  12. sempy_labs/_deployment_pipelines.py +4 -0
  13. sempy_labs/_documentation.py +3 -0
  14. sempy_labs/_environments.py +10 -1
  15. sempy_labs/_eventhouses.py +12 -5
  16. sempy_labs/_eventstreams.py +11 -3
  17. sempy_labs/_external_data_shares.py +8 -2
  18. sempy_labs/_gateways.py +26 -5
  19. sempy_labs/_git.py +11 -0
  20. sempy_labs/_graphQL.py +10 -3
  21. sempy_labs/_helper_functions.py +62 -10
  22. sempy_labs/_job_scheduler.py +54 -7
  23. sempy_labs/_kql_databases.py +11 -2
  24. sempy_labs/_kql_querysets.py +11 -3
  25. sempy_labs/_list_functions.py +17 -2
  26. sempy_labs/_managed_private_endpoints.py +11 -2
  27. sempy_labs/_mirrored_databases.py +17 -3
  28. sempy_labs/_mirrored_warehouses.py +9 -3
  29. sempy_labs/_ml_experiments.py +11 -3
  30. sempy_labs/_ml_models.py +11 -3
  31. sempy_labs/_model_bpa_rules.py +2 -0
  32. sempy_labs/_mounted_data_factories.py +12 -8
  33. sempy_labs/_notebooks.py +3 -0
  34. sempy_labs/_refresh_semantic_model.py +1 -0
  35. sempy_labs/_semantic_models.py +6 -0
  36. sempy_labs/_spark.py +7 -0
  37. sempy_labs/_sql_endpoints.py +54 -31
  38. sempy_labs/_sqldatabase.py +13 -4
  39. sempy_labs/_tags.py +5 -1
  40. sempy_labs/_user_delegation_key.py +2 -0
  41. sempy_labs/_variable_libraries.py +3 -1
  42. sempy_labs/_warehouses.py +13 -3
  43. sempy_labs/_workloads.py +3 -0
  44. sempy_labs/_workspace_identity.py +3 -0
  45. sempy_labs/_workspaces.py +14 -1
  46. sempy_labs/admin/__init__.py +2 -0
  47. sempy_labs/admin/_activities.py +6 -5
  48. sempy_labs/admin/_apps.py +31 -31
  49. sempy_labs/admin/_artifacts.py +8 -3
  50. sempy_labs/admin/_basic_functions.py +5 -0
  51. sempy_labs/admin/_capacities.py +39 -28
  52. sempy_labs/admin/_datasets.py +51 -51
  53. sempy_labs/admin/_domains.py +17 -1
  54. sempy_labs/admin/_external_data_share.py +8 -2
  55. sempy_labs/admin/_git.py +14 -9
  56. sempy_labs/admin/_items.py +15 -2
  57. sempy_labs/admin/_reports.py +64 -65
  58. sempy_labs/admin/_shared.py +7 -1
  59. sempy_labs/admin/_tags.py +5 -0
  60. sempy_labs/admin/_tenant.py +5 -2
  61. sempy_labs/admin/_users.py +9 -3
  62. sempy_labs/admin/_workspaces.py +88 -0
  63. sempy_labs/directlake/_dl_helper.py +2 -0
  64. sempy_labs/directlake/_generate_shared_expression.py +2 -0
  65. sempy_labs/directlake/_get_directlake_lakehouse.py +2 -4
  66. sempy_labs/directlake/_get_shared_expression.py +2 -0
  67. sempy_labs/directlake/_guardrails.py +2 -0
  68. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +2 -0
  69. sempy_labs/directlake/_warm_cache.py +1 -0
  70. sempy_labs/graph/_groups.py +22 -7
  71. sempy_labs/graph/_teams.py +7 -2
  72. sempy_labs/graph/_users.py +1 -0
  73. sempy_labs/lakehouse/_blobs.py +1 -0
  74. sempy_labs/lakehouse/_get_lakehouse_tables.py +88 -27
  75. sempy_labs/lakehouse/_helper.py +2 -0
  76. sempy_labs/lakehouse/_lakehouse.py +38 -5
  77. sempy_labs/lakehouse/_livy_sessions.py +2 -1
  78. sempy_labs/lakehouse/_shortcuts.py +7 -1
  79. sempy_labs/migration/_direct_lake_to_import.py +2 -0
  80. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +4 -0
  81. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +2 -0
  82. sempy_labs/report/_download_report.py +2 -1
  83. sempy_labs/report/_generate_report.py +2 -0
  84. sempy_labs/report/_paginated.py +2 -0
  85. sempy_labs/report/_report_bpa.py +110 -122
  86. sempy_labs/report/_report_bpa_rules.py +2 -0
  87. sempy_labs/report/_report_functions.py +7 -0
  88. sempy_labs/report/_reportwrapper.py +64 -31
  89. sempy_labs/theme/__init__.py +12 -0
  90. sempy_labs/theme/_org_themes.py +96 -0
  91. sempy_labs/tom/_model.py +509 -34
  92. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/WHEEL +0 -0
  93. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/licenses/LICENSE +0 -0
  94. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/top_level.txt +0 -0
@@ -6,6 +6,7 @@ from sempy_labs.admin._workspaces import (
6
6
  add_user_to_workspace,
7
7
  delete_user_from_workspace,
8
8
  restore_deleted_workspace,
9
+ list_orphaned_workspaces,
9
10
  )
10
11
  from sempy_labs.admin._artifacts import (
11
12
  list_unused_artifacts,
@@ -139,6 +140,7 @@ __all__ = [
139
140
  "add_user_to_workspace",
140
141
  "delete_user_from_workspace",
141
142
  "restore_deleted_workspace",
143
+ "list_orphaned_workspaces",
142
144
  "list_capacity_users",
143
145
  "list_user_subscriptions",
144
146
  "list_report_subscriptions",
@@ -107,6 +107,8 @@ def list_activity_events(
107
107
 
108
108
  responses = _base_api(request=url, client="fabric_sp", uses_pagination=True)
109
109
 
110
+ dfs = []
111
+
110
112
  for r in responses:
111
113
  if return_dataframe:
112
114
  for i in r.get("activityEventEntities", []):
@@ -150,17 +152,16 @@ def list_activity_events(
150
152
  "Consumption Method": i.get("ConsumptionMethod"),
151
153
  "Artifact Kind": i.get("ArtifactKind"),
152
154
  }
153
- df = pd.concat(
154
- [df, pd.DataFrame(new_data, index=[0])],
155
- ignore_index=True,
156
- )
155
+ dfs.append(pd.DataFrame(new_data, index=[0]))
157
156
  else:
158
157
  response_json["activityEventEntities"].extend(
159
158
  r.get("activityEventEntities")
160
159
  )
161
160
 
162
161
  if return_dataframe:
163
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
162
+ if dfs:
163
+ df = pd.concat(dfs, ignore_index=True)
164
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
164
165
  return df
165
166
  else:
166
167
  return response_json
sempy_labs/admin/_apps.py CHANGED
@@ -9,8 +9,10 @@ from sempy_labs._helper_functions import (
9
9
  )
10
10
  from uuid import UUID
11
11
  import sempy_labs._icons as icons
12
+ from sempy._utils._log import log
12
13
 
13
14
 
15
+ @log
14
16
  def list_apps(
15
17
  top: Optional[int] = 1000,
16
18
  skip: Optional[int] = None,
@@ -56,26 +58,25 @@ def list_apps(
56
58
  url = _build_url(url, params)
57
59
  response = _base_api(request=url, client="fabric_sp")
58
60
 
59
- rows = []
61
+ dfs = []
60
62
  for v in response.json().get("value", []):
61
- rows.append(
62
- {
63
- "App Name": v.get("name"),
64
- "App Id": v.get("id"),
65
- "Description": v.get("description"),
66
- "Published By": v.get("publishedBy"),
67
- "Last Update": v.get("lastUpdate"),
68
- }
69
- )
70
-
71
- if rows:
72
- df = pd.DataFrame(rows, columns=list(columns.keys()))
73
-
74
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
63
+ new_data = {
64
+ "App Name": v.get("name"),
65
+ "App Id": v.get("id"),
66
+ "Description": v.get("description"),
67
+ "Published By": v.get("publishedBy"),
68
+ "Last Update": v.get("lastUpdate"),
69
+ }
70
+ dfs.append(pd.DataFrame(new_data, index=[0]))
71
+
72
+ if dfs:
73
+ df = pd.concat(dfs, ignore_index=True)
74
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
75
75
 
76
76
  return df
77
77
 
78
78
 
79
+ @log
79
80
  def _resolve_app_id(app: str | UUID) -> str:
80
81
  if _is_valid_uuid(app):
81
82
  return app
@@ -87,6 +88,7 @@ def _resolve_app_id(app: str | UUID) -> str:
87
88
  return df_filt["App Id"].iloc[0]
88
89
 
89
90
 
91
+ @log
90
92
  def list_app_users(app: str | UUID) -> pd.DataFrame:
91
93
  """
92
94
  Shows a list of users that have access to the specified app.
@@ -122,22 +124,20 @@ def list_app_users(app: str | UUID) -> pd.DataFrame:
122
124
  url = f"/v1.0/myorg/admin/apps/{app_id}/users"
123
125
  response = _base_api(request=url, client="fabric_sp")
124
126
 
125
- rows = []
127
+ dfs = []
126
128
  for v in response.json().get("value", []):
127
- rows.append(
128
- {
129
- "User Name": v.get("displayName"),
130
- "Email Address": v.get("emailAddress"),
131
- "App User Access Right": v.get("appUserAccessRight"),
132
- "Identifier": v.get("identifier"),
133
- "Graph Id": v.get("graphId"),
134
- "Principal Type": v.get("principalType"),
135
- }
136
- )
137
-
138
- if rows:
139
- df = pd.DataFrame(rows, columns=list(columns.keys()))
140
-
141
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
129
+ new_data = {
130
+ "User Name": v.get("displayName"),
131
+ "Email Address": v.get("emailAddress"),
132
+ "App User Access Right": v.get("appUserAccessRight"),
133
+ "Identifier": v.get("identifier"),
134
+ "Graph Id": v.get("graphId"),
135
+ "Principal Type": v.get("principalType"),
136
+ }
137
+ dfs.append(pd.DataFrame(new_data, index=[0]))
138
+
139
+ if dfs:
140
+ df = pd.concat(dfs, ignore_index=True)
141
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
142
142
 
143
143
  return df
@@ -9,8 +9,10 @@ from sempy_labs.admin._basic_functions import (
9
9
  _create_dataframe,
10
10
  _update_dataframe_datatypes,
11
11
  )
12
+ from sempy._utils._log import log
12
13
 
13
14
 
15
+ @log
14
16
  def list_unused_artifacts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
15
17
  """
16
18
  Returns a list of datasets, reports, and dashboards that have not been used within 30 days for the specified workspace.
@@ -25,7 +27,7 @@ def list_unused_artifacts(workspace: Optional[str | UUID] = None) -> pd.DataFram
25
27
  A pandas dataframe showing a list of datasets, reports, and dashboards that have not been used within 30 days for the specified workspace.
26
28
  """
27
29
 
28
- (workspace_name, workspace_id) = _resolve_workspace_name_and_id(workspace)
30
+ (_, workspace_id) = _resolve_workspace_name_and_id(workspace)
29
31
 
30
32
  columns = {
31
33
  "Artifact Name": "string",
@@ -44,6 +46,7 @@ def list_unused_artifacts(workspace: Optional[str | UUID] = None) -> pd.DataFram
44
46
  uses_pagination=True,
45
47
  )
46
48
 
49
+ dfs = []
47
50
  for r in responses:
48
51
  for i in r.get("unusedArtifactEntities", []):
49
52
  new_data = {
@@ -55,8 +58,10 @@ def list_unused_artifacts(workspace: Optional[str | UUID] = None) -> pd.DataFram
55
58
  "Last Accessed Date Time": i.get("lastAccessedDateTime"),
56
59
  }
57
60
 
58
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
61
+ dfs.append(pd.DataFrame(new_data, index=[0]))
59
62
 
60
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
63
+ if dfs:
64
+ df = pd.concat(dfs, ignore_index=True)
65
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
61
66
 
62
67
  return df
@@ -249,6 +249,7 @@ def unassign_workspaces_from_capacity(
249
249
  )
250
250
 
251
251
 
252
+ @log
252
253
  def list_modified_workspaces(
253
254
  modified_since: Optional[str] = None,
254
255
  exclude_inactive_workspaces: Optional[bool] = False,
@@ -298,6 +299,7 @@ def list_modified_workspaces(
298
299
  return df
299
300
 
300
301
 
302
+ @log
301
303
  def list_workspace_access_details(
302
304
  workspace: Optional[Union[str, UUID]] = None,
303
305
  ) -> pd.DataFrame:
@@ -350,6 +352,7 @@ def list_workspace_access_details(
350
352
  return df
351
353
 
352
354
 
355
+ @log
353
356
  def _resolve_workspace_name(workspace_id: Optional[UUID] = None) -> str:
354
357
  from sempy_labs._helper_functions import _get_fabric_context_setting
355
358
  from sempy.fabric.exceptions import FabricHTTPException
@@ -372,6 +375,7 @@ def _resolve_workspace_name(workspace_id: Optional[UUID] = None) -> str:
372
375
  return workspace_name
373
376
 
374
377
 
378
+ @log
375
379
  def _resolve_workspace_name_and_id(
376
380
  workspace: str | UUID,
377
381
  ) -> Tuple[str, UUID]:
@@ -397,6 +401,7 @@ def _resolve_workspace_name_and_id(
397
401
  return workspace_name, workspace_id
398
402
 
399
403
 
404
+ @log
400
405
  def list_workspace_users(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
401
406
  """
402
407
  Shows a list of users that have access to the specified workspace.
@@ -13,6 +13,7 @@ from sempy_labs._helper_functions import (
13
13
  )
14
14
 
15
15
 
16
+ @log
16
17
  def patch_capacity(capacity: str | UUID, tenant_key_id: UUID):
17
18
  """
18
19
  Changes specific capacity information. Currently, this API call only supports changing the capacity's encryption key.
@@ -44,6 +45,7 @@ def patch_capacity(capacity: str | UUID, tenant_key_id: UUID):
44
45
  )
45
46
 
46
47
 
48
+ @log
47
49
  def _resolve_capacity_name_and_id(
48
50
  capacity: str | UUID,
49
51
  ) -> Tuple[str, UUID]:
@@ -58,6 +60,7 @@ def _resolve_capacity_name_and_id(
58
60
  return capacity_name, capacity_id
59
61
 
60
62
 
63
+ @log
61
64
  def _resolve_capacity_id(
62
65
  capacity: str | UUID,
63
66
  ) -> UUID:
@@ -76,6 +79,7 @@ def _resolve_capacity_id(
76
79
  return capacity_id
77
80
 
78
81
 
82
+ @log
79
83
  def _list_capacities_meta() -> pd.DataFrame:
80
84
  """
81
85
  Shows the a list of capacities and their properties. This function is the admin version.
@@ -102,6 +106,7 @@ def _list_capacities_meta() -> pd.DataFrame:
102
106
  request="/v1.0/myorg/admin/capacities", client="fabric_sp", uses_pagination=True
103
107
  )
104
108
 
109
+ dfs = []
105
110
  for r in responses:
106
111
  for i in r.get("value", []):
107
112
  new_data = {
@@ -112,11 +117,15 @@ def _list_capacities_meta() -> pd.DataFrame:
112
117
  "State": i.get("state"),
113
118
  "Admins": [i.get("admins", [])],
114
119
  }
115
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
120
+ dfs.append(pd.DataFrame(new_data, index=[0]))
121
+
122
+ if dfs:
123
+ df = pd.concat(dfs, ignore_index=True)
116
124
 
117
125
  return df
118
126
 
119
127
 
128
+ @log
120
129
  def get_capacity_assignment_status(
121
130
  workspace: Optional[str | UUID] = None,
122
131
  ) -> pd.DataFrame:
@@ -178,6 +187,7 @@ def get_capacity_assignment_status(
178
187
  return df
179
188
 
180
189
 
190
+ @log
181
191
  def get_capacity_state(capacity: Optional[str | UUID] = None):
182
192
  """
183
193
  Gets the state of a capacity.
@@ -248,6 +258,7 @@ def list_capacities(
248
258
  request="/v1.0/myorg/admin/capacities", client="fabric_sp", uses_pagination=True
249
259
  )
250
260
 
261
+ dfs = []
251
262
  for r in responses:
252
263
  for i in r.get("value", []):
253
264
  new_data = {
@@ -258,7 +269,10 @@ def list_capacities(
258
269
  "State": i.get("state"),
259
270
  "Admins": [i.get("admins", [])],
260
271
  }
261
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
272
+ dfs.append(pd.DataFrame(new_data, index=[0]))
273
+
274
+ if dfs:
275
+ df = pd.concat(dfs, ignore_index=True)
262
276
 
263
277
  if capacity is not None:
264
278
  if _is_valid_uuid(capacity):
@@ -269,6 +283,7 @@ def list_capacities(
269
283
  return df
270
284
 
271
285
 
286
+ @log
272
287
  def list_capacity_users(capacity: str | UUID) -> pd.DataFrame:
273
288
  """
274
289
  Shows a list of users that have access to the specified capacity.
@@ -288,7 +303,7 @@ def list_capacity_users(capacity: str | UUID) -> pd.DataFrame:
288
303
  A pandas dataframe showing a list of users that have access to the specified capacity.
289
304
  """
290
305
 
291
- (capacity_name, capacity_id) = _resolve_capacity_name_and_id(capacity)
306
+ (_, capacity_id) = _resolve_capacity_name_and_id(capacity)
292
307
 
293
308
  columns = {
294
309
  "User Name": "string",
@@ -307,25 +322,23 @@ def list_capacity_users(capacity: str | UUID) -> pd.DataFrame:
307
322
  request=f"/v1.0/myorg/admin/capacities/{capacity_id}/users", client="fabric_sp"
308
323
  )
309
324
 
310
- rows = []
325
+ dfs = []
311
326
  for v in response.json().get("value", []):
312
- rows.append(
313
- {
314
- "User Name": v.get("displayName"),
315
- "Email Address": v.get("emailAddress"),
316
- "Capacity User Access Right": v.get("capacityUserAccessRight"),
317
- "Identifier": v.get("identifier"),
318
- "Graph Id": v.get("graphId"),
319
- "Principal Type": v.get("principalType"),
320
- "User Type": v.get("userType"),
321
- "Profile": v.get("profile"),
322
- }
323
- )
324
-
325
- if rows:
326
- df = pd.DataFrame(rows, columns=list(columns.keys()))
327
+ new_data = {
328
+ "User Name": v.get("displayName"),
329
+ "Email Address": v.get("emailAddress"),
330
+ "Capacity User Access Right": v.get("capacityUserAccessRight"),
331
+ "Identifier": v.get("identifier"),
332
+ "Graph Id": v.get("graphId"),
333
+ "Principal Type": v.get("principalType"),
334
+ "User Type": v.get("userType"),
335
+ "Profile": v.get("profile"),
336
+ }
337
+ dfs.append(pd.DataFrame(new_data, index=[0]))
327
338
 
328
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
339
+ if dfs:
340
+ df = pd.concat(dfs, ignore_index=True)
341
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
329
342
 
330
343
  return df
331
344
 
@@ -419,11 +432,10 @@ def get_refreshables(
419
432
 
420
433
  url = _build_url(url, params)
421
434
 
422
- responses = _base_api(request=url, client="fabric_sp")
435
+ response = _base_api(request=url, client="fabric_sp")
423
436
 
424
- refreshables = []
425
-
426
- for i in responses.json().get("value", []):
437
+ dfs = []
438
+ for i in response.json().get("value", []):
427
439
  last_refresh = i.get("lastRefresh", {})
428
440
  refresh_schedule = i.get("refreshSchedule", {})
429
441
  new_data = {
@@ -461,11 +473,10 @@ def get_refreshables(
461
473
  "Refresh Schedule Notify Option": refresh_schedule.get("notifyOption"),
462
474
  "Configured By": i.get("configuredBy"),
463
475
  }
476
+ dfs.append(pd.DataFrame(new_data, index=[0]))
464
477
 
465
- refreshables.append(new_data)
466
-
467
- if len(refreshables) > 0:
468
- df = pd.DataFrame(refreshables)
478
+ if dfs:
479
+ df = pd.concat(dfs, ignore_index=True)
469
480
  _update_dataframe_datatypes(dataframe=df, column_map=columns)
470
481
 
471
482
  return df
@@ -9,8 +9,10 @@ from sempy_labs._helper_functions import (
9
9
  )
10
10
  from uuid import UUID
11
11
  import sempy_labs._icons as icons
12
+ from sempy._utils._log import log
12
13
 
13
14
 
15
+ @log
14
16
  def list_datasets(
15
17
  top: Optional[int] = None,
16
18
  filter: Optional[str] = None,
@@ -77,46 +79,45 @@ def list_datasets(
77
79
  url = _build_url(url, params)
78
80
  response = _base_api(request=url, client="fabric_sp")
79
81
 
80
- rows = []
82
+ dfs = []
81
83
  for v in response.json().get("value", []):
82
- rows.append(
83
- {
84
- "Dataset Id": v.get("id"),
85
- "Dataset Name": v.get("name"),
86
- "Web URL": v.get("webUrl"),
87
- "Add Rows API Enabled": v.get("addRowsAPIEnabled"),
88
- "Configured By": v.get("configuredBy"),
89
- "Is Refreshable": v.get("isRefreshable"),
90
- "Is Effective Identity Required": v.get("isEffectiveIdentityRequired"),
91
- "Is Effective Identity Roles Required": v.get(
92
- "isEffectiveIdentityRolesRequired"
93
- ),
94
- "Target Storage Mode": v.get("targetStorageMode"),
95
- "Created Date": pd.to_datetime(v.get("createdDate")),
96
- "Content Provider Type": v.get("contentProviderType"),
97
- "Create Report Embed URL": v.get("createReportEmbedURL"),
98
- "QnA Embed URL": v.get("qnaEmbedURL"),
99
- "Upstream Datasets": v.get("upstreamDatasets", []),
100
- "Users": v.get("users", []),
101
- "Is In Place Sharing Enabled": v.get("isInPlaceSharingEnabled"),
102
- "Workspace Id": v.get("workspaceId"),
103
- "Auto Sync Read Only Replicas": v.get("queryScaleOutSettings", {}).get(
104
- "autoSyncReadOnlyReplicas"
105
- ),
106
- "Max Read Only Replicas": v.get("queryScaleOutSettings", {}).get(
107
- "maxReadOnlyReplicas"
108
- ),
109
- }
110
- )
111
-
112
- if rows:
113
- df = pd.DataFrame(rows, columns=list(columns.keys()))
114
-
115
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
84
+ new_data = {
85
+ "Dataset Id": v.get("id"),
86
+ "Dataset Name": v.get("name"),
87
+ "Web URL": v.get("webUrl"),
88
+ "Add Rows API Enabled": v.get("addRowsAPIEnabled"),
89
+ "Configured By": v.get("configuredBy"),
90
+ "Is Refreshable": v.get("isRefreshable"),
91
+ "Is Effective Identity Required": v.get("isEffectiveIdentityRequired"),
92
+ "Is Effective Identity Roles Required": v.get(
93
+ "isEffectiveIdentityRolesRequired"
94
+ ),
95
+ "Target Storage Mode": v.get("targetStorageMode"),
96
+ "Created Date": pd.to_datetime(v.get("createdDate")),
97
+ "Content Provider Type": v.get("contentProviderType"),
98
+ "Create Report Embed URL": v.get("createReportEmbedURL"),
99
+ "QnA Embed URL": v.get("qnaEmbedURL"),
100
+ "Upstream Datasets": v.get("upstreamDatasets", []),
101
+ "Users": v.get("users", []),
102
+ "Is In Place Sharing Enabled": v.get("isInPlaceSharingEnabled"),
103
+ "Workspace Id": v.get("workspaceId"),
104
+ "Auto Sync Read Only Replicas": v.get("queryScaleOutSettings", {}).get(
105
+ "autoSyncReadOnlyReplicas"
106
+ ),
107
+ "Max Read Only Replicas": v.get("queryScaleOutSettings", {}).get(
108
+ "maxReadOnlyReplicas"
109
+ ),
110
+ }
111
+ dfs.append(pd.DataFrame(new_data, index=[0]))
112
+
113
+ if dfs:
114
+ df = pd.concat(dfs, ignore_index=True)
115
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
116
116
 
117
117
  return df
118
118
 
119
119
 
120
+ @log
120
121
  def _resolve_dataset_id(dataset: str | UUID) -> str:
121
122
  if _is_valid_uuid(dataset):
122
123
  return dataset
@@ -128,6 +129,7 @@ def _resolve_dataset_id(dataset: str | UUID) -> str:
128
129
  return df_filt["Dataset Id"].iloc[0]
129
130
 
130
131
 
132
+ @log
131
133
  def list_dataset_users(dataset: str | UUID) -> pd.DataFrame:
132
134
  """
133
135
  Shows a list of users that have access to the specified dataset.
@@ -163,22 +165,20 @@ def list_dataset_users(dataset: str | UUID) -> pd.DataFrame:
163
165
  url = f"/v1.0/myorg/admin/datasets/{dataset_id}/users"
164
166
  response = _base_api(request=url, client="fabric_sp")
165
167
 
166
- rows = []
168
+ dfs = []
167
169
  for v in response.json().get("value", []):
168
- rows.append(
169
- {
170
- "User Name": v.get("displayName"),
171
- "Email Address": v.get("emailAddress"),
172
- "Dataset User Access Right": v.get("datasetUserAccessRight"),
173
- "Identifier": v.get("identifier"),
174
- "Graph Id": v.get("graphId"),
175
- "Principal Type": v.get("principalType"),
176
- }
177
- )
178
-
179
- if rows:
180
- df = pd.DataFrame(rows, columns=list(columns.keys()))
181
-
182
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
170
+ new_data = {
171
+ "User Name": v.get("displayName"),
172
+ "Email Address": v.get("emailAddress"),
173
+ "Dataset User Access Right": v.get("datasetUserAccessRight"),
174
+ "Identifier": v.get("identifier"),
175
+ "Graph Id": v.get("graphId"),
176
+ "Principal Type": v.get("principalType"),
177
+ }
178
+ dfs.append(pd.DataFrame(new_data, index=[0]))
179
+
180
+ if dfs:
181
+ df = pd.concat(dfs, ignore_index=True)
182
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
183
183
 
184
184
  return df
@@ -8,8 +8,10 @@ from sempy_labs._helper_functions import (
8
8
  _create_dataframe,
9
9
  _is_valid_uuid,
10
10
  )
11
+ from sempy._utils._log import log
11
12
 
12
13
 
14
+ @log
13
15
  def resolve_domain_id(domain: Optional[str | UUID] = None, **kwargs) -> UUID:
14
16
  """
15
17
  Obtains the domain Id for a given domain name.
@@ -45,6 +47,7 @@ def resolve_domain_id(domain: Optional[str | UUID] = None, **kwargs) -> UUID:
45
47
  return dfL_filt["Domain ID"].iloc[0]
46
48
 
47
49
 
50
+ @log
48
51
  def resolve_domain_name(domain: Optional[str | UUID], **kwargs) -> UUID:
49
52
  """
50
53
  Obtains the domain name for a given domain ID.
@@ -80,6 +83,7 @@ def resolve_domain_name(domain: Optional[str | UUID], **kwargs) -> UUID:
80
83
  return dfL_filt["Domain Name"].iloc[0]
81
84
 
82
85
 
86
+ @log
83
87
  def list_domains(non_empty_only: bool = False) -> pd.DataFrame:
84
88
  """
85
89
  Shows a list of domains.
@@ -115,6 +119,7 @@ def list_domains(non_empty_only: bool = False) -> pd.DataFrame:
115
119
 
116
120
  response = _base_api(request=url, client="fabric_sp")
117
121
 
122
+ dfs = []
118
123
  for v in response.json().get("domains", []):
119
124
  new_data = {
120
125
  "Domain ID": v.get("id"),
@@ -123,11 +128,15 @@ def list_domains(non_empty_only: bool = False) -> pd.DataFrame:
123
128
  "Parent Domain ID": v.get("parentDomainId"),
124
129
  "Contributors Scope": v.get("contributorsScope"),
125
130
  }
126
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
131
+ dfs.append(pd.DataFrame(new_data, index=[0]))
132
+
133
+ if dfs:
134
+ df = pd.concat(dfs, ignore_index=True)
127
135
 
128
136
  return df
129
137
 
130
138
 
139
+ @log
131
140
  def list_domain_workspaces(domain: Optional[str] = None, **kwargs) -> pd.DataFrame:
132
141
  """
133
142
  Shows a list of workspaces within the domain.
@@ -178,6 +187,7 @@ def list_domain_workspaces(domain: Optional[str] = None, **kwargs) -> pd.DataFra
178
187
  return df
179
188
 
180
189
 
190
+ @log
181
191
  def create_domain(
182
192
  domain_name: str,
183
193
  description: Optional[str] = None,
@@ -222,6 +232,7 @@ def create_domain(
222
232
  print(f"{icons.green_dot} The '{domain_name}' domain has been created.")
223
233
 
224
234
 
235
+ @log
225
236
  def delete_domain(domain: Optional[str | UUID], **kwargs):
226
237
  """
227
238
  Deletes a domain.
@@ -249,6 +260,7 @@ def delete_domain(domain: Optional[str | UUID], **kwargs):
249
260
  print(f"{icons.green_dot} The '{domain}' domain has been deleted.")
250
261
 
251
262
 
263
+ @log
252
264
  def update_domain(
253
265
  domain: Optional[str | UUID] = None,
254
266
  description: Optional[str] = None,
@@ -301,6 +313,7 @@ def update_domain(
301
313
  print(f"{icons.green_dot} The '{domain_name}' domain has been updated.")
302
314
 
303
315
 
316
+ @log
304
317
  def assign_domain_workspaces_by_capacities(
305
318
  domain: str | UUID,
306
319
  capacity_names: str | List[str],
@@ -369,6 +382,7 @@ def assign_domain_workspaces_by_capacities(
369
382
  )
370
383
 
371
384
 
385
+ @log
372
386
  def assign_domain_workspaces(domain: str | UUID, workspace_names: str | List[str]):
373
387
  """
374
388
  Assigns workspaces to the specified domain by workspace.
@@ -420,6 +434,7 @@ def assign_domain_workspaces(domain: str | UUID, workspace_names: str | List[str
420
434
  )
421
435
 
422
436
 
437
+ @log
423
438
  def unassign_all_domain_workspaces(domain: str | UUID):
424
439
  """
425
440
  Unassigns all workspaces from the specified domain.
@@ -446,6 +461,7 @@ def unassign_all_domain_workspaces(domain: str | UUID):
446
461
  )
447
462
 
448
463
 
464
+ @log
449
465
  def unassign_domain_workspaces(
450
466
  domain: str | UUID,
451
467
  workspace_names: str | List[str],
@@ -7,8 +7,10 @@ from sempy_labs._helper_functions import (
7
7
  _create_dataframe,
8
8
  _update_dataframe_datatypes,
9
9
  )
10
+ from sempy._utils._log import log
10
11
 
11
12
 
13
+ @log
12
14
  def list_external_data_shares() -> pd.DataFrame:
13
15
  """
14
16
  Lists external data shares in the tenant. This function is for admins.
@@ -39,6 +41,7 @@ def list_external_data_shares() -> pd.DataFrame:
39
41
 
40
42
  response = _base_api(request="/v1/admin/items/externalDataShares")
41
43
 
44
+ dfs = []
42
45
  for i in response.json().get("value", []):
43
46
  cp = i.get("creatorPrincipal", {})
44
47
  new_data = {
@@ -56,13 +59,16 @@ def list_external_data_shares() -> pd.DataFrame:
56
59
  "Invitation URL": i.get("invitationUrl"),
57
60
  }
58
61
 
59
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
62
+ dfs.append(pd.DataFrame(new_data, index=[0]))
60
63
 
61
- _update_dataframe_datatypes(dataframe=df, column_map=columns)
64
+ if dfs:
65
+ df = pd.concat(dfs, ignore_index=True)
66
+ _update_dataframe_datatypes(dataframe=df, column_map=columns)
62
67
 
63
68
  return df
64
69
 
65
70
 
71
+ @log
66
72
  def revoke_external_data_share(
67
73
  external_data_share_id: UUID, item_id: UUID, workspace: str | UUID
68
74
  ):