semantic-link-labs 0.10.1__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.
Files changed (94)
  1. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/METADATA +6 -5
  2. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/RECORD +94 -92
  3. sempy_labs/__init__.py +4 -0
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_capacities.py +2 -0
  6. sempy_labs/_connections.py +11 -0
  7. sempy_labs/_dashboards.py +9 -4
  8. sempy_labs/_data_pipelines.py +5 -0
  9. sempy_labs/_dataflows.py +284 -17
  10. sempy_labs/_daxformatter.py +2 -0
  11. sempy_labs/_delta_analyzer_history.py +4 -1
  12. sempy_labs/_deployment_pipelines.py +4 -0
  13. sempy_labs/_documentation.py +3 -0
  14. sempy_labs/_environments.py +10 -1
  15. sempy_labs/_eventhouses.py +12 -5
  16. sempy_labs/_eventstreams.py +11 -3
  17. sempy_labs/_external_data_shares.py +8 -2
  18. sempy_labs/_gateways.py +26 -5
  19. sempy_labs/_git.py +11 -0
  20. sempy_labs/_graphQL.py +10 -3
  21. sempy_labs/_helper_functions.py +62 -10
  22. sempy_labs/_job_scheduler.py +54 -7
  23. sempy_labs/_kql_databases.py +11 -2
  24. sempy_labs/_kql_querysets.py +11 -3
  25. sempy_labs/_list_functions.py +17 -2
  26. sempy_labs/_managed_private_endpoints.py +11 -2
  27. sempy_labs/_mirrored_databases.py +17 -3
  28. sempy_labs/_mirrored_warehouses.py +9 -3
  29. sempy_labs/_ml_experiments.py +11 -3
  30. sempy_labs/_ml_models.py +11 -3
  31. sempy_labs/_model_bpa_rules.py +2 -0
  32. sempy_labs/_mounted_data_factories.py +12 -8
  33. sempy_labs/_notebooks.py +3 -0
  34. sempy_labs/_refresh_semantic_model.py +1 -0
  35. sempy_labs/_semantic_models.py +6 -0
  36. sempy_labs/_spark.py +7 -0
  37. sempy_labs/_sql_endpoints.py +54 -31
  38. sempy_labs/_sqldatabase.py +13 -4
  39. sempy_labs/_tags.py +5 -1
  40. sempy_labs/_user_delegation_key.py +2 -0
  41. sempy_labs/_variable_libraries.py +3 -1
  42. sempy_labs/_warehouses.py +13 -3
  43. sempy_labs/_workloads.py +3 -0
  44. sempy_labs/_workspace_identity.py +3 -0
  45. sempy_labs/_workspaces.py +14 -1
  46. sempy_labs/admin/__init__.py +2 -0
  47. sempy_labs/admin/_activities.py +6 -5
  48. sempy_labs/admin/_apps.py +31 -31
  49. sempy_labs/admin/_artifacts.py +8 -3
  50. sempy_labs/admin/_basic_functions.py +5 -0
  51. sempy_labs/admin/_capacities.py +39 -28
  52. sempy_labs/admin/_datasets.py +51 -51
  53. sempy_labs/admin/_domains.py +17 -1
  54. sempy_labs/admin/_external_data_share.py +8 -2
  55. sempy_labs/admin/_git.py +14 -9
  56. sempy_labs/admin/_items.py +15 -2
  57. sempy_labs/admin/_reports.py +64 -65
  58. sempy_labs/admin/_shared.py +7 -1
  59. sempy_labs/admin/_tags.py +5 -0
  60. sempy_labs/admin/_tenant.py +5 -2
  61. sempy_labs/admin/_users.py +9 -3
  62. sempy_labs/admin/_workspaces.py +88 -0
  63. sempy_labs/directlake/_dl_helper.py +2 -0
  64. sempy_labs/directlake/_generate_shared_expression.py +2 -0
  65. sempy_labs/directlake/_get_directlake_lakehouse.py +2 -4
  66. sempy_labs/directlake/_get_shared_expression.py +2 -0
  67. sempy_labs/directlake/_guardrails.py +2 -0
  68. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +2 -0
  69. sempy_labs/directlake/_warm_cache.py +1 -0
  70. sempy_labs/graph/_groups.py +22 -7
  71. sempy_labs/graph/_teams.py +7 -2
  72. sempy_labs/graph/_users.py +1 -0
  73. sempy_labs/lakehouse/_blobs.py +1 -0
  74. sempy_labs/lakehouse/_get_lakehouse_tables.py +88 -27
  75. sempy_labs/lakehouse/_helper.py +2 -0
  76. sempy_labs/lakehouse/_lakehouse.py +38 -5
  77. sempy_labs/lakehouse/_livy_sessions.py +2 -1
  78. sempy_labs/lakehouse/_shortcuts.py +7 -1
  79. sempy_labs/migration/_direct_lake_to_import.py +2 -0
  80. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +4 -0
  81. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +2 -0
  82. sempy_labs/report/_download_report.py +2 -1
  83. sempy_labs/report/_generate_report.py +2 -0
  84. sempy_labs/report/_paginated.py +2 -0
  85. sempy_labs/report/_report_bpa.py +110 -122
  86. sempy_labs/report/_report_bpa_rules.py +2 -0
  87. sempy_labs/report/_report_functions.py +7 -0
  88. sempy_labs/report/_reportwrapper.py +64 -31
  89. sempy_labs/theme/__init__.py +12 -0
  90. sempy_labs/theme/_org_themes.py +96 -0
  91. sempy_labs/tom/_model.py +509 -34
  92. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/WHEEL +0 -0
  93. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/licenses/LICENSE +0 -0
  94. {semantic_link_labs-0.10.1.dist-info → semantic_link_labs-0.11.0.dist-info}/top_level.txt +0 -0
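
Three mechanical changes recur throughout the hunks below: workspace resolution switches from resolve_workspace_name_and_id to resolve_workspace_id where the resolved name was never used, row collection switches from calling pd.concat once per row to appending single-row frames to a dfs list and concatenating once, and functions gain the @log decorator from sempy._utils._log. A minimal sketch of the concat refactor, standalone and with hypothetical row data rather than the library's actual code:

    import pandas as pd

    rows = [{"Name": "A"}, {"Name": "B"}]  # hypothetical API rows

    # Before: one pd.concat per row; each call copies the accumulated frame,
    # so building n rows costs roughly O(n^2) copying.
    df = pd.DataFrame(columns=["Name"])
    for r in rows:
        df = pd.concat([df, pd.DataFrame(r, index=[0])], ignore_index=True)

    # After: collect single-row frames, then concatenate once.
    dfs = [pd.DataFrame(r, index=[0]) for r in rows]
    if dfs:
        df = pd.concat(dfs, ignore_index=True)
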
sempy_labs/_job_scheduler.py CHANGED
@@ -7,6 +7,7 @@ from sempy_labs._helper_functions import (
     _update_dataframe_datatypes,
     _base_api,
     _create_dataframe,
+    resolve_workspace_id,
 )
 from uuid import UUID
 import sempy_labs._icons as icons
@@ -38,9 +39,9 @@ def list_item_job_instances(
     Shows a list of job instances for the specified item.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     (item_name, item_id) = resolve_item_name_and_id(
-        item=item, type=type, workspace=workspace
+        item=item, type=type, workspace=workspace_id
     )
 
     columns = {
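
The hunk above drops the tuple unpacking in favor of resolve_workspace_id, so the workspace name is no longer resolved just to be discarded, and the already-resolved id is forwarded to resolve_item_name_and_id instead of the raw workspace argument, avoiding a second resolution. A hedged sketch of what such a resolver typically does (an illustration, not the helper's actual implementation; it assumes sempy.fabric's resolver semantics):

    from typing import Optional
    from uuid import UUID
    import sempy.fabric as fabric

    def resolve_workspace_id_sketch(workspace: Optional[str | UUID] = None) -> UUID:
        # Hypothetical: default to the active workspace, pass ids through,
        # and only hit the service for a name lookup.
        if workspace is None:
            return UUID(str(fabric.get_workspace_id()))
        try:
            return UUID(str(workspace))
        except ValueError:
            return UUID(str(fabric.resolve_workspace_id(workspace)))
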
@@ -87,8 +88,48 @@ def list_item_job_instances(
 
     if dfs:
         df = pd.concat(dfs, ignore_index=True)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+@log
+def _get_item_job_instance(url: str) -> pd.DataFrame:
+
+    columns = {
+        "Job Instance Id": "string",
+        "Item Id": "string",
+        "Job Type": "string",
+        "Invoke Type": "string",
+        "Status": "string",
+        "Root Activity Id": "string",
+        "Start Time UTC": "datetime",
+        "End Time UTC": "string",
+        "Error Message": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    response = _base_api(request=url)
 
-    df = _update_dataframe_datatypes(dataframe=df, column_map=columns)
+    dfs = []
+    for v in response.json().get("value", []):
+        fail = v.get("failureReason", {})
+        new_data = {
+            "Job Instance Id": v.get("id"),
+            "Item Id": v.get("itemId"),
+            "Job Type": v.get("jobType"),
+            "Invoke Type": v.get("invokeType"),
+            "Status": v.get("status"),
+            "Root Activity Id": v.get("rootActivityId"),
+            "Start Time UTC": v.get("startTimeUtc"),
+            "End Time UTC": v.get("endTimeUtc"),
+            "Error Message": fail.get("message") if fail is not None else "",
+        }
+        dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
 
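
Besides extracting the paging body into the new private helper _get_item_job_instance, this hunk removes the assignment df = _update_dataframe_datatypes(...). The helper appears to coerce column dtypes in place; if it returns None, the old assignment would have replaced the finished frame with None. An illustrative stand-in showing why the bare call is the correct form (the real helper's body is not shown in this diff):

    import pandas as pd

    def coerce_types_inplace(dataframe: pd.DataFrame, column_map: dict) -> None:
        # Stand-in for _update_dataframe_datatypes: mutates, returns nothing.
        for col, dtype in column_map.items():
            if dtype == "datetime":
                dataframe[col] = pd.to_datetime(dataframe[col])
            else:
                dataframe[col] = dataframe[col].astype(dtype)

    df = pd.DataFrame({"Start Time UTC": ["2024-01-01T00:00:00Z"], "Status": ["Completed"]})
    coerce_types_inplace(df, {"Start Time UTC": "datetime", "Status": "string"})
    # df = coerce_types_inplace(...) would rebind df to None - the bug removed above.
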
@@ -124,9 +165,9 @@ def list_item_schedules(
     Shows a list of scheduling settings for one specific item.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     (item_name, item_id) = resolve_item_name_and_id(
-        item=item, type=type, workspace=workspace
+        item=item, type=type, workspace=workspace_id
     )
 
     columns = {
@@ -149,6 +190,7 @@ def list_item_schedules(
         request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules"
     )
 
+    dfs = []
     for v in response.json().get("value", []):
         config = v.get("configuration", {})
         own = v.get("owner", {})
@@ -167,9 +209,11 @@ def list_item_schedules(
             "Owner Type": own.get("type"),
         }
 
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+        dfs.append(pd.DataFrame(new_data, index=[0]))
 
-    _update_dataframe_datatypes(dataframe=df, column_map=columns)
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
 
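
The if dfs: guard also changes the empty-result path: when the API returns no schedules, the function now skips both the concat and the dtype coercion and returns the empty, correctly-columned frame built by _create_dataframe up front. A compressed sketch of the pattern, with a stand-in for that helper and illustrative column names:

    import pandas as pd

    columns = {"Job Schedule Id": "string", "Enabled": "bool"}  # names illustrative
    df = pd.DataFrame(columns=list(columns))  # stand-in for _create_dataframe

    dfs = []  # would be filled with one single-row frame per API result

    if dfs:  # nothing returned: keep the typed empty frame instead of concatenating
        df = pd.concat(dfs, ignore_index=True)
    # either way, df has the expected columns
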
@@ -215,6 +259,7 @@ def run_on_demand_item_job(
     print(f"{icons.green_dot} The '{item_name}' {type.lower()} has been executed.")
 
 
+@log
 def create_item_schedule_cron(
     item: str | UUID,
     type: str,
@@ -283,6 +328,7 @@ def create_item_schedule_cron(
     )
 
 
+@log
 def create_item_schedule_daily(
     item: str | UUID,
     type: str,
@@ -351,6 +397,7 @@ def create_item_schedule_daily(
     )
 
 
+@log
 def create_item_schedule_weekly(
     item: str | UUID,
     type: str,
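
All three create_item_schedule_* functions are now wrapped with @log, the decorator imported in other files in this diff via from sempy._utils._log import log. Its implementation is not part of this diff; the sketch below only illustrates the general shape of such a decorator:

    import functools
    import logging

    def log(func):
        # Illustrative only; sempy's real @log decorator may capture telemetry,
        # timings, or error context rather than plain log lines.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logging.getLogger(func.__module__).info("calling %s", func.__qualname__)
            return func(*args, **kwargs)
        return wrapper

    @log
    def create_item_schedule_weekly(item, type, **kwargs):  # parameters elided
        ...
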
sempy_labs/_kql_databases.py CHANGED
@@ -11,8 +11,10 @@ from sempy_labs._helper_functions import (
 )
 from uuid import UUID
 import sempy_labs._icons as icons
+from sempy._utils._log import log
 
 
+@log
 def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the KQL databases within a workspace.
@@ -45,7 +47,7 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     responses = _base_api(
         request=f"v1/workspaces/{workspace_id}/kqlDatabases",
@@ -53,6 +55,7 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
@@ -66,11 +69,15 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Ingestion Service URI": prop.get("ingestionServiceUri"),
                 "Database Type": prop.get("databaseType"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def _create_kql_database(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -96,6 +103,7 @@ def _create_kql_database(
     )
 
 
+@log
 def delete_kql_database(
     kql_database: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -125,6 +133,7 @@ def delete_kql_database(
     delete_item(item=kql_database, type="KQLDatabase", workspace=workspace)
 
 
+@log
 def _resolve_cluster_uri(
     kql_database: str | UUID, workspace: Optional[str | UUID] = None
 ) -> str:
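
With the section above applied, list_kql_databases resolves the workspace once, pages through v1/workspaces/{workspace_id}/kqlDatabases with the fabric_sp client, and returns one row per database. A usage sketch (assuming the function is re-exported at the sempy_labs package level; of the column names, only "Ingestion Service URI" and "Database Type" are confirmed by the hunk):

    import sempy_labs as labs

    # name or UUID; None targets the active workspace
    df = labs.list_kql_databases(workspace="Sales Analytics")
    if df.empty:
        print("No KQL databases found.")
    else:
        print(df[["Database Type", "Ingestion Service URI"]].head())
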
sempy_labs/_kql_querysets.py CHANGED
@@ -2,15 +2,17 @@ import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
+    resolve_workspace_id,
     _base_api,
     _create_dataframe,
     delete_item,
     create_item,
 )
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the KQL querysets within a workspace.
@@ -37,12 +39,13 @@ def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     responses = _base_api(
         request=f"v1/workspaces/{workspace_id}/kqlQuerysets", uses_pagination=True
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             new_data = {
@@ -50,11 +53,15 @@ def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "KQL Queryset Id": v.get("id"),
                 "Description": v.get("description"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def create_kql_queryset(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -80,6 +87,7 @@ def create_kql_queryset(
     )
 
 
+@log
 def delete_kql_queryset(
     kql_queryset: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
 ):
sempy_labs/_list_functions.py CHANGED
@@ -18,6 +18,7 @@ import json
 from collections import defaultdict
 
 
+@log
 def get_object_level_security(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -70,6 +71,7 @@ def get_object_level_security(
     return df
 
 
+@log
 def list_tables(
     dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
 ) -> pd.DataFrame:
@@ -249,6 +251,7 @@ def list_tables(
     return df
 
 
+@log
 def list_annotations(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -481,6 +484,7 @@ def list_annotations(
     return df
 
 
+@log
 def list_columns(
     dataset: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -583,6 +587,7 @@ def list_columns(
     return dfC
 
 
+@log
 def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the lakehouses within a workspace.
@@ -642,6 +647,7 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     return df
 
 
+@log
 def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the datamarts within a workspace.
@@ -684,6 +690,7 @@ def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     return df
 
 
+@log
 def update_item(
     item_type: str,
     current_name: str,
@@ -749,6 +756,7 @@ def update_item(
     )
 
 
+@log
 def list_relationships(
     dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
 ) -> pd.DataFrame:
@@ -839,6 +847,7 @@ def list_relationships(
     return dfR
 
 
+@log
 def list_kpis(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -907,6 +916,7 @@ def list_kpis(
     return df
 
 
+@log
 def list_semantic_model_objects(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -1077,6 +1087,7 @@ def list_semantic_model_objects(
     return df
 
 
+@log
 def list_shortcuts(
     lakehouse: Optional[str] = None,
     workspace: Optional[str | UUID] = None,
@@ -1115,6 +1126,7 @@ def list_shortcuts(
     return list_shortcuts(lakehouse=lakehouse, workspace=workspace, path=path)
 
 
+@log
 def list_reports_using_semantic_model(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -1179,6 +1191,7 @@ def list_reports_using_semantic_model(
     # df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
 
+@log
 def list_report_semantic_model_objects(
     dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
 ) -> pd.DataFrame:
@@ -1273,6 +1286,7 @@ def list_report_semantic_model_objects(
     return dfRO
 
 
+@log
 def list_semantic_model_object_report_usage(
     dataset: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -1398,6 +1412,7 @@ def list_semantic_model_object_report_usage(
     return final_df
 
 
+@log
 def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Lists the `properties <https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.serverproperty?view=analysisservices-dotnet>`_ of the Analysis Services instance.
@@ -1445,6 +1460,7 @@ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
    return df
 
 
+@log
 def list_semantic_model_errors(
     dataset: str | UUID, workspace: Optional[str | UUID]
 ) -> pd.DataFrame:
@@ -1605,8 +1621,7 @@ def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
         merged_terms = defaultdict(dict)
         for t in v.get("Terms", []):
             for term, properties in t.items():
-                normalized_term = term.lower()
-                merged_terms[normalized_term].update(properties)
+                merged_terms[term].update(properties)
 
         for term, props in merged_terms.items():
             new_data = {
sempy_labs/_managed_private_endpoints.py CHANGED
@@ -7,10 +7,13 @@ from sempy_labs._helper_functions import (
     _base_api,
     _print_success,
     _create_dataframe,
+    resolve_workspace_id,
 )
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def create_managed_private_endpoint(
     name: str,
     target_private_link_resource_id: UUID,
@@ -72,6 +75,7 @@ def create_managed_private_endpoint(
     )
 
 
+@log
 def list_managed_private_endpoints(
     workspace: Optional[str | UUID] = None,
 ) -> pd.DataFrame:
@@ -106,7 +110,7 @@ def list_managed_private_endpoints(
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/managedPrivateEndpoints",
@@ -114,6 +118,7 @@ def list_managed_private_endpoints(
         client="fabric_sp",
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             conn = v.get("connectionState", {})
@@ -126,11 +131,15 @@ def list_managed_private_endpoints(
                 "Connection Description": conn.get("description"),
                 "Target Subresource Type": v.get("targetSubresourceType"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def delete_managed_private_endpoint(
     managed_private_endpoint: str | UUID, workspace: Optional[str | UUID] = None
 ):
sempy_labs/_mirrored_databases.py CHANGED
@@ -2,7 +2,6 @@ import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
-    _decode_b64,
     _update_dataframe_datatypes,
     _base_api,
     resolve_item_id,
@@ -10,12 +9,15 @@ from sempy_labs._helper_functions import (
     delete_item,
     create_item,
     get_item_definition,
+    resolve_workspace_id,
 )
 import sempy_labs._icons as icons
 import base64
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the mirrored databases within a workspace.
@@ -49,13 +51,14 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mirroredDatabases",
         uses_pagination=True,
         client="fabric_sp",
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
@@ -70,11 +73,15 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Provisioning Status": sql.get("provisioningStatus"),
                 "Default Schema": prop.get("defaultSchema"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def create_mirrored_database(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -100,6 +107,7 @@ def create_mirrored_database(
     )
 
 
+@log
 def delete_mirrored_database(
     mirrored_database: str, workspace: Optional[str | UUID] = None
 ):
@@ -121,6 +129,7 @@ def delete_mirrored_database(
     delete_item(item=mirrored_database, type="MirroredDatabase", workspace=workspace)
 
 
+@log
 def get_mirroring_status(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ) -> str:
@@ -156,6 +165,7 @@ def get_mirroring_status(
     return response.json().get("status", {})
 
 
+@log
 def get_tables_mirroring_status(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -219,6 +229,7 @@ def get_tables_mirroring_status(
     return df
 
 
+@log
 def start_mirroring(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ):
@@ -252,6 +263,7 @@ def start_mirroring(
     )
 
 
+@log
 def stop_mirroring(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ):
@@ -285,6 +297,7 @@ def stop_mirroring(
     )
 
 
+@log
 def get_mirrored_database_definition(
     mirrored_database: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -322,6 +335,7 @@ def get_mirrored_database_definition(
     )
 
 
+@log
 def update_mirrored_database_definition(
     mirrored_database: str | UUID,
     mirrored_database_content: dict,
sempy_labs/_mirrored_warehouses.py CHANGED
@@ -1,13 +1,15 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
+    resolve_workspace_id,
     _base_api,
     _create_dataframe,
 )
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the mirrored warehouses within a workspace.
@@ -34,13 +36,14 @@ def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mirroredWarehouses",
         status_codes=200,
         uses_pagination=True,
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             new_data = {
@@ -48,6 +51,9 @@ def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Mirrored Warehouse Id": v.get("id"),
                 "Description": v.get("description"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
sempy_labs/_ml_experiments.py CHANGED
@@ -1,15 +1,17 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
+    resolve_workspace_id,
     _base_api,
     delete_item,
     _create_dataframe,
     create_item,
 )
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the ML experiments within a workspace.
@@ -36,7 +38,7 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mlExperiments",
@@ -44,6 +46,7 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             model_id = v.get("id")
@@ -55,11 +58,15 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "ML Experiment Id": model_id,
                 "Description": desc,
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def create_ml_experiment(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -85,6 +92,7 @@ def create_ml_experiment(
     )
 
 
+@log
 def delete_ml_experiment(name: str, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric ML experiment.
sempy_labs/_ml_models.py CHANGED
@@ -1,15 +1,17 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
+    resolve_workspace_id,
     _base_api,
     delete_item,
     _create_dataframe,
     create_item,
 )
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the ML models within a workspace.
@@ -36,7 +38,7 @@ def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mlModels",
@@ -44,6 +46,7 @@ def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
    )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             model_id = v.get("id")
@@ -55,11 +58,15 @@ def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "ML Model Id": model_id,
                 "Description": desc,
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def create_ml_model(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -83,6 +90,7 @@ def create_ml_model(
     create_item(name=name, description=description, type="MLModel", workspace=workspace)
 
 
+@log
 def delete_ml_model(name: str | UUID, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric ML model.