semantic-link-labs 0.10.0-py3-none-any.whl → 0.11.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of semantic-link-labs has been flagged as possibly problematic.
Files changed (95)
  1. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/METADATA +9 -6
  2. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/RECORD +95 -87
  3. sempy_labs/__init__.py +11 -1
  4. sempy_labs/_a_lib_info.py +2 -0
  5. sempy_labs/_capacities.py +2 -0
  6. sempy_labs/_connections.py +11 -0
  7. sempy_labs/_dashboards.py +9 -4
  8. sempy_labs/_data_pipelines.py +5 -0
  9. sempy_labs/_dataflows.py +284 -17
  10. sempy_labs/_daxformatter.py +80 -0
  11. sempy_labs/_delta_analyzer_history.py +4 -1
  12. sempy_labs/_deployment_pipelines.py +4 -0
  13. sempy_labs/_documentation.py +3 -0
  14. sempy_labs/_environments.py +10 -1
  15. sempy_labs/_eventhouses.py +12 -5
  16. sempy_labs/_eventstreams.py +11 -3
  17. sempy_labs/_external_data_shares.py +8 -2
  18. sempy_labs/_gateways.py +26 -5
  19. sempy_labs/_git.py +11 -0
  20. sempy_labs/_graphQL.py +10 -3
  21. sempy_labs/_helper_functions.py +62 -10
  22. sempy_labs/_job_scheduler.py +54 -7
  23. sempy_labs/_kql_databases.py +11 -2
  24. sempy_labs/_kql_querysets.py +11 -3
  25. sempy_labs/_list_functions.py +17 -45
  26. sempy_labs/_managed_private_endpoints.py +11 -2
  27. sempy_labs/_mirrored_databases.py +17 -3
  28. sempy_labs/_mirrored_warehouses.py +9 -3
  29. sempy_labs/_ml_experiments.py +11 -3
  30. sempy_labs/_ml_models.py +11 -3
  31. sempy_labs/_model_bpa_rules.py +2 -0
  32. sempy_labs/_mounted_data_factories.py +12 -8
  33. sempy_labs/_notebooks.py +6 -3
  34. sempy_labs/_refresh_semantic_model.py +1 -0
  35. sempy_labs/_semantic_models.py +107 -0
  36. sempy_labs/_spark.py +7 -0
  37. sempy_labs/_sql_endpoints.py +208 -0
  38. sempy_labs/_sqldatabase.py +13 -4
  39. sempy_labs/_tags.py +5 -1
  40. sempy_labs/_user_delegation_key.py +2 -0
  41. sempy_labs/_variable_libraries.py +3 -1
  42. sempy_labs/_warehouses.py +13 -3
  43. sempy_labs/_workloads.py +3 -0
  44. sempy_labs/_workspace_identity.py +3 -0
  45. sempy_labs/_workspaces.py +14 -1
  46. sempy_labs/admin/__init__.py +2 -0
  47. sempy_labs/admin/_activities.py +6 -5
  48. sempy_labs/admin/_apps.py +31 -31
  49. sempy_labs/admin/_artifacts.py +8 -3
  50. sempy_labs/admin/_basic_functions.py +5 -0
  51. sempy_labs/admin/_capacities.py +39 -28
  52. sempy_labs/admin/_datasets.py +51 -51
  53. sempy_labs/admin/_domains.py +17 -1
  54. sempy_labs/admin/_external_data_share.py +8 -2
  55. sempy_labs/admin/_git.py +14 -9
  56. sempy_labs/admin/_items.py +15 -2
  57. sempy_labs/admin/_reports.py +64 -65
  58. sempy_labs/admin/_shared.py +7 -1
  59. sempy_labs/admin/_tags.py +5 -0
  60. sempy_labs/admin/_tenant.py +5 -2
  61. sempy_labs/admin/_users.py +9 -3
  62. sempy_labs/admin/_workspaces.py +88 -0
  63. sempy_labs/directlake/_dl_helper.py +2 -0
  64. sempy_labs/directlake/_generate_shared_expression.py +2 -0
  65. sempy_labs/directlake/_get_directlake_lakehouse.py +2 -4
  66. sempy_labs/directlake/_get_shared_expression.py +2 -0
  67. sempy_labs/directlake/_guardrails.py +2 -0
  68. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +5 -3
  69. sempy_labs/directlake/_warm_cache.py +1 -0
  70. sempy_labs/graph/_groups.py +22 -7
  71. sempy_labs/graph/_teams.py +7 -2
  72. sempy_labs/graph/_users.py +1 -0
  73. sempy_labs/lakehouse/_blobs.py +1 -0
  74. sempy_labs/lakehouse/_get_lakehouse_tables.py +88 -27
  75. sempy_labs/lakehouse/_helper.py +2 -0
  76. sempy_labs/lakehouse/_lakehouse.py +38 -5
  77. sempy_labs/lakehouse/_livy_sessions.py +2 -1
  78. sempy_labs/lakehouse/_shortcuts.py +7 -1
  79. sempy_labs/migration/_direct_lake_to_import.py +2 -0
  80. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +15 -0
  81. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +213 -0
  82. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +45 -0
  83. sempy_labs/report/_download_report.py +2 -1
  84. sempy_labs/report/_generate_report.py +2 -0
  85. sempy_labs/report/_paginated.py +2 -0
  86. sempy_labs/report/_report_bpa.py +110 -122
  87. sempy_labs/report/_report_bpa_rules.py +2 -0
  88. sempy_labs/report/_report_functions.py +7 -0
  89. sempy_labs/report/_reportwrapper.py +86 -48
  90. sempy_labs/theme/__init__.py +12 -0
  91. sempy_labs/theme/_org_themes.py +96 -0
  92. sempy_labs/tom/_model.py +702 -35
  93. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/WHEEL +0 -0
  94. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/licenses/LICENSE +0 -0
  95. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/top_level.txt +0 -0
sempy_labs/lakehouse/_lakehouse.py

@@ -11,8 +11,14 @@ from sempy_labs._helper_functions import (
 )
 import sempy_labs._icons as icons
 import re
+import time
+import pandas as pd
+from sempy_labs._job_scheduler import (
+    _get_item_job_instance,
+)


+@log
 def lakehouse_attached() -> bool:
     """
     Identifies if a lakehouse is attached to the notebook.
@@ -33,6 +39,7 @@ def lakehouse_attached() -> bool:
     return False


+@log
 def _optimize_table(path):

     if _pure_python_notebook():
@@ -46,6 +53,7 @@ def _optimize_table(path):
         DeltaTable.forPath(spark, path).optimize().executeCompaction()


+@log
 def _vacuum_table(path, retain_n_hours):

     if _pure_python_notebook():
@@ -145,6 +153,7 @@ def vacuum_lakehouse_tables(
         _vacuum_table(path=path, retain_n_hours=retain_n_hours)


+@log
 def run_table_maintenance(
     table_name: str,
     optimize: bool = False,
@@ -154,7 +163,7 @@ def run_table_maintenance(
     schema: Optional[str] = None,
     lakehouse: Optional[str | UUID] = None,
     workspace: Optional[str | UUID] = None,
-):
+) -> pd.DataFrame:
     """
     Runs table maintenance operations on the specified table within the lakehouse.

@@ -181,6 +190,11 @@ def run_table_maintenance(
         The Fabric workspace name or ID used by the lakehouse.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A DataFrame containing the job instance details of the table maintenance operation.
     """

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
@@ -223,13 +237,32 @@ def run_table_maintenance(
     if vacuum and retention_period is not None:
         payload["executionData"]["vacuumSettings"]["retentionPeriod"] = retention_period

-    _base_api(
+    response = _base_api(
         request=f"/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/jobs/instances?jobType=TableMaintenance",
         method="post",
         payload=payload,
         status_codes=202,
     )

-    print(
-        f"{icons.green_dot} The table maintenance job for the '{table_name}' table in the '{lakehouse_name}' lakehouse within the '{workspace_name}' workspace has been initiated."
-    )
+    f"{icons.in_progress} The table maintenance job for the '{table_name}' table in the '{lakehouse_name}' lakehouse within the '{workspace_name}' workspace has been initiated."
+
+    status_url = response.headers.get("Location").split("fabric.microsoft.com")[1]
+    status = None
+    while status not in ["Completed", "Failed"]:
+        response = _base_api(request=status_url)
+        status = response.json().get("status")
+        time.sleep(10)
+
+    df = _get_item_job_instance(url=status_url)
+
+    if status == "Completed":
+        print(
+            f"{icons.green_dot} The table maintenance job for the '{table_name}' table in the '{lakehouse_name}' lakehouse within the '{workspace_name}' workspace has succeeded."
+        )
+    else:
+        print(status)
+        print(
+            f"{icons.red_dot} The table maintenance job for the '{table_name}' table in the '{lakehouse_name}' lakehouse within the '{workspace_name}' workspace has failed."
+        )
+
+    return df
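In 0.11.0, `run_table_maintenance` no longer returns right after submitting the job: it polls the job-instance status URL every 10 seconds and returns the job details as a DataFrame. A minimal usage sketch, assuming the import path from earlier releases and illustrative table, lakehouse, and workspace names:

```python
from sempy_labs.lakehouse import run_table_maintenance

# Illustrative names only; substitute your own table, lakehouse, and workspace.
# The call now blocks until the TableMaintenance job reports Completed or Failed,
# then returns the job instance details as a pandas DataFrame.
df = run_table_maintenance(
    table_name="sales",
    optimize=True,   # compact the Delta table
    vacuum=True,     # remove unreferenced files
    lakehouse="MyLakehouse",
    workspace="MyWorkspace",
)
print(df)
```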
sempy_labs/lakehouse/_livy_sessions.py

@@ -8,8 +8,10 @@ from sempy_labs._helper_functions import (
 import pandas as pd
 from typing import Optional
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def list_livy_sessions(
     lakehouse: Optional[str | UUID] = None, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -85,7 +87,6 @@ def list_livy_sessions(
     )

     dfs = []
-
     for r in responses:
         for v in r.get("value", []):
             queued_duration = v.get("queuedDuration", {})
sempy_labs/lakehouse/_shortcuts.py

@@ -216,6 +216,7 @@ def create_shortcut(
     )


+@log
 def delete_shortcut(
     shortcut_name: str,
     shortcut_path: str = "Tables",
@@ -260,6 +261,7 @@ def delete_shortcut(
     )


+@log
 def reset_shortcut_cache(workspace: Optional[str | UUID] = None):
     """
     Deletes any cached files that were stored while reading from shortcuts.
@@ -369,6 +371,7 @@ def list_shortcuts(
         "S3Compatible": "s3Compatible",
     }

+    dfs = []
    for r in responses:
        for i in r.get("value", []):
            tgt = i.get("target", {})
@@ -415,6 +418,9 @@ def list_shortcuts(
                "SubPath": sub_path,
                "Source Properties Raw": str(tgt),
            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df
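The `list_shortcuts` change (and the same pattern in the new `_discover.py` module further below) replaces per-row `pd.concat` calls with a collect-then-concat idiom. A standalone sketch of the difference, using made-up row data unrelated to the library:

```python
import pandas as pd

rows = [{"Shortcut Name": f"sc_{i}", "Shortcut Path": "Tables"} for i in range(1000)]

# Old pattern: quadratic, because each concat copies the ever-growing frame.
df_slow = pd.DataFrame(columns=["Shortcut Name", "Shortcut Path"])
for row in rows:
    df_slow = pd.concat([df_slow, pd.DataFrame(row, index=[0])], ignore_index=True)

# New pattern: collect one-row frames, concatenate once at the end.
dfs = [pd.DataFrame(row, index=[0]) for row in rows]
df_fast = pd.concat(dfs, ignore_index=True) if dfs else df_slow.iloc[0:0]
```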
sempy_labs/migration/_direct_lake_to_import.py

@@ -2,8 +2,10 @@ import sempy
 from uuid import UUID
 import sempy_labs._icons as icons
 from typing import Optional
+from sempy._utils._log import log


+@log
 def migrate_direct_lake_to_import(
     dataset: str | UUID,
     workspace: Optional[str | UUID] = None,
sempy_labs/mirrored_azure_databricks_catalog/__init__.py (new file)

@@ -0,0 +1,15 @@
+from sempy_labs.mirrored_azure_databricks_catalog._refresh_catalog_metadata import (
+    refresh_catalog_metadata,
+)
+from sempy_labs.mirrored_azure_databricks_catalog._discover import (
+    discover_catalogs,
+    discover_schemas,
+    discover_tables,
+)
+
+__all__ = [
+    "refresh_catalog_metadata",
+    "discover_catalogs",
+    "discover_schemas",
+    "discover_tables",
+]
sempy_labs/mirrored_azure_databricks_catalog/_discover.py (new file)

@@ -0,0 +1,213 @@
+from uuid import UUID
+from typing import Optional
+from sempy_labs._helper_functions import (
+    resolve_workspace_id,
+    _base_api,
+    _create_dataframe,
+)
+import pandas as pd
+from sempy._utils._log import log
+
+
+@log
+def discover_catalogs(
+    databricks_workspace_connection_id: UUID,
+    workspace: Optional[str | UUID] = None,
+    max_results: Optional[int] = None,
+) -> pd.DataFrame:
+    """
+    Returns a list of catalogs from Unity Catalog.
+
+    This is a wrapper function for the following API: `Databricks Metadata Discovery - Discover Catalogs <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/databricks-metadata-discovery/discover-catalogs>`_.
+
+    Parameters
+    ----------
+    databricks_workspace_connection_id : uuid.UUID
+        The ID of the Databricks workspace connection.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    max_results : int, default=None
+        The maximum number of results to return. If not specified, all results are returned.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of catalogs from Unity Catalog.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    url = f"/v1/workspaces/{workspace_id}/azuredatabricks/catalogs?databricksWorkspaceConnectionId={databricks_workspace_connection_id}"
+    if max_results:
+        url += f"&maxResults={max_results}"
+
+    responses = _base_api(request=url, uses_pagination=True)
+
+    columns = {
+        "Catalog Name": "str",
+        "Catalog Full Name": "str",
+        "Catalog Type": "str",
+        "Storage Location": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    dfs = []
+    for r in responses:
+        for i in r.get("value", []):
+            new_data = {
+                "Catalog Name": i.get("name"),
+                "Catalog Full Name": i.get("fullName"),
+                "Catalog Type": i.get("catalogType"),
+                "Storage Location": i.get("storageLocation"),
+            }
+
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
+
+
+@log
+def discover_schemas(
+    catalog: str,
+    databricks_workspace_connection_id: UUID,
+    workspace: Optional[str | UUID] = None,
+    max_results: Optional[int] = None,
+) -> pd.DataFrame:
+    """
+    Returns a list of schemas in the given catalog from Unity Catalog.
+
+    This is a wrapper function for the following API: `Databricks Metadata Discovery - Discover Schemas <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/databricks-metadata-discovery/discover-schemas>`_.
+
+    Parameters
+    ----------
+    catalog : str
+        The name of the catalog.
+    databricks_workspace_connection_id : uuid.UUID
+        The ID of the Databricks workspace connection.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    max_results : int, default=None
+        The maximum number of results to return. If not specified, all results are returned.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of schemas in the given catalog from Unity Catalog.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    url = f"/v1/workspaces/{workspace_id}/azuredatabricks/catalogs/{catalog}/schemas?databricksWorkspaceConnectionId={databricks_workspace_connection_id}"
+    if max_results:
+        url += f"&maxResults={max_results}"
+
+    responses = _base_api(request=url, uses_pagination=True)
+
+    columns = {
+        "Catalog Name": "str",
+        "Schema Name": "str",
+        "Schema Full Name": "str",
+        "Storage Location": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    dfs = []
+    for r in responses:
+        for i in r.get("value", []):
+            new_data = {
+                "Catalog Name": catalog,
+                "Schema Name": i.get("name"),
+                "Schema Full Name": i.get("fullName"),
+                "Storage Location": i.get("storageLocation"),
+            }
+
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
+
+
+@log
+def discover_tables(
+    catalog: str,
+    schema: str,
+    databricks_workspace_connection_id: UUID,
+    workspace: Optional[str | UUID] = None,
+    max_results: Optional[int] = None,
+) -> pd.DataFrame:
+    """
+    Returns a list of tables in the given catalog and schema from Unity Catalog.
+
+    This is a wrapper function for the following API: `Databricks Metadata Discovery - Discover Tables <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/databricks-metadata-discovery/discover-tables>`_.
+
+    Parameters
+    ----------
+    catalog : str
+        The name of the catalog.
+    schema : str
+        The name of the schema.
+    databricks_workspace_connection_id : uuid.UUID
+        The ID of the Databricks workspace connection.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    max_results : int, default=None
+        The maximum number of results to return. If not specified, all results are returned.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of tables in the given catalog and schema from Unity Catalog.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    url = f"/v1/workspaces/{workspace_id}/azuredatabricks/catalogs/{catalog}/schemas/{schema}/tables?databricksWorkspaceConnectionId={databricks_workspace_connection_id}"
+    if max_results:
+        url += f"&maxResults={max_results}"
+
+    responses = _base_api(request=url, uses_pagination=True)
+
+    columns = {
+        "Catalog Name": "str",
+        "Schema Name": "str",
+        "Table Name": "str",
+        "Table Full Name": "str",
+        "Storage Location": "str",
+        "Table Type": "str",
+        "Data Source Format": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    dfs = []
+    for r in responses:
+        for i in r.get("value", []):
+            new_data = {
+                "Catalog Name": catalog,
+                "Schema Name": schema,
+                "Table Name": i.get("name"),
+                "Table Full Name": i.get("fullName"),
+                "Storage Location": i.get("storageLocation"),
+                "Table Type": i.get("tableType"),
+                "Data Source Format": i.get("dataSourceFormat"),
+            }
+
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
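These three functions walk the Unity Catalog hierarchy top-down (catalogs → schemas → tables), each returning a pandas DataFrame. A minimal exploration sketch, assuming the subpackage exports shown in the new `__init__.py` above, a Fabric notebook environment, and a placeholder connection ID:

```python
from uuid import UUID
from sempy_labs.mirrored_azure_databricks_catalog import (
    discover_catalogs,
    discover_schemas,
    discover_tables,
)

# Placeholder; use the ID of your own Databricks workspace connection.
conn_id = UUID("00000000-0000-0000-0000-000000000000")

catalogs = discover_catalogs(databricks_workspace_connection_id=conn_id)
for catalog in catalogs["Catalog Name"]:
    schemas = discover_schemas(catalog=catalog, databricks_workspace_connection_id=conn_id)
    for schema in schemas["Schema Name"]:
        tables = discover_tables(
            catalog=catalog,
            schema=schema,
            databricks_workspace_connection_id=conn_id,
            max_results=100,  # optional cap per call
        )
        print(tables[["Table Full Name", "Table Type", "Data Source Format"]])
```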
sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py (new file)

@@ -0,0 +1,45 @@
+from uuid import UUID
+from typing import Optional
+from sempy_labs._helper_functions import (
+    resolve_workspace_name_and_id,
+    resolve_item_name_and_id,
+    _base_api,
+)
+import sempy_labs._icons as icons
+from sempy._utils._log import log
+
+
+@log
+def refresh_catalog_metadata(
+    mirrored_azure_databricks_catalog: str | UUID,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Refreshes the Databricks catalog metadata of a mirroredAzureDatabricksCatalog item.
+
+    This is a wrapper function for the following API: `Refresh Metadata - Items RefreshCatalogMetadata <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/refresh-metadata/items-refresh-catalog-metadata>`_.
+
+    Parameters
+    ----------
+    mirrored_azure_databricks_catalog : str | uuid.UUID
+        The name or ID of the mirrored Azure Databricks catalog.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (catalog_name, catalog_id) = resolve_item_name_and_id(
+        mirrored_azure_databricks_catalog
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mirroredAzureDatabricksCatalogs/{catalog_id}/refreshCatalogMetadata",
+        method="post",
+        lro_return_status_code=True,
+    )
+
+    print(
+        f"{icons.green_dot} The '{catalog_name}' Databricks Catalog metadata within the '{workspace_name}' workspace has been refreshed."
+    )
sempy_labs/report/_download_report.py

@@ -1,4 +1,3 @@
-import sempy.fabric as fabric
 import sempy_labs._icons as icons
 from typing import Optional
 from sempy_labs._helper_functions import (
@@ -11,8 +10,10 @@ from sempy_labs._helper_functions import (
 )
 from sempy_labs.lakehouse._lakehouse import lakehouse_attached
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def download_report(
     report: str | UUID,
     file_name: Optional[str] = None,
sempy_labs/report/_generate_report.py

@@ -18,6 +18,7 @@ from sempy._utils._log import log
 from uuid import UUID


+@log
 def create_report_from_reportjson(
     report: str,
     dataset: str | UUID,
@@ -119,6 +120,7 @@ def create_report_from_reportjson(
     )


+@log
 def update_report_from_reportjson(
     report: str | UUID, report_json: dict, workspace: Optional[str | UUID] = None
 ):
sempy_labs/report/_paginated.py

@@ -7,8 +7,10 @@ from sempy_labs._helper_functions import (
     resolve_item_id,
     _create_dataframe,
 )
+from sempy._utils._log import log


+@log
 def get_report_datasources(
     report: str | UUID,
     workspace: Optional[str | UUID] = None,