semantic-link-labs 0.9.9__py3-none-any.whl → 0.9.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of semantic-link-labs has been flagged as potentially problematic.

Files changed (49)
  1. {semantic_link_labs-0.9.9.dist-info → semantic_link_labs-0.9.11.dist-info}/METADATA +30 -22
  2. {semantic_link_labs-0.9.9.dist-info → semantic_link_labs-0.9.11.dist-info}/RECORD +47 -40
  3. {semantic_link_labs-0.9.9.dist-info → semantic_link_labs-0.9.11.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +28 -1
  5. sempy_labs/_clear_cache.py +12 -0
  6. sempy_labs/_dax.py +8 -2
  7. sempy_labs/_delta_analyzer.py +17 -26
  8. sempy_labs/_environments.py +19 -1
  9. sempy_labs/_generate_semantic_model.py +7 -8
  10. sempy_labs/_helper_functions.py +351 -151
  11. sempy_labs/_kql_databases.py +18 -0
  12. sempy_labs/_kusto.py +137 -0
  13. sempy_labs/_list_functions.py +18 -36
  14. sempy_labs/_model_bpa_rules.py +13 -3
  15. sempy_labs/_notebooks.py +44 -11
  16. sempy_labs/_semantic_models.py +93 -1
  17. sempy_labs/_sql.py +3 -2
  18. sempy_labs/_tags.py +194 -0
  19. sempy_labs/_variable_libraries.py +89 -0
  20. sempy_labs/_vertipaq.py +6 -6
  21. sempy_labs/_vpax.py +386 -0
  22. sempy_labs/_warehouses.py +3 -3
  23. sempy_labs/admin/__init__.py +14 -0
  24. sempy_labs/admin/_artifacts.py +3 -3
  25. sempy_labs/admin/_capacities.py +161 -1
  26. sempy_labs/admin/_dataflows.py +45 -0
  27. sempy_labs/admin/_items.py +16 -11
  28. sempy_labs/admin/_tags.py +126 -0
  29. sempy_labs/admin/_tenant.py +5 -5
  30. sempy_labs/directlake/_generate_shared_expression.py +29 -26
  31. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +55 -5
  32. sempy_labs/dotnet_lib/dotnet.runtime.config.json +10 -0
  33. sempy_labs/lakehouse/__init__.py +16 -0
  34. sempy_labs/lakehouse/_blobs.py +115 -63
  35. sempy_labs/lakehouse/_get_lakehouse_columns.py +41 -18
  36. sempy_labs/lakehouse/_get_lakehouse_tables.py +62 -47
  37. sempy_labs/lakehouse/_helper.py +211 -0
  38. sempy_labs/lakehouse/_lakehouse.py +45 -36
  39. sempy_labs/lakehouse/_livy_sessions.py +137 -0
  40. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +7 -12
  41. sempy_labs/migration/_refresh_calc_tables.py +7 -6
  42. sempy_labs/report/_download_report.py +1 -1
  43. sempy_labs/report/_generate_report.py +5 -1
  44. sempy_labs/report/_reportwrapper.py +31 -18
  45. sempy_labs/tom/_model.py +104 -35
  46. sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +0 -9
  47. sempy_labs/report/_bpareporttemplate/.platform +0 -11
  48. {semantic_link_labs-0.9.9.dist-info → semantic_link_labs-0.9.11.dist-info}/licenses/LICENSE +0 -0
  49. {semantic_link_labs-0.9.9.dist-info → semantic_link_labs-0.9.11.dist-info}/top_level.txt +0 -0
sempy_labs/_kusto.py ADDED
@@ -0,0 +1,137 @@
+ import requests
+ import pandas as pd
+ from sempy.fabric.exceptions import FabricHTTPException
+ from sempy._utils._log import log
+ import sempy_labs._icons as icons
+ from typing import Optional
+ from uuid import UUID
+ from sempy_labs._kql_databases import _resolve_cluster_uri
+ from sempy_labs._helper_functions import resolve_item_id
+
+
+ @log
+ def query_kusto(
+     query: str,
+     kql_database: str | UUID,
+     workspace: Optional[str | UUID] = None,
+     language: str = "kql",
+ ) -> pd.DataFrame:
+     """
+     Runs a KQL query against a KQL database.
+
+     Parameters
+     ----------
+     query : str
+         The query (supports KQL or SQL - make sure to specify the language parameter accordingly).
+     kql_database : str | uuid.UUID
+         The KQL database name or ID.
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     language : str, default="kql"
+         The language of the query. Currently "kql" and "sql" are supported.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the result of the KQL query.
+     """
+
+     import notebookutils
+
+     language = language.lower()
+     if language not in ["kql", "sql"]:
+         raise ValueError(
+             f"{icons._red_dot} Invalid language '{language}'. Only 'kql' and 'sql' are supported."
+         )
+
+     cluster_uri = _resolve_cluster_uri(kql_database=kql_database, workspace=workspace)
+     token = notebookutils.credentials.getToken(cluster_uri)
+
+     headers = {
+         "Authorization": f"Bearer {token}",
+         "Content-Type": "application/json",
+         "Accept": "application/json",
+     }
+
+     kql_database_id = resolve_item_id(
+         item=kql_database, type="KQLDatabase", workspace=workspace
+     )
+     payload = {"db": kql_database_id, "csl": query}
+     if language == "sql":
+         payload["properties"] = {"Options": {"query_language": "sql"}}
+
+     response = requests.post(
+         f"{cluster_uri}/v1/rest/query",
+         headers=headers,
+         json=payload,
+     )
+
+     if response.status_code != 200:
+         raise FabricHTTPException(response)
+
+     results = response.json()
+     columns_info = results["Tables"][0]["Columns"]
+     rows = results["Tables"][0]["Rows"]
+
+     df = pd.DataFrame(rows, columns=[col["ColumnName"] for col in columns_info])
+
+     return df
+     # for col_info in columns_info:
+     #     col_name = col_info["ColumnName"]
+     #     data_type = col_info["DataType"]
+
+     #     try:
+     #         if data_type == "DateTime":
+     #             df[col_name] = pd.to_datetime(df[col_name])
+     #         elif data_type in ["Int64", "Int32", "Long"]:
+     #             df[col_name] = (
+     #                 pd.to_numeric(df[col_name], errors="coerce")
+     #                 .fillna(0)
+     #                 .astype("int64")
+     #             )
+     #         elif data_type == "Real" or data_type == "Double":
+     #             df[col_name] = pd.to_numeric(df[col_name], errors="coerce")
+     #         else:
+     #             # Convert any other type to string, change as needed
+     #             df[col_name] = df[col_name].astype(str)
+     #     except Exception as e:
+     #         print(
+     #             f"{icons.yellow_dot} Could not convert column {col_name} to {data_type}, defaulting to string: {str(e)}"
+     #         )
+     #         df[col_name] = df[col_name].astype(str)
+
+     return df
+
+
+ @log
+ def query_workspace_monitoring(
+     query: str, workspace: Optional[str | UUID] = None, language: str = "kql"
+ ) -> pd.DataFrame:
+     """
+     Runs a query against the Fabric workspace monitoring database. Workspace monitoring must be enabled on the workspace to use this function.
+
+     Parameters
+     ----------
+     query : str
+         The query (supports KQL or SQL - make sure to specify the language parameter accordingly).
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     language : str, default="kql"
+         The language of the query. Currently "kql" and "sql" are supported.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the result of the query.
+     """
+
+     return query_kusto(
+         query=query,
+         kql_database="Monitoring KQL database",
+         workspace=workspace,
+         language=language,
+     )
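For orientation, a minimal usage sketch of the two new functions added above (meant to run inside a Fabric notebook; the database, workspace, and query values are placeholders, and importing from the package root assumes the functions are re-exported in __init__.py):

    import sempy_labs as labs  # assumes query_kusto/query_workspace_monitoring are re-exported at the package root

    # Run a KQL query against a named KQL database in a given workspace.
    df = labs.query_kusto(
        query="MyTable | take 10",            # placeholder query
        kql_database="My KQL Database",       # placeholder database name
        workspace="My Workspace",             # placeholder workspace name
        language="kql",
    )

    # Query the workspace monitoring database (workspace monitoring must be enabled).
    df_logs = labs.query_workspace_monitoring(
        query="SemanticModelLogs | take 10",  # placeholder query
        workspace="My Workspace",
    )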
sempy_labs/_list_functions.py CHANGED
@@ -41,54 +41,32 @@ def get_object_level_security(
 
      from sempy_labs.tom import connect_semantic_model
 
-     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
-
      columns = {
          "Role Name": "string",
          "Object Type": "string",
          "Table Name": "string",
          "Object Name": "string",
+         "Metadata Permission": "string",
      }
      df = _create_dataframe(columns=columns)
 
      with connect_semantic_model(
-         dataset=dataset_id, readonly=True, workspace=workspace_id
+         dataset=dataset, readonly=True, workspace=workspace
      ) as tom:
 
          for r in tom.model.Roles:
              for tp in r.TablePermissions:
-                 if len(tp.FilterExpression) == 0:
-                     columnCount = 0
-                     try:
-                         columnCount = len(tp.ColumnPermissions)
-                     except Exception:
-                         pass
-                     objectType = "Table"
-                     if columnCount == 0:
-                         new_data = {
-                             "Role Name": r.Name,
-                             "Object Type": objectType,
-                             "Table Name": tp.Name,
-                             "Object Name": tp.Name,
-                         }
-                         df = pd.concat(
-                             [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                         )
-                     else:
-                         objectType = "Column"
-                         for cp in tp.ColumnPermissions:
-                             new_data = {
-                                 "Role Name": r.Name,
-                                 "Object Type": objectType,
-                                 "Table Name": tp.Name,
-                                 "Object Name": cp.Name,
-                             }
-                             df = pd.concat(
-                                 [df, pd.DataFrame(new_data, index=[0])],
-                                 ignore_index=True,
-                             )
-
+                 for cp in tp.ColumnPermissions:
+                     new_data = {
+                         "Role Name": r.Name,
+                         "Object Type": "Column",
+                         "Table Name": tp.Name,
+                         "Object Name": cp.Name,
+                         "Metadata Permission": cp.Permission,
+                     }
+                     df = pd.concat(
+                         [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                     )
      return df
 
 
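A short, hedged usage sketch of the reworked function; the dataset and workspace names are placeholders, and the import path assumes the function remains exposed from the package root:

    import sempy_labs as labs

    # Each row is one column-level OLS permission; the new "Metadata Permission"
    # column surfaces cp.Permission for that column permission.
    df_ols = labs.get_object_level_security(
        dataset="My Semantic Model",  # placeholder
        workspace="My Workspace",     # placeholder
    )
    print(df_ols[["Role Name", "Table Name", "Object Name", "Metadata Permission"]])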
@@ -240,7 +218,11 @@ def list_tables(
                          "Columns": sum(
                              1 for c in t.Columns if str(c.Type) != "RowNumber"
                          ),
-                         "% DB": round((total_size / model_size) * 100, 2),
+                         "% DB": (
+                             round((total_size / model_size) * 100, 2)
+                             if model_size not in (0, None, float("nan"))
+                             else 0.0
+                         ),
                      }
                  )
 
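The guarded percentage above avoids dividing by an empty model size. A standalone sketch of the same idea (this version checks NaN explicitly with math.isnan, which is an assumption on my part rather than the package's exact check):

    import math

    def pct_of_db(total_size: float, model_size: float) -> float:
        # Return 0.0 when the denominator is zero, missing, or NaN.
        if not model_size or (isinstance(model_size, float) and math.isnan(model_size)):
            return 0.0
        return round((total_size / model_size) * 100, 2)

    print(pct_of_db(512, 2048))  # 25.0
    print(pct_of_db(512, 0))     # 0.0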
sempy_labs/_model_bpa_rules.py CHANGED
@@ -674,8 +674,18 @@ def model_bpa_rules(
              "Provide format string for 'Date' columns",
              lambda obj, tom: (re.search(r"date", obj.Name, flags=re.IGNORECASE))
              and (obj.DataType == TOM.DataType.DateTime)
-             and (obj.FormatString != "mm/dd/yyyy"),
-             'Columns of type "DateTime" that have "Month" in their names should be formatted as "mm/dd/yyyy".',
+             and (
+                 obj.FormatString.lower()
+                 not in [
+                     "mm/dd/yyyy",
+                     "mm-dd-yyyy",
+                     "dd/mm/yyyy",
+                     "dd-mm-yyyy",
+                     "yyyy-mm-dd",
+                     "yyyy/mm/dd",
+                 ]
+             ),
+             'Columns of type "DateTime" that have "Date" in their names should be formatted.',
          ),
          (
              "Formatting",
@@ -789,7 +799,7 @@ def model_bpa_rules(
              "Formatting",
              "Column",
              "Warning",
-             'Provide format string for "Month" columns',
+             "Provide format string for 'Month' columns",
              lambda obj, tom: re.search(r"month", obj.Name, flags=re.IGNORECASE)
              and obj.DataType == TOM.DataType.DateTime
              and obj.FormatString != "MMMM yyyy",
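To illustrate the widened 'Date' format rule, a standalone sketch of its predicate using plain strings in place of the TOM column object (the names here are illustrative only):

    import re

    ACCEPTED_DATE_FORMATS = [
        "mm/dd/yyyy", "mm-dd-yyyy", "dd/mm/yyyy",
        "dd-mm-yyyy", "yyyy-mm-dd", "yyyy/mm/dd",
    ]

    def violates_date_format_rule(name: str, data_type: str, format_string: str) -> bool:
        # Flag DateTime columns named like "date" whose format string is not
        # one of the accepted date formats (compared case-insensitively).
        return bool(
            re.search(r"date", name, flags=re.IGNORECASE)
            and data_type == "DateTime"
            and format_string.lower() not in ACCEPTED_DATE_FORMATS
        )

    print(violates_date_format_rule("OrderDate", "DateTime", "yyyy-MM-dd"))    # False
    print(violates_date_format_rule("OrderDate", "DateTime", "General Date"))  # True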
sempy_labs/_notebooks.py CHANGED
@@ -7,6 +7,7 @@ import requests
  from sempy._utils._log import log
  from sempy_labs._helper_functions import (
      resolve_workspace_name_and_id,
+     resolve_workspace_id,
      _decode_b64,
      _base_api,
      resolve_item_id,
@@ -20,13 +21,20 @@ _notebook_prefix = "notebook-content."
 
 
  def _get_notebook_definition_base(
-     notebook_name: str, workspace: Optional[str | UUID] = None
+     notebook_name: str,
+     workspace: Optional[str | UUID] = None,
+     format: Optional[str] = None,
  ) -> pd.DataFrame:
 
-     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+     workspace_id = resolve_workspace_id(workspace)
      item_id = resolve_item_id(item=notebook_name, type="Notebook", workspace=workspace)
+
+     url = f"v1/workspaces/{workspace_id}/notebooks/{item_id}/getDefinition"
+     if format == "ipynb":
+         url += f"?format={format}"
+
      result = _base_api(
-         request=f"v1/workspaces/{workspace_id}/notebooks/{item_id}/getDefinition",
+         request=url,
          method="post",
          lro_return_json=True,
          status_codes=None,
@@ -53,7 +61,10 @@ def _get_notebook_type(
 
 
  def get_notebook_definition(
-     notebook_name: str, workspace: Optional[str | UUID] = None, decode: bool = True
+     notebook_name: str,
+     workspace: Optional[str | UUID] = None,
+     decode: bool = True,
+     format: Optional[str] = None,
  ) -> str:
      """
      Obtains the notebook definition.
@@ -71,6 +82,9 @@ def get_notebook_definition(
      decode : bool, default=True
          If True, decodes the notebook definition file into .ipynb format.
          If False, obtains the notebook definition file in base64 format.
+     format : str, default=None
+         The only supported value is ipynb
+         If provided the format will be in standard .ipynb otherwise the format will be in source code format which is GIT friendly ipynb
 
      Returns
      -------
@@ -79,7 +93,7 @@ def get_notebook_definition(
      """
 
      df_items = _get_notebook_definition_base(
-         notebook_name=notebook_name, workspace=workspace
+         notebook_name=notebook_name, workspace=workspace, format=format
      )
      df_items_filt = df_items[df_items["path"].str.startswith(_notebook_prefix)]
      payload = df_items_filt["payload"].iloc[0]
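A hedged usage sketch of the new format parameter (the notebook and workspace names are placeholders; importing from the package root assumes the function is exported there):

    from sempy_labs import get_notebook_definition

    # With format="ipynb" the definition is requested via ?format=ipynb and is
    # returned as a standard .ipynb document; omitting it returns the
    # Git-friendly source format.
    nb_definition = get_notebook_definition(
        notebook_name="My Notebook",  # placeholder
        workspace="My Workspace",     # placeholder
        decode=True,
        format="ipynb",
    )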
@@ -163,6 +177,7 @@ def create_notebook(
      type: str = "py",
      description: Optional[str] = None,
      workspace: Optional[str | UUID] = None,
+     format: Optional[str] = None,
  ):
      """
      Creates a new notebook with a definition within a workspace.
@@ -182,20 +197,27 @@ def create_notebook(
          The name or ID of the workspace.
          Defaults to None which resolves to the workspace of the attached lakehouse
          or if no lakehouse attached, resolves to the workspace of the notebook.
+     format : str, default=None
+         If 'ipynb' is provided than notebook_content should be standard ipynb format
+         otherwise notebook_content should be GIT friendly format
      """
 
-     notebook_payload = base64.b64encode(notebook_content).decode("utf-8")
+     notebook_payload = base64.b64encode(notebook_content.encode("utf-8")).decode(
+         "utf-8"
+     )
      definition_payload = {
-         "format": "ipynb",
          "parts": [
              {
-                 "path": f"{_notebook_prefix}.{type}",
+                 "path": f"{_notebook_prefix}{type}",
                  "payload": notebook_payload,
                  "payloadType": "InlineBase64",
              }
          ],
      }
 
+     if format == "ipynb":
+         definition_payload["format"] = "ipynb"
+
      create_item(
          name=name,
          type="Notebook",
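A hedged usage sketch for creating a notebook from a standard .ipynb payload (the names are placeholders; pairing type="ipynb" with format="ipynb" is an assumption based on how the definition path and format flag are built above):

    from sempy_labs import create_notebook

    ipynb_payload = '{"cells": [], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}'

    # The content is base64-encoded internally; format="ipynb" adds the
    # "format": "ipynb" field to the definition payload.
    create_notebook(
        name="My New Notebook",        # placeholder
        notebook_content=ipynb_payload,
        type="ipynb",                  # assumed extension for an .ipynb definition part
        workspace="My Workspace",      # placeholder
        format="ipynb",
    )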
@@ -206,7 +228,10 @@ def create_notebook(
 
 
  def update_notebook_definition(
-     name: str, notebook_content: str, workspace: Optional[str | UUID] = None
+     name: str,
+     notebook_content: str,
+     workspace: Optional[str | UUID] = None,
+     format: Optional[str] = None,
  ):
      """
      Updates an existing notebook with a new definition.
@@ -221,10 +246,15 @@ def update_notebook_definition(
          The name or ID of the workspace.
          Defaults to None which resolves to the workspace of the attached lakehouse
          or if no lakehouse attached, resolves to the workspace of the notebook.
+     format : str, default=None
+         If 'ipynb' is provided than notebook_content should be standard ipynb format
+         otherwise notebook_content should be GIT friendly format
      """
 
      (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-     notebook_payload = base64.b64encode(notebook_content)
+     notebook_payload = base64.b64encode(notebook_content.encode("utf-8")).decode(
+         "utf-8"
+     )
      item_id = resolve_item_id(item=name, type="Notebook", workspace=workspace)
      type = _get_notebook_type(notebook_name=name, workspace=workspace)
 
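The encoding change above matters because base64.b64encode accepts bytes, not str; a minimal sketch of the pattern now used:

    import base64

    notebook_content = '{"cells": []}'  # a notebook definition held as a str
    payload = base64.b64encode(notebook_content.encode("utf-8")).decode("utf-8")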
@@ -232,7 +262,7 @@ def update_notebook_definition(
          "definition": {
              "parts": [
                  {
-                     "path": f"{_notebook_prefix}.{type}",
+                     "path": f"{_notebook_prefix}{type}",
                      "payload": notebook_payload,
                      "payloadType": "InlineBase64",
                  }
@@ -240,6 +270,9 @@ def update_notebook_definition(
          },
      }
 
+     if format == "ipynb":
+         payload["definition"]["format"] = "ipynb"
+
      _base_api(
          request=f"v1/workspaces/{workspace_id}/notebooks/{item_id}/updateDefinition",
          payload=payload,
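And a hedged usage sketch for updating a notebook definition (the names are placeholders; the source string below is illustrative of the Git-friendly format, not an exact template):

    from sempy_labs import update_notebook_definition

    new_source = "# Fabric notebook source\n\nprint('hello')\n"  # illustrative content

    # Omit format to send Git-friendly source; pass format="ipynb" when the
    # content is a standard .ipynb document.
    update_notebook_definition(
        name="My Notebook",          # placeholder
        notebook_content=new_source,
        workspace="My Workspace",    # placeholder
    )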
sempy_labs/_semantic_models.py CHANGED
@@ -1,5 +1,5 @@
  from uuid import UUID
- from typing import Optional
+ from typing import Optional, List
  import pandas as pd
  from sempy_labs._helper_functions import (
      _create_dataframe,
@@ -10,6 +10,7 @@ from sempy_labs._helper_functions import (
      delete_item,
  )
  import sempy_labs._icons as icons
+ import re
 
 
  def get_semantic_model_refresh_schedule(
@@ -135,3 +136,94 @@ def delete_semantic_model(dataset: str | UUID, workspace: Optional[str | UUID] =
      """
 
      delete_item(item=dataset, type="SemanticModel", workspace=workspace)
+
+
+ def update_semantic_model_refresh_schedule(
+     dataset: str | UUID,
+     days: Optional[str | List[str]] = None,
+     times: Optional[str | List[str]] = None,
+     time_zone: Optional[str] = None,
+     workspace: Optional[str | UUID] = None,
+ ):
+     """
+     Updates the refresh schedule for the specified dataset from the specified workspace.
+
+     This is a wrapper function for the following API: `Datasets - Update Refresh Schedule In Group <https://learn.microsoft.com/rest/api/power-bi/datasets/update-refresh-schedule-in-group>`_.
+
+     Parameters
+     ----------
+     dataset : str | uuid.UUID
+         Name or ID of the semantic model.
+     days : str | list[str], default=None
+         The days of the week to refresh the dataset.
+         Valid values are: "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday".
+         Defaults to None which means the refresh schedule will not be updated.
+     times : str | list[str], default=None
+         The times of the day to refresh the dataset.
+         Valid format is "HH:MM" (24-hour format).
+         Defaults to None which means the refresh schedule will not be updated.
+     time_zone : str, default=None
+         The time zone to use for the refresh schedule.
+         Defaults to None which means the refresh schedule will not be updated.
+     workspace : str | uuid.UUID, default=None
+         The workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace)
+
+     payload = {"value": {}}
+
+     def is_valid_time_format(time_str):
+         pattern = r"^(?:[01]\d|2[0-3]):[0-5]\d$"
+         return re.match(pattern, time_str) is not None
+
+     weekdays = [
+         "Monday",
+         "Tuesday",
+         "Wednesday",
+         "Thursday",
+         "Friday",
+         "Sunday",
+         "Saturday",
+     ]
+     if days:
+         if isinstance(days, str):
+             days = [days]
+         for i in range(len(days)):
+             days[i] = days[i].capitalize()
+             if days[i] not in weekdays:
+                 raise ValueError(
+                     f"{icons.red_dot} Invalid day '{days[i]}'. Valid days are: {weekdays}"
+                 )
+         payload["value"]["days"] = days
+     if times:
+         if isinstance(times, str):
+             times = [times]
+         for i in range(len(times)):
+             if not is_valid_time_format(times[i]):
+                 raise ValueError(
+                     f"{icons.red_dot} Invalid time '{times[i]}'. Valid time format is 'HH:MM' (24-hour format)."
+                 )
+         payload["value"]["times"] = times
+     if time_zone:
+         payload["value"]["localTimeZoneId"] = time_zone
+
+     if not payload.get("value"):
+         print(
+             f"{icons.info} No changes were made to the refresh schedule for the '{dataset_name}' within the '{workspace_name}' workspace."
+         )
+         return
+
+     _base_api(
+         request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule",
+         method="patch",
+         client="fabric_sp",
+         payload=payload,
+     )
+
+     print(
+         f"{icons.green_dot} Refresh schedule for the '{dataset_name}' within the '{workspace_name}' workspace has been updated."
+     )
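A hedged usage sketch of the new function (the names are placeholders; importing from the package root assumes it is re-exported in __init__.py):

    from sempy_labs import update_semantic_model_refresh_schedule

    # Days are capitalized and validated against the weekday list; times must be
    # "HH:MM" in 24-hour format. Arguments left as None keep that part of the
    # existing schedule unchanged.
    update_semantic_model_refresh_schedule(
        dataset="My Semantic Model",            # placeholder
        days=["Monday", "Wednesday", "Friday"],
        times=["06:00", "18:30"],
        time_zone="UTC",
        workspace="My Workspace",               # placeholder
    )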
sempy_labs/_sql.py CHANGED
@@ -185,7 +185,7 @@ class ConnectWarehouse(ConnectBase):
  class ConnectLakehouse(ConnectBase):
      def __init__(
          self,
-         lakehouse: str | UUID,
+         lakehouse: Optional[str | UUID] = None,
          workspace: Optional[Union[str, UUID]] = None,
          timeout: int = 30,
      ):
@@ -194,8 +194,9 @@ class ConnectLakehouse(ConnectBase):
 
          Parameters
          ----------
-         lakehouse : str | uuid.UUID
+         lakehouse : str | uuid.UUID, default=None
              The name or ID of the Fabric lakehouse.
+             Defaults to None which resolves to the lakehouse attached to the notebook.
          workspace : str | uuid.UUID, default=None
              The name or ID of the workspace.
              Defaults to None which resolves to the workspace of the attached lakehouse
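Finally, a hedged sketch of the now-optional lakehouse argument (run inside a Fabric notebook with a lakehouse attached; the query method and table name are assumptions based on the class's warehouse counterpart, not confirmed by this diff):

    from sempy_labs import ConnectLakehouse

    # With lakehouse=None the connection resolves to the lakehouse attached to
    # the notebook.
    with ConnectLakehouse() as sql:
        df = sql.query("SELECT TOP 10 * FROM my_table")  # placeholder table name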