semantic-link-labs 0.12.4__py3-none-any.whl → 0.12.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of semantic-link-labs has been flagged as potentially problematic.

@@ -0,0 +1,486 @@
+ import pandas as pd
+ from sempy_labs._helper_functions import (
+     _is_valid_uuid,
+     _base_api,
+     _update_dataframe_datatypes,
+     _create_dataframe,
+     resolve_workspace_id,
+ )
+ from sempy._utils._log import log
+ import sempy_labs._icons as icons
+ from uuid import UUID
+ from typing import Optional
+
+
+ @log
+ def resolve_deployment_pipeline_id(deployment_pipeline: str | UUID) -> UUID:
+     """
+     Obtains the Id for a given deployment pipeline.
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+
+     Returns
+     -------
+     uuid.UUID
+         The deployment pipeline Id.
+     """
+
+     if _is_valid_uuid(deployment_pipeline):
+         return deployment_pipeline
+     else:
+         dfP = list_deployment_pipelines()
+         dfP_filt = dfP[dfP["Deployment Pipeline Name"] == deployment_pipeline]
+         if len(dfP_filt) == 0:
+             raise ValueError(
+                 f"{icons.red_dot} The '{deployment_pipeline}' deployment pipeline is not valid."
+             )
+         return dfP_filt["Deployment Pipeline Id"].iloc[0]
+
+
+ @log
+ def resolve_stage_id(deployment_pipeline_id: UUID, stage: str | UUID):
+
+     dfPS = list_deployment_pipeline_stages(deployment_pipeline=deployment_pipeline_id)
+
+     if _is_valid_uuid(stage):
+         dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Id"] == stage]
+     else:
+         dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Name"] == stage]
+     if dfPS_filt.empty:
+         raise ValueError(
+             f"{icons.red_dot} The '{stage}' stage does not exist within the '{deployment_pipeline_id}' deployment pipeline."
+         )
+     return dfPS_filt["Deployment Pipeline Stage Id"].iloc[0]
+
+
+ @log
+ def list_deployment_pipelines() -> pd.DataFrame:
+     """
+     Shows a list of deployment pipelines the user can access.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipelines <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/list-deployment-pipelines>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing a list of deployment pipelines the user can access.
+     """
+
+     columns = {
+         "Deployment Pipeline Id": "string",
+         "Deployment Pipeline Name": "string",
+         "Description": "string",
+     }
+     df = _create_dataframe(columns=columns)
+
+     responses = _base_api(
+         request="/v1/deploymentPipelines",
+         status_codes=200,
+         uses_pagination=True,
+         client="fabric_sp",
+     )
+
+     rows = []
+     for r in responses:
+         for v in r.get("value", []):
+             rows.append(
+                 {
+                     "Deployment Pipeline Id": v.get("id"),
+                     "Deployment Pipeline Name": v.get("displayName"),
+                     "Description": v.get("description"),
+                 }
+             )
+
+     if rows:
+         df = pd.DataFrame(rows, columns=list(columns.keys()))
+
+     return df
+
+
+ @log
+ def list_deployment_pipeline_stages(deployment_pipeline: str | UUID) -> pd.DataFrame:
+     """
+     Shows the specified deployment pipeline stages.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Stages <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/list-deployment-pipeline-stages>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the specified deployment pipeline stages.
+     """
+
+     columns = {
+         "Deployment Pipeline Stage Id": "string",
+         "Deployment Pipeline Stage Name": "string",
+         "Order": "int",
+         "Description": "string",
+         "Workspace Id": "string",
+         "Workspace Name": "string",
+         "Public": "bool",
+     }
+     df = _create_dataframe(columns=columns)
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     responses = _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages",
+         status_codes=200,
+         uses_pagination=True,
+         client="fabric_sp",
+     )
+
+     rows = []
+     for r in responses:
+         for v in r.get("value", []):
+             rows.append(
+                 {
+                     "Deployment Pipeline Stage Id": v.get("id"),
+                     "Deployment Pipeline Stage Name": v.get("displayName"),
+                     "Description": v.get("description"),
+                     "Order": v.get("order"),
+                     "Workspace Id": v.get("workspaceId"),
+                     "Workspace Name": v.get("workspaceName"),
+                     "Public": v.get("isPublic"),
+                 }
+             )
+
+     if rows:
+         df = pd.DataFrame(rows, columns=list(columns.keys()))
+         _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+     return df
+
+
+ @log
+ def list_deployment_pipeline_stage_items(
+     deployment_pipeline: str | UUID,
+     stage: str | UUID,
+ ) -> pd.DataFrame:
+     """
+     Shows the supported items from the workspace assigned to the specified stage of the specified deployment pipeline.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Stage Items <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/list-deployment-pipeline-stage-items>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+     stage : str | uuid.UUID
+         The deployment pipeline stage name or ID.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the supported items from the workspace assigned to the specified stage of the specified deployment pipeline.
+     """
+
+     columns = {
+         "Deployment Pipeline Stage Item Id": "string",
+         "Deployment Pipeline Stage Item Name": "string",
+         "Item Type": "string",
+         "Source Item Id": "string",
+         "Target Item Id": "string",
+         "Last Deployment Time": "string",
+     }
+     df = _create_dataframe(columns=columns)
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     stage_id = resolve_stage_id(deployment_pipeline_id, stage)
+
+     responses = _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/items",
+         status_codes=200,
+         uses_pagination=True,
+         client="fabric_sp",
+     )
+
+     rows = []
+     for r in responses:
+         for v in r.get("value", []):
+             rows.append(
+                 {
+                     "Deployment Pipeline Stage Item Id": v.get("itemId"),
+                     "Deployment Pipeline Stage Item Name": v.get("itemDisplayName"),
+                     "Item Type": v.get("itemType"),
+                     "Source Item Id": v.get("sourceItemId"),
+                     "Target Item Id": v.get("targetItemId"),
+                     "Last Deployment Time": v.get("lastDeploymentTime"),
+                 }
+             )
+
+     if rows:
+         df = pd.DataFrame(rows, columns=list(columns.keys()))
+
+     return df
+
+
+ @log
+ def list_deployment_pipeline_role_assignments(
+     deployment_pipeline: str | UUID,
+ ) -> pd.DataFrame:
+     """
+     Shows the role assignments for the specified deployment pipeline.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Role Assignments <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/list-deployment-pipeline-role-assignments>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the role assignments for the specified deployment pipeline.
+     """
+
+     columns = {
+         "Role": "string",
+         "Principal Id": "string",
+         "Principal Type": "string",
+     }
+     df = _create_dataframe(columns=columns)
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     responses = _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/roleAssignments",
+         uses_pagination=True,
+         client="fabric_sp",
+     )
+
+     rows = []
+     for r in responses:
+         for v in r.get("value", []):
+             principal = v.get("principal", {})
+             rows.append(
+                 {
+                     "Role": v.get("role"),
+                     "Principal Id": principal.get("id"),
+                     "Principal Type": principal.get("type"),
+                 }
+             )
+
+     if rows:
+         df = pd.DataFrame(rows, columns=list(columns.keys()))
+
+     return df
+
+
+ @log
+ def delete_deployment_pipeline(
+     deployment_pipeline: str | UUID,
+ ):
+     """
+     Deletes the specified deployment pipeline.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - Delete Deployment Pipeline <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/delete-deployment-pipeline>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+     """
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}",
+         method="delete",
+         client="fabric_sp",
+     )
+
+     print(
+         f"{icons.green_dot} The '{deployment_pipeline}' deployment pipeline has been deleted successfully."
+     )
+
+
+ @log
+ def list_deployment_pipeline_operations(
+     deployment_pipeline: str | UUID,
+ ) -> pd.DataFrame:
+     """
+     Shows the operations for the specified deployment pipeline.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Operations <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/list-deployment-pipeline-operations>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the operations for the specified deployment pipeline.
+     """
+
+     columns = {
+         "Operation Id": "string",
+         "Type": "string",
+         "Status": "string",
+         "Last Updated Time": "string",
+         "Execution Start Time": "datetime_coerce",
+         "Execution End Time": "datetime_coerce",
+         "Source Stage Id": "string",
+         "Target Stage Id": "string",
+         "Note": "string",
+         "New Items Count": "int",
+         "Different Items Count": "int",
+         "No Difference Items Count": "int",
+         "Performed By Id": "string",
+         "Performed By Type": "string",
+     }
+     df = _create_dataframe(columns=columns)
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     responses = _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/operations",
+         uses_pagination=True,
+         client="fabric_sp",
+     )
+
+     rows = []
+     for r in responses:
+         for v in r.get("value", []):
+             p = v.get("preDeploymentDiffInformation", {})
+             rows.append(
+                 {
+                     "Operation Id": v.get("id"),
+                     "Type": v.get("type"),
+                     "Status": v.get("status"),
+                     "Last Updated Time": v.get("lastUpdatedTime"),
+                     "Execution Start Time": v.get("executionStartTime"),
+                     "Execution End Time": v.get("executionEndTime"),
+                     "Source Stage Id": v.get("sourceStageId"),
+                     "Target Stage Id": v.get("targetStageId"),
+                     "Note": v.get("note", {}).get("content"),
+                     "New Items Count": p.get("newItemsCount"),
+                     "Different Items Count": p.get("differentItemsCount"),
+                     "No Difference Items Count": p.get("noDifferenceItemsCount"),
+                     "Performed By Id": v.get("performedBy", {}).get("id"),
+                     "Performed By Type": v.get("performedBy", {}).get("type"),
+                 }
+             )
+
+     if rows:
+         df = pd.DataFrame(rows, columns=list(columns.keys()))
+         _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+     return df
+
+
+ @log
+ def unassign_workspace_from_stage(
+     deployment_pipeline: str | UUID,
+     stage: str | UUID,
+ ):
+     """
+     Unassigns the workspace from the specified stage of the specified deployment pipeline.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - Unassign Workspace From Stage <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/unassign-workspace-from-stage>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+     stage : str | uuid.UUID
+         The deployment pipeline stage name or ID.
+     """
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     stage_id = resolve_stage_id(deployment_pipeline_id, stage)
+
+     _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/unassignWorkspace",
+         method="post",
+         client="fabric_sp",
+     )
+
+     print(
+         f"{icons.green_dot} The workspace has been unassigned from the '{stage}' stage of the '{deployment_pipeline}' deployment pipeline successfully."
+     )
+
+
+ @log
+ def assign_workspace_to_stage(
+     deployment_pipeline: str | UUID,
+     stage: str | UUID,
+     workspace: Optional[str | UUID] = None,
+ ):
+     """
+     Assigns a workspace to the specified stage of the specified deployment pipeline.
+
+     This is a wrapper function for the following API: `Deployment Pipelines - Assign Workspace To Stage <https://learn.microsoft.com/rest/api/fabric/core/deployment-pipelines/assign-workspace-to-stage>`_.
+
+     Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
+     Parameters
+     ----------
+     deployment_pipeline : str | uuid.UUID
+         The deployment pipeline name or ID.
+     stage : str | uuid.UUID
+         The deployment pipeline stage name or ID.
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     deployment_pipeline_id = resolve_deployment_pipeline_id(
+         deployment_pipeline=deployment_pipeline
+     )
+
+     stage_id = resolve_stage_id(deployment_pipeline_id, stage)
+     workspace_id = resolve_workspace_id(workspace=workspace)
+
+     payload = {"workspaceId": workspace_id}
+
+     _base_api(
+         request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/assignWorkspace",
+         method="post",
+         client="fabric_sp",
+         payload=payload,
+     )
+
+     print(
+         f"{icons.green_dot} The workspace has been assigned to the '{stage}' stage of the '{deployment_pipeline}' deployment pipeline successfully."
+     )
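For orientation, here is a usage sketch for the new deployment pipeline helpers added in this file. It is illustrative only: the calls mirror the signatures visible in the diff above, the import assumes the functions are surfaced from the sempy_labs package, and the pipeline, stage, and workspace names are placeholders.

# Illustrative sketch; assumes these helpers are re-exported from sempy_labs
# (adjust the import to wherever your install surfaces the module).
import sempy_labs as labs

# List the deployment pipelines the caller (user or service principal) can access.
pipelines = labs.list_deployment_pipelines()

# Stage, item, operation, and role-assignment listings accept either the
# pipeline/stage name or its UUID, per resolve_deployment_pipeline_id above.
stages = labs.list_deployment_pipeline_stages(deployment_pipeline="Sales CI/CD")
items = labs.list_deployment_pipeline_stage_items(
    deployment_pipeline="Sales CI/CD", stage="Development"
)

# Assign a workspace to a stage; workspace defaults to the notebook's workspace
# when omitted.
labs.assign_workspace_to_stage(
    deployment_pipeline="Sales CI/CD",
    stage="Test",
    workspace="Sales [Test]",
)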
@@ -1,8 +1,7 @@
- import sempy
- from ..tom import connect_semantic_model
- from .._refresh_semantic_model import refresh_semantic_model
- from ._dl_helper import get_direct_lake_source
- from .._helper_functions import (
+ from sempy_labs.tom import connect_semantic_model
+ from sempy_labs._refresh_semantic_model import refresh_semantic_model
+ from sempy_labs.directlake._dl_helper import get_direct_lake_source
+ from sempy_labs._helper_functions import (
      _convert_data_type,
      resolve_dataset_name_and_id,
      resolve_workspace_name_and_id,
@@ -12,6 +11,7 @@ from sempy._utils._log import log
  from typing import List, Optional, Union
  import sempy_labs._icons as icons
  from uuid import UUID
+ import json


  @log
@@ -76,6 +76,7 @@ def update_direct_lake_partition_entity(
              for p in t.Partitions
              if t.Name == tName
          )
+         current_slt = tom.model.Tables[tName].SourceLineageTag

          if part_name is None:
              raise ValueError(
@@ -85,14 +86,56 @@ def update_direct_lake_partition_entity(
          tom.model.Tables[tName].Partitions[part_name].Source.EntityName = eName

          # Update source lineage tag
-         existing_schema = (
-             tom.model.Tables[tName].Partitions[part_name].Source.SchemaName or "dbo"
-         )
-         if schema is None:
-             schema = existing_schema
+         if schema:
+             # Only set schema for DL over SQL (not DL over OneLake)
+             expression_source_name = (
+                 tom.model.Tables[tName]
+                 .Partitions[part_name]
+                 .Source.ExpressionSource.Name
+             )
+             expr = tom.model.Expressions[expression_source_name].Expression
+             if "Sql.Database" in expr:
+                 tom.model.Tables[tName].Partitions[
+                     part_name
+                 ].Source.SchemaName = schema
+                 tom.model.Tables[tName].SourceLineageTag = f"[{schema}].[{eName}]"
+             else:
+                 tom.model.Tables[tName].SourceLineageTag = f"[dbo].[{eName}]"
+
+         new_slt = tom.model.Tables[tName].SourceLineageTag
+
+         # PBI_RemovedChildren logic
+         try:
+             e_name = (
+                 tom.model.Tables[tName]
+                 .Partitions[part_name]
+                 .Source.ExpressionSource.Name
+             )
+             ann = tom.get_annotation_value(
+                 object=tom.model.Expressions[e_name], name="PBI_RemovedChildren"
+             )
+             if ann:
+                 e = json.loads(ann)
+                 for i in e:
+                     sltag = (
+                         i.get("remoteItemId", {})
+                         .get("analysisServicesObject", {})
+                         .get("sourceLineageTag", {})
+                     )
+                     if sltag == current_slt:
+                         i["remoteItemId"]["analysisServicesObject"][
+                             "sourceLineageTag"
+                         ] = new_slt
+                 tom.set_annotation(
+                     object=tom.model.Expressions[e_name],
+                     name="PBI_RemovedChildren",
+                     value=json.dumps(e),
+                 )
+         except Exception as e:
+             print(
+                 f"{icons.yellow_dot} Warning: Could not update PBI_RemovedChildren annotation for table '{tName}'. {str(e)}"
+             )

-         tom.model.Tables[tName].Partitions[part_name].Source.SchemaName = schema
-         tom.model.Tables[tName].SourceLineageTag = f"[{schema}].[{eName}]"
          print(
              f"{icons.green_dot} The '{tName}' table in the '{dataset_name}' semantic model within the '{workspace_name}' workspace has been updated to point to the '{eName}' table."
          )
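The PBI_RemovedChildren handling above rewrites a source lineage tag inside an annotation stored as JSON. Judging only from the .get() chain in the added code, the annotation value appears to be a JSON array shaped roughly like the sketch below; this structure is inferred from the diff, not taken from package documentation.

# Shape implied by the traversal in the added code (illustrative, not authoritative).
import json

annotation_value = json.dumps(
    [
        {
            "remoteItemId": {
                "analysisServicesObject": {
                    # the tag the new code rewrites from the old "[schema].[entity]"
                    # value to the newly computed SourceLineageTag
                    "sourceLineageTag": "[dbo].[DimProduct]"
                }
            }
        }
    ]
)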
@@ -105,6 +148,7 @@ def add_table_to_direct_lake_semantic_model(
      lakehouse_table_name: str,
      refresh: bool = True,
      workspace: Optional[str | UUID] = None,
+     columns: Optional[List[str] | str] = None,
  ):
      """
      Adds a table and all of its columns to a Direct Lake semantic model, based on a Fabric lakehouse table.
@@ -123,12 +167,12 @@ def add_table_to_direct_lake_semantic_model(
          The name or ID of the Fabric workspace in which the semantic model resides.
          Defaults to None which resolves to the workspace of the attached lakehouse
          or if no lakehouse attached, resolves to the workspace of the notebook.
+     columns : List[str] | str, default=None
+         A list of column names to add to the table. If None, all columns from the
+         lakehouse table will be added.
      """

-     sempy.fabric._client._utils._init_analysis_services()
-     import Microsoft.AnalysisServices.Tabular as TOM
      from sempy_labs.lakehouse._get_lakehouse_columns import get_lakehouse_columns
-     from sempy_labs.lakehouse._get_lakehouse_tables import get_lakehouse_tables

      (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
      (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
@@ -147,6 +191,9 @@ def add_table_to_direct_lake_semantic_model(
              f"{icons.red_dot} This function only supports Direct Lake semantic models where the source lakehouse resides in the same workpace as the semantic model."
          )

+     if isinstance(columns, str):
+         columns = [columns]
+
      lakehouse_workspace = resolve_workspace_name(workspace_id=lakehouse_workspace_id)

      with connect_semantic_model(
@@ -160,41 +207,26 @@ def add_table_to_direct_lake_semantic_model(
              f"{icons.red_dot} This function is only valid for Direct Lake semantic models or semantic models with no tables."
          )

-         if any(
-             p.Name == lakehouse_table_name
-             for p in tom.all_partitions()
-             if p.SourceType == TOM.PartitionSourceType.Entity
-         ):
-             t_name = next(
-                 p.Parent.Name
-                 for p in tom.all_partitions()
-                 if p.Name
-                 == lakehouse_table_name & p.SourceType
-                 == TOM.PartitionSourceType.Entity
-             )
-             raise ValueError(
-                 f"The '{lakehouse_table_name}' table already exists in the '{dataset_name}' semantic model within the '{workspace_name}' workspace as the '{t_name}' table."
-             )
-
          if any(t.Name == table_name for t in tom.model.Tables):
              raise ValueError(
                  f"The '{table_name}' table already exists in the '{dataset_name}' semantic model within the '{workspace_name}' workspace."
              )

-         dfL = get_lakehouse_tables(
+         dfLC = get_lakehouse_columns(
              lakehouse=lakehouse_name, workspace=lakehouse_workspace
          )
-         dfL_filt = dfL[dfL["Table Name"] == lakehouse_table_name]
-
-         if len(dfL_filt) == 0:
+         dfLC_filt = dfLC[dfLC["Table Name"] == lakehouse_table_name]
+         if dfLC_filt.empty:
              raise ValueError(
-                 f"The '{lakehouse_table_name}' table does not exist in the '{lakehouse_name}' lakehouse within the '{lakehouse_workspace}' workspace."
+                 f"{icons.red_dot} The '{lakehouse_table_name}' table was not found in the '{lakehouse_name}' lakehouse within the '{lakehouse_workspace}' workspace."
              )
+         if columns:
+             dfLC_filt = dfLC_filt[dfLC_filt["Column Name"].isin(columns)]

-         dfLC = get_lakehouse_columns(
-             lakehouse=lakehouse_name, workspace=lakehouse_workspace
-         )
-         dfLC_filt = dfLC[dfLC["Table Name"] == lakehouse_table_name]
+         if dfLC_filt.empty:
+             raise ValueError(
+                 f"{icons.red_dot} No matching columns were found in the '{lakehouse_table_name}' table in the '{lakehouse_name}' lakehouse within the '{lakehouse_workspace}' workspace."
+             )

          tom.add_table(name=table_name)
          print(
@@ -207,7 +239,7 @@ def add_table_to_direct_lake_semantic_model(
              f"{icons.green_dot} The '{lakehouse_table_name}' partition has been added to the '{table_name}' table in the '{dataset_name}' semantic model within the '{workspace_name}' workspace."
          )

-         for i, r in dfLC_filt.iterrows():
+         for _, r in dfLC_filt.iterrows():
              lakeCName = r["Column Name"]
              dType = r["Data Type"]
              dt = _convert_data_type(dType)
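To illustrate the new columns parameter on add_table_to_direct_lake_semantic_model, a minimal call might look like the sketch below. The dataset and table_name parameter names are taken from the function body shown in the hunks above (they are not visible in the signature hunk), the import path is assumed, and the model, table, and column names are placeholders.

# Illustrative sketch; import path and unreviewed parameter names are assumptions.
from sempy_labs.directlake import add_table_to_direct_lake_semantic_model

add_table_to_direct_lake_semantic_model(
    dataset="Sales Model",               # target Direct Lake semantic model
    table_name="DimProduct",             # table to create in the model
    lakehouse_table_name="dim_product",  # source lakehouse table
    columns=["ProductKey", "ProductName", "Color"],  # new in 0.12.5: add only these columns
    refresh=True,
)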
@@ -9,6 +9,7 @@ from ._groups import (
      create_group,
      delete_group,
      update_group,
+     list_group_transitive_members,
  )
  from ._users import (
      resolve_user_id,
@@ -54,4 +55,5 @@ __all__ = [
      "resolve_sensitivity_label_id",
      "add_user_license",
      "remove_user_license",
+     "list_group_transitive_members",
  ]
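The final hunks simply re-export list_group_transitive_members from what appears to be the graph subpackage __init__. A minimal usage sketch follows, assuming the helper takes a group name or ID like its sibling group functions; its signature is not shown anywhere in this diff.

# Import path inferred from the __init__ change above; the call signature is an assumption.
from sempy_labs.graph import list_group_transitive_members

# Presumably returns the transitive (nested) membership of a Microsoft Entra group
# as a pandas dataframe, like the other list_* helpers in this package.
members = list_group_transitive_members(group="Sales Analysts")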