semantic-link-labs 0.9.6__py3-none-any.whl → 0.9.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (35)
  1. {semantic_link_labs-0.9.6.dist-info → semantic_link_labs-0.9.7.dist-info}/METADATA +7 -5
  2. {semantic_link_labs-0.9.6.dist-info → semantic_link_labs-0.9.7.dist-info}/RECORD +35 -32
  3. {semantic_link_labs-0.9.6.dist-info → semantic_link_labs-0.9.7.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +4 -0
  5. sempy_labs/_ai.py +3 -1
  6. sempy_labs/_capacities.py +0 -1
  7. sempy_labs/_dax_query_view.py +2 -0
  8. sempy_labs/_delta_analyzer_history.py +298 -0
  9. sempy_labs/_helper_functions.py +171 -15
  10. sempy_labs/_icons.py +6 -6
  11. sempy_labs/_list_functions.py +3 -1
  12. sempy_labs/_model_bpa_bulk.py +10 -11
  13. sempy_labs/_model_bpa_rules.py +1 -1
  14. sempy_labs/admin/_basic_functions.py +28 -2
  15. sempy_labs/admin/_reports.py +1 -1
  16. sempy_labs/admin/_scanner.py +0 -2
  17. sempy_labs/admin/_tenant.py +8 -3
  18. sempy_labs/directlake/_generate_shared_expression.py +9 -1
  19. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +82 -36
  20. sempy_labs/directlake/_update_directlake_partition_entity.py +3 -0
  21. sempy_labs/graph/_groups.py +6 -0
  22. sempy_labs/graph/_teams.py +2 -0
  23. sempy_labs/graph/_users.py +4 -0
  24. sempy_labs/lakehouse/__init__.py +12 -3
  25. sempy_labs/lakehouse/_blobs.py +231 -0
  26. sempy_labs/lakehouse/_shortcuts.py +22 -3
  27. sempy_labs/migration/_direct_lake_to_import.py +47 -10
  28. sempy_labs/report/__init__.py +4 -0
  29. sempy_labs/report/_report_functions.py +3 -3
  30. sempy_labs/report/_report_helper.py +17 -5
  31. sempy_labs/report/_reportwrapper.py +17 -8
  32. sempy_labs/report/_save_report.py +147 -0
  33. sempy_labs/tom/_model.py +154 -23
  34. {semantic_link_labs-0.9.6.dist-info → semantic_link_labs-0.9.7.dist-info/licenses}/LICENSE +0 -0
  35. {semantic_link_labs-0.9.6.dist-info → semantic_link_labs-0.9.7.dist-info}/top_level.txt +0 -0
sempy_labs/migration/_direct_lake_to_import.py CHANGED
@@ -1,11 +1,16 @@
 import sempy
 from uuid import UUID
 import sempy_labs._icons as icons
+from typing import Optional


-def migrate_direct_lake_to_import(dataset: str | UUID, workspace: str | UUID):
+def migrate_direct_lake_to_import(
+    dataset: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    mode: str = "import",
+):
     """
-    Migrates a semantic model from Direct Lake mode to import mode. After running this function, you must go to the semantic model settings and update the cloud connection. Not doing so will result in an inablity to refresh/use the semantic model.
+    Migrates a semantic model or specific table(s) from a Direct Lake mode to import or DirectQuery mode. After running this function, you must go to the semantic model settings and update the cloud connection. Not doing so will result in an inablity to refresh/use the semantic model.

     Parameters
     ----------
@@ -15,12 +20,29 @@ def migrate_direct_lake_to_import(dataset: str | UUID, workspace: str | UUID):
         The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
+    mode : str, default="import"
+        The mode to migrate to. Can be either "import" or "directquery".
     """

     sempy.fabric._client._utils._init_analysis_services()
     import Microsoft.AnalysisServices.Tabular as TOM
     from sempy_labs.tom import connect_semantic_model

+    modes = {
+        "import": "Import",
+        "directquery": "DirectQuery",
+        "dq": "DirectQuery",
+    }
+
+    # Resolve mode
+    mode = mode.lower()
+    actual_mode = modes.get(mode)
+    if actual_mode is None:
+        raise ValueError(f"Invalid mode '{mode}'. Must be one of {list(modes.keys())}.")
+
+    # if isinstance(tables, str):
+    #     tables = [tables]
+
     with connect_semantic_model(
         dataset=dataset, workspace=workspace, readonly=False
     ) as tom:
@@ -31,7 +53,14 @@ def migrate_direct_lake_to_import(dataset: str | UUID, workspace: str | UUID):
             )
             return

-        for t in tom.model.Tables:
+        # if tables is None:
+        table_list = [t for t in tom.model.Tables]
+        # else:
+        #     table_list = [t for t in tom.model.Tables if t.Name in tables]
+        # if not table_list:
+        #     raise ValueError(f"{icons.red_dot} No tables found to migrate.")
+
+        for t in table_list:
             table_name = t.Name
             if t.Partitions.Count == 1 and all(
                 p.Mode == TOM.ModeType.DirectLake for p in t.Partitions
@@ -51,16 +80,24 @@ def migrate_direct_lake_to_import(dataset: str | UUID, workspace: str | UUID):
                     table_name=table_name,
                     partition_name=partition_name,
                     expression=expression,
-                    mode="Import",
+                    mode=actual_mode,
                 )
                 # Remove Direct Lake partition
                 tom.remove_object(object=p)
+                # if tables is not None:
+                #     print(
+                #         f"{icons.green_dot} The '{table_name}' table has been migrated to '{actual_mode}' mode."
+                #     )

         tom.model.Model.DefaultMode = TOM.ModeType.Import
+        # if tables is None:
+        print(
+            f"{icons.green_dot} All tables which were in Direct Lake mode have been migrated to '{actual_mode}' mode."
+        )

-    # Check
-    # for t in tom.model.Tables:
-    # if t.Partitions.Count == 1 and all(p.Mode == TOM.ModeType.Import for p in t.Partitions) and t.CalculationGroup is None:
-    # p = next(p for p in t.Partitions)
-    # print(p.Name)
-    # print(p.Source.Expression)
+        # Check
+        # for t in tom.model.Tables:
+        # if t.Partitions.Count == 1 and all(p.Mode == TOM.ModeType.Import for p in t.Partitions) and t.CalculationGroup is None:
+        # p = next(p for p in t.Partitions)
+        # print(p.Name)
+        # print(p.Source.Expression)
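
For orientation, here is a minimal usage sketch of the updated signature. The dataset and workspace names are placeholders, and the import path assumes the function remains exposed via sempy_labs.migration as in earlier releases; per the new mode-resolution logic, any value other than "import", "directquery", or the shorthand "dq" raises a ValueError.

from sempy_labs.migration import migrate_direct_lake_to_import  # check the package's public exports

# Placeholder names; intended to run in a Fabric notebook with semantic-link-labs installed.
migrate_direct_lake_to_import(
    dataset="Sales Model",
    workspace="Analytics Workspace",
    mode="directquery",  # "import" (default), "directquery", or "dq"
)
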
sempy_labs/report/__init__.py CHANGED
@@ -1,3 +1,6 @@
+from sempy_labs.report._save_report import (
+    save_report_as_pbip,
+)
 from sempy_labs.report._reportwrapper import (
     ReportWrapper,
 )
@@ -46,4 +49,5 @@ __all__ = [
     "run_report_bpa",
     "get_report_datasources",
     "download_report",
+    "save_report_as_pbip",
 ]
sempy_labs/report/_report_functions.py CHANGED
@@ -116,9 +116,9 @@ def report_dependency_tree(workspace: Optional[str | UUID] = None):
     dfR.rename(columns={"Name": "Report Name"}, inplace=True)
     dfR = dfR[["Report Name", "Dataset Name"]]

-    report_icon = "\U0001F4F6"
-    dataset_icon = "\U0001F9CA"
-    workspace_icon = "\U0001F465"
+    report_icon = "\U0001f4f6"
+    dataset_icon = "\U0001f9ca"
+    workspace_icon = "\U0001f465"

     node_dict = {}
     rootNode = Node(workspace_name)
sempy_labs/report/_report_helper.py CHANGED
@@ -236,15 +236,27 @@ def find_entity_property_pairs(data, result=None, keys_path=None):
         keys_path = []

     if isinstance(data, dict):
+        expression = data.get("Expression", {})
+        source_ref = (
+            expression.get("SourceRef", {}) if isinstance(expression, dict) else {}
+        )
+
         if (
-            "Entity" in data.get("Expression", {}).get("SourceRef", {})
+            isinstance(source_ref, dict)
+            and "Entity" in source_ref
             and "Property" in data
         ):
-            entity = data.get("Expression", {}).get("SourceRef", {}).get("Entity", {})
-            property_value = data.get("Property")
-            object_type = keys_path[-1].replace("HierarchyLevel", "Hierarchy")
+            entity = source_ref.get("Entity", "")
+            property_value = data.get("Property", "")
+
+            object_type = (
+                keys_path[-1].replace("HierarchyLevel", "Hierarchy")
+                if keys_path
+                else "Unknown"
+            )
             result[property_value] = (entity, object_type)
-            keys_path.pop()
+            if keys_path:
+                keys_path.pop()

         # Recursively search the rest of the dictionary
         for key, value in data.items():
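
The motivation for the guarded lookup is that report JSON can contain an "Expression" key whose value is not a dict (for example a list), in which case the old chained .get(...) call raised AttributeError, and keys_path could be empty when .pop() was called. A standalone illustration of the pattern (not the library code itself, using a made-up payload):

data = {"Expression": ["not", "a", "dict"], "Property": "Sales Amount"}

# Old pattern: fails because a list has no .get()
# data.get("Expression", {}).get("SourceRef", {})

# New pattern: normalise the intermediate value to {} whenever it is not a dict,
# so the entity/property pair is simply skipped instead of raising.
expression = data.get("Expression", {})
source_ref = expression.get("SourceRef", {}) if isinstance(expression, dict) else {}
print(source_ref)  # {}
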
sempy_labs/report/_reportwrapper.py CHANGED
@@ -966,17 +966,25 @@ class ReportWrapper:
                 keys_path = []

             if isinstance(data, dict):
+                expression = data.get("Expression", {})
+                source_ref = (
+                    expression.get("SourceRef", {})
+                    if isinstance(expression, dict)
+                    else {}
+                )
                 if (
-                    "Entity" in data.get("Expression", {}).get("SourceRef", {})
+                    isinstance(source_ref, dict)
+                    and "Entity" in source_ref
                     and "Property" in data
                 ):
-                    entity = (
-                        data.get("Expression", {})
-                        .get("SourceRef", {})
-                        .get("Entity", {})
+                    entity = source_ref.get("Entity", "")
+                    property_value = data.get("Property", "")
+
+                    object_type = (
+                        keys_path[-1].replace("HierarchyLevel", "Hierarchy")
+                        if keys_path
+                        else "Unknown"
                     )
-                    property_value = data.get("Property", {})
-                    object_type = keys_path[-1].replace("HierarchyLevel", "Hierarchy")
                     is_agg = keys_path[-3] == "Aggregation"
                     is_viz_calc = keys_path[-3] == "NativeVisualCalculation"
                     is_sparkline = keys_path[-3] == "SparklineData"
@@ -987,7 +995,8 @@ class ReportWrapper:
                         is_viz_calc,
                         is_sparkline,
                     )
-                    keys_path.pop()
+                    if keys_path:
+                        keys_path.pop()

                 # Recursively search the rest of the dictionary
                 for key, value in data.items():
sempy_labs/report/_save_report.py ADDED
@@ -0,0 +1,147 @@
+import os
+import base64
+import json
+import sempy.fabric as fabric
+import sempy_labs._icons as icons
+from sempy_labs.report._generate_report import get_report_definition
+from sempy_labs._generate_semantic_model import get_semantic_model_definition
+from sempy_labs._helper_functions import (
+    _mount,
+    resolve_workspace_name_and_id,
+    resolve_item_name,
+    resolve_workspace_name,
+    resolve_item_name_and_id,
+)
+from uuid import UUID
+from sempy._utils._log import log
+from typing import Optional
+
+
+@log
+def save_report_as_pbip(
+    report: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    thick_report: bool = True,
+    live_connect: bool = True,
+    lakehouse: Optional[str | UUID] = None,
+    lakehouse_workspace: Optional[str | UUID] = None,
+):
+    """
+    Saves a report as a .pbip file to the default lakehouse attached to the notebook.
+
+    Parameters
+    ----------
+    report : str | uuid.UUID
+        Name or ID of the Power BI report.
+    workspace : str | uuid.UUID, default=None
+        The name or ID of the Fabric workspace in which the report resides.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    thick_report : bool, default=True
+        If set to True, saves the report and underlying semantic model.
+        If set to False, saves just the report.
+    live_connect : bool, default=True
+        If set to True, saves a .pbip live-connected to the workspace in the Power BI / Fabric service.
+        If set to False, saves a .pbip with a local model, independent from the Power BI / Fabric service.
+    lakehouse : str | uuid.UUID, default=None
+        The Fabric lakehouse name or ID. This will be the lakehouse to which the report is saved.
+        Defaults to None which resolves to the lakehouse attached to the notebook.
+    lakehouse_workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID used by the lakehouse.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (report_workspace_name, report_workspace_id) = resolve_workspace_name_and_id(
+        workspace
+    )
+    (report_name, report_id) = resolve_item_name_and_id(
+        item=report, type="Report", workspace=workspace
+    )
+    indent = 2
+
+    local_path = _mount(lakehouse=lakehouse, workspace=lakehouse_workspace)
+    save_location = f"{local_path}/Files"
+
+    # Find semantic model info
+    dfR = fabric.list_reports(workspace=workspace)
+    dfR_filt = dfR[dfR["Id"] == report_id]
+    if dfR_filt.empty:
+        raise ValueError(
+            f"{icons.red_dot} The '{report} report does not exist within the '{report_workspace_name} workspace."
+        )
+
+    dataset_id = dfR_filt["Dataset Id"].iloc[0]
+    dataset_workspace_id = dfR_filt["Dataset Workspace Id"].iloc[0]
+    dataset_name = resolve_item_name(item_id=dataset_id, workspace=dataset_workspace_id)
+    dataset_workspace_name = resolve_workspace_name(dataset_workspace_id)
+    path_prefix = f"{save_location}/{report_workspace_name}/{report_name}/{report_name}"
+
+    # Local model not supported if the report and model are in different workspaces
+    if dataset_workspace_name != report_workspace_name and not live_connect:
+        live_connect = True
+        print(
+            f"{icons.warning} The '{report_name}' report from the '{report_workspace_name}' workspace is being saved as a live-connected report/model."
+        )
+
+    def add_files(name, type, object_workspace):
+
+        path_prefix_full = f"{path_prefix}.{type}"
+
+        if type == "Report":
+            dataframe = get_report_definition(report=name, workspace=workspace)
+        elif type == "SemanticModel":
+            dataframe = get_semantic_model_definition(
+                dataset=name, workspace=object_workspace
+            )
+        else:
+            raise NotImplementedError
+
+        # Create and save files based on dataset/report definition
+        for _, r in dataframe.iterrows():
+            path = r["path"]
+            file_content = base64.b64decode(r["payload"])
+            file_path = f"{path_prefix_full}/{path}"
+            os.makedirs(os.path.dirname(file_path), exist_ok=True)
+
+            # Update the definition.pbir file for local models
+            if not live_connect and type == "Report" and path == "definition.pbir":
+                file_content = {
+                    "version": "1.0",
+                    "datasetReference": {
+                        "byPath": {"path": f"../{report_name}.SemanticModel"},
+                        "byConnection": None,
+                    },
+                }
+
+                with open(file_path, "w") as f:
+                    json.dump(file_content, f, indent=indent)
+            else:
+                with open(file_path, "wb") as f:
+                    f.write(file_content)
+
+        # Create and save .pbip file for report, converting the file extension
+        if type == "Report":
+            # Standard .pbip file content
+            pbip = {
+                "version": "1.0",
+                "artifacts": [{"report": {"path": f"{report_name}.Report"}}],
+                "settings": {"enableAutoRecovery": True},
+            }
+            # Ensure the directory exists
+            os.makedirs(os.path.dirname(path_prefix), exist_ok=True)
+            # Write the .pbip file directly
+            pbip_final = f"{path_prefix}.pbip"
+            with open(pbip_final, "w") as file:
+                json.dump(pbip, file, indent=indent)
+
+    add_files(name=report_name, type="Report", object_workspace=workspace)
+    if thick_report:
+        add_files(
+            name=dataset_name,
+            type="SemanticModel",
+            object_workspace=dataset_workspace_name,
+        )
+    print(
+        f"{icons.green_dot} The '{report_name}' report within the '{report_workspace_name}' workspace has been saved to this location: {save_location}."
+    )
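
As a usage sketch of the new function (report and workspace names are placeholders; it assumes a lakehouse is attached to the notebook, or that one is supplied via the lakehouse/lakehouse_workspace parameters):

from sempy_labs.report import save_report_as_pbip

# Save the report together with its semantic model definition (thick_report=True)
# as a live-connected .pbip under Files/<workspace>/<report>/ in the attached lakehouse.
save_report_as_pbip(
    report="Sales Report",            # placeholder report name
    workspace="Analytics Workspace",  # placeholder workspace name
    thick_report=True,
    live_connect=True,
)
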
sempy_labs/tom/_model.py CHANGED
@@ -11,6 +11,9 @@ from sempy_labs._helper_functions import (
     resolve_dataset_name_and_id,
     resolve_workspace_name_and_id,
     _base_api,
+    resolve_workspace_id,
+    resolve_item_id,
+    resolve_lakehouse_id,
 )
 from sempy_labs._list_functions import list_relationships
 from sempy_labs._refresh_semantic_model import refresh_semantic_model
@@ -84,7 +87,7 @@ class TOMWrapper:
         # No token provider (standard authentication)
         if self._token_provider is None:
             self._tom_server = fabric.create_tom_server(
-                readonly=readonly, workspace=workspace_id
+                dataset=dataset, readonly=readonly, workspace=workspace_id
             )
         # Service Principal Authentication for Azure AS via token provider
         elif self._is_azure_as:
@@ -2257,7 +2260,7 @@

         if validate:
             dax_query = f"""
-                define measure '{table_name}'[test] = 
+                define measure '{table_name}'[test] =
                 var mn = MIN('{table_name}'[{column_name}])
                 var ma = MAX('{table_name}'[{column_name}])
                 var x = COUNTROWS(DISTINCT('{table_name}'[{column_name}]))
@@ -3309,14 +3312,16 @@
             .tolist()
         )
         cols = (
-            fil[fil["Referenced Object Type"] == "Column"][
+            fil[fil["Referenced Object Type"].isin(["Column", "Calc Column"])][
                 "Referenced Full Object Name"
             ]
             .unique()
             .tolist()
         )
         tbls = (
-            fil[fil["Referenced Object Type"] == "Table"]["Referenced Table"]
+            fil[fil["Referenced Object Type"].isin(["Table", "Calc Table"])][
+                "Referenced Table"
+            ]
             .unique()
             .tolist()
         )
@@ -3489,7 +3494,7 @@
                     tableList.append(c.Parent.Name)
                 if (
                     re.search(
-                        create_pattern(tableList, re.escape(obj.Name)),
+                        create_pattern(tableList, obj.Name),
                         expr,
                     )
                     is not None
@@ -4857,6 +4862,18 @@ class TOMWrapper:

         result_df = pd.DataFrame(columns=["Table Name", "Object Name", "Object Type"])

+        def add_to_result(table_name, object_name, object_type, dataframe):
+
+            new_data = {
+                "Table Name": table_name,
+                "Object Name": object_name,
+                "Object Type": object_type,
+            }
+
+            return pd.concat(
+                [dataframe, pd.DataFrame(new_data, index=[0])], ignore_index=True
+            )
+
         for _, r in filt.iterrows():
             added = False
             obj_type = r["Referenced Object Type"]
@@ -4890,15 +4907,7 @@
                 )
                 added = True
             if added:
-                new_data = {
-                    "Table Name": table_name,
-                    "Object Name": object_name,
-                    "Object Type": obj_type,
-                }
-
-                result_df = pd.concat(
-                    [result_df, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                )
+                result_df = add_to_result(table_name, object_name, obj_type, result_df)

         # Reduce model...

@@ -4921,11 +4930,21 @@
                 self.add_to_perspective(
                     object=r.FromColumn, perspective_name=perspective_name
                 )
+
+                result_df = add_to_result(
+                    r.FromTable.Name, r.FromColumn.Name, "Column", result_df
+                )
             if not self.in_perspective(r.ToColumn, perspective_name=perspective_name):
+                table_name = r.ToTable.Name
+                object_name = r.ToColumn.Name
                 self.add_to_perspective(
                     object=r.ToColumn, perspective_name=perspective_name
                 )

+                result_df = add_to_result(
+                    r.ToTable.Name, r.ToColumn.Name, "Column", result_df
+                )
+
         # Remove objects not in the perspective
         for t in self.model.Tables:
             if not self.in_perspective(object=t, perspective_name=perspective_name):
@@ -4943,6 +4962,111 @@
         # Return the objects added to the perspective based on dependencies
         return result_df.drop_duplicates()

+    def convert_direct_lake_to_import(
+        self,
+        table_name: str,
+        entity_name: Optional[str] = None,
+        schema: Optional[str] = None,
+        source: Optional[str | UUID] = None,
+        source_type: str = "Lakehouse",
+        source_workspace: Optional[str | UUID] = None,
+    ):
+        """
+        Converts a Direct Lake table's partition to an import-mode partition.
+
+        The entity_name and schema parameters default to using the existing values in the Direct Lake partition. The source, source_type, and source_workspace
+        parameters do not default to existing values. This is because it may not always be possible to reconcile the source and its workspace.
+
+        Parameters
+        ----------
+        table_name : str
+            The table name.
+        entity_name : str, default=None
+            The entity name of the Direct Lake partition (the table name in the source).
+        schema : str, default=None
+            The schema of the source table. Defaults to None which resolves to the existing schema.
+        source : str | uuid.UUID, default=None
+            The source name or ID. This is the name or ID of the Lakehouse or Warehouse.
+        source_type : str, default="Lakehouse"
+            The source type (i.e. "Lakehouse" or "Warehouse").
+        source_workspace: str | uuid.UUID, default=None
+            The workspace name or ID of the source. This is the workspace in which the Lakehouse or Warehouse exists.
+            Defaults to None which resolves to the workspace of the attached lakehouse
+            or if no lakehouse attached, resolves to the workspace of the notebook.
+        """
+        import Microsoft.AnalysisServices.Tabular as TOM
+
+        p = next(p for p in self.model.Tables[table_name].Partitions)
+        if p.Mode != TOM.ModeType.DirectLake:
+            print(f"{icons.info} The '{table_name}' table is not in Direct Lake mode.")
+            return
+
+        partition_name = p.Name
+        partition_entity_name = entity_name or p.Source.EntityName
+        partition_schema = schema or p.Source.SchemaName
+
+        # Update name of the Direct Lake partition (will be removed later)
+        self.model.Tables[table_name].Partitions[
+            partition_name
+        ].Name = f"{partition_name}_remove"
+
+        source_workspace_id = resolve_workspace_id(workspace=source_workspace)
+        if source_type == "Lakehouse":
+            item_id = resolve_lakehouse_id(
+                lakehouse=source, workspace=source_workspace_id
+            )
+        else:
+            item_id = resolve_item_id(
+                item=source, type=source_type, workspace=source_workspace_id
+            )
+
+        def _generate_m_expression(
+            workspace_id, artifact_id, artifact_type, table_name, schema_name
+        ):
+            """
+            Generates the M expression for the import partition.
+            """
+
+            if artifact_type == "Lakehouse":
+                type_id = "lakehouseId"
+            elif artifact_type == "Warehouse":
+                type_id = "warehouseId"
+            else:
+                raise NotImplementedError
+
+            full_table_name = (
+                f"{schema_name}.{table_name}" if schema_name else table_name
+            )
+
+            return f"""let
+                Source = {artifact_type}.Contents(null),
+                #"Workspace" = Source{{[workspaceId="{workspace_id}"]}}[Data],
+                #"Artifact" = #"Workspace"{{[{type_id}="{artifact_id}"]}}[Data],
+                result = #"Artifact"{{[Id="{full_table_name}",ItemKind="Table"]}}[Data]
+                in
+                result
+                """
+
+        m_expression = _generate_m_expression(
+            source_workspace_id,
+            item_id,
+            source_type,
+            partition_entity_name,
+            partition_schema,
+        )
+
+        # Add the import partition
+        self.add_m_partition(
+            table_name=table_name,
+            partition_name=f"{partition_name}",
+            expression=m_expression,
+            mode="Import",
+        )
+        # Remove the Direct Lake partition
+        self.remove_object(object=p)
+
+        print(f"{icons.green_dot} The '{table_name}' table has been converted to Import mode.")
+
     def close(self):

         if not self._readonly and self.model is not None:
@@ -4956,18 +5080,25 @@
                     p.SourceType == TOM.PartitionSourceType.Entity
                     for p in t.Partitions
                 ):
-                    if t.LineageTag in list(self._table_map.keys()):
-                        if self._table_map.get(t.LineageTag) != t.Name:
-                            self.add_changed_property(object=t, property="Name")
+                    entity_name = next(p.Source.EntityName for p in t.Partitions)
+                    if t.Name != entity_name:
+                        self.add_changed_property(object=t, property="Name")
+                    # if t.LineageTag in list(self._table_map.keys()):
+                    #     if self._table_map.get(t.LineageTag) != t.Name:
+                    #         self.add_changed_property(object=t, property="Name")

             for c in self.all_columns():
+                # if c.LineageTag in list(self._column_map.keys()):
+                if any(
+                    p.SourceType == TOM.PartitionSourceType.Entity
+                    for p in c.Parent.Partitions
+                ):
+                    if c.Name != c.SourceColumn:
+                        self.add_changed_property(object=c, property="Name")
+                # c.SourceLineageTag = c.SourceColumn
+                # if self._column_map.get(c.LineageTag)[0] != c.Name:
+                #     self.add_changed_property(object=c, property="Name")
                 if c.LineageTag in list(self._column_map.keys()):
-                    if any(
-                        p.SourceType == TOM.PartitionSourceType.Entity
-                        for p in c.Parent.Partitions
-                    ):
-                        if self._column_map.get(c.LineageTag)[0] != c.Name:
-                            self.add_changed_property(object=c, property="Name")
                     if self._column_map.get(c.LineageTag)[1] != c.DataType:
                         self.add_changed_property(object=c, property="DataType")
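
A usage sketch for the new TOMWrapper.convert_direct_lake_to_import method, assuming a writable TOM connection; dataset, workspace, table, and lakehouse names are placeholders:

from sempy_labs.tom import connect_semantic_model

# Placeholder names; the source and source_workspace must be supplied explicitly,
# since they are not inferred from the existing Direct Lake partition.
with connect_semantic_model(
    dataset="Sales Model", workspace="Analytics Workspace", readonly=False
) as tom:
    # Swap the 'FactSales' Direct Lake partition for an import-mode M partition
    # that reads the same table from the named lakehouse.
    tom.convert_direct_lake_to_import(
        table_name="FactSales",
        source="SalesLakehouse",
        source_type="Lakehouse",
        source_workspace="Analytics Workspace",
    )
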