semantic-link-labs 0.8.8__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


sempy_labs/_git.py CHANGED
@@ -277,43 +277,48 @@ def commit_to_git(
     workspace, workspace_id = resolve_workspace_name_and_id(workspace)

     gs = get_git_status(workspace=workspace)
-    workspace_head = gs["Workspace Head"].iloc[0]
+    if not gs.empty:
+        workspace_head = gs["Workspace Head"].iloc[0]

-    if item_ids is None:
-        commit_mode = "All"
-    else:
-        commit_mode = "Selective"
+        if item_ids is None:
+            commit_mode = "All"
+        else:
+            commit_mode = "Selective"

-    if isinstance(item_ids, str):
-        item_ids = [item_ids]
+        if isinstance(item_ids, str):
+            item_ids = [item_ids]

-    request_body = {
-        "mode": commit_mode,
-        "workspaceHead": workspace_head,
-        "comment": comment,
-    }
+        request_body = {
+            "mode": commit_mode,
+            "workspaceHead": workspace_head,
+            "comment": comment,
+        }

-    if item_ids is not None:
-        request_body["items"] = [{"objectId": item_id} for item_id in item_ids]
+        if item_ids is not None:
+            request_body["items"] = [{"objectId": item_id} for item_id in item_ids]

-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/git/commitToGit",
-        json=request_body,
-    )
+        client = fabric.FabricRestClient()
+        response = client.post(
+            f"/v1/workspaces/{workspace_id}/git/commitToGit",
+            json=request_body,
+        )

-    if response.status_code not in [200, 202]:
-        raise FabricHTTPException(response)
+        if response.status_code not in [200, 202]:
+            raise FabricHTTPException(response)

-    lro(client, response)
+        lro(client=client, response=response, return_status_code=True)

-    if commit_mode == "All":
-        print(
-            f"{icons.green_dot} All items within the '{workspace}' workspace have been committed to Git."
-        )
+        if commit_mode == "All":
+            print(
+                f"{icons.green_dot} All items within the '{workspace}' workspace have been committed to Git."
+            )
+        else:
+            print(
+                f"{icons.green_dot} The {item_ids} items within the '{workspace}' workspace have been committed to Git."
+            )
     else:
         print(
-            f"{icons.green_dot} The {item_ids} items ithin the '{workspace}' workspace have been committed to Git."
+            f"{icons.info} Git already up to date: no modified items found within the '{workspace}' workspace."
         )

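The reworked commit_to_git wraps the whole request flow in an `if not gs.empty:` guard, so an empty Git status now prints an informational message instead of failing on `gs["Workspace Head"].iloc[0]` against an empty dataframe, and `lro` is now asked for the status code explicitly. A minimal usage sketch, assuming the function is exported at the package root as in other sempy-labs releases (the workspace name and item ID are placeholders):

    import sempy_labs as labs

    # Commit all items; prints an info message if Git is already up to date.
    labs.commit_to_git(comment="Nightly sync", workspace="Sales Workspace")

    # Selective commit of a specific item by object ID.
    labs.commit_to_git(
        comment="Update model only",
        item_ids="aaaaaaaa-0000-0000-0000-000000000001",  # placeholder ID
        workspace="Sales Workspace",
    )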
sempy_labs/_helper_functions.py CHANGED

@@ -160,14 +160,34 @@ def resolve_report_name(report_id: UUID, workspace: Optional[str] = None) -> str
     return obj


-def resolve_dataset_id(dataset: str, workspace: Optional[str] = None) -> UUID:
+def resolve_dataset_name_and_id(
+    dataset: str | UUID, workspace: Optional[str] = None
+) -> Tuple[str, UUID]:
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    if _is_valid_uuid(dataset):
+        dataset_id = dataset
+        dataset_name = fabric.resolve_item_name(
+            item_id=dataset_id, type="SemanticModel", workspace=workspace_id
+        )
+    else:
+        dataset_name = dataset
+        dataset_id = fabric.resolve_item_id(
+            item_name=dataset, type="SemanticModel", workspace=workspace_id
+        )
+
+    return dataset_name, dataset_id
+
+
+def resolve_dataset_id(dataset: str | UUID, workspace: Optional[str] = None) -> UUID:
     """
     Obtains the ID of the semantic model.

     Parameters
     ----------
-    dataset : str
-        The name of the semantic model.
+    dataset : str | UUID
+        The name or ID of the semantic model.
     workspace : str, default=None
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -179,15 +199,14 @@ def resolve_dataset_id(dataset: str, workspace: Optional[str] = None) -> UUID:
         The ID of the semantic model.
     """

-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-
-    obj = fabric.resolve_item_id(
-        item_name=dataset, type="SemanticModel", workspace=workspace
-    )
+    if _is_valid_uuid(dataset):
+        dataset_id = dataset
+    else:
+        dataset_id = fabric.resolve_item_id(
+            item_name=dataset, type="SemanticModel", workspace=workspace
+        )

-    return obj
+    return dataset_id


 def resolve_dataset_name(dataset_id: UUID, workspace: Optional[str] = None) -> str:
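resolve_dataset_name_and_id is the pivot of this release: it accepts either a semantic model name or its UUID and returns both, resolving the missing half through fabric.resolve_item_name or fabric.resolve_item_id. A hedged sketch of the calling pattern adopted throughout the rest of the diff (names and IDs are placeholders):

    from sempy_labs._helper_functions import (
        resolve_dataset_name_and_id,
        resolve_workspace_name_and_id,
    )

    (workspace_name, workspace_id) = resolve_workspace_name_and_id("Sales Workspace")

    # Both calls yield the same (name, id) pair.
    name, ds_id = resolve_dataset_name_and_id("Sales Model", workspace=workspace_id)
    name, ds_id = resolve_dataset_name_and_id(ds_id, workspace=workspace_id)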
@@ -761,13 +780,19 @@ def get_capacity_id(workspace: Optional[str] = None) -> UUID:
         The capacity Id.
     """

-    workspace = fabric.resolve_workspace_name(workspace)
-    filter_condition = urllib.parse.quote(workspace)
-    dfW = fabric.list_workspaces(filter=f"name eq '{filter_condition}'")
-    if len(dfW) == 0:
-        raise ValueError(f"{icons.red_dot} The '{workspace}' does not exist'.")
+    if workspace is None:
+        capacity_id = _get_x_id(name="trident.capacity.id")
+    else:
+
+        workspace = fabric.resolve_workspace_name(workspace)
+        filter_condition = urllib.parse.quote(workspace)
+        dfW = fabric.list_workspaces(filter=f"name eq '{filter_condition}'")
+        if len(dfW) == 0:
+            raise ValueError(f"{icons.red_dot} The '{workspace}' does not exist'.")
+
+        capacity_id = dfW["Capacity Id"].iloc[0]

-    return dfW["Capacity Id"].iloc[0]
+    return capacity_id


 def get_capacity_name(workspace: Optional[str] = None) -> str:
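get_capacity_id now short-circuits when workspace is None, reading the capacity ID from the notebook's Fabric session context (via the _get_x_id helper referenced here) rather than filtering fabric.list_workspaces. The two paths, sketched under the assumption of a running Fabric notebook session:

    from sempy_labs._helper_functions import get_capacity_id

    capacity_id = get_capacity_id()                             # session context, no REST call
    capacity_id = get_capacity_id(workspace="Sales Workspace")  # list_workspaces lookup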
@@ -1167,20 +1192,20 @@ def _make_list_unique(my_list):

 def _get_partition_map(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:

-    if workspace is None:
-        workspace = fabric.resolve_workspace_name()
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

     partitions = fabric.evaluate_dax(
-        dataset=dataset,
-        workspace=workspace,
+        dataset=dataset_id,
+        workspace=workspace_id,
         dax_string="""
         select [ID] AS [PartitionID], [TableID], [Name] AS [PartitionName] from $system.tmschema_partitions
         """,
     )

     tables = fabric.evaluate_dax(
-        dataset=dataset,
-        workspace=workspace,
+        dataset=dataset_id,
+        workspace=workspace_id,
         dax_string="""
         select [ID] AS [TableID], [Name] AS [TableName] from $system.tmschema_tables
         """,
@@ -1352,3 +1377,15 @@ def _is_valid_uuid(
         return True
     except ValueError:
         return False
+
+
+def _get_fabric_context_setting(name: str):
+
+    from synapse.ml.internal_utils.session_utils import get_fabric_context
+
+    return get_fabric_context().get(name)
+
+
+def get_tenant_id():
+
+    _get_fabric_context_setting(name="trident.tenant.id")
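_get_fabric_context_setting wraps synapse.ml.internal_utils.session_utils.get_fabric_context, a key-value view of the notebook session; get_capacity_id above reads the same context. Note that get_tenant_id as diffed never returns the looked-up value, so it yields None; a corrected sketch would be:

    def get_tenant_id():
        # Return the tenant ID from the Fabric session context.
        return _get_fabric_context_setting(name="trident.tenant.id")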
sempy_labs/_list_functions.py CHANGED

@@ -7,23 +7,25 @@ from sempy_labs._helper_functions import (
     pagination,
     resolve_item_type,
     format_dax_object_name,
+    resolve_dataset_name_and_id,
 )
 import pandas as pd
 from typing import Optional
 import sempy_labs._icons as icons
 from sempy.fabric.exceptions import FabricHTTPException
+from uuid import UUID


 def get_object_level_security(
-    dataset: str, workspace: Optional[str] = None
+    dataset: str | UUID, workspace: Optional[str] = None
 ) -> pd.DataFrame:
     """
     Shows the object level security for the semantic model.

     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | UUID
+        Name or ID of the semantic model.
     workspace : str, default=None
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -37,12 +39,13 @@ def get_object_level_security(

     from sempy_labs.tom import connect_semantic_model

-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

     df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])

     with connect_semantic_model(
-        dataset=dataset, readonly=True, workspace=workspace
+        dataset=dataset_id, readonly=True, workspace=workspace_id
     ) as tom:

         for r in tom.model.Roles:
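This two-line block — resolve the workspace, then the dataset within it — is the pattern threaded through every function below: callers may pass names or UUIDs, and all downstream fabric calls receive resolved IDs. For example, both of these now behave identically, assuming the root-level export (name and UUID are placeholders):

    import sempy_labs as labs

    ols = labs.get_object_level_security("Sales Model", workspace="Sales Workspace")
    ols = labs.get_object_level_security("aaaaaaaa-0000-0000-0000-000000000001")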
@@ -82,15 +85,15 @@ def get_object_level_security(


 def list_tables(
-    dataset: str, workspace: Optional[str] = None, extended: bool = False
+    dataset: str | UUID, workspace: Optional[str] = None, extended: bool = False
 ) -> pd.DataFrame:
     """
     Shows a semantic model's tables and their properties.

     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | UUID
+        Name or ID of the semantic model.
     workspace : str, default=None
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -106,7 +109,8 @@ def list_tables(

     from sempy_labs.tom import connect_semantic_model

-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

     df = pd.DataFrame(
         columns=[
@@ -121,20 +125,20 @@ def list_tables(
     )

     with connect_semantic_model(
-        dataset=dataset, workspace=workspace, readonly=True
+        dataset=dataset_id, workspace=workspace_id, readonly=True
     ) as tom:
         if extended:
             dict_df = fabric.evaluate_dax(
-                dataset=dataset,
-                workspace=workspace,
+                dataset=dataset_id,
+                workspace=workspace_id,
                 dax_string="""
                 EVALUATE SELECTCOLUMNS(FILTER(INFO.STORAGETABLECOLUMNS(), [COLUMN_TYPE] = "BASIC_DATA"),[DIMENSION_NAME],[DICTIONARY_SIZE])
                 """,
             )
             dict_sum = dict_df.groupby("[DIMENSION_NAME]")["[DICTIONARY_SIZE]"].sum()
             data = fabric.evaluate_dax(
-                dataset=dataset,
-                workspace=workspace,
+                dataset=dataset_id,
+                workspace=workspace_id,
                 dax_string="""EVALUATE SELECTCOLUMNS(INFO.STORAGETABLECOLUMNSEGMENTS(),[TABLE_ID],[DIMENSION_NAME],[USED_SIZE])""",
             )
             data_sum = (
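With the ID-based calls in place, the extended statistics in list_tables (dictionary size, column segment size, and the row counts below) are gathered against the resolved model ID. A usage sketch with placeholder names, assuming the root-level export:

    import sempy_labs as labs

    df = labs.list_tables(
        dataset="Sales Model",        # or the model's UUID
        workspace="Sales Workspace",
        extended=True,                # adds size and row-count columns
    )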
@@ -162,8 +166,8 @@ def list_tables(
                 .sum()
             )
             rc = fabric.evaluate_dax(
-                dataset=dataset,
-                workspace=workspace,
+                dataset=dataset_id,
+                workspace=workspace_id,
                 dax_string="""
                 SELECT [DIMENSION_NAME],[ROWS_COUNT] FROM $SYSTEM.DISCOVER_STORAGE_TABLES
                 WHERE RIGHT ( LEFT ( TABLE_ID, 2 ), 1 ) <> '$'
616
620
  ]
617
621
  )
618
622
 
619
- if workspace == "None":
620
- workspace_id = fabric.get_workspace_id()
621
- workspace = fabric.resovle_workspace_name(workspace_id)
622
- else:
623
- workspace_id = fabric.resolve_workspace_id(workspace)
623
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
624
624
 
625
625
  client = fabric.PowerBIRestClient()
626
626
  response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
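The removed branch compared workspace against the string "None" rather than the None singleton, and its misspelled fabric.resovle_workspace_name would have raised AttributeError had it ever run; delegating to resolve_workspace_name_and_id fixes both. For contrast:

    workspace = None
    workspace == "None"  # False: the old guard never matched the default
    workspace is None    # True: the check the code actually needed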
@@ -635,8 +635,8 @@ def list_dashboards(workspace: Optional[str] = None) -> pd.DataFrame:
             "Web URL": v.get("webUrl"),
             "Embed URL": v.get("embedUrl"),
             "Data Classification": v.get("dataClassification"),
-            "Users": [v.get("users")],
-            "Subscriptions": [v.get("subscriptions")],
+            "Users": v.get("users"),
+            "Subscriptions": v.get("subscriptions"),
         }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
@@ -854,15 +854,15 @@ def update_item(


 def list_relationships(
-    dataset: str, workspace: Optional[str] = None, extended: bool = False
+    dataset: str | UUID, workspace: Optional[str] = None, extended: bool = False
 ) -> pd.DataFrame:
     """
     Shows a semantic model's relationships and their properties.

     Parameters
     ----------
-    dataset: str
-        Name of the semantic model.
+    dataset: str | UUID
+        Name or UUID of the semantic model.
     workspace : str, default=None
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -876,17 +876,18 @@ def list_relationships(
         A pandas dataframe showing the object level security for the semantic model.
     """

-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

-    dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
+    dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id)
     dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
     dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])

     if extended:
         # Used to map the Relationship IDs
         rel = fabric.evaluate_dax(
-            dataset=dataset,
-            workspace=workspace,
+            dataset=dataset_id,
+            workspace=workspace_id,
             dax_string="""
             SELECT
             [ID] AS [RelationshipID]
@@ -897,8 +898,8 @@ def list_relationships(

         # USED_SIZE shows the Relationship Size where TABLE_ID starts with R$
         cs = fabric.evaluate_dax(
-            dataset=dataset,
-            workspace=workspace,
+            dataset=dataset_id,
+            workspace=workspace_id,
             dax_string="""
             SELECT
             [TABLE_ID]
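A usage sketch for the now ID-aware list_relationships; extended=True joins the relationship IDs against the R$ column-segment sizes queried here (placeholder names, root-level export assumed):

    import sempy_labs as labs

    dfR = labs.list_relationships(
        dataset="Sales Model",        # or the model's UUID
        workspace="Sales Workspace",
        extended=True,                # adds relationship-size info from the DMVs
    )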
@@ -1574,3 +1575,148 @@ def list_semantic_model_object_report_usage(
     final_df.reset_index(drop=True, inplace=True)

     return final_df
+
+
+def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+    """
+    Lists the `properties <https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.serverproperty?view=analysisservices-dotnet>`_ of the Analysis Services instance.
+
+    Parameters
+    ----------
+    workspace : str, default=None
+        The Fabric workspace name.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of the server properties.
+    """
+
+    tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
+
+    rows = [
+        {
+            "Name": sp.Name,
+            "Value": sp.Value,
+            "Default Value": sp.DefaultValue,
+            "Is Read Only": sp.IsReadOnly,
+            "Requires Restart": sp.RequiresRestart,
+            "Units": sp.Units,
+            "Category": sp.Category,
+        }
+        for sp in tom_server.ServerProperties
+    ]
+
+    tom_server.Dispose()
+    df = pd.DataFrame(rows)
+
+    bool_cols = ["Is Read Only", "Requires Restart"]
+    df[bool_cols] = df[bool_cols].astype(bool)
+
+    return df
+
+
+def list_semantic_model_errors(
+    dataset: str | UUID, workspace: Optional[str | UUID]
+) -> pd.DataFrame:
+    """
+    Shows a list of a semantic model's errors and their error messages (if they exist).
+
+    Parameters
+    ----------
+    dataset : str | UUID
+        Name or ID of the semantic model.
+    workspace : str | UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of the errors and error messages for a given semantic model.
+    """
+
+    from sempy_labs.tom import connect_semantic_model
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(
+        dataset, workspace=workspace_id
+    )
+
+    error_rows = []
+
+    with connect_semantic_model(
+        dataset=dataset_id, workspace=workspace_id, readonly=True
+    ) as tom:
+        # Define mappings of TOM objects to object types and attributes
+        error_checks = [
+            ("Column", tom.all_columns, lambda o: o.ErrorMessage),
+            ("Partition", tom.all_partitions, lambda o: o.ErrorMessage),
+            (
+                "Partition - Data Coverage Expression",
+                tom.all_partitions,
+                lambda o: (
+                    o.DataCoverageDefinition.ErrorMessage
+                    if o.DataCoverageDefinition
+                    else ""
+                ),
+            ),
+            ("Row Level Security", tom.all_rls, lambda o: o.ErrorMessage),
+            ("Calculation Item", tom.all_calculation_items, lambda o: o.ErrorMessage),
+            ("Measure", tom.all_measures, lambda o: o.ErrorMessage),
+            (
+                "Measure - Detail Rows Expression",
+                tom.all_measures,
+                lambda o: (
+                    o.DetailRowsDefinition.ErrorMessage
+                    if o.DetailRowsDefinition
+                    else ""
+                ),
+            ),
+            (
+                "Measure - Format String Expression",
+                tom.all_measures,
+                lambda o: (
+                    o.FormatStringDefinition.ErrorMessage
+                    if o.FormatStringDefinition
+                    else ""
+                ),
+            ),
+            (
+                "Calculation Group - Multiple or Empty Selection Expression",
+                tom.all_calculation_groups,
+                lambda o: (
+                    o.CalculationGroup.MultipleOrEmptySelectionExpression.ErrorMessage
+                    if o.CalculationGroup.MultipleOrEmptySelectionExpression
+                    else ""
+                ),
+            ),
+            (
+                "Calculation Group - No Selection Expression",
+                tom.all_calculation_groups,
+                lambda o: (
+                    o.CalculationGroup.NoSelectionExpression.ErrorMessage
+                    if o.CalculationGroup.NoSelectionExpression
+                    else ""
+                ),
+            ),
+        ]
+
+        # Iterate over all error checks
+        for object_type, getter, error_extractor in error_checks:
+            for obj in getter():
+                error_message = error_extractor(obj)
+                if error_message:  # Only add rows if there's an error message
+                    error_rows.append(
+                        {
+                            "Object Type": object_type,
+                            "Table Name": obj.Parent.Name,
+                            "Object Name": obj.Name,
+                            "Error Message": error_message,
+                        }
+                    )
+
+    return pd.DataFrame(error_rows)
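Both additions follow the listing conventions above; list_semantic_model_errors walks every expression-bearing TOM object (columns, partitions, RLS, calculation items, measures, calculation groups) and keeps only rows whose ErrorMessage is non-empty. A hedged usage sketch assuming root-level exports; note that workspace has no default in the released signature of list_semantic_model_errors, so it must be passed explicitly (placeholder names):

    import sempy_labs as labs

    props = labs.list_server_properties(workspace="Sales Workspace")
    errors = labs.list_semantic_model_errors(
        dataset="Sales Model", workspace="Sales Workspace"
    )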
sempy_labs/_model_bpa.py CHANGED
@@ -10,9 +10,10 @@ from sempy_labs._helper_functions import (
     create_relationship_name,
     save_as_delta_table,
     resolve_workspace_capacity,
-    resolve_dataset_id,
+    resolve_dataset_name_and_id,
     get_language_codes,
     _get_max_run_id,
+    resolve_workspace_name_and_id,
 )
 from sempy_labs.lakehouse import get_lakehouse_tables, lakehouse_attached
 from sempy_labs.tom import connect_semantic_model
@@ -23,11 +24,12 @@ import sempy_labs._icons as icons
 from pyspark.sql.functions import col, flatten
 from pyspark.sql.types import StructType, StructField, StringType
 import os
+from uuid import UUID


 @log
 def run_model_bpa(
-    dataset: str,
+    dataset: str | UUID,
     rules: Optional[pd.DataFrame] = None,
     workspace: Optional[str] = None,
     export: bool = False,
@@ -41,8 +43,8 @@ def run_model_bpa(

     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | UUID
+        Name or ID of the semantic model.
     rules : pandas.DataFrame, default=None
         A pandas dataframe containing rules to be evaluated.
     workspace : str, default=None
@@ -105,28 +107,27 @@ def run_model_bpa(
     if language is not None:
         language = map_language(language, language_list)

-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(
+        dataset, workspace=workspace_id
+    )

     if language is not None and language not in language_list:
         print(
             f"{icons.yellow_dot} The '{language}' language code is not in our predefined language list. Please file an issue and let us know which language code you are using: https://github.com/microsoft/semantic-link-labs/issues/new?assignees=&labels=&projects=&template=bug_report.md&title=."
         )

-    if extended:
-        icons.sll_tags.append("ModelBPAExtended")
-        with connect_semantic_model(
-            dataset=dataset, workspace=workspace, readonly=False
-        ) as tom:
-            tom.set_vertipaq_annotations()
-
     with connect_semantic_model(
-        dataset=dataset, workspace=workspace, readonly=True
+        dataset=dataset_id, workspace=workspace_id, readonly=True
     ) as tom:

+        if extended:
+            tom.set_vertipaq_annotations()
+
         # Do not run BPA for models with no tables
         if tom.model.Tables.Count == 0:
             print(
-                f"{icons.warning} The '{dataset}' semantic model within the '{workspace}' workspace has no tables and therefore there are no valid BPA results."
+                f"{icons.warning} The '{dataset_name}' semantic model within the '{workspace_name}' workspace has no tables and therefore there are no valid BPA results."
             )
             finalDF = pd.DataFrame(
                 columns=[
@@ -140,7 +141,9 @@ def run_model_bpa(
                 ]
             )
         else:
-            dep = get_model_calc_dependencies(dataset=dataset, workspace=workspace)
+            dep = get_model_calc_dependencies(
+                dataset=dataset_id, workspace=workspace_id
+            )

             def translate_using_po(rule_file):
                 current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -386,20 +389,19 @@ def run_model_bpa(
         runId = max_run_id + 1

         now = datetime.datetime.now()
-        dfD = fabric.list_datasets(workspace=workspace, mode="rest")
-        dfD_filt = dfD[dfD["Dataset Name"] == dataset]
+        dfD = fabric.list_datasets(workspace=workspace_id, mode="rest")
+        dfD_filt = dfD[dfD["Dataset Id"] == dataset_id]
         configured_by = dfD_filt["Configured By"].iloc[0]
-        capacity_id, capacity_name = resolve_workspace_capacity(workspace=workspace)
+        capacity_id, capacity_name = resolve_workspace_capacity(workspace=workspace_id)
         dfExport["Capacity Name"] = capacity_name
         dfExport["Capacity Id"] = capacity_id
-        dfExport["Workspace Name"] = workspace
-        dfExport["Workspace Id"] = fabric.resolve_workspace_id(workspace)
-        dfExport["Dataset Name"] = dataset
-        dfExport["Dataset Id"] = resolve_dataset_id(dataset, workspace)
+        dfExport["Workspace Name"] = workspace_name
+        dfExport["Workspace Id"] = workspace_id
+        dfExport["Dataset Name"] = dataset_name
+        dfExport["Dataset Id"] = dataset_id
         dfExport["Configured By"] = configured_by
         dfExport["Timestamp"] = now
         dfExport["RunId"] = runId
-        dfExport["Configured By"] = configured_by
         dfExport["RunId"] = dfExport["RunId"].astype("int")

         dfExport = dfExport[list(icons.bpa_schema.keys())]
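Net effect for run_model_bpa: the (name, ID) pair is resolved once up front and reused everywhere, so the export filter now matches on 'Dataset Id' instead of name, the duplicate 'Configured By' assignment is gone, and extended=True sets the VertiPaq annotations inside the single read-only connection instead of opening a separate writable one. A usage sketch with the new UUID support (placeholder ID, root-level export assumed):

    import sempy_labs as labs

    labs.run_model_bpa(
        dataset="aaaaaaaa-0000-0000-0000-000000000001",  # model UUID now accepted
        workspace="Sales Workspace",
        extended=True,
    )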