semantic-link-labs 0.4.1-py3-none-any.whl → 0.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (52)
  1. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.4.2.dist-info}/METADATA +1 -1
  2. semantic_link_labs-0.4.2.dist-info/RECORD +53 -0
  3. sempy_labs/__init__.py +25 -25
  4. sempy_labs/_ai.py +28 -27
  5. sempy_labs/_clear_cache.py +2 -1
  6. sempy_labs/_dax.py +5 -9
  7. sempy_labs/_generate_semantic_model.py +7 -8
  8. sempy_labs/_helper_functions.py +17 -13
  9. sempy_labs/_icons.py +5 -0
  10. sempy_labs/_list_functions.py +273 -17
  11. sempy_labs/_model_auto_build.py +1 -1
  12. sempy_labs/_model_bpa.py +37 -37
  13. sempy_labs/_model_dependencies.py +11 -12
  14. sempy_labs/_one_lake_integration.py +15 -22
  15. sempy_labs/_query_scale_out.py +1 -1
  16. sempy_labs/_refresh_semantic_model.py +4 -4
  17. sempy_labs/_translations.py +5 -5
  18. sempy_labs/_vertipaq.py +11 -11
  19. sempy_labs/directlake/_directlake_schema_compare.py +11 -9
  20. sempy_labs/directlake/_directlake_schema_sync.py +36 -37
  21. sempy_labs/directlake/_fallback.py +3 -3
  22. sempy_labs/directlake/_get_directlake_lakehouse.py +3 -4
  23. sempy_labs/directlake/_get_shared_expression.py +3 -3
  24. sempy_labs/directlake/_guardrails.py +3 -3
  25. sempy_labs/directlake/_list_directlake_model_calc_tables.py +28 -25
  26. sempy_labs/directlake/_show_unsupported_directlake_objects.py +4 -4
  27. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +10 -11
  28. sempy_labs/directlake/_update_directlake_partition_entity.py +25 -9
  29. sempy_labs/directlake/_warm_cache.py +5 -7
  30. sempy_labs/lakehouse/__init__.py +0 -2
  31. sempy_labs/lakehouse/_get_lakehouse_columns.py +3 -2
  32. sempy_labs/lakehouse/_get_lakehouse_tables.py +10 -7
  33. sempy_labs/lakehouse/_lakehouse.py +6 -5
  34. sempy_labs/lakehouse/_shortcuts.py +8 -106
  35. sempy_labs/migration/__init__.py +4 -2
  36. sempy_labs/migration/_create_pqt_file.py +2 -2
  37. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +7 -7
  38. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +4 -4
  39. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +5 -6
  40. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +6 -6
  41. sempy_labs/migration/_migration_validation.py +1 -164
  42. sempy_labs/migration/_refresh_calc_tables.py +3 -5
  43. sempy_labs/report/__init__.py +2 -2
  44. sempy_labs/report/_generate_report.py +14 -15
  45. sempy_labs/report/_report_functions.py +11 -10
  46. sempy_labs/report/_report_rebind.py +6 -7
  47. sempy_labs/tom/__init__.py +6 -0
  48. sempy_labs/{_tom.py → tom/_model.py} +166 -187
  49. semantic_link_labs-0.4.1.dist-info/RECORD +0 -52
  50. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.4.2.dist-info}/LICENSE +0 -0
  51. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.4.2.dist-info}/WHEEL +0 -0
  52. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.4.2.dist-info}/top_level.txt +0 -0
sempy_labs/_list_functions.py CHANGED
@@ -1,11 +1,15 @@
+ import sempy
  import sempy.fabric as fabric
- from sempy_labs._helper_functions import resolve_workspace_name_and_id
+ from sempy_labs._helper_functions import (
+     resolve_workspace_name_and_id,
+     resolve_lakehouse_name,
+     create_relationship_name,
+     resolve_lakehouse_id)
  import pandas as pd
  import json, time
  from pyspark.sql import SparkSession
  from typing import Optional

-
  def get_object_level_security(dataset: str, workspace: Optional[str] = None):
      """
      Shows the object level security for the semantic model.
@@ -25,7 +29,7 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None):
      A pandas dataframe showing the object level security for the semantic model.
      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

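The recurring edit in this file replaces workspace == None with workspace is None. For a plain scalar such as this the behavior is unchanged in practice, but the identity check is the PEP 8-recommended form: None is a singleton, and == can be overridden to lie. A minimal standalone illustration (not from the package), using a deliberately pathological class:

    class AlwaysEqual:
        def __eq__(self, other):
            return True  # claims equality with everything, including None

    x = AlwaysEqual()
    print(x == None)  # True  -- a misleading answer from __eq__
    print(x is None)  # False -- identity cannot be spoofed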
@@ -84,7 +88,7 @@ def list_tables(dataset: str, workspace: Optional[str] = None):
      A pandas dataframe showing the semantic model's tables and their properties.
      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -150,7 +154,7 @@ def list_annotations(dataset: str, workspace: Optional[str] = None):
      A pandas dataframe showing the semantic model's annotations and their properties.
      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -380,7 +384,7 @@ def list_columns(
          get_direct_lake_lakehouse,
      )

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -1028,10 +1032,10 @@ def create_warehouse(

      (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)

-     if description == None:
-         request_body = {"displayName": warehouse}
-     else:
-         request_body = {"displayName": warehouse, "description": description}
+     request_body = {"displayName": warehouse}
+
+     if description:
+         request_body["description"] = description

      client = fabric.FabricRestClient()
      response = client.post(
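One behavioral nuance in the create_warehouse hunk above: the old code omitted the description only when it was None, while the new truthiness check also drops falsy values such as an empty string. A standalone sketch of the difference (hypothetical helper names):

    def build_body_old(name, description=None):
        if description == None:
            return {"displayName": name}
        return {"displayName": name, "description": description}

    def build_body_new(name, description=None):
        body = {"displayName": name}
        if description:  # an empty string is now treated like None
            body["description"] = description
        return body

    print(build_body_old("WH", ""))  # {'displayName': 'WH', 'description': ''}
    print(build_body_new("WH", ""))  # {'displayName': 'WH'}

For this REST payload that is arguably the better behavior, since it avoids sending an empty description field.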
@@ -1119,10 +1123,9 @@ def update_item(

      itemId = dfI_filt["Id"].iloc[0]

-     if description == None:
-         request_body = {"displayName": new_name}
-     else:
-         request_body = {"displayName": new_name, "description": description}
+     request_body = {"displayName": new_name}
+     if description:
+         request_body["description"] = description

      client = fabric.FabricRestClient()
      response = client.patch(
@@ -1130,7 +1133,7 @@ def update_item(
      )

      if response.status_code == 200:
-         if description == None:
+         if description is None:
              print(
                  f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
              )
@@ -1167,7 +1170,7 @@ def list_relationships(
      A pandas dataframe showing the object level security for the semantic model.
      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -1286,7 +1289,7 @@ def list_kpis(dataset: str, workspace: Optional[str] = None):
      A pandas dataframe showing the KPIs for the semantic model.
      """

-     from ._tom import connect_semantic_model
+     from .tom import connect_semantic_model

      with connect_semantic_model(
          dataset=dataset, workspace=workspace, readonly=True
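The list_kpis hunk picks up the relocated TOM wrapper: sempy_labs/_tom.py became sempy_labs/tom/_model.py with a new sempy_labs/tom/__init__.py (files 47-48 in the list above). The context-manager pattern visible in the hunk is unchanged; a usage sketch against the new path, with a hypothetical model name:

    from sempy_labs.tom import connect_semantic_model

    # readonly=True opens the Tabular Object Model without saving changes back
    with connect_semantic_model(dataset="MyModel", workspace=None, readonly=True) as tom:
        for table in tom.model.Tables:
            print(table.Name)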
@@ -1370,3 +1373,256 @@ def list_workspace_role_assignments(workspace: Optional[str] = None):
          df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

      return df
+
+ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None):
+     """
+     Shows a list of semantic model objects.
+
+     Parameters
+     ----------
+     dataset : str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing a list of objects in the semantic model
+     """
+     from .tom import connect_semantic_model
+
+     df = pd.DataFrame(columns=["Parent Name", "Object Name", "Object Type"])
+     with connect_semantic_model(
+         dataset=dataset, workspace=workspace, readonly=True
+     ) as tom:
+         for t in tom.model.Tables:
+             if t.CalculationGroup is not None:
+                 new_data = {
+                     "Parent Name": t.Parent.Name,
+                     "Object Name": t.Name,
+                     "Object Type": "Calculation Group",
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+                 for ci in t.CalculationGroup.CalculationItems:
+                     new_data = {
+                         "Parent Name": t.Name,
+                         "Object Name": ci.Name,
+                         "Object Type": str(ci.ObjectType),
+                     }
+                     df = pd.concat(
+                         [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                     )
+             elif any(str(p.SourceType) == "Calculated" for p in t.Partitions):
+                 new_data = {
+                     "Parent Name": t.Parent.Name,
+                     "Object Name": t.Name,
+                     "Object Type": "Calculated Table",
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+             else:
+                 new_data = {
+                     "Parent Name": t.Parent.Name,
+                     "Object Name": t.Name,
+                     "Object Type": str(t.ObjectType),
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+             for c in t.Columns:
+                 if str(c.Type) != "RowNumber":
+                     if str(c.Type) == "Calculated":
+                         new_data = {
+                             "Parent Name": c.Parent.Name,
+                             "Object Name": c.Name,
+                             "Object Type": "Calculated Column",
+                         }
+                         df = pd.concat(
+                             [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                         )
+                     else:
+                         new_data = {
+                             "Parent Name": c.Parent.Name,
+                             "Object Name": c.Name,
+                             "Object Type": str(c.ObjectType),
+                         }
+                         df = pd.concat(
+                             [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                         )
+             for m in t.Measures:
+                 new_data = {
+                     "Parent Name": m.Parent.Name,
+                     "Object Name": m.Name,
+                     "Object Type": str(m.ObjectType),
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+             for h in t.Hierarchies:
+                 new_data = {
+                     "Parent Name": h.Parent.Name,
+                     "Object Name": h.Name,
+                     "Object Type": str(h.ObjectType),
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+                 for l in h.Levels:
+                     new_data = {
+                         "Parent Name": l.Parent.Name,
+                         "Object Name": l.Name,
+                         "Object Type": str(l.ObjectType),
+                     }
+                     df = pd.concat(
+                         [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                     )
+             for p in t.Partitions:
+                 new_data = {
+                     "Parent Name": p.Parent.Name,
+                     "Object Name": p.Name,
+                     "Object Type": str(p.ObjectType),
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+         for r in tom.model.Relationships:
+             rName = create_relationship_name(
+                 r.FromTable.Name, r.FromColumn.Name, r.ToTable.Name, r.ToColumn.Name
+             )
+             new_data = {
+                 "Parent Name": r.Parent.Name,
+                 "Object Name": rName,
+                 "Object Type": str(r.ObjectType),
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+         for role in tom.model.Roles:
+             new_data = {
+                 "Parent Name": role.Parent.Name,
+                 "Object Name": role.Name,
+                 "Object Type": str(role.ObjectType),
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+             for rls in role.TablePermissions:
+                 new_data = {
+                     "Parent Name": role.Name,
+                     "Object Name": rls.Name,
+                     "Object Type": str(rls.ObjectType),
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+         for tr in tom.model.Cultures:
+             new_data = {
+                 "Parent Name": tr.Parent.Name,
+                 "Object Name": tr.Name,
+                 "Object Type": str(tr.ObjectType),
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+         for per in tom.model.Perspectives:
+             new_data = {
+                 "Parent Name": per.Parent.Name,
+                 "Object Name": per.Name,
+                 "Object Type": str(per.ObjectType),
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+ def list_shortcuts(
+     lakehouse: Optional[str] = None, workspace: Optional[str] = None
+ ) -> pd.DataFrame:
+     """
+     Shows all shortcuts which exist in a Fabric lakehouse.
+
+     Parameters
+     ----------
+     lakehouse : str, default=None
+         The Fabric lakehouse name.
+         Defaults to None which resolves to the lakehouse attached to the notebook.
+     workspace : str, default=None
+         The name of the Fabric workspace in which lakehouse resides.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing all the shortcuts which exist in the specified lakehouse.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     if lakehouse is None:
+         lakehouse_id = fabric.get_lakehouse_id()
+         lakehouse = resolve_lakehouse_name(lakehouse_id, workspace)
+     else:
+         lakehouse_id = resolve_lakehouse_id(lakehouse, workspace)
+
+     df = pd.DataFrame(
+         columns=[
+             "Shortcut Name",
+             "Shortcut Path",
+             "Source",
+             "Source Lakehouse Name",
+             "Source Workspace Name",
+             "Source Path",
+             "Source Connection ID",
+             "Source Location",
+             "Source SubPath",
+         ]
+     )
+
+     client = fabric.FabricRestClient()
+     response = client.get(
+         f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts"
+     )
+     if response.status_code == 200:
+         for s in response.json()["value"]:
+             shortcutName = s["name"]
+             shortcutPath = s["path"]
+             source = list(s["target"].keys())[0]
+             (
+                 sourceLakehouseName,
+                 sourceWorkspaceName,
+                 sourcePath,
+                 connectionId,
+                 location,
+                 subpath,
+             ) = (None, None, None, None, None, None)
+             if source == "oneLake":
+                 sourceLakehouseId = s["target"][source]["itemId"]
+                 sourcePath = s["target"][source]["path"]
+                 sourceWorkspaceId = s["target"][source]["workspaceId"]
+                 sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId)
+                 sourceLakehouseName = resolve_lakehouse_name(
+                     sourceLakehouseId, sourceWorkspaceName
+                 )
+             else:
+                 connectionId = s["target"][source]["connectionId"]
+                 location = s["target"][source]["location"]
+                 subpath = s["target"][source]["subpath"]
+
+             new_data = {
+                 "Shortcut Name": shortcutName,
+                 "Shortcut Path": shortcutPath,
+                 "Source": source,
+                 "Source Lakehouse Name": sourceLakehouseName,
+                 "Source Workspace Name": sourceWorkspaceName,
+                 "Source Path": sourcePath,
+                 "Source Connection ID": connectionId,
+                 "Source Location": location,
+                 "Source SubPath": subpath,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     print(
+         f"This function relies on an API which is not yet official as of May 21, 2024. Once the API becomes official this function will work as expected."
+     )
+     return df
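The trailing print in list_shortcuts flags that the shortcuts REST endpoint was still unofficial when 0.4.2 shipped, so treat results as provisional. A usage sketch, assuming the function is re-exported from the package root like the other list_* helpers and that a lakehouse named 'MyLakehouse' exists:

    from sempy_labs import list_shortcuts  # assumption: re-exported via sempy_labs/__init__.py

    # Both arguments default to None, which falls back to the attached lakehouse
    # and its workspace when run inside a Fabric notebook.
    shortcuts = list_shortcuts(lakehouse="MyLakehouse", workspace=None)
    print(shortcuts[["Shortcut Name", "Source", "Source Path", "Source Location"]])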
sempy_labs/_model_auto_build.py CHANGED
@@ -1,7 +1,7 @@
  import sempy
  import sempy.fabric as fabric
  import pandas as pd
- from sempy_labs._tom import connect_semantic_model
+ from sempy_labs.tom import connect_semantic_model
  from sempy_labs._generate_semantic_model import create_blank_semantic_model
  from sempy_labs.directlake._get_shared_expression import get_shared_expression
  from typing import List, Optional, Union
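This one-line change recurs across many modules in the release: the TOM wrapper moved from the private module sempy_labs._tom to the public sempy_labs.tom subpackage. Downstream code pinned to the 0.4.1 import path needs the same one-line update:

    # before (0.4.1)
    from sempy_labs._tom import connect_semantic_model

    # after (0.4.2)
    from sempy_labs.tom import connect_semantic_model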
sempy_labs/_model_bpa.py CHANGED
@@ -70,8 +70,8 @@ def model_bpa_rules():
              "Table",
              "Warning",
              "Avoid using many-to-many relationships on tables used for dynamic row level security",
-             lambda df: (df["Used in M2M Relationship"] == True)
-             & (df["Used in Dynamic RLS"] == True),
+             lambda df: (df["Used in M2M Relationship"] is True)
+             & (df["Used in Dynamic RLS"] is True),
              "Using many-to-many relationships on tables which use dynamic row level security can cause serious query performance degradation. This pattern's performance problems compound when snowflaking multiple many-to-many relationships against a table which contains row level security. Instead, use one of the patterns shown in the article below where a single dimension table relates many-to-one to a security table.",
              "https://www.elegantbi.com/post/dynamicrlspatterns",
          ),
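A caution on this hunk and the similar ones that follow: is True / is False on a pandas Series is not an element-wise test. A Series object is never the True or False singleton, so each such comparison evaluates to the scalar False and the rule's lambda no longer produces a row mask; == True (or the bare boolean column) was the element-wise spelling. The same trap applies to numpy booleans pulled out of rows via iterrows later in the file. A quick demonstration:

    import pandas as pd

    s = pd.Series([True, False, True])
    print((s == True).tolist())        # [True, False, True] -- element-wise mask
    print(s is True)                   # False -- identity of the Series object itself
    print((s is True) & (s is True))   # False & False -> scalar False, never a mask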
@@ -88,12 +88,12 @@ def model_bpa_rules():
              "Column",
              "Warning",
              "Set IsAvailableInMdx to false on non-attribute columns",
-             lambda df: (df["Is Direct Lake"] == False)
-             & (df["Is Available in MDX"] == True)
-             & ((df["Hidden"] == True) | (df["Parent Is Hidden"] == True))
-             & (df["Used in Sort By"] == False)
-             & (df["Used in Hierarchy"] == False)
-             & (df["Sort By Column"] == None),
+             lambda df: (df["Is Direct Lake"] is False)
+             & (df["Is Available in MDX"] is True)
+             & ((df["Hidden"] is True) | (df["Parent Is Hidden"] is True))
+             & (df["Used in Sort By"] is False)
+             & (df["Used in Hierarchy"] is False)
+             & (df["Sort By Column"] is None),
              "To speed up processing time and conserve memory after processing, attribute hierarchies should not be built for columns that are never used for slicing by MDX clients. In other words, all hidden columns that are not used as a Sort By Column or referenced in user hierarchies should have their IsAvailableInMdx property set to false. The IsAvailableInMdx property is not relevant for Direct Lake models.",
              "https://blog.crossjoin.co.uk/2018/07/02/isavailableinmdx-ssas-tabular",
          ),
@@ -219,7 +219,7 @@ def model_bpa_rules():
              "Table",
              "Warning",
              "Large tables should be partitioned",
-             lambda df: (df["Is Direct Lake"] == False)
+             lambda df: (df["Is Direct Lake"] is False)
              & (df["Partition Count"] == 1)
              & (df["Row Count"] > 25000000),
              "Large tables should be partitioned in order to optimize processing. This is not relevant for semantic models in Direct Lake mode as they can only have one partition per table.",
@@ -306,11 +306,11 @@ def model_bpa_rules():
              "Column",
              "Warning",
              "Set IsAvailableInMdx to true on necessary columns",
-             lambda df: (df["Is Direct Lake"] == False)
-             & (df["Is Available in MDX"] == False)
+             lambda df: (df["Is Direct Lake"] is False)
+             & (df["Is Available in MDX"] is False)
              & (
-                 (df["Used in Sort By"] == True)
-                 | (df["Used in Hierarchy"] == True)
+                 (df["Used in Sort By"] is True)
+                 | (df["Used in Hierarchy"] is True)
                  | (df["Sort By Column"] != None)
              ),
              "In order to avoid errors, ensure that attribute hierarchies are enabled if a column is used for sorting another column, used in a hierarchy, used in variations, or is sorted by another column. The IsAvailableInMdx property is not relevant for Direct Lake models.",
@@ -320,8 +320,8 @@ def model_bpa_rules():
              "Table",
              "Error",
              "Avoid the USERELATIONSHIP function and RLS against the same table",
-             lambda df: (df["USERELATIONSHIP Used"] == True)
-             & (df["Used in RLS"] == True),
+             lambda df: (df["USERELATIONSHIP Used"] is True)
+             & (df["Used in RLS"] is True),
              "The USERELATIONSHIP function may not be used against a table which also leverages row-level security (RLS). This will generate an error when using the particular measure in a visual. This rule will highlight the table which is used in a measure's USERELATIONSHIP function as well as RLS.",
              "https://blog.crossjoin.co.uk/2013/05/10/userelationship-and-tabular-row-security",
          ),
@@ -494,7 +494,7 @@ def model_bpa_rules():
              "Table",
              "Warning",
              "Ensure tables have relationships",
-             lambda df: (df["Used in Relationship"] == False)
+             lambda df: (df["Used in Relationship"] is False)
              & (df["Type"] != "Calculation Group"),
              "This rule highlights tables which are not connected to any other table in the model with a relationship.",
          ),
@@ -511,7 +511,7 @@ def model_bpa_rules():
              "Column",
              "Info",
              "Visible objects with no description",
-             lambda df: (df["Hidden"] == False) & (df["Description"].str.len() == 0),
+             lambda df: (df["Hidden"] is False) & (df["Description"].str.len() == 0),
              "Calculation groups have no function unless they have calculation items.",
          ),
          (
@@ -595,7 +595,7 @@ def model_bpa_rules():
              "Column",
              "Info",
              "Hide foreign keys",
-             lambda df: (df["Foreign Key"]) & (df["Hidden"] == False),
+             lambda df: (df["Foreign Key"]) & (df["Hidden"] is False),
              "Foreign keys should always be hidden.",
          ),
          (
@@ -603,7 +603,7 @@ def model_bpa_rules():
              "Column",
              "Info",
              "Mark primary keys",
-             lambda df: (df["Primary Key"]) & (df["Key"] == False),
+             lambda df: (df["Primary Key"]) & (df["Key"] is False),
              "Set the 'Key' property to 'True' for primary key columns within the column properties.",
          ),
          (
@@ -744,7 +744,7 @@ def run_model_bpa(
          message="This pattern is interpreted as a regular expression, and has match groups.",
      )

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -798,13 +798,13 @@ def run_model_bpa(
      cols = ["From Cardinality", "To Cardinality"]

      for col in cols:
-         if not col in dfR:
+         if col not in dfR:
              dfR[col] = None

      cols = ["Parent Is Hidden"]

      for col in cols:
-         if not col in dfM:
+         if col not in dfM:
              dfM[col] = None

      # Data Coverage Definition rule
@@ -842,9 +842,9 @@ def run_model_bpa(
          dataset=dataset,
          workspace=workspace,
          dax_string="""
-         SELECT [FUNCTION_NAME]
+         SELECT [FUNCTION_NAME]
          FROM $SYSTEM.MDSCHEMA_FUNCTIONS
-         WHERE [INTERFACE_NAME] = 'DATETIME'
+         WHERE [INTERFACE_NAME] = 'DATETIME'
          """,
      )

@@ -951,7 +951,7 @@ def run_model_bpa(
      dfD["Has Date Table"] = any(
          (r["Parent Data Category"] == "Time")
          & (r["Data Type"] == "DateTime")
-         & (r["Key"] == True)
+         & (r["Key"] is True)
          for i, r in dfC.iterrows()
      )
      # dfC['In Date Table'] = dfC['Table Name'].isin(dfT.loc[dfT['Data Category'] == "Time", 'Name'])
@@ -1008,7 +1008,7 @@ def run_model_bpa(
      dfM["Referenced By"].fillna(0, inplace=True)
      dfM["Referenced By"] = dfM["Referenced By"].fillna(0).astype(int)

-     pattern = "[^\( ][a-zA-Z0-9_()-]+\[[^\[]+\]|'[^']+'\[[^\[]+\]|\[[^\[]+\]"
+     pattern = r"[^\( ][a-zA-Z0-9_()-]+\[[^\[]+\]|'[^']+'\[[^\[]+\]|\[[^\[]+\]"

      dfM["Has Fully Qualified Measure Reference"] = False
      dfM["Has Unqualified Column Reference"] = False
@@ -1033,7 +1033,7 @@ def run_model_bpa(
                  dfM.at[i, "Has Fully Qualified Measure Reference"] = True

      dfR["Inactive without USERELATIONSHIP"] = False
-     for i, r in dfR[dfR["Active"] == False].iterrows():
+     for i, r in dfR[dfR["Active"] is False].iterrows():
          fromTable = r["From Table"]
          fromColumn = r["From Column"]
          toTable = r["To Table"]
@@ -1041,15 +1041,15 @@ def run_model_bpa(

          dfM_filt = dfM[
              dfM["Measure Expression"].str.contains(
-                 "(?i)USERELATIONSHIP\s*\(\s*'*"
-                 + fromTable
-                 + "'*\["
-                 + fromColumn
-                 + "\]\s*,\s*'*"
-                 + toTable
-                 + "'*\["
-                 + toColumn
-                 + "\]",
+                 r"(?i)USERELATIONSHIP\s*\(\s*'*"
+                 + re.escape(fromTable)
+                 + r"'*\["
+                 + re.escape(fromColumn)
+                 + r"\]\s*,\s*'*"
+                 + re.escape(toTable)
+                 + r"'*\["
+                 + re.escape(toColumn)
+                 + r"\]",
                  regex=True,
              )
          ]
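The switch to re.escape is a genuine fix rather than style: table and column names are interpolated into the regex, and names containing metacharacters such as parentheses previously corrupted the pattern or raised re.error. A small before/after sketch with a hypothetical table name:

    import re

    table = "Sales (FY24)"

    broken = "USERELATIONSHIP\\s*\\(\\s*'*" + table   # '(' in the name opens an unclosed group
    try:
        re.compile(broken)
    except re.error as e:
        print("old pattern fails:", e)

    fixed = r"USERELATIONSHIP\s*\(\s*'*" + re.escape(table)
    re.compile(fixed)  # compiles; metacharacters in the name are matched literally

(Separately, the dfR[dfR["Active"] is False] filter in the first hunk above inherits the Series identity problem flagged earlier: it reduces to dfR[False], which raises a KeyError rather than selecting inactive relationships. The hunk also assumes re is imported in the module.)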
@@ -1183,7 +1183,7 @@ def run_model_bpa(

      if export:
          lakeAttach = lakehouse_attached()
-         if lakeAttach == False:
+         if lakeAttach is False:
              print(
                  f"In order to save the Best Practice Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook."
              )
sempy_labs/_model_dependencies.py CHANGED
@@ -1,12 +1,11 @@
- import sempy
  import sempy.fabric as fabric
  import pandas as pd
  from sempy_labs._helper_functions import format_dax_object_name
- from typing import List, Optional, Union
+ from typing import Any, Dict, Optional
  from anytree import Node, RenderTree
  from sempy._utils._log import log

-
+ @log
  def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):
      """
      Shows all dependencies for all measures in a semantic model.
@@ -26,7 +25,7 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):
      Shows all dependencies for all measures in the semantic model.
      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -64,11 +63,11 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):
          axis=1,
      )

-     while any(df["Done"] == False):
+     while any(df["Done"] is False):
          for i, r in df.iterrows():
              rObjFull = r["Referenced Full Object Name"]
              rObj = r["Referenced Object"]
-             if r["Done"] == False:
+             if r["Done"] is False:
                  dep_filt = dep[dep["Full Object Name"] == rObjFull]

                  for index, dependency in dep_filt.iterrows():
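The Series identity problem flagged in _model_bpa.py bites harder here: df["Done"] is False evaluates to the scalar False, and any() requires an iterable, so any(df["Done"] is False) raises a TypeError and the dependency-resolution loop cannot run as written. The element-wise spellings would be any(df["Done"] == False) or, more idiomatically, (~df["Done"]).any(). Illustrated:

    import pandas as pd

    done = pd.Series([False, True])
    print(any(done == False))    # True -- iterates the element-wise comparison
    print((~done).any())         # True -- idiomatic equivalent
    try:
        any(done is False)       # 'done is False' is just the scalar False
    except TypeError as e:
        print(e)                 # 'bool' object is not iterable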
@@ -131,7 +130,7 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):

      return df

-
+ @log
  def get_model_calc_dependencies(dataset: str, workspace: Optional[str] = None):
      """
      Shows all dependencies for all objects in a semantic model.
@@ -151,7 +150,7 @@ def get_model_calc_dependencies(dataset: str, workspace: Optional[str] = None):
      Shows all dependencies for all objects in the semantic model.
      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -192,11 +191,11 @@ def get_model_calc_dependencies(dataset: str, workspace: Optional[str] = None):
          lambda row: False if row["Referenced Object Type"] in objs else True, axis=1
      )

-     while any(df["Done"] == False):
+     while any(df["Done"] is False):
          for i, r in df.iterrows():
              rObjFull = r["Referenced Full Object Name"]
              rObj = r["Referenced Object"]
-             if r["Done"] == False:
+             if r["Done"] is False:
                  dep_filt = dep[dep["Full Object Name"] == rObjFull]

                  for index, dependency in dep_filt.iterrows():
@@ -283,7 +282,7 @@ def measure_dependency_tree(

      """

-     if workspace == None:
+     if workspace is None:
          workspace_id = fabric.get_workspace_id()
          workspace = fabric.resolve_workspace_name(workspace_id)

@@ -300,7 +299,7 @@ def measure_dependency_tree(
      df_filt = md[md["Object Name"] == measure_name]

      # Create a dictionary to hold references to nodes
-     node_dict = {}
+     node_dict: Dict[str, Any] = {}
      measureIcon = "\u2211"
      tableIcon = "\u229E"
      columnIcon = "\u229F"
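The node_dict annotation is what the new typing import (Any, Dict) exists for; the dict holds anytree Node references keyed by object name while the dependency tree is assembled, and RenderTree prints it with the unicode icons defined above. A minimal standalone sketch of that pattern, with hypothetical measure and column names:

    from typing import Any, Dict
    from anytree import Node, RenderTree

    node_dict: Dict[str, Any] = {}
    node_dict["Total Sales"] = Node("\u2211 Total Sales")  # root measure
    node_dict["Sales Amount"] = Node(
        "\u2211 Sales Amount", parent=node_dict["Total Sales"]
    )  # measure referenced by the root
    node_dict["Amount"] = Node("\u229F Amount", parent=node_dict["Sales Amount"])  # column

    for pre, fill, node in RenderTree(node_dict["Total Sales"]):
        print(f"{pre}{node.name}")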