semantic-link-labs 0.8.10__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (81)
  1. {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/METADATA +6 -5
  2. {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/RECORD +81 -80
  3. {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +34 -3
  5. sempy_labs/_authentication.py +80 -4
  6. sempy_labs/_capacities.py +770 -200
  7. sempy_labs/_capacity_migration.py +7 -37
  8. sempy_labs/_clear_cache.py +37 -35
  9. sempy_labs/_connections.py +13 -13
  10. sempy_labs/_data_pipelines.py +20 -20
  11. sempy_labs/_dataflows.py +27 -28
  12. sempy_labs/_dax.py +41 -47
  13. sempy_labs/_deployment_pipelines.py +1 -1
  14. sempy_labs/_environments.py +26 -23
  15. sempy_labs/_eventhouses.py +16 -15
  16. sempy_labs/_eventstreams.py +16 -15
  17. sempy_labs/_external_data_shares.py +18 -20
  18. sempy_labs/_gateways.py +16 -14
  19. sempy_labs/_generate_semantic_model.py +107 -62
  20. sempy_labs/_git.py +105 -43
  21. sempy_labs/_helper_functions.py +251 -194
  22. sempy_labs/_job_scheduler.py +227 -0
  23. sempy_labs/_kql_databases.py +16 -15
  24. sempy_labs/_kql_querysets.py +16 -15
  25. sempy_labs/_list_functions.py +150 -126
  26. sempy_labs/_managed_private_endpoints.py +19 -17
  27. sempy_labs/_mirrored_databases.py +51 -48
  28. sempy_labs/_mirrored_warehouses.py +5 -4
  29. sempy_labs/_ml_experiments.py +16 -15
  30. sempy_labs/_ml_models.py +15 -14
  31. sempy_labs/_model_bpa.py +210 -207
  32. sempy_labs/_model_bpa_bulk.py +2 -2
  33. sempy_labs/_model_bpa_rules.py +3 -3
  34. sempy_labs/_model_dependencies.py +55 -29
  35. sempy_labs/_notebooks.py +29 -25
  36. sempy_labs/_one_lake_integration.py +23 -26
  37. sempy_labs/_query_scale_out.py +75 -64
  38. sempy_labs/_refresh_semantic_model.py +25 -26
  39. sempy_labs/_spark.py +33 -32
  40. sempy_labs/_sql.py +19 -12
  41. sempy_labs/_translations.py +10 -7
  42. sempy_labs/_vertipaq.py +38 -33
  43. sempy_labs/_warehouses.py +26 -25
  44. sempy_labs/_workspace_identity.py +11 -10
  45. sempy_labs/_workspaces.py +40 -33
  46. sempy_labs/admin/_basic_functions.py +166 -115
  47. sempy_labs/admin/_domains.py +7 -2
  48. sempy_labs/admin/_external_data_share.py +3 -3
  49. sempy_labs/admin/_git.py +4 -1
  50. sempy_labs/admin/_items.py +11 -6
  51. sempy_labs/admin/_scanner.py +10 -5
  52. sempy_labs/directlake/_directlake_schema_compare.py +25 -16
  53. sempy_labs/directlake/_directlake_schema_sync.py +24 -12
  54. sempy_labs/directlake/_dl_helper.py +74 -55
  55. sempy_labs/directlake/_generate_shared_expression.py +10 -9
  56. sempy_labs/directlake/_get_directlake_lakehouse.py +32 -36
  57. sempy_labs/directlake/_get_shared_expression.py +4 -3
  58. sempy_labs/directlake/_guardrails.py +12 -6
  59. sempy_labs/directlake/_list_directlake_model_calc_tables.py +15 -9
  60. sempy_labs/directlake/_show_unsupported_directlake_objects.py +16 -10
  61. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +35 -31
  62. sempy_labs/directlake/_update_directlake_partition_entity.py +39 -31
  63. sempy_labs/directlake/_warm_cache.py +87 -65
  64. sempy_labs/lakehouse/_get_lakehouse_columns.py +23 -26
  65. sempy_labs/lakehouse/_get_lakehouse_tables.py +27 -38
  66. sempy_labs/lakehouse/_lakehouse.py +7 -20
  67. sempy_labs/lakehouse/_shortcuts.py +42 -23
  68. sempy_labs/migration/_create_pqt_file.py +16 -11
  69. sempy_labs/migration/_refresh_calc_tables.py +16 -10
  70. sempy_labs/report/_download_report.py +9 -8
  71. sempy_labs/report/_generate_report.py +85 -44
  72. sempy_labs/report/_paginated.py +9 -9
  73. sempy_labs/report/_report_bpa.py +15 -11
  74. sempy_labs/report/_report_functions.py +80 -91
  75. sempy_labs/report/_report_helper.py +8 -4
  76. sempy_labs/report/_report_list_functions.py +24 -13
  77. sempy_labs/report/_report_rebind.py +17 -16
  78. sempy_labs/report/_reportwrapper.py +41 -33
  79. sempy_labs/tom/_model.py +139 -21
  80. {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/LICENSE +0 -0
  81. {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/top_level.txt +0 -0
sempy_labs/_model_bpa.py CHANGED
@@ -12,7 +12,7 @@ from sempy_labs._helper_functions import (
     resolve_workspace_capacity,
     resolve_dataset_name_and_id,
     get_language_codes,
-    _get_max_run_id,
+    _get_column_aggregate,
     resolve_workspace_name_and_id,
 )
 from sempy_labs.lakehouse import get_lakehouse_tables, lakehouse_attached
@@ -36,6 +36,7 @@ def run_model_bpa(
     return_dataframe: bool = False,
     extended: bool = False,
     language: Optional[str] = None,
+    check_dependencies: bool = True,
     **kwargs,
 ):
     """
@@ -43,12 +44,12 @@ def run_model_bpa(

     Parameters
     ----------
-    dataset : str | UUID
+    dataset : str | uuid.UUID
         Name or ID of the semantic model.
     rules : pandas.DataFrame, default=None
         A pandas dataframe containing rules to be evaluated.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     export : bool, default=False
@@ -60,6 +61,8 @@ def run_model_bpa(
     language : str, default=None
         Specifying a language name or code (i.e. 'it-IT' for Italian) will auto-translate the Category, Rule Name and Description into the specified language.
         Defaults to None which resolves to English.
+    check_dependencies : bool, default=True
+        If True, leverages the model dependencies from get_model_calc_dependencies to evaluate the rules. Set this parameter to False if running the rules against a semantic model in a shared capacity.

     Returns
     -------
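
Note on the new check_dependencies flag: when True (the default), run_model_bpa pulls the model's calculated-object dependencies via get_model_calc_dependencies before evaluating rules; when False, it substitutes an empty dependency frame (see the hunk below), which per the docstring is intended for semantic models on shared capacities. A minimal usage sketch; the dataset and workspace names are placeholders:

    import sempy_labs as labs

    # Placeholder names; point these at any semantic model you can access.
    labs.run_model_bpa(
        dataset="Sales Model",
        workspace="Analytics Workspace",
        check_dependencies=False,  # skip get_model_calc_dependencies (shared capacity)
    )
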
@@ -129,236 +132,236 @@ def run_model_bpa(
             print(
                 f"{icons.warning} The '{dataset_name}' semantic model within the '{workspace_name}' workspace has no tables and therefore there are no valid BPA results."
             )
-            finalDF = pd.DataFrame(
-                columns=[
-                    "Category",
-                    "Rule Name",
-                    "Severity",
-                    "Object Type",
-                    "Object Name",
-                    "Description",
-                    "URL",
-                ]
-            )
-        else:
-            dep = get_model_calc_dependencies(
-                dataset=dataset_id, workspace=workspace_id
-            )
+            return
+
+        if check_dependencies:
+            dep = get_model_calc_dependencies(
+                dataset=dataset_id, workspace=workspace_id
+            )
+        else:
+            dep = pd.DataFrame(
+                columns=[
+                    "Table Name",
+                    "Object Name",
+                    "Object Type",
+                    "Expression",
+                    "Referenced Table",
+                    "Referenced Object",
+                    "Referenced Object Type",
+                    "Full Object Name",
+                    "Referenced Full Object Name",
+                    "Parent Node",
+                ]
+            )

-            def translate_using_po(rule_file):
-                current_dir = os.path.dirname(os.path.abspath(__file__))
-                translation_file = (
-                    f"{current_dir}/_bpa_translation/_model/_translations_{language}.po"
-                )
-                for c in ["Category", "Description", "Rule Name"]:
-                    po = polib.pofile(translation_file)
-                    for entry in po:
-                        if entry.tcomment == c.lower().replace(" ", "_"):
-                            rule_file.loc[rule_file["Rule Name"] == entry.msgid, c] = (
-                                entry.msgstr
-                            )
+        def translate_using_po(rule_file):
+            current_dir = os.path.dirname(os.path.abspath(__file__))
+            translation_file = (
+                f"{current_dir}/_bpa_translation/_model/_translations_{language}.po"
+            )
+            for c in ["Category", "Description", "Rule Name"]:
+                po = polib.pofile(translation_file)
+                for entry in po:
+                    if entry.tcomment == c.lower().replace(" ", "_"):
+                        rule_file.loc[rule_file["Rule Name"] == entry.msgid, c] = (
+                            entry.msgstr
+                        )

-            translated = False
+        translated = False

-            # Translations
-            if language is not None and rules is None and language in language_list:
-                rules = model_bpa_rules(dependencies=dep)
-                translate_using_po(rules)
-                translated = True
-            if rules is None:
-                rules = model_bpa_rules(dependencies=dep)
-            if language is not None and not translated:
+        # Translations
+        if language is not None and rules is None and language in language_list:
+            rules = model_bpa_rules(dependencies=dep)
+            translate_using_po(rules)
+            translated = True
+        if rules is None:
+            rules = model_bpa_rules(dependencies=dep)
+        if language is not None and not translated:

-                def translate_using_spark(rule_file):
+            def translate_using_spark(rule_file):

-                    from synapse.ml.services import Translate
-                    from pyspark.sql import SparkSession
+                from synapse.ml.services import Translate
+                from pyspark.sql import SparkSession

-                    rules_temp = rule_file.copy()
-                    rules_temp = rules_temp.drop(
-                        ["Expression", "URL", "Severity"], axis=1
-                    )
+                rules_temp = rule_file.copy()
+                rules_temp = rules_temp.drop(["Expression", "URL", "Severity"], axis=1)

-                    schema = StructType(
-                        [
-                            StructField("Category", StringType(), True),
-                            StructField("Scope", StringType(), True),
-                            StructField("Rule Name", StringType(), True),
-                            StructField("Description", StringType(), True),
-                        ]
-                    )
+                schema = StructType(
+                    [
+                        StructField("Category", StringType(), True),
+                        StructField("Scope", StringType(), True),
+                        StructField("Rule Name", StringType(), True),
+                        StructField("Description", StringType(), True),
+                    ]
+                )

-                    spark = SparkSession.builder.getOrCreate()
-                    dfRules = spark.createDataFrame(rules_temp, schema)
-
-                    columns = ["Category", "Rule Name", "Description"]
-                    for clm in columns:
-                        translate = (
-                            Translate()
-                            .setTextCol(clm)
-                            .setToLanguage(language)
-                            .setOutputCol("translation")
-                            .setConcurrency(5)
-                        )
-
-                        if clm == "Rule Name":
-                            transDF = (
-                                translate.transform(dfRules)
-                                .withColumn(
-                                    "translation",
-                                    flatten(col("translation.translations")),
-                                )
-                                .withColumn("translation", col("translation.text"))
-                                .select(clm, "translation")
-                            )
-                        else:
-                            transDF = (
-                                translate.transform(dfRules)
-                                .withColumn(
-                                    "translation",
-                                    flatten(col("translation.translations")),
-                                )
-                                .withColumn("translation", col("translation.text"))
-                                .select("Rule Name", clm, "translation")
-                            )
-
-                        df_panda = transDF.toPandas()
-                        rule_file = pd.merge(
-                            rule_file,
-                            df_panda[["Rule Name", "translation"]],
-                            on="Rule Name",
-                            how="left",
-                        )
-
-                        rule_file = rule_file.rename(
-                            columns={"translation": f"{clm}Translated"}
-                        )
-                        rule_file[f"{clm}Translated"] = rule_file[
-                            f"{clm}Translated"
-                        ].apply(lambda x: x[0] if x is not None else None)
+                spark = SparkSession.builder.getOrCreate()
+                dfRules = spark.createDataFrame(rules_temp, schema)
+
+                columns = ["Category", "Rule Name", "Description"]
+                for clm in columns:
+                    translate = (
+                        Translate()
+                        .setTextCol(clm)
+                        .setToLanguage(language)
+                        .setOutputCol("translation")
+                        .setConcurrency(5)
+                    )
+
+                    if clm == "Rule Name":
+                        transDF = (
+                            translate.transform(dfRules)
+                            .withColumn(
+                                "translation",
+                                flatten(col("translation.translations")),
+                            )
+                            .withColumn("translation", col("translation.text"))
+                            .select(clm, "translation")
+                        )
+                    else:
+                        transDF = (
+                            translate.transform(dfRules)
+                            .withColumn(
+                                "translation",
+                                flatten(col("translation.translations")),
+                            )
+                            .withColumn("translation", col("translation.text"))
+                            .select("Rule Name", clm, "translation")
+                        )
+
+                    df_panda = transDF.toPandas()
+                    rule_file = pd.merge(
+                        rule_file,
+                        df_panda[["Rule Name", "translation"]],
+                        on="Rule Name",
+                        how="left",
+                    )
+
+                    rule_file = rule_file.rename(
+                        columns={"translation": f"{clm}Translated"}
+                    )
+                    rule_file[f"{clm}Translated"] = rule_file[f"{clm}Translated"].apply(
+                        lambda x: x[0] if x is not None else None
+                    )

-                    for clm in columns:
-                        rule_file = rule_file.drop([clm], axis=1)
-                        rule_file = rule_file.rename(columns={f"{clm}Translated": clm})
+                for clm in columns:
+                    rule_file = rule_file.drop([clm], axis=1)
+                    rule_file = rule_file.rename(columns={f"{clm}Translated": clm})

-                    return rule_file
+                return rule_file

-                rules = translate_using_spark(rules)
+            rules = translate_using_spark(rules)

-            rules.loc[rules["Severity"] == "Warning", "Severity"] = icons.warning
-            rules.loc[rules["Severity"] == "Error", "Severity"] = icons.error
-            rules.loc[rules["Severity"] == "Info", "Severity"] = icons.info
+        rules.loc[rules["Severity"] == "Warning", "Severity"] = icons.warning
+        rules.loc[rules["Severity"] == "Error", "Severity"] = icons.error
+        rules.loc[rules["Severity"] == "Info", "Severity"] = icons.info

-            pd.set_option("display.max_colwidth", 1000)
+        pd.set_option("display.max_colwidth", 1000)

-            violations = pd.DataFrame(columns=["Object Name", "Scope", "Rule Name"])
+        violations = pd.DataFrame(columns=["Object Name", "Scope", "Rule Name"])

-            scope_to_dataframe = {
-                "Relationship": (
-                    tom.model.Relationships,
-                    lambda obj: create_relationship_name(
-                        obj.FromTable.Name,
-                        obj.FromColumn.Name,
-                        obj.ToTable.Name,
-                        obj.ToColumn.Name,
-                    ),
-                ),
-                "Column": (
-                    tom.all_columns(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-                "Measure": (tom.all_measures(), lambda obj: obj.Name),
-                "Hierarchy": (
-                    tom.all_hierarchies(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-                "Table": (tom.model.Tables, lambda obj: obj.Name),
-                "Role": (tom.model.Roles, lambda obj: obj.Name),
-                "Model": (tom.model, lambda obj: obj.Model.Name),
-                "Calculation Item": (
-                    tom.all_calculation_items(),
-                    lambda obj: format_dax_object_name(obj.Parent.Table.Name, obj.Name),
-                ),
-                "Row Level Security": (
-                    tom.all_rls(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-                "Partition": (
-                    tom.all_partitions(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-            }
+        scope_to_dataframe = {
+            "Relationship": (
+                tom.model.Relationships,
+                lambda obj: create_relationship_name(
+                    obj.FromTable.Name,
+                    obj.FromColumn.Name,
+                    obj.ToTable.Name,
+                    obj.ToColumn.Name,
+                ),
+            ),
+            "Column": (
+                tom.all_columns(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+            "Measure": (tom.all_measures(), lambda obj: obj.Name),
+            "Hierarchy": (
+                tom.all_hierarchies(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+            "Table": (tom.model.Tables, lambda obj: obj.Name),
+            "Role": (tom.model.Roles, lambda obj: obj.Name),
+            "Model": (tom.model, lambda obj: obj.Model.Name),
+            "Calculation Item": (
+                tom.all_calculation_items(),
+                lambda obj: format_dax_object_name(obj.Parent.Table.Name, obj.Name),
+            ),
+            "Row Level Security": (
+                tom.all_rls(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+            "Partition": (
+                tom.all_partitions(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+        }

-            for i, r in rules.iterrows():
-                ruleName = r["Rule Name"]
-                expr = r["Expression"]
-                scopes = r["Scope"]
-
-                if isinstance(scopes, str):
-                    scopes = [scopes]
-
-                for scope in scopes:
-                    func = scope_to_dataframe[scope][0]
-                    nm = scope_to_dataframe[scope][1]
-
-                    if scope == "Model":
-                        x = []
-                        if expr(func, tom):
-                            x = ["Model"]
-                    elif scope == "Measure":
-                        x = [nm(obj) for obj in tom.all_measures() if expr(obj, tom)]
-                    elif scope == "Column":
-                        x = [nm(obj) for obj in tom.all_columns() if expr(obj, tom)]
-                    elif scope == "Partition":
-                        x = [nm(obj) for obj in tom.all_partitions() if expr(obj, tom)]
-                    elif scope == "Hierarchy":
-                        x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
-                    elif scope == "Table":
-                        x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
-                    elif scope == "Relationship":
-                        x = [
-                            nm(obj) for obj in tom.model.Relationships if expr(obj, tom)
-                        ]
-                    elif scope == "Role":
-                        x = [nm(obj) for obj in tom.model.Roles if expr(obj, tom)]
-                    elif scope == "Row Level Security":
-                        x = [nm(obj) for obj in tom.all_rls() if expr(obj, tom)]
-                    elif scope == "Calculation Item":
-                        x = [
-                            nm(obj)
-                            for obj in tom.all_calculation_items()
-                            if expr(obj, tom)
-                        ]
-
-                    if len(x) > 0:
-                        new_data = {
-                            "Object Name": x,
-                            "Scope": scope,
-                            "Rule Name": ruleName,
-                        }
-                        violations = pd.concat(
-                            [violations, pd.DataFrame(new_data)], ignore_index=True
-                        )
+        for i, r in rules.iterrows():
+            ruleName = r["Rule Name"]
+            expr = r["Expression"]
+            scopes = r["Scope"]
+
+            if isinstance(scopes, str):
+                scopes = [scopes]
+
+            for scope in scopes:
+                func = scope_to_dataframe[scope][0]
+                nm = scope_to_dataframe[scope][1]
+
+                if scope == "Model":
+                    x = []
+                    if expr(func, tom):
+                        x = ["Model"]
+                elif scope == "Measure":
+                    x = [nm(obj) for obj in tom.all_measures() if expr(obj, tom)]
+                elif scope == "Column":
+                    x = [nm(obj) for obj in tom.all_columns() if expr(obj, tom)]
+                elif scope == "Partition":
+                    x = [nm(obj) for obj in tom.all_partitions() if expr(obj, tom)]
+                elif scope == "Hierarchy":
+                    x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
+                elif scope == "Table":
+                    x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
+                elif scope == "Relationship":
+                    x = [nm(obj) for obj in tom.model.Relationships if expr(obj, tom)]
+                elif scope == "Role":
+                    x = [nm(obj) for obj in tom.model.Roles if expr(obj, tom)]
+                elif scope == "Row Level Security":
+                    x = [nm(obj) for obj in tom.all_rls() if expr(obj, tom)]
+                elif scope == "Calculation Item":
+                    x = [
+                        nm(obj) for obj in tom.all_calculation_items() if expr(obj, tom)
+                    ]
+
+                if len(x) > 0:
+                    new_data = {
+                        "Object Name": x,
+                        "Scope": scope,
+                        "Rule Name": ruleName,
+                    }
+                    violations = pd.concat(
+                        [violations, pd.DataFrame(new_data)], ignore_index=True
+                    )

-            prepDF = pd.merge(
-                violations,
-                rules[["Rule Name", "Category", "Severity", "Description", "URL"]],
-                left_on="Rule Name",
-                right_on="Rule Name",
-                how="left",
-            )
-            prepDF.rename(columns={"Scope": "Object Type"}, inplace=True)
-            finalDF = prepDF[
-                [
-                    "Category",
-                    "Rule Name",
-                    "Severity",
-                    "Object Type",
-                    "Object Name",
-                    "Description",
-                    "URL",
-                ]
-            ]
+        prepDF = pd.merge(
+            violations,
+            rules[["Rule Name", "Category", "Severity", "Description", "URL"]],
+            left_on="Rule Name",
+            right_on="Rule Name",
+            how="left",
+        )
+        prepDF.rename(columns={"Scope": "Object Type"}, inplace=True)
+        finalDF = prepDF[
+            [
+                "Category",
+                "Rule Name",
+                "Severity",
+                "Object Type",
+                "Object Name",
+                "Description",
+                "URL",
+            ]
+        ]

     if export:
         if not lakehouse_attached():
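
The re-indented block above is the heart of the BPA scan: a dispatch table maps each rule scope to a (object collection, name formatter) pair, and each rule's Expression lambda is applied across its scope's collection. A self-contained sketch of the same pattern with stub objects; the rule shown is illustrative, not one of the shipped BPA rules:

    from dataclasses import dataclass

    @dataclass
    class Obj:
        Name: str

    # Stand-ins for the TOM collections the real scan iterates
    # (tom.all_measures(), tom.model.Tables, ...).
    measures = [Obj("Sales Amount"), Obj("Profit %")]
    tables = [Obj("Sales"), Obj("Date")]

    scope_to_objects = {
        "Measure": (lambda: measures, lambda obj: obj.Name),
        "Table": (lambda: tables, lambda obj: obj.Name),
    }

    # A made-up rule for demonstration purposes.
    rules = [
        {"Rule Name": "Avoid '%' in measure names", "Scope": "Measure",
         "Expression": lambda obj: "%" in obj.Name},
    ]

    violations = []
    for rule in rules:
        collection, name_of = scope_to_objects[rule["Scope"]]
        violations += [
            {"Object Name": name_of(obj), "Scope": rule["Scope"],
             "Rule Name": rule["Rule Name"]}
            for obj in collection()
            if rule["Expression"](obj)
        ]
    print(violations)  # [{'Object Name': 'Profit %', 'Scope': 'Measure', ...}]
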
@@ -383,7 +386,7 @@ def run_model_bpa(
         if len(lakeT_filt) == 0:
             runId = 1
         else:
-            max_run_id = _get_max_run_id(
+            max_run_id = _get_column_aggregate(
                 lakehouse=lakehouse, table_name=delta_table_name
             )
             runId = max_run_id + 1
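
This hunk (and the two in _model_bpa_bulk.py below) swaps the specialized _get_max_run_id helper for the more general _get_column_aggregate; the runId = max + 1 bookkeeping is unchanged. The helper is internal to semantic-link-labs, so the sketch below only illustrates the shape implied by the call sites; the default column name and aggregate ("RunId", max), the Spark-based lookup, and the table name in the usage comment are all assumptions:

    from pyspark.sql import SparkSession

    def get_column_aggregate(lakehouse, table_name, column_name="RunId", function="max"):
        # Hypothetical stand-in for the internal helper: read the delta table
        # and return a single aggregated scalar.
        spark = SparkSession.builder.getOrCreate()
        df = spark.table(f"{lakehouse}.{table_name}")
        # .agg with a dict yields a one-row DataFrame; pull the scalar out of it.
        return df.agg({column_name: function}).collect()[0][0]

    # runId bookkeeping as in the hunk above (placeholder names):
    # max_run_id = get_column_aggregate(lakehouse="MyLakehouse", table_name="modelbparesults")
    # runId = max_run_id + 1
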
sempy_labs/_model_bpa_bulk.py CHANGED
@@ -6,7 +6,7 @@ from sempy_labs._helper_functions import (
     save_as_delta_table,
     resolve_workspace_capacity,
     retry,
-    _get_max_run_id,
+    _get_column_aggregate,
 )
 from sempy_labs.lakehouse import (
     get_lakehouse_tables,
@@ -76,7 +76,7 @@ def run_model_bpa_bulk(
     if len(lakeT_filt) == 0:
         runId = 1
     else:
-        max_run_id = _get_max_run_id(lakehouse=lakehouse, table_name=output_table)
+        max_run_id = _get_column_aggregate(lakehouse=lakehouse, table_name=output_table)
        runId = max_run_id + 1

     if isinstance(workspace, str):
sempy_labs/_model_bpa_rules.py CHANGED
@@ -64,7 +64,7 @@ def model_bpa_rules(
                 obj.FromCardinality == TOM.RelationshipEndCardinality.Many
                 and obj.ToCardinality == TOM.RelationshipEndCardinality.Many
             )
-            or str(obj.CrossFilteringBehavior) == "BothDirections"
+            or str(obj.CrossFilteringBehavior) == "BothDirections",
             "Bi-directional and many-to-many relationships may cause performance degradation or even have unintended consequences. Make sure to check these specific relationships to ensure they are working as designed and are actually necessary.",
             "https://www.sqlbi.com/articles/bidirectional-relationships-and-ambiguity-in-dax",
         ),
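
The added comma here is a genuine bug fix, not a style change: without it, Python's implicit string-literal concatenation merges the "BothDirections" comparand with the description string that follows, so the comparison could never be true (and the rule tuple lost an element). A standalone illustration with an abbreviated description string:

    behavior = "BothDirections"

    # Missing comma: the two adjacent string literals concatenate into one,
    # so this compares against "BothDirectionsBi-directional ..." -> always False.
    broken = (
        behavior == "BothDirections"
        "Bi-directional and many-to-many relationships may cause performance degradation"
    )

    # With the comma, the comparison and the description are separate tuple elements.
    fixed = (
        behavior == "BothDirections",
        "Bi-directional and many-to-many relationships may cause performance degradation",
    )

    print(broken)    # False
    print(fixed[0])  # True
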
@@ -402,8 +402,8 @@ def model_bpa_rules(
             lambda obj, tom: tom.is_direct_lake() is False
             and obj.IsAvailableInMDX is False
             and (
-                tom.used_in_sort_by(column=obj)
-                or tom.used_in_hierarchies(column=obj)
+                any(tom.used_in_sort_by(column=obj))
+                or any(tom.used_in_hierarchies(column=obj))
                 or obj.SortByColumn is not None
             ),
             "In order to avoid errors, ensure that attribute hierarchies are enabled if a column is used for sorting another column, used in a hierarchy, used in variations, or is sorted by another column. The IsAvailableInMdx property is not relevant for Direct Lake models.",