semantic-link-labs 0.8.11__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (40)
  1. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/METADATA +9 -6
  2. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/RECORD +40 -40
  3. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +29 -2
  5. sempy_labs/_authentication.py +78 -4
  6. sempy_labs/_capacities.py +770 -200
  7. sempy_labs/_capacity_migration.py +7 -37
  8. sempy_labs/_clear_cache.py +8 -8
  9. sempy_labs/_deployment_pipelines.py +1 -1
  10. sempy_labs/_gateways.py +2 -0
  11. sempy_labs/_generate_semantic_model.py +8 -0
  12. sempy_labs/_helper_functions.py +119 -79
  13. sempy_labs/_job_scheduler.py +138 -3
  14. sempy_labs/_list_functions.py +40 -31
  15. sempy_labs/_model_bpa.py +207 -204
  16. sempy_labs/_model_bpa_bulk.py +2 -2
  17. sempy_labs/_model_bpa_rules.py +3 -3
  18. sempy_labs/_notebooks.py +2 -0
  19. sempy_labs/_query_scale_out.py +8 -0
  20. sempy_labs/_sql.py +11 -7
  21. sempy_labs/_vertipaq.py +4 -2
  22. sempy_labs/_warehouses.py +6 -6
  23. sempy_labs/admin/_basic_functions.py +156 -103
  24. sempy_labs/admin/_domains.py +7 -2
  25. sempy_labs/admin/_git.py +4 -1
  26. sempy_labs/admin/_items.py +7 -2
  27. sempy_labs/admin/_scanner.py +7 -4
  28. sempy_labs/directlake/_directlake_schema_compare.py +7 -2
  29. sempy_labs/directlake/_directlake_schema_sync.py +6 -0
  30. sempy_labs/directlake/_dl_helper.py +51 -31
  31. sempy_labs/directlake/_get_directlake_lakehouse.py +20 -27
  32. sempy_labs/directlake/_update_directlake_partition_entity.py +5 -0
  33. sempy_labs/lakehouse/_get_lakehouse_columns.py +17 -22
  34. sempy_labs/lakehouse/_get_lakehouse_tables.py +20 -32
  35. sempy_labs/lakehouse/_lakehouse.py +2 -19
  36. sempy_labs/report/_generate_report.py +45 -0
  37. sempy_labs/report/_report_bpa.py +2 -2
  38. sempy_labs/tom/_model.py +97 -16
  39. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/LICENSE +0 -0
  40. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/top_level.txt +0 -0
sempy_labs/_model_bpa.py CHANGED
@@ -12,7 +12,7 @@ from sempy_labs._helper_functions import (
     resolve_workspace_capacity,
     resolve_dataset_name_and_id,
     get_language_codes,
-    _get_max_run_id,
+    _get_column_aggregate,
     resolve_workspace_name_and_id,
 )
 from sempy_labs.lakehouse import get_lakehouse_tables, lakehouse_attached
@@ -36,6 +36,7 @@ def run_model_bpa(
     return_dataframe: bool = False,
     extended: bool = False,
     language: Optional[str] = None,
+    check_dependencies: bool = True,
     **kwargs,
 ):
     """
@@ -60,6 +61,8 @@ def run_model_bpa(
     language : str, default=None
         Specifying a language name or code (i.e. 'it-IT' for Italian) will auto-translate the Category, Rule Name and Description into the specified language.
         Defaults to None which resolves to English.
+    check_dependencies : bool, default=True
+        If True, leverages the model dependencies from get_model_calc_dependencies to evaluate the rules. Set this parameter to False if running the rules against a semantic model in a shared capacity.
 
     Returns
     -------
@@ -129,236 +132,236 @@ def run_model_bpa(
             print(
                 f"{icons.warning} The '{dataset_name}' semantic model within the '{workspace_name}' workspace has no tables and therefore there are no valid BPA results."
             )
-            finalDF = pd.DataFrame(
+            return
+
+        if check_dependencies:
+            dep = get_model_calc_dependencies(
+                dataset=dataset_id, workspace=workspace_id
+            )
+        else:
+            dep = pd.DataFrame(
                 columns=[
-                    "Category",
-                    "Rule Name",
-                    "Severity",
-                    "Object Type",
+                    "Table Name",
                     "Object Name",
-                    "Description",
-                    "URL",
+                    "Object Type",
+                    "Expression",
+                    "Referenced Table",
+                    "Referenced Object",
+                    "Referenced Object Type",
+                    "Full Object Name",
+                    "Referenced Full Object Name",
+                    "Parent Node",
                 ]
             )
-        else:
-            dep = get_model_calc_dependencies(
-                dataset=dataset_id, workspace=workspace_id
+
+        def translate_using_po(rule_file):
+            current_dir = os.path.dirname(os.path.abspath(__file__))
+            translation_file = (
+                f"{current_dir}/_bpa_translation/_model/_translations_{language}.po"
             )
+            for c in ["Category", "Description", "Rule Name"]:
+                po = polib.pofile(translation_file)
+                for entry in po:
+                    if entry.tcomment == c.lower().replace(" ", "_"):
+                        rule_file.loc[rule_file["Rule Name"] == entry.msgid, c] = (
+                            entry.msgstr
+                        )
 
-            def translate_using_po(rule_file):
-                current_dir = os.path.dirname(os.path.abspath(__file__))
-                translation_file = (
-                    f"{current_dir}/_bpa_translation/_model/_translations_{language}.po"
-                )
-                for c in ["Category", "Description", "Rule Name"]:
-                    po = polib.pofile(translation_file)
-                    for entry in po:
-                        if entry.tcomment == c.lower().replace(" ", "_"):
-                            rule_file.loc[rule_file["Rule Name"] == entry.msgid, c] = (
-                                entry.msgstr
-                            )
+        translated = False
 
-            translated = False
+        # Translations
+        if language is not None and rules is None and language in language_list:
+            rules = model_bpa_rules(dependencies=dep)
+            translate_using_po(rules)
+            translated = True
+        if rules is None:
+            rules = model_bpa_rules(dependencies=dep)
+        if language is not None and not translated:
 
-            # Translations
-            if language is not None and rules is None and language in language_list:
-                rules = model_bpa_rules(dependencies=dep)
-                translate_using_po(rules)
-                translated = True
-            if rules is None:
-                rules = model_bpa_rules(dependencies=dep)
-            if language is not None and not translated:
+            def translate_using_spark(rule_file):
 
-                def translate_using_spark(rule_file):
+                from synapse.ml.services import Translate
+                from pyspark.sql import SparkSession
 
-                    from synapse.ml.services import Translate
-                    from pyspark.sql import SparkSession
+                rules_temp = rule_file.copy()
+                rules_temp = rules_temp.drop(["Expression", "URL", "Severity"], axis=1)
 
-                    rules_temp = rule_file.copy()
-                    rules_temp = rules_temp.drop(
-                        ["Expression", "URL", "Severity"], axis=1
-                    )
+                schema = StructType(
+                    [
+                        StructField("Category", StringType(), True),
+                        StructField("Scope", StringType(), True),
+                        StructField("Rule Name", StringType(), True),
+                        StructField("Description", StringType(), True),
+                    ]
+                )
 
-                    schema = StructType(
-                        [
-                            StructField("Category", StringType(), True),
-                            StructField("Scope", StringType(), True),
-                            StructField("Rule Name", StringType(), True),
-                            StructField("Description", StringType(), True),
-                        ]
+                spark = SparkSession.builder.getOrCreate()
+                dfRules = spark.createDataFrame(rules_temp, schema)
+
+                columns = ["Category", "Rule Name", "Description"]
+                for clm in columns:
+                    translate = (
+                        Translate()
+                        .setTextCol(clm)
+                        .setToLanguage(language)
+                        .setOutputCol("translation")
+                        .setConcurrency(5)
                     )
 
-                    spark = SparkSession.builder.getOrCreate()
-                    dfRules = spark.createDataFrame(rules_temp, schema)
-
-                    columns = ["Category", "Rule Name", "Description"]
-                    for clm in columns:
-                        translate = (
-                            Translate()
-                            .setTextCol(clm)
-                            .setToLanguage(language)
-                            .setOutputCol("translation")
-                            .setConcurrency(5)
-                        )
-
-                        if clm == "Rule Name":
-                            transDF = (
-                                translate.transform(dfRules)
-                                .withColumn(
-                                    "translation",
-                                    flatten(col("translation.translations")),
-                                )
-                                .withColumn("translation", col("translation.text"))
-                                .select(clm, "translation")
+                    if clm == "Rule Name":
+                        transDF = (
+                            translate.transform(dfRules)
+                            .withColumn(
+                                "translation",
+                                flatten(col("translation.translations")),
                             )
-                        else:
-                            transDF = (
-                                translate.transform(dfRules)
-                                .withColumn(
-                                    "translation",
-                                    flatten(col("translation.translations")),
-                                )
-                                .withColumn("translation", col("translation.text"))
-                                .select("Rule Name", clm, "translation")
+                            .withColumn("translation", col("translation.text"))
+                            .select(clm, "translation")
+                        )
+                    else:
+                        transDF = (
+                            translate.transform(dfRules)
+                            .withColumn(
+                                "translation",
+                                flatten(col("translation.translations")),
                             )
-
-                        df_panda = transDF.toPandas()
-                        rule_file = pd.merge(
-                            rule_file,
-                            df_panda[["Rule Name", "translation"]],
-                            on="Rule Name",
-                            how="left",
+                            .withColumn("translation", col("translation.text"))
+                            .select("Rule Name", clm, "translation")
                         )
 
-                        rule_file = rule_file.rename(
-                            columns={"translation": f"{clm}Translated"}
-                        )
-                        rule_file[f"{clm}Translated"] = rule_file[
-                            f"{clm}Translated"
-                        ].apply(lambda x: x[0] if x is not None else None)
+                    df_panda = transDF.toPandas()
+                    rule_file = pd.merge(
+                        rule_file,
+                        df_panda[["Rule Name", "translation"]],
+                        on="Rule Name",
+                        how="left",
+                    )
+
+                    rule_file = rule_file.rename(
+                        columns={"translation": f"{clm}Translated"}
+                    )
+                    rule_file[f"{clm}Translated"] = rule_file[f"{clm}Translated"].apply(
+                        lambda x: x[0] if x is not None else None
+                    )
 
-                    for clm in columns:
-                        rule_file = rule_file.drop([clm], axis=1)
-                        rule_file = rule_file.rename(columns={f"{clm}Translated": clm})
+                for clm in columns:
+                    rule_file = rule_file.drop([clm], axis=1)
+                    rule_file = rule_file.rename(columns={f"{clm}Translated": clm})
 
-                    return rule_file
+                return rule_file
 
-                rules = translate_using_spark(rules)
+            rules = translate_using_spark(rules)
 
-            rules.loc[rules["Severity"] == "Warning", "Severity"] = icons.warning
-            rules.loc[rules["Severity"] == "Error", "Severity"] = icons.error
-            rules.loc[rules["Severity"] == "Info", "Severity"] = icons.info
+        rules.loc[rules["Severity"] == "Warning", "Severity"] = icons.warning
+        rules.loc[rules["Severity"] == "Error", "Severity"] = icons.error
+        rules.loc[rules["Severity"] == "Info", "Severity"] = icons.info
 
-            pd.set_option("display.max_colwidth", 1000)
+        pd.set_option("display.max_colwidth", 1000)
 
-            violations = pd.DataFrame(columns=["Object Name", "Scope", "Rule Name"])
+        violations = pd.DataFrame(columns=["Object Name", "Scope", "Rule Name"])
 
-            scope_to_dataframe = {
-                "Relationship": (
-                    tom.model.Relationships,
-                    lambda obj: create_relationship_name(
-                        obj.FromTable.Name,
-                        obj.FromColumn.Name,
-                        obj.ToTable.Name,
-                        obj.ToColumn.Name,
-                    ),
-                ),
-                "Column": (
-                    tom.all_columns(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-                "Measure": (tom.all_measures(), lambda obj: obj.Name),
-                "Hierarchy": (
-                    tom.all_hierarchies(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-                "Table": (tom.model.Tables, lambda obj: obj.Name),
-                "Role": (tom.model.Roles, lambda obj: obj.Name),
-                "Model": (tom.model, lambda obj: obj.Model.Name),
-                "Calculation Item": (
-                    tom.all_calculation_items(),
-                    lambda obj: format_dax_object_name(obj.Parent.Table.Name, obj.Name),
-                ),
-                "Row Level Security": (
-                    tom.all_rls(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
-                ),
-                "Partition": (
-                    tom.all_partitions(),
-                    lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+        scope_to_dataframe = {
+            "Relationship": (
+                tom.model.Relationships,
+                lambda obj: create_relationship_name(
+                    obj.FromTable.Name,
+                    obj.FromColumn.Name,
+                    obj.ToTable.Name,
+                    obj.ToColumn.Name,
                 ),
-            }
-
-            for i, r in rules.iterrows():
-                ruleName = r["Rule Name"]
-                expr = r["Expression"]
-                scopes = r["Scope"]
-
-                if isinstance(scopes, str):
-                    scopes = [scopes]
-
-                for scope in scopes:
-                    func = scope_to_dataframe[scope][0]
-                    nm = scope_to_dataframe[scope][1]
-
-                    if scope == "Model":
-                        x = []
-                        if expr(func, tom):
-                            x = ["Model"]
-                    elif scope == "Measure":
-                        x = [nm(obj) for obj in tom.all_measures() if expr(obj, tom)]
-                    elif scope == "Column":
-                        x = [nm(obj) for obj in tom.all_columns() if expr(obj, tom)]
-                    elif scope == "Partition":
-                        x = [nm(obj) for obj in tom.all_partitions() if expr(obj, tom)]
-                    elif scope == "Hierarchy":
-                        x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
-                    elif scope == "Table":
-                        x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
-                    elif scope == "Relationship":
-                        x = [
-                            nm(obj) for obj in tom.model.Relationships if expr(obj, tom)
-                        ]
-                    elif scope == "Role":
-                        x = [nm(obj) for obj in tom.model.Roles if expr(obj, tom)]
-                    elif scope == "Row Level Security":
-                        x = [nm(obj) for obj in tom.all_rls() if expr(obj, tom)]
-                    elif scope == "Calculation Item":
-                        x = [
-                            nm(obj)
-                            for obj in tom.all_calculation_items()
-                            if expr(obj, tom)
-                        ]
-
-                    if len(x) > 0:
-                        new_data = {
-                            "Object Name": x,
-                            "Scope": scope,
-                            "Rule Name": ruleName,
-                        }
-                        violations = pd.concat(
-                            [violations, pd.DataFrame(new_data)], ignore_index=True
-                        )
+            ),
+            "Column": (
+                tom.all_columns(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+            "Measure": (tom.all_measures(), lambda obj: obj.Name),
+            "Hierarchy": (
+                tom.all_hierarchies(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+            "Table": (tom.model.Tables, lambda obj: obj.Name),
+            "Role": (tom.model.Roles, lambda obj: obj.Name),
+            "Model": (tom.model, lambda obj: obj.Model.Name),
+            "Calculation Item": (
+                tom.all_calculation_items(),
+                lambda obj: format_dax_object_name(obj.Parent.Table.Name, obj.Name),
+            ),
+            "Row Level Security": (
+                tom.all_rls(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+            "Partition": (
+                tom.all_partitions(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
+        }
 
-            prepDF = pd.merge(
-                violations,
-                rules[["Rule Name", "Category", "Severity", "Description", "URL"]],
-                left_on="Rule Name",
-                right_on="Rule Name",
-                how="left",
-            )
-            prepDF.rename(columns={"Scope": "Object Type"}, inplace=True)
-            finalDF = prepDF[
-                [
-                    "Category",
-                    "Rule Name",
-                    "Severity",
-                    "Object Type",
-                    "Object Name",
-                    "Description",
-                    "URL",
-                ]
+        for i, r in rules.iterrows():
+            ruleName = r["Rule Name"]
+            expr = r["Expression"]
+            scopes = r["Scope"]
+
+            if isinstance(scopes, str):
+                scopes = [scopes]
+
+            for scope in scopes:
+                func = scope_to_dataframe[scope][0]
+                nm = scope_to_dataframe[scope][1]
+
+                if scope == "Model":
+                    x = []
+                    if expr(func, tom):
+                        x = ["Model"]
+                elif scope == "Measure":
+                    x = [nm(obj) for obj in tom.all_measures() if expr(obj, tom)]
+                elif scope == "Column":
+                    x = [nm(obj) for obj in tom.all_columns() if expr(obj, tom)]
+                elif scope == "Partition":
+                    x = [nm(obj) for obj in tom.all_partitions() if expr(obj, tom)]
+                elif scope == "Hierarchy":
+                    x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
+                elif scope == "Table":
+                    x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
+                elif scope == "Relationship":
+                    x = [nm(obj) for obj in tom.model.Relationships if expr(obj, tom)]
+                elif scope == "Role":
+                    x = [nm(obj) for obj in tom.model.Roles if expr(obj, tom)]
+                elif scope == "Row Level Security":
+                    x = [nm(obj) for obj in tom.all_rls() if expr(obj, tom)]
+                elif scope == "Calculation Item":
+                    x = [
+                        nm(obj) for obj in tom.all_calculation_items() if expr(obj, tom)
+                    ]
+
+                if len(x) > 0:
+                    new_data = {
+                        "Object Name": x,
+                        "Scope": scope,
+                        "Rule Name": ruleName,
+                    }
+                    violations = pd.concat(
+                        [violations, pd.DataFrame(new_data)], ignore_index=True
+                    )
+
+        prepDF = pd.merge(
+            violations,
+            rules[["Rule Name", "Category", "Severity", "Description", "URL"]],
+            left_on="Rule Name",
+            right_on="Rule Name",
+            how="left",
+        )
+        prepDF.rename(columns={"Scope": "Object Type"}, inplace=True)
+        finalDF = prepDF[
+            [
+                "Category",
+                "Rule Name",
+                "Severity",
+                "Object Type",
+                "Object Name",
+                "Description",
+                "URL",
             ]
+        ]
 
     if export:
         if not lakehouse_attached():
@@ -383,7 +386,7 @@ def run_model_bpa(
         if len(lakeT_filt) == 0:
             runId = 1
         else:
-            max_run_id = _get_max_run_id(
+            max_run_id = _get_column_aggregate(
                 lakehouse=lakehouse, table_name=delta_table_name
            )
             runId = max_run_id + 1
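Note on the change above: check_dependencies is the user-facing addition in this file. A minimal usage sketch (the dataset and workspace names are placeholders, and top-level export of run_model_bpa is assumed from earlier releases):

    import sempy_labs as labs

    # Skip the get_model_calc_dependencies call, per the new docstring guidance
    # for semantic models hosted on a shared capacity.
    labs.run_model_bpa(
        dataset="Sales Model",
        workspace="Analytics",
        check_dependencies=False,
    )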
sempy_labs/_model_bpa_bulk.py CHANGED
@@ -6,7 +6,7 @@ from sempy_labs._helper_functions import (
     save_as_delta_table,
     resolve_workspace_capacity,
     retry,
-    _get_max_run_id,
+    _get_column_aggregate,
 )
 from sempy_labs.lakehouse import (
     get_lakehouse_tables,
@@ -76,7 +76,7 @@ def run_model_bpa_bulk(
     if len(lakeT_filt) == 0:
         runId = 1
     else:
-        max_run_id = _get_max_run_id(lakehouse=lakehouse, table_name=output_table)
+        max_run_id = _get_column_aggregate(lakehouse=lakehouse, table_name=output_table)
         runId = max_run_id + 1
 
     if isinstance(workspace, str):
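The _get_max_run_id to _get_column_aggregate rename recurs in this release (_model_bpa.py, _model_bpa_bulk.py, _vertipaq.py). The helper's implementation is not part of this diff; the calling pattern above is simply "next run id", roughly as in this hedged, self-contained sketch (next_run_id and the stubbed helper are illustrative only):

    def _get_column_aggregate(lakehouse: str, table_name: str) -> int:
        # Stand-in for sempy_labs._helper_functions._get_column_aggregate, which is
        # assumed to return the maximum of the run-id column in the Delta table.
        return 7

    def next_run_id(existing_tables: list[str], lakehouse: str, table_name: str) -> int:
        if table_name not in existing_tables:
            return 1  # output table does not exist yet, start a new series
        return _get_column_aggregate(lakehouse=lakehouse, table_name=table_name) + 1

    print(next_run_id(["bpa_results"], "MyLakehouse", "bpa_results"))  # prints 8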
sempy_labs/_model_bpa_rules.py CHANGED
@@ -64,7 +64,7 @@ def model_bpa_rules(
                 obj.FromCardinality == TOM.RelationshipEndCardinality.Many
                 and obj.ToCardinality == TOM.RelationshipEndCardinality.Many
             )
-            or str(obj.CrossFilteringBehavior) == "BothDirections"
+            or str(obj.CrossFilteringBehavior) == "BothDirections",
             "Bi-directional and many-to-many relationships may cause performance degradation or even have unintended consequences. Make sure to check these specific relationships to ensure they are working as designed and are actually necessary.",
             "https://www.sqlbi.com/articles/bidirectional-relationships-and-ambiguity-in-dax",
         ),
@@ -402,8 +402,8 @@ def model_bpa_rules(
             lambda obj, tom: tom.is_direct_lake() is False
             and obj.IsAvailableInMDX is False
             and (
-                tom.used_in_sort_by(column=obj)
-                or tom.used_in_hierarchies(column=obj)
+                any(tom.used_in_sort_by(column=obj))
+                or any(tom.used_in_hierarchies(column=obj))
                 or obj.SortByColumn is not None
             ),
             "In order to avoid errors, ensure that attribute hierarchies are enabled if a column is used for sorting another column, used in a hierarchy, used in variations, or is sorted by another column. The IsAvailableInMdx property is not relevant for Direct Lake models.",
sempy_labs/_notebooks.py CHANGED
@@ -4,6 +4,7 @@ import sempy_labs._icons as icons
 from typing import Optional
 import base64
 import requests
+from sempy._utils._log import log
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     lro,
@@ -91,6 +92,7 @@ def get_notebook_definition(
     return result
 
 
+@log
 def import_notebook_from_web(
     notebook_name: str,
     url: str,
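A hedged usage sketch for the newly decorated function (notebook name and URL are placeholders; only the notebook_name and url parameters are visible in this hunk, and top-level export is assumed from earlier releases):

    import sempy_labs as labs

    # Import a notebook definition from a public URL into the target workspace.
    labs.import_notebook_from_web(
        notebook_name="Imported Notebook",
        url="https://raw.githubusercontent.com/example/repo/main/notebook.ipynb",
    )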
sempy_labs/_query_scale_out.py CHANGED
@@ -4,12 +4,14 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
 )
+from sempy._utils._log import log
 from typing import Optional, Tuple
 import sempy_labs._icons as icons
 from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
 
 
+@log
 def qso_sync(dataset: str | UUID, workspace: Optional[str | UUID] = None):
     """
     Triggers a query scale-out sync of read-only replicas for the specified dataset from the specified workspace.
@@ -41,6 +43,7 @@ def qso_sync(dataset: str | UUID, workspace: Optional[str | UUID] = None):
     )
 
 
+@log
 def qso_sync_status(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> Tuple[pd.DataFrame, pd.DataFrame]:
@@ -140,6 +143,7 @@ def qso_sync_status(
     return df, dfRep
 
 
+@log
 def disable_qso(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -184,6 +188,7 @@ def disable_qso(
     return df
 
 
+@log
 def set_qso(
     dataset: str | UUID,
     auto_sync: bool = True,
@@ -259,6 +264,7 @@ def set_qso(
     return df
 
 
+@log
 def set_semantic_model_storage_format(
     dataset: str | UUID, storage_format: str, workspace: Optional[str | UUID] = None
 ):
@@ -318,6 +324,7 @@ def set_semantic_model_storage_format(
     )
 
 
+@log
 def list_qso_settings(
     dataset: Optional[str | UUID] = None, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -384,6 +391,7 @@ def list_qso_settings(
     return df
 
 
+@log
 def set_workspace_default_storage_format(
     storage_format: str, workspace: Optional[str | UUID] = None
 ):
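These hunks only add the @log decorator from sempy._utils._log; the function signatures are unchanged. For reference, a minimal call sketch using the signatures visible above (dataset and workspace values are placeholders; top-level exports assumed):

    import sempy_labs as labs

    # Trigger a query scale-out sync, then inspect the current QSO settings.
    labs.qso_sync(dataset="Sales Model", workspace="Analytics")
    settings = labs.list_qso_settings(workspace="Analytics")
    print(settings)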
sempy_labs/_sql.py CHANGED
@@ -6,8 +6,8 @@ import struct
 from itertools import chain, repeat
 from sempy.fabric.exceptions import FabricHTTPException
 from sempy_labs._helper_functions import (
-    resolve_warehouse_id,
-    resolve_lakehouse_id,
+    resolve_lakehouse_name_and_id,
+    resolve_item_name_and_id,
     resolve_workspace_name_and_id,
 )
 from uuid import UUID
@@ -35,7 +35,7 @@ def _bytes2mswin_bstr(value: bytes) -> bytes:
 class ConnectBase:
     def __init__(
         self,
-        name: str,
+        item: str,
         workspace: Optional[Union[str, UUID]] = None,
         timeout: Optional[int] = None,
         endpoint_type: str = "warehouse",
@@ -45,11 +45,15 @@ class ConnectBase:
 
         (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-        # Resolve the appropriate ID (warehouse or lakehouse)
+        # Resolve the appropriate ID and name (warehouse or lakehouse)
         if endpoint_type == "warehouse":
-            resource_id = resolve_warehouse_id(warehouse=name, workspace=workspace_id)
+            (resource_id, resource_name) = resolve_item_name_and_id(
+                item=item, type=endpoint_type.capitalize(), workspace=workspace_id
+            )
         else:
-            resource_id = resolve_lakehouse_id(lakehouse=name, workspace=workspace_id)
+            (resource_id, resource_name) = resolve_lakehouse_name_and_id(
+                lakehouse=item, workspace=workspace_id
+            )
 
         # Get the TDS endpoint
         client = fabric.FabricRestClient()
@@ -72,7 +76,7 @@ class ConnectBase:
         # Set up the connection string
         access_token = SynapseTokenProvider()()
         tokenstruct = _bytes2mswin_bstr(access_token.encode())
-        conn_str = f"DRIVER={{ODBC Driver 18 for SQL Server}};SERVER={tds_endpoint};DATABASE={name};Encrypt=Yes;"
+        conn_str = f"DRIVER={{ODBC Driver 18 for SQL Server}};SERVER={tds_endpoint};DATABASE={resource_name};Encrypt=Yes;"
 
         if timeout is not None:
             conn_str += f"Connect Timeout={timeout};"
sempy_labs/_vertipaq.py CHANGED
@@ -12,7 +12,7 @@ from sempy_labs._helper_functions import (
     resolve_lakehouse_name,
     save_as_delta_table,
     resolve_workspace_capacity,
-    _get_max_run_id,
+    _get_column_aggregate,
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
 )
@@ -519,7 +519,9 @@ def vertipaq_analyzer(
     if len(lakeT_filt) == 0:
         runId = 1
     else:
-        max_run_id = _get_max_run_id(lakehouse=lakehouse, table_name=lakeTName)
+        max_run_id = _get_column_aggregate(
+            lakehouse=lakehouse, table_name=lakeTName
+        )
         runId = max_run_id + 1
 
     dfMap = {