semantic-link-labs 0.9.2-py3-none-any.whl → 0.9.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (54)
  1. {semantic_link_labs-0.9.2.dist-info → semantic_link_labs-0.9.4.dist-info}/METADATA +10 -6
  2. {semantic_link_labs-0.9.2.dist-info → semantic_link_labs-0.9.4.dist-info}/RECORD +54 -44
  3. {semantic_link_labs-0.9.2.dist-info → semantic_link_labs-0.9.4.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +27 -1
  5. sempy_labs/_ai.py +8 -5
  6. sempy_labs/_capacity_migration.py +3 -2
  7. sempy_labs/_connections.py +45 -9
  8. sempy_labs/_dax.py +17 -3
  9. sempy_labs/_delta_analyzer.py +308 -138
  10. sempy_labs/_eventhouses.py +70 -1
  11. sempy_labs/_gateways.py +56 -8
  12. sempy_labs/_generate_semantic_model.py +30 -9
  13. sempy_labs/_helper_functions.py +84 -9
  14. sempy_labs/_job_scheduler.py +226 -2
  15. sempy_labs/_list_functions.py +42 -19
  16. sempy_labs/_ml_experiments.py +1 -1
  17. sempy_labs/_model_bpa.py +17 -2
  18. sempy_labs/_model_bpa_rules.py +20 -8
  19. sempy_labs/_semantic_models.py +117 -0
  20. sempy_labs/_sql.py +73 -6
  21. sempy_labs/_sqldatabase.py +227 -0
  22. sempy_labs/_translations.py +2 -2
  23. sempy_labs/_vertipaq.py +3 -3
  24. sempy_labs/_warehouses.py +1 -1
  25. sempy_labs/admin/__init__.py +49 -8
  26. sempy_labs/admin/_activities.py +166 -0
  27. sempy_labs/admin/_apps.py +143 -0
  28. sempy_labs/admin/_basic_functions.py +32 -652
  29. sempy_labs/admin/_capacities.py +250 -0
  30. sempy_labs/admin/_datasets.py +184 -0
  31. sempy_labs/admin/_domains.py +1 -3
  32. sempy_labs/admin/_items.py +3 -1
  33. sempy_labs/admin/_reports.py +165 -0
  34. sempy_labs/admin/_scanner.py +53 -49
  35. sempy_labs/admin/_shared.py +74 -0
  36. sempy_labs/admin/_tenant.py +489 -0
  37. sempy_labs/directlake/_dl_helper.py +0 -1
  38. sempy_labs/directlake/_update_directlake_partition_entity.py +6 -0
  39. sempy_labs/graph/_teams.py +1 -1
  40. sempy_labs/graph/_users.py +9 -1
  41. sempy_labs/lakehouse/_get_lakehouse_columns.py +2 -2
  42. sempy_labs/lakehouse/_get_lakehouse_tables.py +2 -2
  43. sempy_labs/lakehouse/_lakehouse.py +3 -3
  44. sempy_labs/lakehouse/_shortcuts.py +29 -16
  45. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +2 -2
  46. sempy_labs/migration/_refresh_calc_tables.py +2 -2
  47. sempy_labs/report/__init__.py +3 -1
  48. sempy_labs/report/_download_report.py +4 -1
  49. sempy_labs/report/_export_report.py +272 -0
  50. sempy_labs/report/_report_functions.py +11 -263
  51. sempy_labs/report/_report_rebind.py +1 -1
  52. sempy_labs/tom/_model.py +281 -29
  53. {semantic_link_labs-0.9.2.dist-info → semantic_link_labs-0.9.4.dist-info}/LICENSE +0 -0
  54. {semantic_link_labs-0.9.2.dist-info → semantic_link_labs-0.9.4.dist-info}/top_level.txt +0 -0
sempy_labs/_job_scheduler.py CHANGED
@@ -1,6 +1,6 @@
 from sempy._utils._log import log
 import pandas as pd
-from typing import Optional
+from typing import Optional, List
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_item_name_and_id,
@@ -189,7 +189,7 @@ def run_on_demand_item_job(
     Parameters
     ----------
     item : str | uuid.UUID
-        The item name or ID
+        The item name or ID.
     type : str, default=None
         The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
     job_type : str, default="DefaultJob"
@@ -213,3 +213,227 @@ def run_on_demand_item_job(
     )

     print(f"{icons.green_dot} The '{item_name}' {type.lower()} has been executed.")
+
+
+def create_item_schedule_cron(
+    item: str | UUID,
+    type: str,
+    start_date_time: str,
+    end_date_time: str,
+    local_time_zone: str,
+    job_type: str = "DefaultJob",
+    interval_minutes: int = 10,
+    enabled: bool = True,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Create a new schedule for an item based on a `chronological time <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule?tabs=HTTP#cronscheduleconfig>`_.
+
+    This is a wrapper function for the following API: `Job Scheduler - Create Item Schedule <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID.
+    type : str
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    start_date_time: str
+        The start date and time of the schedule. Example: "2024-04-28T00:00:00".
+    end_date_time: str
+        The end date and time of the schedule. Must be later than the start_date_time. Example: "2024-04-30T23:59:00".
+    local_time_zone: str
+        The `time zone <https://learn.microsoft.com/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11>`_ of the schedule. Example: "Central Standard Time".
+    job_type : str, default="DefaultJob"
+        The job type.
+    interval_minutes: int, default=10
+        The schedule interval (in minutes).
+    enabled: bool, default=True
+        Whether the schedule is enabled.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    payload = {
+        "enabled": enabled,
+        "configuration": {
+            "startDateTime": start_date_time,
+            "endDateTime": end_date_time,
+            "localTimeZoneId": local_time_zone,
+            "type": "Cron",
+            "interval": interval_minutes,
+        },
+    }
+
+    _base_api(
+        request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules",
+        method="post",
+        payload=payload,
+        status_codes=201,
+    )
+
+    print(
+        f"{icons.green_dot} The schedule for the '{item_name}' {type.lower()} has been created."
+    )
+
+
+def create_item_schedule_daily(
+    item: str | UUID,
+    type: str,
+    start_date_time: str,
+    end_date_time: str,
+    local_time_zone: str,
+    times: List[str],
+    job_type: str = "DefaultJob",
+    enabled: bool = True,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Create a new daily schedule for an item.
+
+    This is a wrapper function for the following API: `Job Scheduler - Create Item Schedule <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID.
+    type : str
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    start_date_time: str
+        The start date and time of the schedule. Example: "2024-04-28T00:00:00".
+    end_date_time: str
+        The end date and time of the schedule. Must be later than the start_date_time. Example: "2024-04-30T23:59:00".
+    local_time_zone: str
+        The `time zone <https://learn.microsoft.com/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11>`_ of the schedule. Example: "Central Standard Time".
+    times : List[str]
+        A list of time slots in hh:mm format, at most 100 elements are allowed. Example: ["00:00", "12:00"].
+    job_type : str, default="DefaultJob"
+        The job type.
+    enabled: bool, default=True
+        Whether the schedule is enabled.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    payload = {
+        "enabled": enabled,
+        "configuration": {
+            "startDateTime": start_date_time,
+            "endDateTime": end_date_time,
+            "localTimeZoneId": local_time_zone,
+            "type": "Daily",
+            "times": times,
+        },
+    }
+
+    _base_api(
+        request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules",
+        method="post",
+        payload=payload,
+        status_codes=201,
+    )
+
+    print(
+        f"{icons.green_dot} The schedule for the '{item_name}' {type.lower()} has been created."
+    )
+
+
+def create_item_schedule_weekly(
+    item: str | UUID,
+    type: str,
+    start_date_time: str,
+    end_date_time: str,
+    local_time_zone: str,
+    times: List[str],
+    weekdays: List[str],
+    job_type: str = "DefaultJob",
+    enabled: bool = True,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Create a new daily schedule for an item.
+
+    This is a wrapper function for the following API: `Job Scheduler - Create Item Schedule <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID.
+    type : str
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    start_date_time: str
+        The start date and time of the schedule. Example: "2024-04-28T00:00:00".
+    end_date_time: str
+        The end date and time of the schedule. Must be later than the start_date_time. Example: "2024-04-30T23:59:00".
+    local_time_zone: str
+        The `time zone <https://learn.microsoft.com/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11>`_ of the schedule. Example: "Central Standard Time".
+    times : List[str]
+        A list of time slots in hh:mm format, at most 100 elements are allowed. Example: ["00:00", "12:00"].
+    weekdays : List[str]
+        A list of weekdays. Example: ["Monday", "Tuesday"].
+    job_type : str, default="DefaultJob"
+        The job type.
+    enabled: bool, default=True
+        Whether the schedule is enabled.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    weekdays = [w.capitalize() for w in weekdays]
+    weekday_list = [
+        "Sunday",
+        "Monday",
+        "Tuesday",
+        "Wednesday",
+        "Thursday",
+        "Friday",
+        "Saturday",
+    ]
+    for weekday in weekdays:
+        if weekday not in weekday_list:
+            raise ValueError(
+                f"{icons.red_dot} Invalid weekday: {weekday}. Must be one of {weekday_list}."
+            )
+
+    payload = {
+        "enabled": enabled,
+        "configuration": {
+            "startDateTime": start_date_time,
+            "endDateTime": end_date_time,
+            "localTimeZoneId": local_time_zone,
+            "type": "Weekly",
+            "times": times,
+            "weekdays": weekdays,
+        },
+    }
+
+    _base_api(
+        request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules",
+        method="post",
+        payload=payload,
+        status_codes=201,
+    )
+
+    print(
+        f"{icons.green_dot} The schedule for the '{item_name}' {type.lower()} has been created."
+    )
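
For orientation, a minimal usage sketch for the new schedule helpers (not part of the diff; the item, workspace, and time values below are hypothetical, and the top-level import assumes the functions are re-exported from the package root, which the sempy_labs/__init__.py change in this release suggests):

    from sempy_labs import create_item_schedule_daily

    # Hypothetical item and workspace names; job_type may need to match the item
    # type per the Fabric Job Scheduler API (the default shown is "DefaultJob").
    create_item_schedule_daily(
        item="Nightly Load",
        type="Notebook",
        start_date_time="2024-04-28T00:00:00",
        end_date_time="2024-12-31T23:59:00",
        local_time_zone="Central Standard Time",
        times=["02:00", "14:00"],
        workspace="Analytics",
    )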
sempy_labs/_list_functions.py CHANGED
@@ -9,6 +9,7 @@ from sempy_labs._helper_functions import (
     _update_dataframe_datatypes,
     _base_api,
     _create_dataframe,
+    _run_spark_sql_query,
 )
 from sempy._utils._log import log
 import pandas as pd
@@ -584,14 +585,12 @@ def list_columns(
             query = f"{query} FROM {lakehouse}.{lakeTName}"
             sql_statements.append((table_name, query))

-        spark = SparkSession.builder.getOrCreate()
-
         for o in sql_statements:
             tName = o[0]
             query = o[1]

             # Run the query
-            df = spark.sql(query)
+            df = _run_spark_sql_query(query)

             for column in df.columns:
                 x = df.collect()[0][column]
@@ -1240,22 +1239,46 @@ def list_shortcuts(
         uses_pagination=True,
     )

+    sources = [
+        "s3Compatible",
+        "googleCloudStorage",
+        "externalDataShare",
+        "amazonS3",
+        "adlsGen2",
+        "dataverse",
+    ]
+    sources_locpath = ["s3Compatible", "googleCloudStorage", "amazonS3", "adlsGen2"]
+
     for r in responses:
         for i in r.get("value", []):
             tgt = i.get("target", {})
-            s3_compat = tgt.get("s3Compatible", {})
-            gcs = tgt.get("googleCloudStorage", {})
-            eds = tgt.get("externalDataShare", {})
-            connection_id = (
-                s3_compat.get("connectionId")
-                or gcs.get("connectionId")
-                or eds.get("connectionId")
-                or None
+            one_lake = tgt.get("oneLake", {})
+            connection_id = next(
+                (
+                    tgt.get(source, {}).get("connectionId")
+                    for source in sources
+                    if tgt.get(source)
+                ),
+                None,
+            )
+            location = next(
+                (
+                    tgt.get(source, {}).get("location")
+                    for source in sources_locpath
+                    if tgt.get(source)
+                ),
+                None,
+            )
+            sub_path = next(
+                (
+                    tgt.get(source, {}).get("subpath")
+                    for source in sources_locpath
+                    if tgt.get(source)
+                ),
+                None,
             )
-            location = s3_compat.get("location") or gcs.get("location") or None
-            sub_path = s3_compat.get("subpath") or gcs.get("subpath") or None
-            source_workspace_id = tgt.get("oneLake", {}).get("workspaceId")
-            source_item_id = tgt.get("oneLake", {}).get("itemId")
+            source_workspace_id = one_lake.get("workspaceId")
+            source_item_id = one_lake.get("itemId")
             source_workspace_name = (
                 fabric.resolve_workspace_name(source_workspace_id)
                 if source_workspace_id is not None
@@ -1281,10 +1304,10 @@
                     if source_item_id is not None
                     else None
                 ),
-                "OneLake Path": tgt.get("oneLake", {}).get("path"),
+                "OneLake Path": one_lake.get("path"),
                 "Connection Id": connection_id,
                 "Location": location,
-                "Bucket": s3_compat.get("bucket"),
+                "Bucket": tgt.get("s3Compatible", {}).get("bucket"),
                 "SubPath": sub_path,
             }
             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
@@ -1367,7 +1390,7 @@ def list_reports_using_semantic_model(
         & (dfR["Dataset Workspace Id"] == workspace_id)
     ][["Name", "Id"]]
     dfR_filt.rename(columns={"Name": "Report Name", "Id": "Report Id"}, inplace=True)
-    dfR_filt["Report Worskpace Name"] = workspace_name
+    dfR_filt["Report Workspace Name"] = workspace_name
     dfR_filt["Report Workspace Id"] = workspace_id

     return dfR_filt
@@ -1775,7 +1798,7 @@ def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
         "State": "string",
         "Source": "string",
         "Weight": "float_fillna",
-        "Last Modified": "datetime",
+        "Last Modified": "string",
     }

     df = _create_dataframe(columns=columns)
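
The list_shortcuts rewrite above replaces per-source .get() chains with a generic lookup over a list of supported target types. A standalone sketch of that pattern, using a made-up target payload rather than a real API response:

    # The first shortcut source type present in the target wins; None if no known type is present.
    target = {
        "adlsGen2": {
            "connectionId": "1234-abcd",
            "location": "https://account.dfs.core.windows.net",
            "subpath": "files",
        }
    }
    sources = ["s3Compatible", "googleCloudStorage", "externalDataShare", "amazonS3", "adlsGen2", "dataverse"]

    connection_id = next(
        (target.get(source, {}).get("connectionId") for source in sources if target.get(source)),
        None,
    )
    print(connection_id)  # prints "1234-abcd"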
sempy_labs/_ml_experiments.py CHANGED
@@ -91,7 +91,7 @@ def create_ml_experiment(
     _base_api(
         request=f"/v1/workspaces/{workspace_id}/mlExperiments",
         method="post",
-        json=payload,
+        payload=payload,
         status_codes=[201, 202],
         lro_return_status_code=True,
     )
sempy_labs/_model_bpa.py CHANGED
@@ -14,6 +14,7 @@ from sempy_labs._helper_functions import (
     get_language_codes,
     _get_column_aggregate,
     resolve_workspace_name_and_id,
+    _create_spark_session,
 )
 from sempy_labs.lakehouse import get_lakehouse_tables, lakehouse_attached
 from sempy_labs.tom import connect_semantic_model
@@ -181,7 +182,6 @@ def run_model_bpa(
     def translate_using_spark(rule_file):

         from synapse.ml.services import Translate
-        from pyspark.sql import SparkSession

         rules_temp = rule_file.copy()
         rules_temp = rules_temp.drop(["Expression", "URL", "Severity"], axis=1)
@@ -195,7 +195,7 @@ def run_model_bpa(
             ]
         )

-        spark = SparkSession.builder.getOrCreate()
+        spark = _create_spark_session()
         dfRules = spark.createDataFrame(rules_temp, schema)

         columns = ["Category", "Rule Name", "Description"]
@@ -274,12 +274,17 @@
                 tom.all_columns(),
                 lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
             ),
+            "Calculated Column": (
+                tom.all_calculated_columns(),
+                lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+            ),
             "Measure": (tom.all_measures(), lambda obj: obj.Name),
             "Hierarchy": (
                 tom.all_hierarchies(),
                 lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
             ),
             "Table": (tom.model.Tables, lambda obj: obj.Name),
+            "Calculated Table": (tom.all_calculated_tables(), lambda obj: obj.Name),
             "Role": (tom.model.Roles, lambda obj: obj.Name),
             "Model": (tom.model, lambda obj: obj.Model.Name),
             "Calculation Item": (
@@ -322,6 +327,10 @@
                 x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
             elif scope == "Table":
                 x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
+            elif scope == "Calculated Table":
+                x = [
+                    nm(obj) for obj in tom.all_calculated_tables() if expr(obj, tom)
+                ]
             elif scope == "Relationship":
                 x = [nm(obj) for obj in tom.model.Relationships if expr(obj, tom)]
             elif scope == "Role":
@@ -332,6 +341,12 @@
                 x = [
                     nm(obj) for obj in tom.all_calculation_items() if expr(obj, tom)
                 ]
+            elif scope == "Calculated Column":
+                x = [
+                    nm(obj)
+                    for obj in tom.all_calculated_columns()
+                    if expr(obj, tom)
+                ]

             if len(x) > 0:
                 new_data = {
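
With the two new scopes wired in above, a Best Practice Analyzer run should now evaluate calculated columns and calculated tables as well. A hedged usage sketch (dataset and workspace names are hypothetical, and the parameter and column names are assumed from the existing run_model_bpa API rather than shown in this diff):

    import sempy_labs as labs

    bpa_results = labs.run_model_bpa(
        dataset="Sales Model",      # hypothetical semantic model
        workspace="Analytics",      # hypothetical workspace
        return_dataframe=True,
    )
    # "Calculated Column" and "Calculated Table" should now appear among the flagged object types.
    print(bpa_results["Object Type"].unique())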
sempy_labs/_model_bpa_rules.py CHANGED
@@ -416,7 +416,7 @@ def model_bpa_rules(
             lambda obj, tom: any(
                 re.search(
                     r"USERELATIONSHIP\s*\(\s*.+?(?=])\]\s*,\s*'*"
-                    + obj.Name
+                    + re.escape(obj.Name)
                     + r"'*\[",
                     m.Expression,
                     flags=re.IGNORECASE,
@@ -455,7 +455,9 @@
             "Warning",
             "The EVALUATEANDLOG function should not be used in production models",
             lambda obj, tom: re.search(
-                r"evaluateandlog\s*\(", obj.Expression, flags=re.IGNORECASE
+                r"evaluateandlog\s*\(",
+                obj.Expression,
+                flags=re.IGNORECASE,
             ),
             "The EVALUATEANDLOG function is meant to be used only in development/test environments and should not be used in production models.",
             "https://pbidax.wordpress.com/2022/08/16/introduce-the-dax-evaluateandlog-function",
@@ -563,7 +565,12 @@
         ),
         (
             "DAX Expressions",
-            "Measure",
+            [
+                "Measure",
+                "Calculated Table",
+                "Calculated Column",
+                "Calculation Item",
+            ],
             "Error",
             "Column references should be fully qualified",
             lambda obj, tom: any(
@@ -574,7 +581,12 @@
         ),
         (
             "DAX Expressions",
-            "Measure",
+            [
+                "Measure",
+                "Calculated Table",
+                "Calculated Column",
+                "Calculation Item",
+            ],
             "Error",
             "Measure references should be unqualified",
             lambda obj, tom: any(
@@ -592,13 +604,13 @@
             and not any(
                 re.search(
                     r"USERELATIONSHIP\s*\(\s*\'*"
-                    + obj.FromTable.Name
+                    + re.escape(obj.FromTable.Name)
                     + r"'*\["
-                    + obj.FromColumn.Name
+                    + re.escape(obj.FromColumn.Name)
                     + r"\]\s*,\s*'*"
-                    + obj.ToTable.Name
+                    + re.escape(obj.ToTable.Name)
                     + r"'*\["
-                    + obj.ToColumn.Name
+                    + re.escape(obj.ToColumn.Name)
                     + r"\]",
                     m.Expression,
                     flags=re.IGNORECASE,
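
The re.escape() changes above matter whenever a model object name contains regex metacharacters. A small, self-contained illustration (the table name and DAX expression below are made up):

    import re

    table_name = "Sales (2024)"  # hypothetical table name containing regex metacharacters
    expression = "CALCULATE([Amt], USERELATIONSHIP('Sales (2024)'[Date], 'Calendar'[Date]))"

    pattern = r"USERELATIONSHIP\s*\(\s*'*" + re.escape(table_name) + r"'*\["
    print(bool(re.search(pattern, expression, flags=re.IGNORECASE)))  # True
    # Without re.escape the parentheses would be treated as a regex group and the name would never match.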
sempy_labs/_semantic_models.py ADDED
@@ -0,0 +1,117 @@
+from uuid import UUID
+from typing import Optional
+import pandas as pd
+from sempy_labs._helper_functions import (
+    _create_dataframe,
+    _base_api,
+    _update_dataframe_datatypes,
+    resolve_workspace_name_and_id,
+    resolve_dataset_name_and_id,
+)
+import sempy_labs._icons as icons
+
+
+def get_semantic_model_refresh_schedule(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
+    """
+    Gets the refresh schedule for the specified dataset from the specified workspace.
+
+    Parameters
+    ----------
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Shows the refresh schedule for the specified dataset from the specified workspace.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace)
+
+    columns = {
+        "Days": "str",
+        "Times": "str",
+        "Enabled": "bool",
+        "Local Time Zone Id": "str",
+        "Notify Option": "str",
+    }
+
+    column_map = {
+        "days": "Days",
+        "times": "Times",
+        "enabled": "Enabled",
+        "localTimeZoneId": "Local Time Zone Id",
+        "notifyOption": "Notify Option",
+    }
+
+    df = _create_dataframe(columns)
+
+    result = _base_api(
+        request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule"
+    ).json()
+
+    df = (
+        pd.json_normalize(result)
+        .drop(columns=["@odata.context"], errors="ignore")
+        .rename(columns=column_map)
+    )
+
+    _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+def enable_semantic_model_scheduled_refresh(
+    dataset: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    enable: bool = True,
+):
+    """
+    Enables the scheduled refresh for the specified dataset from the specified workspace.
+
+    Parameters
+    ----------
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    enable : bool, default=True
+        If True, enables the scheduled refresh.
+        If False, disables the scheduled refresh.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace)
+
+    df = get_semantic_model_refresh_schedule(dataset=dataset, workspace=workspace)
+    status = df["Enabled"].iloc[0]
+
+    if enable and status:
+        print(
+            f"{icons.info} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace is already enabled."
+        )
+    elif not enable and not status:
+        print(
+            f"{icons.info} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace is already disabled."
+        )
+    else:
+        payload = {"value": {"enabled": enable}}
+
+        _base_api(
+            request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule",
+            method="patch",
+            payload=payload,
+        )
+
+        print(
+            f"{icons.green_dot} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace has been enabled."
+        )
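
A minimal usage sketch for the new module (dataset and workspace names are hypothetical; the top-level import assumes these functions are surfaced via sempy_labs/__init__.py, which this release also touches, otherwise import them from sempy_labs._semantic_models):

    from sempy_labs import (
        get_semantic_model_refresh_schedule,
        enable_semantic_model_scheduled_refresh,
    )

    # Inspect the current refresh schedule of a semantic model.
    schedule = get_semantic_model_refresh_schedule(dataset="Sales Model", workspace="Analytics")
    print(schedule[["Days", "Times", "Enabled"]])

    # Pass enable=False to turn the scheduled refresh off.
    enable_semantic_model_scheduled_refresh(dataset="Sales Model", workspace="Analytics", enable=False)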