semantic-link-labs 0.9.3__py3-none-any.whl → 0.9.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (41)
  1. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.4.dist-info}/METADATA +9 -6
  2. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.4.dist-info}/RECORD +41 -31
  3. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.4.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +27 -1
  5. sempy_labs/_capacity_migration.py +3 -2
  6. sempy_labs/_dax.py +17 -3
  7. sempy_labs/_delta_analyzer.py +279 -127
  8. sempy_labs/_eventhouses.py +70 -1
  9. sempy_labs/_generate_semantic_model.py +30 -9
  10. sempy_labs/_helper_functions.py +30 -1
  11. sempy_labs/_job_scheduler.py +226 -2
  12. sempy_labs/_list_functions.py +40 -16
  13. sempy_labs/_model_bpa.py +15 -0
  14. sempy_labs/_model_bpa_rules.py +12 -2
  15. sempy_labs/_semantic_models.py +117 -0
  16. sempy_labs/_sql.py +73 -6
  17. sempy_labs/_sqldatabase.py +227 -0
  18. sempy_labs/admin/__init__.py +49 -8
  19. sempy_labs/admin/_activities.py +166 -0
  20. sempy_labs/admin/_apps.py +143 -0
  21. sempy_labs/admin/_basic_functions.py +32 -652
  22. sempy_labs/admin/_capacities.py +250 -0
  23. sempy_labs/admin/_datasets.py +184 -0
  24. sempy_labs/admin/_domains.py +1 -1
  25. sempy_labs/admin/_items.py +3 -1
  26. sempy_labs/admin/_reports.py +165 -0
  27. sempy_labs/admin/_scanner.py +0 -1
  28. sempy_labs/admin/_shared.py +74 -0
  29. sempy_labs/admin/_tenant.py +489 -0
  30. sempy_labs/directlake/_dl_helper.py +0 -1
  31. sempy_labs/directlake/_update_directlake_partition_entity.py +6 -0
  32. sempy_labs/graph/_teams.py +1 -1
  33. sempy_labs/graph/_users.py +9 -1
  34. sempy_labs/lakehouse/_shortcuts.py +28 -15
  35. sempy_labs/report/__init__.py +3 -1
  36. sempy_labs/report/_download_report.py +4 -1
  37. sempy_labs/report/_export_report.py +272 -0
  38. sempy_labs/report/_report_functions.py +9 -261
  39. sempy_labs/tom/_model.py +278 -29
  40. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.4.dist-info}/LICENSE +0 -0
  41. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.4.dist-info}/top_level.txt +0 -0
@@ -11,6 +11,7 @@ from sempy_labs._helper_functions import (
     _conv_b64,
     _decode_b64,
     _base_api,
+    _mount,
 )
 from sempy_labs.lakehouse._lakehouse import lakehouse_attached
 import sempy_labs._icons as icons
@@ -252,6 +253,7 @@ def deploy_semantic_model(
     target_workspace: Optional[str | UUID] = None,
     refresh_target_dataset: bool = True,
     overwrite: bool = False,
+    perspective: Optional[str] = None,
 ):
     """
     Deploys a semantic model based on an existing semantic model.
@@ -274,6 +276,8 @@ def deploy_semantic_model(
         If set to True, this will initiate a full refresh of the target semantic model in the target workspace.
     overwrite : bool, default=False
         If set to True, overwrites the existing semantic model in the workspace if it exists.
+    perspective : str, default=None
+        Set this to the name of a perspective in the model and it will reduce the deployed model down to the tables/columns/measures/hierarchies within that perspective.
     """

     (source_workspace_name, source_workspace_id) = resolve_workspace_name_and_id(
@@ -307,7 +311,21 @@ def deploy_semantic_model(
             f"{icons.warning} The '{target_dataset}' semantic model already exists within the '{target_workspace_name}' workspace. The 'overwrite' parameter is set to False so the source semantic model was not deployed to the target destination."
         )

-    bim = get_semantic_model_bim(dataset=source_dataset, workspace=source_workspace_id)
+    if perspective is not None:
+
+        from sempy_labs.tom import connect_semantic_model
+
+        with connect_semantic_model(
+            dataset=source_dataset, workspace=source_workspace, readonly=True
+        ) as tom:
+
+            df_added = tom._reduce_model(perspective_name=perspective)
+            bim = tom.get_bim()
+
+    else:
+        bim = get_semantic_model_bim(
+            dataset=source_dataset, workspace=source_workspace_id
+        )

     # Create the semantic model if the model does not exist
     if dfD_filt.empty:
@@ -325,6 +343,9 @@ def deploy_semantic_model(
     if refresh_target_dataset:
         refresh_semantic_model(dataset=target_dataset, workspace=target_workspace_id)

+    if perspective is not None:
+        return df_added
+

 @log
 def get_semantic_model_bim(
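For orientation, a hedged usage sketch of the new 'perspective' option in deploy_semantic_model follows; the dataset and workspace names are placeholders, not taken from this release, and the keyword names are read off the diff above:

from sempy_labs import deploy_semantic_model

# Placeholder names; when 'perspective' is set, only the tables/columns/measures/
# hierarchies in that perspective are deployed, and a dataframe describing the
# retained objects is returned.
df_added = deploy_semantic_model(
    source_dataset="Adventure Works",
    source_workspace="Dev Workspace",
    target_dataset="Adventure Works - Sales",
    target_workspace="Prod Workspace",
    perspective="Sales",
    refresh_target_dataset=True,
    overwrite=True,
)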
@@ -368,16 +389,16 @@ def get_semantic_model_bim(
                 f"{icons.red_dot} In order to save the model.bim file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook."
             )

-        lakehouse = resolve_lakehouse_name()
-        folderPath = "/lakehouse/default/Files"
-        fileExt = ".bim"
-        if not save_to_file_name.endswith(fileExt):
-            save_to_file_name = f"{save_to_file_name}{fileExt}"
-        filePath = os.path.join(folderPath, save_to_file_name)
-        with open(filePath, "w") as json_file:
+        local_path = _mount()
+        save_folder = f"{local_path}/Files"
+        file_ext = ".bim"
+        if not save_to_file_name.endswith(file_ext):
+            save_to_file_name = f"{save_to_file_name}{file_ext}"
+        file_path = os.path.join(save_folder, save_to_file_name)
+        with open(file_path, "w") as json_file:
             json.dump(bimJson, json_file, indent=4)
         print(
-            f"{icons.green_dot} The {fileExt} file for the '{dataset_name}' semantic model has been saved to the '{lakehouse}' in this location: '{filePath}'.\n\n"
+            f"{icons.green_dot} The {file_ext} file for the '{dataset_name}' semantic model has been saved to the lakehouse attached to the notebook within: 'Files/{save_to_file_name}'.\n\n"
         )

     return bimJson
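A minimal sketch of a call exercising the new save path, assuming a lakehouse is attached to the notebook; the dataset, workspace, and file names are illustrative only:

from sempy_labs import get_semantic_model_bim

# Illustrative names; the .bim file is written to Files/model.bim in the mounted
# lakehouse, and the model JSON is also returned for further use.
bim = get_semantic_model_bim(
    dataset="Adventure Works",
    workspace="Dev Workspace",
    save_to_file_name="model",
)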
@@ -512,7 +512,6 @@ def save_as_delta_table(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    from pyspark.sql import SparkSession
     from pyspark.sql.types import (
         StringType,
         IntegerType,
@@ -1637,3 +1636,33 @@ def _run_spark_sql_query(query):
     spark = _create_spark_session()

     return spark.sql(query)
+
+
+def _mount(lakehouse, workspace) -> str:
+    """
+    Mounts a lakehouse to a notebook if it is not already mounted. Returns the local path to the lakehouse.
+    """
+
+    import notebookutils
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace=workspace)
+    (lakehouse_name, lakehouse_id) = resolve_lakehouse_name_and_id(
+        lakehouse=lakehouse, workspace=workspace
+    )
+
+    lake_path = create_abfss_path(lakehouse_id, workspace_id)
+    mounts = notebookutils.fs.mounts()
+    mount_point = f"/{workspace_name.replace(' ', '')}{lakehouse_name.replace(' ', '')}"
+    if not any(i.get("source") == lake_path for i in mounts):
+        # Mount lakehouse if not mounted
+        notebookutils.fs.mount(lake_path, mount_point)
+        print(
+            f"{icons.green_dot} Mounted the '{lakehouse_name}' lakehouse within the '{workspace_name}' to the notebook."
+        )
+
+    mounts = notebookutils.fs.mounts()
+    local_path = next(
+        i.get("localPath") for i in mounts if i.get("source") == lake_path
+    )
+
+    return local_path
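The helper resolves the lakehouse to an abfss path, mounts it via notebookutils only when no existing mount points at that source, and then returns the mount's local path. A rough usage sketch, with placeholder names (and noting that _mount is a private helper whose signature may change):

from sempy_labs._helper_functions import _mount

# Placeholder lakehouse/workspace names; the returned path is a local filesystem
# location for the mounted lakehouse and can be used with ordinary file I/O.
local_path = _mount(lakehouse="MyLakehouse", workspace="Dev Workspace")

with open(f"{local_path}/Files/example.txt", "w") as f:
    f.write("hello from the mounted lakehouse")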
@@ -1,6 +1,6 @@
 from sempy._utils._log import log
 import pandas as pd
-from typing import Optional
+from typing import Optional, List
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_item_name_and_id,
@@ -189,7 +189,7 @@ def run_on_demand_item_job(
     Parameters
     ----------
     item : str | uuid.UUID
-        The item name or ID
+        The item name or ID.
     type : str, default=None
         The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
     job_type : str, default="DefaultJob"
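For context on the docstring touched here, a minimal call might look like the following; the item name, type, and module import are illustrative (the module appears to be sempy_labs/_job_scheduler.py based on the file list):

from sempy_labs._job_scheduler import run_on_demand_item_job

# Hypothetical item; the item type is required when passing a name rather than an ID.
run_on_demand_item_job(item="Daily Load", type="Notebook")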
@@ -213,3 +213,227 @@
     )

     print(f"{icons.green_dot} The '{item_name}' {type.lower()} has been executed.")
+
+
+def create_item_schedule_cron(
+    item: str | UUID,
+    type: str,
+    start_date_time: str,
+    end_date_time: str,
+    local_time_zone: str,
+    job_type: str = "DefaultJob",
+    interval_minutes: int = 10,
+    enabled: bool = True,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Create a new schedule for an item based on a `chronological time <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule?tabs=HTTP#cronscheduleconfig>`_.
+
+    This is a wrapper function for the following API: `Job Scheduler - Create Item Schedule <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID.
+    type : str
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    start_date_time: str
+        The start date and time of the schedule. Example: "2024-04-28T00:00:00".
+    end_date_time: str
+        The end date and time of the schedule. Must be later than the start_date_time. Example: "2024-04-30T23:59:00".
+    local_time_zone: str
+        The `time zone <https://learn.microsoft.com/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11>`_ of the schedule. Example: "Central Standard Time".
+    job_type : str, default="DefaultJob"
+        The job type.
+    interval_minutes: int, default=10
+        The schedule interval (in minutes).
+    enabled: bool, default=True
+        Whether the schedule is enabled.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    payload = {
+        "enabled": enabled,
+        "configuration": {
+            "startDateTime": start_date_time,
+            "endDateTime": end_date_time,
+            "localTimeZoneId": local_time_zone,
+            "type": "Cron",
+            "interval": interval_minutes,
+        },
+    }
+
+    _base_api(
+        request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules",
+        method="post",
+        payload=payload,
+        status_codes=201,
+    )
+
+    print(
+        f"{icons.green_dot} The schedule for the '{item_name}' {type.lower()} has been created."
+    )
+
+
+def create_item_schedule_daily(
+    item: str | UUID,
+    type: str,
+    start_date_time: str,
+    end_date_time: str,
+    local_time_zone: str,
+    times: List[str],
+    job_type: str = "DefaultJob",
+    enabled: bool = True,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Create a new daily schedule for an item.
+
+    This is a wrapper function for the following API: `Job Scheduler - Create Item Schedule <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID.
+    type : str
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    start_date_time: str
+        The start date and time of the schedule. Example: "2024-04-28T00:00:00".
+    end_date_time: str
+        The end date and time of the schedule. Must be later than the start_date_time. Example: "2024-04-30T23:59:00".
+    local_time_zone: str
+        The `time zone <https://learn.microsoft.com/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11>`_ of the schedule. Example: "Central Standard Time".
+    times : List[str]
+        A list of time slots in hh:mm format, at most 100 elements are allowed. Example: ["00:00", "12:00"].
+    job_type : str, default="DefaultJob"
+        The job type.
+    enabled: bool, default=True
+        Whether the schedule is enabled.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    payload = {
+        "enabled": enabled,
+        "configuration": {
+            "startDateTime": start_date_time,
+            "endDateTime": end_date_time,
+            "localTimeZoneId": local_time_zone,
+            "type": "Daily",
+            "times": times,
+        },
+    }
+
+    _base_api(
+        request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules",
+        method="post",
+        payload=payload,
+        status_codes=201,
+    )
+
+    print(
+        f"{icons.green_dot} The schedule for the '{item_name}' {type.lower()} has been created."
+    )
+
+
+def create_item_schedule_weekly(
+    item: str | UUID,
+    type: str,
+    start_date_time: str,
+    end_date_time: str,
+    local_time_zone: str,
+    times: List[str],
+    weekdays: List[str],
+    job_type: str = "DefaultJob",
+    enabled: bool = True,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Create a new daily schedule for an item.
+
+    This is a wrapper function for the following API: `Job Scheduler - Create Item Schedule <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/create-item-schedule>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID.
+    type : str
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    start_date_time: str
+        The start date and time of the schedule. Example: "2024-04-28T00:00:00".
+    end_date_time: str
+        The end date and time of the schedule. Must be later than the start_date_time. Example: "2024-04-30T23:59:00".
+    local_time_zone: str
+        The `time zone <https://learn.microsoft.com/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11>`_ of the schedule. Example: "Central Standard Time".
+    times : List[str]
+        A list of time slots in hh:mm format, at most 100 elements are allowed. Example: ["00:00", "12:00"].
+    weekdays : List[str]
+        A list of weekdays. Example: ["Monday", "Tuesday"].
+    job_type : str, default="DefaultJob"
+        The job type.
+    enabled: bool, default=True
+        Whether the schedule is enabled.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    weekdays = [w.capitalize() for w in weekdays]
+    weekday_list = [
+        "Sunday",
+        "Monday",
+        "Tuesday",
+        "Wednesday",
+        "Thursday",
+        "Friday",
+        "Saturday",
+    ]
+    for weekday in weekdays:
+        if weekday not in weekday_list:
+            raise ValueError(
+                f"{icons.red_dot} Invalid weekday: {weekday}. Must be one of {weekday_list}."
+            )
+
+    payload = {
+        "enabled": enabled,
+        "configuration": {
+            "startDateTime": start_date_time,
+            "endDateTime": end_date_time,
+            "localTimeZoneId": local_time_zone,
+            "type": "Weekly",
+            "times": times,
+            "weekdays": weekdays,
+        },
+    }
+
+    _base_api(
+        request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules",
+        method="post",
+        payload=payload,
+        status_codes=201,
+    )
+
+    print(
+        f"{icons.green_dot} The schedule for the '{item_name}' {type.lower()} has been created."
+    )
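A hedged sketch of how the three new schedule helpers might be called, with all item, workspace, and time values below as placeholders and the import path inferred from the file list (sempy_labs/_job_scheduler.py); the functions may also be re-exported from the package root:

from sempy_labs._job_scheduler import (
    create_item_schedule_cron,
    create_item_schedule_daily,
    create_item_schedule_weekly,
)

common = dict(
    item="Daily Load",            # placeholder item name
    type="Notebook",              # item type is required when passing a name
    start_date_time="2024-04-28T00:00:00",
    end_date_time="2024-04-30T23:59:00",
    local_time_zone="Central Standard Time",
    workspace="Dev Workspace",    # placeholder workspace
)

# Run every 30 minutes within the window.
create_item_schedule_cron(interval_minutes=30, **common)

# Run at fixed times every day.
create_item_schedule_daily(times=["00:00", "12:00"], **common)

# Run at fixed times on selected weekdays.
create_item_schedule_weekly(times=["06:00"], weekdays=["Monday", "Friday"], **common)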
@@ -1239,22 +1239,46 @@ def list_shortcuts(
         uses_pagination=True,
     )

+    sources = [
+        "s3Compatible",
+        "googleCloudStorage",
+        "externalDataShare",
+        "amazonS3",
+        "adlsGen2",
+        "dataverse",
+    ]
+    sources_locpath = ["s3Compatible", "googleCloudStorage", "amazonS3", "adlsGen2"]
+
     for r in responses:
         for i in r.get("value", []):
             tgt = i.get("target", {})
-            s3_compat = tgt.get("s3Compatible", {})
-            gcs = tgt.get("googleCloudStorage", {})
-            eds = tgt.get("externalDataShare", {})
-            connection_id = (
-                s3_compat.get("connectionId")
-                or gcs.get("connectionId")
-                or eds.get("connectionId")
-                or None
+            one_lake = tgt.get("oneLake", {})
+            connection_id = next(
+                (
+                    tgt.get(source, {}).get("connectionId")
+                    for source in sources
+                    if tgt.get(source)
+                ),
+                None,
+            )
+            location = next(
+                (
+                    tgt.get(source, {}).get("location")
+                    for source in sources_locpath
+                    if tgt.get(source)
+                ),
+                None,
+            )
+            sub_path = next(
+                (
+                    tgt.get(source, {}).get("subpath")
+                    for source in sources_locpath
+                    if tgt.get(source)
+                ),
+                None,
             )
-            location = s3_compat.get("location") or gcs.get("location") or None
-            sub_path = s3_compat.get("subpath") or gcs.get("subpath") or None
-            source_workspace_id = tgt.get("oneLake", {}).get("workspaceId")
-            source_item_id = tgt.get("oneLake", {}).get("itemId")
+            source_workspace_id = one_lake.get("workspaceId")
+            source_item_id = one_lake.get("itemId")
             source_workspace_name = (
                 fabric.resolve_workspace_name(source_workspace_id)
                 if source_workspace_id is not None
@@ -1280,10 +1304,10 @@ def list_shortcuts(
                     if source_item_id is not None
                     else None
                 ),
-                "OneLake Path": tgt.get("oneLake", {}).get("path"),
+                "OneLake Path": one_lake.get("path"),
                 "Connection Id": connection_id,
                 "Location": location,
-                "Bucket": s3_compat.get("bucket"),
+                "Bucket": tgt.get("s3Compatible", {}).get("bucket"),
                 "SubPath": sub_path,
             }
             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
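The refactor replaces the per-source or-chains with a single lookup over the list of possible target keys. A standalone illustration of that pattern, using a made-up shortcut target payload:

# Made-up target payload for illustration only; real payloads come from the
# OneLake shortcuts API response handled in list_shortcuts above.
tgt = {
    "adlsGen2": {
        "connectionId": "1234",
        "location": "https://account.dfs.core.windows.net",
        "subpath": "/raw",
    }
}

sources = [
    "s3Compatible",
    "googleCloudStorage",
    "externalDataShare",
    "amazonS3",
    "adlsGen2",
    "dataverse",
]

# First matching source wins; the fallback is None (e.g. for OneLake shortcuts).
connection_id = next(
    (tgt.get(source, {}).get("connectionId") for source in sources if tgt.get(source)),
    None,
)
print(connection_id)  # -> "1234"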
@@ -1366,7 +1390,7 @@ def list_reports_using_semantic_model(
         & (dfR["Dataset Workspace Id"] == workspace_id)
     ][["Name", "Id"]]
     dfR_filt.rename(columns={"Name": "Report Name", "Id": "Report Id"}, inplace=True)
-    dfR_filt["Report Worskpace Name"] = workspace_name
+    dfR_filt["Report Workspace Name"] = workspace_name
     dfR_filt["Report Workspace Id"] = workspace_id

     return dfR_filt
@@ -1774,7 +1798,7 @@ def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
         "State": "string",
         "Source": "string",
         "Weight": "float_fillna",
-        "Last Modified": "datetime",
+        "Last Modified": "string",
     }

     df = _create_dataframe(columns=columns)
sempy_labs/_model_bpa.py CHANGED
@@ -274,12 +274,17 @@ def run_model_bpa(
             tom.all_columns(),
             lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
         ),
+        "Calculated Column": (
+            tom.all_calculated_columns(),
+            lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+        ),
         "Measure": (tom.all_measures(), lambda obj: obj.Name),
         "Hierarchy": (
             tom.all_hierarchies(),
             lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
         ),
         "Table": (tom.model.Tables, lambda obj: obj.Name),
+        "Calculated Table": (tom.all_calculated_tables(), lambda obj: obj.Name),
         "Role": (tom.model.Roles, lambda obj: obj.Name),
         "Model": (tom.model, lambda obj: obj.Model.Name),
         "Calculation Item": (
@@ -322,6 +327,10 @@ def run_model_bpa(
                 x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
             elif scope == "Table":
                 x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
+            elif scope == "Calculated Table":
+                x = [
+                    nm(obj) for obj in tom.all_calculated_tables() if expr(obj, tom)
+                ]
             elif scope == "Relationship":
                 x = [nm(obj) for obj in tom.model.Relationships if expr(obj, tom)]
             elif scope == "Role":
@@ -332,6 +341,12 @@ def run_model_bpa(
                 x = [
                     nm(obj) for obj in tom.all_calculation_items() if expr(obj, tom)
                 ]
+            elif scope == "Calculated Column":
+                x = [
+                    nm(obj)
+                    for obj in tom.all_calculated_columns()
+                    if expr(obj, tom)
+                ]

             if len(x) > 0:
                 new_data = {
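With these additions, calculated columns and calculated tables are evaluated as their own scopes when the analyzer runs. A hedged sketch of a call (dataset and workspace names are placeholders, and the top-level export path is assumed):

from sempy_labs import run_model_bpa

# Placeholder names; results can now include violations whose object type is
# "Calculated Column" or "Calculated Table" in addition to the existing scopes.
run_model_bpa(dataset="Adventure Works", workspace="Dev Workspace")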
@@ -565,7 +565,12 @@ def model_bpa_rules(
         ),
         (
             "DAX Expressions",
-            "Measure",
+            [
+                "Measure",
+                "Calculated Table",
+                "Calculated Column",
+                "Calculation Item",
+            ],
             "Error",
             "Column references should be fully qualified",
             lambda obj, tom: any(
@@ -576,7 +581,12 @@ def model_bpa_rules(
         ),
         (
             "DAX Expressions",
-            "Measure",
+            [
+                "Measure",
+                "Calculated Table",
+                "Calculated Column",
+                "Calculation Item",
+            ],
             "Error",
             "Measure references should be unqualified",
             lambda obj, tom: any(
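As the two rules above show, a rule's scope can now be a list of object types rather than a single string. A hypothetical custom rule following the same (category, scope, severity, name, expression) shape seen in this diff:

# Hypothetical rule for illustration; the scope field is a list of object types,
# and the expression receives the model object plus the TOM wrapper.
custom_rule = (
    "DAX Expressions",
    ["Measure", "Calculated Table", "Calculated Column", "Calculation Item"],
    "Warning",
    "Expressions should not reference a hard-coded workspace name",
    lambda obj, tom: "MyWorkspace" in obj.Expression,
)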
@@ -0,0 +1,117 @@
+from uuid import UUID
+from typing import Optional
+import pandas as pd
+from sempy_labs._helper_functions import (
+    _create_dataframe,
+    _base_api,
+    _update_dataframe_datatypes,
+    resolve_workspace_name_and_id,
+    resolve_dataset_name_and_id,
+)
+import sempy_labs._icons as icons
+
+
+def get_semantic_model_refresh_schedule(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
+    """
+    Gets the refresh schedule for the specified dataset from the specified workspace.
+
+    Parameters
+    ----------
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Shows the refresh schedule for the specified dataset from the specified workspace.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace)
+
+    columns = {
+        "Days": "str",
+        "Times": "str",
+        "Enabled": "bool",
+        "Local Time Zone Id": "str",
+        "Notify Option": "str",
+    }
+
+    column_map = {
+        "days": "Days",
+        "times": "Times",
+        "enabled": "Enabled",
+        "localTimeZoneId": "Local Time Zone Id",
+        "notifyOption": "Notify Option",
+    }
+
+    df = _create_dataframe(columns)
+
+    result = _base_api(
+        request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule"
+    ).json()
+
+    df = (
+        pd.json_normalize(result)
+        .drop(columns=["@odata.context"], errors="ignore")
+        .rename(columns=column_map)
+    )
+
+    _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+def enable_semantic_model_scheduled_refresh(
+    dataset: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    enable: bool = True,
+):
+    """
+    Enables the scheduled refresh for the specified dataset from the specified workspace.
+
+    Parameters
+    ----------
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    enable : bool, default=True
+        If True, enables the scheduled refresh.
+        If False, disables the scheduled refresh.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace)
+
+    df = get_semantic_model_refresh_schedule(dataset=dataset, workspace=workspace)
+    status = df["Enabled"].iloc[0]
+
+    if enable and status:
+        print(
+            f"{icons.info} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace is already enabled."
+        )
+    elif not enable and not status:
+        print(
+            f"{icons.info} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace is already disabled."
+        )
+    else:
+        payload = {"value": {"enabled": enable}}
+
+        _base_api(
+            request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule",
+            method="patch",
+            payload=payload,
+        )
+
+        print(
+            f"{icons.green_dot} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace has been enabled."
+        )
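This new module is not named in the hunk header, but it appears to be sempy_labs/_sql... no, sempy_labs/_semantic_models.py, the only new +117-line file in the list above. Assuming that path, a rough usage sketch with placeholder dataset and workspace names:

from sempy_labs._semantic_models import (
    get_semantic_model_refresh_schedule,
    enable_semantic_model_scheduled_refresh,
)

# Placeholder names; inspect the current schedule, then toggle scheduled refresh off.
schedule_df = get_semantic_model_refresh_schedule(
    dataset="Adventure Works", workspace="Dev Workspace"
)
print(schedule_df[["Days", "Times", "Enabled"]])

enable_semantic_model_scheduled_refresh(
    dataset="Adventure Works", workspace="Dev Workspace", enable=False
)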