semantic-link-labs 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (95)
  1. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/METADATA +9 -6
  2. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/RECORD +95 -87
  3. sempy_labs/__init__.py +11 -1
  4. sempy_labs/_a_lib_info.py +2 -0
  5. sempy_labs/_capacities.py +2 -0
  6. sempy_labs/_connections.py +11 -0
  7. sempy_labs/_dashboards.py +9 -4
  8. sempy_labs/_data_pipelines.py +5 -0
  9. sempy_labs/_dataflows.py +284 -17
  10. sempy_labs/_daxformatter.py +80 -0
  11. sempy_labs/_delta_analyzer_history.py +4 -1
  12. sempy_labs/_deployment_pipelines.py +4 -0
  13. sempy_labs/_documentation.py +3 -0
  14. sempy_labs/_environments.py +10 -1
  15. sempy_labs/_eventhouses.py +12 -5
  16. sempy_labs/_eventstreams.py +11 -3
  17. sempy_labs/_external_data_shares.py +8 -2
  18. sempy_labs/_gateways.py +26 -5
  19. sempy_labs/_git.py +11 -0
  20. sempy_labs/_graphQL.py +10 -3
  21. sempy_labs/_helper_functions.py +62 -10
  22. sempy_labs/_job_scheduler.py +54 -7
  23. sempy_labs/_kql_databases.py +11 -2
  24. sempy_labs/_kql_querysets.py +11 -3
  25. sempy_labs/_list_functions.py +17 -45
  26. sempy_labs/_managed_private_endpoints.py +11 -2
  27. sempy_labs/_mirrored_databases.py +17 -3
  28. sempy_labs/_mirrored_warehouses.py +9 -3
  29. sempy_labs/_ml_experiments.py +11 -3
  30. sempy_labs/_ml_models.py +11 -3
  31. sempy_labs/_model_bpa_rules.py +2 -0
  32. sempy_labs/_mounted_data_factories.py +12 -8
  33. sempy_labs/_notebooks.py +6 -3
  34. sempy_labs/_refresh_semantic_model.py +1 -0
  35. sempy_labs/_semantic_models.py +107 -0
  36. sempy_labs/_spark.py +7 -0
  37. sempy_labs/_sql_endpoints.py +208 -0
  38. sempy_labs/_sqldatabase.py +13 -4
  39. sempy_labs/_tags.py +5 -1
  40. sempy_labs/_user_delegation_key.py +2 -0
  41. sempy_labs/_variable_libraries.py +3 -1
  42. sempy_labs/_warehouses.py +13 -3
  43. sempy_labs/_workloads.py +3 -0
  44. sempy_labs/_workspace_identity.py +3 -0
  45. sempy_labs/_workspaces.py +14 -1
  46. sempy_labs/admin/__init__.py +2 -0
  47. sempy_labs/admin/_activities.py +6 -5
  48. sempy_labs/admin/_apps.py +31 -31
  49. sempy_labs/admin/_artifacts.py +8 -3
  50. sempy_labs/admin/_basic_functions.py +5 -0
  51. sempy_labs/admin/_capacities.py +39 -28
  52. sempy_labs/admin/_datasets.py +51 -51
  53. sempy_labs/admin/_domains.py +17 -1
  54. sempy_labs/admin/_external_data_share.py +8 -2
  55. sempy_labs/admin/_git.py +14 -9
  56. sempy_labs/admin/_items.py +15 -2
  57. sempy_labs/admin/_reports.py +64 -65
  58. sempy_labs/admin/_shared.py +7 -1
  59. sempy_labs/admin/_tags.py +5 -0
  60. sempy_labs/admin/_tenant.py +5 -2
  61. sempy_labs/admin/_users.py +9 -3
  62. sempy_labs/admin/_workspaces.py +88 -0
  63. sempy_labs/directlake/_dl_helper.py +2 -0
  64. sempy_labs/directlake/_generate_shared_expression.py +2 -0
  65. sempy_labs/directlake/_get_directlake_lakehouse.py +2 -4
  66. sempy_labs/directlake/_get_shared_expression.py +2 -0
  67. sempy_labs/directlake/_guardrails.py +2 -0
  68. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +5 -3
  69. sempy_labs/directlake/_warm_cache.py +1 -0
  70. sempy_labs/graph/_groups.py +22 -7
  71. sempy_labs/graph/_teams.py +7 -2
  72. sempy_labs/graph/_users.py +1 -0
  73. sempy_labs/lakehouse/_blobs.py +1 -0
  74. sempy_labs/lakehouse/_get_lakehouse_tables.py +88 -27
  75. sempy_labs/lakehouse/_helper.py +2 -0
  76. sempy_labs/lakehouse/_lakehouse.py +38 -5
  77. sempy_labs/lakehouse/_livy_sessions.py +2 -1
  78. sempy_labs/lakehouse/_shortcuts.py +7 -1
  79. sempy_labs/migration/_direct_lake_to_import.py +2 -0
  80. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +15 -0
  81. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +213 -0
  82. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +45 -0
  83. sempy_labs/report/_download_report.py +2 -1
  84. sempy_labs/report/_generate_report.py +2 -0
  85. sempy_labs/report/_paginated.py +2 -0
  86. sempy_labs/report/_report_bpa.py +110 -122
  87. sempy_labs/report/_report_bpa_rules.py +2 -0
  88. sempy_labs/report/_report_functions.py +7 -0
  89. sempy_labs/report/_reportwrapper.py +86 -48
  90. sempy_labs/theme/__init__.py +12 -0
  91. sempy_labs/theme/_org_themes.py +96 -0
  92. sempy_labs/tom/_model.py +702 -35
  93. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/WHEEL +0 -0
  94. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/licenses/LICENSE +0 -0
  95. {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/top_level.txt +0 -0
sempy_labs/_dataflows.py CHANGED
@@ -6,10 +6,16 @@ from sempy_labs._helper_functions import (
     _base_api,
     _create_dataframe,
     resolve_workspace_name,
+    resolve_workspace_id,
+    _decode_b64,
+    _conv_b64,
+    get_jsonpath_value,
 )
 from typing import Optional, Tuple
 import sempy_labs._icons as icons
 from uuid import UUID
+from jsonpath_ng.ext import parse
+import json
 
 
 def list_dataflows(workspace: Optional[str | UUID] = None):
@@ -29,34 +35,55 @@ def list_dataflows(workspace: Optional[str | UUID] = None):
         A pandas dataframe showing the dataflows which exist within a workspace.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     columns = {
         "Dataflow Id": "string",
         "Dataflow Name": "string",
+        "Description": "string",
         "Configured By": "string",
         "Users": "string",
-        "Generation": "int",
+        "Generation": "string",
     }
     df = _create_dataframe(columns=columns)
 
-    response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dataflows")
-
-    data = []  # Collect rows here
+    response = _base_api(
+        request=f"/v1.0/myorg/groups/{workspace_id}/dataflows", client="fabric_sp"
+    )
 
+    dfs = []
     for v in response.json().get("value", []):
+        gen = v.get("generation")
         new_data = {
             "Dataflow Id": v.get("objectId"),
             "Dataflow Name": v.get("name"),
+            "Description": "",
             "Configured By": v.get("configuredBy"),
-            "Users": v.get("users", []),
-            "Generation": v.get("generation"),
+            "Users": ", ".join(v.get("users", [])),
+            "Generation": "Gen2" if gen == 2 else "Gen1",
         }
-        data.append(new_data)
+        dfs.append(pd.DataFrame(new_data, index=[0]))
 
-    if data:
-        df = pd.DataFrame(data)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/dataflows",
+        client="fabric_sp",
+        uses_pagination=True,
+    )
+    for r in responses:
+        for v in r.get("value", []):
+            gen = v.get("generation")
+            new_data = {
+                "Dataflow Id": v.get("id"),
+                "Dataflow Name": v.get("displayName"),
+                "Description": v.get("description"),
+                "Configured By": "",
+                "Users": "",
+                "Generation": "Gen2 CI/CD",
+            }
+            dfs.append(pd.DataFrame(new_data, index=[0]))
 
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
     _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
@@ -162,8 +189,10 @@ def list_upstream_dataflows(
     """
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-    (dataflow_name, dataflow_id) = _resolve_dataflow_name_and_id(
-        dataflow=dataflow, workspace=workspace_id
+    (dataflow_name, dataflow_id, dataflow_generation) = (
+        _resolve_dataflow_name_and_id_and_generation(
+            dataflow=dataflow, workspace=workspace_id
+        )
     )
 
     columns = {
@@ -188,7 +217,7 @@ def list_upstream_dataflows(
        tgt_dataflow_id = v.get("targetDataflowId")
        tgt_workspace_id = v.get("groupId")
        tgt_workspace_name = resolve_workspace_name(workspace_id=tgt_workspace_id)
-        (tgt_dataflow_name, _) = _resolve_dataflow_name_and_id(
+        (tgt_dataflow_name, _, _) = _resolve_dataflow_name_and_id_and_generation(
            dataflow=tgt_dataflow_id, workspace=tgt_workspace_id
        )
 
@@ -215,9 +244,9 @@ def list_upstream_dataflows(
     return df
 
 
-def _resolve_dataflow_name_and_id(
+def _resolve_dataflow_name_and_id_and_generation(
     dataflow: str | UUID, workspace: Optional[str | UUID] = None
-) -> Tuple[str, UUID]:
+) -> Tuple[str, UUID, str]:
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
@@ -228,12 +257,250 @@ def _resolve_dataflow_name_and_id(
     else:
         dfD_filt = dfD[dfD["Dataflow Name"] == dataflow]
 
-    if len(dfD_filt) == 0:
+    if dfD_filt.empty:
         raise ValueError(
             f"{icons.red_dot} The '{dataflow}' dataflow does not exist within the '{workspace_name}' workspace."
         )
 
     dataflow_id = dfD_filt["Dataflow Id"].iloc[0]
     dataflow_name = dfD_filt["Dataflow Name"].iloc[0]
+    dataflow_generation = dfD_filt["Generation"].iloc[0]
+
+    return (dataflow_name, dataflow_id, dataflow_generation)
+
+
+def get_dataflow_definition(
+    dataflow: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    decode: bool = True,
+) -> dict:
+    """
+    Obtains the definition of a dataflow. This supports Gen1, Gen2 and Gen 2 CI/CD dataflows.
+
+    This is a wrapper function for the following API: `Dataflows - Get Dataflow <https://learn.microsoft.com/rest/api/power-bi/dataflows/get-dataflow>`_.
+
+    Parameters
+    ----------
+    dataflow : str | uuid.UUID
+        The name or ID of the dataflow.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name.
+        Defaults to None, which resolves to the workspace of the attached lakehouse
+        or if no lakehouse is attached, resolves to the workspace of the notebook.
+    decode : bool, optional
+        If True, decodes the dataflow definition file.
+
+    Returns
+    -------
+    dict
+        The dataflow definition.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    (dataflow_name, dataflow_id, dataflow_generation) = (
+        _resolve_dataflow_name_and_id_and_generation(
+            dataflow=dataflow, workspace=workspace_id
+        )
+    )
+
+    if dataflow_generation == "Gen2 CI/CD":
+        result = _base_api(
+            request=f"/v1/workspaces/{workspace_id}/items/{dataflow_id}/getDefinition",
+            client="fabric_sp",
+            method="post",
+            lro_return_json=True,
+            status_codes=[200, 202],
+        )
+
+        if decode:
+            # Decode the payload from base64
+            definition = {"definition": {"parts": []}}
+
+            for part in result.get("definition", {}).get("parts", []):
+                path = part.get("path")
+                payload = part.get("payload")
+                decoded_payload = _decode_b64(payload)
+                definition["definition"]["parts"].append(
+                    {"path": path, "payload": decoded_payload}
+                )
+            return definition
+        else:
+            return result
+    else:
+        result = _base_api(
+            request=f"/v1.0/myorg/groups/{workspace_id}/dataflows/{dataflow_id}",
+            client="fabric_sp",
+            method="get",
+        ).json()
+
+        return result
+
+
+def upgrade_dataflow(
+    dataflow: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    new_dataflow_name: Optional[str] = None,
+    new_dataflow_workspace: Optional[str | UUID] = None,
+):
+    """
+    Creates a Dataflow Gen2 CI/CD item based on the mashup definition from an existing Gen1/Gen2 dataflow. After running this function, update the connections in the dataflow to ensure the data can be properly refreshed.
 
-    return dataflow_name, dataflow_id
+    Parameters
+    ----------
+    dataflow : str | uuid.UUID
+        The name or ID of the dataflow.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    new_dataflow_name: str, default=None
+        Name of the new dataflow.
+    new_dataflow_workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID of the dataflow to be created.
+        Defaults to None which resolves to the existing workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    # Resolve the workspace name and ID
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    # Resolve the dataflow name and ID
+    (dataflow_name, dataflow_id, dataflow_generation) = (
+        _resolve_dataflow_name_and_id_and_generation(dataflow, workspace_id)
+    )
+
+    if dataflow_generation == "Gen2 CI/CD":
+        # Print an error message that the dataflow is already a native Fabric item
+        print(
+            f"{icons.info} The dataflow '{dataflow_name}' is already a Fabric native Dataflow Gen2 item. No changes made."
+        )
+        return
+
+    (new_dataflow_workspace, new_dataflow_workspace_id) = resolve_workspace_name_and_id(
+        new_dataflow_workspace
+    )
+
+    # If no new dataflow name is provided, use the existing dataflow name
+    if not new_dataflow_name:
+        new_dataflow_name = dataflow_name
+
+    # Get dataflow definition
+    definition = get_dataflow_definition(dataflow, workspace_id)
+
+    # Check for linked table references
+    matches = (
+        parse("$['pbi:mashup'].connectionOverrides[*].kind").find(definition) or []
+    )
+    if any(match.value in {"PowerPlatformDataflows", "PowerBI"} for match in matches):
+        print(
+            f"""{icons.red_dot} The dataflow '{dataflow_name}' contains a linked table reference to an existing dataflow as a connection source and will not be upgraded. No changes were made.
+            - To track the upstream lineage of linked tables across dataflows use the list_upstream_dataflows function.
+            - To automatically remove the tables and upgrade the existing dataflow use the upgrade_powerbippdf_dataflow function."""
+        )
+        return
+
+    description = get_jsonpath_value(data=definition, path="$.description")
+
+    payload = {
+        "displayName": new_dataflow_name,
+    }
+    if description:
+        payload["description"] = description
+
+    # Query Groups
+    matches = parse("$.annotations[?(@.name=='pbi:QueryGroups')].value").find(
+        definition
+    )
+    query_groups_value = json.loads(matches[0].value) if matches else []
+
+    # Prepare the dataflow definition
+    query_metadata = {
+        "formatVersion": "202502",
+        "computeEngineSettings": {},  # How to set this?
+        "name": new_dataflow_name,
+        "queryGroups": query_groups_value,
+        "documentLocale": get_jsonpath_value(data=definition, path="$.culture"),
+        "queriesMetadata": get_jsonpath_value(
+            data=definition, path="$['pbi:mashup'].queriesMetadata"
+        ),
+        "fastCombine": get_jsonpath_value(
+            data=definition, path="$['pbi:mashup'].fastCombine", default=False
+        ),
+        "allowNativeQueries": get_jsonpath_value(
+            data=definition, path="$['pbi:mashup'].allowNativeQueries", default=False
+        ),
+        # "connections": [],
+    }
+
+    mashup_doc = get_jsonpath_value(data=definition, path="$['pbi:mashup'].document")
+
+    # Add the dataflow definition to the payload
+    new_definition = {
+        "parts": [
+            {
+                "path": "queryMetadata.json",
+                "payload": _conv_b64(query_metadata),
+                "payloadType": "InlineBase64",
+            },
+            {
+                "path": "mashup.pq",
+                "payload": _conv_b64(mashup_doc, json_dumps=False),
+                "payloadType": "InlineBase64",
+            },
+        ]
+    }
+
+    create_dataflow(
+        name=new_dataflow_name,
+        workspace=new_dataflow_workspace,
+        definition=new_definition,
+    )
+
+
+def create_dataflow(
+    name: str,
+    workspace: Optional[str | UUID] = None,
+    description: Optional[str] = None,
+    definition: Optional[dict] = None,
+):
+    """
+    Creates a native Fabric Dataflow Gen2 CI/CD item.
+
+    Parameters
+    ----------
+    name : str
+        The name the dataflow.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    description : str, default=None
+        The description of the dataflow.
+    definition : dict, default=None
+        The definition of the dataflow in the form of a dictionary.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    payload = {
+        "displayName": name,
+    }
+    if description:
+        payload["description"] = description
+
+    if definition:
+        payload["definition"] = definition
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/dataflows",
+        method="post",
+        payload=payload,
+        client="fabric_sp",
+        lro_return_json=True,
+        status_codes=[201, 202],
+    )
+
+    print(
+        f"{icons.green_dot} The dataflow '{name}' has been created within the '{workspace_name}' workspace."
+    )
sempy_labs/_daxformatter.py ADDED
@@ -0,0 +1,80 @@
+import requests
+from typing import List, Optional
+from sempy_labs._a_lib_info import lib_name, lib_version
+from sempy._utils._log import log
+
+
+@log
+def _format_dax(
+    expressions: str | List[str],
+    skip_space_after_function_name: bool = False,
+    metadata: Optional[List[dict]] = None,
+) -> List[str]:
+
+    if isinstance(expressions, str):
+        expressions = [expressions]
+        metadata = [metadata] if metadata else [{}]
+
+    # Add variable assignment to each expression
+    expressions = [f"x :={item}" for item in expressions]
+
+    url = "https://daxformatter.azurewebsites.net/api/daxformatter/daxtextformatmulti"
+
+    payload = {
+        "Dax": expressions,
+        "MaxLineLength": 0,
+        "SkipSpaceAfterFunctionName": skip_space_after_function_name,
+        "ListSeparator": ",",
+        "DecimalSeparator": ".",
+    }
+
+    headers = {
+        "Accept": "application/json, text/javascript, */*; q=0.01",
+        "Accept-Encoding": "gzip,deflate",
+        "Accept-Language": "en-US,en;q=0.8",
+        "Content-Type": "application/json; charset=UTF-8",
+        "Host": "daxformatter.azurewebsites.net",
+        "Expect": "100-continue",
+        "Connection": "Keep-Alive",
+        "CallerApp": lib_name,
+        "CallerVersion": lib_version,
+    }
+
+    response = requests.post(url, json=payload, headers=headers)
+    result = []
+    for idx, dax in enumerate(response.json()):
+        formatted_dax = dax.get("formatted")
+        errors = dax.get("errors")
+        if errors:
+            meta = metadata[idx] if metadata and idx < len(metadata) else {}
+            obj_name = meta.get("name", "Unknown")
+            table_name = meta.get("table", "Unknown")
+            obj_type = meta.get("type", "Unknown")
+            if obj_type == "calculated_tables":
+                raise ValueError(
+                    f"DAX formatting failed for the '{obj_name}' calculated table: {errors}"
+                )
+            elif obj_type == "calculated_columns":
+                raise ValueError(
+                    f"DAX formatting failed for the '{table_name}'[{obj_name}] calculated column: {errors}"
+                )
+            elif obj_type == "calculation_items":
+                raise ValueError(
+                    f"DAX formatting failed for the '{table_name}'[{obj_name}] calculation item: {errors}"
+                )
+            elif obj_type == "measures":
+                raise ValueError(
+                    f"DAX formatting failed for the '{obj_name}' measure: {errors}"
+                )
+            elif obj_type == "rls":
+                raise ValueError(
+                    f"DAX formatting failed for the row level security expression on the '{table_name}' table within the '{obj_name}' role: {errors}"
+                )
+            else:
+                NotImplementedError()
+        else:
+            if formatted_dax.startswith("x :="):
+                formatted_dax = formatted_dax[4:]
+            formatted_dax = formatted_dax.strip()
+            result.append(formatted_dax)
+    return result
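A hedged sketch of how this new DAX formatting helper might be exercised. _format_dax is underscore-prefixed, so it is presumably called by other sempy-labs functions rather than by users directly; the measure metadata below is illustrative only, and the call requires network access to daxformatter.azurewebsites.net.

# Illustrative only: _format_dax is an internal helper and may change without notice.
from sempy_labs._daxformatter import _format_dax

formatted = _format_dax(
    expressions=["SUMX(Sales,Sales[Qty]*Sales[Price])"],
    metadata=[{"name": "Total Sales", "table": "Sales", "type": "measures"}],
)
print(formatted[0])  # DAX Formatter output with the temporary "x :=" prefix stripped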
sempy_labs/_delta_analyzer_history.py CHANGED
@@ -55,7 +55,10 @@ def delta_analyzer_history(
 
     table_path = create_abfss_path(lakehouse_id, workspace_id, table_name, schema)
     local_path = _mount(lakehouse=lakehouse, workspace=workspace)
-    table_path_local = f"{local_path}/Tables/{table_name}"
+    if schema:  # use schema if specified
+        table_path_local = f"{local_path}/Tables/{schema}/{table_name}"
+    else:
+        table_path_local = f"{local_path}/Tables/{table_name}"
     delta_table_path = f"{table_path}/_delta_log"
 
     files = notebookutils.fs.ls(delta_table_path)
sempy_labs/_deployment_pipelines.py CHANGED
@@ -7,8 +7,10 @@ from sempy_labs._helper_functions import (
 )
 import sempy_labs._icons as icons
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def list_deployment_pipelines() -> pd.DataFrame:
     """
     Shows a list of deployment pipelines the user can access.
@@ -46,6 +48,7 @@ def list_deployment_pipelines() -> pd.DataFrame:
     return df
 
 
+@log
 def list_deployment_pipeline_stages(deployment_pipeline: str | UUID) -> pd.DataFrame:
     """
     Shows the specified deployment pipeline stages.
@@ -104,6 +107,7 @@ def list_deployment_pipeline_stages(deployment_pipeline: str | UUID) -> pd.DataFrame:
     return df
 
 
+@log
 def list_deployment_pipeline_stage_items(
     deployment_pipeline: str | UUID,
     stage: str | UUID,
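The change above is representative: this release imports log from sempy._utils._log and decorates public functions with @log across many of the modules in the file list (documentation, environments, eventhouses, eventstreams, and others below). A minimal sketch of the pattern as applied; the decorator's internals are not part of this diff and presumably handle Semantic Link's invocation logging.

# Sketch of the recurring 0.11.0 change (not a new API surface):
# import the decorator once per module, then decorate each public function.
import pandas as pd
from sempy._utils._log import log


@log  # assumed to record the invocation via Semantic Link's logging utilities
def list_deployment_pipelines() -> pd.DataFrame:
    """Shows a list of deployment pipelines the user can access."""
    ...  # function body unchanged by this release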
sempy_labs/_documentation.py CHANGED
@@ -2,8 +2,10 @@ import sempy
 import sempy.fabric as fabric
 import pandas as pd
 from typing import List, Optional
+from sempy._utils._log import log
 
 
+@log
 def list_all_items(workspaces: Optional[str | List[str]] = None):
 
     df = pd.DataFrame(
@@ -41,6 +43,7 @@ def list_all_items(workspaces: Optional[str | List[str]] = None):
     return df
 
 
+@log
 def data_dictionary(dataset: str, workspace: Optional[str | None] = None):
 
     from sempy_labs.tom import connect_semantic_model
sempy_labs/_environments.py CHANGED
@@ -11,8 +11,10 @@ from sempy_labs._helper_functions import (
     create_item,
 )
 from uuid import UUID
+from sempy._utils._log import log
 
 
+@log
 def create_environment(
     environment: str,
     description: Optional[str] = None,
@@ -43,6 +45,7 @@ def create_environment(
     )
 
 
+@log
 def list_environments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the environments within a workspace.
@@ -85,6 +88,7 @@ def list_environments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             pub = v.get("properties", {}).get("publishDetails", {})
@@ -103,11 +107,15 @@ def list_environments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 .get("sparkSettings", {})
                 .get("state"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def delete_environment(environment: str | UUID, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric environment.
@@ -127,6 +135,7 @@ def delete_environment(environment: str | UUID, workspace: Optional[str | UUID] = None):
     delete_item(item=environment, type="Environment", workspace=workspace)
 
 
+@log
 def publish_environment(
     environment: str | UUID, workspace: Optional[str | UUID] = None
 ):
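Another recurring change in this release, visible in list_environments above and in the list_* functions that follow: instead of calling pd.concat inside the loop (which copies the growing frame on every iteration), one-row frames are collected in a dfs list and concatenated once at the end, guarded by if dfs. A small self-contained sketch of the two shapes, with illustrative column names:

import pandas as pd

rows = [{"Eventhouse Name": f"eh{i}", "Eventhouse Id": str(i)} for i in range(3)]

# Pre-0.11.0 shape: re-concatenate inside the loop, copying the growing frame each time.
df = pd.DataFrame(columns=["Eventhouse Name", "Eventhouse Id"])
for r in rows:
    df = pd.concat([df, pd.DataFrame(r, index=[0])], ignore_index=True)

# 0.11.0 shape: collect one-row frames and concatenate once, only if anything was collected.
dfs = [pd.DataFrame(r, index=[0]) for r in rows]
if dfs:
    df = pd.concat(dfs, ignore_index=True)

print(df)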
sempy_labs/_eventhouses.py CHANGED
@@ -1,20 +1,20 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
     _base_api,
-    resolve_item_id,
     _create_dataframe,
     _conv_b64,
-    _decode_b64,
     delete_item,
     create_item,
     get_item_definition,
+    resolve_workspace_id,
 )
 from uuid import UUID
 import sempy_labs._icons as icons
+from sempy._utils._log import log
 
 
+@log
 def create_eventhouse(
     name: str,
     definition: Optional[dict],
@@ -66,6 +66,7 @@ def create_eventhouse(
     )
 
 
+@log
 def list_eventhouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the eventhouses within a workspace.
@@ -94,7 +95,7 @@ def list_eventhouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
 
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/eventhouses",
@@ -102,6 +103,7 @@ def list_eventhouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             new_data = {
@@ -109,11 +111,15 @@ def list_eventhouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Eventhouse Id": v.get("id"),
                 "Description": v.get("description"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def delete_eventhouse(name: str, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric eventhouse.
@@ -133,6 +139,7 @@ def delete_eventhouse(name: str, workspace: Optional[str | UUID] = None):
     delete_item(item=name, type="Eventhouse", workspace=workspace)
 
 
+@log
 def get_eventhouse_definition(
     eventhouse: str | UUID,
     workspace: Optional[str | UUID] = None,
sempy_labs/_eventstreams.py CHANGED
@@ -1,16 +1,18 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
     _base_api,
     delete_item,
     _create_dataframe,
     create_item,
+    resolve_workspace_id,
 )
 from uuid import UUID
 import sempy_labs._icons as icons
+from sempy._utils._log import log
 
 
+@log
 def list_eventstreams(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the eventstreams within a workspace.
@@ -37,11 +39,12 @@ def list_eventstreams(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/eventstreams", uses_pagination=True
     )
 
+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             new_data = {
@@ -49,11 +52,15 @@ def list_eventstreams(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Eventstream Id": v.get("id"),
                 "Description": v.get("description"),
             }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
 
     return df
 
 
+@log
 def create_eventstream(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -79,6 +86,7 @@ def create_eventstream(
     )
 
 
+@log
 def delete_eventstream(
     eventstream: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
 ):
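A minimal usage sketch of the eventstream functions touched above; the workspace and eventstream names are hypothetical, and the imports come from sempy_labs._eventstreams as shown in this diff.

# Illustrative only: workspace and eventstream names are hypothetical.
from sempy_labs._eventstreams import (
    list_eventstreams,
    create_eventstream,
    delete_eventstream,
)

workspace = "Realtime Analytics"  # hypothetical workspace name

create_eventstream(name="ClickStream", description="Web click events", workspace=workspace)
print(list_eventstreams(workspace=workspace))  # includes the new item with its Id and Description
delete_eventstream(eventstream="ClickStream", workspace=workspace)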