semantic-link-labs 0.11.0__py3-none-any.whl → 0.11.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (131)
  1. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/METADATA +6 -4
  2. semantic_link_labs-0.11.2.dist-info/RECORD +210 -0
  3. sempy_labs/__init__.py +56 -56
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_ai.py +1 -1
  6. sempy_labs/_capacities.py +2 -2
  7. sempy_labs/_capacity_migration.py +5 -5
  8. sempy_labs/_clear_cache.py +1 -1
  9. sempy_labs/_connections.py +2 -2
  10. sempy_labs/_dashboards.py +16 -16
  11. sempy_labs/_data_pipelines.py +1 -1
  12. sempy_labs/_dataflows.py +101 -26
  13. sempy_labs/_dax.py +3 -3
  14. sempy_labs/_dax_query_view.py +1 -1
  15. sempy_labs/_delta_analyzer.py +4 -4
  16. sempy_labs/_delta_analyzer_history.py +1 -1
  17. sempy_labs/_deployment_pipelines.py +1 -1
  18. sempy_labs/_environments.py +22 -21
  19. sempy_labs/_eventhouses.py +12 -11
  20. sempy_labs/_eventstreams.py +12 -11
  21. sempy_labs/_external_data_shares.py +23 -22
  22. sempy_labs/_gateways.py +47 -45
  23. sempy_labs/_generate_semantic_model.py +3 -3
  24. sempy_labs/_git.py +1 -1
  25. sempy_labs/_graphQL.py +12 -11
  26. sempy_labs/_job_scheduler.py +56 -54
  27. sempy_labs/_kql_databases.py +16 -17
  28. sempy_labs/_kql_querysets.py +12 -11
  29. sempy_labs/_kusto.py +2 -2
  30. sempy_labs/_list_functions.py +1 -1
  31. sempy_labs/_managed_private_endpoints.py +18 -15
  32. sempy_labs/_mirrored_databases.py +16 -15
  33. sempy_labs/_mirrored_warehouses.py +12 -11
  34. sempy_labs/_ml_experiments.py +11 -10
  35. sempy_labs/_ml_models.py +11 -10
  36. sempy_labs/_model_auto_build.py +3 -3
  37. sempy_labs/_model_bpa.py +5 -5
  38. sempy_labs/_model_bpa_bulk.py +3 -3
  39. sempy_labs/_model_dependencies.py +1 -1
  40. sempy_labs/_mounted_data_factories.py +12 -12
  41. sempy_labs/_notebooks.py +1 -1
  42. sempy_labs/_one_lake_integration.py +1 -1
  43. sempy_labs/_query_scale_out.py +1 -1
  44. sempy_labs/_refresh_semantic_model.py +1 -1
  45. sempy_labs/_semantic_models.py +30 -28
  46. sempy_labs/_spark.py +1 -1
  47. sempy_labs/_sql.py +1 -1
  48. sempy_labs/_sql_endpoints.py +12 -11
  49. sempy_labs/_sqldatabase.py +15 -15
  50. sempy_labs/_tags.py +11 -10
  51. sempy_labs/_translations.py +1 -1
  52. sempy_labs/_user_delegation_key.py +2 -2
  53. sempy_labs/_variable_libraries.py +13 -12
  54. sempy_labs/_vertipaq.py +3 -3
  55. sempy_labs/_vpax.py +1 -1
  56. sempy_labs/_warehouses.py +15 -14
  57. sempy_labs/_workloads.py +1 -1
  58. sempy_labs/_workspace_identity.py +1 -1
  59. sempy_labs/_workspaces.py +14 -13
  60. sempy_labs/admin/__init__.py +18 -18
  61. sempy_labs/admin/_activities.py +46 -46
  62. sempy_labs/admin/_apps.py +28 -26
  63. sempy_labs/admin/_artifacts.py +15 -15
  64. sempy_labs/admin/_basic_functions.py +1 -2
  65. sempy_labs/admin/_capacities.py +86 -82
  66. sempy_labs/admin/_dataflows.py +2 -2
  67. sempy_labs/admin/_datasets.py +50 -48
  68. sempy_labs/admin/_domains.py +25 -19
  69. sempy_labs/admin/_external_data_share.py +24 -22
  70. sempy_labs/admin/_git.py +17 -17
  71. sempy_labs/admin/_items.py +47 -45
  72. sempy_labs/admin/_reports.py +61 -58
  73. sempy_labs/admin/_scanner.py +2 -2
  74. sempy_labs/admin/_shared.py +18 -18
  75. sempy_labs/admin/_tags.py +2 -2
  76. sempy_labs/admin/_tenant.py +57 -51
  77. sempy_labs/admin/_users.py +16 -15
  78. sempy_labs/admin/_workspaces.py +2 -2
  79. sempy_labs/directlake/__init__.py +12 -12
  80. sempy_labs/directlake/_directlake_schema_compare.py +3 -3
  81. sempy_labs/directlake/_directlake_schema_sync.py +9 -7
  82. sempy_labs/directlake/_dl_helper.py +1 -1
  83. sempy_labs/directlake/_generate_shared_expression.py +1 -1
  84. sempy_labs/directlake/_get_directlake_lakehouse.py +1 -1
  85. sempy_labs/directlake/_guardrails.py +1 -1
  86. sempy_labs/directlake/_list_directlake_model_calc_tables.py +3 -3
  87. sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -1
  88. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +3 -3
  89. sempy_labs/directlake/_update_directlake_partition_entity.py +4 -4
  90. sempy_labs/directlake/_warm_cache.py +3 -3
  91. sempy_labs/graph/__init__.py +3 -3
  92. sempy_labs/graph/_groups.py +81 -78
  93. sempy_labs/graph/_teams.py +21 -21
  94. sempy_labs/graph/_users.py +111 -10
  95. sempy_labs/lakehouse/__init__.py +7 -7
  96. sempy_labs/lakehouse/_blobs.py +30 -30
  97. sempy_labs/lakehouse/_get_lakehouse_columns.py +2 -2
  98. sempy_labs/lakehouse/_get_lakehouse_tables.py +29 -27
  99. sempy_labs/lakehouse/_helper.py +30 -2
  100. sempy_labs/lakehouse/_lakehouse.py +2 -2
  101. sempy_labs/lakehouse/_livy_sessions.py +47 -42
  102. sempy_labs/lakehouse/_shortcuts.py +22 -21
  103. sempy_labs/migration/__init__.py +8 -8
  104. sempy_labs/migration/_create_pqt_file.py +2 -2
  105. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +3 -3
  106. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +3 -4
  107. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +2 -2
  108. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +4 -4
  109. sempy_labs/migration/_migration_validation.py +1 -2
  110. sempy_labs/migration/_refresh_calc_tables.py +2 -2
  111. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +2 -2
  112. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +40 -40
  113. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +1 -1
  114. sempy_labs/report/__init__.py +10 -10
  115. sempy_labs/report/_download_report.py +2 -2
  116. sempy_labs/report/_export_report.py +2 -2
  117. sempy_labs/report/_generate_report.py +1 -1
  118. sempy_labs/report/_paginated.py +1 -1
  119. sempy_labs/report/_report_bpa.py +4 -3
  120. sempy_labs/report/_report_functions.py +3 -3
  121. sempy_labs/report/_report_list_functions.py +3 -3
  122. sempy_labs/report/_report_rebind.py +1 -1
  123. sempy_labs/report/_reportwrapper.py +247 -249
  124. sempy_labs/report/_save_report.py +3 -3
  125. sempy_labs/theme/_org_themes.py +35 -1
  126. sempy_labs/tom/__init__.py +1 -1
  127. sempy_labs/tom/_model.py +23 -20
  128. semantic_link_labs-0.11.0.dist-info/RECORD +0 -210
  129. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/WHEEL +0 -0
  130. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/licenses/LICENSE +0 -0
  131. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/top_level.txt +0 -0
@@ -2,20 +2,20 @@ import sempy.fabric as fabric
 from typing import Optional, List
 from sempy._utils._log import log
 import sempy_labs._icons as icons
-from sempy_labs._workspaces import assign_workspace_to_capacity
-from sempy_labs.admin import (
+from ._workspaces import assign_workspace_to_capacity
+from .admin import (
     assign_workspaces_to_capacity,
 )
-from sempy_labs.admin._capacities import (
+from .admin._capacities import (
     _list_capacities_meta,
     list_capacities,
 )
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_capacity_id,
     convert_to_alphanumeric_lowercase,
     _base_api,
 )
-from sempy_labs._capacities import create_fabric_capacity
+from ._capacities import create_fabric_capacity
 from uuid import UUID
 
 
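Note: the recurring change in this hunk, and across most of the modules in the file list above, is a switch from absolute `sempy_labs.*` imports to package-relative imports. A minimal sketch of how Python resolves such an import, using only the standard library (module names taken from the hunk above):

    from importlib.util import resolve_name

    # "from ._helper_functions import _base_api" inside a sempy_labs module
    # resolves against the containing package:
    print(resolve_name("._helper_functions", package="sempy_labs"))
    # -> sempy_labs._helper_functions
    print(resolve_name(".admin._capacities", package="sempy_labs"))
    # -> sempy_labs.admin._capacities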
@@ -1,5 +1,5 @@
 import sempy.fabric as fabric
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     is_default_semantic_model,
     _get_adls_client,
     resolve_workspace_name_and_id,
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _is_valid_uuid,
     resolve_workspace_name_and_id,
     _update_dataframe_datatypes,
@@ -10,7 +10,7 @@ from sempy_labs._helper_functions import (
 )
 from uuid import UUID
 import sempy_labs._icons as icons
-from sempy_labs._gateways import _resolve_gateway_id
+from ._gateways import _resolve_gateway_id
 from sempy._utils._log import log
 
 
sempy_labs/_dashboards.py CHANGED
@@ -1,7 +1,7 @@
 from typing import Optional
 from uuid import UUID
 import pandas as pd
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _create_dataframe,
     _base_api,
     resolve_workspace_id,
@@ -44,22 +44,22 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
 
     response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards")
 
-    dfs = []
+    rows = []
     for v in response.json().get("value", []):
-        new_data = {
-            "Dashboard ID": v.get("id"),
-            "Dashboard Name": v.get("displayName"),
-            "Read Only": v.get("isReadOnly"),
-            "Web URL": v.get("webUrl"),
-            "Embed URL": v.get("embedUrl"),
-            "Data Classification": v.get("dataClassification"),
-            "Users": v.get("users"),
-            "Subscriptions": v.get("subscriptions"),
-        }
-        dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+        rows.append(
+            {
+                "Dashboard ID": v.get("id"),
+                "Dashboard Name": v.get("displayName"),
+                "Read Only": v.get("isReadOnly"),
+                "Web URL": v.get("webUrl"),
+                "Embed URL": v.get("embedUrl"),
+                "Data Classification": v.get("dataClassification"),
+                "Users": v.get("users"),
+                "Subscriptions": v.get("subscriptions"),
+            }
+        )
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
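Note: this is the first of several hunks in this release that replace the per-row `pd.DataFrame(..., index=[0])` plus `pd.concat` pattern with a plain list of dicts and a single `pd.DataFrame(rows, columns=list(columns.keys()))` call, taking the column order from the module's `columns` map referenced in the hunk. A minimal sketch of the new pattern, with hypothetical stand-ins for the `columns` map and the REST payload:

    import pandas as pd

    # Hypothetical stand-ins; the real values come from the REST response and
    # the module-level `columns` map used in the hunk above.
    columns = {"Dashboard ID": "string", "Dashboard Name": "string", "Read Only": "bool"}
    value = [
        {"id": "1", "displayName": "Sales", "isReadOnly": False},
        {"id": "2", "displayName": "Finance"},  # missing keys simply become None/NaN
    ]

    rows = [
        {
            "Dashboard ID": v.get("id"),
            "Dashboard Name": v.get("displayName"),
            "Read Only": v.get("isReadOnly"),
        }
        for v in value
    ]

    # One DataFrame built from all rows at once, column order pinned to the schema.
    df = pd.DataFrame(rows, columns=list(columns.keys()))
    print(df)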
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _decode_b64,
     _base_api,
sempy_labs/_dataflows.py CHANGED
@@ -1,5 +1,5 @@
 import pandas as pd
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _is_valid_uuid,
     _update_dataframe_datatypes,
@@ -51,18 +51,19 @@ def list_dataflows(workspace: Optional[str | UUID] = None):
         request=f"/v1.0/myorg/groups/{workspace_id}/dataflows", client="fabric_sp"
     )
 
-    dfs = []
+    rows = []
     for v in response.json().get("value", []):
         gen = v.get("generation")
-        new_data = {
-            "Dataflow Id": v.get("objectId"),
-            "Dataflow Name": v.get("name"),
-            "Description": "",
-            "Configured By": v.get("configuredBy"),
-            "Users": ", ".join(v.get("users", [])),
-            "Generation": "Gen2" if gen == 2 else "Gen1",
-        }
-        dfs.append(pd.DataFrame(new_data, index=[0]))
+        rows.append(
+            {
+                "Dataflow Id": v.get("objectId"),
+                "Dataflow Name": v.get("name"),
+                "Description": "",
+                "Configured By": v.get("configuredBy"),
+                "Users": ", ".join(v.get("users", [])),
+                "Generation": "Gen2" if gen == 2 else "Gen1",
+            }
+        )
 
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/dataflows",
@@ -72,18 +73,19 @@ def list_dataflows(workspace: Optional[str | UUID] = None):
     for r in responses:
         for v in r.get("value", []):
             gen = v.get("generation")
-            new_data = {
-                "Dataflow Id": v.get("id"),
-                "Dataflow Name": v.get("displayName"),
-                "Description": v.get("description"),
-                "Configured By": "",
-                "Users": "",
-                "Generation": "Gen2 CI/CD",
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "Dataflow Id": v.get("id"),
+                    "Dataflow Name": v.get("displayName"),
+                    "Description": v.get("description"),
+                    "Configured By": "",
+                    "Users": "",
+                    "Generation": "Gen2 CI/CD",
+                }
+            )
 
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
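Note: `list_dataflows` now feeds both API responses into the same `rows` list before building one frame. Items from the v1.0 `myorg` endpoint keep the Gen1/Gen2 classification derived from their `generation` field, while items from the v1 `workspaces` endpoint are labeled "Gen2 CI/CD" unconditionally in the diff above. A small sketch of just that classification step, on hypothetical payloads:

    legacy_items = [{"objectId": "a", "name": "Sales DF", "generation": 2}]
    cicd_items = [{"id": "b", "displayName": "Finance DF"}]

    rows = []
    for v in legacy_items:
        gen = v.get("generation")
        # v1.0 "myorg" endpoint: generation 2 -> Gen2, anything else -> Gen1
        rows.append({"Dataflow Id": v.get("objectId"), "Generation": "Gen2" if gen == 2 else "Gen1"})
    for v in cicd_items:
        # v1 "workspaces" endpoint items are labeled Gen2 CI/CD
        rows.append({"Dataflow Id": v.get("id"), "Generation": "Gen2 CI/CD"})
    print(rows)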
@@ -414,6 +416,32 @@ def upgrade_dataflow(
     )
     query_groups_value = json.loads(matches[0].value) if matches else []
 
+    queries_metadata = get_jsonpath_value(
+        data=definition, path="$['pbi:mashup'].queriesMetadata"
+    )
+
+    default_staging = True if "DefaultStaging" in queries_metadata else False
+
+    # Collect keys to delete
+    keys_to_delete = [
+        key
+        for key in queries_metadata
+        if key.endswith("_DataDestination")
+        or key.endswith("_WriteToDataDestination")
+        or key.endswith("_TransformForWriteToDataDestination")
+        or key == "FastCopyStaging"
+    ]
+
+    # Delete them
+    for key in keys_to_delete:
+        del queries_metadata[key]
+
+    # Set load enabled and isHidden
+    for key, items in queries_metadata.items():
+        items["loadEnabled"] = False
+        if key in ["DefaultDestination", "DefaultStaging"]:
+            items["isHidden"] = True
+
     # Prepare the dataflow definition
     query_metadata = {
         "formatVersion": "202502",
@@ -421,9 +449,7 @@ def upgrade_dataflow(
         "name": new_dataflow_name,
         "queryGroups": query_groups_value,
         "documentLocale": get_jsonpath_value(data=definition, path="$.culture"),
-        "queriesMetadata": get_jsonpath_value(
-            data=definition, path="$['pbi:mashup'].queriesMetadata"
-        ),
+        "queriesMetadata": queries_metadata,
         "fastCombine": get_jsonpath_value(
             data=definition, path="$['pbi:mashup'].fastCombine", default=False
         ),
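Note: the block added before the `query_metadata` literal, together with the swap to the cleaned `queries_metadata` variable in the hunk just above, prunes the Gen1 queriesMetadata before it is reused in the Gen2 definition: entries that only exist to support data destinations or fast-copy staging are dropped, loading is disabled for everything that remains, and the DefaultDestination/DefaultStaging entries are hidden. A runnable sketch of the same steps on a toy dict (the real content comes from the Gen1 dataflow definition):

    queries_metadata = {
        "Orders": {},
        "Orders_DataDestination": {},
        "Orders_WriteToDataDestination": {},
        "FastCopyStaging": {},
        "DefaultStaging": {},
    }

    default_staging = "DefaultStaging" in queries_metadata

    # Drop destination/staging helper queries.
    keys_to_delete = [
        key
        for key in queries_metadata
        if key.endswith("_DataDestination")
        or key.endswith("_WriteToDataDestination")
        or key.endswith("_TransformForWriteToDataDestination")
        or key == "FastCopyStaging"
    ]
    for key in keys_to_delete:
        del queries_metadata[key]

    # Disable loading everywhere; hide the default staging/destination queries.
    for key, items in queries_metadata.items():
        items["loadEnabled"] = False
        if key in ["DefaultDestination", "DefaultStaging"]:
            items["isHidden"] = True

    print(default_staging, queries_metadata)
    # True {'Orders': {'loadEnabled': False},
    #       'DefaultStaging': {'loadEnabled': False, 'isHidden': True}}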
@@ -433,8 +459,57 @@ def upgrade_dataflow(
         # "connections": [],
     }
 
+    fast_copy = get_jsonpath_value(
+        data=definition, path="$['ppdf:fastCopy']", default=False
+    )
+    max_concurrency = get_jsonpath_value(
+        data=definition, path="$['ppdf:maxConcurrency']"
+    )
+    if fast_copy:
+        query_metadata["computeEngineSettings"] = {}
+
+        if max_concurrency:
+            query_metadata["computeEngineSettings"]["maxConcurrency"] = max_concurrency
+
     mashup_doc = get_jsonpath_value(data=definition, path="$['pbi:mashup'].document")
 
+    # Remove the FastCopyStaging section if it exists
+    new_mashup_doc = ""
+    if default_staging and fast_copy:
+        new_mashup_doc = '[DefaultOutputDestinationSettings = [DestinationDefinition = [Kind = "Reference", QueryName = "DefaultDestination", IsNewTarget = true], UpdateMethod = [Kind = "Replace"]], StagingDefinition = [Kind = "FastCopy"]]\r\nsection Section1'
+    elif default_staging and not fast_copy:
+        new_mashup_doc = '[DefaultOutputDestinationSettings = [DestinationDefinition = [Kind = "Reference", QueryName = "DefaultDestination", IsNewTarget = true], UpdateMethod = [Kind = "Replace"]]\r\nsection Section1'
+    elif not default_staging and fast_copy:
+        new_mashup_doc = '[StagingDefinition = [Kind = "FastCopy"]]\r\nsection Section1'
+    else:
+        new_mashup_doc = "section Section1"
+    for i in mashup_doc.split(";\r\nshared "):
+        # if 'IsParameterQuery=true' in i:
+        # Add to queries_metadata
+        if not (
+            "FastCopyStaging = let" in i
+            or '_WriteToDataDestination" = let' in i
+            or "_WriteToDataDestination = let" in i
+            or '_DataDestination" = let' in i
+            or "_DataDestination = let" in i
+            or '_TransformForWriteToDataDestination" = let' in i
+            or "_TransformForWriteToDataDestination = let" in i
+        ):
+            if i != "section Section1":
+                if default_staging and (
+                    "IsParameterQuery=true" not in i
+                    and not i.startswith("DefaultStaging")
+                    and not i.startswith("DefaultDestination")
+                ):
+                    new_mashup_doc += (
+                        ";\r\n[BindToDefaultDestination = true]\r\nshared " + i
+                    )
+                else:
+                    new_mashup_doc += ";\r\nshared " + i
+    new_mashup_doc = f"{new_mashup_doc};"
+
+    return new_mashup_doc, query_metadata
+
 
     # Add the dataflow definition to the payload
     new_definition = {
@@ -445,7 +520,7 @@ def upgrade_dataflow(
             },
             {
                 "path": "mashup.pq",
-                "payload": _conv_b64(mashup_doc, json_dumps=False),
+                "payload": _conv_b64(new_mashup_doc, json_dumps=False),
                 "payloadType": "InlineBase64",
             },
         ]
sempy_labs/_dax.py CHANGED
@@ -1,17 +1,17 @@
 import sempy.fabric as fabric
 import pandas as pd
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     format_dax_object_name,
     resolve_dataset_name_and_id,
     _base_api,
     generate_guid,
 )
-from sempy_labs._model_dependencies import get_model_calc_dependencies
+from ._model_dependencies import get_model_calc_dependencies
 from typing import Optional, List, Tuple
 from sempy._utils._log import log
 from uuid import UUID
-from sempy_labs.directlake._warm_cache import _put_columns_into_memory
+from .directlake._warm_cache import _put_columns_into_memory
 import sempy_labs._icons as icons
 import time
 
@@ -1,6 +1,6 @@
 from typing import Optional
 from uuid import UUID
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_dataset_id,
     _get_fabric_context_setting,
     resolve_workspace_id,
@@ -5,7 +5,7 @@ import os
 from uuid import UUID
 from typing import Dict, Optional
 import pyarrow.parquet as pq
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     create_abfss_path,
     save_as_delta_table,
     _get_column_aggregate,
@@ -21,11 +21,11 @@ from sempy_labs._helper_functions import (
     _get_delta_table,
 )
 from sempy._utils._log import log
-from sempy_labs.lakehouse._get_lakehouse_tables import get_lakehouse_tables
-from sempy_labs.lakehouse._lakehouse import (
+from .lakehouse._get_lakehouse_tables import get_lakehouse_tables
+from .lakehouse._lakehouse import (
     lakehouse_attached,
 )
-from sempy_labs.lakehouse._helper import (
+from .lakehouse._helper import (
     is_v_ordered,
 )
 import sempy_labs._icons as icons
@@ -1,7 +1,7 @@
 import pandas as pd
 from typing import Optional
 import pyarrow.parquet as pq
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     create_abfss_path,
     resolve_workspace_id,
     resolve_lakehouse_id,
@@ -1,5 +1,5 @@
 import pandas as pd
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _is_valid_uuid,
     _base_api,
     _update_dataframe_datatypes,
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_workspace_id,
     _base_api,
@@ -88,29 +88,30 @@ def list_environments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
-    dfs = []
+    rows = []
    for r in responses:
         for v in r.get("value", []):
             pub = v.get("properties", {}).get("publishDetails", {})
-            new_data = {
-                "Environment Name": v.get("displayName"),
-                "Environment Id": v.get("id"),
-                "Description": v.get("description"),
-                "Publish State": pub.get("state"),
-                "Publish Target Version": pub.get("targetVersion"),
-                "Publish Start Time": pub.get("startTime"),
-                "Publish End Time": pub.get("endTime"),
-                "Spark Libraries State": pub.get("componentPublishInfo", {})
-                .get("sparkLibraries", {})
-                .get("state"),
-                "Spark Settings State": pub.get("componentPublishInfo", {})
-                .get("sparkSettings", {})
-                .get("state"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Environment Name": v.get("displayName"),
+                    "Environment Id": v.get("id"),
+                    "Description": v.get("description"),
+                    "Publish State": pub.get("state"),
+                    "Publish Target Version": pub.get("targetVersion"),
+                    "Publish Start Time": pub.get("startTime"),
+                    "Publish End Time": pub.get("endTime"),
+                    "Spark Libraries State": pub.get("componentPublishInfo", {})
+                    .get("sparkLibraries", {})
+                    .get("state"),
+                    "Spark Settings State": pub.get("componentPublishInfo", {})
+                    .get("sparkSettings", {})
+                    .get("state"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _base_api,
     _create_dataframe,
     _conv_b64,
@@ -103,18 +103,19 @@ def list_eventhouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Eventhouse Name": v.get("displayName"),
-                "Eventhouse Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Eventhouse Name": v.get("displayName"),
+                    "Eventhouse Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _base_api,
     delete_item,
     _create_dataframe,
@@ -44,18 +44,19 @@ def list_eventstreams(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         request=f"/v1/workspaces/{workspace_id}/eventstreams", uses_pagination=True
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Eventstream Name": v.get("displayName"),
-                "Eventstream Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Eventstream Name": v.get("displayName"),
+                    "Eventstream Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
@@ -2,7 +2,7 @@ from uuid import UUID
 import pandas as pd
 from typing import Optional, List
 import sempy_labs._icons as icons
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
     _create_dataframe,
@@ -147,29 +147,30 @@ def list_external_data_shares_in_item(
         uses_pagination=True,
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for i in r.get("value", []):
             item_id = i.get("itemId")
-            new_data = {
-                "External Data Share Id": i.get("id"),
-                "Paths": [i.get("paths")],
-                "Creator Principal Id": i.get("creatorPrincipal", {}).get("id"),
-                "Creator Principal Type": i.get("creatorPrincipal", {}).get("type"),
-                "Recipient User Principal Name": i.get("recipient", {}).get(
-                    "userPrincipalName"
-                ),
-                "Status": i.get("status"),
-                "Expiration Time UTC": i.get("expriationTimeUtc"),
-                "Workspace Id": i.get("workspaceId"),
-                "Item Id": item_id,
-                "Item Name": item_name,
-                "Item Type": item_type,
-                "Invitation URL": i.get("invitationUrl"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "External Data Share Id": i.get("id"),
+                    "Paths": [i.get("paths")],
+                    "Creator Principal Id": i.get("creatorPrincipal", {}).get("id"),
+                    "Creator Principal Type": i.get("creatorPrincipal", {}).get("type"),
+                    "Recipient User Principal Name": i.get("recipient", {}).get(
+                        "userPrincipalName"
+                    ),
+                    "Status": i.get("status"),
+                    "Expiration Time UTC": i.get("expriationTimeUtc"),
+                    "Workspace Id": i.get("workspaceId"),
+                    "Item Id": item_id,
+                    "Item Name": item_name,
+                    "Item Type": item_type,
+                    "Invitation URL": i.get("invitationUrl"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df