semantic-link-labs 0.11.0__py3-none-any.whl → 0.11.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (131)
  1. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/METADATA +6 -4
  2. semantic_link_labs-0.11.2.dist-info/RECORD +210 -0
  3. sempy_labs/__init__.py +56 -56
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_ai.py +1 -1
  6. sempy_labs/_capacities.py +2 -2
  7. sempy_labs/_capacity_migration.py +5 -5
  8. sempy_labs/_clear_cache.py +1 -1
  9. sempy_labs/_connections.py +2 -2
  10. sempy_labs/_dashboards.py +16 -16
  11. sempy_labs/_data_pipelines.py +1 -1
  12. sempy_labs/_dataflows.py +101 -26
  13. sempy_labs/_dax.py +3 -3
  14. sempy_labs/_dax_query_view.py +1 -1
  15. sempy_labs/_delta_analyzer.py +4 -4
  16. sempy_labs/_delta_analyzer_history.py +1 -1
  17. sempy_labs/_deployment_pipelines.py +1 -1
  18. sempy_labs/_environments.py +22 -21
  19. sempy_labs/_eventhouses.py +12 -11
  20. sempy_labs/_eventstreams.py +12 -11
  21. sempy_labs/_external_data_shares.py +23 -22
  22. sempy_labs/_gateways.py +47 -45
  23. sempy_labs/_generate_semantic_model.py +3 -3
  24. sempy_labs/_git.py +1 -1
  25. sempy_labs/_graphQL.py +12 -11
  26. sempy_labs/_job_scheduler.py +56 -54
  27. sempy_labs/_kql_databases.py +16 -17
  28. sempy_labs/_kql_querysets.py +12 -11
  29. sempy_labs/_kusto.py +2 -2
  30. sempy_labs/_list_functions.py +1 -1
  31. sempy_labs/_managed_private_endpoints.py +18 -15
  32. sempy_labs/_mirrored_databases.py +16 -15
  33. sempy_labs/_mirrored_warehouses.py +12 -11
  34. sempy_labs/_ml_experiments.py +11 -10
  35. sempy_labs/_ml_models.py +11 -10
  36. sempy_labs/_model_auto_build.py +3 -3
  37. sempy_labs/_model_bpa.py +5 -5
  38. sempy_labs/_model_bpa_bulk.py +3 -3
  39. sempy_labs/_model_dependencies.py +1 -1
  40. sempy_labs/_mounted_data_factories.py +12 -12
  41. sempy_labs/_notebooks.py +1 -1
  42. sempy_labs/_one_lake_integration.py +1 -1
  43. sempy_labs/_query_scale_out.py +1 -1
  44. sempy_labs/_refresh_semantic_model.py +1 -1
  45. sempy_labs/_semantic_models.py +30 -28
  46. sempy_labs/_spark.py +1 -1
  47. sempy_labs/_sql.py +1 -1
  48. sempy_labs/_sql_endpoints.py +12 -11
  49. sempy_labs/_sqldatabase.py +15 -15
  50. sempy_labs/_tags.py +11 -10
  51. sempy_labs/_translations.py +1 -1
  52. sempy_labs/_user_delegation_key.py +2 -2
  53. sempy_labs/_variable_libraries.py +13 -12
  54. sempy_labs/_vertipaq.py +3 -3
  55. sempy_labs/_vpax.py +1 -1
  56. sempy_labs/_warehouses.py +15 -14
  57. sempy_labs/_workloads.py +1 -1
  58. sempy_labs/_workspace_identity.py +1 -1
  59. sempy_labs/_workspaces.py +14 -13
  60. sempy_labs/admin/__init__.py +18 -18
  61. sempy_labs/admin/_activities.py +46 -46
  62. sempy_labs/admin/_apps.py +28 -26
  63. sempy_labs/admin/_artifacts.py +15 -15
  64. sempy_labs/admin/_basic_functions.py +1 -2
  65. sempy_labs/admin/_capacities.py +86 -82
  66. sempy_labs/admin/_dataflows.py +2 -2
  67. sempy_labs/admin/_datasets.py +50 -48
  68. sempy_labs/admin/_domains.py +25 -19
  69. sempy_labs/admin/_external_data_share.py +24 -22
  70. sempy_labs/admin/_git.py +17 -17
  71. sempy_labs/admin/_items.py +47 -45
  72. sempy_labs/admin/_reports.py +61 -58
  73. sempy_labs/admin/_scanner.py +2 -2
  74. sempy_labs/admin/_shared.py +18 -18
  75. sempy_labs/admin/_tags.py +2 -2
  76. sempy_labs/admin/_tenant.py +57 -51
  77. sempy_labs/admin/_users.py +16 -15
  78. sempy_labs/admin/_workspaces.py +2 -2
  79. sempy_labs/directlake/__init__.py +12 -12
  80. sempy_labs/directlake/_directlake_schema_compare.py +3 -3
  81. sempy_labs/directlake/_directlake_schema_sync.py +9 -7
  82. sempy_labs/directlake/_dl_helper.py +1 -1
  83. sempy_labs/directlake/_generate_shared_expression.py +1 -1
  84. sempy_labs/directlake/_get_directlake_lakehouse.py +1 -1
  85. sempy_labs/directlake/_guardrails.py +1 -1
  86. sempy_labs/directlake/_list_directlake_model_calc_tables.py +3 -3
  87. sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -1
  88. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +3 -3
  89. sempy_labs/directlake/_update_directlake_partition_entity.py +4 -4
  90. sempy_labs/directlake/_warm_cache.py +3 -3
  91. sempy_labs/graph/__init__.py +3 -3
  92. sempy_labs/graph/_groups.py +81 -78
  93. sempy_labs/graph/_teams.py +21 -21
  94. sempy_labs/graph/_users.py +111 -10
  95. sempy_labs/lakehouse/__init__.py +7 -7
  96. sempy_labs/lakehouse/_blobs.py +30 -30
  97. sempy_labs/lakehouse/_get_lakehouse_columns.py +2 -2
  98. sempy_labs/lakehouse/_get_lakehouse_tables.py +29 -27
  99. sempy_labs/lakehouse/_helper.py +30 -2
  100. sempy_labs/lakehouse/_lakehouse.py +2 -2
  101. sempy_labs/lakehouse/_livy_sessions.py +47 -42
  102. sempy_labs/lakehouse/_shortcuts.py +22 -21
  103. sempy_labs/migration/__init__.py +8 -8
  104. sempy_labs/migration/_create_pqt_file.py +2 -2
  105. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +3 -3
  106. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +3 -4
  107. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +2 -2
  108. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +4 -4
  109. sempy_labs/migration/_migration_validation.py +1 -2
  110. sempy_labs/migration/_refresh_calc_tables.py +2 -2
  111. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +2 -2
  112. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +40 -40
  113. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +1 -1
  114. sempy_labs/report/__init__.py +10 -10
  115. sempy_labs/report/_download_report.py +2 -2
  116. sempy_labs/report/_export_report.py +2 -2
  117. sempy_labs/report/_generate_report.py +1 -1
  118. sempy_labs/report/_paginated.py +1 -1
  119. sempy_labs/report/_report_bpa.py +4 -3
  120. sempy_labs/report/_report_functions.py +3 -3
  121. sempy_labs/report/_report_list_functions.py +3 -3
  122. sempy_labs/report/_report_rebind.py +1 -1
  123. sempy_labs/report/_reportwrapper.py +247 -249
  124. sempy_labs/report/_save_report.py +3 -3
  125. sempy_labs/theme/_org_themes.py +35 -1
  126. sempy_labs/tom/__init__.py +1 -1
  127. sempy_labs/tom/_model.py +23 -20
  128. semantic_link_labs-0.11.0.dist-info/RECORD +0 -210
  129. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/WHEEL +0 -0
  130. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/licenses/LICENSE +0 -0
  131. {semantic_link_labs-0.11.0.dist-info → semantic_link_labs-0.11.2.dist-info}/top_level.txt +0 -0
sempy_labs/_gateways.py CHANGED
@@ -1,7 +1,7 @@
 from sempy._utils._log import log
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _is_valid_uuid,
     resolve_capacity_id,
     resolve_workspace_name_and_id,
@@ -47,26 +47,28 @@ def list_gateways() -> pd.DataFrame:
         request="/v1/gateways", client="fabric_sp", uses_pagination=True
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Gateway Name": v.get("displayName"),
-                "Gateway Id": v.get("id"),
-                "Type": v.get("type"),
-                "Public Key Exponent": v.get("publicKey", {}).get("exponent"),
-                "Public Key Modulus": v.get("publicKey", {}).get("modulus"),
-                "Version": v.get("version"),
-                "Number Of Member Gateways": v.get("numberOfMemberGateways", 0),
-                "Load Balancing Setting": v.get("loadBalancingSetting"),
-                "Allow Cloud Connection Refresh": v.get("allowCloudConnectionRefresh"),
-                "Allow Custom Connectors": v.get("allowCustomConnectors"),
-            }
-
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Gateway Name": v.get("displayName"),
+                    "Gateway Id": v.get("id"),
+                    "Type": v.get("type"),
+                    "Public Key Exponent": v.get("publicKey", {}).get("exponent"),
+                    "Public Key Modulus": v.get("publicKey", {}).get("modulus"),
+                    "Version": v.get("version"),
+                    "Number Of Member Gateways": v.get("numberOfMemberGateways", 0),
+                    "Load Balancing Setting": v.get("loadBalancingSetting"),
+                    "Allow Cloud Connection Refresh": v.get(
+                        "allowCloudConnectionRefresh"
+                    ),
+                    "Allow Custom Connectors": v.get("allowCustomConnectors"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df
@@ -141,20 +143,20 @@ def list_gateway_role_assigments(gateway: str | UUID) -> pd.DataFrame:
         uses_pagination=True,
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Gateway Role Assignment Id": v.get("id"),
-                "Principal Id": v.get("principal", {}).get("id"),
-                "Principal Type": v.get("principal", {}).get("type"),
-                "Role": v.get("role"),
-            }
-
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Gateway Role Assignment Id": v.get("id"),
+                    "Principal Id": v.get("principal", {}).get("id"),
+                    "Principal Type": v.get("principal", {}).get("type"),
+                    "Role": v.get("role"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

@@ -274,21 +276,21 @@ def list_gateway_members(gateway: str | UUID) -> pd.DataFrame:
         request=f"/v1/gateways/{gateway_id}/members", client="fabric_sp"
     )

-    dfs = []
+    rows = []
     for v in response.json().get("value", []):
-        new_data = {
-            "Member Id": v.get("id"),
-            "Member Name": v.get("displayName"),
-            "Public Key Exponent": v.get("publicKey", {}).get("exponent"),
-            "Public Key Modulus": v.get("publicKey", {}).get("modulus"),
-            "Version": v.get("version"),
-            "Enabled": v.get("enabled"),
-        }
-
-        dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+        rows.append(
+            {
+                "Member Id": v.get("id"),
+                "Member Name": v.get("displayName"),
+                "Public Key Exponent": v.get("publicKey", {}).get("exponent"),
+                "Public Key Modulus": v.get("publicKey", {}).get("modulus"),
+                "Version": v.get("version"),
+                "Enabled": v.get("enabled"),
+            }
+        )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
        _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df
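
Note: the three hunks above all make the same refactor, and it repeats across most files in this release: instead of building a one-row DataFrame per record and concatenating at the end, each record becomes a plain dict and a single DataFrame is built once. A minimal self-contained sketch of the before/after shapes, using a hypothetical payload and column map rather than the library's real data:

import pandas as pd

# Hypothetical stand-ins for the API response and the module-level `columns` map.
payload = {"value": [{"displayName": "GW1", "id": "a1"}, {"displayName": "GW2", "id": "b2"}]}
columns = {"Gateway Name": "string", "Gateway Id": "string"}

# 0.11.0 shape: one single-row frame per record, concatenated at the end.
dfs = [
    pd.DataFrame({"Gateway Name": v.get("displayName"), "Gateway Id": v.get("id")}, index=[0])
    for v in payload["value"]
]
old = pd.concat(dfs, ignore_index=True)

# 0.11.2 shape: accumulate dicts, construct once; `columns` fixes the column order.
rows = [{"Gateway Name": v.get("displayName"), "Gateway Id": v.get("id")} for v in payload["value"]]
new = pd.DataFrame(rows, columns=list(columns.keys()))

assert old.equals(new)

The per-record cost drops from constructing a one-row DataFrame to appending a dict, which is both faster and simpler.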
sempy_labs/_generate_semantic_model.py CHANGED
@@ -4,7 +4,7 @@ import json
 import os
 from typing import Optional, List
 from sempy._utils._log import log
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
     _conv_b64,
@@ -13,9 +13,9 @@ from sempy_labs._helper_functions import (
     _mount,
     resolve_workspace_id,
 )
-from sempy_labs.lakehouse._lakehouse import lakehouse_attached
+from .lakehouse._lakehouse import lakehouse_attached
 import sempy_labs._icons as icons
-from sempy_labs._refresh_semantic_model import refresh_semantic_model
+from ._refresh_semantic_model import refresh_semantic_model
 from uuid import UUID

sempy_labs/_git.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional, List
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
     _create_dataframe,
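
The other change repeated across these files is a pure import respelling: absolute `from sempy_labs._helper_functions import (...)` becomes package-relative `from ._helper_functions import (...)`. Both forms bind the same module; a quick equivalence check with importlib (assuming semantic-link-labs is installed):

import importlib

# Absolute spelling, as in 0.11.0.
absolute = importlib.import_module("sempy_labs._helper_functions")

# Relative spelling, as in 0.11.2: a leading dot is resolved against the
# importing module's package, so inside sempy_labs/_git.py this names the
# same module as the absolute form.
relative = importlib.import_module("._helper_functions", package="sempy_labs")

assert absolute is relative

The practical effect is that intra-package references keep working even if the top-level package is vendored or renamed.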
sempy_labs/_graphQL.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 from uuid import UUID
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     _base_api,
     _create_dataframe,
     resolve_workspace_id,
@@ -47,18 +47,19 @@ def list_graphql_apis(workspace: Optional[str | UUID]) -> pd.DataFrame:
         client="fabric_sp",
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "GraphQL API Name": v.get("displayName"),
-                "GraphQL API Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "GraphQL API Name": v.get("displayName"),
+                    "GraphQL API Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

sempy_labs/_job_scheduler.py CHANGED
@@ -1,7 +1,7 @@
 from sempy._utils._log import log
 import pandas as pd
 from typing import Optional, List
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_item_name_and_id,
     _update_dataframe_datatypes,
@@ -67,27 +67,28 @@ def list_item_job_instances(
     if not responses[0].get("value"):
         return df

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             fail = v.get("failureReason", {})
-            new_data = {
-                "Job Instance Id": v.get("id"),
-                "Item Name": item_name,
-                "Item Id": v.get("itemId"),
-                "Item Type": type,
-                "Job Type": v.get("jobType"),
-                "Invoke Type": v.get("invokeType"),
-                "Status": v.get("status"),
-                "Root Activity Id": v.get("rootActivityId"),
-                "Start Time UTC": v.get("startTimeUtc"),
-                "End Time UTC": v.get("endTimeUtc"),
-                "Error Message": fail.get("message") if fail is not None else "",
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "Job Instance Id": v.get("id"),
+                    "Item Name": item_name,
+                    "Item Id": v.get("itemId"),
+                    "Item Type": type,
+                    "Job Type": v.get("jobType"),
+                    "Invoke Type": v.get("invokeType"),
+                    "Status": v.get("status"),
+                    "Root Activity Id": v.get("rootActivityId"),
+                    "Start Time UTC": v.get("startTimeUtc"),
+                    "End Time UTC": v.get("endTimeUtc"),
+                    "Error Message": fail.get("message") if fail is not None else "",
+                }
+            )

-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df
@@ -111,24 +112,25 @@ def _get_item_job_instance(url: str) -> pd.DataFrame:

     response = _base_api(request=url)

-    dfs = []
+    rows = []
     for v in response.json().get("value", []):
         fail = v.get("failureReason", {})
-        new_data = {
-            "Job Instance Id": v.get("id"),
-            "Item Id": v.get("itemId"),
-            "Job Type": v.get("jobType"),
-            "Invoke Type": v.get("invokeType"),
-            "Status": v.get("status"),
-            "Root Activity Id": v.get("rootActivityId"),
-            "Start Time UTC": v.get("startTimeUtc"),
-            "End Time UTC": v.get("endTimeUtc"),
-            "Error Message": fail.get("message") if fail is not None else "",
-        }
-        dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+        rows.append(
+            {
+                "Job Instance Id": v.get("id"),
+                "Item Id": v.get("itemId"),
+                "Job Type": v.get("jobType"),
+                "Invoke Type": v.get("invokeType"),
+                "Status": v.get("status"),
+                "Root Activity Id": v.get("rootActivityId"),
+                "Start Time UTC": v.get("startTimeUtc"),
+                "End Time UTC": v.get("endTimeUtc"),
+                "Error Message": fail.get("message") if fail is not None else "",
+            }
+        )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df
@@ -190,29 +192,29 @@ def list_item_schedules(
         request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules"
     )

-    dfs = []
+    rows = []
     for v in response.json().get("value", []):
         config = v.get("configuration", {})
         own = v.get("owner", {})
-        new_data = {
-            "Job Schedule Id": v.get("id"),
-            "Enabled": v.get("enabled"),
-            "Created Date Time": v.get("createdDateTime"),
-            "Start Date Time": config.get("startDateTime"),
-            "End Date Time": config.get("endDateTime"),
-            "Local Time Zone Id": config.get("localTimeZoneId"),
-            "Type": config.get("type"),
-            "Interval": config.get("interval"),
-            "Weekdays": config.get("weekdays"),
-            "Times": config.get("times"),
-            "Owner Id": own.get("id"),
-            "Owner Type": own.get("type"),
-        }
-
-        dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+        rows.append(
+            {
+                "Job Schedule Id": v.get("id"),
+                "Enabled": v.get("enabled"),
+                "Created Date Time": v.get("createdDateTime"),
+                "Start Date Time": config.get("startDateTime"),
+                "End Date Time": config.get("endDateTime"),
+                "Local Time Zone Id": config.get("localTimeZoneId"),
+                "Type": config.get("type"),
+                "Interval": config.get("interval"),
+                "Weekdays": config.get("weekdays"),
+                "Times": config.get("times"),
+                "Owner Id": own.get("id"),
+                "Owner Type": own.get("type"),
+            }
+        )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df
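
One subtlety in the `failureReason` handling above: `dict.get(key, default)` falls back to the default only when the key is absent, so a JSON value of null still comes through as None rather than `{}`. That is why the code pairs the `{}` default with an explicit `if fail is not None` guard. A small illustration with hypothetical job records:

# dict.get only applies its default for a *missing* key, not an explicit None.
records = [
    {"id": "1"},                                        # key absent: default {} is used
    {"id": "2", "failureReason": None},                 # explicit null: .get returns None
    {"id": "3", "failureReason": {"message": "boom"}},  # populated failure object
]

for v in records:
    fail = v.get("failureReason", {})
    message = fail.get("message") if fail is not None else ""
    print(v["id"], repr(message))
# 1 None
# 2 ''
# 3 'boom'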
sempy_labs/_kql_databases.py CHANGED
@@ -1,7 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
+from ._helper_functions import (
     _base_api,
     _create_dataframe,
     delete_item,
@@ -55,24 +54,24 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
-
-            new_data = {
-                "KQL Database Name": v.get("displayName"),
-                "KQL Database Id": v.get("id"),
-                "Description": v.get("description"),
-                "Parent Eventhouse Item Id": prop.get("parentEventhouseItemId"),
-                "Query Service URI": prop.get("queryServiceUri"),
-                "Ingestion Service URI": prop.get("ingestionServiceUri"),
-                "Database Type": prop.get("databaseType"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "KQL Database Name": v.get("displayName"),
+                    "KQL Database Id": v.get("id"),
+                    "Description": v.get("description"),
+                    "Parent Eventhouse Item Id": prop.get("parentEventhouseItemId"),
+                    "Query Service URI": prop.get("queryServiceUri"),
+                    "Ingestion Service URI": prop.get("ingestionServiceUri"),
+                    "Database Type": prop.get("databaseType"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

sempy_labs/_kql_querysets.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_id,
     _base_api,
     _create_dataframe,
@@ -45,18 +45,19 @@ def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         request=f"v1/workspaces/{workspace_id}/kqlQuerysets", uses_pagination=True
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "KQL Queryset Name": v.get("displayName"),
-                "KQL Queryset Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "KQL Queryset Name": v.get("displayName"),
+                    "KQL Queryset Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

sempy_labs/_kusto.py CHANGED
@@ -5,8 +5,8 @@ from sempy._utils._log import log
 import sempy_labs._icons as icons
 from typing import Optional
 from uuid import UUID
-from sempy_labs._kql_databases import _resolve_cluster_uri
-from sempy_labs._helper_functions import resolve_item_id
+from ._kql_databases import _resolve_cluster_uri
+from ._helper_functions import resolve_item_id


 @log
sempy_labs/_list_functions.py CHANGED
@@ -1,5 +1,5 @@
 import sempy.fabric as fabric
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
     format_dax_object_name,
sempy_labs/_managed_private_endpoints.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _is_valid_uuid,
     _base_api,
@@ -118,23 +118,26 @@ def list_managed_private_endpoints(
         client="fabric_sp",
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             conn = v.get("connectionState", {})
-            new_data = {
-                "Managed Private Endpoint Name": v.get("name"),
-                "Managed Private Endpoint Id": v.get("id"),
-                "Target Private Link Resource Id": v.get("targetPrivateLinkResourceId"),
-                "Provisioning State": v.get("provisioningState"),
-                "Connection Status": conn.get("status"),
-                "Connection Description": conn.get("description"),
-                "Target Subresource Type": v.get("targetSubresourceType"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Managed Private Endpoint Name": v.get("name"),
+                    "Managed Private Endpoint Id": v.get("id"),
+                    "Target Private Link Resource Id": v.get(
+                        "targetPrivateLinkResourceId"
+                    ),
+                    "Provisioning State": v.get("provisioningState"),
+                    "Connection Status": conn.get("status"),
+                    "Connection Description": conn.get("description"),
+                    "Target Subresource Type": v.get("targetSubresourceType"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

sempy_labs/_mirrored_databases.py CHANGED
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _update_dataframe_datatypes,
     _base_api,
@@ -58,25 +58,26 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
             sql = prop.get("sqlEndpointProperties", {})
-            new_data = {
-                "Mirrored Database Name": v.get("displayName"),
-                "Mirrored Database Id": v.get("id"),
-                "Description": v.get("description"),
-                "OneLake Tables Path": prop.get("oneLakeTablesPath"),
-                "SQL Endpoint Connection String": sql.get("connectionString"),
-                "SQL Endpoint Id": sql.get("id"),
-                "Provisioning Status": sql.get("provisioningStatus"),
-                "Default Schema": prop.get("defaultSchema"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "Mirrored Database Name": v.get("displayName"),
+                    "Mirrored Database Id": v.get("id"),
+                    "Description": v.get("description"),
+                    "OneLake Tables Path": prop.get("oneLakeTablesPath"),
+                    "SQL Endpoint Connection String": sql.get("connectionString"),
+                    "SQL Endpoint Id": sql.get("id"),
+                    "Provisioning Status": sql.get("provisioningStatus"),
+                    "Default Schema": prop.get("defaultSchema"),
+                }
+            )

-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

sempy_labs/_mirrored_warehouses.py CHANGED
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_id,
     _base_api,
     _create_dataframe,
@@ -43,17 +43,18 @@ def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
    )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Mirrored Warehouse Name": v.get("displayName"),
-                "Mirrored Warehouse Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Mirrored Warehouse Name": v.get("displayName"),
+                    "Mirrored Warehouse Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df
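
The nested lookups in the mirrored-database hunk above (`prop = v.get("properties", {})`, then `sql = prop.get("sqlEndpointProperties", {})`) rely on the same defensive idiom: defaulting each level to `{}` lets the chain continue when an intermediate object is missing, with absent leaves coming back as None. A standalone sketch over a hypothetical payload:

# Two records: one fully populated, one missing the nested properties entirely.
items = [
    {"displayName": "db1", "properties": {"sqlEndpointProperties": {"id": "e-1"}}},
    {"displayName": "db2"},
]

for v in items:
    prop = v.get("properties", {})
    sql = prop.get("sqlEndpointProperties", {})
    print(v["displayName"], sql.get("id"))
# db1 e-1
# db2 None

The same explicit-null caveat from the job-scheduler note applies: if an intermediate key were present with a null value, `prop` would be None and the chain would raise.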
sempy_labs/_ml_experiments.py CHANGED
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_id,
     _base_api,
     delete_item,
@@ -46,22 +46,23 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
     )

-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             model_id = v.get("id")
             modelName = v.get("displayName")
             desc = v.get("description")

-            new_data = {
-                "ML Experiment Name": modelName,
-                "ML Experiment Id": model_id,
-                "Description": desc,
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "ML Experiment Name": modelName,
+                    "ML Experiment Id": model_id,
+                    "Description": desc,
+                }
+            )

-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df