semantic-link-labs 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (137)
  1. {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/METADATA +7 -6
  2. semantic_link_labs-0.11.3.dist-info/RECORD +212 -0
  3. sempy_labs/__init__.py +65 -71
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_ai.py +1 -1
  6. sempy_labs/_capacities.py +2 -2
  7. sempy_labs/_capacity_migration.py +5 -5
  8. sempy_labs/_clear_cache.py +1 -1
  9. sempy_labs/_connections.py +2 -2
  10. sempy_labs/_dashboards.py +16 -16
  11. sempy_labs/_data_pipelines.py +1 -1
  12. sempy_labs/_dataflows.py +101 -26
  13. sempy_labs/_dax.py +3 -3
  14. sempy_labs/_delta_analyzer.py +4 -4
  15. sempy_labs/_delta_analyzer_history.py +1 -1
  16. sempy_labs/_deployment_pipelines.py +1 -1
  17. sempy_labs/_environments.py +22 -21
  18. sempy_labs/_eventhouses.py +12 -11
  19. sempy_labs/_eventstreams.py +12 -11
  20. sempy_labs/_external_data_shares.py +78 -23
  21. sempy_labs/_gateways.py +47 -45
  22. sempy_labs/_generate_semantic_model.py +3 -3
  23. sempy_labs/_git.py +1 -1
  24. sempy_labs/_graphQL.py +12 -11
  25. sempy_labs/_helper_functions.py +169 -5
  26. sempy_labs/_job_scheduler.py +56 -54
  27. sempy_labs/_kql_databases.py +16 -17
  28. sempy_labs/_kql_querysets.py +12 -11
  29. sempy_labs/_kusto.py +2 -2
  30. sempy_labs/_labels.py +126 -0
  31. sempy_labs/_list_functions.py +2 -2
  32. sempy_labs/_managed_private_endpoints.py +18 -15
  33. sempy_labs/_mirrored_databases.py +16 -15
  34. sempy_labs/_mirrored_warehouses.py +12 -11
  35. sempy_labs/_ml_experiments.py +11 -10
  36. sempy_labs/_model_auto_build.py +3 -3
  37. sempy_labs/_model_bpa.py +5 -5
  38. sempy_labs/_model_bpa_bulk.py +3 -3
  39. sempy_labs/_model_dependencies.py +1 -1
  40. sempy_labs/_mounted_data_factories.py +12 -12
  41. sempy_labs/_notebooks.py +151 -2
  42. sempy_labs/_one_lake_integration.py +1 -1
  43. sempy_labs/_query_scale_out.py +1 -1
  44. sempy_labs/_refresh_semantic_model.py +1 -1
  45. sempy_labs/_semantic_models.py +30 -28
  46. sempy_labs/_spark.py +1 -1
  47. sempy_labs/_sql.py +1 -1
  48. sempy_labs/_sql_endpoints.py +12 -11
  49. sempy_labs/_sqldatabase.py +15 -15
  50. sempy_labs/_tags.py +11 -10
  51. sempy_labs/_translations.py +1 -1
  52. sempy_labs/_user_delegation_key.py +2 -2
  53. sempy_labs/_vertipaq.py +3 -3
  54. sempy_labs/_vpax.py +1 -1
  55. sempy_labs/_warehouses.py +15 -14
  56. sempy_labs/_workloads.py +1 -1
  57. sempy_labs/_workspace_identity.py +1 -1
  58. sempy_labs/_workspaces.py +14 -13
  59. sempy_labs/admin/__init__.py +18 -18
  60. sempy_labs/admin/_activities.py +46 -46
  61. sempy_labs/admin/_apps.py +28 -26
  62. sempy_labs/admin/_artifacts.py +15 -15
  63. sempy_labs/admin/_basic_functions.py +1 -2
  64. sempy_labs/admin/_capacities.py +84 -82
  65. sempy_labs/admin/_dataflows.py +2 -2
  66. sempy_labs/admin/_datasets.py +50 -48
  67. sempy_labs/admin/_domains.py +25 -19
  68. sempy_labs/admin/_external_data_share.py +24 -22
  69. sempy_labs/admin/_git.py +17 -17
  70. sempy_labs/admin/_items.py +47 -45
  71. sempy_labs/admin/_reports.py +61 -58
  72. sempy_labs/admin/_scanner.py +2 -2
  73. sempy_labs/admin/_shared.py +18 -18
  74. sempy_labs/admin/_tags.py +2 -2
  75. sempy_labs/admin/_tenant.py +57 -51
  76. sempy_labs/admin/_users.py +16 -15
  77. sempy_labs/admin/_workspaces.py +2 -2
  78. sempy_labs/directlake/__init__.py +12 -12
  79. sempy_labs/directlake/_directlake_schema_compare.py +3 -3
  80. sempy_labs/directlake/_directlake_schema_sync.py +9 -7
  81. sempy_labs/directlake/_dl_helper.py +5 -2
  82. sempy_labs/directlake/_generate_shared_expression.py +1 -1
  83. sempy_labs/directlake/_get_directlake_lakehouse.py +1 -1
  84. sempy_labs/directlake/_guardrails.py +1 -1
  85. sempy_labs/directlake/_list_directlake_model_calc_tables.py +3 -3
  86. sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -1
  87. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +3 -3
  88. sempy_labs/directlake/_update_directlake_partition_entity.py +4 -4
  89. sempy_labs/directlake/_warm_cache.py +3 -3
  90. sempy_labs/graph/__init__.py +3 -3
  91. sempy_labs/graph/_groups.py +81 -78
  92. sempy_labs/graph/_teams.py +21 -21
  93. sempy_labs/graph/_users.py +109 -10
  94. sempy_labs/lakehouse/__init__.py +7 -7
  95. sempy_labs/lakehouse/_blobs.py +30 -30
  96. sempy_labs/lakehouse/_get_lakehouse_columns.py +2 -2
  97. sempy_labs/lakehouse/_get_lakehouse_tables.py +29 -27
  98. sempy_labs/lakehouse/_helper.py +38 -1
  99. sempy_labs/lakehouse/_lakehouse.py +16 -7
  100. sempy_labs/lakehouse/_livy_sessions.py +47 -42
  101. sempy_labs/lakehouse/_shortcuts.py +22 -21
  102. sempy_labs/migration/__init__.py +8 -8
  103. sempy_labs/migration/_create_pqt_file.py +2 -2
  104. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +35 -44
  105. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +9 -20
  106. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +5 -9
  107. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +11 -20
  108. sempy_labs/migration/_migration_validation.py +1 -2
  109. sempy_labs/migration/_refresh_calc_tables.py +2 -2
  110. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +2 -2
  111. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +40 -40
  112. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +1 -1
  113. sempy_labs/ml_model/__init__.py +23 -0
  114. sempy_labs/ml_model/_functions.py +427 -0
  115. sempy_labs/report/__init__.py +10 -10
  116. sempy_labs/report/_download_report.py +2 -2
  117. sempy_labs/report/_export_report.py +2 -2
  118. sempy_labs/report/_generate_report.py +1 -1
  119. sempy_labs/report/_paginated.py +1 -1
  120. sempy_labs/report/_report_bpa.py +4 -3
  121. sempy_labs/report/_report_functions.py +3 -3
  122. sempy_labs/report/_report_list_functions.py +3 -3
  123. sempy_labs/report/_report_rebind.py +1 -1
  124. sempy_labs/report/_reportwrapper.py +248 -250
  125. sempy_labs/report/_save_report.py +3 -3
  126. sempy_labs/theme/_org_themes.py +19 -6
  127. sempy_labs/tom/__init__.py +1 -1
  128. sempy_labs/tom/_model.py +13 -8
  129. sempy_labs/variable_library/__init__.py +19 -0
  130. sempy_labs/variable_library/_functions.py +403 -0
  131. semantic_link_labs-0.11.1.dist-info/RECORD +0 -210
  132. sempy_labs/_dax_query_view.py +0 -57
  133. sempy_labs/_ml_models.py +0 -110
  134. sempy_labs/_variable_libraries.py +0 -91
  135. {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/WHEEL +0 -0
  136. {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/licenses/LICENSE +0 -0
  137. {semantic_link_labs-0.11.1.dist-info → semantic_link_labs-0.11.3.dist-info}/top_level.txt +0 -0
sempy_labs/_job_scheduler.py CHANGED
@@ -1,7 +1,7 @@
 from sempy._utils._log import log
 import pandas as pd
 from typing import Optional, List
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_item_name_and_id,
     _update_dataframe_datatypes,
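
The import change at the top of this hunk is the pattern repeated across most of the 137 files in this release: intra-package imports move from the absolute form (`from sempy_labs._helper_functions import ...`) to the relative form (`from ._helper_functions import ...`). A self-contained sketch of what relative imports buy for intra-package references; every name in it (`mypkg`, `_helpers`, `greet`) is made up:

```python
import os
import sys
import tempfile

# Build a throwaway package on disk; it stands in for sempy_labs and
# its _helper_functions module.
root = tempfile.mkdtemp()
pkg = os.path.join(root, "mypkg")
os.makedirs(pkg)
open(os.path.join(pkg, "__init__.py"), "w").close()
with open(os.path.join(pkg, "_helpers.py"), "w") as f:
    f.write("def greet():\n    return 'hi'\n")
with open(os.path.join(pkg, "core.py"), "w") as f:
    # The relative form resolves against the containing package,
    # whatever that package happens to be called.
    f.write("from ._helpers import greet\n")

sys.path.insert(0, root)
from mypkg.core import greet
print(greet())  # -> hi
```

A relative import resolves against the containing package rather than a hard-coded top-level name on sys.path, so the modules keep working even if the package is vendored or loaded under a different name.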
@@ -67,27 +67,28 @@ def list_item_job_instances(
     if not responses[0].get("value"):
         return df
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             fail = v.get("failureReason", {})
-            new_data = {
-                "Job Instance Id": v.get("id"),
-                "Item Name": item_name,
-                "Item Id": v.get("itemId"),
-                "Item Type": type,
-                "Job Type": v.get("jobType"),
-                "Invoke Type": v.get("invokeType"),
-                "Status": v.get("status"),
-                "Root Activity Id": v.get("rootActivityId"),
-                "Start Time UTC": v.get("startTimeUtc"),
-                "End Time UTC": v.get("endTimeUtc"),
-                "Error Message": fail.get("message") if fail is not None else "",
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "Job Instance Id": v.get("id"),
+                    "Item Name": item_name,
+                    "Item Id": v.get("itemId"),
+                    "Item Type": type,
+                    "Job Type": v.get("jobType"),
+                    "Invoke Type": v.get("invokeType"),
+                    "Status": v.get("status"),
+                    "Root Activity Id": v.get("rootActivityId"),
+                    "Start Time UTC": v.get("startTimeUtc"),
+                    "End Time UTC": v.get("endTimeUtc"),
+                    "Error Message": fail.get("message") if fail is not None else "",
+                }
+            )
 
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
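
This hunk shows the other refactor that recurs throughout the release: instead of building a single-row DataFrame per record and concatenating them at the end, the list functions now accumulate plain dicts and construct one DataFrame in a single call. A minimal sketch of the two forms, using made-up records:

```python
import pandas as pd

records = [{"Id": i, "Status": "Completed"} for i in range(3)]

# Old pattern: one single-row DataFrame per record, stitched together at
# the end. Every pd.DataFrame(...) call and the final concat pays
# per-frame construction overhead.
old = pd.concat([pd.DataFrame(r, index=[0]) for r in records], ignore_index=True)

# New pattern: one DataFrame from a list of dicts. Passing columns= pins
# the schema and column order even when some records omit a key.
new = pd.DataFrame(records, columns=["Id", "Status"])

assert old.equals(new)
```

In the library code, `columns=list(columns.keys())` reuses each function's `columns` map, so even an empty or partial API response yields the documented schema before `_update_dataframe_datatypes` applies the dtypes.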
@@ -111,24 +112,25 @@ def _get_item_job_instance(url: str) -> pd.DataFrame:
 
     response = _base_api(request=url)
 
-    dfs = []
+    rows = []
     for v in response.json().get("value", []):
         fail = v.get("failureReason", {})
-        new_data = {
-            "Job Instance Id": v.get("id"),
-            "Item Id": v.get("itemId"),
-            "Job Type": v.get("jobType"),
-            "Invoke Type": v.get("invokeType"),
-            "Status": v.get("status"),
-            "Root Activity Id": v.get("rootActivityId"),
-            "Start Time UTC": v.get("startTimeUtc"),
-            "End Time UTC": v.get("endTimeUtc"),
-            "Error Message": fail.get("message") if fail is not None else "",
-        }
-        dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+        rows.append(
+            {
+                "Job Instance Id": v.get("id"),
+                "Item Id": v.get("itemId"),
+                "Job Type": v.get("jobType"),
+                "Invoke Type": v.get("invokeType"),
+                "Status": v.get("status"),
+                "Root Activity Id": v.get("rootActivityId"),
+                "Start Time UTC": v.get("startTimeUtc"),
+                "End Time UTC": v.get("endTimeUtc"),
+                "Error Message": fail.get("message") if fail is not None else "",
+            }
+        )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
@@ -190,29 +192,29 @@ def list_item_schedules(
         request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules"
     )
 
-    dfs = []
+    rows = []
    for v in response.json().get("value", []):
         config = v.get("configuration", {})
         own = v.get("owner", {})
-        new_data = {
-            "Job Schedule Id": v.get("id"),
-            "Enabled": v.get("enabled"),
-            "Created Date Time": v.get("createdDateTime"),
-            "Start Date Time": config.get("startDateTime"),
-            "End Date Time": config.get("endDateTime"),
-            "Local Time Zone Id": config.get("localTimeZoneId"),
-            "Type": config.get("type"),
-            "Interval": config.get("interval"),
-            "Weekdays": config.get("weekdays"),
-            "Times": config.get("times"),
-            "Owner Id": own.get("id"),
-            "Owner Type": own.get("type"),
-        }
-
-        dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+        rows.append(
+            {
+                "Job Schedule Id": v.get("id"),
+                "Enabled": v.get("enabled"),
+                "Created Date Time": v.get("createdDateTime"),
+                "Start Date Time": config.get("startDateTime"),
+                "End Date Time": config.get("endDateTime"),
+                "Local Time Zone Id": config.get("localTimeZoneId"),
+                "Type": config.get("type"),
+                "Interval": config.get("interval"),
+                "Weekdays": config.get("weekdays"),
+                "Times": config.get("times"),
+                "Owner Id": own.get("id"),
+                "Owner Type": own.get("type"),
+            }
+        )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
         _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
sempy_labs/_kql_databases.py CHANGED
@@ -1,7 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
+from ._helper_functions import (
     _base_api,
     _create_dataframe,
     delete_item,
@@ -55,24 +54,24 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
-
-            new_data = {
-                "KQL Database Name": v.get("displayName"),
-                "KQL Database Id": v.get("id"),
-                "Description": v.get("description"),
-                "Parent Eventhouse Item Id": prop.get("parentEventhouseItemId"),
-                "Query Service URI": prop.get("queryServiceUri"),
-                "Ingestion Service URI": prop.get("ingestionServiceUri"),
-                "Database Type": prop.get("databaseType"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "KQL Database Name": v.get("displayName"),
+                    "KQL Database Id": v.get("id"),
+                    "Description": v.get("description"),
+                    "Parent Eventhouse Item Id": prop.get("parentEventhouseItemId"),
+                    "Query Service URI": prop.get("queryServiceUri"),
+                    "Ingestion Service URI": prop.get("ingestionServiceUri"),
+                    "Database Type": prop.get("databaseType"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
sempy_labs/_kql_querysets.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_id,
     _base_api,
     _create_dataframe,
@@ -45,18 +45,19 @@ def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         request=f"v1/workspaces/{workspace_id}/kqlQuerysets", uses_pagination=True
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "KQL Queryset Name": v.get("displayName"),
-                "KQL Queryset Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "KQL Queryset Name": v.get("displayName"),
+                    "KQL Queryset Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
sempy_labs/_kusto.py CHANGED
@@ -5,8 +5,8 @@ from sempy._utils._log import log
 import sempy_labs._icons as icons
 from typing import Optional
 from uuid import UUID
-from sempy_labs._kql_databases import _resolve_cluster_uri
-from sempy_labs._helper_functions import resolve_item_id
+from ._kql_databases import _resolve_cluster_uri
+from ._helper_functions import resolve_item_id
 
 
 @log
sempy_labs/_labels.py ADDED
@@ -0,0 +1,126 @@
+import sempy.fabric as fabric
+import requests
+import pandas as pd
+from typing import Optional, Union
+from uuid import UUID
+from sempy.fabric.exceptions import FabricHTTPException
+from sempy._utils._log import log
+
+
+@log
+def list_item_labels(workspace: Optional[Union[str, UUID]] = None) -> pd.DataFrame:
+    """
+    List all items within a workspace and shows their sensitivity labels.
+
+    NOTE: This function uses an internal API and is subject to change/break without notice.
+
+    Parameters
+    ----------
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of all items within a workspace and their sensitivity labels.
+    """
+
+    import notebookutils
+
+    token = notebookutils.credentials.getToken("pbi")
+    headers = {"Authorization": f"Bearer {token}"}
+
+    # Item types handled in special payload fields
+    grouped_types = {
+        "dashboards": "Dashboard",
+        "reports": "Report",
+        "models": "SemanticModel",
+        "dataflows": "Dataflow",
+        "datamarts": "Datamart",
+    }
+
+    # All other item types go into 'artifacts'
+    fabric_items = [
+        "Datamart",
+        "Lakehouse",
+        "Eventhouse",
+        "Environment",
+        "KQLDatabase",
+        "KQLQueryset",
+        "KQLDashboard",
+        "DataPipeline",
+        "Notebook",
+        "SparkJobDefinition",
+        "MLExperiment",
+        "MLModel",
+        "Warehouse",
+        "Eventstream",
+        "SQLEndpoint",
+        "MirroredWarehouse",
+        "MirroredDatabase",
+        "Reflex",
+        "GraphQLApi",
+        "MountedDataFactory",
+        "SQLDatabase",
+        "CopyJob",
+        "VariableLibrary",
+        "Dataflow",
+        "ApacheAirflowJob",
+        "WarehouseSnapshot",
+        "DigitalTwinBuilder",
+        "DigitalTwinBuilderFlow",
+        "MirroredAzureDatabricksCatalog",
+        "DataAgent",
+        "UserDataFunction",
+    ]
+
+    dfI = fabric.list_items(workspace=workspace)
+
+    payload = {
+        key: [{"artifactId": i} for i in dfI[dfI["Type"] == value]["Id"].tolist()]
+        for key, value in grouped_types.items()
+    }
+
+    # Add generic artifact types
+    artifact_ids = dfI[dfI["Type"].isin(fabric_items)]["Id"].tolist()
+    if artifact_ids:
+        payload["artifacts"] = [{"artifactId": i} for i in artifact_ids]
+
+    client = fabric.PowerBIRestClient()
+    response = client.get("/v1.0/myorg/capacities")
+    if response.status_code != 200:
+        raise FabricHTTPException("Failed to retrieve URL prefix.")
+    context = response.json().get("@odata.context")
+    prefix = context.split("/v1.0")[0]
+
+    response = requests.post(
+        f"{prefix}/metadata/informationProtection/artifacts",
+        json=payload,
+        headers=headers,
+    )
+    if response.status_code != 200:
+        raise FabricHTTPException(f"Failed to retrieve labels: {response.text}")
+    result = response.json()
+
+    label_keys = [
+        "artifactInformationProtections",
+        "datasetInformationProtections",
+        "reportInformationProtections",
+        "dashboardInformationProtections",
+    ]
+
+    rows = [
+        {
+            "Id": item.get("artifactObjectId"),
+            "Label Id": item.get("labelId"),
+            "Label Name": item.get("name"),
+            "Parent Label Name": item.get("parent", {}).get("name"),
+            "Label Description": item.get("tooltip"),
+        }
+        for key in label_keys
+        for item in result.get(key, [])
+    ]
+
+    df_labels = pd.DataFrame(rows)
+    return dfI.merge(df_labels, on="Id", how="left")
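
`sempy_labs/_labels.py` is new in this release, alongside the `ml_model` and `variable_library` subpackages. A hypothetical usage sketch, assuming `list_item_labels` is re-exported from the package root like the other list functions (otherwise import it from `sempy_labs._labels`); the workspace name is made up, and the call only works inside a Microsoft Fabric notebook where `notebookutils` can mint a "pbi" token:

```python
from sempy_labs import list_item_labels  # assumption: re-exported at top level

# Made-up workspace name; defaults to the notebook's workspace if omitted.
df = list_item_labels(workspace="Sales Analytics")

# The how="left" merge keeps every item; items with no sensitivity label
# come back with NaN in the label columns.
print(df[df["Label Id"].isna()][["Display Name", "Type"]])
```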
sempy_labs/_list_functions.py CHANGED
@@ -1,5 +1,5 @@
 import sempy.fabric as fabric
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
     format_dax_object_name,
@@ -1131,7 +1131,7 @@ def list_reports_using_semantic_model(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
     """
-    Shows a list of all the reports (in all workspaces) which use a given semantic model.
+    Shows a list of all the reports which use a given semantic model. This is limited to the reports which are in the same workspace as the semantic model.
 
     Parameters
     ----------
sempy_labs/_managed_private_endpoints.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _is_valid_uuid,
     _base_api,
@@ -118,23 +118,26 @@ def list_managed_private_endpoints(
         client="fabric_sp",
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             conn = v.get("connectionState", {})
-            new_data = {
-                "Managed Private Endpoint Name": v.get("name"),
-                "Managed Private Endpoint Id": v.get("id"),
-                "Target Private Link Resource Id": v.get("targetPrivateLinkResourceId"),
-                "Provisioning State": v.get("provisioningState"),
-                "Connection Status": conn.get("status"),
-                "Connection Description": conn.get("description"),
-                "Target Subresource Type": v.get("targetSubresourceType"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Managed Private Endpoint Name": v.get("name"),
+                    "Managed Private Endpoint Id": v.get("id"),
+                    "Target Private Link Resource Id": v.get(
+                        "targetPrivateLinkResourceId"
+                    ),
+                    "Provisioning State": v.get("provisioningState"),
+                    "Connection Status": conn.get("status"),
+                    "Connection Description": conn.get("description"),
+                    "Target Subresource Type": v.get("targetSubresourceType"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
sempy_labs/_mirrored_databases.py CHANGED
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_name_and_id,
     _update_dataframe_datatypes,
     _base_api,
@@ -58,25 +58,26 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
             sql = prop.get("sqlEndpointProperties", {})
-            new_data = {
-                "Mirrored Database Name": v.get("displayName"),
-                "Mirrored Database Id": v.get("id"),
-                "Description": v.get("description"),
-                "OneLake Tables Path": prop.get("oneLakeTablesPath"),
-                "SQL Endpoint Connection String": sql.get("connectionString"),
-                "SQL Endpoint Id": sql.get("id"),
-                "Provisioning Status": sql.get("provisioningStatus"),
-                "Default Schema": prop.get("defaultSchema"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "Mirrored Database Name": v.get("displayName"),
+                    "Mirrored Database Id": v.get("id"),
+                    "Description": v.get("description"),
+                    "OneLake Tables Path": prop.get("oneLakeTablesPath"),
+                    "SQL Endpoint Connection String": sql.get("connectionString"),
+                    "SQL Endpoint Id": sql.get("id"),
+                    "Provisioning Status": sql.get("provisioningStatus"),
+                    "Default Schema": prop.get("defaultSchema"),
+                }
+            )
 
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
sempy_labs/_mirrored_warehouses.py CHANGED
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_id,
     _base_api,
     _create_dataframe,
@@ -43,17 +43,18 @@ def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Mirrored Warehouse Name": v.get("displayName"),
-                "Mirrored Warehouse Id": v.get("id"),
-                "Description": v.get("description"),
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
-
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+            rows.append(
+                {
+                    "Mirrored Warehouse Name": v.get("displayName"),
+                    "Mirrored Warehouse Id": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
sempy_labs/_ml_experiments.py CHANGED
@@ -1,6 +1,6 @@
 import pandas as pd
 from typing import Optional
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     resolve_workspace_id,
     _base_api,
     delete_item,
@@ -46,22 +46,23 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
     )
 
-    dfs = []
+    rows = []
     for r in responses:
         for v in r.get("value", []):
             model_id = v.get("id")
             modelName = v.get("displayName")
             desc = v.get("description")
 
-            new_data = {
-                "ML Experiment Name": modelName,
-                "ML Experiment Id": model_id,
-                "Description": desc,
-            }
-            dfs.append(pd.DataFrame(new_data, index=[0]))
+            rows.append(
+                {
+                    "ML Experiment Name": modelName,
+                    "ML Experiment Id": model_id,
+                    "Description": desc,
+                }
+            )
 
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
 
     return df
 
sempy_labs/_model_auto_build.py CHANGED
@@ -1,8 +1,8 @@
 import sempy.fabric as fabric
 import pandas as pd
-from sempy_labs.tom import connect_semantic_model
-from sempy_labs._generate_semantic_model import create_blank_semantic_model
-from sempy_labs.directlake._generate_shared_expression import generate_shared_expression
+from .tom import connect_semantic_model
+from ._generate_semantic_model import create_blank_semantic_model
+from .directlake._generate_shared_expression import generate_shared_expression
 from typing import Optional
 from sempy._utils._log import log
 
sempy_labs/_model_bpa.py CHANGED
@@ -3,8 +3,8 @@ import pandas as pd
 import warnings
 import datetime
 from IPython.display import display, HTML
-from sempy_labs._model_dependencies import get_model_calc_dependencies
-from sempy_labs._helper_functions import (
+from ._model_dependencies import get_model_calc_dependencies
+from ._helper_functions import (
     format_dax_object_name,
     create_relationship_name,
     save_as_delta_table,
@@ -15,9 +15,9 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _create_spark_session,
 )
-from sempy_labs.lakehouse import get_lakehouse_tables, lakehouse_attached
-from sempy_labs.tom import connect_semantic_model
-from sempy_labs._model_bpa_rules import model_bpa_rules
+from .lakehouse import get_lakehouse_tables, lakehouse_attached
+from .tom import connect_semantic_model
+from ._model_bpa_rules import model_bpa_rules
 from typing import Optional
 from sempy._utils._log import log
 import sempy_labs._icons as icons
sempy_labs/_model_bpa_bulk.py CHANGED
@@ -1,7 +1,7 @@
 import sempy.fabric as fabric
 import pandas as pd
 import datetime
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     save_as_delta_table,
     resolve_workspace_capacity,
     retry,
@@ -9,11 +9,11 @@ from sempy_labs._helper_functions import (
     resolve_workspace_id,
     resolve_lakehouse_name_and_id,
 )
-from sempy_labs.lakehouse import (
+from .lakehouse import (
     get_lakehouse_tables,
     lakehouse_attached,
 )
-from sempy_labs._model_bpa import run_model_bpa
+from ._model_bpa import run_model_bpa
 from typing import Optional, List
 from sempy._utils._log import log
 import sempy_labs._icons as icons
sempy_labs/_model_dependencies.py CHANGED
@@ -1,6 +1,6 @@
 import sempy.fabric as fabric
 import pandas as pd
-from sempy_labs._helper_functions import (
+from ._helper_functions import (
     format_dax_object_name,
     resolve_dataset_name_and_id,
     resolve_workspace_name_and_id,