semantic-link-labs 0.12.8 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (243)
  1. semantic_link_labs-0.12.8.dist-info/METADATA +354 -0
  2. semantic_link_labs-0.12.8.dist-info/RECORD +243 -0
  3. semantic_link_labs-0.12.8.dist-info/WHEEL +5 -0
  4. semantic_link_labs-0.12.8.dist-info/licenses/LICENSE +21 -0
  5. semantic_link_labs-0.12.8.dist-info/top_level.txt +1 -0
  6. sempy_labs/__init__.py +606 -0
  7. sempy_labs/_a_lib_info.py +2 -0
  8. sempy_labs/_ai.py +437 -0
  9. sempy_labs/_authentication.py +264 -0
  10. sempy_labs/_bpa_translation/_model/_translations_am-ET.po +869 -0
  11. sempy_labs/_bpa_translation/_model/_translations_ar-AE.po +908 -0
  12. sempy_labs/_bpa_translation/_model/_translations_bg-BG.po +968 -0
  13. sempy_labs/_bpa_translation/_model/_translations_ca-ES.po +963 -0
  14. sempy_labs/_bpa_translation/_model/_translations_cs-CZ.po +943 -0
  15. sempy_labs/_bpa_translation/_model/_translations_da-DK.po +945 -0
  16. sempy_labs/_bpa_translation/_model/_translations_de-DE.po +988 -0
  17. sempy_labs/_bpa_translation/_model/_translations_el-GR.po +993 -0
  18. sempy_labs/_bpa_translation/_model/_translations_es-ES.po +971 -0
  19. sempy_labs/_bpa_translation/_model/_translations_fa-IR.po +933 -0
  20. sempy_labs/_bpa_translation/_model/_translations_fi-FI.po +942 -0
  21. sempy_labs/_bpa_translation/_model/_translations_fr-FR.po +994 -0
  22. sempy_labs/_bpa_translation/_model/_translations_ga-IE.po +967 -0
  23. sempy_labs/_bpa_translation/_model/_translations_he-IL.po +902 -0
  24. sempy_labs/_bpa_translation/_model/_translations_hi-IN.po +944 -0
  25. sempy_labs/_bpa_translation/_model/_translations_hu-HU.po +963 -0
  26. sempy_labs/_bpa_translation/_model/_translations_id-ID.po +946 -0
  27. sempy_labs/_bpa_translation/_model/_translations_is-IS.po +939 -0
  28. sempy_labs/_bpa_translation/_model/_translations_it-IT.po +986 -0
  29. sempy_labs/_bpa_translation/_model/_translations_ja-JP.po +846 -0
  30. sempy_labs/_bpa_translation/_model/_translations_ko-KR.po +839 -0
  31. sempy_labs/_bpa_translation/_model/_translations_mt-MT.po +967 -0
  32. sempy_labs/_bpa_translation/_model/_translations_nl-NL.po +978 -0
  33. sempy_labs/_bpa_translation/_model/_translations_pl-PL.po +962 -0
  34. sempy_labs/_bpa_translation/_model/_translations_pt-BR.po +962 -0
  35. sempy_labs/_bpa_translation/_model/_translations_pt-PT.po +957 -0
  36. sempy_labs/_bpa_translation/_model/_translations_ro-RO.po +968 -0
  37. sempy_labs/_bpa_translation/_model/_translations_ru-RU.po +964 -0
  38. sempy_labs/_bpa_translation/_model/_translations_sk-SK.po +952 -0
  39. sempy_labs/_bpa_translation/_model/_translations_sl-SL.po +950 -0
  40. sempy_labs/_bpa_translation/_model/_translations_sv-SE.po +942 -0
  41. sempy_labs/_bpa_translation/_model/_translations_ta-IN.po +976 -0
  42. sempy_labs/_bpa_translation/_model/_translations_te-IN.po +947 -0
  43. sempy_labs/_bpa_translation/_model/_translations_th-TH.po +924 -0
  44. sempy_labs/_bpa_translation/_model/_translations_tr-TR.po +953 -0
  45. sempy_labs/_bpa_translation/_model/_translations_uk-UA.po +961 -0
  46. sempy_labs/_bpa_translation/_model/_translations_zh-CN.po +804 -0
  47. sempy_labs/_bpa_translation/_model/_translations_zu-ZA.po +969 -0
  48. sempy_labs/_capacities.py +1198 -0
  49. sempy_labs/_capacity_migration.py +660 -0
  50. sempy_labs/_clear_cache.py +351 -0
  51. sempy_labs/_connections.py +610 -0
  52. sempy_labs/_dashboards.py +69 -0
  53. sempy_labs/_data_access_security.py +98 -0
  54. sempy_labs/_data_pipelines.py +162 -0
  55. sempy_labs/_dataflows.py +668 -0
  56. sempy_labs/_dax.py +501 -0
  57. sempy_labs/_daxformatter.py +80 -0
  58. sempy_labs/_delta_analyzer.py +467 -0
  59. sempy_labs/_delta_analyzer_history.py +301 -0
  60. sempy_labs/_dictionary_diffs.py +221 -0
  61. sempy_labs/_documentation.py +147 -0
  62. sempy_labs/_domains.py +51 -0
  63. sempy_labs/_eventhouses.py +182 -0
  64. sempy_labs/_external_data_shares.py +230 -0
  65. sempy_labs/_gateways.py +521 -0
  66. sempy_labs/_generate_semantic_model.py +521 -0
  67. sempy_labs/_get_connection_string.py +84 -0
  68. sempy_labs/_git.py +543 -0
  69. sempy_labs/_graphQL.py +90 -0
  70. sempy_labs/_helper_functions.py +2833 -0
  71. sempy_labs/_icons.py +149 -0
  72. sempy_labs/_job_scheduler.py +609 -0
  73. sempy_labs/_kql_databases.py +149 -0
  74. sempy_labs/_kql_querysets.py +124 -0
  75. sempy_labs/_kusto.py +137 -0
  76. sempy_labs/_labels.py +124 -0
  77. sempy_labs/_list_functions.py +1720 -0
  78. sempy_labs/_managed_private_endpoints.py +253 -0
  79. sempy_labs/_mirrored_databases.py +416 -0
  80. sempy_labs/_mirrored_warehouses.py +60 -0
  81. sempy_labs/_ml_experiments.py +113 -0
  82. sempy_labs/_model_auto_build.py +140 -0
  83. sempy_labs/_model_bpa.py +557 -0
  84. sempy_labs/_model_bpa_bulk.py +378 -0
  85. sempy_labs/_model_bpa_rules.py +859 -0
  86. sempy_labs/_model_dependencies.py +343 -0
  87. sempy_labs/_mounted_data_factories.py +123 -0
  88. sempy_labs/_notebooks.py +441 -0
  89. sempy_labs/_one_lake_integration.py +151 -0
  90. sempy_labs/_onelake.py +131 -0
  91. sempy_labs/_query_scale_out.py +433 -0
  92. sempy_labs/_refresh_semantic_model.py +435 -0
  93. sempy_labs/_semantic_models.py +468 -0
  94. sempy_labs/_spark.py +455 -0
  95. sempy_labs/_sql.py +241 -0
  96. sempy_labs/_sql_audit_settings.py +207 -0
  97. sempy_labs/_sql_endpoints.py +214 -0
  98. sempy_labs/_tags.py +201 -0
  99. sempy_labs/_translations.py +43 -0
  100. sempy_labs/_user_delegation_key.py +44 -0
  101. sempy_labs/_utils.py +79 -0
  102. sempy_labs/_vertipaq.py +1021 -0
  103. sempy_labs/_vpax.py +388 -0
  104. sempy_labs/_warehouses.py +234 -0
  105. sempy_labs/_workloads.py +140 -0
  106. sempy_labs/_workspace_identity.py +72 -0
  107. sempy_labs/_workspaces.py +595 -0
  108. sempy_labs/admin/__init__.py +170 -0
  109. sempy_labs/admin/_activities.py +167 -0
  110. sempy_labs/admin/_apps.py +145 -0
  111. sempy_labs/admin/_artifacts.py +65 -0
  112. sempy_labs/admin/_basic_functions.py +463 -0
  113. sempy_labs/admin/_capacities.py +508 -0
  114. sempy_labs/admin/_dataflows.py +45 -0
  115. sempy_labs/admin/_datasets.py +186 -0
  116. sempy_labs/admin/_domains.py +522 -0
  117. sempy_labs/admin/_external_data_share.py +100 -0
  118. sempy_labs/admin/_git.py +72 -0
  119. sempy_labs/admin/_items.py +265 -0
  120. sempy_labs/admin/_labels.py +211 -0
  121. sempy_labs/admin/_reports.py +241 -0
  122. sempy_labs/admin/_scanner.py +118 -0
  123. sempy_labs/admin/_shared.py +82 -0
  124. sempy_labs/admin/_sharing_links.py +110 -0
  125. sempy_labs/admin/_tags.py +131 -0
  126. sempy_labs/admin/_tenant.py +503 -0
  127. sempy_labs/admin/_tenant_keys.py +89 -0
  128. sempy_labs/admin/_users.py +140 -0
  129. sempy_labs/admin/_workspaces.py +236 -0
  130. sempy_labs/deployment_pipeline/__init__.py +23 -0
  131. sempy_labs/deployment_pipeline/_items.py +580 -0
  132. sempy_labs/directlake/__init__.py +57 -0
  133. sempy_labs/directlake/_autosync.py +58 -0
  134. sempy_labs/directlake/_directlake_schema_compare.py +120 -0
  135. sempy_labs/directlake/_directlake_schema_sync.py +161 -0
  136. sempy_labs/directlake/_dl_helper.py +274 -0
  137. sempy_labs/directlake/_generate_shared_expression.py +94 -0
  138. sempy_labs/directlake/_get_directlake_lakehouse.py +62 -0
  139. sempy_labs/directlake/_get_shared_expression.py +34 -0
  140. sempy_labs/directlake/_guardrails.py +96 -0
  141. sempy_labs/directlake/_list_directlake_model_calc_tables.py +70 -0
  142. sempy_labs/directlake/_show_unsupported_directlake_objects.py +90 -0
  143. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +239 -0
  144. sempy_labs/directlake/_update_directlake_partition_entity.py +259 -0
  145. sempy_labs/directlake/_warm_cache.py +236 -0
  146. sempy_labs/dotnet_lib/dotnet.runtime.config.json +10 -0
  147. sempy_labs/environment/__init__.py +23 -0
  148. sempy_labs/environment/_items.py +212 -0
  149. sempy_labs/environment/_pubstage.py +223 -0
  150. sempy_labs/eventstream/__init__.py +37 -0
  151. sempy_labs/eventstream/_items.py +263 -0
  152. sempy_labs/eventstream/_topology.py +652 -0
  153. sempy_labs/graph/__init__.py +59 -0
  154. sempy_labs/graph/_groups.py +651 -0
  155. sempy_labs/graph/_sensitivity_labels.py +120 -0
  156. sempy_labs/graph/_teams.py +125 -0
  157. sempy_labs/graph/_user_licenses.py +96 -0
  158. sempy_labs/graph/_users.py +516 -0
  159. sempy_labs/graph_model/__init__.py +15 -0
  160. sempy_labs/graph_model/_background_jobs.py +63 -0
  161. sempy_labs/graph_model/_items.py +149 -0
  162. sempy_labs/lakehouse/__init__.py +67 -0
  163. sempy_labs/lakehouse/_blobs.py +247 -0
  164. sempy_labs/lakehouse/_get_lakehouse_columns.py +102 -0
  165. sempy_labs/lakehouse/_get_lakehouse_tables.py +274 -0
  166. sempy_labs/lakehouse/_helper.py +250 -0
  167. sempy_labs/lakehouse/_lakehouse.py +351 -0
  168. sempy_labs/lakehouse/_livy_sessions.py +143 -0
  169. sempy_labs/lakehouse/_materialized_lake_views.py +157 -0
  170. sempy_labs/lakehouse/_partitioning.py +165 -0
  171. sempy_labs/lakehouse/_schemas.py +217 -0
  172. sempy_labs/lakehouse/_shortcuts.py +440 -0
  173. sempy_labs/migration/__init__.py +35 -0
  174. sempy_labs/migration/_create_pqt_file.py +238 -0
  175. sempy_labs/migration/_direct_lake_to_import.py +105 -0
  176. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +398 -0
  177. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +148 -0
  178. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +533 -0
  179. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +172 -0
  180. sempy_labs/migration/_migration_validation.py +71 -0
  181. sempy_labs/migration/_refresh_calc_tables.py +131 -0
  182. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +15 -0
  183. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +213 -0
  184. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +45 -0
  185. sempy_labs/ml_model/__init__.py +23 -0
  186. sempy_labs/ml_model/_functions.py +427 -0
  187. sempy_labs/report/_BPAReportTemplate.json +232 -0
  188. sempy_labs/report/__init__.py +55 -0
  189. sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +9 -0
  190. sempy_labs/report/_bpareporttemplate/.platform +11 -0
  191. sempy_labs/report/_bpareporttemplate/StaticResources/SharedResources/BaseThemes/CY24SU06.json +710 -0
  192. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/page.json +11 -0
  193. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/1b08bce3bebabb0a27a8/visual.json +191 -0
  194. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/2f22ddb70c301693c165/visual.json +438 -0
  195. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/3b1182230aa6c600b43a/visual.json +127 -0
  196. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/58577ba6380c69891500/visual.json +576 -0
  197. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/a2a8fa5028b3b776c96c/visual.json +207 -0
  198. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/adfd47ef30652707b987/visual.json +506 -0
  199. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/b6a80ee459e716e170b1/visual.json +127 -0
  200. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/ce3130a721c020cc3d81/visual.json +513 -0
  201. sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/page.json +8 -0
  202. sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/visuals/66e60dfb526437cd78d1/visual.json +112 -0
  203. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/page.json +11 -0
  204. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/07deb8bce824e1be37d7/visual.json +513 -0
  205. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0b1c68838818b32ad03b/visual.json +352 -0
  206. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0c171de9d2683d10b930/visual.json +37 -0
  207. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0efa01be0510e40a645e/visual.json +542 -0
  208. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/6bf2f0eb830ab53cc668/visual.json +221 -0
  209. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/88d8141cb8500b60030c/visual.json +127 -0
  210. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/a753273590beed656a03/visual.json +576 -0
  211. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/b8fdc82cddd61ac447bc/visual.json +127 -0
  212. sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/page.json +9 -0
  213. sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/visuals/ce8532a7e25020271077/visual.json +38 -0
  214. sempy_labs/report/_bpareporttemplate/definition/pages/pages.json +10 -0
  215. sempy_labs/report/_bpareporttemplate/definition/report.json +176 -0
  216. sempy_labs/report/_bpareporttemplate/definition/version.json +4 -0
  217. sempy_labs/report/_bpareporttemplate/definition.pbir +14 -0
  218. sempy_labs/report/_download_report.py +76 -0
  219. sempy_labs/report/_export_report.py +257 -0
  220. sempy_labs/report/_generate_report.py +427 -0
  221. sempy_labs/report/_paginated.py +76 -0
  222. sempy_labs/report/_report_bpa.py +354 -0
  223. sempy_labs/report/_report_bpa_rules.py +115 -0
  224. sempy_labs/report/_report_functions.py +581 -0
  225. sempy_labs/report/_report_helper.py +227 -0
  226. sempy_labs/report/_report_list_functions.py +110 -0
  227. sempy_labs/report/_report_rebind.py +149 -0
  228. sempy_labs/report/_reportwrapper.py +3100 -0
  229. sempy_labs/report/_save_report.py +147 -0
  230. sempy_labs/snowflake_database/__init__.py +10 -0
  231. sempy_labs/snowflake_database/_items.py +105 -0
  232. sempy_labs/sql_database/__init__.py +21 -0
  233. sempy_labs/sql_database/_items.py +201 -0
  234. sempy_labs/sql_database/_mirroring.py +79 -0
  235. sempy_labs/theme/__init__.py +12 -0
  236. sempy_labs/theme/_org_themes.py +129 -0
  237. sempy_labs/tom/__init__.py +3 -0
  238. sempy_labs/tom/_model.py +5977 -0
  239. sempy_labs/variable_library/__init__.py +19 -0
  240. sempy_labs/variable_library/_functions.py +403 -0
  241. sempy_labs/warehouse/__init__.py +28 -0
  242. sempy_labs/warehouse/_items.py +234 -0
  243. sempy_labs/warehouse/_restore_points.py +309 -0
sempy_labs/_spark.py ADDED
@@ -0,0 +1,455 @@
+ import pandas as pd
+ import sempy_labs._icons as icons
+ from typing import Optional
+ from sempy_labs._helper_functions import (
+     resolve_workspace_id,
+     resolve_workspace_name_and_id,
+     _update_dataframe_datatypes,
+     _base_api,
+     _create_dataframe,
+ )
+ from uuid import UUID
+ from sempy._utils._log import log
+
+
+ @log
+ def list_custom_pools(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+     """
+     Lists all `custom pools <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
+
+     This is a wrapper function for the following API: `Custom Pools - List Workspace Custom Pools <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools>`_.
+
+     Parameters
+     ----------
+     workspace : str | uuid.UUID, default=None
+         The name of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing all the custom pools within the Fabric workspace.
+     """
+
+     workspace_id = resolve_workspace_id(workspace)
+
+     columns = {
+         "Custom Pool ID": "string",
+         "Custom Pool Name": "string",
+         "Type": "string",
+         "Node Family": "string",
+         "Node Size": "string",
+         "Auto Scale Enabled": "bool",
+         "Auto Scale Min Node Count": "int",
+         "Auto Scale Max Node Count": "int",
+         "Dynamic Executor Allocation Enabled": "bool",
+         "Dynamic Executor Allocation Min Executors": "int",
+         "Dynamic Executor Allocation Max Executors": "int",
+     }
+     df = _create_dataframe(columns=columns)
+
+     response = _base_api(request=f"/v1/workspaces/{workspace_id}/spark/pools")
+
+     rows = []
+     for i in response.json()["value"]:
+
+         aScale = i.get("autoScale", {})
+         d = i.get("dynamicExecutorAllocation", {})
+
+         rows.append(
+             {
+                 "Custom Pool ID": i.get("id"),
+                 "Custom Pool Name": i.get("name"),
+                 "Type": i.get("type"),
+                 "Node Family": i.get("nodeFamily"),
+                 "Node Size": i.get("nodeSize"),
+                 "Auto Scale Enabled": aScale.get("enabled"),
+                 "Auto Scale Min Node Count": aScale.get("minNodeCount"),
+                 "Auto Scale Max Node Count": aScale.get("maxNodeCount"),
+                 "Dynamic Executor Allocation Enabled": d.get("enabled"),
+                 "Dynamic Executor Allocation Min Executors": d.get("minExecutors"),
+                 "Dynamic Executor Allocation Max Executors": d.get("maxExecutors"),
+             }
+         )
+
+     if rows:
+         df = pd.DataFrame(rows, columns=list(columns.keys()))
+         _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+     return df
+
+
+ @log
+ def create_custom_pool(
+     pool_name: str,
+     node_size: str,
+     min_node_count: int,
+     max_node_count: int,
+     min_executors: int,
+     max_executors: int,
+     node_family: str = "MemoryOptimized",
+     auto_scale_enabled: bool = True,
+     dynamic_executor_allocation_enabled: bool = True,
+     workspace: Optional[str | UUID] = None,
+ ):
+     """
+     Creates a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
+
+     This is a wrapper function for the following API: `Custom Pools - Create Workspace Custom Pool <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool>`_.
+
+     Parameters
+     ----------
+     pool_name : str
+         The custom pool name.
+     node_size : str
+         The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
+     min_node_count : int
+         The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
+     max_node_count : int
+         The maximum node count.
+     min_executors : int
+         The `minimum executors <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
+     max_executors : int
+         The maximum executors.
+     node_family : str, default='MemoryOptimized'
+         The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
+     auto_scale_enabled : bool, default=True
+         The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
+     dynamic_executor_allocation_enabled : bool, default=True
+         The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
+     workspace : str | uuid.UUID, default=None
+         The name or ID of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     payload = {
+         "name": pool_name,
+         "nodeFamily": node_family,
+         "nodeSize": node_size,
+         "autoScale": {
+             "enabled": auto_scale_enabled,
+             "minNodeCount": min_node_count,
+             "maxNodeCount": max_node_count,
+         },
+         "dynamicExecutorAllocation": {
+             "enabled": dynamic_executor_allocation_enabled,
+             "minExecutors": min_executors,
+             "maxExecutors": max_executors,
+         },
+     }
+
+     _base_api(
+         request=f"/v1/workspaces/{workspace_id}/spark/pools",
+         payload=payload,
+         method="post",
+         status_codes=201,
+     )
+     print(
+         f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace_name}' workspace."
+     )
+
+
+ @log
+ def update_custom_pool(
+     pool_name: str,
+     node_size: Optional[str] = None,
+     min_node_count: Optional[int] = None,
+     max_node_count: Optional[int] = None,
+     min_executors: Optional[int] = None,
+     max_executors: Optional[int] = None,
+     node_family: Optional[str] = None,
+     auto_scale_enabled: Optional[bool] = None,
+     dynamic_executor_allocation_enabled: Optional[bool] = None,
+     workspace: Optional[str | UUID] = None,
+ ):
+     """
+     Updates the properties of a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
+
+     This is a wrapper function for the following API: `Custom Pools - Update Workspace Custom Pool <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool>`_.
+
+     Parameters
+     ----------
+     pool_name : str
+         The custom pool name.
+     node_size : str, default=None
+         The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
+         Defaults to None which keeps the existing property setting.
+     min_node_count : int, default=None
+         The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     max_node_count : int, default=None
+         The maximum node count.
+         Defaults to None which keeps the existing property setting.
+     min_executors : int, default=None
+         The `minimum executors <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     max_executors : int, default=None
+         The maximum executors.
+         Defaults to None which keeps the existing property setting.
+     node_family : str, default=None
+         The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
+         Defaults to None which keeps the existing property setting.
+     auto_scale_enabled : bool, default=None
+         The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     dynamic_executor_allocation_enabled : bool, default=None
+         The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     workspace : str | uuid.UUID, default=None
+         The name or ID of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     df = list_custom_pools(workspace=workspace)
+     df_pool = df[df["Custom Pool Name"] == pool_name]
+
+     if len(df_pool) == 0:
+         raise ValueError(
+             f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace_name}' workspace. Please choose a valid custom pool."
+         )
+
+     # Fall back to the existing settings, using the column names returned by list_custom_pools
+     if node_family is None:
+         node_family = df_pool["Node Family"].iloc[0]
+     if node_size is None:
+         node_size = df_pool["Node Size"].iloc[0]
+     if auto_scale_enabled is None:
+         auto_scale_enabled = bool(df_pool["Auto Scale Enabled"].iloc[0])
+     if min_node_count is None:
+         min_node_count = int(df_pool["Auto Scale Min Node Count"].iloc[0])
+     if max_node_count is None:
+         max_node_count = int(df_pool["Auto Scale Max Node Count"].iloc[0])
+     if dynamic_executor_allocation_enabled is None:
+         dynamic_executor_allocation_enabled = bool(
+             df_pool["Dynamic Executor Allocation Enabled"].iloc[0]
+         )
+     if min_executors is None:
+         min_executors = int(df_pool["Dynamic Executor Allocation Min Executors"].iloc[0])
+     if max_executors is None:
+         max_executors = int(df_pool["Dynamic Executor Allocation Max Executors"].iloc[0])
+
+     payload = {
+         "name": pool_name,
+         "nodeFamily": node_family,
+         "nodeSize": node_size,
+         "autoScale": {
+             "enabled": auto_scale_enabled,
+             "minNodeCount": min_node_count,
+             "maxNodeCount": max_node_count,
+         },
+         "dynamicExecutorAllocation": {
+             "enabled": dynamic_executor_allocation_enabled,
+             "minExecutors": min_executors,
+             "maxExecutors": max_executors,
+         },
+     }
+
+     # The Update Workspace Custom Pool API patches the specific pool by its ID
+     pool_id = df_pool["Custom Pool ID"].iloc[0]
+
+     _base_api(
+         request=f"/v1/workspaces/{workspace_id}/spark/pools/{pool_id}",
+         payload=payload,
+         method="patch",
+     )
+     print(
+         f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace_name}' workspace has been updated."
+     )
+
+
+ @log
+ def delete_custom_pool(pool_name: str, workspace: Optional[str | UUID] = None):
+     """
+     Deletes a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
+
+     This is a wrapper function for the following API: `Custom Pools - Delete Workspace Custom Pool <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/delete-workspace-custom-pool>`_.
+
+     Parameters
+     ----------
+     pool_name : str
+         The custom pool name.
+     workspace : str | uuid.UUID, default=None
+         The name or ID of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     dfL = list_custom_pools(workspace=workspace_id)
+     dfL_filt = dfL[dfL["Custom Pool Name"] == pool_name]
+
+     if dfL_filt.empty:
+         raise ValueError(
+             f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace_name}' workspace."
+         )
+     pool_id = dfL_filt["Custom Pool ID"].iloc[0]
+
+     _base_api(
+         request=f"/v1/workspaces/{workspace_id}/spark/pools/{pool_id}", method="delete"
+     )
+     print(
+         f"{icons.green_dot} The '{pool_name}' spark pool has been deleted from the '{workspace_name}' workspace."
+     )
+
+
+ @log
+ def get_spark_settings(
+     workspace: Optional[str | UUID] = None, return_dataframe: bool = True
+ ) -> pd.DataFrame | dict:
+     """
+     Shows the spark settings for a workspace.
+
+     This is a wrapper function for the following API: `Workspace Settings - Get Spark Settings <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/get-spark-settings>`_.
+
+     Parameters
+     ----------
+     workspace : str | uuid.UUID, default=None
+         The name or ID of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     return_dataframe : bool, default=True
+         If True, returns a pandas dataframe. If False, returns a json dictionary.
+
+     Returns
+     -------
+     pandas.DataFrame | dict
+         A pandas dataframe showing the spark settings for a workspace.
+     """
+
+     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     df = pd.DataFrame(
+         columns=[
+             "Automatic Log Enabled",
+             "High Concurrency Enabled",
+             "Customize Compute Enabled",
+             "Default Pool Name",
+             "Default Pool Type",
+             "Max Node Count",
+             "Max Executors",
+             "Environment Name",
+             "Runtime Version",
+         ]
+     )
+
+     response = _base_api(request=f"/v1/workspaces/{workspace_id}/spark/settings")
+
+     i = response.json()
+     p = i.get("pool", {})
+     dp = i.get("pool", {}).get("defaultPool", {})
+     sp = i.get("pool", {}).get("starterPool", {})
+     e = i.get("environment", {})
+
+     new_data = {
+         "Automatic Log Enabled": i.get("automaticLog", {}).get("enabled"),
+         "High Concurrency Enabled": i.get("highConcurrency", {}).get(
+             "notebookInteractiveRunEnabled"
+         ),
+         "Customize Compute Enabled": p.get("customizeComputeEnabled"),
+         "Default Pool Name": dp.get("name"),
+         "Default Pool Type": dp.get("type"),
+         "Max Node Count": sp.get("maxNodeCount"),
+         "Max Executors": sp.get("maxExecutors"),
+         "Environment Name": e.get("name"),
+         "Runtime Version": e.get("runtimeVersion"),
+     }
+     df = pd.DataFrame([new_data])
+
+     column_map = {
+         "Automatic Log Enabled": "bool",
+         "High Concurrency Enabled": "bool",
+         "Customize Compute Enabled": "bool",
+     }
+
+     _update_dataframe_datatypes(dataframe=df, column_map=column_map)
+
+     if return_dataframe:
+         return df
+     else:
+         return response.json()
+
+
+ @log
+ def update_spark_settings(
+     automatic_log_enabled: Optional[bool] = None,
+     high_concurrency_enabled: Optional[bool] = None,
+     customize_compute_enabled: Optional[bool] = None,
+     default_pool_name: Optional[str] = None,
+     max_node_count: Optional[int] = None,
+     max_executors: Optional[int] = None,
+     environment_name: Optional[str] = None,
+     runtime_version: Optional[str] = None,
+     workspace: Optional[str | UUID] = None,
+ ):
+     """
+     Updates the spark settings for a workspace.
+
+     This is a wrapper function for the following API: `Workspace Settings - Update Spark Settings <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings>`_.
+
+     Parameters
+     ----------
+     automatic_log_enabled : bool, default=None
+         The status of the `automatic log <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#automaticlogproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     high_concurrency_enabled : bool, default=None
+         The status of the `high concurrency <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#highconcurrencyproperties>`_ for notebook interactive run.
+         Defaults to None which keeps the existing property setting.
+     customize_compute_enabled : bool, default=None
+         `Customize compute <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ configurations for items.
+         Defaults to None which keeps the existing property setting.
+     default_pool_name : str, default=None
+         `Default pool <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ for workspace.
+         Defaults to None which keeps the existing property setting.
+     max_node_count : int, default=None
+         The maximum node count.
+         Defaults to None which keeps the existing property setting.
+     max_executors : int, default=None
+         The maximum executors.
+         Defaults to None which keeps the existing property setting.
+     environment_name : str, default=None
+         The name of the `default environment <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_. An empty string indicates there is no workspace default environment.
+         Defaults to None which keeps the existing property setting.
+     runtime_version : str, default=None
+         The `runtime version <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     workspace : str | uuid.UUID, default=None
+         The name or ID of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     payload = get_spark_settings(workspace=workspace, return_dataframe=False)
+
+     if automatic_log_enabled is not None:
+         payload["automaticLog"]["enabled"] = automatic_log_enabled
+     if high_concurrency_enabled is not None:
+         payload["highConcurrency"][
+             "notebookInteractiveRunEnabled"
+         ] = high_concurrency_enabled
+     if customize_compute_enabled is not None:
+         payload["pool"]["customizeComputeEnabled"] = customize_compute_enabled
+     if default_pool_name is not None:
+         payload["pool"]["defaultPool"]["name"] = default_pool_name
+     if max_node_count is not None:
+         payload["pool"]["starterPool"]["maxNodeCount"] = max_node_count
+     if max_executors is not None:
+         payload["pool"]["starterPool"]["maxExecutors"] = max_executors
+     if environment_name is not None:
+         payload["environment"]["name"] = environment_name
+     if runtime_version is not None:
+         payload["environment"]["runtimeVersion"] = runtime_version
+
+     _base_api(
+         request=f"/v1/workspaces/{workspace_id}/spark/settings",
+         payload=payload,
+         method="patch",
+     )
+     print(
+         f"{icons.green_dot} The spark settings within the '{workspace_name}' workspace have been updated accordingly."
+     )
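
A minimal usage sketch for the Spark pool and workspace settings helpers above, as they might be run from a Microsoft Fabric notebook. The pool name and sizing values are hypothetical placeholders; the import uses the private module path shown in this diff (the package-level __init__.py listed above likely re-exports the same functions).

    from sempy_labs._spark import (
        list_custom_pools,
        create_custom_pool,
        update_custom_pool,
        get_spark_settings,
    )

    # List existing custom pools in the workspace resolved from the notebook context
    pools = list_custom_pools()

    # Create a small memory-optimized pool (hypothetical name and sizing)
    create_custom_pool(
        pool_name="AnalyticsPool",
        node_size="Small",
        min_node_count=1,
        max_node_count=4,
        min_executors=1,
        max_executors=2,
    )

    # Later, raise the node ceiling and inspect the workspace-level Spark settings
    update_custom_pool(pool_name="AnalyticsPool", max_node_count=6)
    settings = get_spark_settings()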
sempy_labs/_sql.py ADDED
@@ -0,0 +1,241 @@
+ import pandas as pd
+ from typing import Optional, Union, List
+ from sempy._utils._log import log
+ import struct
+ from itertools import chain, repeat
+ from sempy_labs._helper_functions import (
+     resolve_lakehouse_name_and_id,
+     resolve_item_name_and_id,
+     resolve_workspace_name_and_id,
+     _base_api,
+ )
+ from uuid import UUID
+
+
+ def _bytes2mswin_bstr(value: bytes) -> bytes:
+     """Convert a sequence of bytes into a (MS-Windows) BSTR (as bytes).
+
+     See https://github.com/mkleehammer/pyodbc/issues/228#issuecomment-319190980
+     for the original code. It appears the input is converted to an
+     MS-Windows BSTR (in 'Little-endian' format).
+
+     See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp\
+     /692a42a9-06ce-4394-b9bc-5d2a50440168
+     for more info on BSTR.
+
+     :param value: the sequence of bytes to convert
+     :return: the converted value (as a sequence of bytes)
+     """
+
+     encoded_bytes = bytes(chain.from_iterable(zip(value, repeat(0))))
+     return struct.pack("<i", len(encoded_bytes)) + encoded_bytes
+
+
+ class ConnectBase:
+     def __init__(
+         self,
+         item: str | UUID,
+         workspace: Optional[Union[str, UUID]] = None,
+         timeout: Optional[int] = None,
+         endpoint_type: str = "warehouse",
+     ):
+         from sempy.fabric._credentials import get_access_token
+         import pyodbc
+
+         (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+         # Resolve the appropriate ID and name (warehouse or lakehouse)
+         if endpoint_type == "sqldatabase":
+             # SQLDatabase has a special case for resolving the name and id
+             (resource_name, resource_id) = resolve_item_name_and_id(
+                 item=item, type="SQLDatabase", workspace=workspace_id
+             )
+         elif endpoint_type == "lakehouse":
+             (resource_name, resource_id) = resolve_lakehouse_name_and_id(
+                 lakehouse=item,
+                 workspace=workspace_id,
+             )
+         else:
+             (resource_name, resource_id) = resolve_item_name_and_id(
+                 item=item, workspace=workspace_id, type=endpoint_type.capitalize()
+             )
+
+         endpoint_for_url = (
+             "sqlDatabases" if endpoint_type == "sqldatabase" else f"{endpoint_type}s"
+         )
+
+         # Get the TDS endpoint
+         response = _base_api(
+             request=f"v1/workspaces/{workspace_id}/{endpoint_for_url}/{resource_id}"
+         )
+
+         if endpoint_type == "warehouse":
+             tds_endpoint = response.json().get("properties", {}).get("connectionString")
+         elif endpoint_type == "sqldatabase":
+             tds_endpoint = response.json().get("properties", {}).get("serverFqdn")
+         else:
+             tds_endpoint = (
+                 response.json()
+                 .get("properties", {})
+                 .get("sqlEndpointProperties", {})
+                 .get("connectionString")
+             )
+
+         # Set up the connection string
+         access_token = get_access_token("sql").token
+         tokenstruct = _bytes2mswin_bstr(access_token.encode())
+         if endpoint_type == "sqldatabase":
+             conn_str = f"DRIVER={{ODBC Driver 18 for SQL Server}};SERVER={tds_endpoint};DATABASE={resource_name}-{resource_id};Encrypt=Yes;"
+         else:
+             conn_str = f"DRIVER={{ODBC Driver 18 for SQL Server}};SERVER={tds_endpoint};DATABASE={resource_name};Encrypt=Yes;"
+
+         if timeout is not None:
+             conn_str += f"Connect Timeout={timeout};"
+
+         self.connection = pyodbc.connect(conn_str, attrs_before={1256: tokenstruct})
+
+     @log
+     def query(
+         self, sql: Union[str, List[str]]
+     ) -> Union[List[pd.DataFrame], pd.DataFrame, None]:
+         """
+         Runs a SQL or T-SQL query (or multiple queries) against a Fabric Warehouse/Lakehouse.
+
+         Parameters
+         ----------
+         sql : str or List[str]
+             A single SQL or T-SQL query, or a list of queries to be executed.
+
+         Returns
+         -------
+         Union[List[pandas.DataFrame], pandas.DataFrame, None]
+             A list of pandas DataFrames if multiple SQL queries return results,
+             a single DataFrame if one query is executed and returns results, or None.
+         """
+
+         cursor = None
+         results = []
+
+         if isinstance(sql, str):
+             sql = [sql]
+
+         try:
+             cursor = self.connection.cursor()
+
+             for sql_query in sql:
+                 cursor.execute(sql_query)
+
+                 # Commit for non-select queries (like CREATE, INSERT, etc.)
+                 if not cursor.description:
+                     self.connection.commit()
+                 else:
+                     # Fetch and append results for queries that return a result set
+                     result = pd.DataFrame.from_records(
+                         cursor.fetchall(),
+                         columns=[col[0] for col in cursor.description],
+                     )
+                     results.append(result)
+
+             # Return results if any queries returned a result set
+             return results if len(results) > 1 else (results[0] if results else None)
+
+         finally:
+             if cursor:
+                 cursor.close()
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, type, value, traceback):
+         self.close()
+
+     def close(self):
+         self.connection.close()
+
+
+ class ConnectWarehouse(ConnectBase):
+     def __init__(
+         self,
+         warehouse: str | UUID,
+         workspace: Optional[Union[str, UUID]] = None,
+         timeout: int = 30,
+     ):
+         """
+         Run a SQL or T-SQL query against a Fabric Warehouse.
+
+         Parameters
+         ----------
+         warehouse : str | uuid.UUID
+             The name or ID of the Fabric warehouse.
+         workspace : str | uuid.UUID, default=None
+             The name or ID of the workspace.
+             Defaults to None which resolves to the workspace of the attached lakehouse
+             or if no lakehouse attached, resolves to the workspace of the notebook.
+         timeout : int, default=30
+             The timeout for the connection in seconds.
+         """
+         super().__init__(
+             item=warehouse,
+             workspace=workspace,
+             timeout=timeout,
+             endpoint_type="warehouse",
+         )
+
+
+ class ConnectLakehouse(ConnectBase):
+     def __init__(
+         self,
+         lakehouse: Optional[str | UUID] = None,
+         workspace: Optional[Union[str, UUID]] = None,
+         timeout: int = 30,
+     ):
+         """
+         Run a SQL or T-SQL query against a Fabric lakehouse.
+
+         Parameters
+         ----------
+         lakehouse : str | uuid.UUID, default=None
+             The name or ID of the Fabric lakehouse.
+             Defaults to None which resolves to the lakehouse attached to the notebook.
+         workspace : str | uuid.UUID, default=None
+             The name or ID of the workspace.
+             Defaults to None which resolves to the workspace of the attached lakehouse
+             or if no lakehouse attached, resolves to the workspace of the notebook.
+         timeout : int, default=30
+             The timeout for the connection in seconds.
+         """
+         super().__init__(
+             item=lakehouse,
+             workspace=workspace,
+             timeout=timeout,
+             endpoint_type="lakehouse",
+         )
+
+
+ class ConnectSQLDatabase(ConnectBase):
+     def __init__(
+         self,
+         sql_database: str | UUID,
+         workspace: Optional[Union[str, UUID]] = None,
+         timeout: int = 30,
+     ):
+         """
+         Run a SQL or T-SQL query against a Fabric SQL database.
+
+         Parameters
+         ----------
+         sql_database : str | uuid.UUID
+             The name or ID of the Fabric SQL database.
+         workspace : str | uuid.UUID, default=None
+             The name or ID of the workspace.
+             Defaults to None which resolves to the workspace of the attached lakehouse
+             or if no lakehouse attached, resolves to the workspace of the notebook.
+         timeout : int, default=30
+             The timeout for the connection in seconds.
+         """
+         super().__init__(
+             item=sql_database,
+             workspace=workspace,
+             timeout=timeout,
+             endpoint_type="sqldatabase",
+         )
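
A minimal usage sketch for the connection classes above, run from a Fabric notebook with the ODBC Driver 18 for SQL Server available. The warehouse name and queries are hypothetical placeholders; the import uses the private module path shown in this diff.

    from sempy_labs._sql import ConnectWarehouse

    # The context manager opens a token-authenticated pyodbc connection to the
    # warehouse's TDS endpoint and closes it on exit.
    with ConnectWarehouse(warehouse="SalesWarehouse") as conn:
        # A single query returns a single DataFrame (or None for DDL/DML, which is committed)
        top_sales = conn.query("SELECT TOP 10 * FROM dbo.FactSales")

        # A list of queries returns a list of DataFrames
        counts = conn.query(
            [
                "SELECT COUNT(*) AS n FROM dbo.DimDate",
                "SELECT COUNT(*) AS n FROM dbo.DimCustomer",
            ]
        )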