semantic-link-labs 0.7.3__py3-none-any.whl → 0.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (60)
  1. {semantic_link_labs-0.7.3.dist-info → semantic_link_labs-0.7.4.dist-info}/METADATA +14 -3
  2. {semantic_link_labs-0.7.3.dist-info → semantic_link_labs-0.7.4.dist-info}/RECORD +60 -44
  3. {semantic_link_labs-0.7.3.dist-info → semantic_link_labs-0.7.4.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +63 -24
  5. sempy_labs/_bpa_translation/{_translations_am-ET.po → _model/_translations_am-ET.po} +22 -0
  6. sempy_labs/_bpa_translation/{_translations_ar-AE.po → _model/_translations_ar-AE.po} +24 -0
  7. sempy_labs/_bpa_translation/_model/_translations_bg-BG.po +938 -0
  8. sempy_labs/_bpa_translation/_model/_translations_ca-ES.po +934 -0
  9. sempy_labs/_bpa_translation/{_translations_cs-CZ.po → _model/_translations_cs-CZ.po} +179 -157
  10. sempy_labs/_bpa_translation/{_translations_da-DK.po → _model/_translations_da-DK.po} +24 -0
  11. sempy_labs/_bpa_translation/{_translations_de-DE.po → _model/_translations_de-DE.po} +77 -52
  12. sempy_labs/_bpa_translation/{_translations_el-GR.po → _model/_translations_el-GR.po} +25 -0
  13. sempy_labs/_bpa_translation/{_translations_es-ES.po → _model/_translations_es-ES.po} +67 -43
  14. sempy_labs/_bpa_translation/{_translations_fa-IR.po → _model/_translations_fa-IR.po} +24 -0
  15. sempy_labs/_bpa_translation/_model/_translations_fi-FI.po +915 -0
  16. sempy_labs/_bpa_translation/{_translations_fr-FR.po → _model/_translations_fr-FR.po} +83 -57
  17. sempy_labs/_bpa_translation/{_translations_ga-IE.po → _model/_translations_ga-IE.po} +25 -0
  18. sempy_labs/_bpa_translation/{_translations_he-IL.po → _model/_translations_he-IL.po} +23 -0
  19. sempy_labs/_bpa_translation/{_translations_hi-IN.po → _model/_translations_hi-IN.po} +24 -0
  20. sempy_labs/_bpa_translation/{_translations_hu-HU.po → _model/_translations_hu-HU.po} +25 -0
  21. sempy_labs/_bpa_translation/_model/_translations_id-ID.po +918 -0
  22. sempy_labs/_bpa_translation/{_translations_is-IS.po → _model/_translations_is-IS.po} +25 -0
  23. sempy_labs/_bpa_translation/{_translations_it-IT.po → _model/_translations_it-IT.po} +25 -0
  24. sempy_labs/_bpa_translation/{_translations_ja-JP.po → _model/_translations_ja-JP.po} +21 -0
  25. sempy_labs/_bpa_translation/_model/_translations_ko-KR.po +823 -0
  26. sempy_labs/_bpa_translation/_model/_translations_mt-MT.po +937 -0
  27. sempy_labs/_bpa_translation/{_translations_nl-NL.po → _model/_translations_nl-NL.po} +80 -56
  28. sempy_labs/_bpa_translation/{_translations_pl-PL.po → _model/_translations_pl-PL.po} +101 -76
  29. sempy_labs/_bpa_translation/{_translations_pt-BR.po → _model/_translations_pt-BR.po} +25 -0
  30. sempy_labs/_bpa_translation/{_translations_pt-PT.po → _model/_translations_pt-PT.po} +25 -0
  31. sempy_labs/_bpa_translation/_model/_translations_ro-RO.po +939 -0
  32. sempy_labs/_bpa_translation/{_translations_ru-RU.po → _model/_translations_ru-RU.po} +25 -0
  33. sempy_labs/_bpa_translation/_model/_translations_sk-SK.po +925 -0
  34. sempy_labs/_bpa_translation/_model/_translations_sl-SL.po +922 -0
  35. sempy_labs/_bpa_translation/{_translations_ta-IN.po → _model/_translations_ta-IN.po} +26 -0
  36. sempy_labs/_bpa_translation/{_translations_te-IN.po → _model/_translations_te-IN.po} +24 -0
  37. sempy_labs/_bpa_translation/{_translations_th-TH.po → _model/_translations_th-TH.po} +24 -0
  38. sempy_labs/_bpa_translation/_model/_translations_tr-TR.po +925 -0
  39. sempy_labs/_bpa_translation/_model/_translations_uk-UA.po +933 -0
  40. sempy_labs/_bpa_translation/{_translations_zh-CN.po → _model/_translations_zh-CN.po} +116 -97
  41. sempy_labs/_bpa_translation/{_translations_zu-ZA.po → _model/_translations_zu-ZA.po} +25 -0
  42. sempy_labs/_capacities.py +541 -0
  43. sempy_labs/_connections.py +138 -0
  44. sempy_labs/_environments.py +156 -0
  45. sempy_labs/_helper_functions.py +146 -8
  46. sempy_labs/_icons.py +43 -0
  47. sempy_labs/_list_functions.py +35 -900
  48. sempy_labs/_model_bpa.py +8 -32
  49. sempy_labs/_notebooks.py +143 -0
  50. sempy_labs/_query_scale_out.py +28 -7
  51. sempy_labs/_spark.py +465 -0
  52. sempy_labs/_sql.py +35 -11
  53. sempy_labs/_translations.py +3 -0
  54. sempy_labs/_vertipaq.py +160 -99
  55. sempy_labs/_workspaces.py +294 -0
  56. sempy_labs/directlake/_directlake_schema_sync.py +1 -2
  57. sempy_labs/tom/_model.py +5 -1
  58. {semantic_link_labs-0.7.3.dist-info → semantic_link_labs-0.7.4.dist-info}/LICENSE +0 -0
  59. {semantic_link_labs-0.7.3.dist-info → semantic_link_labs-0.7.4.dist-info}/top_level.txt +0 -0
  60. /sempy_labs/_bpa_translation/{_translations_sv-SE.po → _model/_translations_sv-SE.po} +0 -0
@@ -4,15 +4,12 @@ from sempy_labs._helper_functions import (
     create_relationship_name,
     resolve_lakehouse_id,
     resolve_dataset_id,
-    _decode_b64,
     pagination,
     lro,
     resolve_item_type,
+    format_dax_object_name,
 )
 import pandas as pd
-import base64
-import requests
-from pyspark.sql import SparkSession
 from typing import Optional
 import sempy_labs._icons as icons
 from sempy.fabric.exceptions import FabricHTTPException
@@ -169,10 +166,19 @@ def list_tables(
             dataset=dataset,
             workspace=workspace,
             dax_string="""
-            SELECT [DIMENSION_NAME],[DIMENSION_CARDINALITY] FROM $SYSTEM.MDSCHEMA_DIMENSIONS
+            SELECT [DIMENSION_NAME],[ROWS_COUNT] FROM $SYSTEM.DISCOVER_STORAGE_TABLES
+            WHERE RIGHT ( LEFT ( TABLE_ID, 2 ), 1 ) <> '$'
             """,
         )

+        model_size = (
+            dict_sum.sum()
+            + data_sum.sum()
+            + hier_sum.sum()
+            + rel_sum.sum()
+            + uh_sum.sum()
+        )
+
     rows = []
     for t in tom.model.Tables:
         t_name = t.Name
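Note on the hunk above: table row counts are now read from the DISCOVER_STORAGE_TABLES DMV instead of MDSCHEMA_DIMENSIONS. A minimal sketch of running that same DMV query on its own with semantic-link, using placeholder dataset/workspace names that are not part of this diff:

import sempy.fabric as fabric

# Standalone check of the DMV used above; the WHERE clause filters out
# internal segments whose TABLE_ID has '$' as its second character.
rc = fabric.evaluate_dax(
    dataset="AdventureWorks",   # placeholder dataset name
    workspace="My Workspace",   # placeholder workspace name
    dax_string="""
    SELECT [DIMENSION_NAME],[ROWS_COUNT] FROM $SYSTEM.DISCOVER_STORAGE_TABLES
    WHERE RIGHT ( LEFT ( TABLE_ID, 2 ), 1 ) <> '$'
    """,
)
print(rc.head())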
@@ -209,9 +215,7 @@ def list_tables(
             new_data.update(
                 {
                     "Row Count": (
-                        rc[rc["DIMENSION_NAME"] == t_name][
-                            "DIMENSION_CARDINALITY"
-                        ].iloc[0]
+                        rc[rc["DIMENSION_NAME"] == t_name]["ROWS_COUNT"].iloc[0]
                         if not rc.empty
                         else 0
                     ),
@@ -221,24 +225,33 @@ def list_tables(
                     "Hierarchy Size": h_size,
                     "Relationship Size": r_size,
                     "User Hierarchy Size": u_size,
+                    "Partitions": int(len(t.Partitions)),
+                    "Columns": sum(
+                        1 for c in t.Columns if str(c.Type) != "RowNumber"
+                    ),
+                    "% DB": round((total_size / model_size) * 100, 2),
                 }
             )

             rows.append(new_data)

-    int_cols = [
-        "Row Count",
-        "Total Size",
-        "Dictionary Size",
-        "Data Size",
-        "Hierarchy Size",
-        "Relationship Size",
-        "User Hierarchy Size",
-    ]
-    df[int_cols] = df[int_cols].astype(int)
-
     df = pd.DataFrame(rows)

+    if extended:
+        int_cols = [
+            "Row Count",
+            "Total Size",
+            "Dictionary Size",
+            "Data Size",
+            "Hierarchy Size",
+            "Relationship Size",
+            "User Hierarchy Size",
+            "Partitions",
+            "Columns",
+        ]
+        df[int_cols] = df[int_cols].astype(int)
+        df["% DB"] = df["% DB"].astype(float)
+
     return df

@@ -505,6 +518,7 @@ def list_columns(
     from sempy_labs.directlake._get_directlake_lakehouse import (
         get_direct_lake_lakehouse,
     )
+    from pyspark.sql import SparkSession

     workspace = fabric.resolve_workspace_name(workspace)

@@ -1274,6 +1288,8 @@ def list_relationships(
     workspace = fabric.resolve_workspace_name(workspace)

     dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
+    dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
+    dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])

     if extended:
         # Used to map the Relationship IDs
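The two added lines above use the format_dax_object_name helper now imported at the top of the module. A rough, hypothetical sketch of what such a helper does (the real implementation lives in sempy_labs._helper_functions and may differ):

def format_dax_object_name(table, column):
    # Hypothetical illustration: builds a fully qualified DAX object name,
    # e.g. ("Sales", "Amount") -> "'Sales'[Amount]". String concatenation also
    # works element-wise when table/column are pandas Series, as in the hunk above.
    return "'" + table + "'[" + column + "]"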
@@ -1393,48 +1409,6 @@ def list_kpis(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
     return df


-def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    Shows the members of a given workspace.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The Fabric workspace name.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the members of a given workspace and their roles.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(columns=["User Name", "User Email", "Role Name", "Type"])
-
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
-    for r in responses:
-        for i in r.get("value", []):
-            principal = i.get("principal", {})
-            new_data = {
-                "User Name": principal.get("displayName"),
-                "Role Name": i.get("role"),
-                "Type": principal.get("type"),
-                "User Email": principal.get("userDetails", {}).get("userPrincipalName"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
 def list_semantic_model_objects(
     dataset: str, workspace: Optional[str] = None
 ) -> pd.DataFrame:
@@ -1709,717 +1683,6 @@ def list_shortcuts(
     return df


-def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    Lists all `custom pools <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing all the custom pools within the Fabric workspace.
-    """
-
-    # https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(
-        columns=[
-            "Custom Pool ID",
-            "Custom Pool Name",
-            "Type",
-            "Node Family",
-            "Node Size",
-            "Auto Scale Enabled",
-            "Auto Scale Min Node Count",
-            "Auto Scale Max Node Count",
-            "Dynamic Executor Allocation Enabled",
-            "Dynamic Executor Allocation Min Executors",
-            "Dynamic Executor Allocation Max Executors",
-        ]
-    )
-
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/spark/pools")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    for i in response.json()["value"]:
-
-        aScale = i.get("autoScale", {})
-        d = i.get("dynamicExecutorAllocation", {})
-
-        new_data = {
-            "Custom Pool ID": i.get("id"),
-            "Custom Pool Name": i.get("name"),
-            "Type": i.get("type"),
-            "Node Family": i.get("nodeFamily"),
-            "Node Size": i.get("nodeSize"),
-            "Auto Scale Enabled": aScale.get("enabled"),
-            "Auto Scale Min Node Count": aScale.get("minNodeCount"),
-            "Auto Scale Max Node Count": aScale.get("maxNodeCount"),
-            "Dynamic Executor Allocation Enabled": d.get("enabled"),
-            "Dynamic Executor Allocation Min Executors": d.get("minExecutors"),
-            "Dynamic Executor Allocation Max Executors": d.get("maxExecutors"),
-        }
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    bool_cols = ["Auto Scale Enabled", "Dynamic Executor Allocation Enabled"]
-    int_cols = [
-        "Auto Scale Min Node Count",
-        "Auto Scale Max Node Count",
-        "Dynamic Executor Allocation Enabled",
-        "Dynamic Executor Allocation Min Executors",
-        "Dynamic Executor Allocation Max Executors",
-    ]
-
-    df[bool_cols] = df[bool_cols].astype(bool)
-    df[int_cols] = df[int_cols].astype(int)
-
-    return df
-
-
-def create_custom_pool(
-    pool_name: str,
-    node_size: str,
-    min_node_count: int,
-    max_node_count: int,
-    min_executors: int,
-    max_executors: int,
-    node_family: Optional[str] = "MemoryOptimized",
-    auto_scale_enabled: Optional[bool] = True,
-    dynamic_executor_allocation_enabled: Optional[bool] = True,
-    workspace: Optional[str] = None,
-):
-    """
-    Creates a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    pool_name : str
-        The custom pool name.
-    node_size : str
-        The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
-    min_node_count : int
-        The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-    max_node_count : int
-        The `maximum node count <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-    min_executors : int
-        The `minimum executors <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-    max_executors : int
-        The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-    node_family : str, default='MemoryOptimized'
-        The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
-    auto_scale_enabled : bool, default=True
-        The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-    dynamic_executor_allocation_enabled : bool, default=True
-        The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    request_body = {
-        "name": pool_name,
-        "nodeFamily": node_family,
-        "nodeSize": node_size,
-        "autoScale": {
-            "enabled": auto_scale_enabled,
-            "minNodeCount": min_node_count,
-            "maxNodeCount": max_node_count,
-        },
-        "dynamicExecutorAllocation": {
-            "enabled": dynamic_executor_allocation_enabled,
-            "minExecutors": min_executors,
-            "maxExecutors": max_executors,
-        },
-    }
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body
-    )
-
-    if response.status_code != 201:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace."
-    )
-
-
-def update_custom_pool(
-    pool_name: str,
-    node_size: Optional[str] = None,
-    min_node_count: Optional[int] = None,
-    max_node_count: Optional[int] = None,
-    min_executors: Optional[int] = None,
-    max_executors: Optional[int] = None,
-    node_family: Optional[str] = None,
-    auto_scale_enabled: Optional[bool] = None,
-    dynamic_executor_allocation_enabled: Optional[bool] = None,
-    workspace: Optional[str] = None,
-):
-    """
-    Updates the properties of a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    pool_name : str
-        The custom pool name.
-    node_size : str, default=None
-        The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
-        Defaults to None which keeps the existing property setting.
-    min_node_count : int, default=None
-        The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    max_node_count : int, default=None
-        The `maximum node count <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    min_executors : int, default=None
-        The `minimum executors <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    max_executors : int, default=None
-        The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    node_family : str, default=None
-        The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
-        Defaults to None which keeps the existing property setting.
-    auto_scale_enabled : bool, default=None
-        The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    dynamic_executor_allocation_enabled : bool, default=None
-        The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = list_custom_pools(workspace=workspace)
-    df_pool = df[df["Custom Pool Name"] == pool_name]
-
-    if len(df_pool) == 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool."
-        )
-
-    if node_family is None:
-        node_family = df_pool["Node Family"].iloc[0]
-    if node_size is None:
-        node_size = df_pool["Node Size"].iloc[0]
-    if auto_scale_enabled is None:
-        auto_scale_enabled = bool(df_pool["Auto Scale Enabled"].iloc[0])
-    if min_node_count is None:
-        min_node_count = int(df_pool["Min Node Count"].iloc[0])
-    if max_node_count is None:
-        max_node_count = int(df_pool["Max Node Count"].iloc[0])
-    if dynamic_executor_allocation_enabled is None:
-        dynamic_executor_allocation_enabled = bool(
-            df_pool["Dynami Executor Allocation Enabled"].iloc[0]
-        )
-    if min_executors is None:
-        min_executors = int(df_pool["Min Executors"].iloc[0])
-    if max_executors is None:
-        max_executors = int(df_pool["Max Executors"].iloc[0])
-
-    request_body = {
-        "name": pool_name,
-        "nodeFamily": node_family,
-        "nodeSize": node_size,
-        "autoScale": {
-            "enabled": auto_scale_enabled,
-            "minNodeCount": min_node_count,
-            "maxNodeCount": max_node_count,
-        },
-        "dynamicExecutorAllocation": {
-            "enabled": dynamic_executor_allocation_enabled,
-            "minExecutors": min_executors,
-            "maxExecutors": max_executors,
-        },
-    }
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated."
-    )
-
-
-def delete_custom_pool(pool_name: str, workspace: Optional[str] = None):
-    """
-    Deletes a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    pool_name : str
-        The custom pool name.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfL = list_custom_pools(workspace=workspace)
-    dfL_filt = dfL[dfL["Custom Pool Name"] == pool_name]
-
-    if len(dfL_filt) == 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}' workspace."
-        )
-    poolId = dfL_filt["Custom Pool ID"].iloc[0]
-
-    client = fabric.FabricRestClient()
-    response = client.delete(f"/v1/workspaces/{workspace_id}/spark/pools/{poolId}")
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{pool_name}' spark pool has been deleted from the '{workspace}' workspace."
-    )
-
-
-def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = None):
-    """
-    Assigns a workspace to a capacity.
-
-    Parameters
-    ----------
-    capacity_name : str
-        The name of the capacity.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfC = fabric.list_capacities()
-    dfC_filt = dfC[dfC["Display Name"] == capacity_name]
-
-    if len(dfC_filt) == 0:
-        raise ValueError(f"{icons.red_dot} The '{capacity_name}' capacity does not exist.")
-
-    capacity_id = dfC_filt["Id"].iloc[0]
-
-    request_body = {"capacityId": capacity_id}
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/assignToCapacity",
-        json=request_body,
-    )
-
-    if response.status_code not in [200, 202]:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity."
-    )
-
-
-def unassign_workspace_from_capacity(workspace: Optional[str] = None):
-    """
-    Unassigns a workspace from its assigned capacity.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/unassignFromCapacity"
-    )
-
-    if response.status_code not in [200, 202]:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity."
-    )
-
-
-def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    Shows the spark settings for a workspace.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the spark settings for a workspace.
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(
-        columns=[
-            "Automatic Log Enabled",
-            "High Concurrency Enabled",
-            "Customize Compute Enabled",
-            "Default Pool Name",
-            "Default Pool Type",
-            "Max Node Count",
-            "Max Executors",
-            "Environment Name",
-            "Runtime Version",
-        ]
-    )
-
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/spark/settings")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    i = response.json()
-    p = i.get("pool")
-    dp = i.get("pool", {}).get("defaultPool", {})
-    sp = i.get("pool", {}).get("starterPool", {})
-    e = i.get("environment", {})
-
-    new_data = {
-        "Automatic Log Enabled": i.get("automaticLog").get("enabled"),
-        "High Concurrency Enabled": i.get("highConcurrency").get(
-            "notebookInteractiveRunEnabled"
-        ),
-        "Customize Compute Enabled": p.get("customizeComputeEnabled"),
-        "Default Pool Name": dp.get("name"),
-        "Default Pool Type": dp.get("type"),
-        "Max Node Count": sp.get("maxNodeCount"),
-        "Max Node Executors": sp.get("maxExecutors"),
-        "Environment Name": e.get("name"),
-        "Runtime Version": e.get("runtimeVersion"),
-    }
-    df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    bool_cols = [
-        "Automatic Log Enabled",
-        "High Concurrency Enabled",
-        "Customize Compute Enabled",
-    ]
-    int_cols = ["Max Node Count", "Max Executors"]
-
-    df[bool_cols] = df[bool_cols].astype(bool)
-    df[int_cols] = df[int_cols].astype(int)
-
-    return df
-
-
-def update_spark_settings(
-    automatic_log_enabled: Optional[bool] = None,
-    high_concurrency_enabled: Optional[bool] = None,
-    customize_compute_enabled: Optional[bool] = None,
-    default_pool_name: Optional[str] = None,
-    max_node_count: Optional[int] = None,
-    max_executors: Optional[int] = None,
-    environment_name: Optional[str] = None,
-    runtime_version: Optional[str] = None,
-    workspace: Optional[str] = None,
-):
-    """
-    Updates the spark settings for a workspace.
-
-    Parameters
-    ----------
-    automatic_log_enabled : bool, default=None
-        The status of the `automatic log <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#automaticlogproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    high_concurrency_enabled : bool, default=None
-        The status of the `high concurrency <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#highconcurrencyproperties>`_ for notebook interactive run.
-        Defaults to None which keeps the existing property setting.
-    customize_compute_enabled : bool, default=None
-        `Customize compute <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ configurations for items.
-        Defaults to None which keeps the existing property setting.
-    default_pool_name : str, default=None
-        `Default pool <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ for workspace.
-        Defaults to None which keeps the existing property setting.
-    max_node_count : int, default=None
-        The `maximum node count <https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    max_executors : int, default=None
-        The `maximum executors <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    environment_name : str, default=None
-        The name of the `default environment <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_. Empty string indicated there is no workspace default environment
-        Defaults to None which keeps the existing property setting.
-    runtime_version : str, default=None
-        The `runtime version <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfS = get_spark_settings(workspace=workspace)
-
-    if automatic_log_enabled is None:
-        automatic_log_enabled = bool(dfS["Automatic Log Enabled"].iloc[0])
-    if high_concurrency_enabled is None:
-        high_concurrency_enabled = bool(dfS["High Concurrency Enabled"].iloc[0])
-    if customize_compute_enabled is None:
-        customize_compute_enabled = bool(dfS["Customize Compute Enabled"].iloc[0])
-    if default_pool_name is None:
-        default_pool_name = dfS["Default Pool Name"].iloc[0]
-    if max_node_count is None:
-        max_node_count = int(dfS["Max Node Count"].iloc[0])
-    if max_executors is None:
-        max_executors = int(dfS["Max Executors"].iloc[0])
-    if environment_name is None:
-        environment_name = dfS["Environment Name"].iloc[0]
-    if runtime_version is None:
-        runtime_version = dfS["Runtime Version"].iloc[0]
-
-    request_body = {
-        "automaticLog": {"enabled": automatic_log_enabled},
-        "highConcurrency": {"notebookInteractiveRunEnabled": high_concurrency_enabled},
-        "pool": {
-            "customizeComputeEnabled": customize_compute_enabled,
-            "defaultPool": {"name": default_pool_name, "type": "Workspace"},
-            "starterPool": {
-                "maxNodeCount": max_node_count,
-                "maxExecutors": max_executors,
-            },
-        },
-        "environment": {"name": environment_name, "runtimeVersion": runtime_version},
-    }
-
-    client = fabric.FabricRestClient()
-    response = client.patch(
-        f"/v1/workspaces/{workspace_id}/spark/settings", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly."
-    )
-
-
-def add_user_to_workspace(
-    email_address: str,
-    role_name: str,
-    principal_type: Optional[str] = "User",
-    workspace: Optional[str] = None,
-):
-    """
-    Adds a user to a workspace.
-
-    Parameters
-    ----------
-    email_address : str
-        The email address of the user.
-    role_name : str
-        The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
-    principal_type : str, default='User'
-        The `principal type <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#principaltype>`_.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    role_names = ["Admin", "Member", "Viewer", "Contributor"]
-    role_name = role_name.capitalize()
-    if role_name not in role_names:
-        raise ValueError(
-            f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}."
-        )
-    plural = "n" if role_name == "Admin" else ""
-    principal_types = ["App", "Group", "None", "User"]
-    principal_type = principal_type.capitalize()
-    if principal_type not in principal_types:
-        raise ValueError(
-            f"{icons.red_dot} Invalid princpal type. Valid options: {principal_types}."
-        )
-
-    client = fabric.PowerBIRestClient()
-
-    request_body = {
-        "emailAddress": email_address,
-        "groupUserAccessRight": role_name,
-        "principalType": principal_type,
-        "identifier": email_address,
-    }
-
-    response = client.post(
-        f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace."
-    )
-
-
-def delete_user_from_workspace(email_address: str, workspace: Optional[str] = None):
-    """
-    Removes a user from a workspace.
-
-    Parameters
-    ----------
-    email_address : str
-        The email address of the user.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    client = fabric.PowerBIRestClient()
-    response = client.delete(f"/v1.0/myorg/groups/{workspace_id}/users/{email_address}")
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace."
-    )
-
-
-def update_workspace_user(
-    email_address: str,
-    role_name: str,
-    principal_type: Optional[str] = "User",
-    workspace: Optional[str] = None,
-):
-    """
-    Updates a user's role within a workspace.
-
-    Parameters
-    ----------
-    email_address : str
-        The email address of the user.
-    role_name : str
-        The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
-    principal_type : str, default='User'
-        The `principal type <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#principaltype>`_.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    role_names = ["Admin", "Member", "Viewer", "Contributor"]
-    role_name = role_name.capitalize()
-    if role_name not in role_names:
-        raise ValueError(
-            f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}."
-        )
-    principal_types = ["App", "Group", "None", "User"]
-    principal_type = principal_type.capitalize()
-    if principal_type not in principal_types:
-        raise ValueError(
-            f"{icons.red_dot} Invalid princpal type. Valid options: {principal_types}."
-        )
-
-    request_body = {
-        "emailAddress": email_address,
-        "groupUserAccessRight": role_name,
-        "principalType": principal_type,
-        "identifier": email_address,
-    }
-
-    client = fabric.PowerBIRestClient()
-    response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body)
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace."
-    )
-
-
-def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    A list of all the users of a workspace and their roles.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe the users of a workspace and their properties.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(columns=["User Name", "Email Address", "Role", "Type", "User ID"])
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
-    for r in responses:
-        for v in r.get("value", []):
-            p = v.get("principal", {})
-            new_data = {
-                "User Name": p.get("displayName"),
-                "User ID": p.get("id"),
-                "Type": p.get("type"),
-                "Role": v.get("role"),
-                "Email Address": p.get("userDetails", {}).get("userPrincipalName"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
 def list_capacities() -> pd.DataFrame:
     """
     Shows the capacities and their properties.
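The workspace, capacity and Spark helpers removed in the hunk above line up with the new modules in the file list (sempy_labs/_spark.py +465, sempy_labs/_workspaces.py +294, sempy_labs/_capacities.py +541), so they appear to have been split out rather than dropped. A hedged sketch of calling them after the upgrade, assuming they are still re-exported from the package root via the updated __init__.py:

# Assumption: 0.7.4 re-exports these helpers at the package root; adjust the
# imports if they are only available from the new submodules.
import sempy_labs as labs

pools = labs.list_custom_pools(workspace="My Workspace")    # placeholder workspace
users = labs.list_workspace_users(workspace="My Workspace")
print(pools.head())
print(users.head())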
@@ -2456,134 +1719,6 @@ def list_capacities() -> pd.DataFrame:
     return df


-def get_notebook_definition(
-    notebook_name: str, workspace: Optional[str] = None, decode: Optional[bool] = True
-):
-    """
-    Obtains the notebook definition.
-
-    Parameters
-    ----------
-    notebook_name : str
-        The name of the notebook.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    decode : bool, default=True
-        If True, decodes the notebook definition file into .ipynb format.
-        If False, obtains the notebook definition file in base64 format.
-
-    Returns
-    -------
-    ipynb
-        The notebook definition.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfI = fabric.list_items(workspace=workspace, type="Notebook")
-    dfI_filt = dfI[dfI["Display Name"] == notebook_name]
-
-    if len(dfI_filt) == 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{notebook_name}' notebook does not exist within the '{workspace}' workspace."
-        )
-
-    notebook_id = dfI_filt["Id"].iloc[0]
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"v1/workspaces/{workspace_id}/notebooks/{notebook_id}/getDefinition",
-    )
-
-    result = lro(client, response).json()
-    df_items = pd.json_normalize(result["definition"]["parts"])
-    df_items_filt = df_items[df_items["path"] == "notebook-content.py"]
-    payload = df_items_filt["payload"].iloc[0]
-
-    if decode:
-        result = _decode_b64(payload)
-    else:
-        result = payload
-
-    return result
-
-
-def import_notebook_from_web(
-    notebook_name: str,
-    url: str,
-    description: Optional[str] = None,
-    workspace: Optional[str] = None,
-):
-    """
-    Creates a new notebook within a workspace based on a Jupyter notebook hosted in the web.
-
-    Parameters
-    ----------
-    notebook_name : str
-        The name of the notebook to be created.
-    url : str
-        The url of the Jupyter Notebook (.ipynb)
-    description : str, default=None
-        The description of the notebook.
-        Defaults to None which does not place a description.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-    client = fabric.FabricRestClient()
-    dfI = fabric.list_items(workspace=workspace, type="Notebook")
-    dfI_filt = dfI[dfI["Display Name"] == notebook_name]
-    if len(dfI_filt) > 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{notebook_name}' already exists within the '{workspace}' workspace."
-        )
-
-    # Fix links to go to the raw github file
-    starting_text = "https://github.com/"
-    starting_text_len = len(starting_text)
-    if url.startswith(starting_text):
-        url = f"https://raw.githubusercontent.com/{url[starting_text_len:]}".replace(
-            "/blob/", "/"
-        )
-
-    response = requests.get(url)
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    file_content = response.content
-    notebook_payload = base64.b64encode(file_content)
-
-    request_body = {
-        "displayName": notebook_name,
-        "definition": {
-            "format": "ipynb",
-            "parts": [
-                {
-                    "path": "notebook-content.py",
-                    "payload": notebook_payload,
-                    "payloadType": "InlineBase64",
-                }
-            ],
-        },
-    }
-    if description is not None:
-        request_body["description"] = description
-
-    response = client.post(f"v1/workspaces/{workspace_id}/notebooks", json=request_body)
-
-    lro(client, response, status_codes=[201, 202])
-
-    print(
-        f"{icons.green_dot} The '{notebook_name}' notebook was created within the '{workspace}' workspace."
-    )
-
-
 def list_reports_using_semantic_model(
     dataset: str, workspace: Optional[str] = None
 ) -> pd.DataFrame:
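Likewise, get_notebook_definition and import_notebook_from_web disappear from _list_functions.py while sempy_labs/_notebooks.py (+143) is added, which suggests the notebook helpers moved to their own module. A usage sketch under that assumption (notebook name, URL and workspace are placeholders):

# Assumption: the notebook helpers remain importable from the package root
# after the move to sempy_labs/_notebooks.py in 0.7.4.
import sempy_labs as labs

labs.import_notebook_from_web(
    notebook_name="Imported Notebook",                      # placeholder name
    url="https://github.com/org/repo/blob/main/nb.ipynb",   # placeholder URL
    workspace="My Workspace",                               # placeholder workspace
)
nb = labs.get_notebook_definition("Imported Notebook", workspace="My Workspace")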