semantic-link-labs 0.12.2__py3-none-any.whl → 0.12.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs has been flagged as possibly problematic by the registry.

Files changed (40)
  1. {semantic_link_labs-0.12.2.dist-info → semantic_link_labs-0.12.4.dist-info}/METADATA +5 -3
  2. {semantic_link_labs-0.12.2.dist-info → semantic_link_labs-0.12.4.dist-info}/RECORD +39 -31
  3. sempy_labs/__init__.py +18 -10
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_authentication.py +1 -1
  6. sempy_labs/_capacities.py +1 -1
  7. sempy_labs/_generate_semantic_model.py +2 -2
  8. sempy_labs/_get_connection_string.py +84 -0
  9. sempy_labs/_git.py +1 -1
  10. sempy_labs/_helper_functions.py +28 -4
  11. sempy_labs/_list_functions.py +55 -5
  12. sempy_labs/_managed_private_endpoints.py +1 -1
  13. sempy_labs/_notebooks.py +4 -2
  14. sempy_labs/_semantic_models.py +118 -0
  15. sempy_labs/_sql_audit_settings.py +208 -0
  16. sempy_labs/_sql_endpoints.py +27 -24
  17. sempy_labs/_utils.py +2 -0
  18. sempy_labs/_warehouses.py +1 -56
  19. sempy_labs/admin/__init__.py +6 -0
  20. sempy_labs/admin/_items.py +3 -3
  21. sempy_labs/admin/_labels.py +211 -0
  22. sempy_labs/directlake/_warm_cache.py +3 -1
  23. sempy_labs/eventstream/__init__.py +37 -0
  24. sempy_labs/eventstream/_items.py +263 -0
  25. sempy_labs/eventstream/_topology.py +652 -0
  26. sempy_labs/graph/__init__.py +12 -0
  27. sempy_labs/graph/_groups.py +60 -53
  28. sempy_labs/graph/_sensitivity_labels.py +120 -0
  29. sempy_labs/graph/_teams.py +19 -18
  30. sempy_labs/graph/_user_licenses.py +96 -0
  31. sempy_labs/graph/_users.py +23 -16
  32. sempy_labs/lakehouse/_get_lakehouse_tables.py +33 -1
  33. sempy_labs/lakehouse/_lakehouse.py +6 -2
  34. sempy_labs/lakehouse/_partitioning.py +165 -0
  35. sempy_labs/report/_reportwrapper.py +15 -5
  36. sempy_labs/tom/_model.py +111 -16
  37. sempy_labs/_eventstreams.py +0 -123
  38. {semantic_link_labs-0.12.2.dist-info → semantic_link_labs-0.12.4.dist-info}/WHEEL +0 -0
  39. {semantic_link_labs-0.12.2.dist-info → semantic_link_labs-0.12.4.dist-info}/licenses/LICENSE +0 -0
  40. {semantic_link_labs-0.12.2.dist-info → semantic_link_labs-0.12.4.dist-info}/top_level.txt +0 -0
sempy_labs/lakehouse/_partitioning.py ADDED
@@ -0,0 +1,165 @@
+from typing import Optional, List
+from uuid import UUID
+from sempy_labs._helper_functions import (
+    _create_spark_session,
+    create_abfss_path,
+    resolve_workspace_id,
+    resolve_lakehouse_id,
+    _get_delta_table,
+)
+from sempy._utils._log import log
+
+
+@log
+def _get_partitions(
+    table_name: str,
+    schema_name: Optional[str] = None,
+    lakehouse: Optional[str | UUID] = None,
+    workspace: Optional[str | UUID] = None,
+):
+
+    workspace_id = resolve_workspace_id(workspace)
+    lakehouse_id = resolve_lakehouse_id(lakehouse, workspace)
+    path = create_abfss_path(lakehouse_id, workspace_id, table_name, schema_name)
+
+    delta_table = _get_delta_table(path)
+    details_df = delta_table.detail()
+
+    return details_df.collect()[0].asDict()
+
+
+@log
+def is_partitioned(
+    table: str,
+    schema: Optional[str] = None,
+    lakehouse: Optional[str | UUID] = None,
+    workspace: Optional[str | UUID] = None,
+) -> bool:
+    """
+    Checks if a delta table is partitioned.
+
+    Parameters
+    ----------
+    table : str
+        The name of the delta table.
+    schema : str, optional
+        The schema of the table to check. If not provided, the default schema is used.
+    lakehouse : str | uuid.UUID, default=None
+        The Fabric lakehouse name or ID.
+        Defaults to None which resolves to the lakehouse attached to the notebook.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID used by the lakehouse.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    bool
+        True if the table is partitioned, False otherwise.
+    """
+
+    details = _get_partitions(
+        table_name=table, schema_name=schema, lakehouse=lakehouse, workspace=workspace
+    )
+    return len(details["partitionColumns"]) > 0
+
+
+@log
+def list_partitioned_columns(
+    table: str,
+    schema: Optional[str] = None,
+    lakehouse: Optional[str | UUID] = None,
+    workspace: Optional[str | UUID] = None,
+) -> List[str]:
+    """
+    Lists the partitioned columns of a delta table.
+
+    Parameters
+    ----------
+    table : str
+        The name of the delta table.
+    schema : str, optional
+        The schema of the table to check. If not provided, the default schema is used.
+    lakehouse : str | uuid.UUID, default=None
+        The Fabric lakehouse name or ID.
+        Defaults to None which resolves to the lakehouse attached to the notebook.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID used by the lakehouse.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    List[str]
+        The list of partitioned columns.
+    """
+
+    details = _get_partitions(
+        table_name=table, schema_name=schema, lakehouse=lakehouse, workspace=workspace
+    )
+
+    return details["partitionColumns"]
+
+
+@log
+def is_over_partitioned(
+    table: str,
+    schema: Optional[str] = None,
+    lakehouse: Optional[str | UUID] = None,
+    workspace: Optional[str | UUID] = None,
+    total_table_size_gb: int = 1000,
+    average_partition_size_gb: int = 1,
+) -> bool:
+    """
+    Checks if a delta table is over-partitioned.
+
+    Parameters
+    ----------
+    table : str
+        The name of the delta table.
+    schema : str, optional
+        The schema of the table to check. If not provided, the default schema is used.
+    lakehouse : str | uuid.UUID, default=None
+        The Fabric lakehouse name or ID.
+        Defaults to None which resolves to the lakehouse attached to the notebook.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID used by the lakehouse.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    total_table_size_gb : int, default=1000
+        Threshold for total table size in GB (default 1TB).
+    average_partition_size_gb : int, default=1
+        Threshold for average partition size in GB.
+
+    Returns
+    -------
+    bool
+        True if the table is over-partitioned, False otherwise.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    lakehouse_id = resolve_lakehouse_id(lakehouse, workspace)
+    path = create_abfss_path(lakehouse_id, workspace_id, table, schema)
+    # Get DeltaTable details
+    spark = _create_spark_session()
+    details_df = spark.sql(f"DESCRIBE DETAIL delta.`{path}`")
+    details = details_df.collect()[0].asDict()
+
+    # Extract relevant fields
+    size_bytes = details["sizeInBytes"]
+    partition_cols = details["partitionColumns"]
+    num_files = details["numFiles"]
+
+    total_size_gb = size_bytes / (1024**3)
+
+    # Only check if the table is partitioned
+    if len(partition_cols) > 0 and num_files > 0:
+        avg_partition_size_gb = total_size_gb / num_files
+
+        if (
+            total_size_gb < total_table_size_gb
+            or avg_partition_size_gb < average_partition_size_gb
+        ):
+            return True
+
+    return False
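
For context, a minimal sketch of how these new partitioning helpers could be called from a Fabric notebook. The table name is a placeholder, and the import path points at the private module added in this release; whether the functions are also re-exported from a public namespace is not shown in this diff.

from sempy_labs.lakehouse._partitioning import (
    is_partitioned,
    list_partitioned_columns,
    is_over_partitioned,
)

# "sales" is a placeholder table; lakehouse and workspace default to the attached lakehouse.
if is_partitioned(table="sales"):
    print(list_partitioned_columns(table="sales"))
    # Flags tables whose total size or average partition size falls below the thresholds.
    if is_over_partitioned(table="sales"):
        print("The 'sales' table may be over-partitioned.")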
sempy_labs/report/_reportwrapper.py CHANGED
@@ -1886,7 +1886,6 @@ class ReportWrapper:
         )
 
         self._ensure_pbir()
-        theme_version = "5.6.4"
 
         # Extract theme_json from theme_file_path
         if theme_file_path:
@@ -1912,14 +1911,25 @@ class ReportWrapper:
         theme_name_full = f"{theme_name}.json"
 
         # Add theme.json file
-        self.add(
-            file_path=f"StaticResources/RegisteredResources/{theme_name_full}",
-            payload=theme_json,
+        try:
+            self.add(
+                file_path=f"StaticResources/RegisteredResources/{theme_name_full}",
+                payload=theme_json,
+            )
+        except Exception:
+            self.update(
+                file_path=f"StaticResources/RegisteredResources/{theme_name_full}",
+                payload=theme_json,
+            )
+
+        rpt_version_at_import = self.get(
+            file_path=self._report_file_path,
+            json_path="$.themeCollection.baseTheme.reportVersionAtImport",
         )
 
         custom_theme = {
             "name": theme_name_full,
-            "reportVersionAtImport": theme_version,
+            "reportVersionAtImport": rpt_version_at_import,
             "type": "RegisteredResources",
         }
 
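
The net effect of the theme change above: registering a theme no longer fails when the resource file already exists (the add falls back to an update), and reportVersionAtImport is read from the report definition rather than the previously hardcoded "5.6.4". A hypothetical call site follows, assuming this hunk sits inside ReportWrapper's theme-setting method; the method name set_theme is an assumption, not confirmed by this diff.

from sempy_labs.report import ReportWrapper

# Placeholder report/workspace names; set_theme is assumed here, not shown in the diff.
rpt = ReportWrapper(report="My Report", workspace="My Workspace")
rpt.set_theme(theme_file_path="/lakehouse/default/Files/my_theme.json")
# Re-running with the same theme file now updates the registered resource in place.
rpt.set_theme(theme_file_path="/lakehouse/default/Files/my_theme.json")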
sempy_labs/tom/_model.py CHANGED
@@ -12,7 +12,6 @@ from sempy_labs._helper_functions import (
     _make_list_unique,
     resolve_dataset_name_and_id,
     resolve_workspace_name_and_id,
-    _base_api,
     resolve_workspace_id,
     resolve_item_id,
     resolve_lakehouse_id,
@@ -152,7 +151,12 @@ class TOMWrapper:
 
         self._table_map = {}
         self._column_map = {}
-        self._compat_level = self.model.Model.Database.CompatibilityLevel
+        self._compat_level = self.model.Database.CompatibilityLevel
+
+        # Max compat level
+        s = self.model.Server.SupportedCompatibilityLevels
+        nums = [int(x) for x in s.split(",") if x.strip() != "1000000"]
+        self._max_compat_level = max(nums)
 
         # Minimum campat level for lineage tags is 1540 (https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.tabular.table.lineagetag?view=analysisservices-dotnet#microsoft-analysisservices-tabular-table-lineagetag)
         if self._compat_level >= 1540:
@@ -241,6 +245,22 @@ class TOMWrapper:
             if t.CalculationGroup is not None:
                 yield t
 
+    def all_functions(self):
+        """
+        Outputs a list of all user-defined functions in the semantic model.
+
+        Parameters
+        ----------
+
+        Returns
+        -------
+        Iterator[Microsoft.AnalysisServices.Tabular.Function]
+            All user-defined functions within the semantic model.
+        """
+
+        for f in self.model.Functions:
+            yield f
+
     def all_measures(self):
         """
         Outputs a list of all measures in the semantic model.
@@ -760,6 +780,60 @@ class TOMWrapper:
         obj.Description = description
         self.model.Roles.Add(obj)
 
+    def set_compatibility_level(self, compatibility_level: int):
+        """
+        Sets compatibility level of the semantic model
+
+        Parameters
+        ----------
+        compatibility_level : int
+            The compatibility level to set the for the semantic model.
+        """
+        import Microsoft.AnalysisServices.Tabular as TOM
+
+        if compatibility_level < 1500 or compatibility_level > self._max_compat_level:
+            raise ValueError(
+                f"{icons.red_dot} Compatibility level must be between 1500 and {self._max_compat_level}."
+            )
+        if self._compat_level > compatibility_level:
+            print(
+                f"{icons.warning} Compatibility level can only be increased, not decreased."
+            )
+            return
+
+        self.model.Database.CompatibilityLevel = compatibility_level
+        bim = TOM.JsonScripter.ScriptCreateOrReplace(self.model.Database)
+        fabric.execute_tmsl(script=bim, workspace=self._workspace_id)
+
+    def set_user_defined_function(self, name: str, expression: str):
+        """
+        Sets the definition of a `user-defined <https://learn.microsoft.com/en-us/dax/best-practices/dax-user-defined-functions#using-model-explorer>_` function within the semantic model. This function requires that the compatibility level is at least 1702.
+
+        Parameters
+        ----------
+        name : str
+            Name of the user-defined function.
+        expression : str
+            The DAX expression for the user-defined function.
+        """
+        import Microsoft.AnalysisServices.Tabular as TOM
+
+        if self._compat_level < 1702:
+            raise ValueError(
+                f"{icons.warning} User-defined functions require a compatibility level of at least 1702. The current compatibility level is {self._compat_level}. Use the 'tom.set_compatibility_level' function to change the compatibility level."
+            )
+
+        existing = [f.Name for f in self.model.Functions]
+
+        if name in existing:
+            self.model.Functions[name].Expression = expression
+        else:
+            obj = TOM.Function()
+            obj.Name = name
+            obj.Expression = expression
+            obj.LineageTag = generate_guid()
+            self.model.Functions.Add(obj)
+
     def set_rls(self, role_name: str, table_name: str, filter_expression: str):
         """
         Sets the row level security permissions for a table within a role.
@@ -1909,6 +1983,8 @@ class TOMWrapper:
             object.Parent.CalculationItems.Remove(object.Name)
         elif objType == TOM.ObjectType.TablePermission:
            object.Parent.TablePermissions.Remove(object.Name)
+        elif objType == TOM.ObjectType.Function:
+            object.Parent.Functions.Remove(object.Name)
 
     def used_in_relationships(self, object: Union["TOM.Table", "TOM.Column"]):
         """
@@ -4670,9 +4746,7 @@ class TOMWrapper:
             json=payload,
         )
         if response.status_code != 200:
-            raise FabricHTTPException(
-                f"Failed to retrieve descriptions: {response.text}"
-            )
+            raise FabricHTTPException(response)
 
         for item in response.json().get("modelItems", []):
             ms_name = item["urn"]
@@ -4752,8 +4826,8 @@ class TOMWrapper:
         value_filter_behavior = value_filter_behavior.capitalize()
         min_compat = 1606
 
-        if self.model.Model.Database.CompatibilityLevel < min_compat:
-            self.model.Model.Database.CompatibilityLevel = min_compat
+        if self.model.Database.CompatibilityLevel < min_compat:
+            self.model.Database.CompatibilityLevel = min_compat
 
         self.model.ValueFilterBehavior = System.Enum.Parse(
             TOM.ValueFilterBehaviorType, value_filter_behavior
@@ -5109,7 +5183,9 @@ class TOMWrapper:
         """
         import Microsoft.AnalysisServices.Tabular as TOM
 
-        p = next(p for p in self.model.Tables[table_name].Partitions)
+        t = self.model.Tables[table_name]
+
+        p = next(p for p in t.Partitions)
         if p.Mode != TOM.ModeType.DirectLake:
             print(f"{icons.info} The '{table_name}' table is not in Direct Lake mode.")
             return
@@ -5119,9 +5195,7 @@ class TOMWrapper:
         partition_schema = schema or p.Source.SchemaName
 
         # Update name of the Direct Lake partition (will be removed later)
-        self.model.Tables[table_name].Partitions[
-            partition_name
-        ].Name = f"{partition_name}_remove"
+        t.Partitions[partition_name].Name = f"{partition_name}_remove"
 
         source_workspace_id = resolve_workspace_id(workspace=source_workspace)
         if source_type == "Lakehouse":
@@ -5133,21 +5207,41 @@ class TOMWrapper:
             item=source, type=source_type, workspace=source_workspace_id
         )
 
+        column_pairs = []
+        m_filter = None
+        for c in t.Columns:
+            if c.Type == TOM.ColumnType.Data:
+                if c.Name != c.SourceColumn:
+                    column_pairs.append((c.SourceColumn, c.Name))
+
+        if column_pairs:
+            m_filter = (
+                f'#"Renamed Columns" = Table.RenameColumns(ToDelta, {{'
+                + ", ".join([f'{{"{old}", "{new}"}}' for old, new in column_pairs])
+                + "})"
+            )
+
         def _generate_m_expression(
-            workspace_id, artifact_id, artifact_type, table_name, schema_name
+            workspace_id, artifact_id, artifact_type, table_name, schema_name, m_filter
         ):
             """
-            Generates the M expression for the import partition.
+            Generates the M expression for the import partition. Adds a rename step if any columns have been renamed in the model.
             """
 
             full_table_name = (
                 f"{schema_name}/{table_name}" if schema_name else table_name
            )
 
-            return f"""let\n\tSource = AzureStorage.DataLake("https://onelake.dfs.fabric.microsoft.com/{workspace_id}/{artifact_id}", [HierarchicalNavigation=true]),
 Tables = Source{{[Name = "Tables"]}}[Content],
 ExpressionTable = Tables{{[Name = "{full_table_name}"]}}[Content],
-ToDelta = DeltaLake.Table(ExpressionTable)\nin\n\tToDelta"""
+            code = f"""let\n\tSource = AzureStorage.DataLake("https://onelake.dfs.fabric.microsoft.com/{workspace_id}/{artifact_id}", [HierarchicalNavigation=true]),
+ToDelta = DeltaLake.Table(ExpressionTable)"""
+            if m_filter is None:
+                code += "\n in\n\tToDelta"
+            else:
+                code += f',\n\t {m_filter} \n in\n\t#"Renamed Columns"'
+
+            return code
 
         m_expression = _generate_m_expression(
             source_workspace_id,
@@ -5155,6 +5249,7 @@ class TOMWrapper:
             source_type,
             partition_entity_name,
             partition_schema,
+            m_filter,
         )
 
         # Add the import partition
@@ -5822,7 +5917,7 @@ class TOMWrapper:
         import Microsoft.AnalysisServices.Tabular as TOM
 
         # ChangedProperty logic (min compat level is 1567) https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.tabular.changedproperty?view=analysisservices-dotnet
-        if self.model.Model.Database.CompatibilityLevel >= 1567:
+        if self.model.Database.CompatibilityLevel >= 1567:
            for t in self.model.Tables:
                if any(
                    p.SourceType == TOM.PartitionSourceType.Entity
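
Taken together, the TOM additions above expose DAX user-defined functions through the wrapper. A minimal sketch using the library's connect_semantic_model entry point follows; the dataset and workspace names are placeholders and the DAX function body is illustrative only. Note that set_compatibility_level publishes the change via TMSL, while the wrapper's cached compatibility level is read when the connection is opened, hence the two separate connections.

from sempy_labs.tom import connect_semantic_model

# Step 1: raise the compatibility level if needed (one-way; it can only be increased).
with connect_semantic_model(dataset="Sales Model", workspace="My Workspace", readonly=False) as tom:
    tom.set_compatibility_level(1702)

# Step 2: reconnect so the new level is picked up, then register a user-defined function.
with connect_semantic_model(dataset="Sales Model", workspace="My Workspace", readonly=False) as tom:
    tom.set_user_defined_function(
        name="AddOne",
        expression="(x) => x + 1",  # placeholder DAX body
    )
    print([f.Name for f in tom.all_functions()])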
sempy_labs/_eventstreams.py DELETED
@@ -1,123 +0,0 @@
-import pandas as pd
-from typing import Optional
-from sempy_labs._helper_functions import (
-    _base_api,
-    delete_item,
-    _create_dataframe,
-    create_item,
-    resolve_workspace_id,
-)
-from uuid import UUID
-import sempy_labs._icons as icons
-from sempy._utils._log import log
-
-
-@log
-def list_eventstreams(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
-    """
-    Shows the eventstreams within a workspace.
-
-    This is a wrapper function for the following API: `Items - List Eventstreams <https://learn.microsoft.com/rest/api/fabric/environment/items/list-eventstreams>`_.
-
-    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
-
-    Parameters
-    ----------
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the eventstreams within a workspace.
-    """
-
-    columns = {
-        "Eventstream Name": "string",
-        "Eventstream Id": "string",
-        "Description": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    workspace_id = resolve_workspace_id(workspace)
-    responses = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/eventstreams",
-        uses_pagination=True,
-        client="fabric_sp",
-    )
-
-    rows = []
-    for r in responses:
-        for v in r.get("value", []):
-            rows.append(
-                {
-                    "Eventstream Name": v.get("displayName"),
-                    "Eventstream Id": v.get("id"),
-                    "Description": v.get("description"),
-                }
-            )
-
-    if rows:
-        df = pd.DataFrame(rows, columns=list(columns.keys()))
-
-    return df
-
-
-@log
-def create_eventstream(
-    name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
-):
-    """
-    Creates a Fabric eventstream.
-
-    This is a wrapper function for the following API: `Items - Create Eventstream <https://learn.microsoft.com/rest/api/fabric/environment/items/create-eventstream>`_.
-
-    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
-
-    Parameters
-    ----------
-    name: str
-        Name of the eventstream.
-    description : str, default=None
-        A description of the environment.
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    create_item(
-        name=name, description=description, type="Eventstream", workspace=workspace
-    )
-
-
-@log
-def delete_eventstream(
-    eventstream: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
-):
-    """
-    Deletes a Fabric eventstream.
-
-    This is a wrapper function for the following API: `Items - Delete Eventstream <https://learn.microsoft.com/rest/api/fabric/environment/items/delete-eventstream>`_.
-
-    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
-
-    Parameters
-    ----------
-    eventstream: str | uuid.UUID
-        Name or ID of the eventstream.
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    if "name" in kwargs:
-        eventstream = kwargs["name"]
-        print(
-            f"{icons.warning} The 'name' parameter is deprecated. Please use 'eventstream' instead."
-        )
-
-    delete_item(item=eventstream, type="Eventstream", workspace=workspace)
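
The deletion above pairs with the new sempy_labs/eventstream subpackage listed in the files changed (eventstream/__init__.py and eventstream/_items.py). Assuming the same function names were carried over to the new module, which this diff does not show, existing callers would migrate roughly like this:

# Hypothetical new import path; the old top-level sempy_labs._eventstreams module is gone.
from sempy_labs.eventstream import list_eventstreams, create_eventstream, delete_eventstream

create_eventstream(name="MyEventstream", workspace="My Workspace")
print(list_eventstreams(workspace="My Workspace"))
delete_eventstream(eventstream="MyEventstream", workspace="My Workspace")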