semantic-link-labs 0.8.8__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of semantic-link-labs has been flagged as potentially problematic.

@@ -82,7 +82,7 @@ def run_model_bpa_bulk(
     if isinstance(workspace, str):
         workspace = [workspace]
 
-    dfW = fabric.list_workspaces()
+    dfW = fabric.list_workspaces("type ne 'AdminInsights'")
     if workspace is None:
         dfW_filt = dfW.copy()
     else:
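The behavioural change here is the filter pushed into `fabric.list_workspaces`, which takes an OData-style filter expression as its first argument. A minimal sketch of the changed call in isolation (run inside a Fabric notebook session):

```python
import sempy.fabric as fabric

# Exclude the tenant-level "AdminInsights" workspace, which is not a regular
# workspace and would otherwise be swept into the bulk BPA scan.
dfW = fabric.list_workspaces("type ne 'AdminInsights'")
print(dfW.head())
```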
@@ -119,16 +119,16 @@ def run_model_bpa_bulk(
         dfD_filt = dfD[~dfD["Dataset Name"].isin(skip_models)]
 
         if len(dfD_filt) > 0:
-            for i2, r2 in dfD_filt.iterrows():
+            for _, r2 in dfD_filt.iterrows():
+                dataset_id = r2["Dataset Id"]
                 dataset_name = r2["Dataset Name"]
                 config_by = r2["Configured By"]
-                dataset_id = r2["Dataset Id"]
                 print(
                     f"{icons.in_progress} Collecting Model BPA stats for the '{dataset_name}' semantic model within the '{wksp}' workspace."
                 )
                 try:
                     bpa_df = run_model_bpa(
-                        dataset=dataset_name,
+                        dataset=dataset_id,
                         workspace=wksp,
                         language=language,
                         return_dataframe=True,
@@ -150,7 +150,7 @@ def run_model_bpa_bulk(
 
                 if df.empty:
                     df = bpa_df
-                if not bpa_df.empty:
+                elif not bpa_df.empty:
                     df = pd.concat([df, bpa_df], ignore_index=True)
                 print(
                     f"{icons.green_dot} Collected Model BPA stats for the '{dataset_name}' semantic model within the '{wksp}' workspace."
@@ -1,10 +1,15 @@
 import sempy.fabric as fabric
 import pandas as pd
-from sempy_labs._helper_functions import format_dax_object_name
+from sempy_labs._helper_functions import (
+    format_dax_object_name,
+    resolve_dataset_name_and_id,
+    resolve_workspace_name_and_id,
+)
 import sempy_labs._icons as icons
 from typing import Any, Dict, Optional
 from anytree import Node, RenderTree
 from sempy._utils._log import log
+from uuid import UUID
 
 
 @log
@@ -139,15 +144,15 @@ def get_measure_dependencies(
 
 @log
 def get_model_calc_dependencies(
-    dataset: str, workspace: Optional[str] = None
+    dataset: str | UUID, workspace: Optional[str] = None
 ) -> pd.DataFrame:
     """
     Shows all dependencies for all objects in a semantic model.
 
     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | UUID
+        Name or ID of the semantic model.
     workspace : str, default=None
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -159,10 +164,11 @@ def get_model_calc_dependencies(
         Shows all dependencies for all objects in the semantic model.
     """
 
-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
    dep = fabric.evaluate_dax(
-        dataset=dataset,
-        workspace=workspace,
+        dataset=dataset_id,
+        workspace=workspace_id,
         dax_string="""
         SELECT
             [TABLE] AS [Table Name],
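With the resolver helpers in place, `get_model_calc_dependencies` accepts either a model name or its UUID, and the DAX query runs against the resolved IDs. A hypothetical call each way (placeholder names and GUID; assumes the function's usual top-level re-export from `sempy_labs`):

```python
from uuid import UUID
from sempy_labs import get_model_calc_dependencies

# By name, as before.
deps = get_model_calc_dependencies(dataset="AdventureWorks", workspace="Sales")

# By ID, which stays stable if the model is renamed (placeholder GUID).
deps = get_model_calc_dependencies(
    dataset=UUID("00000000-0000-0000-0000-000000000000"),
    workspace="Sales",
)
```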
@@ -199,7 +205,10 @@ def get_model_calc_dependencies(
     for _, row in incomplete_rows.iterrows():
         referenced_full_name = row["Referenced Full Object Name"]
         referenced_object_type = row["Referenced Object Type"]
-        dep_filt = dep[(dep["Full Object Name"] == referenced_full_name) & (dep["Object Type"] == referenced_object_type)]
+        dep_filt = dep[
+            (dep["Full Object Name"] == referenced_full_name)
+            & (dep["Object Type"] == referenced_object_type)
+        ]
         # Expand dependencies and update 'Done' status as needed
         new_rows = []
         for _, dependency in dep_filt.iterrows():
sempy_labs/_notebooks.py CHANGED
@@ -10,6 +10,42 @@ from sempy_labs._helper_functions import (
     _decode_b64,
 )
 from sempy.fabric.exceptions import FabricHTTPException
+import os
+
+_notebook_prefix = "notebook-content."
+
+
+def _get_notebook_definition_base(
+    notebook_name: str, workspace: Optional[str] = None
+) -> pd.DataFrame:
+
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+    item_id = fabric.resolve_item_id(
+        item_name=notebook_name, type="Notebook", workspace=workspace
+    )
+    client = fabric.FabricRestClient()
+    response = client.post(
+        f"v1/workspaces/{workspace_id}/notebooks/{item_id}/getDefinition",
+    )
+
+    result = lro(client, response).json()
+
+    return pd.json_normalize(result["definition"]["parts"])
+
+
+def _get_notebook_type(notebook_name: str, workspace: Optional[str] = None) -> str:
+
+    df_items = _get_notebook_definition_base(
+        notebook_name=notebook_name, workspace=workspace
+    )
+
+    file_path = df_items[df_items["path"].str.startswith(_notebook_prefix)][
+        "path"
+    ].iloc[0]
+
+    _, file_extension = os.path.splitext(file_path)
+
+    return file_extension[1:]
 
 
 def get_notebook_definition(
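`_get_notebook_type` infers the notebook format from whatever extension follows the fixed `notebook-content.` prefix in the definition part's path, instead of assuming `.py`. The extension-splitting step in isolation (standalone, no Fabric session needed):

```python
import os

_notebook_prefix = "notebook-content."

for path in ("notebook-content.py", "notebook-content.ipynb"):
    _, ext = os.path.splitext(path)  # splitext keeps the leading dot
    print(path, "->", ext[1:])       # "py", "ipynb"
```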
@@ -38,18 +74,10 @@ def get_notebook_definition(
         The notebook definition.
     """
 
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-    item_id = fabric.resolve_item_id(
-        item_name=notebook_name, type="Notebook", workspace=workspace
+    df_items = _get_notebook_definition_base(
+        notebook_name=notebook_name, workspace=workspace
     )
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"v1/workspaces/{workspace_id}/notebooks/{item_id}/getDefinition",
-    )
-
-    result = lro(client, response).json()
-    df_items = pd.json_normalize(result["definition"]["parts"])
-    df_items_filt = df_items[df_items["path"] == "notebook-content.py"]
+    df_items_filt = df_items[df_items["path"].str.startswith(_notebook_prefix)]
     payload = df_items_filt["payload"].iloc[0]
 
     if decode:
@@ -115,9 +143,10 @@ def import_notebook_from_web(
             description=description,
         )
     elif len(dfI_filt) > 0 and overwrite:
-        update_notebook_definition(
-            name=notebook_name, notebook_content=response.content, workspace=workspace
-        )
+        print(f"{icons.info} Overwrite of notebooks is currently not supported.")
+        # update_notebook_definition(
+        #     name=notebook_name, notebook_content=response.content, workspace=workspace
+        # )
     else:
         raise ValueError(
             f"{icons.red_dot} The '{notebook_name}' already exists within the '{workspace}' workspace and 'overwrite' is set to False."
@@ -127,6 +156,7 @@ def import_notebook_from_web(
 def create_notebook(
     name: str,
     notebook_content: str,
+    type: str = "py",
     description: Optional[str] = None,
     workspace: Optional[str] = None,
 ):
@@ -139,6 +169,8 @@ def create_notebook(
         The name of the notebook to be created.
     notebook_content : str
         The Jupyter notebook content (not in Base64 format).
+    type : str, default="py"
+        The notebook type.
     description : str, default=None
         The description of the notebook.
         Defaults to None which does not place a description.
@@ -158,7 +190,7 @@ def create_notebook(
             "format": "ipynb",
             "parts": [
                 {
-                    "path": "notebook-content.py",
+                    "path": f"{_notebook_prefix}.{type}",
                     "payload": notebook_payload,
                     "payloadType": "InlineBase64",
                 }
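A hypothetical call with the new parameter (placeholder names and a minimal payload; assumes the usual top-level re-export from `sempy_labs`). One thing worth flagging: since `_notebook_prefix` already ends in a dot, `f"{_notebook_prefix}.{type}"` appears to render as `notebook-content..py`, a doubled dot that may or may not be intentional.

```python
from sempy_labs import create_notebook

# Minimal empty notebook definition; the docstring says the content should
# not be Base64-encoded, so pass it as plain JSON text.
nb_json = '{"cells": [], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}'

create_notebook(
    name="My Notebook",        # placeholder
    notebook_content=nb_json,
    type="ipynb",              # new in this release; defaults to "py"
    workspace="My Workspace",  # placeholder
)
```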
@@ -202,13 +234,13 @@ def update_notebook_definition(
         item_name=name, type="Notebook", workspace=workspace
     )
 
+    type = _get_notebook_type(notebook_name=name, workspace=workspace_id)
+
     request_body = {
-        "displayName": name,
         "definition": {
-            "format": "ipynb",
             "parts": [
                 {
-                    "path": "notebook-content.py",
+                    "path": f"{_notebook_prefix}.{type}",
                     "payload": notebook_payload,
                     "payloadType": "InlineBase64",
                 }
@@ -5,6 +5,7 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _get_partition_map,
     _process_and_display_chart,
+    resolve_dataset_name_and_id,
 )
 from typing import Any, List, Optional, Union
 from sempy._utils._log import log
@@ -14,11 +15,12 @@ import pandas as pd
 import warnings
 import ipywidgets as widgets
 import json
+from uuid import UUID
 
 
 @log
 def refresh_semantic_model(
-    dataset: str,
+    dataset: str | UUID,
     tables: Optional[Union[str, List[str]]] = None,
     partitions: Optional[Union[str, List[str]]] = None,
     refresh_type: str = "full",
@@ -34,8 +36,8 @@ def refresh_semantic_model(
 
     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | UUID
+        Name or ID of the semantic model.
     tables : str, List[str], default=None
         A string or a list of tables to refresh.
     partitions: str, List[str], default=None
@@ -65,7 +67,8 @@ def refresh_semantic_model(
         If 'visualize' is set to True, returns a pandas dataframe showing the SSAS trace output used to generate the visualization.
     """
 
-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
     if isinstance(tables, str):
         tables = [tables]
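`refresh_semantic_model` gets the same treatment: workspace and dataset are resolved to IDs once at the top, every downstream `fabric` call uses the IDs, and user-facing messages use the resolved names. A hypothetical invocation either way (placeholder values; assumes the top-level re-export from `sempy_labs`):

```python
from uuid import UUID
from sempy_labs import refresh_semantic_model

# By name, as before...
refresh_semantic_model(dataset="AdventureWorks", workspace="Sales")

# ...or by ID, which is unambiguous under renames (placeholder GUID).
refresh_semantic_model(
    dataset=UUID("00000000-0000-0000-0000-000000000000"),
    workspace="Sales",
    refresh_type="full",
)
```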
@@ -118,11 +121,11 @@ def refresh_semantic_model(
     def extract_failure_error():
         error_messages = []
         combined_messages = ""
-        final_message = f"{icons.red_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has failed."
+        final_message = f"{icons.red_dot} The refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace has failed."
         for _, r in fabric.get_refresh_execution_details(
             refresh_request_id=request_id,
-            dataset=dataset,
-            workspace=workspace,
+            dataset=dataset_id,
+            workspace=workspace_id,
         ).messages.iterrows():
             error_messages.append(f"{r['Type']}: {r['Message']}")
 
@@ -135,8 +138,8 @@ def refresh_semantic_model(
     # Function to perform dataset refresh
     def refresh_dataset():
         return fabric.refresh_dataset(
-            dataset=dataset,
-            workspace=workspace,
+            dataset=dataset_id,
+            workspace=workspace_id,
             refresh_type=refresh_type,
             retry_count=retry_count,
             apply_refresh_policy=apply_refresh_policy,
@@ -147,7 +150,9 @@ def refresh_semantic_model(
 
     def check_refresh_status(request_id):
         request_details = fabric.get_refresh_execution_details(
-            dataset=dataset, refresh_request_id=request_id, workspace=workspace
+            dataset=dataset_id,
+            refresh_request_id=request_id,
+            workspace=workspace_id,
         )
         return request_details.status
 
@@ -169,7 +174,8 @@ def refresh_semantic_model(
             right_on="PartitionID",
             how="left",
         )
-        _process_and_display_chart(df, title=title, widget=widget)
+        if not df.empty:
+            _process_and_display_chart(df, title=title, widget=widget)
         if stop:
             df.drop(["Object Name", "PartitionID"], axis=1, inplace=True)
             df.rename(columns={"TableName": "Table Name"}, inplace=True)
@@ -180,7 +186,7 @@ def refresh_semantic_model(
     if not visualize:
         request_id = refresh_dataset()
         print(
-            f"{icons.in_progress} Refresh of the '{dataset}' semantic model within the '{workspace}' workspace is in progress..."
+            f"{icons.in_progress} Refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace is in progress..."
         )
 
     # Monitor refresh progress and handle tracing if visualize is enabled
@@ -189,7 +195,7 @@ def refresh_semantic_model(
         widget = widgets.Output()
 
         with fabric.create_trace_connection(
-            dataset=dataset, workspace=workspace
+            dataset=dataset_id, workspace=workspace_id
         ) as trace_connection:
             with trace_connection.create_trace(icons.refresh_event_schema) as trace:
                 trace.start()
@@ -204,7 +210,7 @@ def refresh_semantic_model(
                     raise ValueError(extract_failure_error())
                 elif status == "Cancelled":
                     print(
-                        f"{icons.yellow_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has been cancelled."
+                        f"{icons.yellow_dot} The refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace has been cancelled."
                     )
                     return
 
@@ -231,7 +237,7 @@ def refresh_semantic_model(
             )
 
             print(
-                f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset}' semantic model within the '{workspace}' workspace is complete."
+                f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset_name}' semantic model within the '{workspace_name}' workspace is complete."
             )
             return final_df
 
@@ -245,14 +251,14 @@ def refresh_semantic_model(
             raise ValueError(extract_failure_error())
         elif status == "Cancelled":
             print(
-                f"{icons.yellow_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has been cancelled."
+                f"{icons.yellow_dot} The refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace has been cancelled."
             )
             return
 
         time.sleep(3)
 
     print(
-        f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset}' semantic model within the '{workspace}' workspace is complete."
+        f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset_name}' semantic model within the '{workspace_name}' workspace is complete."
     )
 
     final_output = refresh_and_trace_dataset(
@@ -40,6 +40,8 @@ def translate_semantic_model(
     from pyspark.sql import SparkSession
     from sempy_labs.tom import connect_semantic_model
 
+    icons.sll_tags.append("TranslateSemanticModel")
+
     def _clean_text(text, exclude_chars):
         if exclude_chars:
             for char in exclude_chars:
@@ -55,7 +57,7 @@ def translate_semantic_model(
         columns=["Object Type", "Name", "Description", "Display Folder"]
     )
 
-    icons.sll_tags.append("TranslateSemanticModel")
+    final_df = pd.DataFrame(columns=["Value", "Translation"])
 
     with connect_semantic_model(
         dataset=dataset, readonly=False, workspace=workspace
@@ -65,9 +67,9 @@ def translate_semantic_model(
             oName = _clean_text(o.Name, exclude_characters)
             oDescription = _clean_text(o.Description, exclude_characters)
             new_data = {
-                "Object Type": "Table",
                 "Name": o.Name,
                 "TName": oName,
+                "Object Type": "Table",
                 "Description": o.Description,
                 "TDescription": oDescription,
                 "Display Folder": None,
@@ -81,9 +83,9 @@ def translate_semantic_model(
             oDescription = _clean_text(o.Description, exclude_characters)
             oDisplayFolder = _clean_text(o.DisplayFolder, exclude_characters)
             new_data = {
-                "Object Type": "Column",
                 "Name": o.Name,
                 "TName": oName,
+                "Object Type": "Column",
                 "Description": o.Description,
                 "TDescription": oDescription,
                 "Display Folder": o.DisplayFolder,
@@ -97,9 +99,9 @@ def translate_semantic_model(
             oDescription = _clean_text(o.Description, exclude_characters)
             oDisplayFolder = _clean_text(o.DisplayFolder, exclude_characters)
             new_data = {
-                "Object Type": "Measure",
                 "Name": o.Name,
                 "TName": oName,
+                "Object Type": "Measure",
                 "Description": o.Description,
                 "TDescription": oDescription,
                 "Display Folder": o.DisplayFolder,
@@ -113,9 +115,9 @@ def translate_semantic_model(
             oDescription = _clean_text(o.Description, exclude_characters)
             oDisplayFolder = _clean_text(o.DisplayFolder, exclude_characters)
             new_data = {
-                "Object Type": "Hierarchy",
                 "Name": o.Name,
                 "TName": oName,
+                "Object Type": "Hierarchy",
                 "Description": o.Description,
                 "TDescription": oDescription,
                 "Display Folder": o.DisplayFolder,
@@ -128,9 +130,9 @@ def translate_semantic_model(
             oName = _clean_text(o.Name, exclude_characters)
             oDescription = _clean_text(o.Description, exclude_characters)
             new_data = {
-                "Object Type": "Level",
                 "Name": o.Name,
                 "TName": oName,
+                "Object Type": "Level",
                 "Description": o.Description,
                 "TDescription": oDescription,
                 "Display Folder": None,
@@ -163,152 +165,82 @@ def translate_semantic_model(
             )
 
             df_panda = transDF.toPandas()
+            df_panda = df_panda[~df_panda[clm].isin([None, ""])][[clm, "translation"]]
+
+            df_panda = df_panda.rename(columns={clm: "value"})
+            final_df = pd.concat([final_df, df_panda], ignore_index=True)
 
-            def set_translation_if_exists(
-                obj, obj_type, property_name, property_value, df, lang, index
-            ):
-                if property_name in df.columns and len(property_value) > 0:
-                    df_filt = df[
-                        (df["Object Type"] == obj_type)
-                        & (df[property_name] == property_value)
-                    ]
-                    if len(df_filt) == 1:
-                        translation = df_filt["translation"].str[index].iloc[0]
-                        tom.set_translation(
-                            object=obj,
-                            language=lang,
-                            property=property_name,
-                            value=translation,
-                        )
+        def set_translation_if_exists(object, language, property, index):
 
-            for lang in languages:
-                i = languages.index(lang)
-                tom.add_translation(language=lang)
-                print(
-                    f"{icons.in_progress} Translating {clm.lower()}s into the '{lang}' language..."
+            if property == "Name":
+                trans = object.Name
+            elif property == "Description":
+                trans = object.Description
+            elif property == "Display Folder":
+                trans = object.DisplayFolder
+
+            df_filt = final_df[final_df["value"] == trans]
+            if not df_filt.empty:
+                translation_value = df_filt["translation"].str[index].iloc[0]
+                tom.set_translation(
+                    object=object,
+                    language=language,
+                    property=property,
+                    value=translation_value,
+                )
+
+        for language in languages:
+            index = languages.index(language)
+            tom.add_translation(language=language)
+            print(
+                f"{icons.in_progress} Translating {clm.lower()}s into the '{language}' language..."
+            )
+
+            for t in tom.model.Tables:
+                set_translation_if_exists(
+                    object=t, language=language, property="Name", index=index
+                )
+                set_translation_if_exists(
+                    object=t, language=language, property="Description", index=index
+                )
+            for c in tom.all_columns():
+                set_translation_if_exists(
+                    object=c, language=language, property="Name", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Description", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Display Folder", index=index
+                )
+            for c in tom.all_measures():
+                set_translation_if_exists(
+                    object=c, language=language, property="Name", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Description", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Display Folder", index=index
+                )
+            for c in tom.all_hierarchies():
+                set_translation_if_exists(
+                    object=c, language=language, property="Name", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Description", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Display Folder", index=index
+                )
+            for c in tom.all_levels():
+                set_translation_if_exists(
+                    object=c, language=language, property="Name", index=index
+                )
+                set_translation_if_exists(
+                    object=c, language=language, property="Description", index=index
                 )
 
-                for t in tom.model.Tables:
-                    if t.IsHidden is False:
-                        if clm == "Name":
-                            set_translation_if_exists(
-                                t, "Table", "Name", t.Name, df_panda, lang, i
-                            )
-                        elif clm == "Description":
-                            set_translation_if_exists(
-                                t,
-                                "Table",
-                                "Description",
-                                t.Description,
-                                df_panda,
-                                lang,
-                                i,
-                            )
-                        for c in t.Columns:
-                            if c.IsHidden is False:
-                                if clm == "Name":
-                                    set_translation_if_exists(
-                                        c, "Column", "Name", c.Name, df_panda, lang, i
-                                    )
-                                elif clm == "Description":
-                                    set_translation_if_exists(
-                                        c,
-                                        "Column",
-                                        "Description",
-                                        c.Description,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
-                                elif clm == "Display Folder":
-                                    set_translation_if_exists(
-                                        c,
-                                        "Column",
-                                        "Display Folder",
-                                        c.DisplayFolder,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
-                        for h in t.Hierarchies:
-                            if h.IsHidden is False:
-                                if clm == "Name":
-                                    set_translation_if_exists(
-                                        h,
-                                        "Hierarchy",
-                                        "Name",
-                                        h.Name,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
-                                elif clm == "Description":
-                                    set_translation_if_exists(
-                                        h,
-                                        "Hierarchy",
-                                        "Description",
-                                        h.Description,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
-                                elif clm == "Display Folder":
-                                    set_translation_if_exists(
-                                        h,
-                                        "Hierarchy",
-                                        "Display Folder",
-                                        h.DisplayFolder,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
-                                for lev in h.Levels:
-                                    if clm == "Name":
-                                        set_translation_if_exists(
-                                            lev,
-                                            "Level",
-                                            "Name",
-                                            lev.Name,
-                                            df_panda,
-                                            lang,
-                                            i,
-                                        )
-                                    elif clm == "Description":
-                                        set_translation_if_exists(
-                                            lev,
-                                            "Level",
-                                            "Description",
-                                            lev.Description,
-                                            df_panda,
-                                            lang,
-                                            i,
-                                        )
-                        for ms in t.Measures:
-                            if ms.IsHidden is False:
-                                if clm == "Name":
-                                    set_translation_if_exists(
-                                        ms, "Measure", "Name", ms.Name, df_panda, lang, i
-                                    )
-                                elif clm == "Description":
-                                    set_translation_if_exists(
-                                        ms,
-                                        "Measure",
-                                        "Description",
-                                        ms.Description,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
-                                elif clm == "Display Folder":
-                                    set_translation_if_exists(
-                                        ms,
-                                        "Measure",
-                                        "Display Folder",
-                                        ms.DisplayFolder,
-                                        df_panda,
-                                        lang,
-                                        i,
-                                    )
     result = pd.DataFrame(
         columns=[
             "Language",
sempy_labs/_workspaces.py CHANGED
@@ -153,7 +153,7 @@ def add_user_to_workspace(
     Parameters
     ----------
     email_address : str
-        The email address of the user.
+        The email address of the user. Also accepts the user identifier.
     role_name : str
         The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
     principal_type : str, default='User'
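Hypothetical calls reflecting the clarified docstring (placeholder address and GUID; assumes the top-level re-export from `sempy_labs`):

```python
from sempy_labs import add_user_to_workspace

# A user by email address...
add_user_to_workspace(
    email_address="user@contoso.com",  # placeholder
    role_name="Member",
    workspace="Sales",                 # placeholder
)

# ...or, per the updated docstring, a principal by its identifier.
add_user_to_workspace(
    email_address="11111111-1111-1111-1111-111111111111",  # placeholder GUID
    role_name="Admin",
    principal_type="App",
    workspace="Sales",
)
```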