semantic-link-labs 0.6.0__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (104)
  1. semantic_link_labs-0.7.1.dist-info/METADATA +148 -0
  2. semantic_link_labs-0.7.1.dist-info/RECORD +111 -0
  3. {semantic_link_labs-0.6.0.dist-info → semantic_link_labs-0.7.1.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +26 -2
  5. sempy_labs/_ai.py +3 -65
  6. sempy_labs/_bpa_translation/_translations_am-ET.po +828 -0
  7. sempy_labs/_bpa_translation/_translations_ar-AE.po +860 -0
  8. sempy_labs/_bpa_translation/_translations_cs-CZ.po +894 -0
  9. sempy_labs/_bpa_translation/_translations_da-DK.po +894 -0
  10. sempy_labs/_bpa_translation/_translations_de-DE.po +933 -0
  11. sempy_labs/_bpa_translation/_translations_el-GR.po +936 -0
  12. sempy_labs/_bpa_translation/_translations_es-ES.po +915 -0
  13. sempy_labs/_bpa_translation/_translations_fa-IR.po +883 -0
  14. sempy_labs/_bpa_translation/_translations_fr-FR.po +938 -0
  15. sempy_labs/_bpa_translation/_translations_ga-IE.po +912 -0
  16. sempy_labs/_bpa_translation/_translations_he-IL.po +855 -0
  17. sempy_labs/_bpa_translation/_translations_hi-IN.po +892 -0
  18. sempy_labs/_bpa_translation/_translations_hu-HU.po +910 -0
  19. sempy_labs/_bpa_translation/_translations_is-IS.po +887 -0
  20. sempy_labs/_bpa_translation/_translations_it-IT.po +931 -0
  21. sempy_labs/_bpa_translation/_translations_ja-JP.po +805 -0
  22. sempy_labs/_bpa_translation/_translations_nl-NL.po +924 -0
  23. sempy_labs/_bpa_translation/_translations_pl-PL.po +913 -0
  24. sempy_labs/_bpa_translation/_translations_pt-BR.po +909 -0
  25. sempy_labs/_bpa_translation/_translations_pt-PT.po +904 -0
  26. sempy_labs/_bpa_translation/_translations_ru-RU.po +909 -0
  27. sempy_labs/_bpa_translation/_translations_ta-IN.po +922 -0
  28. sempy_labs/_bpa_translation/_translations_te-IN.po +896 -0
  29. sempy_labs/_bpa_translation/_translations_th-TH.po +873 -0
  30. sempy_labs/_bpa_translation/_translations_zh-CN.po +767 -0
  31. sempy_labs/_bpa_translation/_translations_zu-ZA.po +916 -0
  32. sempy_labs/_clear_cache.py +9 -4
  33. sempy_labs/_generate_semantic_model.py +30 -56
  34. sempy_labs/_helper_functions.py +361 -14
  35. sempy_labs/_icons.py +10 -1
  36. sempy_labs/_list_functions.py +539 -260
  37. sempy_labs/_model_bpa.py +194 -18
  38. sempy_labs/_model_bpa_bulk.py +367 -0
  39. sempy_labs/_model_bpa_rules.py +19 -8
  40. sempy_labs/_model_dependencies.py +12 -10
  41. sempy_labs/_one_lake_integration.py +7 -7
  42. sempy_labs/_query_scale_out.py +61 -96
  43. sempy_labs/_refresh_semantic_model.py +7 -0
  44. sempy_labs/_translations.py +154 -1
  45. sempy_labs/_vertipaq.py +103 -90
  46. sempy_labs/directlake/__init__.py +5 -1
  47. sempy_labs/directlake/_directlake_schema_compare.py +27 -31
  48. sempy_labs/directlake/_directlake_schema_sync.py +55 -66
  49. sempy_labs/directlake/_dl_helper.py +233 -0
  50. sempy_labs/directlake/_get_directlake_lakehouse.py +6 -7
  51. sempy_labs/directlake/_get_shared_expression.py +1 -1
  52. sempy_labs/directlake/_guardrails.py +17 -13
  53. sempy_labs/directlake/_update_directlake_partition_entity.py +54 -30
  54. sempy_labs/directlake/_warm_cache.py +1 -1
  55. sempy_labs/lakehouse/__init__.py +2 -0
  56. sempy_labs/lakehouse/_get_lakehouse_tables.py +61 -69
  57. sempy_labs/lakehouse/_lakehouse.py +66 -9
  58. sempy_labs/lakehouse/_shortcuts.py +1 -1
  59. sempy_labs/migration/_create_pqt_file.py +174 -182
  60. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +236 -268
  61. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +75 -73
  62. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +442 -426
  63. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +91 -97
  64. sempy_labs/migration/_refresh_calc_tables.py +92 -101
  65. sempy_labs/report/_BPAReportTemplate.json +232 -0
  66. sempy_labs/report/__init__.py +6 -2
  67. sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +9 -0
  68. sempy_labs/report/_bpareporttemplate/.platform +11 -0
  69. sempy_labs/report/_bpareporttemplate/StaticResources/SharedResources/BaseThemes/CY24SU06.json +710 -0
  70. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/page.json +11 -0
  71. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/1b08bce3bebabb0a27a8/visual.json +191 -0
  72. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/2f22ddb70c301693c165/visual.json +438 -0
  73. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/3b1182230aa6c600b43a/visual.json +127 -0
  74. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/58577ba6380c69891500/visual.json +576 -0
  75. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/a2a8fa5028b3b776c96c/visual.json +207 -0
  76. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/adfd47ef30652707b987/visual.json +506 -0
  77. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/b6a80ee459e716e170b1/visual.json +127 -0
  78. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/ce3130a721c020cc3d81/visual.json +513 -0
  79. sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/page.json +8 -0
  80. sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/visuals/66e60dfb526437cd78d1/visual.json +112 -0
  81. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/page.json +11 -0
  82. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/07deb8bce824e1be37d7/visual.json +513 -0
  83. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0b1c68838818b32ad03b/visual.json +352 -0
  84. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0c171de9d2683d10b930/visual.json +37 -0
  85. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0efa01be0510e40a645e/visual.json +542 -0
  86. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/6bf2f0eb830ab53cc668/visual.json +221 -0
  87. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/88d8141cb8500b60030c/visual.json +127 -0
  88. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/a753273590beed656a03/visual.json +576 -0
  89. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/b8fdc82cddd61ac447bc/visual.json +127 -0
  90. sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/page.json +9 -0
  91. sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/visuals/ce8532a7e25020271077/visual.json +38 -0
  92. sempy_labs/report/_bpareporttemplate/definition/pages/pages.json +10 -0
  93. sempy_labs/report/_bpareporttemplate/definition/report.json +176 -0
  94. sempy_labs/report/_bpareporttemplate/definition/version.json +4 -0
  95. sempy_labs/report/_bpareporttemplate/definition.pbir +14 -0
  96. sempy_labs/report/_generate_report.py +255 -139
  97. sempy_labs/report/_report_functions.py +26 -33
  98. sempy_labs/report/_report_rebind.py +31 -26
  99. sempy_labs/tom/_model.py +75 -58
  100. semantic_link_labs-0.6.0.dist-info/METADATA +0 -22
  101. semantic_link_labs-0.6.0.dist-info/RECORD +0 -54
  102. sempy_labs/directlake/_fallback.py +0 -60
  103. {semantic_link_labs-0.6.0.dist-info → semantic_link_labs-0.7.1.dist-info}/LICENSE +0 -0
  104. {semantic_link_labs-0.6.0.dist-info → semantic_link_labs-0.7.1.dist-info}/top_level.txt +0 -0
sempy_labs/_model_bpa_rules.py
@@ -8,9 +8,9 @@ from typing import Optional
 
 def model_bpa_rules(
     dataset: str,
-    workspace: Optional[str | None] = None,
-    dependencies: Optional[pd.DataFrame | None] = None,
-):
+    workspace: Optional[str] = None,
+    dependencies: Optional[pd.DataFrame] = None,
+) -> pd.DataFrame:
     """
     Shows the default rules for the semantic model BPA used by the run_model_bpa function.
 
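
A note on the annotation fix above: typing.Optional[X] is already Union[X, None], so Optional[str | None] declared the None twice. Both spellings describe the same type, which typing's union flattening makes easy to verify:

    from typing import Optional, Union

    # Optional[X] is shorthand for Union[X, None]; a nested None is deduplicated,
    # so the old (doubly-nullable) and new annotations are the same type.
    assert Optional[Union[str, None]] == Optional[str] == Union[str, None]
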
@@ -135,6 +135,17 @@ def model_bpa_rules(
             "Setting the 'Data Coverage Definition' property may lead to better performance because the engine knows when it can only query the import-portion of the table and when it needs to query the DirectQuery portion of the table.",
             "https://learn.microsoft.com/analysis-services/tom/table-partitions?view=asallproducts-allversions",
         ),
+        (
+            "Performance",
+            "Model",
+            "Warning",
+            "Dual mode is only relevant for dimension tables if DirectQuery is used for the corresponding fact table",
+            lambda obj: not any(
+                p.Mode == TOM.ModeType.DirectQuery for p in tom.all_partitions()
+            )
+            and any(p.Mode == TOM.ModeType.Dual for p in tom.all_partitions()),
+            "Only use Dual mode for dimension tables/partitions where a corresponding fact table is in DirectQuery. Using Dual mode in other circumstances (i.e. rest of the model is in Import mode) may lead to performance issues especially if the number of measures in the model is high.",
+        ),
         (
             "Performance",
             "Table",
@@ -413,7 +424,7 @@ def model_bpa_rules(
             re.search(
                 r"USERELATIONSHIP\s*\(\s*.+?(?=])\]\s*,\s*'*"
                 + obj.Name
-                + "'*\[",
+                + r"'*\[",
                 m.Expression,
                 flags=re.IGNORECASE,
             )
@@ -590,13 +601,13 @@
             re.search(
                 r"USERELATIONSHIP\s*\(\s*\'*"
                 + obj.FromTable.Name
-                + "'*\["
+                + r"'*\["
                 + obj.FromColumn.Name
-                + "\]\s*,\s*'*"
+                + r"\]\s*,\s*'*"
                 + obj.ToTable.Name
-                + "'*\["
+                + r"'*\["
                 + obj.ToColumn.Name
-                + "\]",
+                + r"\]",
                 m.Expression,
                 flags=re.IGNORECASE,
             )
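
The two regex hunks above convert concatenated pattern fragments to raw strings. "\[" is not a recognized Python string escape, so the old literals relied on the fallback that keeps the backslash and raise a DeprecationWarning (a visible SyntaxWarning on Python 3.12+). The resulting characters are identical; only the raw form is warning-free:

    import re

    raw = r"'*\["                    # backslash reaches the regex engine untouched
    assert raw == "'*" + "\\" + "["  # same four characters as the old literal
    print(re.search(raw, "'Sales'[Amount]").group())  # matches "'["
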
sempy_labs/_model_dependencies.py
@@ -8,7 +8,9 @@ from sempy._utils._log import log
 
 
 @log
-def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):
+def get_measure_dependencies(
+    dataset: str, workspace: Optional[str] = None
+) -> pd.DataFrame:
     """
     Shows all dependencies for all measures in a semantic model.
 
@@ -33,7 +35,7 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):
         dataset=dataset,
         workspace=workspace,
         dax_string="""
-        SELECT 
+        SELECT
          [TABLE] AS [Table Name]
         ,[OBJECT] AS [Object Name]
         ,[OBJECT_TYPE] AS [Object Type]
@@ -132,7 +134,9 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None):
 
 
 @log
-def get_model_calc_dependencies(dataset: str, workspace: Optional[str] = None):
+def get_model_calc_dependencies(
+    dataset: str, workspace: Optional[str] = None
+) -> pd.DataFrame:
     """
     Shows all dependencies for all objects in a semantic model.
 
@@ -288,7 +292,7 @@ def measure_dependency_tree(
 
     if len(dfM_filt) == 0:
         print(
-            f"The '{measure_name}' measure does not exist in the '{dataset}' semantic model in the '{workspace}' workspace."
+            f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{dataset}' semantic model in the '{workspace}' workspace."
         )
         return
 
@@ -311,19 +315,17 @@
         if parent_node is None:
             parent_node = Node(parent_node_name)
             node_dict[parent_node_name] = parent_node
-        parent_node.custom_property = icons.measure_icon + " "
+        parent_node.custom_property = f"{icons.measure_icon} "
 
         # Create the child node
         child_node_name = ref_obj_name
         child_node = Node(child_node_name, parent=parent_node)
         if ref_obj_type == "Column":
-            child_node.custom_property = (
-                icons.column_icon + " '" + ref_obj_table_name + "'"
-            )
+            child_node.custom_property = f"{icons.column_icon} '{ref_obj_table_name}'"
         elif ref_obj_type == "Table":
-            child_node.custom_property = icons.table_icon + " "
+            child_node.custom_property = f"{icons.table_icon} "
         elif ref_obj_type == "Measure":
-            child_node.custom_property = icons.measure_icon + " "
+            child_node.custom_property = f"{icons.measure_icon} "
 
         # Update the dictionary with the child node
         node_dict[child_node_name] = child_node
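
With the new -> pd.DataFrame annotations, callers and editors know these functions hand back plain dataframes. A hedged usage sketch; the top-level import path and model name are assumptions:

    from sempy_labs import get_measure_dependencies  # re-export path assumed

    deps = get_measure_dependencies(dataset="Sales Model")  # hypothetical model
    # Columns follow the DAX query above: Table Name, Object Name, Object Type, ...
    print(deps.head())
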
sempy_labs/_one_lake_integration.py
@@ -50,13 +50,13 @@ def export_model_to_onelake(
     {{
         'export': {{
             'layout': 'delta',
-        'type': 'full',
-        'objects': [
-        {{
+            'type': 'full',
+            'objects': [
+                {{
                     'database': '{dataset}'
-            }}
-            ]
-            }}
+                }}
+            ]
+        }}
     }}
     """
 
@@ -120,7 +120,7 @@ def export_model_to_onelake(
 
     print(f"{icons.in_progress} Creating shortcuts...\n")
     for tableName in tables:
-        tablePath = "Tables/" + tableName
+        tablePath = f"Tables/{tableName}"
         shortcutName = tableName.replace(" ", "")
         request_body = {
             "path": "Tables",
sempy_labs/_query_scale_out.py
@@ -1,7 +1,10 @@
 import sempy.fabric as fabric
 import pandas as pd
-from sempy_labs._helper_functions import resolve_dataset_id
-from typing import Optional
+from sempy_labs._helper_functions import (
+    resolve_dataset_id,
+    resolve_workspace_name_and_id,
+)
+from typing import Optional, Tuple
 import sempy_labs._icons as icons
 from sempy.fabric.exceptions import FabricHTTPException
 
@@ -18,20 +21,11 @@ def qso_sync(dataset: str, workspace: Optional[str] = None):
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-
     """
 
     # https://learn.microsoft.com/en-us/rest/api/power-bi/datasets/trigger-query-scale-out-sync-in-group
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
-
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
     dataset_id = resolve_dataset_id(dataset, workspace)
 
     client = fabric.PowerBIRestClient()
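
Throughout this module the repeated five-line workspace-resolution branch is replaced by the shared resolve_workspace_name_and_id helper imported from sempy_labs._helper_functions. Judging from the code it replaces, the helper is equivalent to this sketch (the actual implementation may differ):

    from typing import Optional, Tuple
    import sempy.fabric as fabric

    def resolve_workspace_name_and_id(workspace: Optional[str] = None) -> Tuple[str, str]:
        # None means "the workspace this notebook/lakehouse is attached to".
        if workspace is None:
            workspace_id = fabric.get_workspace_id()
            workspace = fabric.resolve_workspace_name(workspace_id)
        else:
            workspace_id = fabric.resolve_workspace_id(workspace)
        return workspace, workspace_id
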
@@ -46,7 +40,9 @@ def qso_sync(dataset: str, workspace: Optional[str] = None):
     )
 
 
-def qso_sync_status(dataset: str, workspace: Optional[str] = None):
+def qso_sync_status(
+    dataset: str, workspace: Optional[str] = None
+) -> Tuple[pd.DataFrame, pd.DataFrame]:
     """
     Returns the query scale-out sync status for the specified dataset from the specified workspace.
 
@@ -61,7 +57,8 @@ def qso_sync_status(dataset: str, workspace: Optional[str] = None):
 
     Returns
     -------
-
+    Tuple[pandas.DataFrame, pandas.DataFrame]
+        2 pandas dataframes showing the query scale-out sync status.
     """
 
     # https://learn.microsoft.com/en-us/rest/api/power-bi/datasets/get-query-scale-out-sync-status-in-group
@@ -84,12 +81,7 @@ def qso_sync_status(dataset: str, workspace: Optional[str] = None):
         columns=["Replica ID", "Replica Type", "Replica Version", "Replica Timestamp"]
     )
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
-
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
     dataset_id = resolve_dataset_id(dataset, workspace)
 
     client = fabric.PowerBIRestClient()
@@ -101,29 +93,29 @@
         raise FabricHTTPException(response)
 
     o = response.json()
-    sos = o["scaleOutStatus"]
+    sos = o.get("scaleOutStatus")
 
     if sos == "Enabled":
         new_data = {
-            "Scale Out Status": o["scaleOutStatus"],
-            "Sync Start Time": o["syncStartTime"],
-            "Sync End Time": o["syncEndTime"],
-            "Commit Version": o["commitVersion"],
-            "Commit Timestamp": o["commitTimestamp"],
-            "Target Sync Version": o["targetSyncVersion"],
-            "Target Sync Timestamp": o["targetSyncTimestamp"],
-            "Trigger Reason": o["triggerReason"],
-            "Min Active Read Version": o["minActiveReadVersion"],
-            "Min Active Read Timestamp": o["minActiveReadTimestamp"],
+            "Scale Out Status": o.get("scaleOutStatus"),
+            "Sync Start Time": o.get("syncStartTime"),
+            "Sync End Time": o.get("syncEndTime"),
+            "Commit Version": o.get("commitVersion"),
+            "Commit Timestamp": o.get("commitTimestamp"),
+            "Target Sync Version": o.get("targetSyncVersion"),
+            "Target Sync Timestamp": o.get("targetSyncTimestamp"),
+            "Trigger Reason": o.get("triggerReason"),
+            "Min Active Read Version": o.get("minActiveReadVersion"),
+            "Min Active Read Timestamp": o.get("minActiveReadTimestamp"),
         }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
-        for r in o["scaleOutReplicas"]:
+        for r in o.get("scaleOutReplicas", []):
             new_data = {
-                "Replica ID": r["replicaId"],
-                "Replica Type": r["replicaType"],
-                "Replica Version": str(r["replicaVersion"]),
-                "Replica Timestamp": r["replicaTimestamp"],
+                "Replica ID": r.get("replicaId"),
+                "Replica Type": r.get("replicaType"),
+                "Replica Version": str(r.get("replicaVersion")),
+                "Replica Timestamp": r.get("replicaTimestamp"),
             }
             dfRep = pd.concat(
                 [dfRep, pd.DataFrame(new_data, index=[0])], ignore_index=True
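
Switching from o[...] indexing to o.get(...) makes the parsing tolerant of keys missing from the REST payload: indexing raises KeyError, while .get returns None or a supplied default, so the replica loop simply does nothing when scaleOutReplicas is absent:

    o = {"scaleOutStatus": "Disabled"}       # payload without replica details
    print(o.get("scaleOutReplicas"))         # None -- no exception raised
    for r in o.get("scaleOutReplicas", []):  # empty default: loop body is skipped
        print(r)
    # o["scaleOutReplicas"] would raise KeyError and abort the status check
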
@@ -147,7 +139,7 @@ def qso_sync_status(dataset: str, workspace: Optional[str] = None):
     return df, dfRep
 
 
-def disable_qso(dataset: str, workspace: Optional[str] = None):
+def disable_qso(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
     """
     Sets the max read-only replicas to 0, disabling query scale out.
 
@@ -162,15 +154,11 @@
 
     Returns
     -------
-
+    pandas.DataFrame
+        A pandas dataframe showing the current query scale out settings.
     """
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
-
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
     dataset_id = resolve_dataset_id(dataset, workspace)
 
     request_body = {"queryScaleOutSettings": {"maxReadOnlyReplicas": "0"}}
@@ -183,6 +171,7 @@
         raise FabricHTTPException(response)
 
     df = list_qso_settings(dataset=dataset, workspace=workspace)
+
     print(
         f"{icons.green_dot} Query scale out has been disabled for the '{dataset}' semantic model within the '{workspace}' workspace."
     )
@@ -195,7 +184,7 @@ def set_qso(
     auto_sync: Optional[bool] = True,
     max_read_only_replicas: Optional[int] = -1,
     workspace: Optional[str] = None,
-):
+) -> pd.DataFrame:
     """
     Sets the query scale out settings for a semantic model.
 
@@ -214,17 +203,13 @@
 
     Returns
    -------
-
+    pandas.DataFrame
+        A pandas dataframe showing the current query scale-out settings.
     """
 
     # https://learn.microsoft.com/en-us/rest/api/power-bi/datasets/update-dataset-in-group
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
-
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
     dataset_id = resolve_dataset_id(dataset, workspace)
 
     if max_read_only_replicas == 0:
@@ -234,31 +219,27 @@
     request_body = {
         "queryScaleOutSettings": {
             "autoSyncReadOnlyReplicas": auto_sync,
-            "maxReadOnlyReplicas": str(max_read_only_replicas),
+            "maxReadOnlyReplicas": max_read_only_replicas,
         }
     }
 
-    ssm = set_semantic_model_storage_format(
+    set_semantic_model_storage_format(
         dataset=dataset, storage_format="Large", workspace=workspace
     )
-    if ssm == 200:
-        client = fabric.PowerBIRestClient()
-        response = client.patch(
-            f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}",
-            json=request_body,
-        )
-        if response.status_code != 200:
-            raise FabricHTTPException(response)
+    client = fabric.PowerBIRestClient()
+    response = client.patch(
+        f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}",
+        json=request_body,
+    )
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
 
-        df = list_qso_settings(dataset=dataset, workspace=workspace)
-        print(
-            f"{icons.green_dot} Query scale out has been set on the '{dataset}' semantic model within the '{workspace}' workspace."
-        )
-        return df
-    else:
-        raise ValueError(
-            f"{icons.red_dot} Failed to set the '{dataset}' semantic model within the '{workspace}' workspace to large semantic model storage format. This is a prerequisite for enabling Query Scale Out.\n\"https://learn.microsoft.com/power-bi/enterprise/service-premium-scale-out#prerequisites\""
-        )
+    df = list_qso_settings(dataset=dataset, workspace=workspace)
+    print(
+        f"{icons.green_dot} Query scale out has been set on the '{dataset}' semantic model within the '{workspace}' workspace."
+    )
+
+    return df
 
 
 def set_semantic_model_storage_format(
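
Note the control-flow change in set_qso: it no longer branches on a returned status code, because set_semantic_model_storage_format now raises FabricHTTPException itself on failure (its return response.status_code line is dropped in a later hunk). Errors therefore surface as exceptions; a sketch of the resulting calling pattern, with a hypothetical model name:

    from sempy.fabric.exceptions import FabricHTTPException

    try:
        df = set_qso(dataset="Sales Model", max_read_only_replicas=2)
    except FabricHTTPException as exc:
        print(f"Enabling query scale out failed: {exc}")
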
@@ -277,18 +258,9 @@ def set_semantic_model_storage_format(
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-
     """
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
-
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
     dataset_id = resolve_dataset_id(dataset, workspace)
 
     storage_format = storage_format.capitalize()
@@ -317,10 +289,10 @@
         raise FabricHTTPException(response)
     print(f"{icons.green_dot} Semantic model storage format set to '{storage_format}'.")
 
-    return response.status_code
-
 
-def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] = None):
+def list_qso_settings(
+    dataset: Optional[str] = None, workspace: Optional[str] = None
+) -> pd.DataFrame:
     """
     Shows the query scale out settings for a semantic model (or all semantic models within a workspace).
 
@@ -339,11 +311,7 @@ def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] =
         A pandas dataframe showing the query scale out settings.
     """
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     if dataset is not None:
         dataset_id = resolve_dataset_id(dataset, workspace)
@@ -360,7 +328,8 @@ def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] =
     )
     client = fabric.PowerBIRestClient()
     response = client.get(f"/v1.0/myorg/groups/{workspace_id}/datasets")
-    for v in response.json()["value"]:
+
+    for v in response.json().get("value", []):
         tsm = v.get("targetStorageMode")
         if tsm == "Abf":
             sm = "Small"
@@ -416,14 +385,10 @@ def set_workspace_default_storage_format(
 
     if storage_format not in storageFormats:
         print(
-            f"Invalid storage format. Please choose from these options: {storageFormats}."
+            f"{icons.red_dot} Invalid storage format. Please choose from these options: {storageFormats}."
         )
 
-    if workspace is None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-    else:
-        workspace_id = fabric.resolve_workspace_id(workspace)
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     request_body = {"name": workspace, "defaultDatasetStorageFormat": storage_format}
 
sempy_labs/_refresh_semantic_model.py
@@ -16,6 +16,7 @@ def refresh_semantic_model(
     refresh_type: Optional[str] = None,
     retry_count: Optional[int] = 0,
     apply_refresh_policy: Optional[bool] = True,
+    max_parallelism: Optional[int] = 10,
     workspace: Optional[str] = None,
 ):
     """
@@ -35,6 +36,10 @@
         Number of times the operation retries before failing.
     apply_refresh_policy : bool, default=True
         If an incremental refresh policy is defined, determines whether to apply the policy. Modes are true or false. If the policy isn't applied, the full process leaves partition definitions unchanged, and fully refreshes all partitions in the table. If commitMode is transactional, applyRefreshPolicy can be true or false. If commitMode is partialBatch, applyRefreshPolicy of true isn't supported, and applyRefreshPolicy must be set to false.
+    max_parallelism : int, default=10
+        Determines the maximum number of threads that can run the processing commands in parallel.
+        This value aligns with the MaxParallelism property that can be set in the TMSL Sequence command or by using other methods.
+        Defaults to 10.
     workspace : str, default=None
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -90,6 +95,7 @@
             refresh_type=refresh_type,
             retry_count=retry_count,
             apply_refresh_policy=apply_refresh_policy,
+            max_parallelism=max_parallelism,
         )
     else:
         requestID = fabric.refresh_dataset(
@@ -98,6 +104,7 @@
             refresh_type=refresh_type,
             retry_count=retry_count,
             apply_refresh_policy=apply_refresh_policy,
+            max_parallelism=max_parallelism,
             objects=objects,
         )
     print(
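
The new max_parallelism argument is passed straight through to fabric.refresh_dataset in both call sites above. A hedged usage sketch; the import path, model, and workspace names are assumptions:

    from sempy_labs import refresh_semantic_model  # re-export path assumed

    refresh_semantic_model(
        dataset="Sales Model",     # hypothetical semantic model
        refresh_type="full",
        retry_count=2,
        max_parallelism=4,         # cap processing threads, e.g. on a small capacity
        workspace="My Workspace",  # hypothetical workspace
    )
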
sempy_labs/_translations.py
@@ -1,3 +1,4 @@
+import sempy
 import pandas as pd
 from typing import List, Optional, Union
 from sempy._utils._log import log
@@ -10,7 +11,7 @@ def translate_semantic_model(
     languages: Union[str, List[str]],
     exclude_characters: Optional[str] = None,
     workspace: Optional[str] = None,
-):
+) -> pd.DataFrame:
     """
     Translates names, descriptions, display folders for all objects in a semantic model.
 
@@ -29,6 +30,8 @@
 
     Returns
     -------
+    pandas.DataFrame
+        Shows a pandas dataframe which displays all of the translations in the semantic model.
 
     """
 
@@ -302,3 +305,153 @@
                 lang,
                 i,
             )
+    result = pd.DataFrame(
+        columns=[
+            "Language",
+            "Object Type",
+            "Table Name",
+            "Object Name",
+            "Translated Object Name",
+            "Description",
+            "Translated Description",
+            "Display Folder",
+            "Translated Display Folder",
+        ]
+    )
+    with connect_semantic_model(
+        dataset=dataset, readonly=True, workspace=workspace
+    ) as tom:
+
+        sempy.fabric._client._utils._init_analysis_services()
+        import Microsoft.AnalysisServices.Tabular as TOM
+
+        for c in tom.model.Cultures:
+            for tr in c.ObjectTranslations:
+                oType = str(tr.Object.ObjectType)
+                oName = tr.Object.Name
+                tValue = tr.Value
+                prop = str(tr.Property)
+
+                if tr.Object.ObjectType == TOM.ObjectType.Table:
+                    desc = tom.model.Tables[oName].Description
+                    new_data = {
+                        "Language": c.Name,
+                        "Table Name": oName,
+                        "Object Name": oName,
+                        "Object Type": oType,
+                        "Description": desc,
+                    }
+                    result = pd.concat(
+                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                    )
+                    condition = (
+                        (result["Language"] == c.Name)
+                        & (result["Table Name"] == oName)
+                        & (result["Object Name"] == oName)
+                        & (result["Object Type"] == oType)
+                    )
+                elif tr.Object.ObjectType == TOM.ObjectType.Level:
+                    hierarchyName = tr.Object.Parent.Name
+                    tName = tr.Object.Parent.Parent.Name
+                    levelName = "'" + hierarchyName + "'[" + oName + "]"
+                    desc = (
+                        tom.model.Tables[tName]
+                        .Hierarchies[hierarchyName]
+                        .Levels[oName]
+                        .Description
+                    )
+                    new_data = {
+                        "Language": c.Name,
+                        "Table Name": tName,
+                        "Object Name": levelName,
+                        "Object Type": oType,
+                        "Description": desc,
+                    }
+                    result = pd.concat(
+                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                    )
+                    condition = (
+                        (result["Language"] == c.Name)
+                        & (result["Table Name"] == tName)
+                        & (result["Object Name"] == levelName)
+                        & (result["Object Type"] == oType)
+                    )
+                elif tr.Object.ObjectType == TOM.ObjectType.Column:
+                    tName = tr.Object.Table.Name
+                    desc = tom.model.Tables[tName].Columns[oName].Description
+                    display_folder = (
+                        tom.model.Tables[tName].Columns[oName].DisplayFolder
+                    )
+                    new_data = {
+                        "Language": c.Name,
+                        "Table Name": tName,
+                        "Object Name": oName,
+                        "Object Type": oType,
+                        "Description": desc,
+                        "Display Folder": display_folder,
+                    }
+                    result = pd.concat(
+                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                    )
+                    condition = (
+                        (result["Language"] == c.Name)
+                        & (result["Table Name"] == tName)
+                        & (result["Object Name"] == oName)
+                        & (result["Object Type"] == oType)
+                    )
+                elif tr.Object.ObjectType == TOM.ObjectType.Measure:
+                    tName = tr.Object.Table.Name
+                    desc = tom.model.Tables[tName].Measures[oName].Description
+                    display_folder = (
+                        tom.model.Tables[tName].Measures[oName].DisplayFolder
+                    )
+                    new_data = {
+                        "Language": c.Name,
+                        "Table Name": tName,
+                        "Object Name": oName,
+                        "Object Type": oType,
+                        "Description": desc,
+                        "Display Folder": display_folder,
+                    }
+                    result = pd.concat(
+                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                    )
+                    condition = (
+                        (result["Language"] == c.Name)
+                        & (result["Table Name"] == tName)
+                        & (result["Object Name"] == oName)
+                        & (result["Object Type"] == oType)
+                    )
+                elif tr.Object.ObjectType == TOM.ObjectType.Hierarchy:
+                    tName = tr.Object.Table.Name
+                    desc = tom.model.Tables[tName].Hierarchies[oName].Description
+                    display_folder = (
+                        tom.model.Tables[tName].Hierarchies[oName].DisplayFolder
+                    )
+                    new_data = {
+                        "Language": c.Name,
+                        "Table Name": tName,
+                        "Object Name": oName,
+                        "Object Type": oType,
+                        "Description": desc,
+                        "Display Folder": display_folder,
+                    }
+                    result = pd.concat(
+                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                    )
+                    condition = (
+                        (result["Language"] == c.Name)
+                        & (result["Table Name"] == tName)
+                        & (result["Object Name"] == oName)
+                        & (result["Object Type"] == oType)
+                    )
+
+                if prop == "Caption":
+                    result.loc[condition, "Translated Object Name"] = tValue
+                elif prop == "Description":
+                    result.loc[condition, "Translated Description"] = tValue
+                else:
+                    result.loc[condition, "Translated Display Folder"] = tValue
+    result.fillna("", inplace=True)
+
+    return result
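
translate_semantic_model now returns the translation table it assembles above instead of returning nothing. A hedged usage sketch; the import path and model name are assumptions:

    from sempy_labs import translate_semantic_model  # re-export path assumed

    df = translate_semantic_model(
        dataset="Sales Model",         # hypothetical semantic model
        languages=["fr-FR", "de-DE"],  # target cultures
    )
    # One row per translated object and culture, with Translated Object Name,
    # Translated Description and Translated Display Folder filled in.
    print(df.head())
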