semantic-link-labs 0.11.2__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (93)
  1. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/METADATA +7 -6
  2. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/RECORD +90 -84
  3. sempy_labs/__init__.py +18 -18
  4. sempy_labs/_a_lib_info.py +1 -1
  5. sempy_labs/_authentication.py +81 -32
  6. sempy_labs/_capacities.py +2 -2
  7. sempy_labs/_capacity_migration.py +4 -4
  8. sempy_labs/_clear_cache.py +1 -1
  9. sempy_labs/_connections.py +107 -70
  10. sempy_labs/_dashboards.py +6 -2
  11. sempy_labs/_data_pipelines.py +1 -1
  12. sempy_labs/_dataflows.py +1 -1
  13. sempy_labs/_dax.py +3 -3
  14. sempy_labs/_delta_analyzer.py +4 -4
  15. sempy_labs/_delta_analyzer_history.py +1 -1
  16. sempy_labs/_deployment_pipelines.py +1 -1
  17. sempy_labs/_environments.py +1 -1
  18. sempy_labs/_eventhouses.py +9 -3
  19. sempy_labs/_eventstreams.py +1 -1
  20. sempy_labs/_external_data_shares.py +56 -2
  21. sempy_labs/_gateways.py +14 -7
  22. sempy_labs/_generate_semantic_model.py +7 -12
  23. sempy_labs/_git.py +1 -1
  24. sempy_labs/_graphQL.py +1 -1
  25. sempy_labs/_helper_functions.py +293 -22
  26. sempy_labs/_job_scheduler.py +12 -1
  27. sempy_labs/_kql_databases.py +1 -1
  28. sempy_labs/_kql_querysets.py +10 -2
  29. sempy_labs/_kusto.py +2 -2
  30. sempy_labs/_labels.py +126 -0
  31. sempy_labs/_list_functions.py +2 -2
  32. sempy_labs/_managed_private_endpoints.py +1 -1
  33. sempy_labs/_mirrored_databases.py +40 -16
  34. sempy_labs/_mirrored_warehouses.py +1 -1
  35. sempy_labs/_ml_experiments.py +1 -1
  36. sempy_labs/_model_bpa.py +6 -6
  37. sempy_labs/_model_bpa_bulk.py +3 -3
  38. sempy_labs/_model_dependencies.py +1 -1
  39. sempy_labs/_mounted_data_factories.py +3 -3
  40. sempy_labs/_notebooks.py +153 -3
  41. sempy_labs/_query_scale_out.py +2 -2
  42. sempy_labs/_refresh_semantic_model.py +1 -1
  43. sempy_labs/_semantic_models.py +15 -3
  44. sempy_labs/_spark.py +1 -1
  45. sempy_labs/_sql.py +3 -3
  46. sempy_labs/_sql_endpoints.py +5 -3
  47. sempy_labs/_sqldatabase.py +5 -1
  48. sempy_labs/_tags.py +3 -1
  49. sempy_labs/_translations.py +7 -360
  50. sempy_labs/_user_delegation_key.py +2 -2
  51. sempy_labs/_utils.py +27 -0
  52. sempy_labs/_vertipaq.py +3 -3
  53. sempy_labs/_vpax.py +1 -1
  54. sempy_labs/_warehouses.py +5 -0
  55. sempy_labs/_workloads.py +1 -1
  56. sempy_labs/_workspace_identity.py +1 -1
  57. sempy_labs/_workspaces.py +145 -11
  58. sempy_labs/admin/__init__.py +6 -0
  59. sempy_labs/admin/_capacities.py +34 -11
  60. sempy_labs/admin/_items.py +2 -2
  61. sempy_labs/admin/_tenant_keys.py +89 -0
  62. sempy_labs/directlake/_dl_helper.py +5 -2
  63. sempy_labs/graph/_users.py +3 -5
  64. sempy_labs/lakehouse/__init__.py +4 -0
  65. sempy_labs/lakehouse/_helper.py +18 -9
  66. sempy_labs/lakehouse/_lakehouse.py +18 -9
  67. sempy_labs/lakehouse/_materialized_lake_views.py +76 -0
  68. sempy_labs/lakehouse/_shortcuts.py +8 -2
  69. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +38 -47
  70. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +12 -22
  71. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +7 -11
  72. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +14 -23
  73. sempy_labs/ml_model/__init__.py +23 -0
  74. sempy_labs/ml_model/_functions.py +427 -0
  75. sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +9 -0
  76. sempy_labs/report/_bpareporttemplate/.platform +11 -0
  77. sempy_labs/report/_download_report.py +4 -1
  78. sempy_labs/report/_export_report.py +12 -5
  79. sempy_labs/report/_generate_report.py +11 -3
  80. sempy_labs/report/_paginated.py +21 -15
  81. sempy_labs/report/_report_functions.py +19 -11
  82. sempy_labs/report/_report_rebind.py +21 -10
  83. sempy_labs/report/_reportwrapper.py +1 -1
  84. sempy_labs/theme/_org_themes.py +5 -6
  85. sempy_labs/tom/_model.py +13 -19
  86. sempy_labs/variable_library/__init__.py +19 -0
  87. sempy_labs/variable_library/_functions.py +403 -0
  88. sempy_labs/_dax_query_view.py +0 -57
  89. sempy_labs/_ml_models.py +0 -111
  90. sempy_labs/_variable_libraries.py +0 -92
  91. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/WHEEL +0 -0
  92. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/licenses/LICENSE +0 -0
  93. {semantic_link_labs-0.11.2.dist-info → semantic_link_labs-0.12.0.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 import sempy.fabric as fabric
 import time
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _get_partition_map,
     _process_and_display_chart,
@@ -1,7 +1,7 @@
 from uuid import UUID
 from typing import Optional, List
 import pandas as pd
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     _create_dataframe,
     _base_api,
     _update_dataframe_datatypes,
@@ -23,6 +23,8 @@ def get_semantic_model_refresh_schedule(
     """
     Gets the refresh schedule for the specified dataset from the specified workspace.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     dataset : str | uuid.UUID
@@ -38,7 +40,7 @@ def get_semantic_model_refresh_schedule(
         Shows the refresh schedule for the specified dataset from the specified workspace.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace)
 
     columns = {
@@ -60,7 +62,8 @@ def get_semantic_model_refresh_schedule(
     df = _create_dataframe(columns)
 
     result = _base_api(
-        request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule"
+        request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule",
+        client="fabric_sp",
     ).json()
 
     df = (
@@ -83,6 +86,8 @@ def enable_semantic_model_scheduled_refresh(
     """
     Enables the scheduled refresh for the specified dataset from the specified workspace.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     dataset : str | uuid.UUID
@@ -117,6 +122,7 @@ def enable_semantic_model_scheduled_refresh(
         request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule",
         method="patch",
         payload=payload,
+        client="fabric_sp",
     )
 
     print(
@@ -131,6 +137,8 @@ def delete_semantic_model(dataset: str | UUID, workspace: Optional[str | UUID] =
 
     This is a wrapper function for the following API: `Items - Delete Semantic Model <https://learn.microsoft.com/rest/api/fabric/semanticmodel/items/delete-semantic-model>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     dataset: str | uuid.UUID
@@ -157,6 +165,8 @@ def update_semantic_model_refresh_schedule(
 
     This is a wrapper function for the following API: `Datasets - Update Refresh Schedule In Group <https://learn.microsoft.com/rest/api/power-bi/datasets/update-refresh-schedule-in-group>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     dataset : str | uuid.UUID
@@ -247,6 +257,8 @@ def list_semantic_model_datasources(
 
     This is a wrapper function for the following API: `Datasets - Get Datasources In Group <https://learn.microsoft.com/rest/api/power-bi/datasets/get-datasources-in-group>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     dataset : str | uuid.UUID
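Most of the docstring additions above advertise the Service Principal support that the new `client="fabric_sp"` argument to `_base_api` enables. As a rough usage sketch, the linked Service Principal notebook wraps calls in the library's `service_principal_authentication` context manager; the Key Vault URI and secret names below are illustrative placeholders, not values from this diff:

```python
import sempy_labs as labs

# Sketch only: parameter names follow the pattern shown in the project's
# Service Principal notebook; the Key Vault details are placeholders.
with labs.service_principal_authentication(
    key_vault_uri="https://contoso-kv.vault.azure.net/",
    key_vault_tenant_id="tenant-id-secret-name",
    key_vault_client_id="client-id-secret-name",
    key_vault_client_secret="client-secret-secret-name",
):
    # Runs under the service principal; the function routes its REST call
    # through the "fabric_sp" client added in this release.
    df = labs.get_semantic_model_refresh_schedule(
        dataset="MyModel", workspace="MyWorkspace"
    )
```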
sempy_labs/_spark.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _update_dataframe_datatypes,
     _base_api,
sempy_labs/_sql.py CHANGED
@@ -3,7 +3,7 @@ from typing import Optional, Union, List
 from sempy._utils._log import log
 import struct
 from itertools import chain, repeat
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     resolve_lakehouse_name_and_id,
     resolve_item_name_and_id,
     resolve_workspace_name_and_id,
@@ -39,7 +39,7 @@ class ConnectBase:
         timeout: Optional[int] = None,
         endpoint_type: str = "warehouse",
     ):
-        from sempy.fabric._token_provider import SynapseTokenProvider
+        from sempy.fabric._credentials import get_access_token
         import pyodbc
 
         (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
@@ -82,7 +82,7 @@ class ConnectBase:
         )
 
         # Set up the connection string
-        access_token = SynapseTokenProvider()("sql")
+        access_token = get_access_token("sql").token
         tokenstruct = _bytes2mswin_bstr(access_token.encode())
         if endpoint_type == "sqldatabase":
             conn_str = f"DRIVER={{ODBC Driver 18 for SQL Server}};SERVER={tds_endpoint};DATABASE={resource_name}-{resource_id};Encrypt=Yes;"
@@ -1,7 +1,7 @@
 from typing import Optional, Literal
 from uuid import UUID
 import pandas as pd
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     _base_api,
     _create_dataframe,
     resolve_workspace_name_and_id,
@@ -73,6 +73,8 @@ def refresh_sql_endpoint_metadata(
 
     This is a wrapper function for the following API: `Items - Refresh Sql Endpoint Metadata <https://learn.microsoft.com/rest/api/fabric/sqlendpoint/items/refresh-sql-endpoint-metadata>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     item : str | uuid.UUID
@@ -140,7 +142,7 @@ def refresh_sql_endpoint_metadata(
     }
 
     result = _base_api(
-        request=f"v1/workspaces/{workspace_id}/sqlEndpoints/{sql_endpoint_id}/refreshMetadata?preview=true",
+        request=f"v1/workspaces/{workspace_id}/sqlEndpoints/{sql_endpoint_id}/refreshMetadata",
         method="post",
         client="fabric_sp",
         status_codes=[200, 202],
@@ -159,7 +161,7 @@ def refresh_sql_endpoint_metadata(
     }
 
     if result:
-        df = pd.json_normalize(result)
+        df = pd.json_normalize(result.get("value"))
 
         # Extract error code and message, set to None if no error
         df["Error Code"] = df.get("error.errorCode", None)
@@ -1,4 +1,4 @@
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     resolve_workspace_id,
     _base_api,
     _create_dataframe,
@@ -21,6 +21,8 @@ def create_sql_database(
 
     This is a wrapper function for the following API: `Items - Create SQL Database <https://learn.microsoft.com/rest/api/fabric/sqldatabase/items/create-sql-database>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     name: str
@@ -47,6 +49,8 @@ def delete_sql_database(
 
     This is a wrapper function for the following API: `Items - Delete SQL Database <https://learn.microsoft.com/rest/api/fabric/sqldatabase/items/delete-sql-database>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     sql_database: str | uuid.UUID
sempy_labs/_tags.py CHANGED
@@ -1,4 +1,4 @@
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     _base_api,
     _create_dataframe,
     _update_dataframe_datatypes,
@@ -62,6 +62,8 @@ def resolve_tags(tags: str | List[str]) -> List[str]:
     """
     Resolves the tags to a list of strings.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     tags : str | List[str]
@@ -1,12 +1,7 @@
-import sempy
+import sempy.fabric as fabric
 import pandas as pd
 from typing import List, Optional, Union
 from sempy._utils._log import log
-import sempy_labs._icons as icons
-from ._helper_functions import (
-    get_language_codes,
-    _create_spark_session,
-)
 from uuid import UUID
 
 
@@ -39,358 +34,10 @@ def translate_semantic_model(
     Shows a pandas dataframe which displays all of the translations in the semantic model.
     """
 
-    from synapse.ml.services import Translate
-    from pyspark.sql.functions import col, flatten
-    from sempy_labs.tom import connect_semantic_model
-
-    icons.sll_tags.append("TranslateSemanticModel")
-
-    def _clean_text(text, exclude_chars):
-        if exclude_chars:
-            for char in exclude_chars:
-                text = text.replace(char, " ")
-        return text
-
-    if isinstance(languages, str):
-        languages = [languages]
-
-    languages = get_language_codes(languages)
-
-    df_prep = pd.DataFrame(
-        columns=["Object Type", "Name", "Description", "Display Folder"]
-    )
-
-    final_df = pd.DataFrame(columns=["Value", "Translation"])
-
-    with connect_semantic_model(
-        dataset=dataset, readonly=False, workspace=workspace
-    ) as tom:
-
-        for o in tom.model.Tables:
-            oName = _clean_text(o.Name, exclude_characters)
-            oDescription = _clean_text(o.Description, exclude_characters)
-            new_data = {
-                "Name": o.Name,
-                "TName": oName,
-                "Object Type": "Table",
-                "Description": o.Description,
-                "TDescription": oDescription,
-                "Display Folder": None,
-                "TDisplay Folder": None,
-            }
-            df_prep = pd.concat(
-                [df_prep, pd.DataFrame(new_data, index=[0])], ignore_index=True
-            )
-        for o in tom.all_columns():
-            oName = _clean_text(o.Name, exclude_characters)
-            oDescription = _clean_text(o.Description, exclude_characters)
-            oDisplayFolder = _clean_text(o.DisplayFolder, exclude_characters)
-            new_data = {
-                "Name": o.Name,
-                "TName": oName,
-                "Object Type": "Column",
-                "Description": o.Description,
-                "TDescription": oDescription,
-                "Display Folder": o.DisplayFolder,
-                "TDisplay Folder": oDisplayFolder,
-            }
-            df_prep = pd.concat(
-                [df_prep, pd.DataFrame(new_data, index=[0])], ignore_index=True
-            )
-        for o in tom.all_measures():
-            oName = _clean_text(o.Name, exclude_characters)
-            oDescription = _clean_text(o.Description, exclude_characters)
-            oDisplayFolder = _clean_text(o.DisplayFolder, exclude_characters)
-            new_data = {
-                "Name": o.Name,
-                "TName": oName,
-                "Object Type": "Measure",
-                "Description": o.Description,
-                "TDescription": oDescription,
-                "Display Folder": o.DisplayFolder,
-                "TDisplay Folder": oDisplayFolder,
-            }
-            df_prep = pd.concat(
-                [df_prep, pd.DataFrame(new_data, index=[0])], ignore_index=True
-            )
-        for o in tom.all_hierarchies():
-            oName = _clean_text(o.Name, exclude_characters)
-            oDescription = _clean_text(o.Description, exclude_characters)
-            oDisplayFolder = _clean_text(o.DisplayFolder, exclude_characters)
-            new_data = {
-                "Name": o.Name,
-                "TName": oName,
-                "Object Type": "Hierarchy",
-                "Description": o.Description,
-                "TDescription": oDescription,
-                "Display Folder": o.DisplayFolder,
-                "TDisplay Folder": oDisplayFolder,
-            }
-            df_prep = pd.concat(
-                [df_prep, pd.DataFrame(new_data, index=[0])], ignore_index=True
-            )
-        for o in tom.all_levels():
-            oName = _clean_text(o.Name, exclude_characters)
-            oDescription = _clean_text(o.Description, exclude_characters)
-            new_data = {
-                "Name": o.Name,
-                "TName": oName,
-                "Object Type": "Level",
-                "Description": o.Description,
-                "TDescription": oDescription,
-                "Display Folder": None,
-                "TDisplay Folder": None,
-            }
-            df_prep = pd.concat(
-                [df_prep, pd.DataFrame(new_data, index=[0])], ignore_index=True
-            )
-
-        spark = _create_spark_session()
-        df = spark.createDataFrame(df_prep)
-
-        columns = ["Name", "Description", "Display Folder"]
-
-        for clm in columns:
-            columnToTranslate = f"T{clm}"
-            translate = (
-                Translate()
-                .setTextCol(columnToTranslate)
-                .setToLanguage(languages)
-                .setOutputCol("translation")
-                .setConcurrency(5)
-            )
-
-            transDF = (
-                translate.transform(df)
-                .withColumn("translation", flatten(col("translation.translations")))
-                .withColumn("translation", col("translation.text"))
-                .select("Object Type", clm, columnToTranslate, "translation")
-            )
-
-            df_panda = transDF.toPandas()
-            df_panda = df_panda[~df_panda[clm].isin([None, ""])][[clm, "translation"]]
-
-            df_panda = df_panda.rename(columns={clm: "value"})
-            final_df = pd.concat([final_df, df_panda], ignore_index=True)
-
-        def set_translation_if_exists(object, language, property, index):
-
-            if property == "Name":
-                trans = object.Name
-            elif property == "Description":
-                trans = object.Description
-            elif property == "Display Folder":
-                trans = object.DisplayFolder
-
-            df_filt = final_df[final_df["value"] == trans]
-            if not df_filt.empty:
-                translation_value = df_filt["translation"].str[index].iloc[0]
-                tom.set_translation(
-                    object=object,
-                    language=language,
-                    property=property,
-                    value=translation_value,
-                )
-
-        for language in languages:
-            index = languages.index(language)
-            tom.add_translation(language=language)
-            print(
-                f"{icons.in_progress} Translating {clm.lower()}s into the '{language}' language..."
-            )
-
-            for t in tom.model.Tables:
-                set_translation_if_exists(
-                    object=t, language=language, property="Name", index=index
-                )
-                set_translation_if_exists(
-                    object=t, language=language, property="Description", index=index
-                )
-            for c in tom.all_columns():
-                set_translation_if_exists(
-                    object=c, language=language, property="Name", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Description", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Display Folder", index=index
-                )
-            for c in tom.all_measures():
-                set_translation_if_exists(
-                    object=c, language=language, property="Name", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Description", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Display Folder", index=index
-                )
-            for c in tom.all_hierarchies():
-                set_translation_if_exists(
-                    object=c, language=language, property="Name", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Description", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Display Folder", index=index
-                )
-            for c in tom.all_levels():
-                set_translation_if_exists(
-                    object=c, language=language, property="Name", index=index
-                )
-                set_translation_if_exists(
-                    object=c, language=language, property="Description", index=index
-                )
-
-    result = pd.DataFrame(
-        columns=[
-            "Language",
-            "Object Type",
-            "Table Name",
-            "Object Name",
-            "Translated Object Name",
-            "Description",
-            "Translated Description",
-            "Display Folder",
-            "Translated Display Folder",
-        ]
+    return fabric.translate_semantic_model(
+        dataset=dataset,
+        languages=languages,
+        exclude_characters=exclude_characters,
+        workspace=workspace,
+        model_readonly=False,
     )
-    with connect_semantic_model(
-        dataset=dataset, readonly=True, workspace=workspace
-    ) as tom:
-
-        sempy.fabric._client._utils._init_analysis_services()
-        import Microsoft.AnalysisServices.Tabular as TOM
-
-        for c in tom.model.Cultures:
-            for tr in c.ObjectTranslations:
-                oType = str(tr.Object.ObjectType)
-                oName = tr.Object.Name
-                tValue = tr.Value
-                prop = str(tr.Property)
-
-                if tr.Object.ObjectType == TOM.ObjectType.Table:
-                    desc = tom.model.Tables[oName].Description
-                    new_data = {
-                        "Language": c.Name,
-                        "Table Name": oName,
-                        "Object Name": oName,
-                        "Object Type": oType,
-                        "Description": desc,
-                    }
-                    result = pd.concat(
-                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                    )
-                    condition = (
-                        (result["Language"] == c.Name)
-                        & (result["Table Name"] == oName)
-                        & (result["Object Name"] == oName)
-                        & (result["Object Type"] == oType)
-                    )
-                elif tr.Object.ObjectType == TOM.ObjectType.Level:
-                    hierarchyName = tr.Object.Parent.Name
-                    tName = tr.Object.Parent.Parent.Name
-                    levelName = "'" + hierarchyName + "'[" + oName + "]"
-                    desc = (
-                        tom.model.Tables[tName]
-                        .Hierarchies[hierarchyName]
-                        .Levels[oName]
-                        .Description
-                    )
-                    new_data = {
-                        "Language": c.Name,
-                        "Table Name": tName,
-                        "Object Name": levelName,
-                        "Object Type": oType,
-                        "Description": desc,
-                    }
-                    result = pd.concat(
-                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                    )
-                    condition = (
-                        (result["Language"] == c.Name)
-                        & (result["Table Name"] == tName)
-                        & (result["Object Name"] == levelName)
-                        & (result["Object Type"] == oType)
-                    )
-                elif tr.Object.ObjectType == TOM.ObjectType.Column:
-                    tName = tr.Object.Table.Name
-                    desc = tom.model.Tables[tName].Columns[oName].Description
-                    display_folder = (
-                        tom.model.Tables[tName].Columns[oName].DisplayFolder
-                    )
-                    new_data = {
-                        "Language": c.Name,
-                        "Table Name": tName,
-                        "Object Name": oName,
-                        "Object Type": oType,
-                        "Description": desc,
-                        "Display Folder": display_folder,
-                    }
-                    result = pd.concat(
-                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                    )
-                    condition = (
-                        (result["Language"] == c.Name)
-                        & (result["Table Name"] == tName)
-                        & (result["Object Name"] == oName)
-                        & (result["Object Type"] == oType)
-                    )
-                elif tr.Object.ObjectType == TOM.ObjectType.Measure:
-                    tName = tr.Object.Table.Name
-                    desc = tom.model.Tables[tName].Measures[oName].Description
-                    display_folder = (
-                        tom.model.Tables[tName].Measures[oName].DisplayFolder
-                    )
-                    new_data = {
-                        "Language": c.Name,
-                        "Table Name": tName,
-                        "Object Name": oName,
-                        "Object Type": oType,
-                        "Description": desc,
-                        "Display Folder": display_folder,
-                    }
-                    result = pd.concat(
-                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                    )
-                    condition = (
-                        (result["Language"] == c.Name)
-                        & (result["Table Name"] == tName)
-                        & (result["Object Name"] == oName)
-                        & (result["Object Type"] == oType)
-                    )
-                elif tr.Object.ObjectType == TOM.ObjectType.Hierarchy:
-                    tName = tr.Object.Table.Name
-                    desc = tom.model.Tables[tName].Hierarchies[oName].Description
-                    display_folder = (
-                        tom.model.Tables[tName].Hierarchies[oName].DisplayFolder
-                    )
-                    new_data = {
-                        "Language": c.Name,
-                        "Table Name": tName,
-                        "Object Name": oName,
-                        "Object Type": oType,
-                        "Description": desc,
-                        "Display Folder": display_folder,
-                    }
-                    result = pd.concat(
-                        [result, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                    )
-                    condition = (
-                        (result["Language"] == c.Name)
-                        & (result["Table Name"] == tName)
-                        & (result["Object Name"] == oName)
-                        & (result["Object Type"] == oType)
-                    )
-
-                if prop == "Caption":
-                    result.loc[condition, "Translated Object Name"] = tValue
-                elif prop == "Description":
-                    result.loc[condition, "Translated Description"] = tValue
-                else:
-                    result.loc[condition, "Translated Display Folder"] = tValue
-    result.fillna("", inplace=True)
-
-    return result
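With this release `translate_semantic_model` is reduced to a thin wrapper over `sempy.fabric.translate_semantic_model`, dropping the local SynapseML `Translate` pipeline and the manual TOM bookkeeping removed above. The public signature is unchanged; a minimal usage sketch with illustrative names:

```python
from sempy_labs import translate_semantic_model

# Translates object names, descriptions, and display folders into the
# requested languages; the heavy lifting now happens inside
# sempy.fabric.translate_semantic_model.
df = translate_semantic_model(
    dataset="AdventureWorks",      # illustrative semantic model name
    languages=["it-IT", "ja-JP"],
    exclude_characters="_",
    workspace="Sales Analytics",   # illustrative workspace name
)
```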
@@ -1,5 +1,5 @@
-from .lakehouse._blobs import _request_blob_api
-from ._helper_functions import (
+from sempy_labs.lakehouse._blobs import _request_blob_api
+from sempy_labs._helper_functions import (
     _xml_to_dict,
 )
 from datetime import datetime, timedelta, timezone
sempy_labs/_utils.py CHANGED
@@ -40,3 +40,30 @@ item_types = {
     ],
     "Warehouse": ["Warehouse", "warehouses"],
 }
+
+
+items = {
+    "CopyJob": "copyJobs",
+    "Dataflow": "dataflows",
+    "Eventhouse": "eventhouses",
+    "GraphQLApi": "GraphQLApis",
+    "Report": "reports",
+    "SemanticModel": "semanticModels",
+    # "Environment": "environments",
+    "KQLDatabase": "kqlDatabases",
+    "KQLDashboard": "kqlDashboards",
+    "KQLQueryset": "kqlQuerysets",
+    "DataPipeline": "dataPipelines",
+    "Notebook": "notebooks",
+    "SparkJobDefinition": "sparkJobDefinitions",
+    "Eventstream": "eventstreams",
+    "MirroredWarehouse": "mirroredWarehouses",
+    "MirroredDatabase": "mirroredDatabases",
+    "MountedDataFactory": "mountedDataFactories",
+    "VariableLibrary": "variableLibraries",
+    "ApacheAirFlowJob": "ApacheAirflowJobs",
+    "WarehouseSnapshot": "warehousesnapshots",
+    "DigitalTwinBuilder": "digitaltwinbuilders",
+    "DigitalTwinBuilderFlow": "DigitalTwinBuilderFlows",
+    "MirroredAzureDatabricksCatalog": "mirroredAzureDatabricksCatalogs",
+}
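The new `items` mapping pairs each Fabric item type with the plural path segment its REST routes use. This diff does not show the consuming code, so as a purely hypothetical sketch of how such a lookup table is typically applied:

```python
from sempy_labs._utils import items

def _item_route(workspace_id: str, item_type: str) -> str:
    # Hypothetical helper (not part of the package): build the Fabric REST
    # collection path for an item type, e.g. "SemanticModel" ->
    # /v1/workspaces/{id}/semanticModels.
    return f"/v1/workspaces/{workspace_id}/{items[item_type]}"

print(_item_route("00000000-0000-0000-0000-000000000000", "Notebook"))
# -> /v1/workspaces/00000000-0000-0000-0000-000000000000/notebooks
```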
sempy_labs/_vertipaq.py CHANGED
@@ -6,7 +6,7 @@ import os
 import shutil
 import datetime
 import warnings
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     format_dax_object_name,
     save_as_delta_table,
     resolve_workspace_capacity,
@@ -17,8 +17,8 @@ from ._helper_functions import (
     resolve_workspace_id,
     resolve_workspace_name,
 )
-from ._list_functions import list_relationships, list_tables
-from .lakehouse import lakehouse_attached, get_lakehouse_tables
+from sempy_labs._list_functions import list_relationships, list_tables
+from sempy_labs.lakehouse import lakehouse_attached, get_lakehouse_tables
 from typing import Optional
 from sempy._utils._log import log
 import sempy_labs._icons as icons
sempy_labs/_vpax.py CHANGED
@@ -6,7 +6,7 @@ import sys
 from pathlib import Path
 from typing import Optional
 from uuid import UUID
-from ._helper_functions import (
+from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
     resolve_lakehouse_name_and_id,
sempy_labs/_warehouses.py CHANGED
@@ -25,6 +25,8 @@ def create_warehouse(
 
     This is a wrapper function for the following API: `Items - Create Warehouse <https://learn.microsoft.com/rest/api/fabric/warehouse/items/create-warehouse>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     warehouse: str
@@ -62,6 +64,7 @@ def create_warehouse(
         method="post",
         lro_return_json=True,
        status_codes=[201, 202],
+        client="fabric_sp",
     )
 
     print(
@@ -141,6 +144,8 @@ def delete_warehouse(name: str | UUID, workspace: Optional[str | UUID] = None):
 
     This is a wrapper function for the following API: `Items - Delete Warehouse <https://learn.microsoft.com/rest/api/fabric/warehouse/items/delete-warehouse>`_.
 
+    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).
+
     Parameters
     ----------
     name: str | uuid.UUID