semantic-link-labs 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff shows the content of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of semantic-link-labs might be problematic.
- semantic_link_labs-0.7.0.dist-info/METADATA +148 -0
- semantic_link_labs-0.7.0.dist-info/RECORD +111 -0
- {semantic_link_labs-0.6.0.dist-info → semantic_link_labs-0.7.0.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +26 -2
- sempy_labs/_ai.py +3 -65
- sempy_labs/_bpa_translation/_translations_am-ET.po +828 -0
- sempy_labs/_bpa_translation/_translations_ar-AE.po +860 -0
- sempy_labs/_bpa_translation/_translations_cs-CZ.po +894 -0
- sempy_labs/_bpa_translation/_translations_da-DK.po +894 -0
- sempy_labs/_bpa_translation/_translations_de-DE.po +933 -0
- sempy_labs/_bpa_translation/_translations_el-GR.po +936 -0
- sempy_labs/_bpa_translation/_translations_es-ES.po +915 -0
- sempy_labs/_bpa_translation/_translations_fa-IR.po +883 -0
- sempy_labs/_bpa_translation/_translations_fr-FR.po +938 -0
- sempy_labs/_bpa_translation/_translations_ga-IE.po +912 -0
- sempy_labs/_bpa_translation/_translations_he-IL.po +855 -0
- sempy_labs/_bpa_translation/_translations_hi-IN.po +892 -0
- sempy_labs/_bpa_translation/_translations_hu-HU.po +910 -0
- sempy_labs/_bpa_translation/_translations_is-IS.po +887 -0
- sempy_labs/_bpa_translation/_translations_it-IT.po +931 -0
- sempy_labs/_bpa_translation/_translations_ja-JP.po +805 -0
- sempy_labs/_bpa_translation/_translations_nl-NL.po +924 -0
- sempy_labs/_bpa_translation/_translations_pl-PL.po +913 -0
- sempy_labs/_bpa_translation/_translations_pt-BR.po +909 -0
- sempy_labs/_bpa_translation/_translations_pt-PT.po +904 -0
- sempy_labs/_bpa_translation/_translations_ru-RU.po +909 -0
- sempy_labs/_bpa_translation/_translations_ta-IN.po +922 -0
- sempy_labs/_bpa_translation/_translations_te-IN.po +896 -0
- sempy_labs/_bpa_translation/_translations_th-TH.po +873 -0
- sempy_labs/_bpa_translation/_translations_zh-CN.po +767 -0
- sempy_labs/_bpa_translation/_translations_zu-ZA.po +916 -0
- sempy_labs/_clear_cache.py +9 -4
- sempy_labs/_generate_semantic_model.py +30 -56
- sempy_labs/_helper_functions.py +358 -14
- sempy_labs/_icons.py +10 -1
- sempy_labs/_list_functions.py +478 -237
- sempy_labs/_model_bpa.py +194 -18
- sempy_labs/_model_bpa_bulk.py +363 -0
- sempy_labs/_model_bpa_rules.py +4 -4
- sempy_labs/_model_dependencies.py +12 -10
- sempy_labs/_one_lake_integration.py +7 -7
- sempy_labs/_query_scale_out.py +45 -66
- sempy_labs/_refresh_semantic_model.py +7 -0
- sempy_labs/_translations.py +154 -1
- sempy_labs/_vertipaq.py +103 -90
- sempy_labs/directlake/__init__.py +5 -1
- sempy_labs/directlake/_directlake_schema_compare.py +27 -31
- sempy_labs/directlake/_directlake_schema_sync.py +55 -66
- sempy_labs/directlake/_dl_helper.py +233 -0
- sempy_labs/directlake/_get_directlake_lakehouse.py +6 -7
- sempy_labs/directlake/_get_shared_expression.py +1 -1
- sempy_labs/directlake/_guardrails.py +17 -13
- sempy_labs/directlake/_update_directlake_partition_entity.py +54 -30
- sempy_labs/directlake/_warm_cache.py +1 -1
- sempy_labs/lakehouse/_get_lakehouse_tables.py +61 -69
- sempy_labs/lakehouse/_lakehouse.py +3 -2
- sempy_labs/lakehouse/_shortcuts.py +1 -1
- sempy_labs/migration/_create_pqt_file.py +174 -182
- sempy_labs/migration/_migrate_calctables_to_lakehouse.py +236 -268
- sempy_labs/migration/_migrate_calctables_to_semantic_model.py +75 -73
- sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +442 -426
- sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +91 -97
- sempy_labs/migration/_refresh_calc_tables.py +92 -101
- sempy_labs/report/_BPAReportTemplate.json +232 -0
- sempy_labs/report/__init__.py +6 -2
- sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +9 -0
- sempy_labs/report/_bpareporttemplate/.platform +11 -0
- sempy_labs/report/_bpareporttemplate/StaticResources/SharedResources/BaseThemes/CY24SU06.json +710 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/page.json +11 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/1b08bce3bebabb0a27a8/visual.json +191 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/2f22ddb70c301693c165/visual.json +438 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/3b1182230aa6c600b43a/visual.json +127 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/58577ba6380c69891500/visual.json +576 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/a2a8fa5028b3b776c96c/visual.json +207 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/adfd47ef30652707b987/visual.json +506 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/b6a80ee459e716e170b1/visual.json +127 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/ce3130a721c020cc3d81/visual.json +513 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/page.json +8 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/visuals/66e60dfb526437cd78d1/visual.json +112 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/page.json +11 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/07deb8bce824e1be37d7/visual.json +513 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0b1c68838818b32ad03b/visual.json +352 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0c171de9d2683d10b930/visual.json +37 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0efa01be0510e40a645e/visual.json +542 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/6bf2f0eb830ab53cc668/visual.json +221 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/88d8141cb8500b60030c/visual.json +127 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/a753273590beed656a03/visual.json +576 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/b8fdc82cddd61ac447bc/visual.json +127 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/page.json +9 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/visuals/ce8532a7e25020271077/visual.json +38 -0
- sempy_labs/report/_bpareporttemplate/definition/pages/pages.json +10 -0
- sempy_labs/report/_bpareporttemplate/definition/report.json +176 -0
- sempy_labs/report/_bpareporttemplate/definition/version.json +4 -0
- sempy_labs/report/_bpareporttemplate/definition.pbir +14 -0
- sempy_labs/report/_generate_report.py +255 -139
- sempy_labs/report/_report_functions.py +26 -33
- sempy_labs/report/_report_rebind.py +31 -26
- sempy_labs/tom/_model.py +75 -58
- semantic_link_labs-0.6.0.dist-info/METADATA +0 -22
- semantic_link_labs-0.6.0.dist-info/RECORD +0 -54
- sempy_labs/directlake/_fallback.py +0 -60
- {semantic_link_labs-0.6.0.dist-info → semantic_link_labs-0.7.0.dist-info}/LICENSE +0 -0
- {semantic_link_labs-0.6.0.dist-info → semantic_link_labs-0.7.0.dist-info}/top_level.txt +0 -0
@@ -2,13 +2,12 @@ import sempy
 import sempy.fabric as fabric
 import pandas as pd
 import re
-import datetime
-import time
 from sempy_labs.lakehouse._get_lakehouse_tables import get_lakehouse_tables
 from sempy_labs._helper_functions import (
 resolve_lakehouse_name,
 resolve_lakehouse_id,
 create_abfss_path,
+retry,
 )
 from sempy_labs.tom import connect_semantic_model
 from pyspark.sql import SparkSession
@@ -101,168 +100,142 @@ def migrate_calc_tables_to_lakehouse(
 )
 return

-daxQuery = f"ADDCOLUMNS(\n{query},"
-for c in t.Columns:
-if str(c.Type) == "Calculated":
-expr = c.Expression
-expr = expr.replace(
-f"'{t.Name}'", ""
-).replace(f"{t.Name}[Date]", "[Date]")
-expr = expr.replace(
-"[MonthNo]", "MONTH([Date])"
-).replace(
-"[QuarterNo]",
-"INT((MONTH([Date]) + 2) / 3)",
-)
-daxQuery = (
-f'{daxQuery}\n"{c.Name}",{expr},'
-)
-daxQuery = (
-"EVALUATE\n" + daxQuery.rstrip(",") + "\n)"
+with connect_semantic_model(
+dataset=dataset, workspace=workspace, readonly=True
+) as tom:
+for t in tom.model.Tables:
+if tom.is_auto_date_table(table_name=t.Name):
+print(
+f"{icons.yellow_dot} The '{t.Name}' table is an auto-datetime table and is not supported in the Direct Lake migration process. "
+"Please create a proper Date/Calendar table in your lakehoues and use it in your Direct Lake model."
+)
+else:
+for p in t.Partitions:
+if str(p.SourceType) == "Calculated":
+query = p.Source.Expression
+if "NAMEOF" not in query: # exclude field parameters
+daxQuery = ""
+if query.lower().startswith("calendar") and any(
+str(c.Type) == "Calculated" for c in t.Columns
+):
+daxQuery = f"ADDCOLUMNS(\n{query},"
+for c in t.Columns:
+if str(c.Type) == "Calculated":
+expr = c.Expression
+expr = expr.replace(f"'{t.Name}'", "").replace(
+f"{t.Name}[Date]", "[Date]"
 )
+expr = expr.replace(
+"[MonthNo]", "MONTH([Date])"
+).replace(
+"[QuarterNo]",
+"INT((MONTH([Date]) + 2) / 3)",
 )
+daxQuery = f'{daxQuery}\n"{c.Name}",{expr},'
+daxQuery = "EVALUATE\n" + daxQuery.rstrip(",") + "\n)"
+else:
+daxQuery = f"EVALUATE\n{query}"
+daxQueryTopN = (
+daxQuery.replace("EVALUATE\n", "EVALUATE\nTOPN(1,")
++ ")"
+)
+
+try:
+df = fabric.evaluate_dax(
+dataset=dataset,
+dax_string=daxQueryTopN,
+workspace=workspace,
+)
+
+for col in df.columns:
+pattern = r"\[([^\]]+)\]"
+
+matches = re.findall(pattern, col)
+new_column_name = matches[0].replace(" ", "")
+
+df.rename(
+columns={col: new_column_name},
+inplace=True,
 )

 try:
-for col in df.columns:
-pattern = r"\[([^\]]+)\]"
-
-matches = re.findall(pattern, col)
-new_column_name = matches[0].replace(
-" ", ""
-)
-
-df.rename(
-columns={col: new_column_name},
-inplace=True,
-)
-
-try:
-dataType = next(
-str(c.DataType)
-for c in tom.model.Tables[
-t.Name
-].Columns
-if str(c.Type)
-== "CalculatedTableColumn"
-and c.SourceColumn == col
-)
-except Exception:
-dataType = next(
-str(c.DataType)
-for c in tom.model.Tables[
-t.Name
-].Columns
-if str(c.Type) == "Calculated"
-and c.Name == new_column_name
-)
-
-if dataType == "Int64":
-df[new_column_name] = df[
-new_column_name
-].astype(int)
-elif dataType in ["Decimal", "Double"]:
-df[new_column_name] = df[
-new_column_name
-].astype(float)
-elif dataType == "Boolean":
-df[new_column_name] = df[
-new_column_name
-].astype(bool)
-elif dataType == "DateTime":
-df[new_column_name] = pd.to_datetime(
-df[new_column_name]
-)
-
-delta_table_name = t.Name.replace(
-" ", "_"
-).lower()
-
-spark_df = spark.createDataFrame(df)
-filePath = create_abfss_path(
-lakehouse_id=lakehouse_id,
-lakehouse_workspace_id=lakehouse_workspace_id,
-delta_table_name=delta_table_name,
-)
-spark_df.write.mode("overwrite").format(
-"delta"
-).save(filePath)
-
-start_time2 = datetime.datetime.now()
-timeout2 = datetime.timedelta(minutes=1)
-success2 = False
-
-while not success2:
-try:
-with connect_semantic_model(
-dataset=new_dataset,
-readonly=False,
-workspace=new_dataset_workspace,
-) as tom2:
-success2 = True
-tom2.set_annotation(
-object=tom2.model,
-name=t.Name,
-value=daxQuery,
-)
-except Exception:
-if (
-datetime.datetime.now()
-- start_time2
-> timeout2
-):
-break
-time.sleep(1)
-
-print(
-f"{icons.green_dot} Calculated table '{t.Name}' has been created as delta table '{delta_table_name.lower()}' "
-f"in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace."
+dataType = next(
+str(c.DataType)
+for c in tom.model.Tables[t.Name].Columns
+if str(c.Type) == "CalculatedTableColumn"
+and c.SourceColumn == col
 )
 except Exception:
+dataType = next(
+str(c.DataType)
+for c in tom.model.Tables[t.Name].Columns
+if str(c.Type) == "Calculated"
+and c.Name == new_column_name
+)
+
+if dataType == "Int64":
+df[new_column_name] = df[
+new_column_name
+].astype(int)
+elif dataType in ["Decimal", "Double"]:
+df[new_column_name] = df[
+new_column_name
+].astype(float)
+elif dataType == "Boolean":
+df[new_column_name] = df[
+new_column_name
+].astype(bool)
+elif dataType == "DateTime":
+df[new_column_name] = pd.to_datetime(
+df[new_column_name]
 )
+
+delta_table_name = t.Name.replace(" ", "_").lower()
+
+spark_df = spark.createDataFrame(df)
+filePath = create_abfss_path(
+lakehouse_id=lakehouse_id,
+lakehouse_workspace_id=lakehouse_workspace_id,
+delta_table_name=delta_table_name,
+)
+spark_df.write.mode("overwrite").format("delta").save(
+filePath
+)
+
+@retry(
+sleep_time=1,
+timeout_error_message=f"{icons.red_dot} Function timed out after 1 minute",
+)
+def dyn_connect():
+with connect_semantic_model(
+dataset=new_dataset,
+readonly=True,
+workspace=new_dataset_workspace,
+) as tom2:
+
+tom2.model
+
+dyn_connect()
+
+with connect_semantic_model(
+dataset=new_dataset,
+readonly=False,
+workspace=new_dataset_workspace,
+) as tom2:
+tom2.set_annotation(
+object=tom2.model,
+name=t.Name,
+value=daxQuery,
+)
+
+print(
+f"{icons.green_dot} Calculated table '{t.Name}' has been created as delta table '{delta_table_name.lower()}' "
+f"in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace."
+)
+except Exception:
+print(
+f"{icons.red_dot} Failed to create calculated table '{t.Name}' as a delta table in the lakehouse."
+)


 @log
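The manual polling that 0.6.0 used to wait for the new semantic model (the removed `start_time2` / `timeout2` / `while not success2` / `time.sleep(1)` block) is replaced above by a `@retry(...)` decorator imported from `sempy_labs._helper_functions`, whose implementation (+358 -14 in this release) is not part of this hunk. The following is only a minimal sketch of a decorator compatible with the call sites shown, assuming it re-invokes the wrapped function until it succeeds or a timeout elapses; the `timeout_minutes` parameter and the `TimeoutError` are assumptions, while `sleep_time` and `timeout_error_message` are taken from the diff.

import time
from functools import wraps


def retry(sleep_time: int, timeout_error_message: str, timeout_minutes: int = 1):
    # Hypothetical sketch; the real helper lives in sempy_labs/_helper_functions.py.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + timeout_minutes * 60
            while True:
                try:
                    # Return as soon as the wrapped call stops raising.
                    return func(*args, **kwargs)
                except Exception:
                    if time.monotonic() >= deadline:
                        raise TimeoutError(timeout_error_message)
                    time.sleep(sleep_time)

        return wrapper

    return decorator

Under that assumption, the decorated `dyn_connect()` above simply keeps opening a read-only `connect_semantic_model` session until the newly created model is reachable, after which the code reconnects in read-write mode to store the DAX expression as an annotation.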
@@ -318,115 +291,110 @@ def migrate_field_parameters(
 )
 return

-)
-except Exception:
-if datetime.datetime.now() - start_time > timeout:
-break
-time.sleep(1)
+@retry(
+sleep_time=1,
+timeout_error_message=f"{icons.red_dot} Function timed out after 1 minute",
+)
+def dyn_connect():
+with connect_semantic_model(
+dataset=new_dataset, readonly=True, workspace=new_dataset_workspace
+) as tom:
+
+tom.model
+
+dyn_connect()
+
+with connect_semantic_model(
+dataset=new_dataset, workspace=new_dataset_workspace, readonly=False
+) as tom:
+
+for i, r in dfP_filt.iterrows():
+tName = r["Table Name"]
+query = r["Query"]
+
+# For field parameters, remove calc columns from the query
+rows = query.strip().split("\n")
+filtered_rows = [
+row
+for row in rows
+if not any(
+value in row for value in dfC_CalcColumn["Column Object"].values
+)
+]
+updated_query_string = "\n".join(filtered_rows)
+
+# Remove extra comma
+lines = updated_query_string.strip().split("\n")
+lines[-2] = lines[-2].rstrip(",")
+expr = "\n".join(lines)
+
+try:
+par = TOM.Partition()
+par.Name = tName
+
+parSource = TOM.CalculatedPartitionSource()
+par.Source = parSource
+parSource.Expression = expr
+
+tbl = TOM.Table()
+tbl.Name = tName
+tbl.Partitions.Add(par)
+
+columns = ["Value1", "Value2", "Value3"]
+
+for colName in columns:
+col = TOM.CalculatedTableColumn()
+col.Name = colName
+col.SourceColumn = "[" + colName + "]"
+col.DataType = TOM.DataType.String
+
+tbl.Columns.Add(col)
+
+tom.model.Tables.Add(tbl)
+
+ep = TOM.JsonExtendedProperty()
+ep.Name = "ParameterMetadata"
+ep.Value = '{"version":3,"kind":2}'
+
+rcd = TOM.RelatedColumnDetails()
+gpc = TOM.GroupByColumn()
+gpc.GroupingColumn = tom.model.Tables[tName].Columns["Value2"]
+rcd.GroupByColumns.Add(gpc)
+
+# Update column properties
+tom.model.Tables[tName].Columns["Value2"].IsHidden = True
+tom.model.Tables[tName].Columns["Value3"].IsHidden = True
+tom.model.Tables[tName].Columns["Value3"].DataType = TOM.DataType.Int64
+tom.model.Tables[tName].Columns["Value1"].SortByColumn = (
+tom.model.Tables[tName].Columns["Value3"]
+)
+tom.model.Tables[tName].Columns["Value2"].SortByColumn = (
+tom.model.Tables[tName].Columns["Value3"]
+)
+tom.model.Tables[tName].Columns["Value2"].ExtendedProperties.Add(ep)
+tom.model.Tables[tName].Columns["Value1"].RelatedColumnDetails = rcd
+
+dfC_filt1 = dfC[
+(dfC["Table Name"] == tName) & (dfC["Source"] == "[Value1]")
+]
+col1 = dfC_filt1["Column Name"].iloc[0]
+dfC_filt2 = dfC[
+(dfC["Table Name"] == tName) & (dfC["Source"] == "[Value2]")
+]
+col2 = dfC_filt2["Column Name"].iloc[0]
+dfC_filt3 = dfC[
+(dfC["Table Name"] == tName) & (dfC["Source"] == "[Value3]")
+]
+col3 = dfC_filt3["Column Name"].iloc[0]
+
+tom.model.Tables[tName].Columns["Value1"].Name = col1
+tom.model.Tables[tName].Columns["Value2"].Name = col2
+tom.model.Tables[tName].Columns["Value3"].Name = col3
+
+print(
+f"{icons.green_dot} The '{tName}' table has been added as a field parameter to the '{new_dataset}' semantic model in the '{new_dataset_workspace}' workspace."
+)
+except Exception:
+print(
+f"{icons.red_dot} The '{tName}' table has not been added as a field parameter."
+)