fabric-pbi 1.0.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- fabric_pbi-1.0.0.dist-info/METADATA +124 -0
- fabric_pbi-1.0.0.dist-info/RECORD +17 -0
- fabric_pbi-1.0.0.dist-info/WHEEL +5 -0
- fabric_pbi-1.0.0.dist-info/top_level.txt +1 -0
- fabricpandas/__init__.py +22 -0
- fabricpandas/auth/__init__.py +8 -0
- fabricpandas/auth/base_client.py +220 -0
- fabricpandas/bulks/__init__.py +8 -0
- fabricpandas/bulks/bulk_client.py +733 -0
- fabricpandas/client.py +80 -0
- fabricpandas/report/__init__.py +8 -0
- fabricpandas/report/report_client.py +509 -0
- fabricpandas/semantic_model/__init__.py +8 -0
- fabricpandas/semantic_model/semantic_model_client.py +643 -0
- fabricpandas/utils/__init__.py +6 -0
- fabricpandas/workspace/__init__.py +8 -0
- fabricpandas/workspace/workspace_client.py +167 -0
fabricpandas/semantic_model/semantic_model_client.py

@@ -0,0 +1,643 @@
"""
Semantic Model Client for Microsoft Fabric
Provides operations for managing Semantic Models in Fabric workspaces.
"""

import requests
import base64
import time
from pathlib import Path
from typing import Optional, Dict, Any, List
from ..auth import BaseClient


class SemanticModelClient(BaseClient):
    """
    Client for interacting with Fabric Semantic Model APIs

    Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items
    """

    def create_semantic_model(
        self,
        workspace_id: str,
        display_name: str,
        description: Optional[str] = None,
        definition: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Create a new Semantic Model

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/create-semantic-model

        Args:
            workspace_id: The workspace ID where the semantic model will be created
            display_name: Display name for the semantic model
            description: Optional description
            definition: Optional semantic model definition

        Returns:
            Created semantic model details
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels"

        payload = {
            "displayName": display_name
        }

        if description:
            payload["description"] = description

        if definition:
            payload["definition"] = definition

        response = self._make_request("POST", endpoint, data=payload)
        return response.json()

    def get_semantic_model(
        self,
        workspace_id: str,
        semantic_model_id: str
    ) -> Dict[str, Any]:
        """
        Get a Semantic Model by ID

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/get-semantic-model

        Args:
            workspace_id: The workspace ID
            semantic_model_id: The semantic model ID

        Returns:
            Semantic model details
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels/{semantic_model_id}"
        response = self._make_request("GET", endpoint)
        return response.json()

    def list_semantic_models(
        self,
        workspace_id: str,
        continuation_token: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        List all Semantic Models in a workspace

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/list-semantic-models-definition

        Args:
            workspace_id: The workspace ID
            continuation_token: Optional token for pagination

        Returns:
            List of semantic models
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels"
        params = {}

        if continuation_token:
            params["continuationToken"] = continuation_token

        response = self._make_request("GET", endpoint, params=params)
        return response.json()
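
A minimal usage sketch for the three calls above. The import path follows the file listing at the top; constructing the client with no arguments mirrors the module's own __main__ hint and assumes BaseClient resolves credentials and the API base URL itself. The GUID and the response fields ("id", "value") are placeholders and assumptions, not taken from this package.

# Illustrative sketch, not part of the packaged file.
from fabricpandas.semantic_model.semantic_model_client import SemanticModelClient

client = SemanticModelClient()  # assumes BaseClient supplies auth and base URL
workspace_id = "00000000-0000-0000-0000-000000000000"  # placeholder workspace GUID

# Create a model, read it back, then list everything in the workspace.
created = client.create_semantic_model(workspace_id, "Sales Model", description="Demo model")
model = client.get_semantic_model(workspace_id, created.get("id"))
listing = client.list_semantic_models(workspace_id)
print(model.get("displayName"), len(listing.get("value", [])))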

    def get_semantic_model_definition(
        self,
        workspace_id: str,
        semantic_model_id: str,
        format: Optional[str] = None,
        save_to_folder: bool = False,
        output_folder: Optional[str] = None,
        use_model_id_as_folder: bool = False,
        wait_for_completion: bool = True,
        max_retries: int = 30,
        retry_delay: int = 5
    ) -> Dict[str, Any]:
        """
        Get a Semantic Model's definition with optional waiting and saving

        This unified method handles:
        - Getting the definition
        - Waiting for long-running operations (optional)
        - Saving to folder structure (optional)
        - Auto-naming folders based on model name or ID

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/get-semantic-model-definition

        Args:
            workspace_id: The workspace ID
            semantic_model_id: The semantic model ID
            format: Optional format for the definition (e.g., "TMSL", "TMDL")
            save_to_folder: If True, saves definition to folder structure (default: False)
            output_folder: Root folder to save files (optional, auto-generated if not provided)
            use_model_id_as_folder: If True, uses model ID as folder name instead of display name (default: False)
            wait_for_completion: If True, waits for long-running operations (default: True)
            max_retries: Maximum number of polling attempts (default: 30)
            retry_delay: Seconds to wait between retries (default: 5)

        Returns:
            Dictionary containing:
            - 'definition': The semantic model definition
            - 'saved_files': List of saved file paths (if save_to_folder=True)
            - 'output_folder': The folder path used (if save_to_folder=True)
            - 'model_info': Semantic model metadata (if save_to_folder=True or output_folder=None)
        """
        # Step 1: Initiate the get definition request
        endpoint = f"workspaces/{workspace_id}/semanticModels/{semantic_model_id}/getDefinition"

        payload = {}
        if format:
            payload["format"] = format

        response = self._make_request("POST", endpoint, data=payload)

        # Step 2: Handle the response
        # Check if it's a long-running operation (202 Accepted)
        if response.status_code == 202:
            if not wait_for_completion:
                # Return operation info without waiting
                return {
                    "status": "Accepted",
                    "status_code": 202,
                    "x-ms-operation-id": response.headers.get("x-ms-operation-id"),
                    "retry_after": response.headers.get("Retry-After"),
                    "message": "Long-running operation started. Use the location header to check status."
                }

            # Wait for operation to complete
            operation_id = response.headers.get("x-ms-operation-id")
            if operation_id:
                print(f"Long-running operation started (ID: {operation_id})")
                print(f"Waiting for operation to complete (checking every {retry_delay}s)...")
                result = self.get_operation_result(operation_id, max_retries, retry_delay)
            else:
                raise Exception("Operation accepted but no operation ID returned")
        else:
            # Immediate response
            if response.text:
                result = response.json()
            else:
                result = {
                    "status": "Success",
                    "status_code": response.status_code,
                    "message": "Request completed but no content returned"
                }

        # Step 3: Extract definition from result
        if "definition" in result:
            definition = result["definition"]
        elif "format" in result and "parts" in result:
            # Already the definition object
            definition = result
        else:
            # Return as-is if not a standard definition format
            return result

        # Step 4: Prepare response
        response_data = {
            "definition": definition
        }

        # Step 5: Save to folder if requested
        if save_to_folder:
            # Determine folder name if not provided
            if output_folder is None:
                if use_model_id_as_folder:
                    # Use semantic model ID as folder name
                    output_folder = semantic_model_id
                    print(f"Using model ID as folder: '{output_folder}'\n")
                else:
                    # Use semantic model display name as folder name
                    print("Getting semantic model information...")
                    model_info = self.get_semantic_model(workspace_id, semantic_model_id)
                    model_name = model_info.get("displayName", f"semantic_model_{semantic_model_id}")

                    # Clean the name for use as folder name (remove invalid characters)
                    import re
                    output_folder = re.sub(r'[<>:"/\\|?*]', "_", model_name)
                    print(f"Using model name as folder: '{output_folder}'\n")

                    response_data["model_info"] = model_info

            # Save to folder structure
            saved_files = self.save_definition_to_folder(definition, output_folder)
            response_data["saved_files"] = saved_files
            response_data["output_folder"] = output_folder

        return response_data
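
A sketch of the export path this method enables: request the TMDL definition, let the client wait out the long-running operation, and write the decoded parts to a folder named after the model. It reuses the client and workspace_id from the earlier sketch; the model GUID is a placeholder.

# Illustrative sketch, not part of the packaged file.
result = client.get_semantic_model_definition(
    workspace_id,
    "11111111-1111-1111-1111-111111111111",  # placeholder semantic model GUID
    format="TMDL",
    save_to_folder=True,  # decode each part and write it under the model's display name
)
print("Exported to:", result["output_folder"])
for saved in result["saved_files"]:
    print(" ", saved)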

    def get_operation_result(
        self,
        operation_id: str,
        max_retries: int = 30,
        retry_delay: int = 5
    ) -> Dict[str, Any]:
        """
        Poll a long-running operation until completion and retrieve the result

        Args:
            operation_id: The operation ID from the x-ms-operation-id header
            max_retries: Maximum number of polling attempts (default: 30)
            retry_delay: Seconds to wait between retries (default: 5)

        Returns:
            Operation result data

        Raises:
            Exception: If operation fails or times out
        """
        # Construct the operation status URL
        status_url = f"https://api.fabric.microsoft.com/v1/operations/{operation_id}"

        for attempt in range(max_retries):
            response = requests.get(
                status_url,
                headers={"Authorization": f"Bearer {self.config.access_token}"}
            )
            response.raise_for_status()

            result = response.json()
            status = result.get("status", "").lower()

            if status in ["succeeded"]:
                # When operation is done, try to fetch the result from the /result endpoint
                result_url = f"https://api.fabric.microsoft.com/v1/operations/{operation_id}/result"

                print("Operation completed. Checking for result...")
                result_response = requests.get(
                    result_url,
                    headers={"Authorization": f"Bearer {self.config.access_token}"}
                )

                # Some operations (like update) don't have results, only status
                if result_response.status_code == 200:
                    # Return the actual result data
                    return result_response.json()
                elif result_response.status_code == 400:
                    # Check if it's "OperationHasNoResult" error
                    try:
                        error_data = result_response.json()
                        if error_data.get("errorCode") == "OperationHasNoResult":
                            # Operation succeeded but has no result data, return status info
                            print("Operation completed successfully (no result data)")
                            return result
                    except:
                        pass
                    # Re-raise for other 400 errors
                    result_response.raise_for_status()
                else:
                    result_response.raise_for_status()

            elif status == "failed":
                error_details = result.get("error", result)
                raise Exception(f"Operation failed: {error_details}")
            elif status in ["running", "notstarted", "inprogress"]:
                if attempt < max_retries - 1:
                    print(f"Operation status: {status}. Waiting {retry_delay}s... (attempt {attempt + 1}/{max_retries})")
                    time.sleep(retry_delay)
                    continue
            else:
                # Unknown status, return as-is
                print(f"Warning: Unknown operation status: {status}")
                return result

        raise Exception(f"Operation timed out after {max_retries} attempts")
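
If blocking is not wanted, the 202 branch of get_semantic_model_definition returns the operation ID instead, and this poller can be driven separately. A sketch, reusing the client from the earlier sketches; the GUID is a placeholder.

# Illustrative sketch, not part of the packaged file.
pending = client.get_semantic_model_definition(
    workspace_id,
    "11111111-1111-1111-1111-111111111111",  # placeholder GUID
    wait_for_completion=False,  # return immediately on 202 Accepted
)
if pending.get("status") == "Accepted":
    definition = client.get_operation_result(
        pending["x-ms-operation-id"], max_retries=10, retry_delay=10
    )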

    def update_semantic_model(
        self,
        workspace_id: str,
        semantic_model_id: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Update a Semantic Model's metadata

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/update-semantic-model

        Args:
            workspace_id: The workspace ID
            semantic_model_id: The semantic model ID
            display_name: New display name
            description: New description

        Returns:
            Updated semantic model details
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels/{semantic_model_id}"

        payload = {}
        if display_name:
            payload["displayName"] = display_name
        if description:
            payload["description"] = description

        response = self._make_request("PATCH", endpoint, data=payload)
        return response.json()
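
Renaming or re-describing a model is a single PATCH through this method; a one-call sketch with a placeholder GUID, reusing the client from the earlier sketches.

# Illustrative sketch, not part of the packaged file.
client.update_semantic_model(
    workspace_id,
    "11111111-1111-1111-1111-111111111111",  # placeholder GUID
    display_name="Sales Model (renamed)",
    description="Refreshed description",
)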

    def update_semantic_model_definition(
        self,
        workspace_id: str,
        semantic_model_id: str,
        definition: Optional[Dict[str, Any]] = None,
        definition_folder: Optional[str] = None,
        update_metadata: bool = True,
        wait_for_completion: bool = True,
        max_retries: int = 30,
        retry_delay: int = 5
    ) -> Dict[str, Any]:
        """
        Update semantic model definition from either a definition object or folder structure

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/update-semantic-model-definition

        Args:
            workspace_id: The workspace ID
            semantic_model_id: The semantic model ID
            definition: Definition object with 'format' and 'parts' (optional if definition_folder provided)
            definition_folder: Path to folder containing model files (optional if definition provided)
            update_metadata: If True, updates item metadata from .platform file (default: True)
            wait_for_completion: Wait for the operation to complete (default: True)
            max_retries: Maximum number of polling attempts (default: 30)
            retry_delay: Delay in seconds between polling attempts (default: 5)

        Returns:
            Dictionary with operation status and result

        Note:
            - When update_metadata=True and a .platform file is in the definition, the item's
              metadata (display name, description) will be updated from the .platform file
            - The update always replaces the entire definition (full update)
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels/{semantic_model_id}/updateDefinition"

        # Build definition from folder if provided
        if definition_folder and not definition:
            print(f"Building definition from folder: {definition_folder}")
            definition = self._build_definition_from_folder(definition_folder)

        if not definition:
            raise ValueError("Either 'definition' or 'definition_folder' must be provided")

        # Prepare request body according to API spec
        payload = {
            "definition": definition,
            "updateMetadata": update_metadata
        }

        print("Updating semantic model definition...")
        print(f"Update metadata from .platform file: {update_metadata}")
        print(f"Number of parts: {len(definition.get('parts', []))}\n")

        # Make the request
        response = self._make_request("POST", endpoint, data=payload)

        # Handle long-running operation (202 Accepted)
        if response.status_code == 202:
            if not wait_for_completion:
                return {
                    "status": "Accepted",
                    "message": "Update initiated. Operation is running.",
                    "location": response.headers.get("Location"),
                    "retry_after": response.headers.get("Retry-After")
                }

            # Extract the operation ID from the x-ms-operation-id header
            operation_id = response.headers.get("x-ms-operation-id")

            if not operation_id:
                raise ValueError("No operation ID found in response headers")

            print(f"Long-running operation initiated. Operation ID: {operation_id}")
            print("Polling for completion...\n")

            # Wait for operation to complete (update operations don't return result data)
            status_url = f"https://api.fabric.microsoft.com/v1/operations/{operation_id}"

            for attempt in range(max_retries):
                status_response = requests.get(
                    status_url,
                    headers={"Authorization": f"Bearer {self.config.access_token}"}
                )
                status_response.raise_for_status()

                operation_status = status_response.json()
                status = operation_status.get("status", "").lower()

                if status == "succeeded":
                    print("✓ Operation completed successfully!")
                    return {
                        "status": "Completed",
                        "message": "Definition updated successfully",
                        "operation_details": operation_status
                    }
                elif status == "failed":
                    error_details = operation_status.get("error", operation_status)
                    raise Exception(f"Operation failed: {error_details}")
                elif status in ["running", "notstarted", "inprogress"]:
                    if attempt < max_retries - 1:
                        print(f"Operation status: {status}. Waiting {retry_delay}s... (attempt {attempt + 1}/{max_retries})")
                        time.sleep(retry_delay)
                        continue
                else:
                    print(f"Warning: Unknown operation status: {status}")

            raise Exception(f"Operation timed out after {max_retries} attempts")

        # Handle immediate success (200 OK)
        elif response.status_code == 200:
            return {
                "status": "Completed",
                "message": "Definition updated successfully"
            }

        # Unexpected response
        else:
            return {
                "status": "Unknown",
                "status_code": response.status_code,
                "response": response.text
            }
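
A sketch of the round trip this method closes: export with get_semantic_model_definition(save_to_folder=True), edit the TMDL files on disk, then push the whole folder back. The folder name is whatever the export step produced; the GUID is a placeholder, and the client comes from the earlier sketches.

# Illustrative sketch, not part of the packaged file.
outcome = client.update_semantic_model_definition(
    workspace_id,
    "11111111-1111-1111-1111-111111111111",  # placeholder GUID
    definition_folder="Sales Model",  # folder written by the earlier export sketch
    update_metadata=True,             # also apply .platform metadata if the file is present
)
print(outcome["status"], outcome.get("message"))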

    def _build_definition_from_folder(
        self,
        folder_path: str,
        format_type: str = "TMDL"
    ) -> Dict[str, Any]:
        """
        Build definition object from folder structure by encoding files to Base64

        Args:
            folder_path: Path to folder containing model files
            format_type: Definition format (default: "TMDL")

        Returns:
            Definition object with 'format' and 'parts'
        """
        folder = Path(folder_path)
        if not folder.exists():
            raise FileNotFoundError(f"Folder not found: {folder_path}")

        parts = []

        # Walk through all files in the folder
        for file_path in folder.rglob("*"):
            if file_path.is_file():
                # Get relative path from root folder
                relative_path = file_path.relative_to(folder)
                path_str = str(relative_path).replace("\\", "/")

                # Read file content
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()

                    # Encode to Base64
                    encoded_content = base64.b64encode(content.encode("utf-8")).decode("utf-8")

                    # Add to parts
                    parts.append({
                        "path": path_str,
                        "payload": encoded_content,
                        "payloadType": "InlineBase64"
                    })

                    print(f" ✓ Added: {path_str}")

                except Exception as e:
                    print(f" ✗ Failed to read {path_str}: {e}")

        print(f"\nTotal files processed: {len(parts)}\n")

        return {
            "format": format_type,
            "parts": parts
        }
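
For reference, the object this helper returns follows the definition-parts shape used throughout the client: every file becomes a relative path plus an InlineBase64 payload. A hand-built single-part equivalent might look like the following; the TMDL snippet and part path are hypothetical.

# Illustrative sketch, not part of the packaged file.
import base64

tmdl_source = "model Model\n\tculture: en-US\n"  # hypothetical TMDL content
definition = {
    "format": "TMDL",
    "parts": [
        {
            "path": "definition/model.tmdl",  # hypothetical part path
            "payload": base64.b64encode(tmdl_source.encode("utf-8")).decode("utf-8"),
            "payloadType": "InlineBase64",
        }
    ],
}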

    def delete_semantic_model(
        self,
        workspace_id: str,
        semantic_model_id: str
    ) -> None:
        """
        Delete a Semantic Model

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/delete-semantic-model

        Args:
            workspace_id: The workspace ID
            semantic_model_id: The semantic model ID to delete

        Returns:
            None (204 No Content on success)
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels/{semantic_model_id}"
        self._make_request("DELETE", endpoint)

    def save_definition_to_folder(
        self,
        definition_data: Dict[str, Any],
        output_folder: str = "model_definition"
    ) -> List[str]:
        """
        Save semantic model definition parts to folder structure

        Args:
            definition_data: The definition object from API response (containing 'format' and 'parts')
            output_folder: Root folder to save files (default: "model_definition")

        Returns:
            List of saved file paths
        """
        # Get format and parts
        format_type = definition_data.get("format", "TMDL")
        parts = definition_data.get("parts", [])

        print(f"Format: {format_type}")
        print(f"Number of parts: {len(parts)}\n")

        # Create output folder
        output_path = Path(output_folder)
        output_path.mkdir(parents=True, exist_ok=True)

        saved_files = []

        # Process each part
        for part in parts:
            file_path = part.get("path", "")
            payload = part.get("payload", "")
            payload_type = part.get("payloadType", "")

            if not file_path or not payload:
                print("⚠ Skipping part with missing path or payload")
                continue

            # Decode payload based on type
            if payload_type == "InlineBase64":
                try:
                    decoded_content = base64.b64decode(payload).decode("utf-8")
                except Exception as e:
                    print(f"✗ Failed to decode {file_path}: {e}")
                    continue
            else:
                # If not base64, assume it's plain text
                decoded_content = payload

            # Create full file path
            full_path = output_path / file_path

            # Create parent directories
            full_path.parent.mkdir(parents=True, exist_ok=True)

            # Write file
            try:
                with open(full_path, "w", encoding="utf-8") as f:
                    f.write(decoded_content)

                print(f"✓ Saved: {file_path}")
                saved_files.append(str(full_path))
            except Exception as e:
                print(f"✗ Failed to save {file_path}: {e}")

        print(f"\n{'=' * 60}")
        print(f"Summary: Saved {len(saved_files)} files to '{output_folder}'")
        print(f"{'=' * 60}")

        return saved_files

    def bind_semantic_model_connection(
        self,
        workspace_id: str,
        semantic_model_id: str,
        connection_id: str
    ) -> None:
        """
        Bind a connection to a Semantic Model

        Reference: https://learn.microsoft.com/en-us/rest/api/fabric/semanticmodel/items/bind-semantic-model-connection

        Args:
            workspace_id: The workspace ID
            semantic_model_id: The semantic model ID
            connection_id: The connection ID to bind

        Returns:
            None (200 OK on success)
        """
        endpoint = f"workspaces/{workspace_id}/semanticModels/{semantic_model_id}/bindConnection"

        payload = {
            "connectionId": connection_id
        }

        self._make_request("POST", endpoint, data=payload)
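
Binding is a single POST; the connection GUID would come from an existing Fabric connection. Both IDs below are placeholders, and the client comes from the earlier sketches.

# Illustrative sketch, not part of the packaged file.
client.bind_semantic_model_connection(
    workspace_id,
    "11111111-1111-1111-1111-111111111111",  # placeholder semantic model GUID
    "22222222-2222-2222-2222-222222222222",  # placeholder connection GUID
)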


# Example usage
if __name__ == "__main__":
    print("SemanticModelClient - Client for Fabric Semantic Models")
    print("\nExample usage:")
    print(" from semantic_model_client import SemanticModelClient")
    print(" client = SemanticModelClient()")
    print(" client.list_semantic_models(workspace_id)")