powerbi-mcp 0.1.0__py3-none-any.whl
This diff shows the content of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package files as they appear in the public registry.
- powerbi_mcp-0.1.0.dist-info/METADATA +540 -0
- powerbi_mcp-0.1.0.dist-info/RECORD +18 -0
- powerbi_mcp-0.1.0.dist-info/WHEEL +4 -0
- powerbi_mcp-0.1.0.dist-info/entry_points.txt +2 -0
- powerbi_mcp-0.1.0.dist-info/licenses/LICENSE +21 -0
- src/__init__.py +13 -0
- src/auth.py +136 -0
- src/client.py +259 -0
- src/constants.py +26 -0
- src/exceptions.py +96 -0
- src/formatters.py +367 -0
- src/models.py +85 -0
- src/server.py +90 -0
- src/tools/__init__.py +15 -0
- src/tools/datasets.py +355 -0
- src/tools/queries.py +125 -0
- src/tools/workspaces.py +185 -0
- src/validation.py +40 -0
src/tools/datasets.py
ADDED
@@ -0,0 +1,355 @@
"""
Dataset management tools

Tools for listing and inspecting PowerBI datasets.
"""

import json
import logging
from typing import Optional

from mcp.server import FastMCP
from mcp.server.fastmcp import Context

from ..models import ResponseFormat, DetailLevel
from ..formatters import (
    format_datasets_markdown,
    format_dataset_markdown,
    format_refresh_history_markdown,
    format_parameters_markdown,
    truncate_response
)
from ..validation import validate_refresh_top
from ..constants import REFRESH_HISTORY_DEFAULT

logger = logging.getLogger(__name__)


def register_dataset_tools(mcp: FastMCP) -> None:
    """Register dataset-related tools with the MCP server"""

    @mcp.tool()
    async def get_datasets(
        workspace_id: Optional[str] = None,
        format: str = "markdown",
        detail: str = "concise",
        ctx: Context = None
    ) -> str:
        """
        Get list of datasets from a specific workspace or My workspace.

        Datasets in PowerBI contain the data model, including tables, columns, relationships,
        and measures. This tool retrieves all datasets accessible in the specified workspace.

        Use this when you need to:
        - List all datasets in a workspace
        - Find a dataset ID for querying
        - Discover available data sources
        - Check dataset refresh status and configuration

        Parameters:
        - workspace_id (optional): Workspace (group) ID. If not provided, returns datasets from "My workspace"
        - format: Response format - "json" or "markdown" (default: "markdown")
        - detail: Detail level - "concise" or "detailed" (default: "concise")

        Returns:
        List of datasets with their IDs, names, and optionally detailed metadata including
        refresh status, storage mode, and configuration details.

        Example usage:
        - Get datasets from specific workspace: workspace_id="abc123..."
        - Get datasets from My workspace: (omit workspace_id)
        - Get detailed info: detail="detailed"

        Error handling:
        - If workspace_id not found, verify the ID is correct using get_workspaces
        - For permission errors, ensure service principal has read access to the workspace
        - Empty list means no datasets in the workspace or no access
        """
        try:
            format_type = ResponseFormat(format.lower())
            detail_level = DetailLevel(detail.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            if workspace_id:
                await ctx.info(f"Fetching datasets from workspace {workspace_id}")
            else:
                await ctx.info("Fetching datasets from My workspace")

            # Fetch datasets from API
            result = await client.get_datasets(workspace_id=workspace_id)
            datasets = result.get("value", [])

            await ctx.info(f"Found {len(datasets)} datasets")

            # Format response
            if format_type == ResponseFormat.MARKDOWN:
                response = format_datasets_markdown(datasets, detail_level)
            else:
                response = json.dumps(datasets, indent=2)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to get datasets: {error_msg}")
            return f"Error executing get_datasets: {error_msg}"

    @mcp.tool()
    async def get_dataset(
        dataset_id: str,
        workspace_id: Optional[str] = None,
        format: str = "json",
        ctx: Context = None
    ) -> str:
        """
        Get detailed information about a specific dataset.

        Retrieves comprehensive metadata about a PowerBI dataset including configuration,
        refresh settings, and data source information.

        Use this when you need to:
        - Get detailed metadata about a specific dataset
        - Check dataset configuration and capabilities
        - Verify dataset refresh settings
        - Understand dataset storage mode and requirements

        Parameters:
        - dataset_id (required): The unique identifier of the dataset
        - workspace_id (optional): Workspace (group) ID. Omit for datasets in "My workspace"
        - format: Response format - "json" or "markdown" (default: "json")

        Returns:
        Detailed dataset information including:
        - Dataset name and ID
        - Configuration details
        - Refresh capabilities and requirements
        - Storage mode
        - Creation date
        - Identity requirements

        Example usage:
        - Get dataset from My workspace: dataset_id="dataset123"
        - Get dataset from specific workspace: dataset_id="dataset123", workspace_id="workspace456"

        Error handling:
        - If dataset_id not found (404), verify the ID using get_datasets
        - For permission errors, ensure service principal has read access
        - Check workspace_id matches the workspace containing the dataset
        """
        try:
            if not dataset_id:
                return "Error: dataset_id is required"

            format_type = ResponseFormat(format.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            if workspace_id:
                await ctx.info(f"Fetching dataset {dataset_id} from workspace {workspace_id}")
            else:
                await ctx.info(f"Fetching dataset {dataset_id} from My workspace")

            # Fetch dataset from API
            result = await client.get_dataset(
                dataset_id=dataset_id,
                workspace_id=workspace_id
            )

            await ctx.info(f"Retrieved dataset: {result.get('name', 'Unknown')}")

            # Format response
            if format_type == ResponseFormat.JSON:
                response = json.dumps(result, indent=2)
            else:
                response = format_dataset_markdown(result)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to get dataset: {error_msg}")
            return f"Error executing get_dataset: {error_msg}"

    @mcp.tool()
    async def get_refresh_history(
        dataset_id: str,
        workspace_id: Optional[str] = None,
        top: int = REFRESH_HISTORY_DEFAULT,
        format: str = "markdown",
        ctx: Context = None
    ) -> str:
        """
        Get refresh history for a PowerBI dataset.

        Shows recent refresh operations including:
        - Status (Completed, Failed, Unknown)
        - Start and end times
        - Request ID and refresh type
        - Error messages (for failed refreshes)

        Useful for monitoring data freshness and troubleshooting refresh failures.

        Parameters:
        - dataset_id (required): The dataset ID
        - workspace_id (optional): Workspace ID. Omit for datasets in "My workspace"
        - top: Number of refresh records to return (default: 5, max: 60)
        - format: Response format - "markdown" or "json" (default: "markdown")

        Returns:
        Formatted refresh history with status, timestamps, and error details for failed refreshes.

        Example usage:
        - Get last 5 refreshes: dataset_id="abc123"
        - Get last 10 refreshes: dataset_id="abc123", top=10
        - Check specific workspace: dataset_id="abc123", workspace_id="workspace456"

        Error handling:
        - If dataset_id not found, verify the ID using get_datasets
        - For permission errors, ensure service principal has read access
        """
        try:
            if not dataset_id:
                return "Error: dataset_id is required"

            # Validate top parameter
            validate_refresh_top(top)
            format_type = ResponseFormat(format.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            if workspace_id:
                await ctx.info(f"Fetching refresh history for dataset {dataset_id} from workspace {workspace_id}")
            else:
                await ctx.info(f"Fetching refresh history for dataset {dataset_id} from My workspace")

            # Fetch refresh history from API
            result = await client.get_refresh_history(
                dataset_id=dataset_id,
                workspace_id=workspace_id,
                top=top
            )
            refresh_history = result.get("value", [])

            await ctx.info(f"Retrieved {len(refresh_history)} refresh records")

            # Get dataset name for formatting
            dataset_info = await client.get_dataset(dataset_id=dataset_id, workspace_id=workspace_id)
            dataset_name = dataset_info.get("name", dataset_id)

            # Format response
            if format_type == ResponseFormat.MARKDOWN:
                response = format_refresh_history_markdown(refresh_history, dataset_name)
            else:
                response = json.dumps(refresh_history, indent=2)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to get refresh history: {error_msg}")
            return f"Error executing get_refresh_history: {error_msg}"

    @mcp.tool()
    async def get_parameters(
        dataset_id: str,
        workspace_id: Optional[str] = None,
        format: str = "markdown",
        detail: str = "concise",
        ctx: Context = None
    ) -> str:
        """
        Get parameters defined in a PowerBI dataset.

        Returns parameter definitions including:
        - Name and data type
        - Current value
        - Whether parameter is required
        - Suggested values (if defined)

        Useful for discovering available parameters before querying parameterized datasets.

        Note: Not supported for datasets with SQL, Oracle, Teradata, SAP HANA DirectQuery
        connections or datasets modified via XMLA endpoint.

        Parameters:
        - dataset_id (required): The dataset ID
        - workspace_id (optional): Workspace ID. Omit for datasets in "My workspace"
        - format: Response format - "markdown" or "json" (default: "markdown")
        - detail: Detail level - "concise" or "detailed" (default: "concise")

        Returns:
        Formatted parameter information with names, types, values, and suggested values.

        Example usage:
        - Get all parameters: dataset_id="abc123"
        - Get detailed info: dataset_id="abc123", detail="detailed"
        - Check specific workspace: dataset_id="abc123", workspace_id="workspace456"

        Error handling:
        - If dataset_id not found, verify the ID using get_datasets
        - "Not supported" errors indicate dataset type limitations
        - Empty result means no parameters are defined
        """
        try:
            if not dataset_id:
                return "Error: dataset_id is required"

            format_type = ResponseFormat(format.lower())
            detail_level = DetailLevel(detail.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            if workspace_id:
                await ctx.info(f"Fetching parameters for dataset {dataset_id} from workspace {workspace_id}")
            else:
                await ctx.info(f"Fetching parameters for dataset {dataset_id} from My workspace")

            # Fetch parameters from API
            result = await client.get_parameters(
                dataset_id=dataset_id,
                workspace_id=workspace_id
            )
            parameters = result.get("value", [])

            await ctx.info(f"Retrieved {len(parameters)} parameters")

            # Get dataset name for formatting
            dataset_info = await client.get_dataset(dataset_id=dataset_id, workspace_id=workspace_id)
            dataset_name = dataset_info.get("name", dataset_id)

            # Format response
            if format_type == ResponseFormat.MARKDOWN:
                response = format_parameters_markdown(parameters, dataset_name, detail_level)
            else:
                response = json.dumps(parameters, indent=2)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to get parameters: {error_msg}")
            return f"Error executing get_parameters: {error_msg}"

    logger.info("Dataset tools registered")
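Each tool above obtains the shared API client from ctx.request_context.lifespan_context["client"], so the server must supply that entry through a FastMCP lifespan. The actual wiring lives in src/server.py (not reproduced in this section); the following is only a minimal sketch of the pattern, and the PowerBIClient name and its no-argument constructor are assumptions for illustration.

# Sketch only: how register_dataset_tools could be wired into a FastMCP server
# whose lifespan yields the "client" entry the tools expect.
from contextlib import asynccontextmanager
from typing import AsyncIterator

from mcp.server import FastMCP

from src.client import PowerBIClient            # assumed class name from src/client.py
from src.tools.datasets import register_dataset_tools


@asynccontextmanager
async def lifespan(server: FastMCP) -> AsyncIterator[dict]:
    # The tools read ctx.request_context.lifespan_context["client"],
    # so the lifespan must yield a dict with a "client" key.
    client = PowerBIClient()                     # assumed constructor; the real one likely takes credentials
    try:
        yield {"client": client}
    finally:
        pass                                     # real code would close the client here


mcp = FastMCP("powerbi-mcp", lifespan=lifespan)
register_dataset_tools(mcp)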
src/tools/queries.py
ADDED
@@ -0,0 +1,125 @@
"""
Query execution tools

Tools for executing DAX queries against PowerBI datasets.
"""

import json
import logging
from typing import Optional

from mcp.server import FastMCP
from mcp.server.fastmcp import Context

from ..models import ResponseFormat
from ..formatters import format_query_results_markdown, truncate_response
from ..validation import validate_dax_query

logger = logging.getLogger(__name__)


def register_query_tools(mcp: FastMCP) -> None:
    """Register query-related tools with the MCP server"""

    @mcp.tool()
    async def query_dataset(
        dataset_id: str,
        dax_query: str,
        workspace_id: Optional[str] = None,
        format: str = "markdown",
        ctx: Context = None
    ) -> str:
        """
        Execute DAX (Data Analysis Expressions) queries against a PowerBI dataset.

        This tool allows you to query dataset tables using DAX, PowerBI's formula language.
        DAX queries can retrieve data, perform calculations, and apply filters.

        Use this when you need to:
        - Retrieve data from dataset tables
        - Perform calculations and aggregations
        - Filter and analyze dataset data
        - Get specific rows or aggregated results

        DAX Query Syntax:
        - Basic table query: EVALUATE TableName
        - Filtered query: EVALUATE FILTER(TableName, TableName[Column] = "Value")
        - Aggregation: EVALUATE SUMMARIZE(TableName, TableName[Column], "Total", SUM(TableName[Amount]))
        - Top N: EVALUATE TOPN(10, TableName, TableName[Column], DESC)

        Parameters:
        - dataset_id (required): The unique identifier of the dataset to query
        - dax_query (required): DAX query to execute (must start with EVALUATE)
        - workspace_id (optional): Workspace (group) ID. Omit for "My workspace"
        - format: Response format - "json" or "markdown" (default: "markdown")

        Returns:
        Query results as a table with rows and columns. Markdown format presents results
        as formatted tables. Results are limited to 100 rows in markdown view.

        Example queries:
        1. Get all rows: "EVALUATE 'Sales'"
        2. Filter data: "EVALUATE FILTER('Sales', 'Sales'[Year] = 2024)"
        3. Aggregate: "EVALUATE SUMMARIZE('Sales', 'Sales'[Category], "Total", SUM('Sales'[Amount]))"
        4. Top 10: "EVALUATE TOPN(10, 'Sales', 'Sales'[Amount], DESC)"

        Error handling:
        - For syntax errors, check your DAX query starts with EVALUATE
        - If table/column not found, use get_dataset to see available schema
        - For large result sets, add filters or use TOPN to limit rows
        - Timeout errors suggest simplifying the query or adding filters
        """
        try:
            if not dataset_id:
                return "Error: dataset_id is required"

            if not dax_query:
                return "Error: dax_query is required"

            # Validate DAX query
            try:
                validate_dax_query(dax_query)
            except ValueError as e:
                return f"Error: {str(e)}"

            format_type = ResponseFormat(format.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            if workspace_id:
                await ctx.info(f"Executing query on dataset {dataset_id} in workspace {workspace_id}")
            else:
                await ctx.info(f"Executing query on dataset {dataset_id} in My workspace")

            await ctx.report_progress(0.3, 1.0, "Executing DAX query...")

            # Execute query
            queries = [{"query": dax_query}]
            result = await client.execute_queries(
                dataset_id=dataset_id,
                queries=queries,
                workspace_id=workspace_id
            )

            await ctx.report_progress(1.0, 1.0, "Query completed")
            await ctx.info("Query executed successfully")

            # Format response
            if format_type == ResponseFormat.MARKDOWN:
                response = format_query_results_markdown(result)
            else:
                response = json.dumps(result, indent=2)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to execute query: {error_msg}")
            return f"Error executing query_dataset: {error_msg}"

    logger.info("Query tools registered")
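The tool above builds the payload as queries = [{"query": dax_query}] and hands it to client.execute_queries. As a non-authoritative sketch, such a client method would typically POST that payload to the Power BI executeQueries REST endpoint; the httpx usage, function name, and token handling below are assumptions and may not match the real src/client.py.

# Sketch only: an approximation of the REST call behind client.execute_queries.
from typing import Optional

import httpx


async def execute_queries_sketch(token: str, dataset_id: str, dax_query: str,
                                 workspace_id: Optional[str] = None) -> dict:
    base = "https://api.powerbi.com/v1.0/myorg"
    # "My workspace" datasets use /datasets/...; workspace datasets use /groups/{id}/datasets/...
    url = (f"{base}/groups/{workspace_id}/datasets/{dataset_id}/executeQueries"
           if workspace_id else f"{base}/datasets/{dataset_id}/executeQueries")
    payload = {"queries": [{"query": dax_query}]}  # same shape the tool constructs
    async with httpx.AsyncClient() as http:
        resp = await http.post(url, json=payload,
                               headers={"Authorization": f"Bearer {token}"})
        resp.raise_for_status()
        # Response shape: {"results": [{"tables": [{"rows": [...]}]}]}
        return resp.json()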
src/tools/workspaces.py
ADDED
@@ -0,0 +1,185 @@
"""
Workspace management tools

Tools for listing and managing PowerBI workspaces.
"""

import json
import logging
from typing import Optional

from mcp.server import FastMCP
from mcp.server.fastmcp import Context

from ..models import ResponseFormat, DetailLevel
from ..formatters import (
    format_workspaces_markdown,
    format_reports_markdown,
    truncate_response
)
from ..constants import MAX_WORKSPACES_TOP, MIN_WORKSPACES_TOP

logger = logging.getLogger(__name__)


def register_workspace_tools(mcp: FastMCP) -> None:
    """Register workspace-related tools with the MCP server"""

    @mcp.tool()
    async def get_workspaces(
        top: Optional[int] = None,
        skip: Optional[int] = None,
        format: str = "markdown",
        detail: str = "concise",
        ctx: Context = None
    ) -> str:
        """
        Get list of PowerBI workspaces (also called groups) accessible to the service principal.

        This tool retrieves all workspaces that the configured service principal has access to.
        Workspaces are containers for dashboards, reports, datasets, and dataflows in PowerBI.

        Use this when you need to:
        - Discover available workspaces
        - Find a workspace ID for further operations
        - List all accessible PowerBI workspaces

        Parameters:
        - top (optional): Maximum number of workspaces to return (1-5000, for pagination)
        - skip (optional): Number of workspaces to skip (for pagination)
        - format: Response format - "json" or "markdown" (default: "markdown")
        - detail: Detail level - "concise" or "detailed" (default: "concise")

        Returns:
        List of workspaces with their IDs, names, and optionally detailed metadata.

        Example usage:
        - Get first 10 workspaces: top=10
        - Get concise markdown list: format="markdown", detail="concise"
        - Get detailed JSON: format="json", detail="detailed"

        Error handling:
        - If authentication fails, check your service principal credentials
        - If no workspaces returned, ensure service principal has workspace access
        - For permission errors, verify service principal is enabled in PowerBI admin portal
        """
        try:
            # Validate parameters
            if top is not None and (top < MIN_WORKSPACES_TOP or top > MAX_WORKSPACES_TOP):
                return f"Error: 'top' parameter must be between {MIN_WORKSPACES_TOP} and {MAX_WORKSPACES_TOP}"

            if skip is not None and skip < 0:
                return "Error: 'skip' parameter must be non-negative"

            format_type = ResponseFormat(format.lower())
            detail_level = DetailLevel(detail.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            await ctx.info(f"Fetching workspaces (top={top}, skip={skip})")

            # Fetch workspaces from API
            result = await client.get_workspaces(top=top, skip=skip)
            workspaces = result.get("value", [])

            await ctx.info(f"Found {len(workspaces)} workspaces")

            # Format response
            if format_type == ResponseFormat.MARKDOWN:
                response = format_workspaces_markdown(workspaces, detail_level)
            else:
                response = json.dumps(workspaces, indent=2)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to get workspaces: {error_msg}")
            return f"Error executing get_workspaces: {error_msg}"

    @mcp.tool()
    async def get_reports(
        workspace_id: Optional[str] = None,
        format: str = "markdown",
        detail: str = "concise",
        ctx: Context = None
    ) -> str:
        """
        Get PowerBI reports from a workspace.

        Returns report information including:
        - Report name and ID
        - Web URL for viewing
        - Embed URL for embedding
        - Associated dataset ID

        Useful for discovering available reports and getting report URLs.

        Parameters:
        - workspace_id (optional): Workspace ID. Omit to get reports from "My workspace"
        - format: Response format - "json" or "markdown" (default: "markdown")
        - detail: Detail level - "concise" or "detailed" (default: "concise")

        Returns:
        List of reports with their IDs, names, and optionally URLs and dataset IDs.

        Example usage:
        - Get reports from My workspace: (omit workspace_id)
        - Get reports from specific workspace: workspace_id="abc123"
        - Get detailed info with URLs: detail="detailed"

        Error handling:
        - If workspace_id not found, verify the ID using get_workspaces
        - For permission errors, ensure service principal has read access
        - Empty list means no reports in the workspace or no access
        """
        try:
            format_type = ResponseFormat(format.lower())
            detail_level = DetailLevel(detail.lower())

            # Get client from context
            client = ctx.request_context.lifespan_context["client"]

            if workspace_id:
                await ctx.info(f"Fetching reports from workspace {workspace_id}")
            else:
                await ctx.info("Fetching reports from My workspace")

            # Fetch reports from API
            result = await client.get_reports(workspace_id=workspace_id)
            reports = result.get("value", [])

            await ctx.info(f"Found {len(reports)} reports")

            # Get workspace name for formatting
            workspace_name = "My workspace"
            if workspace_id:
                workspaces_result = await client.get_workspaces()
                workspaces = workspaces_result.get("value", [])
                workspace = next((w for w in workspaces if w.get("id") == workspace_id), None)
                if workspace:
                    workspace_name = workspace.get("name", workspace_id)

            # Format response
            if format_type == ResponseFormat.MARKDOWN:
                response = format_reports_markdown(reports, workspace_name, detail_level)
            else:
                response = json.dumps(reports, indent=2)

            return truncate_response(response)

        except ValueError as e:
            error_msg = f"Invalid parameter value: {str(e)}"
            await ctx.error(error_msg)
            return f"Error: {error_msg}"
        except Exception as e:
            error_msg = str(e)
            await ctx.error(f"Failed to get reports: {error_msg}")
            return f"Error executing get_reports: {error_msg}"

    logger.info("Workspace tools registered")
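The top and skip parameters bounded by MIN_WORKSPACES_TOP and MAX_WORKSPACES_TOP correspond to the $top and $skip query options of the Power BI groups endpoint. The sketch below only illustrates what client.get_workspaces plausibly wraps; the function name, httpx usage, and token handling are assumptions rather than the package's actual code.

# Sketch only: an approximation of the REST call behind client.get_workspaces.
from typing import Optional

import httpx


async def get_workspaces_sketch(token: str, top: Optional[int] = None,
                                skip: Optional[int] = None) -> dict:
    params = {}
    if top is not None:
        params["$top"] = top      # maps onto the tool's top parameter (1-5000)
    if skip is not None:
        params["$skip"] = skip    # maps onto the tool's skip parameter
    async with httpx.AsyncClient() as http:
        resp = await http.get("https://api.powerbi.com/v1.0/myorg/groups",
                              params=params,
                              headers={"Authorization": f"Bearer {token}"})
        resp.raise_for_status()
        # Response shape: {"value": [{"id": "...", "name": "...", ...}]}
        return resp.json()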