powerbi-mcp 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
src/formatters.py ADDED
@@ -0,0 +1,367 @@
1
+ """
2
+ Response formatters for PowerBI data
3
+
4
+ Converts API responses into user-friendly formats (Markdown, JSON).
5
+ """
6
+
7
+ from typing import Any, Callable, Optional
8
+
9
+ from .models import DetailLevel
10
+ from .constants import CHARACTER_LIMIT, MAX_ROWS_DISPLAY
11
+
12
+
13
def _format_items_markdown(
    items: list[dict[str, Any]],
    item_type: str,
    detail: DetailLevel,
    detail_formatter: Optional[Callable[[dict[str, Any]], list[str]]] = None
) -> str:
    """
    Render a list of PowerBI items (workspaces, datasets, ...) as markdown.

    Args:
        items: Item dictionaries as returned by the PowerBI API
        item_type: Plural label for the heading, e.g. "Workspaces"
        detail: Desired level of detail (concise or detailed)
        detail_formatter: Callback producing extra markdown lines per item;
            only invoked when detail is DETAILED

    Returns:
        Markdown document listing every item
    """
    if not items:
        return f"No {item_type.lower()} found."

    out = [f"# PowerBI {item_type} ({len(items)} total)\n"]

    for entry in items:
        out.append(f"## {entry.get('name', 'Unnamed')}")
        out.append(f"- **ID**: `{entry.get('id')}`")

        # Extra per-item fields are added only in detailed mode.
        if detail == DetailLevel.DETAILED and detail_formatter:
            out.extend(detail_formatter(entry))

        # Blank line separates items in the rendered markdown.
        out.append("")

    return "\n".join(out)
47
+
48
+
49
+ def _format_workspace_details(ws: dict[str, Any]) -> list[str]:
50
+ """Format detailed fields for a workspace"""
51
+ lines = []
52
+ lines.append(f"- **Type**: {ws.get('type', 'N/A')}")
53
+ lines.append(f"- **State**: {ws.get('state', 'N/A')}")
54
+ if ws.get('isReadOnly'):
55
+ lines.append("- **Read Only**: Yes")
56
+ if ws.get('isOnDedicatedCapacity'):
57
+ lines.append("- **Dedicated Capacity**: Yes")
58
+ if ws.get('capacityId'):
59
+ lines.append(f"- **Capacity ID**: {ws.get('capacityId')}")
60
+ return lines
61
+
62
+
63
+ def _format_dataset_details(ds: dict[str, Any]) -> list[str]:
64
+ """Format detailed fields for a dataset"""
65
+ lines = []
66
+ lines.append(f"- **Configured By**: {ds.get('configuredBy', 'N/A')}")
67
+ if ds.get('isRefreshable'):
68
+ lines.append("- **Refreshable**: Yes")
69
+ if ds.get('isEffectiveIdentityRequired'):
70
+ lines.append("- **Effective Identity Required**: Yes")
71
+ if ds.get('isEffectiveIdentityRolesRequired'):
72
+ lines.append("- **Effective Identity Roles Required**: Yes")
73
+ if ds.get('targetStorageMode'):
74
+ lines.append(f"- **Storage Mode**: {ds.get('targetStorageMode')}")
75
+ if ds.get('createdDate'):
76
+ lines.append(f"- **Created**: {ds.get('createdDate')}")
77
+ return lines
78
+
79
+
80
def truncate_response(text: str, limit: int = CHARACTER_LIMIT) -> str:
    """
    Cap a response at `limit` characters.

    Args:
        text: Response text to cap
        limit: Maximum number of characters kept (defaults to CHARACTER_LIMIT)

    Returns:
        The text unchanged when it fits; otherwise the leading `limit`
        characters followed by an explanatory truncation notice.
    """
    if len(text) > limit:
        notice = (
            f"... [Response truncated at {limit} characters. "
            "Use filtering or pagination to reduce response size.]"
        )
        return f"{text[:limit]}\n\n{notice}"
    return text
100
+
101
+
102
def format_workspaces_markdown(workspaces: list[dict[str, Any]], detail: DetailLevel) -> str:
    """
    Render workspaces as markdown.

    Args:
        workspaces: Workspace dictionaries from the PowerBI API
        detail: Level of detail (concise or detailed)

    Returns:
        Markdown listing of the workspaces
    """
    # Delegate to the shared list formatter with the workspace detail callback.
    return _format_items_markdown(
        workspaces, "Workspaces", detail, _format_workspace_details
    )
114
+
115
+
116
def format_datasets_markdown(datasets: list[dict[str, Any]], detail: DetailLevel) -> str:
    """
    Render datasets as markdown.

    Args:
        datasets: Dataset dictionaries from the PowerBI API
        detail: Level of detail (concise or detailed)

    Returns:
        Markdown listing of the datasets
    """
    # Delegate to the shared list formatter with the dataset detail callback.
    return _format_items_markdown(
        datasets, "Datasets", detail, _format_dataset_details
    )
128
+
129
+
130
def format_dataset_markdown(dataset: dict[str, Any]) -> str:
    """
    Format single dataset details as markdown.

    Args:
        dataset: Dataset dictionary from API

    Returns:
        Markdown-formatted string of dataset details
    """
    import re  # local import keeps this fix self-contained

    lines = [f"# Dataset: {dataset.get('name', 'Unnamed')}\n"]

    for key, value in dataset.items():
        # Format key nicely: convert camelCase (and snake_case) to Title
        # Case, e.g. 'configuredBy' -> 'Configured By'.  The previous
        # key.replace('_', ' ').title() alone collapsed camelCase words
        # ('configuredBy' -> 'Configuredby'), so word boundaries between a
        # lowercase/digit and an uppercase letter are split first.
        spaced = re.sub(r'(?<=[a-z0-9])(?=[A-Z])', ' ', key)
        formatted_key = spaced.replace('_', ' ').title()
        lines.append(f"- **{formatted_key}**: {value}")

    return "\n".join(lines)
148
+
149
+
150
def format_query_results_markdown(results: dict[str, Any]) -> str:
    """
    Format query results as markdown tables.

    Args:
        results: Query results dictionary from API, shaped like
            {"results": [{"tables": [{"rows": [...]}, ...]}, ...]}

    Returns:
        Markdown-formatted string with one table per result set
    """
    if not results or "results" not in results:
        return "No query results."

    def _cell(value: Any) -> str:
        # Escape characters that would corrupt the markdown table layout:
        # '|' starts a new cell and a newline starts a new row.
        return str(value).replace("|", "\\|").replace("\n", " ")

    lines = ["# Query Results\n"]

    for idx, result in enumerate(results["results"]):
        lines.append(f"## Query {idx + 1}\n")

        if "tables" not in result or not result["tables"]:
            lines.append("*No data returned*\n")
            continue

        for table_idx, table in enumerate(result["tables"]):
            if table_idx > 0:
                lines.append(f"\n### Table {table_idx + 1}\n")

            rows = table.get("rows", [])

            if not rows:
                lines.append("*No rows returned*\n")
                continue

            # Column order comes from the first row's keys; missing keys in
            # later rows render as empty cells.
            columns = list(rows[0].keys())

            # Markdown table header and separator.
            lines.append("| " + " | ".join(_cell(col) for col in columns) + " |")
            lines.append("| " + " | ".join(["---"] * len(columns)) + " |")

            # Add rows (capped to prevent huge responses).
            for row in rows[:MAX_ROWS_DISPLAY]:
                values = [_cell(row.get(col, "")) for col in columns]
                lines.append("| " + " | ".join(values) + " |")

            if len(rows) > MAX_ROWS_DISPLAY:
                lines.append(
                    f"\n*Showing {MAX_ROWS_DISPLAY} of {len(rows)} rows. "
                    "Query returned more data than displayed.*\n"
                )

    return "\n".join(lines)
201
+
202
+
203
def format_refresh_history_markdown(
    refresh_history: list[dict[str, Any]],
    dataset_name: str
) -> str:
    """
    Render a dataset's refresh history as markdown.

    Args:
        refresh_history: Refresh records from the API
        dataset_name: Name of the dataset

    Returns:
        Markdown report of the recent refreshes
    """
    header = f"# Refresh History - Dataset: {dataset_name}"
    if not refresh_history:
        return f"{header}\n\nNo refresh history found."

    lines = [header, f"\n## Recent Refreshes (showing {len(refresh_history)})\n"]

    for idx, refresh in enumerate(refresh_history, 1):
        status = refresh.get("status", "Unknown")
        # Pick a glyph for the status: check for success, cross for failure,
        # hourglass for anything still in flight / unknown.
        if status == "Completed":
            status_emoji = "✓"
        elif status == "Failed":
            status_emoji = "✗"
        else:
            status_emoji = "⏳"

        lines.append(f"### Refresh {idx}")
        lines.append(f"- **Status**: {status} {status_emoji}")

        # Optional metadata fields, emitted only when present.
        for key, label in (
            ("startTime", "Start"),
            ("endTime", "End"),
            ("refreshType", "Type"),
            ("requestId", "Request ID"),
        ):
            if refresh.get(key):
                lines.append(f"- **{label}**: {refresh[key]}")

        # Failed refreshes carry their error payload.
        if status == "Failed" and refresh.get("serviceExceptionJson"):
            lines.append(f"- **Error**: {refresh['serviceExceptionJson']}")

        lines.append("")

    return "\n".join(lines)
251
+
252
+
253
def format_parameters_markdown(
    parameters: list[dict[str, Any]],
    dataset_name: str,
    detail: DetailLevel
) -> str:
    """
    Render dataset parameters as markdown.

    Args:
        parameters: Parameter dictionaries from the API
        dataset_name: Name of the dataset
        detail: Detail level; CONCISE shows only the current value, any other
            level adds suggested values and the description

    Returns:
        Markdown report with a summary section and one entry per parameter
    """
    title = f"# Dataset Parameters - {dataset_name}"
    if not parameters:
        return f"{title}\n\nNo parameters defined for this dataset."

    # Summary statistics up front.
    required_count = sum(1 for p in parameters if p.get("isRequired", False))
    lines = [
        f"{title}\n",
        "## Summary",
        f"- Total Parameters: {len(parameters)}",
        f"- Required: {required_count}",
        f"- Optional: {len(parameters) - required_count}",
        "\n## Parameters\n",
    ]

    for idx, param in enumerate(parameters, 1):
        name = param.get("name", "Unnamed")
        param_type = param.get("type", "Unknown")
        required_str = " *Required*" if param.get("isRequired", False) else ""
        lines.append(f"### {idx}. {name} ({param_type}){required_str}")

        # The current value is shown at every detail level (when set).
        current_value = param.get("currentValue")
        if current_value is not None:
            lines.append(f"- **Current Value**: {current_value}")

        if detail != DetailLevel.CONCISE:
            # Non-concise output also includes suggestions and description.
            suggested = param.get("suggestedValues")
            if suggested:
                lines.append(
                    "- **Suggested Values**: " + ", ".join(str(v) for v in suggested)
                )
            description = param.get("description")
            if description:
                lines.append(f"- **Description**: {description}")

        lines.append("")

    return "\n".join(lines)
313
+
314
+
315
def format_reports_markdown(
    reports: list[dict[str, Any]],
    workspace_name: str,
    detail: DetailLevel
) -> str:
    """
    Render reports as markdown.

    Args:
        reports: Report dictionaries from the API
        workspace_name: Name of the workspace (or "My workspace")
        detail: Detail level; CONCISE shows only name and ID

    Returns:
        Markdown listing of the reports
    """
    if not reports:
        return f"# PowerBI Reports\n\n## Workspace: {workspace_name}\n\nNo reports found."

    lines = [
        "# PowerBI Reports",
        f"\n## Workspace: {workspace_name}",
        f"\nFound {len(reports)} reports:\n",
    ]

    for idx, report in enumerate(reports, 1):
        name = report.get("name", "Unnamed")
        report_id = report.get("id", "N/A")

        if detail == DetailLevel.CONCISE:
            # Concise: numbered name plus ID only.
            lines.extend([f"{idx}. **{name}**", f"   - ID: {report_id}", ""])
            continue

        # Full detail: report ID plus any linked IDs/URLs the API returned.
        lines.append(f"### {idx}. {name}")
        lines.append(f"- **Report ID**: {report_id}")
        for key, label in (
            ("datasetId", "Dataset ID"),
            ("webUrl", "Web URL"),
            ("embedUrl", "Embed URL"),
        ):
            if report.get(key):
                lines.append(f"- **{label}**: {report[key]}")
        lines.append("")

    return "\n".join(lines)
src/models.py ADDED
@@ -0,0 +1,85 @@
1
+ """
2
+ Data models for PowerBI MCP Server
3
+
4
+ Pydantic models for request/response validation and structured output.
5
+ """
6
+
7
+ from enum import Enum
8
+ from typing import Any
9
+ from pydantic import BaseModel, Field, field_validator
10
+
11
+ from .validation import validate_dax_query as _validate_dax_query
12
+
13
+
14
class ResponseFormat(str, Enum):
    """Serialization format a tool should use for its response."""
    JSON = "json"          # machine-readable output
    MARKDOWN = "markdown"  # human-readable output
18
+
19
+
20
class DetailLevel(str, Enum):
    """How much information to include in a response."""
    CONCISE = "concise"    # names/IDs only
    DETAILED = "detailed"  # full metadata per item
24
+
25
+
26
class Workspace(BaseModel):
    """PowerBI workspace (group) model"""
    id: str = Field(..., description="Unique workspace identifier")
    name: str = Field(..., description="Workspace name")
    type: str | None = Field(None, description="Workspace type")
    state: str | None = Field(None, description="Workspace state")
    # Aliases map the API's camelCase JSON names onto snake_case attributes.
    is_read_only: bool | None = Field(None, alias="isReadOnly", description="Read-only flag")
    is_on_dedicated_capacity: bool | None = Field(
        None, alias="isOnDedicatedCapacity", description="Dedicated capacity flag"
    )
    capacity_id: str | None = Field(None, alias="capacityId", description="Capacity ID")

    # populate_by_name lets instances be built using either the snake_case
    # attribute names or the camelCase API aliases.
    model_config = {"populate_by_name": True}
39
+
40
+
41
class Dataset(BaseModel):
    """PowerBI dataset model"""
    id: str = Field(..., description="Unique dataset identifier")
    name: str = Field(..., description="Dataset name")
    # Aliases map the API's camelCase JSON names onto snake_case attributes.
    configured_by: str | None = Field(None, alias="configuredBy", description="User who configured the dataset")
    is_refreshable: bool | None = Field(None, alias="isRefreshable", description="Refreshable flag")
    is_effective_identity_required: bool | None = Field(
        None, alias="isEffectiveIdentityRequired", description="Effective identity requirement"
    )
    is_effective_identity_roles_required: bool | None = Field(
        None, alias="isEffectiveIdentityRolesRequired", description="Effective identity roles requirement"
    )
    target_storage_mode: str | None = Field(None, alias="targetStorageMode", description="Storage mode")
    # NOTE(review): createdDate is kept as a raw string, not parsed into a
    # datetime — presumably the API's ISO timestamp; confirm with callers.
    created_date: str | None = Field(None, alias="createdDate", description="Creation timestamp")

    # populate_by_name lets instances be built using either the snake_case
    # attribute names or the camelCase API aliases.
    model_config = {"populate_by_name": True}
57
+
58
+
59
class DAXQuery(BaseModel):
    """DAX query request model"""
    query: str = Field(..., description="DAX query expression (must start with EVALUATE)")

    @field_validator("query")
    @classmethod
    def validate_dax_query(cls, v: str) -> str:
        """Validate that DAX query starts with EVALUATE"""
        # Delegates to the shared validator from .validation; it is expected
        # to raise on invalid input since its return value is ignored here.
        _validate_dax_query(v)
        return v
69
+
70
+
71
class QueryResult(BaseModel):
    """Query execution result model"""
    # Each table is kept as a raw dict (untyped) as returned by the API;
    # defaults to an empty list when no tables are present.
    tables: list[dict[str, Any]] = Field(default_factory=list, description="Result tables")
74
+
75
+
76
class WorkspaceList(BaseModel):
    """List of workspaces response"""
    workspaces: list[Workspace] = Field(..., description="List of workspaces")
    # NOTE(review): presumably total == len(workspaces); this model does not
    # enforce it — confirm against the code that constructs it.
    total: int = Field(..., description="Total count of workspaces")
80
+
81
+
82
class DatasetList(BaseModel):
    """List of datasets response"""
    datasets: list[Dataset] = Field(..., description="List of datasets")
    # NOTE(review): presumably total == len(datasets); this model does not
    # enforce it — confirm against the code that constructs it.
    total: int = Field(..., description="Total count of datasets")
src/server.py ADDED
@@ -0,0 +1,90 @@
1
+ """
2
+ PowerBI MCP Server
3
+
4
+ Main server initialization and configuration.
5
+ """
6
+
7
+ import logging
8
+ from contextlib import asynccontextmanager
9
+ from typing import AsyncIterator
10
+
11
+ from mcp.server import FastMCP
12
+
13
+ from .auth import PowerBIAuth
14
+ from .client import PowerBIClient
15
+ from .tools import (
16
+ register_workspace_tools,
17
+ register_dataset_tools,
18
+ register_query_tools
19
+ )
20
+
21
+ # Configure logging
22
+ logging.basicConfig(
23
+ level=logging.INFO,
24
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
25
+ )
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
@asynccontextmanager
async def powerbi_lifespan(server: FastMCP) -> AsyncIterator[dict]:
    """
    Manage PowerBI server lifecycle

    Initializes authentication and API client on startup,
    ensures cleanup on shutdown.

    Yields:
        dict: Lifespan context with 'auth' and 'client' keys
    """
    logger.info("PowerBI MCP Server initializing...")

    # Initialize authentication (credentials validated on first use)
    auth = PowerBIAuth()
    logger.info("Authentication handler created")

    # Initialize API client; the client holds a reference to the auth handler.
    client = PowerBIClient(auth)
    logger.info("PowerBI API client created")

    # Yield context to server
    try:
        logger.info("PowerBI MCP Server ready")
        yield {"auth": auth, "client": client}
    finally:
        # Cleanup on shutdown — runs even if the server exits with an error.
        # Client is closed before auth, reversing the construction order.
        logger.info("PowerBI MCP Server shutting down...")
        await client.close()
        await auth.close()
        logger.info("PowerBI MCP Server stopped")
60
+
61
+
62
# Initialize MCP server with FastMCP.  Module-level so `mcp` can be imported
# by tool modules and by the console-script entry point.
mcp = FastMCP(
    name="powerbi-mcp",
    instructions=(
        "A Model Context Protocol server for PowerBI REST API. "
        "Provides tools to list workspaces, browse datasets, and execute DAX queries. "
        "Requires Azure AD service principal authentication."
    ),
    # Lifespan hook wires up auth + API client on startup (see powerbi_lifespan).
    lifespan=powerbi_lifespan
)

# Register all tools at import time, one group per module.
logger.info("Registering tools...")
register_workspace_tools(mcp)
register_dataset_tools(mcp)
register_query_tools(mcp)
logger.info("All tools registered")
79
+
80
+
81
def main():
    """Main entry point for the PowerBI MCP server.

    Reads the transport from the MCP_TRANSPORT environment variable
    (defaulting to "stdio") and runs the module-level `mcp` server.
    """
    import os
    transport = os.getenv("MCP_TRANSPORT", "stdio")
    logger.info(f"Starting PowerBI MCP Server via transport: {transport}")
    # Blocks until the server is stopped.
    mcp.run(transport=transport)


if __name__ == "__main__":
    main()
src/tools/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ """
2
+ PowerBI MCP Tools
3
+
4
+ Tool implementations for PowerBI operations.
5
+ """
6
+
7
+ from .workspaces import register_workspace_tools
8
+ from .datasets import register_dataset_tools
9
+ from .queries import register_query_tools
10
+
11
+ __all__ = [
12
+ "register_workspace_tools",
13
+ "register_dataset_tools",
14
+ "register_query_tools",
15
+ ]