stravinsky 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of stravinsky might be problematic. Click here for more details.
- mcp_bridge/__init__.py +5 -0
- mcp_bridge/auth/__init__.py +32 -0
- mcp_bridge/auth/cli.py +208 -0
- mcp_bridge/auth/oauth.py +418 -0
- mcp_bridge/auth/openai_oauth.py +350 -0
- mcp_bridge/auth/token_store.py +195 -0
- mcp_bridge/config/__init__.py +14 -0
- mcp_bridge/config/hooks.py +174 -0
- mcp_bridge/prompts/__init__.py +18 -0
- mcp_bridge/prompts/delphi.py +110 -0
- mcp_bridge/prompts/dewey.py +183 -0
- mcp_bridge/prompts/document_writer.py +155 -0
- mcp_bridge/prompts/explore.py +118 -0
- mcp_bridge/prompts/frontend.py +112 -0
- mcp_bridge/prompts/multimodal.py +58 -0
- mcp_bridge/prompts/stravinsky.py +329 -0
- mcp_bridge/server.py +866 -0
- mcp_bridge/tools/__init__.py +31 -0
- mcp_bridge/tools/agent_manager.py +665 -0
- mcp_bridge/tools/background_tasks.py +166 -0
- mcp_bridge/tools/code_search.py +301 -0
- mcp_bridge/tools/continuous_loop.py +67 -0
- mcp_bridge/tools/lsp/__init__.py +29 -0
- mcp_bridge/tools/lsp/tools.py +526 -0
- mcp_bridge/tools/model_invoke.py +233 -0
- mcp_bridge/tools/project_context.py +141 -0
- mcp_bridge/tools/session_manager.py +302 -0
- mcp_bridge/tools/skill_loader.py +212 -0
- mcp_bridge/tools/task_runner.py +97 -0
- mcp_bridge/utils/__init__.py +1 -0
- stravinsky-0.1.12.dist-info/METADATA +198 -0
- stravinsky-0.1.12.dist-info/RECORD +34 -0
- stravinsky-0.1.12.dist-info/WHEEL +4 -0
- stravinsky-0.1.12.dist-info/entry_points.txt +3 -0
mcp_bridge/server.py
ADDED
|
@@ -0,0 +1,866 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Claude Superagent MCP Bridge Server
|
|
3
|
+
|
|
4
|
+
Main entry point for the MCP server that provides tools for:
|
|
5
|
+
- OAuth-authenticated Gemini model invocation
|
|
6
|
+
- OAuth-authenticated OpenAI model invocation
|
|
7
|
+
- LSP tool proxies
|
|
8
|
+
- Session management
|
|
9
|
+
|
|
10
|
+
Run with: python -m mcp_bridge.server
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import asyncio
|
|
14
|
+
import logging
|
|
15
|
+
from typing import Any
|
|
16
|
+
|
|
17
|
+
from mcp.server import Server
|
|
18
|
+
from mcp.server.stdio import stdio_server
|
|
19
|
+
from mcp.types import (
|
|
20
|
+
Tool,
|
|
21
|
+
TextContent,
|
|
22
|
+
Resource,
|
|
23
|
+
Prompt,
|
|
24
|
+
PromptMessage,
|
|
25
|
+
GetPromptResult,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
from .auth.token_store import TokenStore
|
|
29
|
+
from .tools.model_invoke import invoke_gemini, invoke_openai
|
|
30
|
+
from .tools.code_search import lsp_diagnostics, ast_grep_search, ast_grep_replace, grep_search, glob_files
|
|
31
|
+
from .tools.session_manager import list_sessions, read_session, search_sessions, get_session_info
|
|
32
|
+
from .tools.skill_loader import list_skills, get_skill, create_skill
|
|
33
|
+
from .tools.background_tasks import task_spawn, task_status, task_list
|
|
34
|
+
from .tools.agent_manager import agent_spawn, agent_output, agent_cancel, agent_list, agent_progress
|
|
35
|
+
from .tools.project_context import get_project_context, get_system_health
|
|
36
|
+
from .tools.lsp import (
|
|
37
|
+
lsp_hover,
|
|
38
|
+
lsp_goto_definition,
|
|
39
|
+
lsp_find_references,
|
|
40
|
+
lsp_document_symbols,
|
|
41
|
+
lsp_workspace_symbols,
|
|
42
|
+
lsp_prepare_rename,
|
|
43
|
+
lsp_rename,
|
|
44
|
+
lsp_code_actions,
|
|
45
|
+
lsp_servers,
|
|
46
|
+
)
|
|
47
|
+
from .prompts import stravinsky, delphi, dewey, explore, frontend, document_writer, multimodal
|
|
48
|
+
|
|
49
|
+
# Configure logging for the whole bridge process (stderr; stdout is the MCP transport).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize the MCP server instance; the decorators below register handlers on it.
server = Server("stravinsky")

# Shared token store for OAuth tokens (Google/OpenAI); passed into model-invoke tools.
token_store = TokenStore()
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@server.list_tools()
async def list_tools() -> list[Tool]:
    """List all available tools.

    Returns the full MCP tool catalog: model invocation (Gemini/OpenAI),
    project context and health checks, code search (ast-grep, ripgrep, glob),
    session/skill inspection, background task and agent management, and LSP
    proxies. Each Tool carries a JSON Schema describing its arguments; the
    defaults here mirror the ``arguments.get(...)`` fallbacks in call_tool.
    """
    tools = [
        # --- Model invocation ---
        Tool(
            name="invoke_gemini",
            description=(
                "Invoke a Gemini model with the given prompt. "
                "Requires OAuth authentication with Google. "
                "Use this for tasks requiring Gemini's capabilities like "
                "frontend UI generation, documentation writing, or multimodal analysis."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {
                        "type": "string",
                        "description": "The prompt to send to Gemini",
                    },
                    "model": {
                        "type": "string",
                        "description": "Gemini model to use (default: gemini-3-flash)",
                        "default": "gemini-3-flash",
                    },
                    "temperature": {
                        "type": "number",
                        "description": "Sampling temperature (0.0-2.0)",
                        "default": 0.7,
                    },
                    "max_tokens": {
                        "type": "integer",
                        "description": "Maximum tokens in response",
                        "default": 4096,
                    },
                    "thinking_budget": {
                        "type": "integer",
                        "description": "Tokens reserved for internal reasoning (if model supports it)",
                        "default": 0,
                    },
                },
                "required": ["prompt"],
            },
        ),
        Tool(
            name="invoke_openai",
            description=(
                "Invoke an OpenAI model with the given prompt. "
                "Requires OAuth authentication with OpenAI. "
                "Use this for tasks requiring GPT capabilities like "
                "strategic advice, code review, or complex reasoning."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {
                        "type": "string",
                        "description": "The prompt to send to OpenAI",
                    },
                    "model": {
                        "type": "string",
                        "description": "OpenAI model to use (default: gpt-5.2)",
                        "default": "gpt-5.2",
                    },
                    "temperature": {
                        "type": "number",
                        "description": "Sampling temperature (0.0-2.0)",
                        "default": 0.7,
                    },
                    "max_tokens": {
                        "type": "integer",
                        "description": "Maximum tokens in response",
                        "default": 4096,
                    },
                    "thinking_budget": {
                        "type": "integer",
                        "description": "Tokens reserved for internal reasoning (e.g. gpt-5.2 / o1 / o3)",
                        "default": 0,
                    },
                },
                "required": ["prompt"],
            },
        ),
        # --- Project context / health ---
        Tool(
            name="get_project_context",
            description="Summarize project environment including Git status, local rules (.claude/rules/), and pending todos.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {"type": "string", "description": "Path to the project root"},
                },
            },
        ),
        Tool(
            name="get_system_health",
            description="Comprehensive check of system dependencies (rg, fd, sg, etc.) and authentication status.",
            inputSchema={
                "type": "object",
                "properties": {},
            },
        ),
        # --- Code search / diagnostics ---
        Tool(
            name="lsp_diagnostics",
            description="Get diagnostics (errors, warnings) for a file using language tools (tsc, ruff).",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Path to file to analyze"},
                    "severity": {"type": "string", "description": "Filter: error, warning, all", "default": "all"},
                },
                "required": ["file_path"],
            },
        ),
        Tool(
            name="ast_grep_search",
            description="Search codebase using ast-grep for structural AST patterns.",
            inputSchema={
                "type": "object",
                "properties": {
                    "pattern": {"type": "string", "description": "ast-grep pattern"},
                    "directory": {"type": "string", "description": "Directory to search", "default": "."},
                    "language": {"type": "string", "description": "Filter by language"},
                },
                "required": ["pattern"],
            },
        ),
        Tool(
            name="grep_search",
            description="Fast text search using ripgrep.",
            inputSchema={
                "type": "object",
                "properties": {
                    "pattern": {"type": "string", "description": "Search pattern (regex)"},
                    "directory": {"type": "string", "description": "Directory to search", "default": "."},
                    "file_pattern": {"type": "string", "description": "Glob filter (e.g. *.py)"},
                },
                "required": ["pattern"],
            },
        ),
        Tool(
            name="glob_files",
            description="Find files matching a glob pattern.",
            inputSchema={
                "type": "object",
                "properties": {
                    "pattern": {"type": "string", "description": "Glob pattern (e.g. **/*.py)"},
                    "directory": {"type": "string", "description": "Base directory", "default": "."},
                },
                "required": ["pattern"],
            },
        ),
        # --- Claude Code sessions ---
        Tool(
            name="session_list",
            description="List Claude Code sessions with optional filtering.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {"type": "string", "description": "Filter by project path"},
                    "limit": {"type": "integer", "description": "Max sessions", "default": 20},
                },
            },
        ),
        Tool(
            name="session_read",
            description="Read messages from a Claude Code session.",
            inputSchema={
                "type": "object",
                "properties": {
                    "session_id": {"type": "string", "description": "Session ID"},
                    "limit": {"type": "integer", "description": "Max messages"},
                },
                "required": ["session_id"],
            },
        ),
        Tool(
            name="session_search",
            description="Search across Claude Code session messages.",
            inputSchema={
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"},
                    "session_id": {"type": "string", "description": "Search in specific session"},
                    "limit": {"type": "integer", "description": "Max results", "default": 20},
                },
                "required": ["query"],
            },
        ),
        # --- Skills (.claude/commands/) ---
        Tool(
            name="skill_list",
            description="List available Claude Code skills/commands from .claude/commands/.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {"type": "string", "description": "Project directory"},
                },
            },
        ),
        Tool(
            name="skill_get",
            description="Get the content of a specific skill/command.",
            inputSchema={
                "type": "object",
                "properties": {
                    "name": {"type": "string", "description": "Skill name"},
                    "project_path": {"type": "string", "description": "Project directory"},
                },
                "required": ["name"],
            },
        ),
        # --- Background tasks (prompt-only, no tool access) ---
        Tool(
            name="task_spawn",
            description=(
                "Spawn a background task to execute a prompt asynchronously. "
                "Returns a Task ID. Best for deep research or parallel processing."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {"type": "string", "description": "The prompt for the background agent"},
                    "model": {
                        "type": "string",
                        "description": "Model to use (gemini-3-flash or gpt-5.2)",
                        "default": "gemini-3-flash"
                    },
                },
                "required": ["prompt"],
            },
        ),
        Tool(
            name="task_status",
            description="Check the status and retrieve results of a background task.",
            inputSchema={
                "type": "object",
                "properties": {
                    "task_id": {"type": "string", "description": "The ID of the task to check"},
                },
                "required": ["task_id"],
            },
        ),
        Tool(
            name="task_list",
            description="List all active and recent background tasks.",
            inputSchema={
                "type": "object",
                "properties": {},
            },
        ),
        # New Agent Tools with Full Tool Access
        Tool(
            name="agent_spawn",
            description=(
                "Spawn a background agent. Uses Gemini by default for fast execution. "
                "Set model='claude' to use Claude Code CLI with full tool access."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {"type": "string", "description": "The task for the agent to perform"},
                    "agent_type": {
                        "type": "string",
                        "description": "Agent type: explore, dewey, frontend, delphi",
                        "default": "explore",
                    },
                    "description": {"type": "string", "description": "Short description for status display"},
                    "model": {
                        "type": "string",
                        "description": "Model: gemini-3-flash (default) or claude",
                        "default": "gemini-3-flash",
                    },
                    "thinking_budget": {
                        "type": "integer",
                        "description": "Tokens reserved for internal reasoning (if model supports it)",
                        "default": 0,
                    },
                },
                "required": ["prompt"],
            },
        ),
        Tool(
            name="agent_output",
            description="Get output from a background agent. Use block=true to wait for completion.",
            inputSchema={
                "type": "object",
                "properties": {
                    "task_id": {"type": "string", "description": "The agent task ID"},
                    "block": {"type": "boolean", "description": "Wait for completion", "default": False},
                },
                "required": ["task_id"],
            },
        ),
        Tool(
            name="agent_cancel",
            description="Cancel a running background agent.",
            inputSchema={
                "type": "object",
                "properties": {
                    "task_id": {"type": "string", "description": "The agent task ID to cancel"},
                },
                "required": ["task_id"],
            },
        ),
        Tool(
            name="agent_list",
            description="List all background agent tasks with their status.",
            inputSchema={
                "type": "object",
                "properties": {},
            },
        ),
        Tool(
            name="agent_progress",
            description="Get real-time progress from a running background agent. Shows recent output lines to monitor what the agent is doing.",
            inputSchema={
                "type": "object",
                "properties": {
                    "task_id": {"type": "string", "description": "The agent task ID"},
                    "lines": {"type": "integer", "description": "Number of recent lines to show", "default": 20},
                },
                "required": ["task_id"],
            },
        ),
        # LSP Tools
        Tool(
            name="lsp_hover",
            description="Get type info, documentation, and signature at a position in a file.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                    "line": {"type": "integer", "description": "Line number (1-indexed)"},
                    "character": {"type": "integer", "description": "Character position (0-indexed)"},
                },
                "required": ["file_path", "line", "character"],
            },
        ),
        Tool(
            name="lsp_goto_definition",
            description="Find where a symbol is defined. Jump to symbol definition.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                    "line": {"type": "integer", "description": "Line number (1-indexed)"},
                    "character": {"type": "integer", "description": "Character position (0-indexed)"},
                },
                "required": ["file_path", "line", "character"],
            },
        ),
        Tool(
            name="lsp_find_references",
            description="Find all references to a symbol across the workspace.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                    "line": {"type": "integer", "description": "Line number (1-indexed)"},
                    "character": {"type": "integer", "description": "Character position (0-indexed)"},
                    "include_declaration": {"type": "boolean", "description": "Include the declaration itself", "default": True},
                },
                "required": ["file_path", "line", "character"],
            },
        ),
        Tool(
            name="lsp_document_symbols",
            description="Get hierarchical outline of all symbols (functions, classes, methods) in a file.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                },
                "required": ["file_path"],
            },
        ),
        Tool(
            name="lsp_workspace_symbols",
            description="Search for symbols by name across the entire workspace.",
            inputSchema={
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Symbol name to search for (fuzzy match)"},
                    "directory": {"type": "string", "description": "Workspace directory", "default": "."},
                },
                "required": ["query"],
            },
        ),
        Tool(
            name="lsp_prepare_rename",
            description="Check if a symbol at position can be renamed. Use before lsp_rename.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                    "line": {"type": "integer", "description": "Line number (1-indexed)"},
                    "character": {"type": "integer", "description": "Character position (0-indexed)"},
                },
                "required": ["file_path", "line", "character"],
            },
        ),
        Tool(
            name="lsp_rename",
            description="Rename a symbol across the workspace. Use lsp_prepare_rename first to validate.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                    "line": {"type": "integer", "description": "Line number (1-indexed)"},
                    "character": {"type": "integer", "description": "Character position (0-indexed)"},
                    "new_name": {"type": "string", "description": "New name for the symbol"},
                    "dry_run": {"type": "boolean", "description": "Preview changes without applying", "default": True},
                },
                "required": ["file_path", "line", "character", "new_name"],
            },
        ),
        Tool(
            name="lsp_code_actions",
            description="Get available quick fixes and refactorings at a position.",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "Absolute path to the file"},
                    "line": {"type": "integer", "description": "Line number (1-indexed)"},
                    "character": {"type": "integer", "description": "Character position (0-indexed)"},
                },
                "required": ["file_path", "line", "character"],
            },
        ),
        Tool(
            name="lsp_servers",
            description="List available LSP servers and their installation status.",
            inputSchema={
                "type": "object",
                "properties": {},
            },
        ),
        Tool(
            name="ast_grep_replace",
            description="Replace code patterns using ast-grep's AST-aware replacement. More reliable than text-based replace for refactoring.",
            inputSchema={
                "type": "object",
                "properties": {
                    "pattern": {"type": "string", "description": "ast-grep pattern to search (e.g., 'console.log($A)')"},
                    "replacement": {"type": "string", "description": "Replacement pattern (e.g., 'logger.debug($A)')"},
                    "directory": {"type": "string", "description": "Directory to search in", "default": "."},
                    "language": {"type": "string", "description": "Filter by language (typescript, python, etc.)"},
                    "dry_run": {"type": "boolean", "description": "Preview changes without applying", "default": True},
                },
                "required": ["pattern", "replacement"],
            },
        ),
    ]
    return tools
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
@server.call_tool()
async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
    """Handle tool invocations.

    Dispatches on the tool ``name`` to the matching implementation, applying
    the same defaults advertised in list_tools' schemas. Session/skill tools
    are synchronous; everything else is awaited. Any exception is caught at
    this boundary and surfaced to the MCP client as a text error payload.
    """
    # Lazy %-style args so formatting only happens when INFO is enabled.
    # NOTE(review): this logs full tool arguments (including prompts) at INFO.
    logger.info("Tool called: %s with args: %s", name, arguments)

    try:
        if name == "invoke_gemini":
            result = await invoke_gemini(
                token_store=token_store,
                prompt=arguments["prompt"],
                model=arguments.get("model", "gemini-3-flash"),
                temperature=arguments.get("temperature", 0.7),
                max_tokens=arguments.get("max_tokens", 4096),
                thinking_budget=arguments.get("thinking_budget", 0),
            )
            return [TextContent(type="text", text=result)]

        elif name == "invoke_openai":
            result = await invoke_openai(
                token_store=token_store,
                prompt=arguments["prompt"],
                model=arguments.get("model", "gpt-5.2"),
                temperature=arguments.get("temperature", 0.7),
                max_tokens=arguments.get("max_tokens", 4096),
                thinking_budget=arguments.get("thinking_budget", 0),
            )
            return [TextContent(type="text", text=result)]

        elif name == "get_project_context":
            result = await get_project_context(
                project_path=arguments.get("project_path"),
            )
            return [TextContent(type="text", text=result)]

        elif name == "get_system_health":
            result = await get_system_health()
            return [TextContent(type="text", text=result)]

        elif name == "lsp_diagnostics":
            result = await lsp_diagnostics(
                file_path=arguments["file_path"],
                severity=arguments.get("severity", "all"),
            )
            return [TextContent(type="text", text=result)]

        elif name == "ast_grep_search":
            result = await ast_grep_search(
                pattern=arguments["pattern"],
                directory=arguments.get("directory", "."),
                language=arguments.get("language", ""),
            )
            return [TextContent(type="text", text=result)]

        elif name == "grep_search":
            result = await grep_search(
                pattern=arguments["pattern"],
                directory=arguments.get("directory", "."),
                file_pattern=arguments.get("file_pattern", ""),
            )
            return [TextContent(type="text", text=result)]

        elif name == "glob_files":
            result = await glob_files(
                pattern=arguments["pattern"],
                directory=arguments.get("directory", "."),
            )
            return [TextContent(type="text", text=result)]

        # Session tools are synchronous (local JSONL reads) — no await.
        elif name == "session_list":
            result = list_sessions(
                project_path=arguments.get("project_path"),
                limit=arguments.get("limit", 20),
            )
            return [TextContent(type="text", text=result)]

        elif name == "session_read":
            result = read_session(
                session_id=arguments["session_id"],
                limit=arguments.get("limit"),
            )
            return [TextContent(type="text", text=result)]

        elif name == "session_search":
            result = search_sessions(
                query=arguments["query"],
                session_id=arguments.get("session_id"),
                limit=arguments.get("limit", 20),
            )
            return [TextContent(type="text", text=result)]

        elif name == "skill_list":
            result = list_skills(
                project_path=arguments.get("project_path"),
            )
            return [TextContent(type="text", text=result)]

        elif name == "skill_get":
            result = get_skill(
                name=arguments["name"],
                project_path=arguments.get("project_path"),
            )
            return [TextContent(type="text", text=result)]

        elif name == "task_spawn":
            result = await task_spawn(
                prompt=arguments["prompt"],
                model=arguments.get("model", "gemini-3-flash"),
            )
            return [TextContent(type="text", text=result)]

        elif name == "task_status":
            result = await task_status(
                task_id=arguments["task_id"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "task_list":
            result = await task_list()
            return [TextContent(type="text", text=result)]

        # Agent tools with full tool access
        elif name == "agent_spawn":
            result = await agent_spawn(
                prompt=arguments["prompt"],
                agent_type=arguments.get("agent_type", "explore"),
                description=arguments.get("description", ""),
                model=arguments.get("model", "gemini-3-flash"),
                thinking_budget=arguments.get("thinking_budget", 0),
            )
            return [TextContent(type="text", text=result)]

        elif name == "agent_output":
            result = await agent_output(
                task_id=arguments["task_id"],
                block=arguments.get("block", False),
            )
            return [TextContent(type="text", text=result)]

        elif name == "agent_cancel":
            result = await agent_cancel(
                task_id=arguments["task_id"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "agent_list":
            result = await agent_list()
            return [TextContent(type="text", text=result)]

        elif name == "agent_progress":
            result = await agent_progress(
                task_id=arguments["task_id"],
                lines=arguments.get("lines", 20),
            )
            return [TextContent(type="text", text=result)]

        # LSP Tools
        elif name == "lsp_hover":
            result = await lsp_hover(
                file_path=arguments["file_path"],
                line=arguments["line"],
                character=arguments["character"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_goto_definition":
            result = await lsp_goto_definition(
                file_path=arguments["file_path"],
                line=arguments["line"],
                character=arguments["character"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_find_references":
            result = await lsp_find_references(
                file_path=arguments["file_path"],
                line=arguments["line"],
                character=arguments["character"],
                include_declaration=arguments.get("include_declaration", True),
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_document_symbols":
            result = await lsp_document_symbols(
                file_path=arguments["file_path"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_workspace_symbols":
            result = await lsp_workspace_symbols(
                query=arguments["query"],
                directory=arguments.get("directory", "."),
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_prepare_rename":
            result = await lsp_prepare_rename(
                file_path=arguments["file_path"],
                line=arguments["line"],
                character=arguments["character"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_rename":
            result = await lsp_rename(
                file_path=arguments["file_path"],
                line=arguments["line"],
                character=arguments["character"],
                new_name=arguments["new_name"],
                dry_run=arguments.get("dry_run", True),
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_code_actions":
            result = await lsp_code_actions(
                file_path=arguments["file_path"],
                line=arguments["line"],
                character=arguments["character"],
            )
            return [TextContent(type="text", text=result)]

        elif name == "lsp_servers":
            result = await lsp_servers()
            return [TextContent(type="text", text=result)]

        elif name == "ast_grep_replace":
            result = await ast_grep_replace(
                pattern=arguments["pattern"],
                replacement=arguments["replacement"],
                directory=arguments.get("directory", "."),
                language=arguments.get("language", ""),
                dry_run=arguments.get("dry_run", True),
            )
            return [TextContent(type="text", text=result)]

        else:
            return [TextContent(type="text", text=f"Unknown tool: {name}")]

    except Exception as e:
        # logger.exception records the full traceback (logger.error dropped it).
        logger.exception("Error in tool %s: %s", name, e)
        return [TextContent(type="text", text=f"Error: {str(e)}")]
|
|
752
|
+
|
|
753
|
+
|
|
754
|
+
@server.list_prompts()
async def list_prompts() -> list[Prompt]:
    """List available agent prompts.

    Builds the catalog from a (name, description) table; every prompt
    currently takes no arguments.
    """
    catalog: list[tuple[str, str]] = [
        (
            "stravinsky",
            "Stravinsky - Powerful AI orchestrator. "
            "Plans obsessively with todos, assesses search complexity before "
            "exploration, delegates strategically to specialized agents.",
        ),
        (
            "delphi",
            "Delphi - Strategic advisor using GPT for debugging, "
            "architecture review, and complex problem solving.",
        ),
        (
            "dewey",
            "Dewey - Documentation and GitHub research specialist. "
            "Finds implementation examples, official docs, and code patterns.",
        ),
        (
            "explore",
            "Explore - Fast codebase search specialist. "
            "Answers 'Where is X?', finds files and code patterns.",
        ),
        (
            "frontend",
            "Frontend UI/UX Engineer - Designer-turned-developer for stunning visuals. "
            "Excels at styling, layout, animation, typography.",
        ),
        (
            "document_writer",
            "Document Writer - Technical documentation specialist. "
            "README files, API docs, architecture docs, user guides.",
        ),
        (
            "multimodal",
            "Multimodal Looker - Visual content analysis. "
            "PDFs, images, diagrams - extracts and interprets visual data.",
        ),
    ]
    return [
        Prompt(name=prompt_name, description=blurb, arguments=[])
        for prompt_name, blurb in catalog
    ]
|
|
816
|
+
|
|
817
|
+
|
|
818
|
+
@server.get_prompt()
async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptResult:
    """Get a specific agent prompt.

    Looks up the prompt builder by name and returns its text as a single
    user message. Raises ValueError for names not in the registry.
    """
    registry = {
        "stravinsky": ("Stravinsky orchestrator system prompt", stravinsky.get_stravinsky_prompt),
        "delphi": ("Delphi advisor system prompt", delphi.get_delphi_prompt),
        "dewey": ("Dewey research agent prompt", dewey.get_dewey_prompt),
        "explore": ("Explore codebase search prompt", explore.get_explore_prompt),
        "frontend": ("Frontend UI/UX Engineer prompt", frontend.get_frontend_prompt),
        "document_writer": ("Document Writer prompt", document_writer.get_document_writer_prompt),
        "multimodal": ("Multimodal Looker prompt", multimodal.get_multimodal_prompt),
    }

    entry = registry.get(name)
    if entry is None:
        raise ValueError(f"Unknown prompt: {name}")

    description, builder = entry
    message = PromptMessage(
        role="user",
        content=TextContent(type="text", text=builder()),
    )
    return GetPromptResult(description=description, messages=[message])
|
|
846
|
+
|
|
847
|
+
|
|
848
|
+
async def async_main():
    """Async entry point: serve MCP requests over stdio until the stream closes."""
    logger.info("Starting Stravinsky MCP Bridge Server...")

    async with stdio_server() as (reader, writer):
        init_options = server.create_initialization_options()
        await server.run(reader, writer, init_options)
|
|
858
|
+
|
|
859
|
+
|
|
860
|
+
def main():
    """Synchronous main entry point for uvx/CLI.

    Thin wrapper so the packaging entry point can reference a plain
    callable; blocks until the server's stdio stream closes.
    """
    asyncio.run(async_main())
|
|
863
|
+
|
|
864
|
+
|
|
865
|
+
# Allow `python -m mcp_bridge.server` / direct execution.
if __name__ == "__main__":
    main()
|