stravinsky 0.2.7__py3-none-any.whl → 0.2.40__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
- mcp_bridge/__init__.py +1 -1
- mcp_bridge/auth/cli.py +84 -46
- mcp_bridge/auth/oauth.py +88 -63
- mcp_bridge/hooks/__init__.py +29 -8
- mcp_bridge/hooks/agent_reminder.py +61 -0
- mcp_bridge/hooks/auto_slash_command.py +186 -0
- mcp_bridge/hooks/comment_checker.py +136 -0
- mcp_bridge/hooks/context_monitor.py +58 -0
- mcp_bridge/hooks/empty_message_sanitizer.py +240 -0
- mcp_bridge/hooks/keyword_detector.py +122 -0
- mcp_bridge/hooks/manager.py +27 -8
- mcp_bridge/hooks/preemptive_compaction.py +157 -0
- mcp_bridge/hooks/session_recovery.py +186 -0
- mcp_bridge/hooks/todo_enforcer.py +75 -0
- mcp_bridge/hooks/truncator.py +1 -1
- mcp_bridge/native_hooks/stravinsky_mode.py +109 -0
- mcp_bridge/native_hooks/truncator.py +1 -1
- mcp_bridge/prompts/delphi.py +3 -2
- mcp_bridge/prompts/dewey.py +105 -21
- mcp_bridge/prompts/stravinsky.py +451 -127
- mcp_bridge/server.py +304 -38
- mcp_bridge/server_tools.py +21 -3
- mcp_bridge/tools/__init__.py +2 -1
- mcp_bridge/tools/agent_manager.py +313 -236
- mcp_bridge/tools/init.py +1 -1
- mcp_bridge/tools/model_invoke.py +534 -52
- mcp_bridge/tools/skill_loader.py +51 -47
- mcp_bridge/tools/task_runner.py +74 -30
- mcp_bridge/tools/templates.py +101 -12
- {stravinsky-0.2.7.dist-info → stravinsky-0.2.40.dist-info}/METADATA +6 -12
- stravinsky-0.2.40.dist-info/RECORD +57 -0
- stravinsky-0.2.7.dist-info/RECORD +0 -47
- {stravinsky-0.2.7.dist-info → stravinsky-0.2.40.dist-info}/WHEEL +0 -0
- {stravinsky-0.2.7.dist-info → stravinsky-0.2.40.dist-info}/entry_points.txt +0 -0
mcp_bridge/server.py
CHANGED
@@ -31,12 +31,11 @@ from . import __version__
 
 # Configure logging to stderr explicitly to avoid protocol corruption
 logging.basicConfig(
-    level=logging.INFO,
-    format='%(levelname)s:%(name)s:%(message)s',
-    stream=sys.stderr
+    level=logging.INFO, format="%(levelname)s:%(name)s:%(message)s", stream=sys.stderr
 )
 logger = logging.getLogger(__name__)
 
+
 # Pre-async crash logger
 def install_emergency_logger():
     def handle_exception(exc_type, exc_value, exc_traceback):
@@ -47,13 +46,15 @@ def install_emergency_logger():
         try:
             with open("/tmp/stravinsky_crash.log", "a") as f:
                 import traceback
+
                 f.write(f"\n--- CRASH AT {time.ctime()} ---\n")
                 traceback.print_exception(exc_type, exc_value, exc_traceback, file=f)
         except:
             pass
-
+
     sys.excepthook = handle_exception
 
+
 install_emergency_logger()
 
 # --- SERVER INITIALIZATION ---
@@ -64,28 +65,36 @@ server = Server("stravinsky", version=__version__)
 _token_store = None
 _hook_manager = None
 
+
 def get_token_store():
     global _token_store
     if _token_store is None:
         from .auth.token_store import TokenStore
+
         _token_store = TokenStore()
     return _token_store
 
+
 def get_hook_manager_lazy():
     global _hook_manager
     if _hook_manager is None:
         from .hooks.manager import get_hook_manager
+
         _hook_manager = get_hook_manager()
     return _hook_manager
 
+
 # --- MCP INTERFACE ---
 
+
 @server.list_tools()
 async def list_tools() -> list[Tool]:
     """List available tools (metadata only)."""
     from .server_tools import get_tool_definitions
+
     return get_tool_definitions()
 
+
 @server.call_tool()
 async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
     """Handle tool calls with deep lazy loading of implementations."""
@@ -96,12 +105,13 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
     try:
         # Pre-tool call hooks orchestration
         arguments = await hook_manager.execute_pre_tool_call(name, arguments)
-
+
         result_content = None
 
         # --- MODEL DISPATCH ---
         if name == "invoke_gemini":
             from .tools.model_invoke import invoke_gemini
+
             result_content = await invoke_gemini(
                 token_store=token_store,
                 prompt=arguments["prompt"],
@@ -113,6 +123,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "invoke_openai":
             from .tools.model_invoke import invoke_openai
+
             result_content = await invoke_openai(
                 token_store=token_store,
                 prompt=arguments["prompt"],
@@ -125,15 +136,18 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         # --- CONTEXT DISPATCH ---
         elif name == "get_project_context":
             from .tools.project_context import get_project_context
+
             result_content = await get_project_context(project_path=arguments.get("project_path"))
 
         elif name == "get_system_health":
             from .tools.project_context import get_system_health
+
             result_content = await get_system_health()
 
         # --- SEARCH DISPATCH ---
         elif name == "grep_search":
             from .tools.code_search import grep_search
+
             result_content = await grep_search(
                 pattern=arguments["pattern"],
                 directory=arguments.get("directory", "."),
@@ -142,6 +156,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "ast_grep_search":
             from .tools.code_search import ast_grep_search
+
             result_content = await ast_grep_search(
                 pattern=arguments["pattern"],
                 directory=arguments.get("directory", "."),
@@ -150,6 +165,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "ast_grep_replace":
             from .tools.code_search import ast_grep_replace
+
             result_content = await ast_grep_replace(
                 pattern=arguments["pattern"],
                 replacement=arguments["replacement"],
@@ -160,6 +176,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "glob_files":
             from .tools.code_search import glob_files
+
             result_content = await glob_files(
                 pattern=arguments["pattern"],
                 directory=arguments.get("directory", "."),
@@ -168,6 +185,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         # --- SESSION DISPATCH ---
         elif name == "session_list":
             from .tools.session_manager import list_sessions
+
             result_content = list_sessions(
                 project_path=arguments.get("project_path"),
                 limit=arguments.get("limit", 20),
@@ -175,6 +193,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "session_read":
             from .tools.session_manager import read_session
+
             result_content = read_session(
                 session_id=arguments["session_id"],
                 limit=arguments.get("limit"),
@@ -182,6 +201,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "session_search":
             from .tools.session_manager import search_sessions
+
             result_content = search_sessions(
                 query=arguments["query"],
                 session_id=arguments.get("session_id"),
@@ -191,33 +211,57 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         # --- SKILL DISPATCH ---
         elif name == "skill_list":
             from .tools.skill_loader import list_skills
+
             result_content = list_skills(project_path=arguments.get("project_path"))
 
         elif name == "skill_get":
             from .tools.skill_loader import get_skill
+
             result_content = get_skill(
                 name=arguments["name"],
                 project_path=arguments.get("project_path"),
             )
 
+        elif name == "stravinsky_version":
+            from . import __version__
+            import sys
+            import os
+
+            result_content = [
+                TextContent(
+                    type="text",
+                    text=f"Stravinsky Bridge v{__version__}\n"
+                    f"Python: {sys.version.split()[0]}\n"
+                    f"Platform: {sys.platform}\n"
+                    f"CWD: {os.getcwd()}\n"
+                    f"CLI: {os.environ.get('CLAUDE_CLI', '/opt/homebrew/bin/claude')}",
+                )
+            ]
+
+        elif name == "system_restart":
+            # Schedule a restart. We can't exit immediately or MCP will error on the reply.
+            # We'll use a small delay.
+            async def restart_soon():
+                await asyncio.sleep(1)
+                os._exit(0)  # Immediate exit
+
+            asyncio.create_task(restart_soon())
+            result_content = [
+                TextContent(
+                    type="text",
+                    text="🚀 Restarting Stravinsky Bridge... This process will exit and Claude Code will automatically respawn it. Please wait a few seconds before calling tools again.",
+                )
+            ]
+
         # --- AGENT DISPATCH ---
         elif name == "agent_spawn":
-            from .tools.agent_manager import
-
-            result_content = await
-                token_store=token_store,
-                prompt=arguments["prompt"],
-                agent_type=arguments.get("agent_type", "explore"),
-                description=arguments.get("description", ""),
-                parent_session_id=arguments.get("parent_session_id"),
-                system_prompt=arguments.get("system_prompt"),
-                model=arguments.get("model", "gemini-3-flash"),
-                thinking_budget=arguments.get("thinking_budget", 0),
-                timeout=arguments.get("timeout", 300),
-            )
+            from .tools.agent_manager import agent_spawn
+
+            result_content = await agent_spawn(**arguments)
 
         elif name == "agent_output":
             from .tools.agent_manager import agent_output
+
             result_content = await agent_output(
                 task_id=arguments["task_id"],
                 block=arguments.get("block", False),
@@ -225,14 +269,17 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "agent_cancel":
             from .tools.agent_manager import agent_cancel
+
             result_content = await agent_cancel(task_id=arguments["task_id"])
 
         elif name == "agent_list":
             from .tools.agent_manager import agent_list
+
             result_content = await agent_list()
 
         elif name == "agent_progress":
             from .tools.agent_manager import agent_progress
+
             result_content = await agent_progress(
                 task_id=arguments["task_id"],
                 lines=arguments.get("lines", 20),
@@ -240,6 +287,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "agent_retry":
             from .tools.agent_manager import agent_retry
+
             result_content = await agent_retry(
                 task_id=arguments["task_id"],
                 new_prompt=arguments.get("new_prompt"),
@@ -249,6 +297,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         # --- BACKGROUND TASK DISPATCH ---
         elif name == "task_spawn":
             from .tools.background_tasks import task_spawn
+
             result_content = await task_spawn(
                 prompt=arguments["prompt"],
                 model=arguments.get("model", "gemini-3-flash"),
@@ -256,15 +305,18 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "task_status":
             from .tools.background_tasks import task_status
+
             result_content = await task_status(task_id=arguments["task_id"])
 
         elif name == "task_list":
             from .tools.background_tasks import task_list
+
             result_content = await task_list()
 
         # --- LSP DISPATCH ---
         elif name == "lsp_hover":
             from .tools.lsp import lsp_hover
+
             result_content = await lsp_hover(
                 file_path=arguments["file_path"],
                 line=arguments["line"],
@@ -273,6 +325,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "lsp_goto_definition":
             from .tools.lsp import lsp_goto_definition
+
             result_content = await lsp_goto_definition(
                 file_path=arguments["file_path"],
                 line=arguments["line"],
@@ -281,6 +334,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "lsp_find_references":
             from .tools.lsp import lsp_find_references
+
             result_content = await lsp_find_references(
                 file_path=arguments["file_path"],
                 line=arguments["line"],
@@ -290,14 +344,17 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "lsp_document_symbols":
             from .tools.lsp import lsp_document_symbols
+
             result_content = await lsp_document_symbols(file_path=arguments["file_path"])
 
         elif name == "lsp_workspace_symbols":
             from .tools.lsp import lsp_workspace_symbols
+
             result_content = await lsp_workspace_symbols(query=arguments["query"])
 
         elif name == "lsp_prepare_rename":
             from .tools.lsp import lsp_prepare_rename
+
             result_content = await lsp_prepare_rename(
                 file_path=arguments["file_path"],
                 line=arguments["line"],
@@ -306,6 +363,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "lsp_rename":
             from .tools.lsp import lsp_rename
+
             result_content = await lsp_rename(
                 file_path=arguments["file_path"],
                 line=arguments["line"],
@@ -315,6 +373,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "lsp_code_actions":
             from .tools.lsp import lsp_code_actions
+
             result_content = await lsp_code_actions(
                 file_path=arguments["file_path"],
                 line=arguments["line"],
@@ -323,6 +382,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         elif name == "lsp_servers":
             from .tools.lsp import lsp_servers
+
             result_content = await lsp_servers()
 
         else:
@@ -330,12 +390,20 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         # Post-tool call hooks orchestration
         if result_content is not None:
-
-
-
-
-
-
+            if (
+                isinstance(result_content, list)
+                and len(result_content) > 0
+                and hasattr(result_content[0], "text")
+            ):
+                processed_text = await hook_manager.execute_post_tool_call(
+                    name, arguments, result_content[0].text
+                )
+                result_content[0].text = processed_text
+            elif isinstance(result_content, str):
+                result_content = await hook_manager.execute_post_tool_call(
+                    name, arguments, result_content
+                )
+
         # Format final return as List[TextContent]
         if isinstance(result_content, list):
            return result_content
@@ -345,17 +413,20 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         logger.error(f"Error calling tool {name}: {e}")
         return [TextContent(type="text", text=f"Error: {str(e)}")]
 
+
 @server.list_prompts()
 async def list_prompts() -> list[Prompt]:
     """List available prompts (metadata only)."""
     from .server_tools import get_prompt_definitions
+
     return get_prompt_definitions()
 
+
 @server.get_prompt()
 async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptResult:
     """Get a specific prompt content (lazy loaded)."""
     from .prompts import stravinsky, delphi, dewey, explore, frontend, document_writer, multimodal
-
+
     prompts_map = {
         "stravinsky": ("Stravinsky orchestrator system prompt", stravinsky.get_stravinsky_prompt),
         "delphi": ("Delphi advisor system prompt", delphi.get_delphi_prompt),
@@ -365,13 +436,13 @@ async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptRe
         "document_writer": ("Document Writer prompt", document_writer.get_document_writer_prompt),
         "multimodal": ("Multimodal Looker prompt", multimodal.get_multimodal_prompt),
     }
-
+
     if name not in prompts_map:
         raise ValueError(f"Unknown prompt: {name}")
-
+
     description, get_prompt_fn = prompts_map[name]
     prompt_text = get_prompt_fn()
-
+
     return GetPromptResult(
         description=description,
         messages=[
@@ -382,11 +453,13 @@ async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptRe
         ],
     )
 
+
 async def async_main():
     """Server execution entry point."""
     # Initialize hooks at runtime, not import time
     try:
         from .hooks import initialize_hooks
+
         initialize_hooks()
     except Exception as e:
         logger.error(f"Failed to initialize hooks: {e}")
@@ -402,22 +475,215 @@ async def async_main():
         logger.critical("Server process crashed in async_main", exc_info=True)
         sys.exit(1)
 
+
 def main():
     """Synchronous entry point with CLI arg handling."""
     import argparse
-
+    import sys
+    from .tools.agent_manager import get_manager
+    from .auth.token_store import TokenStore
+
+    parser = argparse.ArgumentParser(
+        description="Stravinsky MCP Bridge - Multi-model AI orchestration for Claude Code. "
+        "Spawns background agents with full tool access via Claude CLI.",
+        prog="stravinsky",
+        epilog="Examples:\n"
+        "  stravinsky # Start MCP server (default)\n"
+        "  stravinsky list # Show all background agents\n"
+        "  stravinsky status # Check auth status\n"
+        "  stravinsky stop --clear # Stop agents and clear history\n",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
     parser.add_argument("--version", action="version", version=f"stravinsky {__version__}")
-
-
+
+    subparsers = parser.add_subparsers(dest="command", help="Available commands", metavar="COMMAND")
+
+    # list command
+    subparsers.add_parser(
+        "list",
+        help="List all background agent tasks",
+        description="Shows status, ID, type, and description of all spawned agents.",
+    )
+
+    # status command
+    subparsers.add_parser(
+        "status",
+        help="Show authentication status for all providers",
+        description="Displays OAuth authentication status and token expiration for Gemini and OpenAI.",
+    )
+
+    # start command (explicit server start)
+    subparsers.add_parser(
+        "start",
+        help="Explicitly start the MCP server (STDIO transport)",
+        description="Starts the MCP server for communication with Claude Code. Usually started automatically.",
+    )
+
+    # stop command (stop all agents)
+    stop_parser = subparsers.add_parser(
+        "stop",
+        help="Stop all running background agents",
+        description="Terminates all active agent processes. Use --clear to also remove history.",
+    )
+    stop_parser.add_argument(
+        "--clear",
+        action="store_true",
+        help="Also clear agent history from .stravinsky/agents.json",
+    )
+
+    # auth command (authentication)
+    auth_parser = subparsers.add_parser(
+        "auth",
+        help="Authentication commands (login/logout/refresh/status)",
+        description="Manage OAuth authentication for Gemini and OpenAI providers.",
+    )
+    auth_subparsers = auth_parser.add_subparsers(
+        dest="auth_command", help="Auth subcommands", metavar="SUBCOMMAND"
+    )
+
+    # auth login
+    login_parser = auth_subparsers.add_parser(
+        "login",
+        help="Login to a provider via browser OAuth",
+        description="Opens browser for OAuth authentication with the specified provider.",
+    )
+    login_parser.add_argument(
+        "provider",
+        choices=["gemini", "openai"],
+        metavar="PROVIDER",
+        help="Provider to authenticate with: gemini (Google) or openai (ChatGPT Plus/Pro)",
+    )
+
+    # auth logout
+    logout_parser = auth_subparsers.add_parser(
+        "logout",
+        help="Remove stored OAuth credentials",
+        description="Deletes stored access and refresh tokens for the specified provider.",
+    )
+    logout_parser.add_argument(
+        "provider",
+        choices=["gemini", "openai"],
+        metavar="PROVIDER",
+        help="Provider to logout from: gemini or openai",
+    )
+
+    # auth status
+    auth_subparsers.add_parser(
+        "status",
+        help="Show authentication status for all providers",
+        description="Displays authentication status and token expiration for Gemini and OpenAI.",
+    )
+
+    # auth refresh
+    refresh_parser = auth_subparsers.add_parser(
+        "refresh",
+        help="Manually refresh access token",
+        description="Force-refresh the access token using the stored refresh token.",
+    )
+    refresh_parser.add_argument(
+        "provider",
+        choices=["gemini", "openai"],
+        metavar="PROVIDER",
+        help="Provider to refresh token for: gemini or openai",
+    )
+
+    # auth init
+    auth_subparsers.add_parser(
+        "init",
+        help="Bootstrap current repository for Stravinsky",
+        description="Creates .stravinsky/ directory structure and copies default configuration files.",
+    )
+
+    # Check for CLI flags
     args, unknown = parser.parse_known_args()
-
-
+
+    if args.command == "list":
+        # Run agent_list logic
+        manager = get_manager()
+        tasks = manager.list_tasks()
+        if not tasks:
+            print("No background agent tasks found.")
+            return 0
+
+        print("\nStravinsky Background Agents:")
+        print("-" * 100)
+        print(f"{'STATUS':10} | {'ID':15} | {'TYPE':10} | {'STARTED':20} | DESCRIPTION")
+        print("-" * 100)
+        for t in sorted(tasks, key=lambda x: x.get("created_at", ""), reverse=True):
+            status = t["status"]
+            task_id = t["id"]
+            agent = t["agent_type"]
+            created = t.get("created_at", "")[:19].replace("T", " ")  # Format datetime
+            desc = t.get("description", t.get("prompt", "")[:40])[:40]
+            print(f"{status.upper():10} | {task_id:15} | {agent:10} | {created:20} | {desc}")
+
+            # Show error for failed agents
+            if status == "failed" and t.get("error"):
+                error_msg = t["error"][:100].replace("\n", " ")
+                print(f" └─ ERROR: {error_msg}")
+        print("-" * 100)
+        return 0
+
+    elif args.command == "status":
+        from .auth.cli import cmd_status
+
+        return cmd_status(TokenStore())
+
+    elif args.command == "start":
         asyncio.run(async_main())
-
-
-
-
-
+        return 0
+
+    elif args.command == "stop":
+        manager = get_manager()
+        count = manager.stop_all(clear_history=getattr(args, "clear", False))
+        if getattr(args, "clear", False):
+            print(f"Cleared {count} agent task(s) from history.")
+        else:
+            print(f"Stopped {count} running agent(s).")
+        return 0
+
+    elif args.command == "auth":
+        auth_cmd = getattr(args, "auth_command", None)
+        token_store = get_token_store()
+
+        if auth_cmd == "login":
+            from .auth.cli import cmd_login
+
+            return cmd_login(args.provider, token_store)
+
+        elif auth_cmd == "logout":
+            from .auth.cli import cmd_logout
+
+            return cmd_logout(args.provider, token_store)
+
+        elif auth_cmd == "status":
+            from .auth.cli import cmd_status
+
+            return cmd_status(token_store)
+
+        elif auth_cmd == "refresh":
+            from .auth.cli import cmd_refresh
+
+            return cmd_refresh(args.provider, token_store)
+
+        elif auth_cmd == "init":
+            from .tools.init import bootstrap_repo
+
+            print(bootstrap_repo())
+            return 0
+
+        else:
+            auth_parser.print_help()
+            return 0
+
+    else:
+        # Default behavior: start server (fallback for MCP runners and unknown args)
+        # This ensures that flags like --transport stdio don't cause an exit
+        if unknown:
+            logger.info(f"Starting MCP server with unknown arguments: {unknown}")
+        asyncio.run(async_main())
+        return 0
+
 
 if __name__ == "__main__":
     main()
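The `system_restart` branch added above relies on a reply-then-exit pattern: the handler schedules the process exit on the event loop so the MCP response can still be flushed over stdio before the process dies and the host respawns it. Below is a minimal, self-contained sketch of that pattern; the function names are illustrative only and not part of the package.

import asyncio
import os


async def handle_restart_request() -> str:
    # Hypothetical handler: schedule the exit instead of exiting inline,
    # so the caller still receives this return value.
    async def restart_soon():
        await asyncio.sleep(1)  # give the reply time to flush over stdio
        os._exit(0)  # hard exit; the supervising host respawns the server

    asyncio.create_task(restart_soon())
    return "Restarting..."


if __name__ == "__main__":
    async def demo():
        print(await handle_restart_request())
        await asyncio.sleep(2)  # keep the loop alive so restart_soon can fire

    asyncio.run(demo())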
mcp_bridge/server_tools.py
CHANGED
@@ -4,6 +4,22 @@ from typing import List
 def get_tool_definitions() -> List[Tool]:
     """Return all Tool definitions for the Stravinsky MCP server."""
     return [
+        Tool(
+            name="stravinsky_version",
+            description="Returns the current version of the Stravinsky MCP bridge and diagnostic info.",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+            },
+        ),
+        Tool(
+            name="system_restart",
+            description="Force-restarts the Stravinsky MCP server by exiting the process. The host (Claude Code) will automatically respawn it, picking up any updated code/packages.",
+            inputSchema={
+                "type": "object",
+                "properties": {},
+            },
+        ),
         Tool(
             name="invoke_gemini",
             description=(
@@ -249,8 +265,10 @@ def get_tool_definitions() -> List[Tool]:
         Tool(
             name="agent_spawn",
             description=(
-                "
-                "
+                "PREFERRED TOOL for parallel work. Spawn multiple agents simultaneously for independent tasks. "
+                "ALWAYS use this when you have 2+ independent research, implementation, or verification tasks. "
+                "Call agent_spawn multiple times in ONE response to run tasks concurrently. "
+                "Each agent runs independently with full Gemini capabilities."
             ),
             inputSchema={
                 "type": "object",
@@ -258,7 +276,7 @@ def get_tool_definitions() -> List[Tool]:
                 "prompt": {"type": "string", "description": "The task for the agent to perform"},
                 "agent_type": {
                     "type": "string",
-                    "description": "Agent type: explore, dewey, frontend, delphi",
+                    "description": "Agent type: explore, dewey, frontend (gemini-3-pro), delphi (gpt-5.2-medium), document_writer, multimodal",
                     "default": "explore",
                 },
                 "description": {"type": "string", "description": "Short description for status display"},