stravinsky 0.1.2__py3-none-any.whl → 0.2.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of stravinsky might be problematic.

Files changed (42)
  1. mcp_bridge/__init__.py +1 -5
  2. mcp_bridge/auth/cli.py +89 -44
  3. mcp_bridge/auth/oauth.py +88 -63
  4. mcp_bridge/hooks/__init__.py +49 -0
  5. mcp_bridge/hooks/agent_reminder.py +61 -0
  6. mcp_bridge/hooks/auto_slash_command.py +186 -0
  7. mcp_bridge/hooks/budget_optimizer.py +38 -0
  8. mcp_bridge/hooks/comment_checker.py +136 -0
  9. mcp_bridge/hooks/compaction.py +32 -0
  10. mcp_bridge/hooks/context_monitor.py +58 -0
  11. mcp_bridge/hooks/directory_context.py +40 -0
  12. mcp_bridge/hooks/edit_recovery.py +41 -0
  13. mcp_bridge/hooks/empty_message_sanitizer.py +240 -0
  14. mcp_bridge/hooks/keyword_detector.py +122 -0
  15. mcp_bridge/hooks/manager.py +96 -0
  16. mcp_bridge/hooks/preemptive_compaction.py +157 -0
  17. mcp_bridge/hooks/session_recovery.py +186 -0
  18. mcp_bridge/hooks/todo_enforcer.py +75 -0
  19. mcp_bridge/hooks/truncator.py +19 -0
  20. mcp_bridge/native_hooks/context.py +38 -0
  21. mcp_bridge/native_hooks/edit_recovery.py +46 -0
  22. mcp_bridge/native_hooks/stravinsky_mode.py +109 -0
  23. mcp_bridge/native_hooks/truncator.py +23 -0
  24. mcp_bridge/prompts/delphi.py +3 -2
  25. mcp_bridge/prompts/dewey.py +105 -21
  26. mcp_bridge/prompts/stravinsky.py +452 -118
  27. mcp_bridge/server.py +491 -668
  28. mcp_bridge/server_tools.py +547 -0
  29. mcp_bridge/tools/__init__.py +13 -3
  30. mcp_bridge/tools/agent_manager.py +359 -190
  31. mcp_bridge/tools/continuous_loop.py +67 -0
  32. mcp_bridge/tools/init.py +50 -0
  33. mcp_bridge/tools/lsp/tools.py +15 -15
  34. mcp_bridge/tools/model_invoke.py +594 -48
  35. mcp_bridge/tools/skill_loader.py +51 -47
  36. mcp_bridge/tools/task_runner.py +141 -0
  37. mcp_bridge/tools/templates.py +175 -0
  38. {stravinsky-0.1.2.dist-info → stravinsky-0.2.38.dist-info}/METADATA +55 -10
  39. stravinsky-0.2.38.dist-info/RECORD +57 -0
  40. stravinsky-0.1.2.dist-info/RECORD +0 -32
  41. {stravinsky-0.1.2.dist-info → stravinsky-0.2.38.dist-info}/WHEEL +0 -0
  42. {stravinsky-0.1.2.dist-info → stravinsky-0.2.38.dist-info}/entry_points.txt +0 -0
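
The bulk of this release is the rewrite of mcp_bridge/server.py (diffed below): tool and prompt definitions move into mcp_bridge/server_tools.py, and each tool implementation is imported inside the call_tool branch that uses it rather than at module import time. A minimal sketch of that lazy-loading pattern, simplified from the diff and not the package's exact code:

# Illustrative sketch only (assumes the stravinsky package layout shown in the diff below).
_token_store = None

def get_token_store():
    # Module-level cache plus a deferred import keeps server startup cheap.
    global _token_store
    if _token_store is None:
        from mcp_bridge.auth.token_store import TokenStore  # imported on first use
        _token_store = TokenStore()
    return _token_store

async def call_tool(name: str, arguments: dict):
    # Each branch imports its implementation only when that tool is invoked.
    if name == "invoke_gemini":
        from mcp_bridge.tools.model_invoke import invoke_gemini
        return await invoke_gemini(token_store=get_token_store(), **arguments)
    raise ValueError(f"Unknown tool: {name}")

The rewritten handler in the diff also routes every call through a hook manager (execute_pre_tool_call / execute_post_tool_call) before and after the dispatched implementation runs.
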
mcp_bridge/server.py CHANGED
@@ -1,17 +1,17 @@
  """
- Claude Superagent MCP Bridge Server
+ Stravinsky MCP Bridge Server - Zero-Import-Weight Architecture
 
- Main entry point for the MCP server that provides tools for:
- - OAuth-authenticated Gemini model invocation
- - OAuth-authenticated OpenAI model invocation
- - LSP tool proxies
- - Session management
-
- Run with: python -m mcp_bridge.server
+ Optimized for extremely fast startup and protocol compliance:
+ - Lazy-loads all tool implementations and dependencies.
+ - Minimal top-level imports.
+ - Robust crash logging to stderr and /tmp.
  """
 
+ import sys
+ import os
  import asyncio
  import logging
+ import time
  from typing import Any
 
  from mcp.server import Server
@@ -25,799 +25,408 @@ from mcp.types import (
  GetPromptResult,
  )
 
- from .auth.token_store import TokenStore
- from .tools.model_invoke import invoke_gemini, invoke_openai
- from .tools.code_search import lsp_diagnostics, ast_grep_search, ast_grep_replace, grep_search, glob_files
- from .tools.session_manager import list_sessions, read_session, search_sessions, get_session_info
- from .tools.skill_loader import list_skills, get_skill, create_skill
- from .tools.background_tasks import task_spawn, task_status, task_list
- from .tools.agent_manager import agent_spawn, agent_output, agent_cancel, agent_list, agent_progress
- from .tools.project_context import get_project_context, get_system_health
- from .tools.lsp import (
- lsp_hover,
- lsp_goto_definition,
- lsp_find_references,
- lsp_document_symbols,
- lsp_workspace_symbols,
- lsp_prepare_rename,
- lsp_rename,
- lsp_code_actions,
- lsp_servers,
- )
- from .prompts import stravinsky, delphi, dewey, explore, frontend, document_writer, multimodal
+ from . import __version__
+
+ # --- CRITICAL: PROTOCOL HYGIENE ---
 
- # Configure logging
- logging.basicConfig(level=logging.INFO)
+ # Configure logging to stderr explicitly to avoid protocol corruption
+ logging.basicConfig(
+ level=logging.INFO, format="%(levelname)s:%(name)s:%(message)s", stream=sys.stderr
+ )
  logger = logging.getLogger(__name__)
 
- # Initialize the MCP server
- server = Server("stravinsky")
 
- # Token store for OAuth tokens
- token_store = TokenStore()
+ # Pre-async crash logger
+ def install_emergency_logger():
+ def handle_exception(exc_type, exc_value, exc_traceback):
+ if issubclass(exc_type, KeyboardInterrupt):
+ sys.__excepthook__(exc_type, exc_value, exc_traceback)
+ return
+ logger.critical("FATAL PRE-STARTUP ERROR", exc_info=(exc_type, exc_value, exc_traceback))
+ try:
+ with open("/tmp/stravinsky_crash.log", "a") as f:
+ import traceback
+
+ f.write(f"\n--- CRASH AT {time.ctime()} ---\n")
+ traceback.print_exception(exc_type, exc_value, exc_traceback, file=f)
+ except:
+ pass
+
+ sys.excepthook = handle_exception
+
+
+ install_emergency_logger()
+
+ # --- SERVER INITIALIZATION ---
+
+ server = Server("stravinsky", version=__version__)
+
+ # Lazy-loaded systems
+ _token_store = None
+ _hook_manager = None
+
+
+ def get_token_store():
+ global _token_store
+ if _token_store is None:
+ from .auth.token_store import TokenStore
+
+ _token_store = TokenStore()
+ return _token_store
+
+
+ def get_hook_manager_lazy():
+ global _hook_manager
+ if _hook_manager is None:
+ from .hooks.manager import get_hook_manager
+
+ _hook_manager = get_hook_manager()
+ return _hook_manager
+
+
+ # --- MCP INTERFACE ---
 
 
  @server.list_tools()
  async def list_tools() -> list[Tool]:
- """List all available tools."""
- tools = [
- Tool(
- name="invoke_gemini",
- description=(
- "Invoke a Gemini model with the given prompt. "
- "Requires OAuth authentication with Google. "
- "Use this for tasks requiring Gemini's capabilities like "
- "frontend UI generation, documentation writing, or multimodal analysis."
- ),
- inputSchema={
- "type": "object",
- "properties": {
- "prompt": {
- "type": "string",
- "description": "The prompt to send to Gemini",
- },
- "model": {
- "type": "string",
- "description": "Gemini model to use (default: gemini-3-flash)",
- "default": "gemini-3-flash",
- },
- "temperature": {
- "type": "number",
- "description": "Sampling temperature (0.0-2.0)",
- "default": 0.7,
- },
- "max_tokens": {
- "type": "integer",
- "description": "Maximum tokens in response",
- "default": 4096,
- },
- "thinking_budget": {
- "type": "integer",
- "description": "Tokens reserved for internal reasoning (if model supports it)",
- "default": 0,
- },
- },
- "required": ["prompt"],
- },
- ),
- Tool(
- name="invoke_openai",
- description=(
- "Invoke an OpenAI model with the given prompt. "
- "Requires OAuth authentication with OpenAI. "
- "Use this for tasks requiring GPT capabilities like "
- "strategic advice, code review, or complex reasoning."
- ),
- inputSchema={
- "type": "object",
- "properties": {
- "prompt": {
- "type": "string",
- "description": "The prompt to send to OpenAI",
- },
- "model": {
- "type": "string",
- "description": "OpenAI model to use (default: gpt-5.2)",
- "default": "gpt-5.2",
- },
- "temperature": {
- "type": "number",
- "description": "Sampling temperature (0.0-2.0)",
- "default": 0.7,
- },
- "max_tokens": {
- "type": "integer",
- "description": "Maximum tokens in response",
- "default": 4096,
- },
- "thinking_budget": {
- "type": "integer",
- "description": "Tokens reserved for internal reasoning (e.g. gpt-5.2 / o1 / o3)",
- "default": 0,
- },
- },
- "required": ["prompt"],
- },
- ),
- Tool(
- name="get_project_context",
- description="Summarize project environment including Git status, local rules (.claude/rules/), and pending todos.",
- inputSchema={
- "type": "object",
- "properties": {
- "project_path": {"type": "string", "description": "Path to the project root"},
- },
- },
- ),
- Tool(
- name="get_system_health",
- description="Comprehensive check of system dependencies (rg, fd, sg, etc.) and authentication status.",
- inputSchema={
- "type": "object",
- "properties": {},
- },
- ),
- Tool(
- name="lsp_diagnostics",
- description="Get diagnostics (errors, warnings) for a file using language tools (tsc, ruff).",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Path to file to analyze"},
- "severity": {"type": "string", "description": "Filter: error, warning, all", "default": "all"},
- },
- "required": ["file_path"],
- },
- ),
- Tool(
- name="ast_grep_search",
- description="Search codebase using ast-grep for structural AST patterns.",
- inputSchema={
- "type": "object",
- "properties": {
- "pattern": {"type": "string", "description": "ast-grep pattern"},
- "directory": {"type": "string", "description": "Directory to search", "default": "."},
- "language": {"type": "string", "description": "Filter by language"},
- },
- "required": ["pattern"],
- },
- ),
- Tool(
- name="grep_search",
- description="Fast text search using ripgrep.",
- inputSchema={
- "type": "object",
- "properties": {
- "pattern": {"type": "string", "description": "Search pattern (regex)"},
- "directory": {"type": "string", "description": "Directory to search", "default": "."},
- "file_pattern": {"type": "string", "description": "Glob filter (e.g. *.py)"},
- },
- "required": ["pattern"],
- },
- ),
- Tool(
- name="glob_files",
- description="Find files matching a glob pattern.",
- inputSchema={
- "type": "object",
- "properties": {
- "pattern": {"type": "string", "description": "Glob pattern (e.g. **/*.py)"},
- "directory": {"type": "string", "description": "Base directory", "default": "."},
- },
- "required": ["pattern"],
- },
- ),
- Tool(
- name="session_list",
- description="List Claude Code sessions with optional filtering.",
- inputSchema={
- "type": "object",
- "properties": {
- "project_path": {"type": "string", "description": "Filter by project path"},
- "limit": {"type": "integer", "description": "Max sessions", "default": 20},
- },
- },
- ),
- Tool(
- name="session_read",
- description="Read messages from a Claude Code session.",
- inputSchema={
- "type": "object",
- "properties": {
- "session_id": {"type": "string", "description": "Session ID"},
- "limit": {"type": "integer", "description": "Max messages"},
- },
- "required": ["session_id"],
- },
- ),
- Tool(
- name="session_search",
- description="Search across Claude Code session messages.",
- inputSchema={
- "type": "object",
- "properties": {
- "query": {"type": "string", "description": "Search query"},
- "session_id": {"type": "string", "description": "Search in specific session"},
- "limit": {"type": "integer", "description": "Max results", "default": 20},
- },
- "required": ["query"],
- },
- ),
- Tool(
- name="skill_list",
- description="List available Claude Code skills/commands from .claude/commands/.",
- inputSchema={
- "type": "object",
- "properties": {
- "project_path": {"type": "string", "description": "Project directory"},
- },
- },
- ),
- Tool(
- name="skill_get",
- description="Get the content of a specific skill/command.",
- inputSchema={
- "type": "object",
- "properties": {
- "name": {"type": "string", "description": "Skill name"},
- "project_path": {"type": "string", "description": "Project directory"},
- },
- "required": ["name"],
- },
- ),
- Tool(
- name="task_spawn",
- description=(
- "Spawn a background task to execute a prompt asynchronously. "
- "Returns a Task ID. Best for deep research or parallel processing."
- ),
- inputSchema={
- "type": "object",
- "properties": {
- "prompt": {"type": "string", "description": "The prompt for the background agent"},
- "model": {
- "type": "string",
- "description": "Model to use (gemini-3-flash or gpt-5.2)",
- "default": "gemini-3-flash"
- },
- },
- "required": ["prompt"],
- },
- ),
- Tool(
- name="task_status",
- description="Check the status and retrieve results of a background task.",
- inputSchema={
- "type": "object",
- "properties": {
- "task_id": {"type": "string", "description": "The ID of the task to check"},
- },
- "required": ["task_id"],
- },
- ),
- Tool(
- name="task_list",
- description="List all active and recent background tasks.",
- inputSchema={
- "type": "object",
- "properties": {},
- },
- ),
- # New Agent Tools with Full Tool Access
- Tool(
- name="agent_spawn",
- description=(
- "Spawn a background agent. Uses Gemini by default for fast execution. "
- "Set model='claude' to use Claude Code CLI with full tool access."
- ),
- inputSchema={
- "type": "object",
- "properties": {
- "prompt": {"type": "string", "description": "The task for the agent to perform"},
- "agent_type": {
- "type": "string",
- "description": "Agent type: explore, dewey, frontend, delphi",
- "default": "explore",
- },
- "description": {"type": "string", "description": "Short description for status display"},
- "model": {
- "type": "string",
- "description": "Model: gemini-3-flash (default) or claude",
- "default": "gemini-3-flash",
- },
- "thinking_budget": {
- "type": "integer",
- "description": "Tokens reserved for internal reasoning (if model supports it)",
- "default": 0,
- },
- },
- "required": ["prompt"],
- },
- ),
- Tool(
- name="agent_output",
- description="Get output from a background agent. Use block=true to wait for completion.",
- inputSchema={
- "type": "object",
- "properties": {
- "task_id": {"type": "string", "description": "The agent task ID"},
- "block": {"type": "boolean", "description": "Wait for completion", "default": False},
- },
- "required": ["task_id"],
- },
- ),
- Tool(
- name="agent_cancel",
- description="Cancel a running background agent.",
- inputSchema={
- "type": "object",
- "properties": {
- "task_id": {"type": "string", "description": "The agent task ID to cancel"},
- },
- "required": ["task_id"],
- },
- ),
- Tool(
- name="agent_list",
- description="List all background agent tasks with their status.",
- inputSchema={
- "type": "object",
- "properties": {},
- },
- ),
- Tool(
- name="agent_progress",
- description="Get real-time progress from a running background agent. Shows recent output lines to monitor what the agent is doing.",
- inputSchema={
- "type": "object",
- "properties": {
- "task_id": {"type": "string", "description": "The agent task ID"},
- "lines": {"type": "integer", "description": "Number of recent lines to show", "default": 20},
- },
- "required": ["task_id"],
- },
- ),
- # LSP Tools
- Tool(
- name="lsp_hover",
- description="Get type info, documentation, and signature at a position in a file.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- "line": {"type": "integer", "description": "Line number (1-indexed)"},
- "character": {"type": "integer", "description": "Character position (0-indexed)"},
- },
- "required": ["file_path", "line", "character"],
- },
- ),
- Tool(
- name="lsp_goto_definition",
- description="Find where a symbol is defined. Jump to symbol definition.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- "line": {"type": "integer", "description": "Line number (1-indexed)"},
- "character": {"type": "integer", "description": "Character position (0-indexed)"},
- },
- "required": ["file_path", "line", "character"],
- },
- ),
- Tool(
- name="lsp_find_references",
- description="Find all references to a symbol across the workspace.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- "line": {"type": "integer", "description": "Line number (1-indexed)"},
- "character": {"type": "integer", "description": "Character position (0-indexed)"},
- "include_declaration": {"type": "boolean", "description": "Include the declaration itself", "default": True},
- },
- "required": ["file_path", "line", "character"],
- },
- ),
- Tool(
- name="lsp_document_symbols",
- description="Get hierarchical outline of all symbols (functions, classes, methods) in a file.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- },
- "required": ["file_path"],
- },
- ),
- Tool(
- name="lsp_workspace_symbols",
- description="Search for symbols by name across the entire workspace.",
- inputSchema={
- "type": "object",
- "properties": {
- "query": {"type": "string", "description": "Symbol name to search for (fuzzy match)"},
- "directory": {"type": "string", "description": "Workspace directory", "default": "."},
- },
- "required": ["query"],
- },
- ),
- Tool(
- name="lsp_prepare_rename",
- description="Check if a symbol at position can be renamed. Use before lsp_rename.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- "line": {"type": "integer", "description": "Line number (1-indexed)"},
- "character": {"type": "integer", "description": "Character position (0-indexed)"},
- },
- "required": ["file_path", "line", "character"],
- },
- ),
- Tool(
- name="lsp_rename",
- description="Rename a symbol across the workspace. Use lsp_prepare_rename first to validate.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- "line": {"type": "integer", "description": "Line number (1-indexed)"},
- "character": {"type": "integer", "description": "Character position (0-indexed)"},
- "new_name": {"type": "string", "description": "New name for the symbol"},
- "dry_run": {"type": "boolean", "description": "Preview changes without applying", "default": True},
- },
- "required": ["file_path", "line", "character", "new_name"],
- },
- ),
- Tool(
- name="lsp_code_actions",
- description="Get available quick fixes and refactorings at a position.",
- inputSchema={
- "type": "object",
- "properties": {
- "file_path": {"type": "string", "description": "Absolute path to the file"},
- "line": {"type": "integer", "description": "Line number (1-indexed)"},
- "character": {"type": "integer", "description": "Character position (0-indexed)"},
- },
- "required": ["file_path", "line", "character"],
- },
- ),
- Tool(
- name="lsp_servers",
- description="List available LSP servers and their installation status.",
- inputSchema={
- "type": "object",
- "properties": {},
- },
- ),
- Tool(
- name="ast_grep_replace",
- description="Replace code patterns using ast-grep's AST-aware replacement. More reliable than text-based replace for refactoring.",
- inputSchema={
- "type": "object",
- "properties": {
- "pattern": {"type": "string", "description": "ast-grep pattern to search (e.g., 'console.log($A)')"},
- "replacement": {"type": "string", "description": "Replacement pattern (e.g., 'logger.debug($A)')"},
- "directory": {"type": "string", "description": "Directory to search in", "default": "."},
- "language": {"type": "string", "description": "Filter by language (typescript, python, etc.)"},
- "dry_run": {"type": "boolean", "description": "Preview changes without applying", "default": True},
- },
- "required": ["pattern", "replacement"],
- },
- ),
- ]
- return tools
+ """List available tools (metadata only)."""
+ from .server_tools import get_tool_definitions
+
+ return get_tool_definitions()
 
 
  @server.call_tool()
  async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
- """Handle tool invocations."""
- logger.info(f"Tool called: {name} with args: {arguments}")
+ """Handle tool calls with deep lazy loading of implementations."""
+ logger.info(f"Tool call: {name}")
+ hook_manager = get_hook_manager_lazy()
+ token_store = get_token_store()
 
  try:
+ # Pre-tool call hooks orchestration
+ arguments = await hook_manager.execute_pre_tool_call(name, arguments)
+
+ result_content = None
+
+ # --- MODEL DISPATCH ---
  if name == "invoke_gemini":
- result = await invoke_gemini(
+ from .tools.model_invoke import invoke_gemini
+
+ result_content = await invoke_gemini(
  token_store=token_store,
  prompt=arguments["prompt"],
- model=arguments.get("model", "gemini-3-flash"),
+ model=arguments.get("model", "gemini-2.0-flash-exp"),
  temperature=arguments.get("temperature", 0.7),
- max_tokens=arguments.get("max_tokens", 4096),
+ max_tokens=arguments.get("max_tokens", 8192),
  thinking_budget=arguments.get("thinking_budget", 0),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "invoke_openai":
- result = await invoke_openai(
+ from .tools.model_invoke import invoke_openai
+
+ result_content = await invoke_openai(
  token_store=token_store,
  prompt=arguments["prompt"],
- model=arguments.get("model", "gpt-5.2"),
+ model=arguments.get("model", "gpt-4o"),
  temperature=arguments.get("temperature", 0.7),
  max_tokens=arguments.get("max_tokens", 4096),
  thinking_budget=arguments.get("thinking_budget", 0),
  )
- return [TextContent(type="text", text=result)]
 
+ # --- CONTEXT DISPATCH ---
  elif name == "get_project_context":
- result = await get_project_context(
- project_path=arguments.get("project_path"),
- )
- return [TextContent(type="text", text=result)]
+ from .tools.project_context import get_project_context
+
+ result_content = await get_project_context(project_path=arguments.get("project_path"))
 
  elif name == "get_system_health":
- result = await get_system_health()
- return [TextContent(type="text", text=result)]
+ from .tools.project_context import get_system_health
 
- elif name == "lsp_diagnostics":
- result = await lsp_diagnostics(
- file_path=arguments["file_path"],
- severity=arguments.get("severity", "all"),
+ result_content = await get_system_health()
+
+ # --- SEARCH DISPATCH ---
+ elif name == "grep_search":
+ from .tools.code_search import grep_search
+
+ result_content = await grep_search(
+ pattern=arguments["pattern"],
+ directory=arguments.get("directory", "."),
+ file_pattern=arguments.get("file_pattern", ""),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "ast_grep_search":
- result = await ast_grep_search(
+ from .tools.code_search import ast_grep_search
+
+ result_content = await ast_grep_search(
  pattern=arguments["pattern"],
  directory=arguments.get("directory", "."),
  language=arguments.get("language", ""),
  )
- return [TextContent(type="text", text=result)]
 
- elif name == "grep_search":
- result = await grep_search(
+ elif name == "ast_grep_replace":
+ from .tools.code_search import ast_grep_replace
+
+ result_content = await ast_grep_replace(
  pattern=arguments["pattern"],
+ replacement=arguments["replacement"],
  directory=arguments.get("directory", "."),
- file_pattern=arguments.get("file_pattern", ""),
+ language=arguments.get("language", ""),
+ dry_run=arguments.get("dry_run", True),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "glob_files":
- result = await glob_files(
+ from .tools.code_search import glob_files
+
+ result_content = await glob_files(
  pattern=arguments["pattern"],
  directory=arguments.get("directory", "."),
  )
- return [TextContent(type="text", text=result)]
 
+ # --- SESSION DISPATCH ---
  elif name == "session_list":
- result = list_sessions(
+ from .tools.session_manager import list_sessions
+
+ result_content = list_sessions(
  project_path=arguments.get("project_path"),
  limit=arguments.get("limit", 20),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "session_read":
- result = read_session(
+ from .tools.session_manager import read_session
+
+ result_content = read_session(
  session_id=arguments["session_id"],
  limit=arguments.get("limit"),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "session_search":
- result = search_sessions(
+ from .tools.session_manager import search_sessions
+
+ result_content = search_sessions(
  query=arguments["query"],
  session_id=arguments.get("session_id"),
  limit=arguments.get("limit", 20),
  )
- return [TextContent(type="text", text=result)]
 
+ # --- SKILL DISPATCH ---
  elif name == "skill_list":
- result = list_skills(
- project_path=arguments.get("project_path"),
- )
- return [TextContent(type="text", text=result)]
+ from .tools.skill_loader import list_skills
+
+ result_content = list_skills(project_path=arguments.get("project_path"))
 
  elif name == "skill_get":
- result = get_skill(
+ from .tools.skill_loader import get_skill
+
+ result_content = get_skill(
  name=arguments["name"],
  project_path=arguments.get("project_path"),
  )
 
- elif name == "task_spawn":
- result = await task_spawn(
- prompt=arguments["prompt"],
- model=arguments.get("model", "gemini-3-flash"),
- )
- return [TextContent(type="text", text=result)]
-
- elif name == "task_status":
- result = await task_status(
- task_id=arguments["task_id"],
- )
- return [TextContent(type="text", text=result)]
-
- elif name == "task_list":
- result = await task_list()
- return [TextContent(type="text", text=result)]
-
- # Agent tools with full tool access
+ elif name == "stravinsky_version":
+ from . import __version__
+ import sys
+ import os
+
+ result_content = [
+ TextContent(
+ type="text",
+ text=f"Stravinsky Bridge v{__version__}\n"
+ f"Python: {sys.version.split()[0]}\n"
+ f"Platform: {sys.platform}\n"
+ f"CWD: {os.getcwd()}\n"
+ f"CLI: {os.environ.get('CLAUDE_CLI', '/opt/homebrew/bin/claude')}",
+ )
+ ]
+
+ elif name == "system_restart":
+ # Schedule a restart. We can't exit immediately or MCP will error on the reply.
+ # We'll use a small delay.
+ async def restart_soon():
+ await asyncio.sleep(1)
+ os._exit(0) # Immediate exit
+
+ asyncio.create_task(restart_soon())
+ result_content = [
+ TextContent(
+ type="text",
+ text="🚀 Restarting Stravinsky Bridge... This process will exit and Claude Code will automatically respawn it. Please wait a few seconds before calling tools again.",
+ )
+ ]
+
+ # --- AGENT DISPATCH ---
  elif name == "agent_spawn":
- result = await agent_spawn(
- prompt=arguments["prompt"],
- agent_type=arguments.get("agent_type", "explore"),
- description=arguments.get("description", ""),
- model=arguments.get("model", "gemini-3-flash"),
- thinking_budget=arguments.get("thinking_budget", 0),
- )
- return [TextContent(type="text", text=result)]
+ from .tools.agent_manager import agent_spawn
+
+ result_content = await agent_spawn(**arguments)
 
  elif name == "agent_output":
- result = await agent_output(
+ from .tools.agent_manager import agent_output
+
+ result_content = await agent_output(
  task_id=arguments["task_id"],
  block=arguments.get("block", False),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "agent_cancel":
- result = await agent_cancel(
- task_id=arguments["task_id"],
- )
- return [TextContent(type="text", text=result)]
+ from .tools.agent_manager import agent_cancel
+
+ result_content = await agent_cancel(task_id=arguments["task_id"])
 
  elif name == "agent_list":
- result = await agent_list()
- return [TextContent(type="text", text=result)]
+ from .tools.agent_manager import agent_list
+
+ result_content = await agent_list()
 
  elif name == "agent_progress":
- result = await agent_progress(
+ from .tools.agent_manager import agent_progress
+
+ result_content = await agent_progress(
  task_id=arguments["task_id"],
  lines=arguments.get("lines", 20),
  )
- return [TextContent(type="text", text=result)]
 
- # LSP Tools
+ elif name == "agent_retry":
+ from .tools.agent_manager import agent_retry
+
+ result_content = await agent_retry(
+ task_id=arguments["task_id"],
+ new_prompt=arguments.get("new_prompt"),
+ new_timeout=arguments.get("new_timeout"),
+ )
+
+ # --- BACKGROUND TASK DISPATCH ---
+ elif name == "task_spawn":
+ from .tools.background_tasks import task_spawn
+
+ result_content = await task_spawn(
+ prompt=arguments["prompt"],
+ model=arguments.get("model", "gemini-3-flash"),
+ )
+
+ elif name == "task_status":
+ from .tools.background_tasks import task_status
+
+ result_content = await task_status(task_id=arguments["task_id"])
+
+ elif name == "task_list":
+ from .tools.background_tasks import task_list
+
+ result_content = await task_list()
+
+ # --- LSP DISPATCH ---
  elif name == "lsp_hover":
- result = await lsp_hover(
+ from .tools.lsp import lsp_hover
+
+ result_content = await lsp_hover(
  file_path=arguments["file_path"],
  line=arguments["line"],
  character=arguments["character"],
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "lsp_goto_definition":
- result = await lsp_goto_definition(
+ from .tools.lsp import lsp_goto_definition
+
+ result_content = await lsp_goto_definition(
  file_path=arguments["file_path"],
  line=arguments["line"],
  character=arguments["character"],
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "lsp_find_references":
- result = await lsp_find_references(
+ from .tools.lsp import lsp_find_references
+
+ result_content = await lsp_find_references(
  file_path=arguments["file_path"],
  line=arguments["line"],
  character=arguments["character"],
  include_declaration=arguments.get("include_declaration", True),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "lsp_document_symbols":
- result = await lsp_document_symbols(
- file_path=arguments["file_path"],
- )
- return [TextContent(type="text", text=result)]
+ from .tools.lsp import lsp_document_symbols
+
+ result_content = await lsp_document_symbols(file_path=arguments["file_path"])
 
  elif name == "lsp_workspace_symbols":
- result = await lsp_workspace_symbols(
- query=arguments["query"],
- directory=arguments.get("directory", "."),
- )
- return [TextContent(type="text", text=result)]
+ from .tools.lsp import lsp_workspace_symbols
+
+ result_content = await lsp_workspace_symbols(query=arguments["query"])
 
  elif name == "lsp_prepare_rename":
- result = await lsp_prepare_rename(
+ from .tools.lsp import lsp_prepare_rename
+
+ result_content = await lsp_prepare_rename(
  file_path=arguments["file_path"],
  line=arguments["line"],
  character=arguments["character"],
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "lsp_rename":
- result = await lsp_rename(
+ from .tools.lsp import lsp_rename
+
+ result_content = await lsp_rename(
  file_path=arguments["file_path"],
  line=arguments["line"],
  character=arguments["character"],
  new_name=arguments["new_name"],
- dry_run=arguments.get("dry_run", True),
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "lsp_code_actions":
- result = await lsp_code_actions(
+ from .tools.lsp import lsp_code_actions
+
+ result_content = await lsp_code_actions(
  file_path=arguments["file_path"],
  line=arguments["line"],
  character=arguments["character"],
  )
- return [TextContent(type="text", text=result)]
 
  elif name == "lsp_servers":
- result = await lsp_servers()
- return [TextContent(type="text", text=result)]
+ from .tools.lsp import lsp_servers
 
- elif name == "ast_grep_replace":
- result = await ast_grep_replace(
- pattern=arguments["pattern"],
- replacement=arguments["replacement"],
- directory=arguments.get("directory", "."),
- language=arguments.get("language", ""),
- dry_run=arguments.get("dry_run", True),
- )
- return [TextContent(type="text", text=result)]
+ result_content = await lsp_servers()
 
  else:
- return [TextContent(type="text", text=f"Unknown tool: {name}")]
+ result_content = f"Unknown tool: {name}"
+
+ # Post-tool call hooks orchestration
+ if result_content is not None:
+ if (
+ isinstance(result_content, list)
+ and len(result_content) > 0
+ and hasattr(result_content[0], "text")
+ ):
+ processed_text = await hook_manager.execute_post_tool_call(
+ name, arguments, result_content[0].text
+ )
+ result_content[0].text = processed_text
+ elif isinstance(result_content, str):
+ result_content = await hook_manager.execute_post_tool_call(
+ name, arguments, result_content
+ )
+
+ # Format final return as List[TextContent]
+ if isinstance(result_content, list):
+ return result_content
+ return [TextContent(type="text", text=str(result_content))]
 
  except Exception as e:
- logger.error(f"Error in tool {name}: {e}")
+ logger.error(f"Error calling tool {name}: {e}")
  return [TextContent(type="text", text=f"Error: {str(e)}")]
 
 
  @server.list_prompts()
  async def list_prompts() -> list[Prompt]:
- """List available agent prompts."""
- return [
- Prompt(
- name="stravinsky",
- description=(
- "Stravinsky - Powerful AI orchestrator. "
- "Plans obsessively with todos, assesses search complexity before "
- "exploration, delegates strategically to specialized agents."
- ),
- arguments=[],
- ),
- Prompt(
- name="delphi",
- description=(
- "Delphi - Strategic advisor using GPT for debugging, "
- "architecture review, and complex problem solving."
- ),
- arguments=[],
- ),
- Prompt(
- name="dewey",
- description=(
- "Dewey - Documentation and GitHub research specialist. "
- "Finds implementation examples, official docs, and code patterns."
- ),
- arguments=[],
- ),
- Prompt(
- name="explore",
- description=(
- "Explore - Fast codebase search specialist. "
- "Answers 'Where is X?', finds files and code patterns."
- ),
- arguments=[],
- ),
- Prompt(
- name="frontend",
- description=(
- "Frontend UI/UX Engineer - Designer-turned-developer for stunning visuals. "
- "Excels at styling, layout, animation, typography."
- ),
- arguments=[],
- ),
- Prompt(
- name="document_writer",
- description=(
- "Document Writer - Technical documentation specialist. "
- "README files, API docs, architecture docs, user guides."
- ),
- arguments=[],
- ),
- Prompt(
- name="multimodal",
- description=(
- "Multimodal Looker - Visual content analysis. "
- "PDFs, images, diagrams - extracts and interprets visual data."
- ),
- arguments=[],
- ),
- ]
+ """List available prompts (metadata only)."""
+ from .server_tools import get_prompt_definitions
+
+ return get_prompt_definitions()
 
 
  @server.get_prompt()
  async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptResult:
- """Get a specific agent prompt."""
+ """Get a specific prompt content (lazy loaded)."""
+ from .prompts import stravinsky, delphi, dewey, explore, frontend, document_writer, multimodal
+
  prompts_map = {
  "stravinsky": ("Stravinsky orchestrator system prompt", stravinsky.get_stravinsky_prompt),
  "delphi": ("Delphi advisor system prompt", delphi.get_delphi_prompt),
@@ -827,13 +436,13 @@ async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptRe
  "document_writer": ("Document Writer prompt", document_writer.get_document_writer_prompt),
  "multimodal": ("Multimodal Looker prompt", multimodal.get_multimodal_prompt),
  }
-
+
  if name not in prompts_map:
  raise ValueError(f"Unknown prompt: {name}")
-
+
  description, get_prompt_fn = prompts_map[name]
  prompt_text = get_prompt_fn()
-
+
  return GetPromptResult(
  description=description,
  messages=[
@@ -846,20 +455,234 @@ async def get_prompt(name: str, arguments: dict[str, str] | None) -> GetPromptRe
 
 
  async def async_main():
- """Async entry point for the MCP server."""
- logger.info("Starting Stravinsky MCP Bridge Server...")
+ """Server execution entry point."""
+ # Initialize hooks at runtime, not import time
+ try:
+ from .hooks import initialize_hooks
+
+ initialize_hooks()
+ except Exception as e:
+ logger.error(f"Failed to initialize hooks: {e}")
 
- async with stdio_server() as (read_stream, write_stream):
- await server.run(
- read_stream,
- write_stream,
- server.create_initialization_options(),
- )
+ try:
+ async with stdio_server() as (read_stream, write_stream):
+ await server.run(
+ read_stream,
+ write_stream,
+ server.create_initialization_options(),
+ )
+ except Exception as e:
+ logger.critical("Server process crashed in async_main", exc_info=True)
+ sys.exit(1)
 
 
  def main():
- """Synchronous main entry point for uvx/CLI."""
- asyncio.run(async_main())
+ """Synchronous entry point with CLI arg handling."""
+ import argparse
+ import sys
+ from .tools.agent_manager import get_manager
+ from .auth.token_store import TokenStore
+
+ parser = argparse.ArgumentParser(
+ description="Stravinsky MCP Bridge - Multi-model AI orchestration for Claude Code. "
+ "Spawns background agents with full tool access via Claude CLI.",
+ prog="stravinsky",
+ epilog="Examples:\n"
+ " stravinsky # Start MCP server (default)\n"
+ " stravinsky list # Show all background agents\n"
+ " stravinsky status # Check auth status\n"
+ " stravinsky stop --clear # Stop agents and clear history\n",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument("--version", action="version", version=f"stravinsky {__version__}")
+
+ subparsers = parser.add_subparsers(dest="command", help="Available commands", metavar="COMMAND")
+
+ # list command
+ subparsers.add_parser(
+ "list",
+ help="List all background agent tasks",
+ description="Shows status, ID, type, and description of all spawned agents.",
+ )
+
+ # status command
+ subparsers.add_parser(
+ "status",
+ help="Show authentication status for all providers",
+ description="Displays OAuth authentication status and token expiration for Gemini and OpenAI.",
+ )
+
+ # start command (explicit server start)
+ subparsers.add_parser(
+ "start",
+ help="Explicitly start the MCP server (STDIO transport)",
+ description="Starts the MCP server for communication with Claude Code. Usually started automatically.",
+ )
+
+ # stop command (stop all agents)
+ stop_parser = subparsers.add_parser(
+ "stop",
+ help="Stop all running background agents",
+ description="Terminates all active agent processes. Use --clear to also remove history.",
+ )
+ stop_parser.add_argument(
+ "--clear",
+ action="store_true",
+ help="Also clear agent history from .stravinsky/agents.json",
+ )
+
+ # auth command (authentication)
+ auth_parser = subparsers.add_parser(
+ "auth",
+ help="Authentication commands (login/logout/refresh/status)",
+ description="Manage OAuth authentication for Gemini and OpenAI providers.",
+ )
+ auth_subparsers = auth_parser.add_subparsers(
+ dest="auth_command", help="Auth subcommands", metavar="SUBCOMMAND"
+ )
+
+ # auth login
+ login_parser = auth_subparsers.add_parser(
+ "login",
+ help="Login to a provider via browser OAuth",
+ description="Opens browser for OAuth authentication with the specified provider.",
+ )
+ login_parser.add_argument(
+ "provider",
+ choices=["gemini", "openai"],
+ metavar="PROVIDER",
+ help="Provider to authenticate with: gemini (Google) or openai (ChatGPT Plus/Pro)",
+ )
+
+ # auth logout
+ logout_parser = auth_subparsers.add_parser(
+ "logout",
+ help="Remove stored OAuth credentials",
+ description="Deletes stored access and refresh tokens for the specified provider.",
+ )
+ logout_parser.add_argument(
+ "provider",
+ choices=["gemini", "openai"],
+ metavar="PROVIDER",
+ help="Provider to logout from: gemini or openai",
+ )
+
+ # auth status
+ auth_subparsers.add_parser(
+ "status",
+ help="Show authentication status for all providers",
+ description="Displays authentication status and token expiration for Gemini and OpenAI.",
+ )
+
+ # auth refresh
+ refresh_parser = auth_subparsers.add_parser(
+ "refresh",
+ help="Manually refresh access token",
+ description="Force-refresh the access token using the stored refresh token.",
+ )
+ refresh_parser.add_argument(
+ "provider",
+ choices=["gemini", "openai"],
+ metavar="PROVIDER",
+ help="Provider to refresh token for: gemini or openai",
+ )
+
+ # auth init
+ auth_subparsers.add_parser(
+ "init",
+ help="Bootstrap current repository for Stravinsky",
+ description="Creates .stravinsky/ directory structure and copies default configuration files.",
+ )
+
+ # Check for CLI flags
+ args, unknown = parser.parse_known_args()
+
+ if args.command == "list":
+ # Run agent_list logic
+ manager = get_manager()
+ tasks = manager.list_tasks()
+ if not tasks:
+ print("No background agent tasks found.")
+ return 0
+
+ print("\nStravinsky Background Agents:")
+ print("-" * 100)
+ print(f"{'STATUS':10} | {'ID':15} | {'TYPE':10} | {'STARTED':20} | DESCRIPTION")
+ print("-" * 100)
+ for t in sorted(tasks, key=lambda x: x.get("created_at", ""), reverse=True):
+ status = t["status"]
+ task_id = t["id"]
+ agent = t["agent_type"]
+ created = t.get("created_at", "")[:19].replace("T", " ") # Format datetime
+ desc = t.get("description", t.get("prompt", "")[:40])[:40]
+ print(f"{status.upper():10} | {task_id:15} | {agent:10} | {created:20} | {desc}")
+
+ # Show error for failed agents
+ if status == "failed" and t.get("error"):
+ error_msg = t["error"][:100].replace("\n", " ")
+ print(f" └─ ERROR: {error_msg}")
+ print("-" * 100)
+ return 0
+
+ elif args.command == "status":
+ from .auth.cli import cmd_status
+
+ return cmd_status(TokenStore())
+
+ elif args.command == "start":
+ asyncio.run(async_main())
+ return 0
+
+ elif args.command == "stop":
+ manager = get_manager()
+ count = manager.stop_all(clear_history=getattr(args, "clear", False))
+ if getattr(args, "clear", False):
+ print(f"Cleared {count} agent task(s) from history.")
+ else:
+ print(f"Stopped {count} running agent(s).")
+ return 0
+
+ elif args.command == "auth":
+ auth_cmd = getattr(args, "auth_command", None)
+ token_store = get_token_store()
+
+ if auth_cmd == "login":
+ from .auth.cli import cmd_login
+
+ return cmd_login(args.provider, token_store)
+
+ elif auth_cmd == "logout":
+ from .auth.cli import cmd_logout
+
+ return cmd_logout(args.provider, token_store)
+
+ elif auth_cmd == "status":
+ from .auth.cli import cmd_status
+
+ return cmd_status(token_store)
+
+ elif auth_cmd == "refresh":
+ from .auth.cli import cmd_refresh
+
+ return cmd_refresh(args.provider, token_store)
+
+ elif auth_cmd == "init":
+ from .tools.init import bootstrap_repo
+
+ print(bootstrap_repo())
+ return 0
+
+ else:
+ auth_parser.print_help()
+ return 0
+
+ else:
+ # Default behavior: start server (fallback for MCP runners and unknown args)
+ # This ensures that flags like --transport stdio don't cause an exit
+ if unknown:
+ logger.info(f"Starting MCP server with unknown arguments: {unknown}")
+ asyncio.run(async_main())
+ return 0
 
 
  if __name__ == "__main__":