stravinsky 0.2.40__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (56)
  1. mcp_bridge/__init__.py +1 -1
  2. mcp_bridge/auth/token_refresh.py +130 -0
  3. mcp_bridge/cli/__init__.py +6 -0
  4. mcp_bridge/cli/install_hooks.py +1265 -0
  5. mcp_bridge/cli/session_report.py +585 -0
  6. mcp_bridge/hooks/HOOKS_SETTINGS.json +175 -0
  7. mcp_bridge/hooks/README.md +215 -0
  8. mcp_bridge/hooks/__init__.py +119 -43
  9. mcp_bridge/hooks/edit_recovery.py +42 -37
  10. mcp_bridge/hooks/git_noninteractive.py +89 -0
  11. mcp_bridge/hooks/keyword_detector.py +30 -0
  12. mcp_bridge/hooks/manager.py +50 -0
  13. mcp_bridge/hooks/notification_hook.py +103 -0
  14. mcp_bridge/hooks/parallel_enforcer.py +127 -0
  15. mcp_bridge/hooks/parallel_execution.py +111 -0
  16. mcp_bridge/hooks/pre_compact.py +123 -0
  17. mcp_bridge/hooks/preemptive_compaction.py +81 -7
  18. mcp_bridge/hooks/rules_injector.py +507 -0
  19. mcp_bridge/hooks/session_idle.py +116 -0
  20. mcp_bridge/hooks/session_notifier.py +125 -0
  21. mcp_bridge/{native_hooks → hooks}/stravinsky_mode.py +51 -16
  22. mcp_bridge/hooks/subagent_stop.py +98 -0
  23. mcp_bridge/hooks/task_validator.py +73 -0
  24. mcp_bridge/hooks/tmux_manager.py +141 -0
  25. mcp_bridge/hooks/todo_continuation.py +90 -0
  26. mcp_bridge/hooks/todo_delegation.py +88 -0
  27. mcp_bridge/hooks/tool_messaging.py +164 -0
  28. mcp_bridge/hooks/truncator.py +21 -17
  29. mcp_bridge/notifications.py +151 -0
  30. mcp_bridge/prompts/__init__.py +3 -1
  31. mcp_bridge/prompts/dewey.py +30 -20
  32. mcp_bridge/prompts/explore.py +46 -8
  33. mcp_bridge/prompts/multimodal.py +24 -3
  34. mcp_bridge/prompts/planner.py +222 -0
  35. mcp_bridge/prompts/stravinsky.py +107 -28
  36. mcp_bridge/server.py +170 -10
  37. mcp_bridge/server_tools.py +554 -32
  38. mcp_bridge/tools/agent_manager.py +316 -106
  39. mcp_bridge/tools/background_tasks.py +2 -1
  40. mcp_bridge/tools/code_search.py +97 -11
  41. mcp_bridge/tools/lsp/__init__.py +7 -0
  42. mcp_bridge/tools/lsp/manager.py +448 -0
  43. mcp_bridge/tools/lsp/tools.py +637 -150
  44. mcp_bridge/tools/model_invoke.py +270 -47
  45. mcp_bridge/tools/semantic_search.py +2492 -0
  46. mcp_bridge/tools/templates.py +32 -18
  47. stravinsky-0.3.4.dist-info/METADATA +420 -0
  48. stravinsky-0.3.4.dist-info/RECORD +79 -0
  49. stravinsky-0.3.4.dist-info/entry_points.txt +5 -0
  50. mcp_bridge/native_hooks/edit_recovery.py +0 -46
  51. mcp_bridge/native_hooks/truncator.py +0 -23
  52. stravinsky-0.2.40.dist-info/METADATA +0 -204
  53. stravinsky-0.2.40.dist-info/RECORD +0 -57
  54. stravinsky-0.2.40.dist-info/entry_points.txt +0 -3
  55. mcp_bridge/{native_hooks → hooks}/context.py +0 -0
  56. {stravinsky-0.2.40.dist-info → stravinsky-0.3.4.dist-info}/WHEEL +0 -0
mcp_bridge/server.py CHANGED
@@ -98,7 +98,6 @@ async def list_tools() -> list[Tool]:
 @server.call_tool()
 async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
     """Handle tool calls with deep lazy loading of implementations."""
-    logger.info(f"Tool call: {name}")
     hook_manager = get_hook_manager_lazy()
     token_store = get_token_store()
 
@@ -115,7 +114,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         result_content = await invoke_gemini(
             token_store=token_store,
             prompt=arguments["prompt"],
-            model=arguments.get("model", "gemini-2.0-flash-exp"),
+            model=arguments.get("model", "gemini-3-flash"),
             temperature=arguments.get("temperature", 0.7),
             max_tokens=arguments.get("max_tokens", 8192),
             thinking_budget=arguments.get("thinking_budget", 0),
@@ -127,7 +126,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         result_content = await invoke_openai(
             token_store=token_store,
             prompt=arguments["prompt"],
-            model=arguments.get("model", "gpt-4o"),
+            model=arguments.get("model", "gpt-5.2-codex"),
             temperature=arguments.get("temperature", 0.7),
             max_tokens=arguments.get("max_tokens", 4096),
             thinking_budget=arguments.get("thinking_budget", 0),
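The two hunks above only change the fallback value passed to `arguments.get("model", ...)`: callers that supply an explicit `model` argument see no behavioural difference, while callers that omit it now get `gemini-3-flash` or `gpt-5.2-codex`. A minimal sketch of that lookup pattern, using a hypothetical `resolve_model` helper rather than the package's own code:

```python
# Sketch of the `arguments.get(key, default)` pattern shown in the hunks above.
# `resolve_model` is a hypothetical helper, not part of the package.
def resolve_model(arguments: dict, default: str = "gemini-3-flash") -> str:
    # An explicit "model" argument always wins; the default only fills the gap.
    return arguments.get("model", default)

assert resolve_model({}) == "gemini-3-flash"
assert resolve_model({"model": "gemini-2.0-flash-exp"}) == "gemini-2.0-flash-exp"
```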
@@ -144,6 +143,19 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
 
         result_content = await get_system_health()
 
+    elif name == "semantic_health":
+        from .tools.semantic_search import semantic_health
+
+        result_content = await semantic_health(
+            project_path=arguments.get("project_path", "."),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "lsp_health":
+        from .tools.lsp.tools import lsp_health
+
+        result_content = await lsp_health()
+
     # --- SEARCH DISPATCH ---
     elif name == "grep_search":
         from .tools.code_search import grep_search
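The new `semantic_health` and `lsp_health` branches follow the same convention as the rest of `call_tool`: the implementing module is imported inside the branch, so its cost is only paid when the tool is actually requested. A rough sketch of that lazy-import dispatch idea, with a hypothetical registry and `dispatch` helper that are not part of the package (which uses an if/elif chain instead):

```python
import importlib
from typing import Any

# Hypothetical registry in the spirit of call_tool's lazy imports.
_LAZY_TOOLS = {
    "semantic_health": ("mcp_bridge.tools.semantic_search", "semantic_health"),
    "lsp_health": ("mcp_bridge.tools.lsp.tools", "lsp_health"),
}

async def dispatch(name: str, arguments: dict[str, Any]):
    module_name, attr = _LAZY_TOOLS[name]
    # The import cost is paid only when the tool is first requested;
    # later calls resolve through sys.modules.
    fn = getattr(importlib.import_module(module_name), attr)
    return await fn(**arguments)
```

After the first call, `importlib.import_module` is effectively a `sys.modules` lookup, so repeated invocations stay cheap.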
@@ -223,10 +235,7 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         )
 
     elif name == "stravinsky_version":
-        from . import __version__
-        import sys
-        import os
-
+        # sys and os already imported at module level
         result_content = [
             TextContent(
                 type="text",
@@ -380,11 +389,141 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
             character=arguments["character"],
         )
 
+    elif name == "lsp_code_action_resolve":
+        from .tools.lsp import lsp_code_action_resolve
+
+        result_content = await lsp_code_action_resolve(
+            file_path=arguments["file_path"],
+            action_code=arguments["action_code"],
+            line=arguments.get("line"),
+        )
+
+    elif name == "lsp_extract_refactor":
+        from .tools.lsp import lsp_extract_refactor
+
+        result_content = await lsp_extract_refactor(
+            file_path=arguments["file_path"],
+            start_line=arguments["start_line"],
+            start_char=arguments["start_char"],
+            end_line=arguments["end_line"],
+            end_char=arguments["end_char"],
+            new_name=arguments["new_name"],
+            kind=arguments.get("kind", "function"),
+        )
+
     elif name == "lsp_servers":
         from .tools.lsp import lsp_servers
 
         result_content = await lsp_servers()
 
+    elif name == "lsp_diagnostics":
+        from .tools.code_search import lsp_diagnostics
+
+        result_content = await lsp_diagnostics(
+            file_path=arguments["file_path"],
+            severity=arguments.get("severity", "all"),
+        )
+
+    elif name == "semantic_search":
+        from .tools.semantic_search import semantic_search
+
+        result_content = await semantic_search(
+            query=arguments["query"],
+            project_path=arguments.get("project_path", "."),
+            n_results=arguments.get("n_results", 10),
+            language=arguments.get("language"),
+            node_type=arguments.get("node_type"),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "hybrid_search":
+        from .tools.semantic_search import hybrid_search
+
+        result_content = await hybrid_search(
+            query=arguments["query"],
+            pattern=arguments.get("pattern"),
+            project_path=arguments.get("project_path", "."),
+            n_results=arguments.get("n_results", 10),
+            language=arguments.get("language"),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "semantic_index":
+        from .tools.semantic_search import index_codebase
+
+        result_content = await index_codebase(
+            project_path=arguments.get("project_path", "."),
+            force=arguments.get("force", False),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "semantic_stats":
+        from .tools.semantic_search import semantic_stats
+
+        result_content = await semantic_stats(
+            project_path=arguments.get("project_path", "."),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "start_file_watcher":
+        from .tools.semantic_search import start_file_watcher
+
+        result_content = await start_file_watcher(
+            project_path=arguments.get("project_path", "."),
+            provider=arguments.get("provider", "ollama"),
+            debounce_seconds=arguments.get("debounce_seconds", 2.0),
+        )
+
+    elif name == "stop_file_watcher":
+        from .tools.semantic_search import stop_file_watcher
+
+        result_content = await stop_file_watcher(
+            project_path=arguments.get("project_path", "."),
+        )
+
+    elif name == "list_file_watchers":
+        from .tools.semantic_search import list_file_watchers
+
+        result_content = await list_file_watchers()
+
+    elif name == "multi_query_search":
+        from .tools.semantic_search import multi_query_search
+
+        result_content = await multi_query_search(
+            query=arguments["query"],
+            project_path=arguments.get("project_path", "."),
+            n_results=arguments.get("n_results", 10),
+            num_expansions=arguments.get("num_expansions", 3),
+            language=arguments.get("language"),
+            node_type=arguments.get("node_type"),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "decomposed_search":
+        from .tools.semantic_search import decomposed_search
+
+        result_content = await decomposed_search(
+            query=arguments["query"],
+            project_path=arguments.get("project_path", "."),
+            n_results=arguments.get("n_results", 10),
+            language=arguments.get("language"),
+            node_type=arguments.get("node_type"),
+            provider=arguments.get("provider", "ollama"),
+        )
+
+    elif name == "enhanced_search":
+        from .tools.semantic_search import enhanced_search
+
+        result_content = await enhanced_search(
+            query=arguments["query"],
+            project_path=arguments.get("project_path", "."),
+            n_results=arguments.get("n_results", 10),
+            mode=arguments.get("mode", "auto"),
+            language=arguments.get("language"),
+            node_type=arguments.get("node_type"),
+            provider=arguments.get("provider", "ollama"),
+        )
+
     else:
         result_content = f"Unknown tool: {name}"
 
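Each branch above translates the MCP `arguments` dict into keyword arguments, with `arguments.get(...)` filling in defaults such as `project_path="."` and `provider="ollama"`. A self-contained sketch of what that mapping looks like for the `semantic_search` branch; the stub below only imitates the keyword interface implied by the diff, not the real implementation in `mcp_bridge/tools/semantic_search.py`:

```python
import asyncio

# Stub with the keyword interface implied by the "semantic_search" branch above;
# the real implementation lives in mcp_bridge/tools/semantic_search.py.
async def semantic_search(query, project_path=".", n_results=10,
                          language=None, node_type=None, provider="ollama"):
    return f"searched {project_path!r} for {query!r} via {provider} (top {n_results})"

async def demo():
    # Arguments as an MCP client might send them; missing keys fall back to defaults.
    arguments = {"query": "token refresh", "n_results": 5}
    result = await semantic_search(
        query=arguments["query"],
        project_path=arguments.get("project_path", "."),
        n_results=arguments.get("n_results", 10),
        language=arguments.get("language"),
        node_type=arguments.get("node_type"),
        provider=arguments.get("provider", "ollama"),
    )
    print(result)

asyncio.run(demo())
```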
@@ -398,7 +537,10 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
             processed_text = await hook_manager.execute_post_tool_call(
                 name, arguments, result_content[0].text
             )
-            result_content[0].text = processed_text
+            # Only update if processed_text is non-empty to avoid empty text blocks
+            # (API error: cache_control cannot be set for empty text blocks)
+            if processed_text:
+                result_content[0].text = processed_text
         elif isinstance(result_content, str):
             result_content = await hook_manager.execute_post_tool_call(
                 name, arguments, result_content
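The guard exists because `execute_post_tool_call` can return an empty string, and an empty `TextContent.text` block is rejected upstream (the `cache_control` error mentioned in the comment). With the `if processed_text:` check, an empty hook result leaves the original text untouched. A small standalone illustration of that truthiness check, using a stand-in class rather than `mcp.types.TextContent`:

```python
from dataclasses import dataclass

@dataclass
class FakeTextContent:
    """Stand-in for mcp.types.TextContent; only the .text field matters here."""
    text: str

def apply_post_hook(content: FakeTextContent, processed_text: str) -> FakeTextContent:
    # Mirrors the guarded assignment: an empty hook result leaves the
    # original text in place instead of producing an empty text block.
    if processed_text:
        content.text = processed_text
    return content

assert apply_post_hook(FakeTextContent("original"), "").text == "original"
assert apply_post_hook(FakeTextContent("original"), "rewritten").text == "rewritten"
```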
@@ -410,8 +552,11 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
         return [TextContent(type="text", text=str(result_content))]
 
     except Exception as e:
-        logger.error(f"Error calling tool {name}: {e}")
-        return [TextContent(type="text", text=f"Error: {str(e)}")]
+        import traceback
+
+        tb = traceback.format_exc()
+        logger.error(f"Error calling tool {name}: {e}\n{tb}")
+        return [TextContent(type="text", text=f"Error: {str(e)}\n\nTraceback:\n{tb}")]
 
 
 @server.list_prompts()
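The reworked handler both logs the traceback and returns it to the caller. `traceback.format_exc()` formats the exception currently being handled, so it has to run inside the `except` block. A minimal standalone sketch of the same shape; the logger name and the deliberately failing call are made up for illustration:

```python
import logging
import traceback

logger = logging.getLogger("example")  # hypothetical logger, not the package's

def call_tool_safely(name: str) -> str:
    try:
        raise ValueError("boom")  # stand-in for a failing tool implementation
    except Exception as e:
        # format_exc() must be called while the exception is being handled.
        tb = traceback.format_exc()
        logger.error(f"Error calling tool {name}: {e}\n{tb}")
        return f"Error: {e}\n\nTraceback:\n{tb}"

print(call_tool_safely("demo_tool"))
```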
@@ -464,6 +609,15 @@ async def async_main():
     except Exception as e:
         logger.error(f"Failed to initialize hooks: {e}")
 
+    # Start background token refresh scheduler
+    try:
+        from .auth.token_refresh import background_token_refresh
+
+        asyncio.create_task(background_token_refresh(get_token_store()))
+        logger.info("Background token refresh scheduler started")
+    except Exception as e:
+        logger.warning(f"Failed to start token refresh scheduler: {e}")
+
     try:
         async with stdio_server() as (read_stream, write_stream):
             await server.run(
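`background_token_refresh` lives in the new `mcp_bridge/auth/token_refresh.py` (not shown here) and is launched fire-and-forget with `asyncio.create_task`. A hedged sketch of what such a scheduler loop typically looks like; the 300-second interval and the `refresh_expiring_tokens()` method are assumptions, not the module's actual contents:

```python
import asyncio
import logging

logger = logging.getLogger("example")

async def background_token_refresh(token_store, interval_seconds: float = 300.0):
    """Assumed shape of the scheduler; not taken from token_refresh.py."""
    while True:
        try:
            # Hypothetical refresh hook on the token store.
            await token_store.refresh_expiring_tokens()
        except Exception as e:
            logger.warning(f"Token refresh pass failed: {e}")
        await asyncio.sleep(interval_seconds)

# In async_main() the loop is started without awaiting it:
#     asyncio.create_task(background_token_refresh(get_token_store()))
# Keeping a reference to the returned task prevents it from being
# garbage-collected before it finishes.
```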
@@ -474,6 +628,12 @@ async def async_main():
     except Exception as e:
         logger.critical("Server process crashed in async_main", exc_info=True)
         sys.exit(1)
+    finally:
+        logger.info("Initiating shutdown sequence...")
+        from .tools.lsp.manager import get_lsp_manager
+
+        lsp_manager = get_lsp_manager()
+        await lsp_manager.shutdown()
 
 
 def main():
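The added `finally:` block makes LSP shutdown unconditional: language-server child processes are torn down whether the stdio server exits cleanly or crashes. A minimal sketch of the lazy-singleton manager pattern that `get_lsp_manager()` implies; the class below is illustrative only, the real manager is in `mcp_bridge/tools/lsp/manager.py`:

```python
import asyncio

class FakeLSPManager:
    """Stand-in for the real manager; only the shutdown contract is sketched."""

    def __init__(self):
        self._servers: dict[str, asyncio.subprocess.Process] = {}

    async def shutdown(self):
        # Terminate each child language server and wait for it to exit.
        for proc in self._servers.values():
            proc.terminate()
            await proc.wait()
        self._servers.clear()

_manager: FakeLSPManager | None = None

def get_lsp_manager() -> FakeLSPManager:
    # Lazy singleton, matching the get_lsp_manager() accessor used above.
    global _manager
    if _manager is None:
        _manager = FakeLSPManager()
    return _manager
```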