stravinsky 0.2.40__py3-none-any.whl → 0.2.52__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of stravinsky might be problematic.
- mcp_bridge/__init__.py +1 -1
- mcp_bridge/auth/token_refresh.py +130 -0
- mcp_bridge/hooks/__init__.py +18 -1
- mcp_bridge/hooks/manager.py +50 -0
- mcp_bridge/hooks/parallel_enforcer.py +127 -0
- mcp_bridge/hooks/pre_compact.py +224 -0
- mcp_bridge/hooks/preemptive_compaction.py +81 -7
- mcp_bridge/hooks/session_idle.py +116 -0
- mcp_bridge/native_hooks/todo_delegation.py +54 -0
- mcp_bridge/prompts/__init__.py +3 -1
- mcp_bridge/prompts/dewey.py +30 -20
- mcp_bridge/prompts/explore.py +46 -8
- mcp_bridge/prompts/planner.py +222 -0
- mcp_bridge/prompts/stravinsky.py +107 -28
- mcp_bridge/server.py +64 -9
- mcp_bridge/server_tools.py +159 -32
- mcp_bridge/tools/agent_manager.py +173 -85
- mcp_bridge/tools/background_tasks.py +2 -1
- mcp_bridge/tools/model_invoke.py +194 -46
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/METADATA +1 -1
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/RECORD +23 -17
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/WHEEL +0 -0
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/entry_points.txt +0 -0
mcp_bridge/tools/agent_manager.py

@@ -11,6 +11,7 @@ import os
 import shutil
 import subprocess
 import signal
+import time
 import uuid
 from dataclasses import asdict, dataclass, field
 from datetime import datetime
@@ -21,6 +22,38 @@ import logging

 logger = logging.getLogger(__name__)

+# Model routing configuration
+# Specialized agents call external models via MCP tools:
+# explore/dewey/document_writer/multimodal → invoke_gemini(gemini-3-flash)
+# frontend → invoke_gemini(gemini-3-pro-high)
+# delphi → invoke_openai(gpt-5.2)
+# Non-specialized coding tasks use Claude CLI with --model sonnet
+AGENT_MODEL_ROUTING = {
+    # Specialized agents - no CLI model flag, they call invoke_* tools
+    "explore": None,
+    "dewey": None,
+    "document_writer": None,
+    "multimodal": None,
+    "frontend": None,
+    "delphi": None,
+    # Planner uses Opus for superior reasoning about dependencies and parallelization
+    "planner": "opus",
+    # Default for unknown agent types (coding tasks) - use Sonnet 4.5
+    "_default": "sonnet",
+}
+
+# Cost tier classification (from oh-my-opencode pattern)
+AGENT_COST_TIERS = {
+    "explore": "CHEAP",          # Uses gemini-3-flash
+    "dewey": "CHEAP",            # Uses gemini-3-flash
+    "document_writer": "CHEAP",  # Uses gemini-3-flash
+    "multimodal": "CHEAP",       # Uses gemini-3-flash
+    "frontend": "MEDIUM",        # Uses gemini-3-pro-high
+    "delphi": "EXPENSIVE",       # Uses gpt-5.2 (OpenAI GPT)
+    "planner": "EXPENSIVE",      # Uses Claude Opus 4.5
+    "_default": "EXPENSIVE",     # Claude Sonnet 4.5 via CLI
+}
+

 @dataclass
 class AgentTask:
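As a quick cross-reference of the two tables added above, a hypothetical helper (not part of the package; the agent type "bugfix" is a made-up example showing the fallback):

```python
# Hypothetical helper: pairs an agent type with its CLI model and cost tier
# using the AGENT_MODEL_ROUTING and AGENT_COST_TIERS tables from the hunk above.
def describe_agent(agent_type: str) -> str:
    model = AGENT_MODEL_ROUTING.get(agent_type, AGENT_MODEL_ROUTING["_default"])
    tier = AGENT_COST_TIERS.get(agent_type, AGENT_COST_TIERS["_default"])
    return f"{agent_type}: cli_model={model!r}, cost_tier={tier}"

print(describe_agent("explore"))   # explore: cli_model=None, cost_tier=CHEAP
print(describe_agent("planner"))   # planner: cli_model='opus', cost_tier=EXPENSIVE
print(describe_agent("bugfix"))    # bugfix: cli_model='sonnet', cost_tier=EXPENSIVE (fallback)
```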
@@ -86,7 +119,7 @@ class AgentManager:

         # In-memory tracking for running processes
         self._processes: Dict[str, subprocess.Popen] = {}
-        self._notification_queue: Dict[str, List[
+        self._notification_queue: Dict[str, List[Dict[str, Any]]] = {}

     def _load_tasks(self) -> Dict[str, Any]:
         """Load tasks from persistent storage."""
@@ -155,7 +188,9 @@
         Returns:
             Task ID for tracking
         """
-
+        import uuid as uuid_module  # Local import for MCP context
+
+        task_id = f"agent_{uuid_module.uuid4().hex[:8]}"

         task = AgentTask(
             id=task_id,
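For context, the ID format this produces is an "agent_" prefix plus eight hex characters; a minimal sketch (the printed value is invented):

```python
import uuid

# Same shape as the task IDs generated in the hunk above.
task_id = f"agent_{uuid.uuid4().hex[:8]}"
print(task_id)  # e.g. agent_3f9a1c2e (illustrative value only)
```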
@@ -220,12 +255,20 @@
             full_prompt,
             "--output-format",
             "text",
+            "--dangerously-skip-permissions",  # Critical: bypass permission prompts
         ]

-        #
-        #
-        #
-
+        # Model routing:
+        # - Specialized agents (explore/dewey/etc): None = use CLI default, they call invoke_*
+        # - Unknown agent types (coding tasks): Use Sonnet 4.5
+        if agent_type in AGENT_MODEL_ROUTING:
+            cli_model = AGENT_MODEL_ROUTING[agent_type]  # None for specialized
+        else:
+            cli_model = AGENT_MODEL_ROUTING.get("_default", "sonnet")
+
+        if cli_model:
+            cmd.extend(["--model", cli_model])
+            logger.info(f"[AgentManager] Using --model {cli_model} for {agent_type} agent")

         # Add system prompt file if we have one
         if system_prompt:
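Tracing the new --model branch for a few agent types; "bugfix" is an invented type used only to show the fallback path:

```python
# Resolving the optional --model flag the same way the hunk above does.
for agent_type in ("explore", "planner", "bugfix"):
    if agent_type in AGENT_MODEL_ROUTING:
        cli_model = AGENT_MODEL_ROUTING[agent_type]   # None for specialized agents
    else:
        cli_model = AGENT_MODEL_ROUTING.get("_default", "sonnet")
    print(agent_type, ["--model", cli_model] if cli_model else [])
# explore []                      -> no flag; the agent routes to invoke_gemini itself
# planner ['--model', 'opus']
# bugfix  ['--model', 'sonnet']   -> unknown type falls back to the default
```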
@@ -240,6 +283,7 @@
         # (Previously used file handle which was closed before process finished)
         process = subprocess.Popen(
             cmd,
+            stdin=subprocess.DEVNULL,  # Critical: prevent stdin blocking
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             text=True,
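A minimal standalone sketch of why stdin=subprocess.DEVNULL matters for a detached child: the child sees EOF immediately instead of hanging on inherited terminal input ("cat" is a Unix stand-in for illustration, not the stravinsky CLI):

```python
import subprocess

# With DEVNULL the child gets EOF on stdin at once and exits instead of blocking,
# which is the failure mode the added argument above prevents.
proc = subprocess.Popen(
    ["cat"],                   # placeholder command for illustration
    stdin=subprocess.DEVNULL,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,
)
out, err = proc.communicate(timeout=5)
print(proc.returncode, repr(out))  # 0 '' -- exits immediately on EOF
```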
@@ -416,9 +460,13 @@
         start = datetime.now()
         while (datetime.now() - start).total_seconds() < timeout:
             task = self.get_task(task_id)
-            if task["status"] != "running":
+            if not task or task["status"] != "running":
                 break
-
+            time.sleep(0.5)
+
+        # Refresh task state after potential blocking wait
+        if not task:
+            return f"Task {task_id} not found."

         status = task["status"]
         description = task.get("description", "")
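The change above turns the wait into a standard poll-with-timeout loop; the same pattern in isolation (get_status is a made-up stand-in for self.get_task):

```python
import time
from datetime import datetime

def wait_until_done(get_status, timeout: float = 30.0, interval: float = 0.5):
    """Poll get_status() until the task stops running or the timeout elapses,
    sleeping between checks so the loop does not busy-spin."""
    start = datetime.now()
    task = None
    while (datetime.now() - start).total_seconds() < timeout:
        task = get_status()        # stand-in for self.get_task(task_id)
        if not task or task["status"] != "running":
            break
        time.sleep(interval)       # mirrors the new time.sleep(0.5)
    return task
```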
@@ -601,88 +649,128 @@ async def agent_spawn(
     manager = get_manager()

     # Map agent types to system prompts
+    # ALL agents use invoke_gemini or invoke_openai - NOT Claude directly
+    # explore/dewey/document_writer/multimodal/frontend → gemini-3-flash
+    # delphi → openai gpt-5.2
     system_prompts = {
-        "explore": "You are a codebase exploration specialist. Find files, patterns, and answer 'where is X?' questions
-        … [old system prompt text, lines 606-671, not captured in this extract]
+        "explore": """You are a codebase exploration specialist. Find files, patterns, and answer 'where is X?' questions.
+
+MODEL ROUTING (MANDATORY):
+You MUST use invoke_gemini with model="gemini-3-flash" for ALL analysis and reasoning.
+Use Claude's native tools (Read, Grep, Glob) ONLY for file access, then pass content to invoke_gemini.
+
+WORKFLOW:
+1. Use Read/Grep/Glob to get file contents
+2. Call invoke_gemini(prompt="Analyze this: <content>", model="gemini-3-flash", agent_context={"agent_type": "explore"}) for analysis
+3. Return the Gemini response""",
+        "dewey": """You are a documentation and research specialist. Find implementation examples and official docs.
+
+MODEL ROUTING (MANDATORY):
+You MUST use invoke_gemini with model="gemini-3-flash" for ALL analysis, summarization, and reasoning.
+
+WORKFLOW:
+1. Gather information using available tools
+2. Call invoke_gemini(prompt="<task>", model="gemini-3-flash", agent_context={"agent_type": "dewey"}) for processing
+3. Return the Gemini response""",
+        "frontend": """You are a Senior Frontend Architect & UI Designer.
+
+MODEL ROUTING (MANDATORY):
+You MUST use invoke_gemini with model="gemini-3-pro-high" for ALL code generation and design work.
+
+DESIGN PHILOSOPHY:
+- Anti-Generic: Reject standard layouts. Bespoke, asymmetric, distinctive.
+- Library Discipline: Use existing UI libraries (Shadcn, Radix, MUI) if detected.
+- Stack: React/Vue/Svelte, Tailwind/Custom CSS, semantic HTML5.
+
+WORKFLOW:
+1. Analyze requirements
+2. Call invoke_gemini(prompt="Generate frontend code for: <task>", model="gemini-3-pro-high", agent_context={"agent_type": "frontend"})
+3. Return the code""",
+        "delphi": """You are a strategic technical advisor for architecture and hard debugging.
+
+MODEL ROUTING (MANDATORY):
+You MUST use invoke_openai with model="gpt-5.2" for ALL strategic advice and analysis.
+
+WORKFLOW:
+1. Gather context about the problem
+2. Call invoke_openai(prompt="<problem description>", model="gpt-5.2", agent_context={"agent_type": "delphi"})
+3. Return the GPT response""",
+        "document_writer": """You are a Technical Documentation Specialist.
+
+MODEL ROUTING (MANDATORY):
+You MUST use invoke_gemini with model="gemini-3-flash" for ALL documentation generation.
+
+DOCUMENT TYPES: README, API docs, ADRs, user guides, inline docs.
+
+WORKFLOW:
+1. Gather context about what to document
+2. Call invoke_gemini(prompt="Write documentation for: <topic>", model="gemini-3-flash", agent_context={"agent_type": "document_writer"})
+3. Return the documentation""",
+        "multimodal": """You interpret media files (PDFs, images, diagrams, screenshots).
+
+MODEL ROUTING (MANDATORY):
+You MUST use invoke_gemini with model="gemini-3-flash" for ALL visual analysis.
+
+WORKFLOW:
+1. Receive file path and extraction goal
+2. Call invoke_gemini(prompt="Analyze this file: <path>. Extract: <goal>", model="gemini-3-flash", agent_context={"agent_type": "multimodal"})
+3. Return extracted information only""",
+        "planner": """You are a pre-implementation planning specialist. You analyze requests and produce structured implementation plans BEFORE any code changes begin.
+
+PURPOSE:
+- Analyze requests and produce actionable implementation plans
+- Identify dependencies and parallelization opportunities
+- Enable efficient parallel execution by the orchestrator
+- Prevent wasted effort through upfront planning
+
+METHODOLOGY:
+1. EXPLORE FIRST: Spawn explore agents IN PARALLEL to understand the codebase
+2. DECOMPOSE: Break request into atomic, single-purpose tasks
+3. ANALYZE DEPENDENCIES: What blocks what? What can run in parallel?
+4. ASSIGN AGENTS: Map each task to the right specialist (explore/dewey/frontend/delphi)
+5. OUTPUT STRUCTURED PLAN: Use the required format below
+
+REQUIRED OUTPUT FORMAT:
+```
+## PLAN: [Brief title]
+
+### ANALYSIS
+- **Request**: [One sentence summary]
+- **Scope**: [What's in/out of scope]
+- **Risk Level**: [Low/Medium/High]
+
+### EXECUTION PHASES
+
+#### Phase 1: [Name] (PARALLEL)
+| Task | Agent | Files | Est |
+|------|-------|-------|-----|
+| [description] | explore | file.py | S/M/L |
+
+#### Phase 2: [Name] (SEQUENTIAL after Phase 1)
+| Task | Agent | Files | Est |
+|------|-------|-------|-----|
+
+### AGENT SPAWN COMMANDS
+```python
+# Phase 1 - Fire all in parallel
+agent_spawn(prompt="...", agent_type="explore", description="...")
+```
+```
+
+CONSTRAINTS:
+- You ONLY plan. You NEVER execute code changes.
+- Every task must have a clear agent assignment
+- Parallel phases must be truly independent
+- Include ready-to-use agent_spawn commands""",
     }

     system_prompt = system_prompts.get(agent_type, None)

-    #
-    #
-    #
-    #
-    #
-    # Agent model preferences (for reference - NOT passed to Claude CLI):
-    # - stravinsky: Claude Opus 4.5 (orchestration)
-    # - delphi: GPT-5.2 (strategic advice) - use invoke_openai
-    # - frontend: Gemini Pro High (UI/UX) - use invoke_gemini with thinking_budget
-    # - explore, dewey, document_writer, multimodal: Gemini Flash (fast) - use invoke_gemini
+    # Model routing (MANDATORY - enforced in system prompts):
+    # - explore, dewey, document_writer, multimodal → invoke_gemini(gemini-3-flash)
+    # - frontend → invoke_gemini(gemini-3-pro-high)
+    # - delphi → invoke_openai(gpt-5.2)
+    # - Unknown agent types (coding tasks) → Claude CLI --model sonnet

     # Get token store for authentication
     from ..auth.token_store import TokenStore
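For reference, the tool call shape these prompts mandate for a specialized agent looks roughly like the sketch below; invoke_gemini here is a placeholder stub standing in for the MCP tool named in the prompts, its real signature is inferred from the prompt text above rather than verified against the package, and the prompt string is invented:

```python
from typing import Any, Dict

def invoke_gemini(prompt: str, model: str, agent_context: Dict[str, Any]) -> str:
    """Placeholder stub standing in for the MCP tool of the same name."""
    return f"[{model}] response for {agent_context['agent_type']}: {prompt[:40]}..."

# The call shape the "explore" prompt above mandates.
result = invoke_gemini(
    prompt="Analyze this: <file contents gathered with Read/Grep/Glob>",
    model="gemini-3-flash",
    agent_context={"agent_type": "explore"},
)
print(result)
```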
mcp_bridge/tools/background_tasks.py

@@ -61,7 +61,8 @@ class BackgroundManager:
             json.dump(tasks, f, indent=2)

     def create_task(self, prompt: str, model: str) -> str:
-
+        import uuid as uuid_module  # Local import for MCP context
+        task_id = str(uuid_module.uuid4())[:8]
         task = BackgroundTask(
             id=task_id,
             prompt=prompt,