stravinsky 0.2.38__py3-none-any.whl → 0.2.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of stravinsky might be problematic; consult the registry's advisory page for more details.

mcp_bridge/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.2.32"
1
+ __version__ = "0.2.52"
@@ -0,0 +1,130 @@
1
"""
Background Token Refresh Scheduler

Proactively refreshes OAuth tokens before they expire:
- Gemini: Refreshes when 30 minutes remaining (tokens expire in ~1 hour)
- OpenAI: Refreshes when 12 hours remaining (tokens expire in ~24 hours)
"""

import asyncio
import logging
import time
from typing import TYPE_CHECKING

# Provider-specific refresh entry points (same call shape, different OAuth backends).
from .oauth import refresh_access_token as gemini_refresh
from .openai_oauth import refresh_access_token as openai_refresh

if TYPE_CHECKING:
    # Import only for type checking to avoid a runtime import cycle.
    from .token_store import TokenStore

logger = logging.getLogger(__name__)

# Refresh configuration per provider.
# "interval" documents the provider's approximate token lifetime (seconds);
# "buffer" is how many seconds of remaining lifetime triggers a refresh.
REFRESH_CONFIG = {
    "gemini": {
        "interval": 3600,  # Token expires in ~1 hour
        "buffer": 1800,  # Refresh with 30 min remaining
    },
    "openai": {
        "interval": 86400,  # Token expires in ~24 hours
        "buffer": 43200,  # Refresh with 12 hours remaining
    },
}
33
+
34
+
35
async def background_token_refresh(token_store: "TokenStore") -> None:
    """
    Background task that proactively refreshes tokens before expiry.

    Loops forever: every 5 minutes it walks the configured providers and
    refreshes any token whose remaining lifetime has dropped below that
    provider's buffer, so tokens don't expire during long sessions.

    Args:
        token_store: The token store to manage.
    """
    logger.info("Starting background token refresh scheduler")

    poll_seconds = 300  # check every 5 minutes
    while True:
        for provider_name, provider_cfg in REFRESH_CONFIG.items():
            await _refresh_if_needed(token_store, provider_name, provider_cfg["buffer"])

        await asyncio.sleep(poll_seconds)
54
+
55
+
56
async def _refresh_if_needed(
    token_store: "TokenStore",
    provider: str,
    buffer_seconds: int,
) -> None:
    """
    Refresh a provider's token if it's close to expiring.

    The provider refresh functions are synchronous (blocking HTTP), so the
    call is dispatched to a worker thread via asyncio.to_thread to avoid
    stalling the event loop. Failures are logged and swallowed: a missed
    proactive refresh is recoverable on the next poll.

    Args:
        token_store: Token store instance
        provider: Provider name (gemini, openai)
        buffer_seconds: Refresh when this many seconds remain
    """
    try:
        # Nothing to do while more than `buffer_seconds` of lifetime remain.
        if not token_store.needs_refresh(provider, buffer_seconds=buffer_seconds):
            return

        token = token_store.get_token(provider)
        if not token or not token.get("refresh_token"):
            return  # No token stored, or no refresh token to use

        # Pick the provider-specific refresh function.
        refresh_fn = gemini_refresh if provider == "gemini" else openai_refresh

        # Run the blocking refresh in a thread so this coroutine does not
        # block the event loop during network I/O.
        result = await asyncio.to_thread(refresh_fn, token["refresh_token"])

        # Persist the new access token and its expiry window.
        token_store.update_access_token(
            provider,
            result.access_token,
            result.expires_in,
        )

        logger.info(
            f"Proactively refreshed {provider} token "
            f"(expires in {result.expires_in}s)"
        )

    except Exception as e:
        # Best-effort: never let a refresh failure kill the scheduler loop.
        logger.warning(f"Failed to refresh {provider} token: {e}")
98
+
99
+
100
def get_token_status(token_store: "TokenStore") -> dict[str, dict]:
    """
    Get status of all provider tokens.

    Returns:
        Dict mapping provider name to one of:
        - {"status": "not_authenticated"}            no stored token
        - {"status": "authenticated", "expires": "unknown"}  token without expiry
        - {"status": "expired"}                      expiry timestamp has passed
        - {"status": "valid", "expires_in_seconds": ..., "expires_in_minutes": ...}
    """
    report: dict[str, dict] = {}

    for provider in REFRESH_CONFIG:
        token = token_store.get_token(provider)

        if not token:
            report[provider] = {"status": "not_authenticated"}
            continue

        expires_at = token.get("expires_at", 0)
        if expires_at <= 0:
            # Token exists but carries no usable expiry timestamp.
            report[provider] = {"status": "authenticated", "expires": "unknown"}
            continue

        remaining = expires_at - time.time()
        if remaining <= 0:
            report[provider] = {"status": "expired"}
        else:
            report[provider] = {
                "status": "valid",
                "expires_in_seconds": int(remaining),
                "expires_in_minutes": int(remaining / 60),
            }

    return report
@@ -1,6 +1,13 @@
1
1
  """
2
2
  Hooks initialization.
3
- Registers all Tier 1-4 hooks into the HookManager.
3
+ Registers all Tier 1-5 hooks into the HookManager.
4
+
5
+ Hook Tiers:
6
+ - Tier 1: Post-tool-call (immediate response modification)
7
+ - Tier 2: Pre-model-invoke (context management)
8
+ - Tier 3: Pre-model-invoke (performance optimization)
9
+ - Tier 4: Pre-model-invoke (behavior enforcement)
10
+ - Tier 5: Session lifecycle (idle detection, compaction)
4
11
  """
5
12
 
6
13
  from .manager import get_hook_manager
@@ -19,6 +26,11 @@ from .auto_slash_command import auto_slash_command_hook
19
26
  from .session_recovery import session_recovery_hook
20
27
  from .empty_message_sanitizer import empty_message_sanitizer_hook
21
28
 
29
+ # New hooks based on oh-my-opencode patterns
30
+ from .session_idle import session_idle_hook
31
+ from .pre_compact import pre_compact_hook
32
+ from .parallel_enforcer import parallel_enforcer_post_tool_hook
33
+
22
34
 
23
35
  def initialize_hooks():
24
36
  """Register all available hooks."""
@@ -30,6 +42,7 @@ def initialize_hooks():
30
42
  manager.register_post_tool_call(comment_checker_hook)
31
43
  manager.register_post_tool_call(agent_reminder_hook)
32
44
  manager.register_post_tool_call(session_recovery_hook)
45
+ manager.register_post_tool_call(parallel_enforcer_post_tool_hook) # NEW: Enforce parallel spawning
33
46
 
34
47
  # Tier 2: Pre-model-invoke (context management)
35
48
  manager.register_pre_model_invoke(directory_context_hook)
@@ -46,4 +59,8 @@ def initialize_hooks():
46
59
  manager.register_pre_model_invoke(todo_continuation_hook)
47
60
  manager.register_pre_model_invoke(auto_slash_command_hook)
48
61
 
62
+ # Tier 5: Session lifecycle hooks (NEW)
63
+ manager.register_session_idle(session_idle_hook) # Stop hook - idle detection
64
+ manager.register_pre_compact(pre_compact_hook) # PreCompact - context preservation
65
+
49
66
  return manager
@@ -12,6 +12,13 @@ logger = logging.getLogger(__name__)
12
12
  class HookManager:
13
13
  """
14
14
  Manages the registration and execution of hooks.
15
+
16
+ Hook Types:
17
+ - pre_tool_call: Before tool execution (can modify args or block)
18
+ - post_tool_call: After tool execution (can modify output)
19
+ - pre_model_invoke: Before model invocation (can modify prompt/params)
20
+ - session_idle: When session becomes idle (can inject continuation)
21
+ - pre_compact: Before context compaction (can preserve critical context)
15
22
  """
16
23
 
17
24
  _instance = None
@@ -26,6 +33,13 @@ class HookManager:
26
33
  self.pre_model_invoke_hooks: List[
27
34
  Callable[[Dict[str, Any]], Awaitable[Optional[Dict[str, Any]]]]
28
35
  ] = []
36
+ # New hook types based on oh-my-opencode patterns
37
+ self.session_idle_hooks: List[
38
+ Callable[[Dict[str, Any]], Awaitable[Optional[Dict[str, Any]]]]
39
+ ] = []
40
+ self.pre_compact_hooks: List[
41
+ Callable[[Dict[str, Any]], Awaitable[Optional[Dict[str, Any]]]]
42
+ ] = []
29
43
 
30
44
  @classmethod
31
45
  def get_instance(cls):
@@ -51,6 +65,18 @@ class HookManager:
51
65
  """Run before model invocation. Can modify prompt or parameters."""
52
66
  self.pre_model_invoke_hooks.append(hook)
53
67
 
68
    def register_session_idle(
        self, hook: Callable[[Dict[str, Any]], Awaitable[Optional[Dict[str, Any]]]]
    ):
        """Run when session becomes idle. Can inject continuation prompts.

        Hooks run in registration order when the session-idle event fires.
        """
        self.session_idle_hooks.append(hook)
73
+
74
    def register_pre_compact(
        self, hook: Callable[[Dict[str, Any]], Awaitable[Optional[Dict[str, Any]]]]
    ):
        """Run before context compaction. Can preserve critical context.

        Hooks run in registration order before compaction begins.
        """
        self.pre_compact_hooks.append(hook)
79
+
54
80
  async def execute_pre_tool_call(
55
81
  self, tool_name: str, arguments: Dict[str, Any]
56
82
  ) -> Dict[str, Any]:
@@ -91,6 +117,30 @@ class HookManager:
91
117
  logger.error(f"[HookManager] Error in pre_model_invoke hook {hook.__name__}: {e}")
92
118
  return current_params
93
119
 
120
+ async def execute_session_idle(self, params: Dict[str, Any]) -> Dict[str, Any]:
121
+ """Executes all session idle hooks (Stop hook pattern)."""
122
+ current_params = params
123
+ for hook in self.session_idle_hooks:
124
+ try:
125
+ modified_params = await hook(current_params)
126
+ if modified_params is not None:
127
+ current_params = modified_params
128
+ except Exception as e:
129
+ logger.error(f"[HookManager] Error in session_idle hook {hook.__name__}: {e}")
130
+ return current_params
131
+
132
+ async def execute_pre_compact(self, params: Dict[str, Any]) -> Dict[str, Any]:
133
+ """Executes all pre-compact hooks (context preservation)."""
134
+ current_params = params
135
+ for hook in self.pre_compact_hooks:
136
+ try:
137
+ modified_params = await hook(current_params)
138
+ if modified_params is not None:
139
+ current_params = modified_params
140
+ except Exception as e:
141
+ logger.error(f"[HookManager] Error in pre_compact hook {hook.__name__}: {e}")
142
+ return current_params
143
+
94
144
 
95
145
  def get_hook_manager() -> HookManager:
96
146
  return HookManager.get_instance()
@@ -0,0 +1,127 @@
1
"""
Parallel Enforcer Hook - Enforce Parallel Agent Spawning.

Detects when 2+ independent tasks exist and injects reminders
to spawn agents in parallel rather than working sequentially.

Based on oh-my-opencode's parallel execution enforcement pattern.
"""

import logging
import re
from typing import Any, Dict, Optional

logger = logging.getLogger(__name__)

# Parallel enforcement prompt; {count} is filled with the number of pending todos.
PARALLEL_ENFORCEMENT_PROMPT = """
[PARALLEL EXECUTION REQUIRED]

You have {count} independent pending tasks. You MUST spawn agents for ALL of them simultaneously.

CORRECT (Parallel - DO THIS):
```
agent_spawn(prompt="Task 1...", agent_type="explore", description="Task 1")
agent_spawn(prompt="Task 2...", agent_type="explore", description="Task 2")
agent_spawn(prompt="Task 3...", agent_type="dewey", description="Task 3")
// All spawned in ONE response, then wait for results
```

WRONG (Sequential - DO NOT DO THIS):
```
Mark task 1 in_progress -> work on it -> complete
Mark task 2 in_progress -> work on it -> complete // TOO SLOW!
```

RULES:
1. Spawn ALL independent tasks simultaneously using agent_spawn
2. Do NOT mark any task as in_progress until agents are spawned
3. Collect results with agent_output AFTER spawning
4. Only work sequentially when tasks have dependencies
"""

# Per-session flag: True once enforcement has fired for that session id,
# so the reminder is injected at most once per session.
_enforcement_triggered: Dict[str, bool] = {}
45
+
46
+
47
async def parallel_enforcer_hook(params: Dict[str, Any]) -> Optional[str]:
    """
    Post-tool-call hook that triggers after TodoWrite.

    When 2+ pending todos are detected, appends the parallel execution
    enforcement prompt to the tool output to prevent sequential work
    patterns. Fires at most once per session (tracked in
    ``_enforcement_triggered``).

    Args:
        params: Hook params; reads "tool_name", "output" and "session_id".

    Returns:
        The tool output with the enforcement prompt appended, or None when
        no modification is needed. (Annotation fixed: the original declared
        ``Optional[Dict[str, Any]]`` but always returned a ``str``, which is
        what the ``parallel_enforcer_post_tool_hook`` wrapper expects.)
    """
    tool_name = params.get("tool_name", "")
    output = params.get("output", "")
    session_id = params.get("session_id", "default")

    # Only trigger for TodoWrite calls
    if tool_name.lower() not in ["todowrite", "todo_write"]:
        return None

    # Count pending todos
    pending_count = _count_pending_todos(output)

    if pending_count < 2:
        return None

    # Check if already triggered recently
    if _enforcement_triggered.get(session_id, False):
        return None

    # Mark as triggered
    _enforcement_triggered[session_id] = True

    logger.info(f"[ParallelEnforcerHook] Detected {pending_count} pending todos, enforcing parallel execution")

    # Inject enforcement prompt
    enforcement = PARALLEL_ENFORCEMENT_PROMPT.format(count=pending_count)
    modified_output = output + "\n\n" + enforcement

    return modified_output
82
+
83
+
84
def _count_pending_todos(output: str) -> int:
    """Count the number of pending todos in TodoWrite output.

    Recognizes several serialization styles ([pending] markers, JSON and
    YAML-ish status fields) and sums the hits across all of them,
    case-insensitively.
    """
    pending_patterns = (
        r'\[pending\]',
        r'"status":\s*"pending"',
        r"status:\s*pending",
        r"'status':\s*'pending'",
    )

    return sum(
        len(re.findall(pattern, output, re.IGNORECASE))
        for pattern in pending_patterns
    )
100
+
101
+
102
def reset_enforcement(session_id: str = "default") -> None:
    """Reset enforcement state for a session.

    Clears the per-session trigger flag so the enforcement prompt can be
    injected again for this session id.
    """
    _enforcement_triggered[session_id] = False
105
+
106
+
107
async def parallel_enforcer_post_tool_hook(
    tool_name: str,
    arguments: Dict[str, Any],
    output: str
) -> Optional[str]:
    """
    Post-tool-call hook interface for HookManager.

    Adapter that repackages the HookManager signature into the params dict
    expected by parallel_enforcer_hook, returning the modified output
    string or None when no change was made.
    """
    hook_result = await parallel_enforcer_hook({
        "tool_name": tool_name,
        "arguments": arguments,
        "output": output,
    })

    return hook_result if isinstance(hook_result, str) else None
@@ -0,0 +1,224 @@
1
"""
PreCompact Hook - Context Preservation Before Compaction.

Triggers before session compaction to preserve critical context
that should survive summarization. Uses Gemini for intelligent
context extraction and preservation.

Based on oh-my-opencode's pre-compact hook pattern.
"""

import logging
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)

# Flag to prevent recursive calls during Gemini invocation:
# the extraction step itself invokes the model, which would re-enter the hook.
_in_preservation = False

# Critical context patterns to preserve. A prompt line containing any of
# these markers is captured (with trailing context) before compaction.
PRESERVE_PATTERNS = [
    # Architecture decisions
    "ARCHITECTURE:",
    "DESIGN DECISION:",
    "## Architecture",

    # Important constraints
    "CONSTRAINT:",
    "REQUIREMENT:",
    "MUST NOT:",
    "NEVER:",

    # Session state
    "CURRENT TASK:",
    "BLOCKED BY:",
    "WAITING FOR:",

    # Critical errors
    "CRITICAL ERROR:",
    "SECURITY ISSUE:",
    "BREAKING CHANGE:",
]

# Memory anchors to inject into compaction; managed by
# register_memory_anchor / clear_memory_anchors.
MEMORY_ANCHORS: List[str] = []
45
+
46
+
47
def register_memory_anchor(anchor: str, priority: str = "normal"):
    """
    Register a memory anchor to preserve during compaction.

    Critical anchors are kept at the front of the list. When the list
    overflows the 10-anchor cap, the OLDEST normal anchor is evicted first
    (falling back to the oldest critical one), so the anchor that was just
    registered is never the one silently dropped.

    Args:
        anchor: The text to preserve
        priority: "critical" or "normal"
    """
    if priority == "critical":
        MEMORY_ANCHORS.insert(0, f"[CRITICAL] {anchor}")
    else:
        MEMORY_ANCHORS.append(anchor)

    # Limit to 10 anchors to prevent bloat. Popping from the end would
    # discard the just-appended anchor (making registration a no-op when
    # full); evict the oldest normal anchor instead.
    while len(MEMORY_ANCHORS) > 10:
        for idx, existing in enumerate(MEMORY_ANCHORS):
            if not existing.startswith("[CRITICAL] "):
                # Normals keep append order after the criticals, so the
                # first non-critical entry is the oldest normal anchor.
                del MEMORY_ANCHORS[idx]
                break
        else:
            # Every anchor is critical: drop the oldest critical, which
            # sits at the end because criticals are inserted at the front.
            MEMORY_ANCHORS.pop()
63
+
64
+
65
def clear_memory_anchors():
    """Drop every registered memory anchor, in place."""
    del MEMORY_ANCHORS[:]
68
+
69
+
70
async def pre_compact_hook(params: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """
    Pre-model-invoke hook that runs before context compaction.

    Uses Gemini to intelligently extract and preserve critical context
    that should survive summarization.

    Args:
        params: Hook params; reads "prompt".

    Returns:
        A shallow copy of params with the preservation section appended to
        the prompt, or None when there is nothing to preserve (or the hook
        is re-entered / the prompt is not compaction-related).
    """
    global _in_preservation

    # Prevent recursive calls: the Gemini extraction below itself invokes
    # the model, which would re-trigger this hook.
    if _in_preservation:
        return None

    prompt = params.get("prompt", "")

    # Only activate for compaction-related prompts
    if not _is_compaction_prompt(prompt):
        return None

    # Collect pattern-matched context
    preserved_context = _extract_preserved_context(prompt)
    preserved_context.extend(MEMORY_ANCHORS)

    # Use Gemini for intelligent context extraction if prompt is long
    if len(prompt) > 50000:
        try:
            _in_preservation = True
            gemini_context = await _extract_context_with_gemini(prompt)
            if gemini_context:
                preserved_context.extend(gemini_context)
        except Exception as e:
            # Extraction is best-effort; pattern matches above still apply.
            logger.warning(f"[PreCompactHook] Gemini extraction failed: {e}")
        finally:
            # Always clear the re-entrancy flag, even on failure.
            _in_preservation = False

    if not preserved_context:
        return None

    # Build preservation section
    preservation_section = _build_preservation_section(preserved_context)

    logger.info(f"[PreCompactHook] Preserving {len(preserved_context)} context items")

    # Inject into prompt
    modified_prompt = prompt + "\n\n" + preservation_section

    return {**params, "prompt": modified_prompt}
117
+
118
+
119
async def _extract_context_with_gemini(prompt: str) -> List[str]:
    """
    Use Gemini to intelligently extract critical context to preserve.

    Args:
        prompt: The full conversation/context to analyze

    Returns:
        List of critical context items to preserve (at most 10). Empty on
        any failure — preservation is best-effort and must never raise.
    """
    try:
        from ..tools.model_invoke import invoke_gemini_impl

        # Truncate prompt if too long for Gemini
        max_chars = 100000
        truncated = prompt[:max_chars] if len(prompt) > max_chars else prompt

        extraction_prompt = f"""Analyze this conversation and extract ONLY the most critical information that MUST be preserved during summarization.

Focus on:
1. Architecture decisions and their rationale
2. Critical constraints or requirements
3. Important error patterns or debugging insights
4. Key file paths and their purposes
5. Unfinished tasks or blocking issues

Return a bullet list of critical items (max 10). Be extremely concise.

CONVERSATION:
{truncated}

CRITICAL ITEMS TO PRESERVE:"""

        result = await invoke_gemini_impl(
            prompt=extraction_prompt,
            model="gemini-3-flash",
            max_tokens=2000,
            temperature=0.1,
        )

        if not result:
            return []

        # Parse bullet points from response
        items = []
        for line in result.strip().split("\n"):
            line = line.strip()
            # Accept "-", "*", "•" bullets and numbered items. Strip the
            # leading digits before testing the delimiter so multi-digit
            # numbers ("10.", "12)") are recognized too — checking only
            # line[1] would reject anything past item 9.
            unnumbered = line.lstrip("0123456789")
            is_numbered = line[:1].isdigit() and unnumbered[:1] in ".):"
            if line.startswith(("-", "*", "•")) or is_numbered:
                # Clean up the bullet marker/number
                item = line.lstrip("-*•0123456789.): ").strip()
                if item and len(item) > 10:
                    items.append(item)

        return items[:10]  # Max 10 items

    except Exception as e:
        logger.warning(f"[PreCompactHook] Gemini context extraction error: {e}")
        return []
178
+
179
+
180
def _is_compaction_prompt(prompt: str) -> bool:
    """Detect if this is a compaction/summarization prompt.

    Case-insensitive substring check against known compaction phrases.
    """
    lowered = prompt.lower()
    signals = (
        "summarize the conversation",
        "compact the context",
        "reduce context size",
        "context window",
        "summarization",
    )

    for signal in signals:
        if signal in lowered:
            return True
    return False
192
+
193
+
194
def _extract_preserved_context(prompt: str) -> List[str]:
    """Extract context matching preservation patterns.

    For every prompt line containing a preservation marker, captures that
    line plus up to two following lines as one preserved snippet.
    """
    lines = prompt.split("\n")
    snippets: List[str] = []

    for idx, line in enumerate(lines):
        if any(marker in line for marker in PRESERVE_PATTERNS):
            # Keep the marker line and up to 2 lines of trailing context.
            snippets.append("\n".join(lines[idx:idx + 3]))

    return snippets
208
+
209
+
210
def _build_preservation_section(context_items: List[str]) -> str:
    """Build the preservation section to inject.

    Renders a numbered list of context items between a fixed header and
    footer instructing the summarizer to keep them verbatim.
    """
    header = (
        "\n## CRITICAL CONTEXT TO PRESERVE\n\n"
        "The following information MUST be preserved in any summarization:\n\n"
    )
    footer = (
        "\nWhen summarizing, ensure these items are included verbatim "
        "or with minimal paraphrasing.\n"
    )
    numbered = "".join(
        f"{idx}. {entry}\n\n" for idx, entry in enumerate(context_items, start=1)
    )
    return header + numbered + footer