stravinsky 0.2.67__py3-none-any.whl → 0.4.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of stravinsky might be problematic.

@@ -0,0 +1,222 @@
+ """
+ Rate Limiting Configuration for Stravinsky Agent Manager.
+
+ Provides per-model concurrency limits to prevent API overload.
+ Implements semaphore-based rate limiting with configurable limits
+ per model family.
+
+ Configuration file: ~/.stravinsky/config.json
+     {
+         "rate_limits": {
+             "claude-opus-4": 2,
+             "claude-sonnet-4.5": 5,
+             "gemini-3-flash": 10,
+             "gemini-3-pro-high": 5,
+             "gpt-5.2": 3
+         }
+     }
+ """
+
+ import asyncio
+ import json
+ import threading
+ from pathlib import Path
+ from typing import Dict, Optional
+ from collections import defaultdict
+ from datetime import datetime
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ # Default rate limits per model (conservative defaults)
+ DEFAULT_RATE_LIMITS = {
+     # Claude models via CLI
+     "opus": 2,    # Expensive, limit parallel calls
+     "sonnet": 5,  # Moderate cost
+     "haiku": 10,  # Cheap, allow more
+     # Gemini models via MCP
+     "gemini-3-flash": 10,    # Free/cheap, allow many
+     "gemini-3-pro-high": 5,  # Medium cost
+     # OpenAI models via MCP
+     "gpt-5.2": 3,  # Expensive
+     # Default for unknown models
+     "_default": 5,
+ }
+
+ # Config file location
+ CONFIG_FILE = Path.home() / ".stravinsky" / "config.json"
+
+
+ class RateLimiter:
+     """
+     Semaphore-based rate limiter for model concurrency.
+
+     Thread-safe implementation that limits concurrent requests
+     per model family to prevent API overload.
+     """
+
+     def __init__(self):
+         self._semaphores: Dict[str, threading.Semaphore] = {}
+         self._lock = threading.Lock()
+         self._limits = self._load_limits()
+         self._active_counts: Dict[str, int] = defaultdict(int)
+         self._queue_counts: Dict[str, int] = defaultdict(int)
+
+     def _load_limits(self) -> Dict[str, int]:
+         """Load rate limits from config file or use defaults."""
+         limits = DEFAULT_RATE_LIMITS.copy()
+
+         if CONFIG_FILE.exists():
+             try:
+                 with open(CONFIG_FILE) as f:
+                     config = json.load(f)
+                     if "rate_limits" in config:
+                         limits.update(config["rate_limits"])
+                         logger.info(f"[RateLimiter] Loaded custom limits from {CONFIG_FILE}")
+             except (json.JSONDecodeError, IOError) as e:
+                 logger.warning(f"[RateLimiter] Failed to load config: {e}")
+
+         return limits
+
+     def _get_semaphore(self, model: str) -> threading.Semaphore:
+         """Get or create a semaphore for a model."""
+         with self._lock:
+             if model not in self._semaphores:
+                 limit = self._limits.get(model, self._limits.get("_default", 5))
+                 self._semaphores[model] = threading.Semaphore(limit)
+                 logger.debug(f"[RateLimiter] Created semaphore for {model} with limit {limit}")
+             return self._semaphores[model]
+
+     def _normalize_model(self, model: str) -> str:
+         """Normalize model name to match config keys."""
+         model_lower = model.lower()
+
+         # Match known patterns
+         if "opus" in model_lower:
+             return "opus"
+         elif "sonnet" in model_lower:
+             return "sonnet"
+         elif "haiku" in model_lower:
+             return "haiku"
+         elif "gemini" in model_lower and "flash" in model_lower:
+             return "gemini-3-flash"
+         elif "gemini" in model_lower and ("pro" in model_lower or "high" in model_lower):
+             return "gemini-3-pro-high"
+         elif "gpt" in model_lower:
+             return "gpt-5.2"
+
+         return model_lower
+
+     def acquire(self, model: str, timeout: float = 60.0) -> bool:
+         """
+         Acquire a slot for the given model.
+
+         Args:
+             model: Model name to acquire slot for
+             timeout: Maximum time to wait in seconds
+
+         Returns:
+             True if slot acquired, False if timed out
+         """
+         normalized = self._normalize_model(model)
+         semaphore = self._get_semaphore(normalized)
+
+         with self._lock:
+             self._queue_counts[normalized] += 1
+
+         logger.debug(f"[RateLimiter] Acquiring slot for {normalized}")
+         acquired = semaphore.acquire(blocking=True, timeout=timeout)
+
+         with self._lock:
+             self._queue_counts[normalized] -= 1
+             if acquired:
+                 self._active_counts[normalized] += 1
+
+         if acquired:
+             logger.debug(f"[RateLimiter] Acquired slot for {normalized}")
+         else:
+             logger.warning(f"[RateLimiter] Timeout waiting for slot for {normalized}")
+
+         return acquired
+
+     def release(self, model: str):
+         """Release a slot for the given model."""
+         normalized = self._normalize_model(model)
+         semaphore = self._get_semaphore(normalized)
+
+         with self._lock:
+             self._active_counts[normalized] = max(0, self._active_counts[normalized] - 1)
+
+         semaphore.release()
+         logger.debug(f"[RateLimiter] Released slot for {normalized}")
+
+     def get_status(self) -> Dict[str, Dict[str, int]]:
+         """Get current rate limiter status."""
+         with self._lock:
+             return {
+                 model: {
+                     "limit": self._limits.get(model, self._limits.get("_default", 5)),
+                     "active": self._active_counts[model],
+                     "queued": self._queue_counts[model],
+                 }
+                 for model in set(list(self._active_counts.keys()) + list(self._queue_counts.keys()))
+             }
+
+     def update_limits(self, new_limits: Dict[str, int]):
+         """
+         Update rate limits dynamically.
+
+         Note: This only affects new semaphores. Existing ones
+         will continue with their original limits until recreated.
+         """
+         with self._lock:
+             self._limits.update(new_limits)
+             logger.info(f"[RateLimiter] Updated limits: {new_limits}")
+
+
+ class RateLimitContext:
+     """Context manager for rate-limited model access."""
+
+     def __init__(self, limiter: RateLimiter, model: str, timeout: float = 60.0):
+         self.limiter = limiter
+         self.model = model
+         self.timeout = timeout
+         self.acquired = False
+
+     def __enter__(self):
+         self.acquired = self.limiter.acquire(self.model, self.timeout)
+         if not self.acquired:
+             raise TimeoutError(f"Rate limit timeout for model {self.model}")
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.acquired:
+             self.limiter.release(self.model)
+         return False
+
+
+ # Global rate limiter instance
+ _rate_limiter: Optional[RateLimiter] = None
+ _rate_limiter_lock = threading.Lock()
+
+
+ def get_rate_limiter() -> RateLimiter:
+     """Get or create the global RateLimiter instance."""
+     global _rate_limiter
+     if _rate_limiter is None:
+         with _rate_limiter_lock:
+             if _rate_limiter is None:
+                 _rate_limiter = RateLimiter()
+     return _rate_limiter
+
+
+ def rate_limited(model: str, timeout: float = 60.0) -> RateLimitContext:
+     """
+     Get a rate-limited context for a model.
+
+     Usage:
+         with rate_limited("gemini-3-flash") as ctx:
+             # Make API call
+             pass
+     """
+     return RateLimitContext(get_rate_limiter(), model, timeout)
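Each model family is guarded by a counting semaphore: acquire() blocks for up to the timeout, and RateLimitContext raises TimeoutError when no slot frees up. A minimal usage sketch, assuming the module is importable as stravinsky.rate_limiting (the wheel's actual module path is not shown in this diff):

from concurrent.futures import ThreadPoolExecutor

# Hypothetical import path; only the module contents appear in this diff.
from stravinsky.rate_limiting import get_rate_limiter, rate_limited

def call_model(prompt: str) -> str:
    # At most 10 concurrent "gemini-3-flash" calls per DEFAULT_RATE_LIMITS;
    # TimeoutError is raised if no slot frees up within 30 seconds.
    with rate_limited("gemini-3-flash", timeout=30.0):
        return f"response for {prompt}"  # placeholder for the real API call

with ThreadPoolExecutor(max_workers=20) as pool:
    results = list(pool.map(call_model, [f"prompt {i}" for i in range(20)]))

# get_status() reports limit/active/queued per normalized model name.
print(get_rate_limiter().get_status())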
@@ -0,0 +1,128 @@
+ {
+   "schema_version": "1.0.0",
+   "manifest_version": "0.3.9",
+   "description": "Stravinsky skills for Claude Code command integration",
+   "generated_date": "2026-01-08T23:53:08.837988Z",
+   "skills": {
+     "commit.md": {
+       "file_path": "commit.md",
+       "description": "Git Master - Intelligent atomic commit orchestration",
+       "checksum": "f09aee4cc46e",
+       "lines_of_code": 531,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "delphi.md": {
+       "file_path": "delphi.md",
+       "description": "Strategic advisor for architecture and debugging",
+       "checksum": "46ce352164a5",
+       "lines_of_code": 5,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "dewey.md": {
+       "file_path": "dewey.md",
+       "description": "Research librarian for documentation and examples",
+       "checksum": "de4e41fc0e08",
+       "lines_of_code": 54,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "index.md": {
+       "file_path": "str/index.md",
+       "description": "Index project for semantic search",
+       "checksum": "a37c570b70a5",
+       "lines_of_code": 199,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "publish.md": {
+       "file_path": "publish.md",
+       "description": "Publish to PyPI with version bump",
+       "checksum": "9e6ed392ebc4",
+       "lines_of_code": 66,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "review.md": {
+       "file_path": "review.md",
+       "description": "Code review recent changes",
+       "checksum": "47274e796826",
+       "lines_of_code": 67,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "search.md": {
+       "file_path": "str/search.md",
+       "description": "Stravinsky skill",
+       "checksum": "eb092d567333",
+       "lines_of_code": 205,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "start_filewatch.md": {
+       "file_path": "str/start_filewatch.md",
+       "description": "Stravinsky skill",
+       "checksum": "23ca1f6a1999",
+       "lines_of_code": 136,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "stats.md": {
+       "file_path": "str/stats.md",
+       "description": "Stravinsky skill",
+       "checksum": "017f4fc6d099",
+       "lines_of_code": 71,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "stop_filewatch.md": {
+       "file_path": "str/stop_filewatch.md",
+       "description": "Stravinsky skill",
+       "checksum": "dbe92100d0ba",
+       "lines_of_code": 89,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "cancel-loop.md": {
+       "file_path": "strav/cancel-loop.md",
+       "description": "Cancel active continuation loop",
+       "checksum": "d811ea8bb0e9",
+       "lines_of_code": 128,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "loop.md": {
+       "file_path": "strav/loop.md",
+       "description": "Continuation loop for iterative execution",
+       "checksum": "defc1ae0aae4",
+       "lines_of_code": 193,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "strav.md": {
+       "file_path": "strav.md",
+       "description": "Stravinsky Orchestrator - Parallel agent execution",
+       "checksum": "9c9969cf8c09",
+       "lines_of_code": 216,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "verify.md": {
+       "file_path": "verify.md",
+       "description": "Post-implementation verification",
+       "checksum": "87894579d5ec",
+       "lines_of_code": 60,
+       "updatable": true,
+       "priority": "medium"
+     },
+     "version.md": {
+       "file_path": "version.md",
+       "description": "Stravinsky skill",
+       "checksum": "1cf41d5d28da",
+       "lines_of_code": 5,
+       "updatable": true,
+       "priority": "medium"
+     }
+   }
+ }
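The manifest is plain JSON keyed by skill file name, with a per-skill path, short checksum, line count, updatable flag, and priority. A small sketch of reading it, assuming it is installed as skills_manifest.json (the file name and location are assumptions; only the contents appear in this diff):

import json
from pathlib import Path

# Hypothetical path; the diff shows the manifest contents but not where the wheel places it.
manifest = json.loads(Path("skills_manifest.json").read_text())

print(manifest["manifest_version"])  # "0.3.9"
for name, meta in manifest["skills"].items():
    # Each skill records its file path, a 12-character checksum, and a line count.
    print(f"{name}: {meta['file_path']} ({meta['lines_of_code']} lines, checksum {meta['checksum']})")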
@@ -99,22 +99,27 @@ __all__ = [
      "parallel_execution",
      "stravinsky_mode",
      "todo_delegation",
-
      # Context & state
      "context",
      "todo_continuation",
      "pre_compact",
-
      # Tool enhancement
      "tool_messaging",
      "edit_recovery",
      "truncator",
-
      # Agent lifecycle
      "notification_hook",
      "subagent_stop",
  ]

+
+ def initialize_hooks():
+     """Initialize and register all hooks with the HookManager."""
+     # Currently hooks are primarily external scripts or lazy-loaded.
+     # This entry point allows for future internal hook registration.
+     pass
+
+
  __version__ = "0.2.63"
  __author__ = "David Andrews"
  __description__ = "Claude Code hooks for Stravinsky MCP parallel execution"
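initialize_hooks() is a placeholder entry point for now; a consumer would call it once at startup. A minimal sketch, assuming the hooks package imports as stravinsky_hooks (the real import name is not shown in this hunk):

# Hypothetical package name; the hooks package's import name is not visible in this diff.
import stravinsky_hooks

# A no-op today, but calling it at startup keeps consumers forward-compatible
# with future internal hook registration.
stravinsky_hooks.initialize_hooks()
print(stravinsky_hooks.__version__)  # "0.2.63" in this release's __init__.py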
@@ -6,6 +6,14 @@ Provides interception points for tool calls and model invocations.
  import logging
  from typing import Any, Callable, Dict, List, Optional, Awaitable

+ try:
+     from mcp_bridge.config.hook_config import is_hook_enabled
+ except ImportError:
+
+     def is_hook_enabled(hook_name: str) -> bool:
+         return True
+
+
  logger = logging.getLogger(__name__)


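The guarded import gives the hooks module a working is_hook_enabled() even when mcp_bridge.config.hook_config is unavailable, so hooks can be toggled by configuration without a hard dependency. A sketch of how a hook might gate itself on that flag; the hook name and wrapper function are illustrative, not taken from this diff:

import logging

try:
    from mcp_bridge.config.hook_config import is_hook_enabled
except ImportError:
    # Same fallback as the hunk above: everything is enabled when the config module is absent.
    def is_hook_enabled(hook_name: str) -> bool:
        return True

logger = logging.getLogger(__name__)

def run_tool_messaging_hook(payload: dict) -> None:
    # "tool_messaging" is an illustrative hook name.
    if not is_hook_enabled("tool_messaging"):
        logger.debug("tool_messaging hook disabled by config; skipping")
        return
    logger.info("tool_messaging hook running for %s", payload.get("tool_name", "unknown"))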
@@ -4,9 +4,13 @@ PostToolUse hook for user-friendly tool messaging.

  Outputs concise messages about which agent/tool was used and what it did.
  Format examples:
- - ast-grep('Searching for authentication patterns')
- - delphi:openai/gpt-5.2-medium('Analyzing architecture trade-offs')
- - explore:gemini-3-flash('Finding all API endpoints')
+ - 🔧 ast-grep:stravinsky('Searching for authentication patterns')
+ - 🟡 get_file_contents:github('Fetching src/main.py from user/repo')
+ - 🟣 searchCode:grep-app('Searching GitHub for auth patterns')
+ - 🔵 web_search_exa:MCP_DOCKER('Web search for Docker best practices')
+ - 🟤 find_code:ast-grep('AST search for class definitions')
+ - 🎯 delphi:gpt-5.2-medium('Analyzing architecture trade-offs')
+ - 🎯 explore:gemini-3-flash('Finding all API endpoints')
  """

  import json
@@ -23,7 +27,16 @@ AGENT_MODELS = {
      "delphi": "gpt-5.2-medium",
  }

- # Tool display names
+ # MCP Server emoji mappings
+ SERVER_EMOJIS = {
+     "github": "🟡",
+     "ast-grep": "🟤",
+     "grep-app": "🟣",
+     "MCP_DOCKER": "🔵",
+     "stravinsky": "🔧",
+ }
+
+ # Tool display names (legacy mapping for simple tools)
  TOOL_NAMES = {
      "mcp__stravinsky__ast_grep_search": "ast-grep",
      "mcp__stravinsky__grep_search": "grep",
@@ -41,10 +54,98 @@ TOOL_NAMES = {
  }


+ def parse_mcp_tool_name(tool_name: str) -> tuple[str, str, str]:
+     """
+     Parse MCP tool name into (server, tool_type, emoji).
+
+     Examples:
+         mcp__github__get_file_contents -> ("github", "get_file_contents", "🟡")
+         mcp__stravinsky__grep_search -> ("stravinsky", "grep", "🔧")
+         mcp__ast-grep__find_code -> ("ast-grep", "find_code", "🟤")
+     """
+     if not tool_name.startswith("mcp__"):
+         return ("unknown", tool_name, "🔧")
+
+     # Remove mcp__ prefix and split by __
+     parts = tool_name[5:].split("__", 1)
+     if len(parts) != 2:
+         return ("unknown", tool_name, "🔧")
+
+     server = parts[0]
+     tool_type = parts[1]
+
+     # Get emoji for server
+     emoji = SERVER_EMOJIS.get(server, "🔧")
+
+     # Get simplified tool name if available
+     simple_name = TOOL_NAMES.get(tool_name, tool_type)
+
+     return (server, simple_name, emoji)
+
+
  def extract_description(tool_name: str, params: dict) -> str:
      """Extract a concise description of what the tool did."""

-     # AST-grep
+     # GitHub tools
+     if "github" in tool_name.lower():
+         if "get_file_contents" in tool_name:
+             path = params.get("path", "")
+             repo = params.get("repo", "")
+             owner = params.get("owner", "")
+             return f"Fetching {path} from {owner}/{repo}"
+         elif "create_or_update_file" in tool_name:
+             path = params.get("path", "")
+             return f"Updating {path}"
+         elif "search_repositories" in tool_name:
+             query = params.get("query", "")
+             return f"Searching repos for '{query[:40]}'"
+         elif "search_code" in tool_name:
+             q = params.get("q", "")
+             return f"Searching code for '{q[:40]}'"
+         elif "create_pull_request" in tool_name:
+             title = params.get("title", "")
+             return f"Creating PR: {title[:40]}"
+         elif "get_pull_request" in tool_name or "list_pull_requests" in tool_name:
+             return "Fetching PR details"
+         return "GitHub operation"
+
+     # MCP_DOCKER tools
+     if "MCP_DOCKER" in tool_name:
+         if "web_search_exa" in tool_name:
+             query = params.get("query", "")
+             return f"Web search: '{query[:40]}'"
+         elif "create_entities" in tool_name:
+             entities = params.get("entities", [])
+             count = len(entities)
+             return f"Creating {count} knowledge graph entities"
+         elif "search_nodes" in tool_name:
+             query = params.get("query", "")
+             return f"Searching knowledge graph for '{query[:40]}'"
+         return "Knowledge graph operation"
+
+     # ast-grep tools
+     if "ast-grep" in tool_name or "ast_grep" in tool_name:
+         if "find_code" in tool_name or "search" in tool_name:
+             pattern = params.get("pattern", "")
+             return f"AST search for '{pattern[:40]}'"
+         elif "test_match" in tool_name:
+             return "Testing AST pattern"
+         elif "dump_syntax" in tool_name:
+             return "Dumping syntax tree"
+         return "AST operation"
+
+     # grep-app tools
+     if "grep-app" in tool_name or "grep_app" in tool_name:
+         if "searchCode" in tool_name:
+             query = params.get("query", "")
+             return f"Searching GitHub for '{query[:40]}'"
+         elif "github_file" in tool_name:
+             path = params.get("path", "")
+             repo = params.get("repo", "")
+             return f"Fetching {path} from {repo}"
+         return "grep.app search"
+
+     # AST-grep (stravinsky)
      if "ast_grep" in tool_name:
          pattern = params.get("pattern", "")
          directory = params.get("directory", ".")
@@ -136,9 +237,6 @@ def main():
      if not (tool_name.startswith("mcp__") or tool_name == "Task"):
          sys.exit(0)

-     # Get tool display name
-     display_name = TOOL_NAMES.get(tool_name, tool_name)
-
      # Special handling for Task delegations
      if tool_name == "Task":
          subagent_type = params.get("subagent_type", "unknown")
@@ -148,9 +246,14 @@ def main():
          # Show full agent delegation message
          print(f"🎯 {subagent_type}:{model}('{description}')", file=sys.stderr)
      else:
-         # Regular tool usage
+         # Parse MCP tool name to get server, tool_type, and emoji
+         server, tool_type, emoji = parse_mcp_tool_name(tool_name)
+
+         # Get description of what the tool did
          description = extract_description(tool_name, params)
-         print(f"🔧 {display_name}('{description}')", file=sys.stderr)
+
+         # Format output: emoji tool_type:server('description')
+         print(f"{emoji} {tool_type}:{server}('{description}')", file=sys.stderr)

      sys.exit(0)
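Together, parse_mcp_tool_name() and SERVER_EMOJIS produce the emoji-prefixed one-liners shown in the updated docstring. A quick sketch of the parsing behaviour, with an assumed import path (the hook module's real location is not shown in this diff); the expected outputs follow directly from the SERVER_EMOJIS and TOOL_NAMES mappings added above:

# Hypothetical import path; only the hook's contents appear in this diff.
from stravinsky_hooks.tool_messaging import parse_mcp_tool_name

print(parse_mcp_tool_name("mcp__github__get_file_contents"))
# ("github", "get_file_contents", "🟡")  -- "github" is in SERVER_EMOJIS; full id not in TOOL_NAMES

print(parse_mcp_tool_name("mcp__stravinsky__grep_search"))
# ("stravinsky", "grep", "🔧")  -- TOOL_NAMES shortens the stravinsky grep tool to "grep"

print(parse_mcp_tool_name("Task"))
# ("unknown", "Task", "🔧")  -- non-MCP tool names fall back to the default wrench emoji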