stravinsky 0.2.52__py3-none-any.whl → 0.4.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- mcp_bridge/__init__.py +1 -1
- mcp_bridge/auth/token_store.py +113 -11
- mcp_bridge/cli/__init__.py +6 -0
- mcp_bridge/cli/install_hooks.py +1265 -0
- mcp_bridge/cli/session_report.py +585 -0
- mcp_bridge/config/MANIFEST_SCHEMA.md +305 -0
- mcp_bridge/config/README.md +276 -0
- mcp_bridge/config/hook_config.py +249 -0
- mcp_bridge/config/hooks_manifest.json +138 -0
- mcp_bridge/config/rate_limits.py +222 -0
- mcp_bridge/config/skills_manifest.json +128 -0
- mcp_bridge/hooks/HOOKS_SETTINGS.json +175 -0
- mcp_bridge/hooks/README.md +215 -0
- mcp_bridge/hooks/__init__.py +119 -60
- mcp_bridge/hooks/edit_recovery.py +42 -37
- mcp_bridge/hooks/git_noninteractive.py +89 -0
- mcp_bridge/hooks/keyword_detector.py +30 -0
- mcp_bridge/hooks/manager.py +8 -0
- mcp_bridge/hooks/notification_hook.py +103 -0
- mcp_bridge/hooks/parallel_execution.py +111 -0
- mcp_bridge/hooks/pre_compact.py +82 -183
- mcp_bridge/hooks/rules_injector.py +507 -0
- mcp_bridge/hooks/session_notifier.py +125 -0
- mcp_bridge/{native_hooks → hooks}/stravinsky_mode.py +51 -16
- mcp_bridge/hooks/subagent_stop.py +98 -0
- mcp_bridge/hooks/task_validator.py +73 -0
- mcp_bridge/hooks/tmux_manager.py +141 -0
- mcp_bridge/hooks/todo_continuation.py +90 -0
- mcp_bridge/hooks/todo_delegation.py +88 -0
- mcp_bridge/hooks/tool_messaging.py +267 -0
- mcp_bridge/hooks/truncator.py +21 -17
- mcp_bridge/notifications.py +151 -0
- mcp_bridge/prompts/multimodal.py +24 -3
- mcp_bridge/server.py +214 -49
- mcp_bridge/server_tools.py +445 -0
- mcp_bridge/tools/__init__.py +22 -18
- mcp_bridge/tools/agent_manager.py +220 -32
- mcp_bridge/tools/code_search.py +97 -11
- mcp_bridge/tools/lsp/__init__.py +7 -0
- mcp_bridge/tools/lsp/manager.py +448 -0
- mcp_bridge/tools/lsp/tools.py +637 -150
- mcp_bridge/tools/model_invoke.py +208 -106
- mcp_bridge/tools/query_classifier.py +323 -0
- mcp_bridge/tools/semantic_search.py +3042 -0
- mcp_bridge/tools/templates.py +32 -18
- mcp_bridge/update_manager.py +589 -0
- mcp_bridge/update_manager_pypi.py +299 -0
- stravinsky-0.4.18.dist-info/METADATA +468 -0
- stravinsky-0.4.18.dist-info/RECORD +88 -0
- stravinsky-0.4.18.dist-info/entry_points.txt +5 -0
- mcp_bridge/native_hooks/edit_recovery.py +0 -46
- mcp_bridge/native_hooks/todo_delegation.py +0 -54
- mcp_bridge/native_hooks/truncator.py +0 -23
- stravinsky-0.2.52.dist-info/METADATA +0 -204
- stravinsky-0.2.52.dist-info/RECORD +0 -63
- stravinsky-0.2.52.dist-info/entry_points.txt +0 -3
- /mcp_bridge/{native_hooks → hooks}/context.py +0 -0
- {stravinsky-0.2.52.dist-info → stravinsky-0.4.18.dist-info}/WHEEL +0 -0
mcp_bridge/hooks/tool_messaging.py
ADDED
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+"""
+PostToolUse hook for user-friendly tool messaging.
+
+Outputs concise messages about which agent/tool was used and what it did.
+Format examples:
+- 🔧 ast-grep:stravinsky('Searching for authentication patterns')
+- 🟡 get_file_contents:github('Fetching src/main.py from user/repo')
+- 🟣 searchCode:grep-app('Searching GitHub for auth patterns')
+- 🔵 web_search_exa:MCP_DOCKER('Web search for Docker best practices')
+- 🟤 find_code:ast-grep('AST search for class definitions')
+- 🎯 delphi:gpt-5.2-medium('Analyzing architecture trade-offs')
+- 🎯 explore:gemini-3-flash('Finding all API endpoints')
+"""
+
+import json
+import os
+import sys
+
+# Agent model mappings
+AGENT_MODELS = {
+    "explore": "gemini-3-flash",
+    "dewey": "gemini-3-flash",
+    "code-reviewer": "sonnet",
+    "debugger": "sonnet",
+    "frontend": "gemini-3-pro-high",
+    "delphi": "gpt-5.2-medium",
+}
+
+# MCP Server emoji mappings
+SERVER_EMOJIS = {
+    "github": "🟡",
+    "ast-grep": "🟤",
+    "grep-app": "🟣",
+    "MCP_DOCKER": "🔵",
+    "stravinsky": "🔧",
+}
+
+# Tool display names (legacy mapping for simple tools)
+TOOL_NAMES = {
+    "mcp__stravinsky__ast_grep_search": "ast-grep",
+    "mcp__stravinsky__grep_search": "grep",
+    "mcp__stravinsky__glob_files": "glob",
+    "mcp__stravinsky__lsp_diagnostics": "lsp-diagnostics",
+    "mcp__stravinsky__lsp_hover": "lsp-hover",
+    "mcp__stravinsky__lsp_goto_definition": "lsp-goto-def",
+    "mcp__stravinsky__lsp_find_references": "lsp-find-refs",
+    "mcp__stravinsky__lsp_document_symbols": "lsp-symbols",
+    "mcp__stravinsky__lsp_workspace_symbols": "lsp-workspace-symbols",
+    "mcp__stravinsky__invoke_gemini": "gemini",
+    "mcp__stravinsky__invoke_openai": "openai",
+    "mcp__grep-app__searchCode": "grep.app",
+    "mcp__grep-app__github_file": "github-file",
+}
+
+
+def parse_mcp_tool_name(tool_name: str) -> tuple[str, str, str]:
+    """
+    Parse MCP tool name into (server, tool_type, emoji).
+
+    Examples:
+        mcp__github__get_file_contents -> ("github", "get_file_contents", "🟡")
+        mcp__stravinsky__grep_search -> ("stravinsky", "grep", "🔧")
+        mcp__ast-grep__find_code -> ("ast-grep", "find_code", "🟤")
+    """
+    if not tool_name.startswith("mcp__"):
+        return ("unknown", tool_name, "🔧")
+
+    # Remove mcp__ prefix and split by __
+    parts = tool_name[5:].split("__", 1)
+    if len(parts) != 2:
+        return ("unknown", tool_name, "🔧")
+
+    server = parts[0]
+    tool_type = parts[1]
+
+    # Get emoji for server
+    emoji = SERVER_EMOJIS.get(server, "🔧")
+
+    # Get simplified tool name if available
+    simple_name = TOOL_NAMES.get(tool_name, tool_type)
+
+    return (server, simple_name, emoji)
+
+
+def extract_description(tool_name: str, params: dict) -> str:
+    """Extract a concise description of what the tool did."""
+
+    # GitHub tools
+    if "github" in tool_name.lower():
+        if "get_file_contents" in tool_name:
+            path = params.get("path", "")
+            repo = params.get("repo", "")
+            owner = params.get("owner", "")
+            return f"Fetching {path} from {owner}/{repo}"
+        elif "create_or_update_file" in tool_name:
+            path = params.get("path", "")
+            return f"Updating {path}"
+        elif "search_repositories" in tool_name:
+            query = params.get("query", "")
+            return f"Searching repos for '{query[:40]}'"
+        elif "search_code" in tool_name:
+            q = params.get("q", "")
+            return f"Searching code for '{q[:40]}'"
+        elif "create_pull_request" in tool_name:
+            title = params.get("title", "")
+            return f"Creating PR: {title[:40]}"
+        elif "get_pull_request" in tool_name or "list_pull_requests" in tool_name:
+            return "Fetching PR details"
+        return "GitHub operation"
+
+    # MCP_DOCKER tools
+    if "MCP_DOCKER" in tool_name:
+        if "web_search_exa" in tool_name:
+            query = params.get("query", "")
+            return f"Web search: '{query[:40]}'"
+        elif "create_entities" in tool_name:
+            entities = params.get("entities", [])
+            count = len(entities)
+            return f"Creating {count} knowledge graph entities"
+        elif "search_nodes" in tool_name:
+            query = params.get("query", "")
+            return f"Searching knowledge graph for '{query[:40]}'"
+        return "Knowledge graph operation"
+
+    # ast-grep tools
+    if "ast-grep" in tool_name or "ast_grep" in tool_name:
+        if "find_code" in tool_name or "search" in tool_name:
+            pattern = params.get("pattern", "")
+            return f"AST search for '{pattern[:40]}'"
+        elif "test_match" in tool_name:
+            return "Testing AST pattern"
+        elif "dump_syntax" in tool_name:
+            return "Dumping syntax tree"
+        return "AST operation"
+
+    # grep-app tools
+    if "grep-app" in tool_name or "grep_app" in tool_name:
+        if "searchCode" in tool_name:
+            query = params.get("query", "")
+            return f"Searching GitHub for '{query[:40]}'"
+        elif "github_file" in tool_name:
+            path = params.get("path", "")
+            repo = params.get("repo", "")
+            return f"Fetching {path} from {repo}"
+        return "grep.app search"
+
+    # AST-grep (stravinsky)
+    if "ast_grep" in tool_name:
+        pattern = params.get("pattern", "")
+        directory = params.get("directory", ".")
+        return f"Searching AST in {directory} for '{pattern[:40]}...'"
+
+    # Grep/search
+    if "grep_search" in tool_name or "searchCode" in tool_name:
+        pattern = params.get("pattern", params.get("query", ""))
+        return f"Searching for '{pattern[:40]}...'"
+
+    # Glob
+    if "glob_files" in tool_name:
+        pattern = params.get("pattern", "")
+        return f"Finding files matching '{pattern}'"
+
+    # LSP diagnostics
+    if "lsp_diagnostics" in tool_name:
+        file_path = params.get("file_path", "")
+        filename = os.path.basename(file_path) if file_path else "file"
+        return f"Checking {filename} for errors"
+
+    # LSP hover
+    if "lsp_hover" in tool_name:
+        file_path = params.get("file_path", "")
+        line = params.get("line", "")
+        filename = os.path.basename(file_path) if file_path else "file"
+        return f"Type info for {filename}:{line}"
+
+    # LSP goto definition
+    if "lsp_goto" in tool_name:
+        file_path = params.get("file_path", "")
+        filename = os.path.basename(file_path) if file_path else "symbol"
+        return f"Finding definition in {filename}"
+
+    # LSP find references
+    if "lsp_find_references" in tool_name:
+        file_path = params.get("file_path", "")
+        filename = os.path.basename(file_path) if file_path else "symbol"
+        return f"Finding all references to symbol in {filename}"
+
+    # LSP symbols
+    if "lsp_symbols" in tool_name or "lsp_document_symbols" in tool_name:
+        file_path = params.get("file_path", "")
+        filename = os.path.basename(file_path) if file_path else "file"
+        return f"Getting symbols from {filename}"
+
+    if "lsp_workspace_symbols" in tool_name:
+        query = params.get("query", "")
+        return f"Searching workspace for symbol '{query}'"
+
+    # Gemini invocation
+    if "invoke_gemini" in tool_name:
+        prompt = params.get("prompt", "")
+        # Extract first meaningful line
+        first_line = prompt.split('\n')[0][:50] if prompt else "Processing"
+        return first_line
+
+    # OpenAI invocation
+    if "invoke_openai" in tool_name:
+        prompt = params.get("prompt", "")
+        first_line = prompt.split('\n')[0][:50] if prompt else "Strategic analysis"
+        return first_line
+
+    # GitHub file fetch
+    if "github_file" in tool_name:
+        path = params.get("path", "")
+        repo = params.get("repo", "")
+        return f"Fetching {path} from {repo}"
+
+    # Task delegation
+    if tool_name == "Task":
+        subagent_type = params.get("subagent_type", "unknown")
+        description = params.get("description", "")
+        model = AGENT_MODELS.get(subagent_type, "unknown")
+        return f"{subagent_type}:{model}('{description}')"
+
+    return "Processing"
+
+
+def main():
+    try:
+        # Read hook input from stdin
+        hook_input = json.loads(sys.stdin.read())
+
+        tool_name = hook_input.get("toolName", hook_input.get("tool_name", ""))
+        params = hook_input.get("params", hook_input.get("tool_input", {}))
+
+        # Only output messages for MCP tools and Task delegations
+        if not (tool_name.startswith("mcp__") or tool_name == "Task"):
+            sys.exit(0)
+
+        # Special handling for Task delegations
+        if tool_name == "Task":
+            subagent_type = params.get("subagent_type", "unknown")
+            description = params.get("description", "")
+            model = AGENT_MODELS.get(subagent_type, "unknown")
+
+            # Show full agent delegation message
+            print(f"🎯 {subagent_type}:{model}('{description}')", file=sys.stderr)
+        else:
+            # Parse MCP tool name to get server, tool_type, and emoji
+            server, tool_type, emoji = parse_mcp_tool_name(tool_name)
+
+            # Get description of what the tool did
+            description = extract_description(tool_name, params)

+            # Format output: emoji tool_type:server('description')
+            print(f"{emoji} {tool_type}:{server}('{description}')", file=sys.stderr)
+
+        sys.exit(0)
+
+    except Exception as e:
+        # On error, fail silently (don't disrupt workflow)
+        print(f"Tool messaging hook error: {e}", file=sys.stderr)
+        sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
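For reference, a minimal smoke test of this PostToolUse hook might look like the sketch below. It assumes the script lives at mcp_bridge/hooks/tool_messaging.py (per the file listing above) and is fed the same stdin JSON shape that main() reads; the owner/repo/path values are invented for illustration.

```python
# Hypothetical smoke test: pipe a sample PostToolUse payload into the hook
# and read the formatted message it prints to stderr. The script path and
# the example payload values are assumptions, not part of the release.
import json
import subprocess

payload = {
    "tool_name": "mcp__github__get_file_contents",
    "tool_input": {"owner": "user", "repo": "repo", "path": "src/main.py"},
}

result = subprocess.run(
    ["python3", "mcp_bridge/hooks/tool_messaging.py"],
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

# Per the SERVER_EMOJIS and extract_description logic above, this should print:
# 🟡 get_file_contents:github('Fetching src/main.py from user/repo')
print(result.stderr.strip())
```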
mcp_bridge/hooks/truncator.py
CHANGED
@@ -1,19 +1,23 @@
-
-
-
-"""
+import os
+import sys
+import json
 
-
+MAX_CHARS = 30000
 
-
-
-
-
-
-
-
-
-
-
-
+def main():
+    try:
+        data = json.load(sys.stdin)
+        tool_response = data.get("tool_response", "")
+    except Exception:
+        return
+
+    if len(tool_response) > MAX_CHARS:
+        header = f"[TRUNCATED - {len(tool_response)} chars reduced to {MAX_CHARS}]\n"
+        footer = "\n...[TRUNCATED]"
+        truncated = tool_response[:MAX_CHARS]
+        print(header + truncated + footer)
+    else:
+        print(tool_response)
+
+if __name__ == "__main__":
+    main()
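As a rough illustration of the rewritten truncator, the sketch below feeds it an oversized tool_response and checks that the output is capped at MAX_CHARS plus the truncation markers. The script path is assumed from the file listing; the payload is made up.

```python
# Hypothetical check of the truncation behavior: a 50,000-character
# tool_response should come back capped at 30,000 characters plus the
# [TRUNCATED ...] header and footer. Path and payload are assumptions.
import json
import subprocess

payload = {"tool_response": "x" * 50_000}

result = subprocess.run(
    ["python3", "mcp_bridge/hooks/truncator.py"],
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

print(result.stdout.splitlines()[0])  # [TRUNCATED - 50000 chars reduced to 30000]
print(len(result.stdout))             # roughly 30,000 plus header and footer
```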
mcp_bridge/notifications.py
ADDED
@@ -0,0 +1,151 @@
+"""
+Desktop Notifications Manager for Stravinsky.
+
+Provides cross-platform desktop notifications (macOS, Linux, Windows)
+for long-running operations like codebase indexing.
+
+Supports:
+- Non-blocking async notifications
+- Platform-specific backends
+- Notification queuing
+"""
+
+import logging
+import platform
+import subprocess
+from pathlib import Path
+from typing import Dict, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class NotificationManager:
+    """
+    Cross-platform desktop notification manager.
+
+    Provides non-blocking notifications with automatic platform detection.
+    """
+
+    def __init__(self, app_name: str = "Stravinsky"):
+        self.app_name = app_name
+        self.system = platform.system()
+
+    def _get_notification_command(
+        self,
+        title: str,
+        message: str,
+        sound: bool = True
+    ) -> Optional[list]:
+        """Get platform-specific notification command."""
+        if self.system == "Darwin":  # macOS
+            script = f'display notification "{message}" with title "{title}"'
+            if sound:
+                script += ' sound name "Glass"'
+            return ["osascript", "-e", script]
+
+        elif self.system == "Linux":
+            cmd = ["notify-send", "--app-name", self.app_name, title, message]
+            if sound:
+                cmd.extend(["--urgency=normal"])
+            return cmd
+
+        elif self.system == "Windows":
+            ps_script = f"""
+            [Windows.UI.Notifications.ToastNotificationManager, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
+            [Windows.UI.Notifications.ToastNotification, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
+            [Windows.Data.Xml.Dom.XmlDocument, Windows.Data.Xml.Dom.XmlDocument, ContentType = WindowsRuntime] | Out-Null
+
+            $template = @"
+            <toast>
+                <visual>
+                    <binding template="ToastGeneric">
+                        <text>{title}</text>
+                        <text>{message}</text>
+                    </binding>
+                </visual>
+            </toast>
+            "@
+
+            $xml = New-Object Windows.Data.Xml.Dom.XmlDocument
+            $xml.LoadXml($template)
+            $toast = New-Object Windows.UI.Notifications.ToastNotification $xml
+            [Windows.UI.Notifications.ToastNotificationManager]::CreateToastNotifier("{self.app_name}").Show($toast)
+            """
+            return ["powershell", "-Command", ps_script]
+
+        return None
+
+    def _send_notification_sync(
+        self,
+        title: str,
+        message: str,
+        sound: bool = True
+    ) -> bool:
+        """Send notification synchronously (blocking)."""
+        cmd = self._get_notification_command(title, message, sound)
+
+        if not cmd:
+            logger.warning(
+                f"[Notifications] Desktop notifications not supported on {self.system}"
+            )
+            return False
+
+        try:
+            subprocess.Popen(
+                cmd,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+                start_new_session=True
+            )
+            logger.debug(f"[Notifications] Sent: {title}")
+            return True
+        except FileNotFoundError:
+            logger.warning(f"[Notifications] Command not found: {cmd[0]}")
+            return False
+        except Exception as e:
+            logger.error(f"[Notifications] Failed to send notification: {e}")
+            return False
+
+    async def notify_reindex_start(self, project_path: str) -> bool:
+        """Notify that codebase reindexing has started."""
+        path = Path(project_path).name or Path(project_path).parent.name
+        title = "Codebase Indexing Started"
+        message = f"Indexing {path}..."
+        return self._send_notification_sync(title, message, sound=True)
+
+    async def notify_reindex_complete(self, stats: Dict) -> bool:
+        """Notify that codebase reindexing is complete."""
+        indexed = stats.get("indexed", 0)
+        pruned = stats.get("pruned", 0)
+        time_taken = stats.get("time_taken", 0)
+
+        title = "Codebase Indexing Complete"
+        message = f"Indexed {indexed} chunks, pruned {pruned} stale entries in {time_taken}s"
+
+        return self._send_notification_sync(title, message, sound=True)
+
+    async def notify_reindex_error(self, error_message: str) -> bool:
+        """Notify that codebase reindexing failed."""
+        title = "Codebase Indexing Failed"
+        # Truncate long error messages
+        message = error_message[:100] + "..." if len(error_message) > 100 else error_message
+
+        return self._send_notification_sync(title, message, sound=True)
+
+
+# Global singleton instance
+_notification_manager: Optional[NotificationManager] = None
+
+
+def get_notification_manager() -> NotificationManager:
+    """Get or create the global notification manager instance."""
+    global _notification_manager
+    if _notification_manager is None:
+        _notification_manager = NotificationManager()
+    return _notification_manager
+
+
+def reset_notification_manager() -> None:
+    """Reset the global notification manager (for testing)."""
+    global _notification_manager
+    _notification_manager = None
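A minimal usage sketch for the new NotificationManager, assuming the module is importable as mcp_bridge.notifications (matching the file path above); the project path and stats values are placeholders. The notify_* methods are declared async, so they are driven here with asyncio.

```python
# Minimal sketch, assuming mcp_bridge.notifications is on the import path.
# The stats keys mirror the ones notify_reindex_complete() reads.
import asyncio

from mcp_bridge.notifications import get_notification_manager


async def demo() -> None:
    manager = get_notification_manager()  # process-wide singleton
    await manager.notify_reindex_start("/path/to/project")
    # ... indexing happens here ...
    await manager.notify_reindex_complete(
        {"indexed": 1200, "pruned": 34, "time_taken": 12}
    )


asyncio.run(demo())
```

Under the hood this shells out to osascript on macOS, notify-send on Linux, and a PowerShell toast script on Windows; on other platforms it logs a warning and returns False.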
mcp_bridge/prompts/multimodal.py
CHANGED
@@ -18,32 +18,53 @@ MULTIMODAL_SYSTEM_PROMPT = """You interpret media files that cannot be read as p
 
 Your job: examine the attached file and extract ONLY what was requested.
 
+## TOKEN OPTIMIZATION (CRITICAL)
+
+You exist to REDUCE context token consumption. Instead of passing 50k tokens of raw
+image/PDF data to the main agent, you summarize into 500-2000 tokens of actionable
+information. This is a 95%+ reduction in context usage.
+
 When to use you:
 - Media files the Read tool cannot interpret
 - Extracting specific information or summaries from documents
 - Describing visual content in images or diagrams
 - When analyzed/extracted data is needed, not raw file contents
+- UI screenshots for analysis (NOT for exact CSS recreation)
+- PDF documents requiring data extraction
 
 When NOT to use you:
 - Source code or plain text files needing exact contents (use Read)
 - Files that need editing afterward (need literal content from Read)
 - Simple file reading where no interpretation is needed
 
-How you work
+## How you work
+
 1. Receive a file path and a goal describing what to extract
-2.
-
+2. Use invoke_gemini with the image/PDF for vision analysis:
+   ```
+   invoke_gemini(
+       prompt="Analyze this image: [goal]",
+       model="gemini-3-flash",
+       image_path="/path/to/file.png",  # Vision API
+       agent_context={"agent_type": "multimodal"}
+   )
+   ```
+3. Return ONLY the relevant extracted information (compressed summary)
 4. The main agent never processes the raw file - you save context tokens
 
+## Output Guidelines
+
 For PDFs: extract text, structure, tables, data from specific sections
 For images: describe layouts, UI elements, text, diagrams, charts
 For diagrams: explain relationships, flows, architecture depicted
+For screenshots: describe visible UI, key elements, layout structure
 
 Response rules:
 - Return extracted information directly, no preamble
 - If info not found, state clearly what's missing
 - Match the language of the request
 - Be thorough on the goal, concise on everything else
+- Keep response under 2000 tokens when possible
 
 Your output goes straight to the main agent for continued work."""
 
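A quick sanity check of the token-reduction figure added to the prompt: summarizing roughly 50k tokens of raw media into at most about 2k tokens of extracted text works out to about a 96% reduction, consistent with the "95%+" wording.

```python
# Arithmetic behind the "95%+ reduction" claim in the prompt above.
raw_tokens = 50_000      # raw image/PDF content passed verbatim
summary_tokens = 2_000   # upper end of the summarized output
reduction = 1 - summary_tokens / raw_tokens
print(f"{reduction:.0%}")  # 96%
```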