stravinsky 0.2.52__py3-none-any.whl → 0.2.67__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (41)
  1. mcp_bridge/__init__.py +1 -1
  2. mcp_bridge/cli/__init__.py +6 -0
  3. mcp_bridge/cli/install_hooks.py +1265 -0
  4. mcp_bridge/cli/session_report.py +585 -0
  5. mcp_bridge/hooks/HOOKS_SETTINGS.json +175 -0
  6. mcp_bridge/hooks/README.md +215 -0
  7. mcp_bridge/hooks/__init__.py +117 -63
  8. mcp_bridge/hooks/edit_recovery.py +42 -37
  9. mcp_bridge/hooks/git_noninteractive.py +89 -0
  10. mcp_bridge/hooks/keyword_detector.py +30 -0
  11. mcp_bridge/hooks/notification_hook.py +103 -0
  12. mcp_bridge/hooks/parallel_execution.py +111 -0
  13. mcp_bridge/hooks/pre_compact.py +82 -183
  14. mcp_bridge/hooks/rules_injector.py +507 -0
  15. mcp_bridge/hooks/session_notifier.py +125 -0
  16. mcp_bridge/{native_hooks → hooks}/stravinsky_mode.py +51 -16
  17. mcp_bridge/hooks/subagent_stop.py +98 -0
  18. mcp_bridge/hooks/task_validator.py +73 -0
  19. mcp_bridge/hooks/tmux_manager.py +141 -0
  20. mcp_bridge/hooks/todo_continuation.py +90 -0
  21. mcp_bridge/hooks/todo_delegation.py +88 -0
  22. mcp_bridge/hooks/tool_messaging.py +164 -0
  23. mcp_bridge/hooks/truncator.py +21 -17
  24. mcp_bridge/prompts/multimodal.py +24 -3
  25. mcp_bridge/server.py +12 -1
  26. mcp_bridge/server_tools.py +5 -0
  27. mcp_bridge/tools/agent_manager.py +30 -11
  28. mcp_bridge/tools/code_search.py +81 -9
  29. mcp_bridge/tools/lsp/tools.py +6 -2
  30. mcp_bridge/tools/model_invoke.py +76 -1
  31. mcp_bridge/tools/templates.py +32 -18
  32. stravinsky-0.2.67.dist-info/METADATA +284 -0
  33. {stravinsky-0.2.52.dist-info → stravinsky-0.2.67.dist-info}/RECORD +36 -23
  34. stravinsky-0.2.67.dist-info/entry_points.txt +5 -0
  35. mcp_bridge/native_hooks/edit_recovery.py +0 -46
  36. mcp_bridge/native_hooks/todo_delegation.py +0 -54
  37. mcp_bridge/native_hooks/truncator.py +0 -23
  38. stravinsky-0.2.52.dist-info/METADATA +0 -204
  39. stravinsky-0.2.52.dist-info/entry_points.txt +0 -3
  40. /mcp_bridge/{native_hooks → hooks}/context.py +0 -0
  41. {stravinsky-0.2.52.dist-info → stravinsky-0.2.67.dist-info}/WHEEL +0 -0
mcp_bridge/hooks/tool_messaging.py ADDED
@@ -0,0 +1,164 @@
+ #!/usr/bin/env python3
+ """
+ PostToolUse hook for user-friendly tool messaging.
+
+ Outputs concise messages about which agent/tool was used and what it did.
+ Format examples:
+ - ast-grep('Searching for authentication patterns')
+ - delphi:openai/gpt-5.2-medium('Analyzing architecture trade-offs')
+ - explore:gemini-3-flash('Finding all API endpoints')
+ """
+
+ import json
+ import os
+ import sys
+
+ # Agent model mappings
+ AGENT_MODELS = {
+     "explore": "gemini-3-flash",
+     "dewey": "gemini-3-flash",
+     "code-reviewer": "sonnet",
+     "debugger": "sonnet",
+     "frontend": "gemini-3-pro-high",
+     "delphi": "gpt-5.2-medium",
+ }
+
+ # Tool display names
+ TOOL_NAMES = {
+     "mcp__stravinsky__ast_grep_search": "ast-grep",
+     "mcp__stravinsky__grep_search": "grep",
+     "mcp__stravinsky__glob_files": "glob",
+     "mcp__stravinsky__lsp_diagnostics": "lsp-diagnostics",
+     "mcp__stravinsky__lsp_hover": "lsp-hover",
+     "mcp__stravinsky__lsp_goto_definition": "lsp-goto-def",
+     "mcp__stravinsky__lsp_find_references": "lsp-find-refs",
+     "mcp__stravinsky__lsp_document_symbols": "lsp-symbols",
+     "mcp__stravinsky__lsp_workspace_symbols": "lsp-workspace-symbols",
+     "mcp__stravinsky__invoke_gemini": "gemini",
+     "mcp__stravinsky__invoke_openai": "openai",
+     "mcp__grep-app__searchCode": "grep.app",
+     "mcp__grep-app__github_file": "github-file",
+ }
+
+
+ def extract_description(tool_name: str, params: dict) -> str:
+     """Extract a concise description of what the tool did."""
+
+     # AST-grep
+     if "ast_grep" in tool_name:
+         pattern = params.get("pattern", "")
+         directory = params.get("directory", ".")
+         return f"Searching AST in {directory} for '{pattern[:40]}...'"
+
+     # Grep/search
+     if "grep_search" in tool_name or "searchCode" in tool_name:
+         pattern = params.get("pattern", params.get("query", ""))
+         return f"Searching for '{pattern[:40]}...'"
+
+     # Glob
+     if "glob_files" in tool_name:
+         pattern = params.get("pattern", "")
+         return f"Finding files matching '{pattern}'"
+
+     # LSP diagnostics
+     if "lsp_diagnostics" in tool_name:
+         file_path = params.get("file_path", "")
+         filename = os.path.basename(file_path) if file_path else "file"
+         return f"Checking {filename} for errors"
+
+     # LSP hover
+     if "lsp_hover" in tool_name:
+         file_path = params.get("file_path", "")
+         line = params.get("line", "")
+         filename = os.path.basename(file_path) if file_path else "file"
+         return f"Type info for {filename}:{line}"
+
+     # LSP goto definition
+     if "lsp_goto" in tool_name:
+         file_path = params.get("file_path", "")
+         filename = os.path.basename(file_path) if file_path else "symbol"
+         return f"Finding definition in {filename}"
+
+     # LSP find references
+     if "lsp_find_references" in tool_name:
+         file_path = params.get("file_path", "")
+         filename = os.path.basename(file_path) if file_path else "symbol"
+         return f"Finding all references to symbol in {filename}"
+
+     # LSP symbols
+     if "lsp_symbols" in tool_name or "lsp_document_symbols" in tool_name:
+         file_path = params.get("file_path", "")
+         filename = os.path.basename(file_path) if file_path else "file"
+         return f"Getting symbols from {filename}"
+
+     if "lsp_workspace_symbols" in tool_name:
+         query = params.get("query", "")
+         return f"Searching workspace for symbol '{query}'"
+
+     # Gemini invocation
+     if "invoke_gemini" in tool_name:
+         prompt = params.get("prompt", "")
+         # Extract first meaningful line
+         first_line = prompt.split('\n')[0][:50] if prompt else "Processing"
+         return first_line
+
+     # OpenAI invocation
+     if "invoke_openai" in tool_name:
+         prompt = params.get("prompt", "")
+         first_line = prompt.split('\n')[0][:50] if prompt else "Strategic analysis"
+         return first_line
+
+     # GitHub file fetch
+     if "github_file" in tool_name:
+         path = params.get("path", "")
+         repo = params.get("repo", "")
+         return f"Fetching {path} from {repo}"
+
+     # Task delegation
+     if tool_name == "Task":
+         subagent_type = params.get("subagent_type", "unknown")
+         description = params.get("description", "")
+         model = AGENT_MODELS.get(subagent_type, "unknown")
+         return f"{subagent_type}:{model}('{description}')"
+
+     return "Processing"
+
+
+ def main():
+     try:
+         # Read hook input from stdin
+         hook_input = json.loads(sys.stdin.read())
+
+         tool_name = hook_input.get("toolName", hook_input.get("tool_name", ""))
+         params = hook_input.get("params", hook_input.get("tool_input", {}))
+
+         # Only output messages for MCP tools and Task delegations
+         if not (tool_name.startswith("mcp__") or tool_name == "Task"):
+             sys.exit(0)
+
+         # Get tool display name
+         display_name = TOOL_NAMES.get(tool_name, tool_name)
+
+         # Special handling for Task delegations
+         if tool_name == "Task":
+             subagent_type = params.get("subagent_type", "unknown")
+             description = params.get("description", "")
+             model = AGENT_MODELS.get(subagent_type, "unknown")
+
+             # Show full agent delegation message
+             print(f"🎯 {subagent_type}:{model}('{description}')", file=sys.stderr)
+         else:
+             # Regular tool usage
+             description = extract_description(tool_name, params)
+             print(f"🔧 {display_name}('{description}')", file=sys.stderr)
+
+         sys.exit(0)
+
+     except Exception as e:
+         # On error, fail silently (don't disrupt workflow)
+         print(f"Tool messaging hook error: {e}", file=sys.stderr)
+         sys.exit(0)
+
+
+ if __name__ == "__main__":
+     main()
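
The hook's protocol is visible in main(): a JSON payload on stdin, a one-line status message on stderr, and exit code 0 in every case. A minimal sketch of exercising it locally; the script path and the payload are illustrative, but the key names (`tool_name`/`tool_input`, or `toolName`/`params`) are the ones the script reads:

```python
import json
import subprocess

# Hypothetical PostToolUse payload; key names mirror what main() reads from stdin.
payload = {
    "tool_name": "mcp__stravinsky__grep_search",
    "tool_input": {"pattern": "authenticate", "directory": "src"},
}

proc = subprocess.run(
    ["python", "mcp_bridge/hooks/tool_messaging.py"],  # assumed install path
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

# Expected on stderr: 🔧 grep('Searching for 'authenticate'...')
print(proc.stderr.strip())
```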
mcp_bridge/hooks/truncator.py CHANGED
@@ -1,19 +1,23 @@
- """
- Tool output truncator hook.
- Limits the size of tool outputs to prevent context bloat.
- """
+ import os
+ import sys
+ import json

- from typing import Any, Dict, Optional
+ MAX_CHARS = 30000

- async def output_truncator_hook(tool_name: str, arguments: Dict[str, Any], output: str) -> Optional[str]:
-     """
-     Truncates tool output if it exceeds a certain length.
-     """
-     MAX_LENGTH = 30000  # 30k characters limit
-
-     if len(output) > MAX_LENGTH:
-         truncated = output[:MAX_LENGTH]
-         summary = f"\n\n... (Result truncated from {len(output)} chars to {MAX_LENGTH} chars) ..."
-         return truncated + summary
-
-     return None
+ def main():
+     try:
+         data = json.load(sys.stdin)
+         tool_response = data.get("tool_response", "")
+     except Exception:
+         return
+
+     if len(tool_response) > MAX_CHARS:
+         header = f"[TRUNCATED - {len(tool_response)} chars reduced to {MAX_CHARS}]\n"
+         footer = "\n...[TRUNCATED]"
+         truncated = tool_response[:MAX_CHARS]
+         print(header + truncated + footer)
+     else:
+         print(tool_response)
+
+ if __name__ == "__main__":
+     main()
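
The rewrite turns the truncator from an in-process async hook into a standalone script: it reads the hook payload from stdin and prints the (possibly truncated) `tool_response` to stdout. A quick sketch of the behavior, assuming the script path from the file list above:

```python
import json
import subprocess

# A response larger than MAX_CHARS (30000) should come back clipped.
payload = {"tool_response": "x" * 40_000}

proc = subprocess.run(
    ["python", "mcp_bridge/hooks/truncator.py"],  # path taken from the file list
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

# First stdout line: [TRUNCATED - 40000 chars reduced to 30000]
print(proc.stdout.splitlines()[0])
```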
mcp_bridge/prompts/multimodal.py CHANGED
@@ -18,32 +18,53 @@ MULTIMODAL_SYSTEM_PROMPT = """You interpret media files that cannot be read as p

  Your job: examine the attached file and extract ONLY what was requested.

+ ## TOKEN OPTIMIZATION (CRITICAL)
+
+ You exist to REDUCE context token consumption. Instead of passing 50k tokens of raw
+ image/PDF data to the main agent, you summarize into 500-2000 tokens of actionable
+ information. This is a 95%+ reduction in context usage.
+
  When to use you:
  - Media files the Read tool cannot interpret
  - Extracting specific information or summaries from documents
  - Describing visual content in images or diagrams
  - When analyzed/extracted data is needed, not raw file contents
+ - UI screenshots for analysis (NOT for exact CSS recreation)
+ - PDF documents requiring data extraction

  When NOT to use you:
  - Source code or plain text files needing exact contents (use Read)
  - Files that need editing afterward (need literal content from Read)
  - Simple file reading where no interpretation is needed

- How you work:
+ ## How you work
+
  1. Receive a file path and a goal describing what to extract
- 2. Read and analyze the file deeply
- 3. Return ONLY the relevant extracted information
+ 2. Use invoke_gemini with the image/PDF for vision analysis:
+    ```
+    invoke_gemini(
+        prompt="Analyze this image: [goal]",
+        model="gemini-3-flash",
+        image_path="/path/to/file.png",  # Vision API
+        agent_context={"agent_type": "multimodal"}
+    )
+    ```
+ 3. Return ONLY the relevant extracted information (compressed summary)
  4. The main agent never processes the raw file - you save context tokens

+ ## Output Guidelines
+
  For PDFs: extract text, structure, tables, data from specific sections
  For images: describe layouts, UI elements, text, diagrams, charts
  For diagrams: explain relationships, flows, architecture depicted
+ For screenshots: describe visible UI, key elements, layout structure

  Response rules:
  - Return extracted information directly, no preamble
  - If info not found, state clearly what's missing
  - Match the language of the request
  - Be thorough on the goal, concise on everything else
+ - Keep response under 2000 tokens when possible

  Your output goes straight to the main agent for continued work."""

mcp_bridge/server.py CHANGED
@@ -428,6 +428,14 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:

         result_content = await lsp_servers()

+     elif name == "lsp_diagnostics":
+         from .tools.code_search import lsp_diagnostics
+
+         result_content = await lsp_diagnostics(
+             file_path=arguments["file_path"],
+             severity=arguments.get("severity", "all"),
+         )
+
     else:
         result_content = f"Unknown tool: {name}"

@@ -441,7 +449,10 @@
         processed_text = await hook_manager.execute_post_tool_call(
             name, arguments, result_content[0].text
         )
-         result_content[0].text = processed_text
+         # Only update if processed_text is non-empty to avoid empty text blocks
+         # (API error: cache_control cannot be set for empty text blocks)
+         if processed_text:
+             result_content[0].text = processed_text
     elif isinstance(result_content, str):
         result_content = await hook_manager.execute_post_tool_call(
            name, arguments, result_content
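
The new branch simply forwards the tool arguments to the code_search implementation. A hedged sketch of what a call through call_tool looks like; the import path is an assumption, and the file path is illustrative, but the argument names come from the branch above:

```python
import asyncio

from mcp_bridge.server import call_tool  # assumed import path for the handler above


async def demo():
    # "severity" is optional; the new branch defaults it to "all".
    blocks = await call_tool(
        "lsp_diagnostics",
        {"file_path": "mcp_bridge/server.py", "severity": "error"},
    )
    print(blocks[0].text)


asyncio.run(demo())
```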
mcp_bridge/server_tools.py CHANGED
@@ -357,6 +357,11 @@ def get_tool_definitions() -> List[Tool]:
                     "description": "Maximum execution time in seconds",
                     "default": 300,
                 },
+                 "blocking": {
+                     "type": "boolean",
+                     "description": "If true, wait for agent completion and return result directly. Recommended for delphi consultations.",
+                     "default": False,
+                 },
             },
             "required": ["prompt"],
         },
mcp_bridge/tools/agent_manager.py CHANGED
@@ -54,6 +54,18 @@ AGENT_COST_TIERS = {
     "_default": "EXPENSIVE",  # Claude Sonnet 4.5 via CLI
 }

+ # Display model names for output formatting (user-visible)
+ AGENT_DISPLAY_MODELS = {
+     "explore": "gemini-3-flash",
+     "dewey": "gemini-3-flash",
+     "document_writer": "gemini-3-flash",
+     "multimodal": "gemini-3-flash",
+     "frontend": "gemini-3-pro-high",
+     "delphi": "gpt-5.2",
+     "planner": "opus-4.5",
+     "_default": "sonnet-4.5",
+ }
+

 @dataclass
 class AgentTask:
@@ -631,6 +643,7 @@ async def agent_spawn(
     model: str = "gemini-3-flash",
     thinking_budget: int = 0,
     timeout: int = 300,
+     blocking: bool = False,
 ) -> str:
     """
     Spawn a background agent.
@@ -642,9 +655,10 @@ async def agent_spawn(
         model: Model to use (gemini-3-flash, gemini-2.0-flash, claude)
         thinking_budget: Reserved reasoning tokens
         timeout: Execution timeout in seconds
+         blocking: If True, wait for completion and return result directly (use for delphi)

     Returns:
-         Task ID and instructions
+         Task ID and instructions, or full result if blocking=True
     """
     manager = get_manager()

@@ -788,16 +802,18 @@ CONSTRAINTS:
         timeout=timeout,
     )

-     return f"""🚀 Background agent spawned successfully.
+     # Get display model for concise output
+     display_model = AGENT_DISPLAY_MODELS.get(agent_type, AGENT_DISPLAY_MODELS["_default"])
+     short_desc = (description or prompt[:50]).strip()

- **Task ID**: {task_id}
- **Agent Type**: {agent_type}
- **Description**: {description or prompt[:50]}
+     # If blocking mode (recommended for delphi), wait for completion
+     if blocking:
+         result = manager.get_output(task_id, block=True, timeout=timeout)
+         return f"{agent_type}:{display_model}('{short_desc}') [BLOCKING]\n\n{result}"

- The agent is now running. Use:
- - `agent_progress(task_id="{task_id}")` to monitor real-time progress
- - `agent_output(task_id="{task_id}")` to get final result
- - `agent_cancel(task_id="{task_id}")` to stop the agent"""
+     # Concise format: AgentType:model('description')
+     return f"""{agent_type}:{display_model}('{short_desc}')
+ task_id={task_id}"""


 async def agent_output(task_id: str, block: bool = False) -> str:
@@ -887,7 +903,7 @@ async def agent_list() -> str:
     if not tasks:
         return "No background agent tasks found."

-     lines = ["**Background Agent Tasks**", ""]
+     lines = []

     for t in sorted(tasks, key=lambda x: x.get("created_at", ""), reverse=True):
         status_emoji = {
@@ -898,8 +914,11 @@ async def agent_list() -> str:
             "cancelled": "⚠️",
         }.get(t["status"], "❓")

+         agent_type = t.get("agent_type", "unknown")
+         display_model = AGENT_DISPLAY_MODELS.get(agent_type, AGENT_DISPLAY_MODELS["_default"])
         desc = t.get("description", t.get("prompt", "")[:40])
-         lines.append(f"- {status_emoji} [{t['id']}] {t['agent_type']}: {desc}")
+         # Concise format: status agent:model('desc') id=xxx
+         lines.append(f"{status_emoji} {agent_type}:{display_model}('{desc}') id={t['id']}")

     return "\n".join(lines)

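The practical effect of the blocking flag: agent_spawn no longer returns the multi-line "Background agent spawned" banner but a one-liner, and with blocking=True it waits and appends the agent's full output. A hedged usage sketch; only `blocking` and `timeout` are confirmed by this hunk, the other parameter names are inferred from the surrounding docstring, and the import path is assumed:

```python
import asyncio

from mcp_bridge.tools.agent_manager import agent_spawn  # assumed import path


async def demo():
    # Expected shape of the return value with blocking=True:
    #   delphi:gpt-5.2('Review the migration plan') [BLOCKING]
    #
    #   <full agent output>
    result = await agent_spawn(
        prompt="Review the migration plan for breaking changes",
        agent_type="delphi",          # assumed parameter name
        description="Review the migration plan",
        blocking=True,
        timeout=600,
    )
    print(result)


asyncio.run(demo())
```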
mcp_bridge/tools/code_search.py CHANGED
@@ -15,17 +15,21 @@ from pathlib import Path
 async def lsp_diagnostics(file_path: str, severity: str = "all") -> str:
     """
     Get diagnostics (errors, warnings) for a file using language server.
-
+
     For TypeScript/JavaScript, uses `tsc` or `biome`.
     For Python, uses `pyright` or `ruff`.
-
+
     Args:
         file_path: Path to the file to analyze
         severity: Filter by severity (error, warning, information, hint, all)
-
+
     Returns:
         Formatted diagnostics output.
     """
+     # USER-VISIBLE NOTIFICATION
+     import sys
+     print(f"🩺 LSP-DIAG: file={file_path} severity={severity}", file=sys.stderr)
+
     path = Path(file_path)
     if not path.exists():
         return f"Error: File not found: {file_path}"
@@ -49,7 +53,7 @@ async def lsp_diagnostics(file_path: str, severity: str = "all") -> str:
     elif suffix == ".py":
         # Use ruff for Python diagnostics
         result = subprocess.run(
-             ["ruff", "check", str(path), "--output-format=text"],
+             ["ruff", "check", str(path), "--output-format=concise"],
             capture_output=True,
             text=True,
             timeout=30,
@@ -70,21 +74,84 @@ async def lsp_diagnostics(file_path: str, severity: str = "all") -> str:
         return f"Error: {str(e)}"


+ async def check_ai_comment_patterns(file_path: str) -> str:
+     """
+     Detect AI-generated or placeholder comment patterns that indicate incomplete work.
+
+     Patterns detected:
+     - # TODO: implement, # FIXME, # placeholder
+     - // TODO, // FIXME, // placeholder
+     - AI-style verbose comments: "This function handles...", "This method is responsible for..."
+     - Placeholder phrases: "implement this", "add logic here", "your code here"
+
+     Args:
+         file_path: Path to the file to check
+
+     Returns:
+         List of detected AI-style patterns with line numbers, or "No AI patterns detected"
+     """
+     path = Path(file_path)
+     if not path.exists():
+         return f"Error: File not found: {file_path}"
+
+     # Patterns that indicate AI-generated or placeholder code
+     ai_patterns = [
+         # Placeholder comments
+         r"#\s*(TODO|FIXME|XXX|HACK):\s*(implement|add|placeholder|your code)",
+         r"//\s*(TODO|FIXME|XXX|HACK):\s*(implement|add|placeholder|your code)",
+         # AI-style verbose descriptions
+         r"#\s*This (function|method|class) (handles|is responsible for|manages|processes)",
+         r"//\s*This (function|method|class) (handles|is responsible for|manages|processes)",
+         r'"""This (function|method|class) (handles|is responsible for|manages|processes)',
+         # Placeholder implementations
+         r"pass\s*#\s*(TODO|implement|placeholder)",
+         r"raise NotImplementedError.*implement",
+         # Common AI filler phrases
+         r"#.*\b(as needed|as required|as appropriate|if necessary)\b",
+         r"//.*\b(as needed|as required|as appropriate|if necessary)\b",
+     ]
+
+     import re
+
+     try:
+         content = path.read_text()
+         lines = content.split("\n")
+         findings = []
+
+         for i, line in enumerate(lines, 1):
+             for pattern in ai_patterns:
+                 if re.search(pattern, line, re.IGNORECASE):
+                     findings.append(f"  Line {i}: {line.strip()[:80]}")
+                     break
+
+         if findings:
+             return f"AI/Placeholder patterns detected in {file_path}:\n" + "\n".join(findings)
+         return "No AI patterns detected"
+
+     except Exception as e:
+         return f"Error reading file: {str(e)}"
+
+
 async def ast_grep_search(pattern: str, directory: str = ".", language: str = "") -> str:
     """
     Search codebase using ast-grep for structural patterns.
-
+
     ast-grep uses AST-aware pattern matching, finding code by structure
     rather than just text. More precise than regex for code search.
-
+
     Args:
         pattern: ast-grep pattern to search for
         directory: Directory to search in
         language: Filter by language (typescript, python, rust, etc.)
-
+
     Returns:
         Matched code locations and snippets.
     """
+     # USER-VISIBLE NOTIFICATION
+     import sys
+     lang_info = f" lang={language}" if language else ""
+     print(f"🔍 AST-GREP: pattern='{pattern[:50]}...'{lang_info}", file=sys.stderr)
+
     try:
         cmd = ["sg", "run", "-p", pattern, directory]
         if language:
@@ -129,15 +196,20 @@ async def ast_grep_search(pattern: str, directory: str = ".", language: str = ""
 async def grep_search(pattern: str, directory: str = ".", file_pattern: str = "") -> str:
     """
     Fast text search using ripgrep.
-
+
     Args:
         pattern: Search pattern (supports regex)
         directory: Directory to search in
         file_pattern: Glob pattern to filter files (e.g., "*.py", "*.ts")
-
+
     Returns:
         Matched lines with file paths and line numbers.
     """
+     # USER-VISIBLE NOTIFICATION
+     import sys
+     glob_info = f" glob={file_pattern}" if file_pattern else ""
+     print(f"🔎 GREP: pattern='{pattern[:50]}'{glob_info} dir={directory}", file=sys.stderr)
+
     try:
         cmd = ["rg", "--line-number", "--max-count=50", pattern, directory]
         if file_pattern:
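
check_ai_comment_patterns is a plain coroutine alongside the other code_search helpers, so it can be awaited directly. A small sketch, assuming the module import path implied by the file list; the file being scanned is just an example:

```python
import asyncio

from mcp_bridge.tools.code_search import check_ai_comment_patterns  # assumed import path


async def demo():
    # Flags lines like "# TODO: implement caching" or
    # "# This function handles retries" with their line numbers.
    report = await check_ai_comment_patterns("mcp_bridge/hooks/truncator.py")
    print(report)


asyncio.run(demo())
```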
mcp_bridge/tools/lsp/tools.py CHANGED
@@ -48,15 +48,19 @@ def _position_to_offset(content: str, line: int, character: int) -> int:
 async def lsp_hover(file_path: str, line: int, character: int) -> str:
     """
     Get type info, documentation, and signature at a position.
-
+
     Args:
         file_path: Absolute path to the file
         line: Line number (1-indexed)
         character: Character position (0-indexed)
-
+
     Returns:
         Type information and documentation at the position.
     """
+     # USER-VISIBLE NOTIFICATION
+     import sys
+     print(f"📍 LSP-HOVER: {file_path}:{line}:{character}", file=sys.stderr)
+
     path = Path(file_path)
     if not path.exists():
         return f"Error: File not found: {file_path}"