stravinsky 0.2.40__py3-none-any.whl → 0.2.67__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of stravinsky might be problematic.

Files changed (52)
  1. mcp_bridge/__init__.py +1 -1
  2. mcp_bridge/auth/token_refresh.py +130 -0
  3. mcp_bridge/cli/__init__.py +6 -0
  4. mcp_bridge/cli/install_hooks.py +1265 -0
  5. mcp_bridge/cli/session_report.py +585 -0
  6. mcp_bridge/hooks/HOOKS_SETTINGS.json +175 -0
  7. mcp_bridge/hooks/README.md +215 -0
  8. mcp_bridge/hooks/__init__.py +117 -46
  9. mcp_bridge/hooks/edit_recovery.py +42 -37
  10. mcp_bridge/hooks/git_noninteractive.py +89 -0
  11. mcp_bridge/hooks/keyword_detector.py +30 -0
  12. mcp_bridge/hooks/manager.py +50 -0
  13. mcp_bridge/hooks/notification_hook.py +103 -0
  14. mcp_bridge/hooks/parallel_enforcer.py +127 -0
  15. mcp_bridge/hooks/parallel_execution.py +111 -0
  16. mcp_bridge/hooks/pre_compact.py +123 -0
  17. mcp_bridge/hooks/preemptive_compaction.py +81 -7
  18. mcp_bridge/hooks/rules_injector.py +507 -0
  19. mcp_bridge/hooks/session_idle.py +116 -0
  20. mcp_bridge/hooks/session_notifier.py +125 -0
  21. mcp_bridge/{native_hooks → hooks}/stravinsky_mode.py +51 -16
  22. mcp_bridge/hooks/subagent_stop.py +98 -0
  23. mcp_bridge/hooks/task_validator.py +73 -0
  24. mcp_bridge/hooks/tmux_manager.py +141 -0
  25. mcp_bridge/hooks/todo_continuation.py +90 -0
  26. mcp_bridge/hooks/todo_delegation.py +88 -0
  27. mcp_bridge/hooks/tool_messaging.py +164 -0
  28. mcp_bridge/hooks/truncator.py +21 -17
  29. mcp_bridge/prompts/__init__.py +3 -1
  30. mcp_bridge/prompts/dewey.py +30 -20
  31. mcp_bridge/prompts/explore.py +46 -8
  32. mcp_bridge/prompts/multimodal.py +24 -3
  33. mcp_bridge/prompts/planner.py +222 -0
  34. mcp_bridge/prompts/stravinsky.py +107 -28
  35. mcp_bridge/server.py +76 -10
  36. mcp_bridge/server_tools.py +164 -32
  37. mcp_bridge/tools/agent_manager.py +203 -96
  38. mcp_bridge/tools/background_tasks.py +2 -1
  39. mcp_bridge/tools/code_search.py +81 -9
  40. mcp_bridge/tools/lsp/tools.py +6 -2
  41. mcp_bridge/tools/model_invoke.py +270 -47
  42. mcp_bridge/tools/templates.py +32 -18
  43. stravinsky-0.2.67.dist-info/METADATA +284 -0
  44. stravinsky-0.2.67.dist-info/RECORD +76 -0
  45. stravinsky-0.2.67.dist-info/entry_points.txt +5 -0
  46. mcp_bridge/native_hooks/edit_recovery.py +0 -46
  47. mcp_bridge/native_hooks/truncator.py +0 -23
  48. stravinsky-0.2.40.dist-info/METADATA +0 -204
  49. stravinsky-0.2.40.dist-info/RECORD +0 -57
  50. stravinsky-0.2.40.dist-info/entry_points.txt +0 -3
  51. /mcp_bridge/{native_hooks → hooks}/context.py +0 -0
  52. {stravinsky-0.2.40.dist-info → stravinsky-0.2.67.dist-info}/WHEEL +0 -0
@@ -11,6 +11,7 @@ import os
  import shutil
  import subprocess
  import signal
+ import time
  import uuid
  from dataclasses import asdict, dataclass, field
  from datetime import datetime
@@ -21,6 +22,50 @@ import logging

  logger = logging.getLogger(__name__)

+ # Model routing configuration
+ # Specialized agents call external models via MCP tools:
+ # explore/dewey/document_writer/multimodal → invoke_gemini(gemini-3-flash)
+ # frontend → invoke_gemini(gemini-3-pro-high)
+ # delphi → invoke_openai(gpt-5.2)
+ # Non-specialized coding tasks use Claude CLI with --model sonnet
+ AGENT_MODEL_ROUTING = {
+ # Specialized agents - no CLI model flag, they call invoke_* tools
+ "explore": None,
+ "dewey": None,
+ "document_writer": None,
+ "multimodal": None,
+ "frontend": None,
+ "delphi": None,
+ # Planner uses Opus for superior reasoning about dependencies and parallelization
+ "planner": "opus",
+ # Default for unknown agent types (coding tasks) - use Sonnet 4.5
+ "_default": "sonnet",
+ }
+
+ # Cost tier classification (from oh-my-opencode pattern)
+ AGENT_COST_TIERS = {
+ "explore": "CHEAP", # Uses gemini-3-flash
+ "dewey": "CHEAP", # Uses gemini-3-flash
+ "document_writer": "CHEAP", # Uses gemini-3-flash
+ "multimodal": "CHEAP", # Uses gemini-3-flash
+ "frontend": "MEDIUM", # Uses gemini-3-pro-high
+ "delphi": "EXPENSIVE", # Uses gpt-5.2 (OpenAI GPT)
+ "planner": "EXPENSIVE", # Uses Claude Opus 4.5
+ "_default": "EXPENSIVE", # Claude Sonnet 4.5 via CLI
+ }
+
+ # Display model names for output formatting (user-visible)
+ AGENT_DISPLAY_MODELS = {
+ "explore": "gemini-3-flash",
+ "dewey": "gemini-3-flash",
+ "document_writer": "gemini-3-flash",
+ "multimodal": "gemini-3-flash",
+ "frontend": "gemini-3-pro-high",
+ "delphi": "gpt-5.2",
+ "planner": "opus-4.5",
+ "_default": "sonnet-4.5",
+ }
+

  @dataclass
  class AgentTask:
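The three new module-level tables share one convention: an explicit `None` value means "pass no `--model` flag to the CLI, the agent routes through `invoke_*` MCP tools instead", and unknown agent types fall through to the `_default` entry. A minimal sketch of that lookup, with a hypothetical `resolve_cli_model` helper that is not part of the package:

```python
from typing import Optional

# Trimmed copy of the routing table above, for illustration only.
AGENT_MODEL_ROUTING = {
    "explore": None,       # specialized: agent calls invoke_gemini itself
    "planner": "opus",     # planner runs on Claude Opus via the CLI
    "_default": "sonnet",  # any unknown (coding) agent type
}

def resolve_cli_model(agent_type: str) -> Optional[str]:
    """Return the value for --model, or None when the agent routes via MCP tools."""
    if agent_type in AGENT_MODEL_ROUTING:
        return AGENT_MODEL_ROUTING[agent_type]
    return AGENT_MODEL_ROUTING.get("_default", "sonnet")

assert resolve_cli_model("explore") is None       # specialized agent, no CLI flag
assert resolve_cli_model("planner") == "opus"
assert resolve_cli_model("refactor") == "sonnet"  # unknown type falls back to default
```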
@@ -86,7 +131,7 @@ class AgentManager:

  # In-memory tracking for running processes
  self._processes: Dict[str, subprocess.Popen] = {}
- self._notification_queue: Dict[str, List[AgentTask]] = {}
+ self._notification_queue: Dict[str, List[Dict[str, Any]]] = {}

  def _load_tasks(self) -> Dict[str, Any]:
  """Load tasks from persistent storage."""
@@ -155,7 +200,9 @@ class AgentManager:
  Returns:
  Task ID for tracking
  """
- task_id = f"agent_{uuid.uuid4().hex[:8]}"
+ import uuid as uuid_module # Local import for MCP context
+
+ task_id = f"agent_{uuid_module.uuid4().hex[:8]}"

  task = AgentTask(
  id=task_id,
@@ -220,12 +267,20 @@ class AgentManager:
  full_prompt,
  "--output-format",
  "text",
+ "--dangerously-skip-permissions", # Critical: bypass permission prompts
  ]

- # NOTE: We intentionally do NOT pass --model to Claude CLI
- # The agent_configs have Stravinsky MCP model names (gemini-3-pro-low, gpt-5.2)
- # which Claude CLI doesn't recognize. Agents use Claude's default model
- # and can invoke Stravinsky MCP tools (invoke_gemini, invoke_openai) if needed.
+ # Model routing:
+ # - Specialized agents (explore/dewey/etc): None = use CLI default, they call invoke_*
+ # - Unknown agent types (coding tasks): Use Sonnet 4.5
+ if agent_type in AGENT_MODEL_ROUTING:
+ cli_model = AGENT_MODEL_ROUTING[agent_type] # None for specialized
+ else:
+ cli_model = AGENT_MODEL_ROUTING.get("_default", "sonnet")
+
+ if cli_model:
+ cmd.extend(["--model", cli_model])
+ logger.info(f"[AgentManager] Using --model {cli_model} for {agent_type} agent")

  # Add system prompt file if we have one
  if system_prompt:
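For a non-specialized (coding) agent type, the assembled argument list would end up looking roughly like this; the leading elements are assumed, since they sit outside the hunk shown above:

```python
# Illustration only: plausible final argv for an unknown/coding agent type.
# The "claude -p <prompt>" prefix is an assumption; only the tail appears in the diff.
cmd = [
    "claude",
    "-p", "Fix the flaky retry test in tests/test_auth.py",  # assumed prefix
    "--output-format", "text",
    "--dangerously-skip-permissions",  # new in this release: no interactive permission prompts
    "--model", "sonnet",               # appended because the agent type is not in AGENT_MODEL_ROUTING
]
```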
@@ -240,6 +295,7 @@ class AgentManager:
  # (Previously used file handle which was closed before process finished)
  process = subprocess.Popen(
  cmd,
+ stdin=subprocess.DEVNULL, # Critical: prevent stdin blocking
  stdout=subprocess.PIPE,
  stderr=subprocess.PIPE,
  text=True,
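The `stdin=subprocess.DEVNULL` line is what keeps a headless CLI child from hanging on an input prompt: the child reads EOF immediately instead of waiting on a TTY. A self-contained sketch of the same spawn-and-collect pattern (command shape assumed, not the package's exact call):

```python
import subprocess

# Sketch: spawn a CLI child that can never block on interactive input.
process = subprocess.Popen(
    ["claude", "-p", "hello", "--output-format", "text"],  # assumed command shape
    stdin=subprocess.DEVNULL,   # immediate EOF on any stdin read
    stdout=subprocess.PIPE,     # output collected later by the manager
    stderr=subprocess.PIPE,
    text=True,
)
stdout, stderr = process.communicate(timeout=300)
```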
@@ -416,9 +472,13 @@ class AgentManager:
  start = datetime.now()
  while (datetime.now() - start).total_seconds() < timeout:
  task = self.get_task(task_id)
- if task["status"] != "running":
+ if not task or task["status"] != "running":
  break
- asyncio.sleep(0.5)
+ time.sleep(0.5)
+
+ # Refresh task state after potential blocking wait
+ if not task:
+ return f"Task {task_id} not found."

  status = task["status"]
  description = task.get("description", "")
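The switch from `asyncio.sleep(0.5)` to `time.sleep(0.5)` fixes a real bug: without `await`, `asyncio.sleep()` only creates a coroutine object and never pauses, so the old loop busy-polled `get_task()` until the timeout. In this synchronous method a blocking `time.sleep()` is correct; a hypothetical async variant of the same wait would look like this instead:

```python
import asyncio

async def wait_until_done(manager, task_id: str, timeout: float = 300.0):
    """Hypothetical awaitable version of the polling loop above."""
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    task = manager.get_task(task_id)
    while loop.time() < deadline:
        task = manager.get_task(task_id)
        if not task or task["status"] != "running":
            break
        await asyncio.sleep(0.5)  # awaited, so the event loop stays responsive
    return task
```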
@@ -583,6 +643,7 @@ async def agent_spawn(
  model: str = "gemini-3-flash",
  thinking_budget: int = 0,
  timeout: int = 300,
+ blocking: bool = False,
  ) -> str:
  """
  Spawn a background agent.
@@ -594,95 +655,136 @@ async def agent_spawn(
  model: Model to use (gemini-3-flash, gemini-2.0-flash, claude)
  thinking_budget: Reserved reasoning tokens
  timeout: Execution timeout in seconds
+ blocking: If True, wait for completion and return result directly (use for delphi)

  Returns:
- Task ID and instructions
+ Task ID and instructions, or full result if blocking=True
  """
  manager = get_manager()

  # Map agent types to system prompts
+ # ALL agents use invoke_gemini or invoke_openai - NOT Claude directly
+ # explore/dewey/document_writer/multimodal/frontend → gemini-3-flash
+ # delphi → openai gpt-5.2
  system_prompts = {
- "explore": "You are a codebase exploration specialist. Find files, patterns, and answer 'where is X?' questions efficiently.",
- "dewey": "You are a documentation and research specialist. Find implementation examples, official docs, and provide evidence-based answers.",
- "frontend": """You are a Senior Frontend Architect & Avant-Garde UI Designer with 15+ years experience.
-
- OPERATIONAL DIRECTIVES:
- - Follow instructions. Execute immediately. No fluff.
- - Output First: Prioritize code and visual solutions.
-
- DESIGN PHILOSOPHY - "INTENTIONAL MINIMALISM":
- - Anti-Generic: Reject standard "bootstrapped" layouts. If it looks like a template, it's wrong.
- - Bespoke layouts, asymmetry, distinctive typography.
- - Before placing any element, calculate its purpose. No purpose = delete it.
-
- FRONTEND CODING STANDARDS:
- - Library Discipline: If a UI library (Shadcn, Radix, MUI) is detected, YOU MUST USE IT.
- - Do NOT build custom components if the library provides them.
- - Stack: Modern (React/Vue/Svelte), Tailwind/Custom CSS, semantic HTML5.
- - Focus on micro-interactions, perfect spacing, "invisible" UX.
-
- RESPONSE FORMAT:
- 1. Rationale: (1 sentence on why elements were placed there)
- 2. The Code.
-
- ULTRATHINK MODE (when user says "ULTRATHINK" or "think harder"):
- 1. Deep Reasoning Chain: Detailed breakdown of architectural and design decisions
- 2. Edge Case Analysis: What could go wrong and how we prevented it
- 3. The Code: Optimized, bespoke, production-ready, utilizing existing libraries""",
- "delphi": "You are a strategic advisor. Provide architecture guidance, debugging assistance, and code review.",
- "document_writer": """You are a Technical Documentation Specialist. Your expertise is creating clear, comprehensive documentation.
-
- DOCUMENT TYPES YOU EXCEL AT:
- - README files with proper structure
- - API documentation with examples
- - Architecture decision records (ADRs)
- - User guides and tutorials
- - Inline code documentation
-
- DOCUMENTATION PRINCIPLES:
- - Audience-first: Know who's reading and what they need
- - Progressive disclosure: Overview → Details → Edge cases
- - Examples over explanations: Show, don't just tell
- - Keep it DRY: Reference rather than repeat
- - Version awareness: Note when behavior differs across versions
-
- RESPONSE FORMAT:
- 1. Document type and target audience identified
- 2. The documentation, properly formatted in markdown""",
- "multimodal": """You interpret media files that cannot be read as plain text.
-
- Your job: examine the attached file and extract ONLY what was requested.
-
- CAPABILITIES:
- - PDFs: extract text, structure, tables, data from specific sections
- - Images: describe layouts, UI elements, text, diagrams, charts
- - Diagrams: explain relationships, flows, architecture depicted
- - Screenshots: analyze UI/UX, identify components, extract text
-
- HOW YOU WORK:
- 1. Receive a file path and a goal describing what to extract
- 2. Read and analyze the file deeply using Gemini's vision capabilities
- 3. Return ONLY the relevant extracted information
- 4. The main agent never processes the raw file - you save context tokens
-
- RESPONSE RULES:
- - Return extracted information directly, no preamble
- - If info not found, state clearly what's missing
- - Be thorough on the goal, concise on everything else""",
+ "explore": """You are a codebase exploration specialist. Find files, patterns, and answer 'where is X?' questions.
+
+ MODEL ROUTING (MANDATORY):
+ You MUST use invoke_gemini with model="gemini-3-flash" for ALL analysis and reasoning.
+ Use Claude's native tools (Read, Grep, Glob) ONLY for file access, then pass content to invoke_gemini.
+
+ WORKFLOW:
+ 1. Use Read/Grep/Glob to get file contents
+ 2. Call invoke_gemini(prompt="Analyze this: <content>", model="gemini-3-flash", agent_context={"agent_type": "explore"}) for analysis
+ 3. Return the Gemini response""",
+ "dewey": """You are a documentation and research specialist. Find implementation examples and official docs.
+
+ MODEL ROUTING (MANDATORY):
+ You MUST use invoke_gemini with model="gemini-3-flash" for ALL analysis, summarization, and reasoning.
+
+ WORKFLOW:
+ 1. Gather information using available tools
+ 2. Call invoke_gemini(prompt="<task>", model="gemini-3-flash", agent_context={"agent_type": "dewey"}) for processing
+ 3. Return the Gemini response""",
+ "frontend": """You are a Senior Frontend Architect & UI Designer.
+
+ MODEL ROUTING (MANDATORY):
+ You MUST use invoke_gemini with model="gemini-3-pro-high" for ALL code generation and design work.
+
+ DESIGN PHILOSOPHY:
+ - Anti-Generic: Reject standard layouts. Bespoke, asymmetric, distinctive.
+ - Library Discipline: Use existing UI libraries (Shadcn, Radix, MUI) if detected.
+ - Stack: React/Vue/Svelte, Tailwind/Custom CSS, semantic HTML5.
+
+ WORKFLOW:
+ 1. Analyze requirements
+ 2. Call invoke_gemini(prompt="Generate frontend code for: <task>", model="gemini-3-pro-high", agent_context={"agent_type": "frontend"})
+ 3. Return the code""",
+ "delphi": """You are a strategic technical advisor for architecture and hard debugging.
+
+ MODEL ROUTING (MANDATORY):
+ You MUST use invoke_openai with model="gpt-5.2" for ALL strategic advice and analysis.
+
+ WORKFLOW:
+ 1. Gather context about the problem
+ 2. Call invoke_openai(prompt="<problem description>", model="gpt-5.2", agent_context={"agent_type": "delphi"})
+ 3. Return the GPT response""",
+ "document_writer": """You are a Technical Documentation Specialist.
+
+ MODEL ROUTING (MANDATORY):
+ You MUST use invoke_gemini with model="gemini-3-flash" for ALL documentation generation.
+
+ DOCUMENT TYPES: README, API docs, ADRs, user guides, inline docs.
+
+ WORKFLOW:
+ 1. Gather context about what to document
+ 2. Call invoke_gemini(prompt="Write documentation for: <topic>", model="gemini-3-flash", agent_context={"agent_type": "document_writer"})
+ 3. Return the documentation""",
+ "multimodal": """You interpret media files (PDFs, images, diagrams, screenshots).
+
+ MODEL ROUTING (MANDATORY):
+ You MUST use invoke_gemini with model="gemini-3-flash" for ALL visual analysis.
+
+ WORKFLOW:
+ 1. Receive file path and extraction goal
+ 2. Call invoke_gemini(prompt="Analyze this file: <path>. Extract: <goal>", model="gemini-3-flash", agent_context={"agent_type": "multimodal"})
+ 3. Return extracted information only""",
+ "planner": """You are a pre-implementation planning specialist. You analyze requests and produce structured implementation plans BEFORE any code changes begin.
+
+ PURPOSE:
+ - Analyze requests and produce actionable implementation plans
+ - Identify dependencies and parallelization opportunities
+ - Enable efficient parallel execution by the orchestrator
+ - Prevent wasted effort through upfront planning
+
+ METHODOLOGY:
+ 1. EXPLORE FIRST: Spawn explore agents IN PARALLEL to understand the codebase
+ 2. DECOMPOSE: Break request into atomic, single-purpose tasks
+ 3. ANALYZE DEPENDENCIES: What blocks what? What can run in parallel?
+ 4. ASSIGN AGENTS: Map each task to the right specialist (explore/dewey/frontend/delphi)
+ 5. OUTPUT STRUCTURED PLAN: Use the required format below
+
+ REQUIRED OUTPUT FORMAT:
+ ```
+ ## PLAN: [Brief title]
+
+ ### ANALYSIS
+ - **Request**: [One sentence summary]
+ - **Scope**: [What's in/out of scope]
+ - **Risk Level**: [Low/Medium/High]
+
+ ### EXECUTION PHASES
+
+ #### Phase 1: [Name] (PARALLEL)
+ | Task | Agent | Files | Est |
+ |------|-------|-------|-----|
+ | [description] | explore | file.py | S/M/L |
+
+ #### Phase 2: [Name] (SEQUENTIAL after Phase 1)
+ | Task | Agent | Files | Est |
+ |------|-------|-------|-----|
+
+ ### AGENT SPAWN COMMANDS
+ ```python
+ # Phase 1 - Fire all in parallel
+ agent_spawn(prompt="...", agent_type="explore", description="...")
+ ```
+ ```
+
+ CONSTRAINTS:
+ - You ONLY plan. You NEVER execute code changes.
+ - Every task must have a clear agent assignment
+ - Parallel phases must be truly independent
+ - Include ready-to-use agent_spawn commands""",
  }

  system_prompt = system_prompts.get(agent_type, None)

- # NOTE: All agents run via Claude CLI using Claude's default model.
- # The agent_configs below are kept for documentation purposes only.
- # Agents can invoke Stravinsky MCP tools (invoke_gemini, invoke_openai)
- # within their prompts if they need to use other models.
- #
- # Agent model preferences (for reference - NOT passed to Claude CLI):
- # - stravinsky: Claude Opus 4.5 (orchestration)
- # - delphi: GPT-5.2 (strategic advice) - use invoke_openai
- # - frontend: Gemini Pro High (UI/UX) - use invoke_gemini with thinking_budget
- # - explore, dewey, document_writer, multimodal: Gemini Flash (fast) - use invoke_gemini
+ # Model routing (MANDATORY - enforced in system prompts):
+ # - explore, dewey, document_writer, multimodal → invoke_gemini(gemini-3-flash)
+ # - frontend → invoke_gemini(gemini-3-pro-high)
+ # - delphi → invoke_openai(gpt-5.2)
+ # - Unknown agent types (coding tasks) → Claude CLI --model sonnet

  # Get token store for authentication
  from ..auth.token_store import TokenStore
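The planner prompt's "AGENT SPAWN COMMANDS" section assumes the orchestrator fires independent Phase 1 tasks concurrently. Since `agent_spawn` is an async coroutine, that fan-out can be sketched with `asyncio.gather`; the prompts and descriptions below are made up for illustration:

```python
import asyncio

async def run_phase_1():
    # Fire truly independent Phase 1 tasks together and wait for all acknowledgements.
    return await asyncio.gather(
        agent_spawn(prompt="Map the token refresh flow in mcp_bridge/auth",
                    agent_type="explore", description="auth flow survey"),
        agent_spawn(prompt="List existing hook registration patterns",
                    agent_type="explore", description="hook pattern survey"),
        agent_spawn(prompt="Collect official MCP tool registration docs",
                    agent_type="dewey", description="MCP docs lookup"),
    )
```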
@@ -700,16 +802,18 @@ RESPONSE RULES:
  timeout=timeout,
  )

- return f"""🚀 Background agent spawned successfully.
+ # Get display model for concise output
+ display_model = AGENT_DISPLAY_MODELS.get(agent_type, AGENT_DISPLAY_MODELS["_default"])
+ short_desc = (description or prompt[:50]).strip()

- **Task ID**: {task_id}
- **Agent Type**: {agent_type}
- **Description**: {description or prompt[:50]}
+ # If blocking mode (recommended for delphi), wait for completion
+ if blocking:
+ result = manager.get_output(task_id, block=True, timeout=timeout)
+ return f"{agent_type}:{display_model}('{short_desc}') [BLOCKING]\n\n{result}"

- The agent is now running. Use:
- - `agent_progress(task_id="{task_id}")` to monitor real-time progress
- - `agent_output(task_id="{task_id}")` to get final result
- - `agent_cancel(task_id="{task_id}")` to stop the agent"""
+ # Concise format: AgentType:model('description')
+ return f"""{agent_type}:{display_model}('{short_desc}')
+ task_id={task_id}"""


  async def agent_output(task_id: str, block: bool = False) -> str:
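Two illustrative calls showing the return shapes after this change (prompt text invented for the example): the default fire-and-forget path hands back a concise acknowledgement with a `task_id` to poll via `agent_output`, while `blocking=True` waits and returns the full result inline.

```python
# Fire-and-forget: returns "explore:gemini-3-flash('retry audit')" plus task_id=... for later polling.
ack = await agent_spawn(
    prompt="Audit the retry logic in token_refresh.py",
    agent_type="explore",
    description="retry audit",
)

# Blocking (the docstring recommends this for delphi): waits up to `timeout` seconds
# and returns the full advisory text tagged with [BLOCKING].
advice = await agent_spawn(
    prompt="Should token refresh run in a background thread or an asyncio task?",
    agent_type="delphi",
    blocking=True,
    timeout=600,
)
```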
@@ -799,7 +903,7 @@ async def agent_list() -> str:
  if not tasks:
  return "No background agent tasks found."

- lines = ["**Background Agent Tasks**", ""]
+ lines = []

  for t in sorted(tasks, key=lambda x: x.get("created_at", ""), reverse=True):
  status_emoji = {
@@ -810,8 +914,11 @@ async def agent_list() -> str:
  "cancelled": "⚠️",
  }.get(t["status"], "❓")

+ agent_type = t.get("agent_type", "unknown")
+ display_model = AGENT_DISPLAY_MODELS.get(agent_type, AGENT_DISPLAY_MODELS["_default"])
  desc = t.get("description", t.get("prompt", "")[:40])
- lines.append(f"- {status_emoji} [{t['id']}] {t['agent_type']}: {desc}")
+ # Concise format: status agent:model('desc') id=xxx
+ lines.append(f"{status_emoji} {agent_type}:{display_model}('{desc}') id={t['id']}")

  return "\n".join(lines)


@@ -61,7 +61,8 @@ class BackgroundManager:
  json.dump(tasks, f, indent=2)

  def create_task(self, prompt: str, model: str) -> str:
- task_id = str(uuid.uuid4())[:8]
+ import uuid as uuid_module # Local import for MCP context
+ task_id = str(uuid_module.uuid4())[:8]
  task = BackgroundTask(
  id=task_id,
  prompt=prompt,
@@ -15,17 +15,21 @@ from pathlib import Path
  async def lsp_diagnostics(file_path: str, severity: str = "all") -> str:
  """
  Get diagnostics (errors, warnings) for a file using language server.
-
+
  For TypeScript/JavaScript, uses `tsc` or `biome`.
  For Python, uses `pyright` or `ruff`.
-
+
  Args:
  file_path: Path to the file to analyze
  severity: Filter by severity (error, warning, information, hint, all)
-
+
  Returns:
  Formatted diagnostics output.
  """
+ # USER-VISIBLE NOTIFICATION
+ import sys
+ print(f"🩺 LSP-DIAG: file={file_path} severity={severity}", file=sys.stderr)
+
  path = Path(file_path)
  if not path.exists():
  return f"Error: File not found: {file_path}"
@@ -49,7 +53,7 @@ async def lsp_diagnostics(file_path: str, severity: str = "all") -> str:
  elif suffix == ".py":
  # Use ruff for Python diagnostics
  result = subprocess.run(
- ["ruff", "check", str(path), "--output-format=text"],
+ ["ruff", "check", str(path), "--output-format=concise"],
  capture_output=True,
  text=True,
  timeout=30,
@@ -70,21 +74,84 @@ async def lsp_diagnostics(file_path: str, severity: str = "all") -> str:
  return f"Error: {str(e)}"


+ async def check_ai_comment_patterns(file_path: str) -> str:
+ """
+ Detect AI-generated or placeholder comment patterns that indicate incomplete work.
+
+ Patterns detected:
+ - # TODO: implement, # FIXME, # placeholder
+ - // TODO, // FIXME, // placeholder
+ - AI-style verbose comments: "This function handles...", "This method is responsible for..."
+ - Placeholder phrases: "implement this", "add logic here", "your code here"
+
+ Args:
+ file_path: Path to the file to check
+
+ Returns:
+ List of detected AI-style patterns with line numbers, or "No AI patterns detected"
+ """
+ path = Path(file_path)
+ if not path.exists():
+ return f"Error: File not found: {file_path}"
+
+ # Patterns that indicate AI-generated or placeholder code
+ ai_patterns = [
+ # Placeholder comments
+ r"#\s*(TODO|FIXME|XXX|HACK):\s*(implement|add|placeholder|your code)",
+ r"//\s*(TODO|FIXME|XXX|HACK):\s*(implement|add|placeholder|your code)",
+ # AI-style verbose descriptions
+ r"#\s*This (function|method|class) (handles|is responsible for|manages|processes)",
+ r"//\s*This (function|method|class) (handles|is responsible for|manages|processes)",
+ r'"""This (function|method|class) (handles|is responsible for|manages|processes)',
+ # Placeholder implementations
+ r"pass\s*#\s*(TODO|implement|placeholder)",
+ r"raise NotImplementedError.*implement",
+ # Common AI filler phrases
+ r"#.*\b(as needed|as required|as appropriate|if necessary)\b",
+ r"//.*\b(as needed|as required|as appropriate|if necessary)\b",
+ ]
+
+ import re
+
+ try:
+ content = path.read_text()
+ lines = content.split("\n")
+ findings = []
+
+ for i, line in enumerate(lines, 1):
+ for pattern in ai_patterns:
+ if re.search(pattern, line, re.IGNORECASE):
+ findings.append(f" Line {i}: {line.strip()[:80]}")
+ break
+
+ if findings:
+ return f"AI/Placeholder patterns detected in {file_path}:\n" + "\n".join(findings)
+ return "No AI patterns detected"
+
+ except Exception as e:
+ return f"Error reading file: {str(e)}"
+
+
  async def ast_grep_search(pattern: str, directory: str = ".", language: str = "") -> str:
  """
  Search codebase using ast-grep for structural patterns.
-
+
  ast-grep uses AST-aware pattern matching, finding code by structure
  rather than just text. More precise than regex for code search.
-
+
  Args:
  pattern: ast-grep pattern to search for
  directory: Directory to search in
  language: Filter by language (typescript, python, rust, etc.)
-
+
  Returns:
  Matched code locations and snippets.
  """
+ # USER-VISIBLE NOTIFICATION
+ import sys
+ lang_info = f" lang={language}" if language else ""
+ print(f"🔍 AST-GREP: pattern='{pattern[:50]}...'{lang_info}", file=sys.stderr)
+
  try:
  cmd = ["sg", "run", "-p", pattern, directory]
  if language:
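A quick way to exercise the new checker; the sample file content and the `asyncio.run()` wrapper are for illustration only:

```python
import asyncio
from pathlib import Path

# Write a throwaway module containing the kinds of patterns the checker flags.
sample = Path("demo_module.py")
sample.write_text(
    "def sync_users():\n"
    "    pass  # TODO: implement\n"
    "# This function handles user synchronization as needed\n"
)

report = asyncio.run(check_ai_comment_patterns(str(sample)))
print(report)
# Expected shape: "AI/Placeholder patterns detected in demo_module.py:" followed by
# one "Line N: ..." entry per flagged line (lines 2 and 3 here).
```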
@@ -129,15 +196,20 @@ async def ast_grep_search(pattern: str, directory: str = ".", language: str = ""
  async def grep_search(pattern: str, directory: str = ".", file_pattern: str = "") -> str:
  """
  Fast text search using ripgrep.
-
+
  Args:
  pattern: Search pattern (supports regex)
  directory: Directory to search in
  file_pattern: Glob pattern to filter files (e.g., "*.py", "*.ts")
-
+
  Returns:
  Matched lines with file paths and line numbers.
  """
+ # USER-VISIBLE NOTIFICATION
+ import sys
+ glob_info = f" glob={file_pattern}" if file_pattern else ""
+ print(f"🔎 GREP: pattern='{pattern[:50]}'{glob_info} dir={directory}", file=sys.stderr)
+
  try:
  cmd = ["rg", "--line-number", "--max-count=50", pattern, directory]
  if file_pattern:
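An illustrative call of the wrapper (the `asyncio.run()` harness is only for the example); ripgrep does the matching, and the new stderr line announces the query before results come back:

```python
import asyncio

matches = asyncio.run(
    grep_search(pattern=r"invoke_gemini\(", directory="mcp_bridge", file_pattern="*.py")
)
print(matches)  # file:line matches, capped at 50 per file by --max-count=50
```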
@@ -48,15 +48,19 @@ def _position_to_offset(content: str, line: int, character: int) -> int:
  async def lsp_hover(file_path: str, line: int, character: int) -> str:
  """
  Get type info, documentation, and signature at a position.
-
+
  Args:
  file_path: Absolute path to the file
  line: Line number (1-indexed)
  character: Character position (0-indexed)
-
+
  Returns:
  Type information and documentation at the position.
  """
+ # USER-VISIBLE NOTIFICATION
+ import sys
+ print(f"📍 LSP-HOVER: {file_path}:{line}:{character}", file=sys.stderr)
+
  path = Path(file_path)
  if not path.exists():
  return f"Error: File not found: {file_path}"