mcp-vector-search 0.12.6__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. mcp_vector_search/__init__.py +2 -2
  2. mcp_vector_search/analysis/__init__.py +64 -0
  3. mcp_vector_search/analysis/collectors/__init__.py +39 -0
  4. mcp_vector_search/analysis/collectors/base.py +164 -0
  5. mcp_vector_search/analysis/collectors/complexity.py +743 -0
  6. mcp_vector_search/analysis/metrics.py +341 -0
  7. mcp_vector_search/analysis/reporters/__init__.py +5 -0
  8. mcp_vector_search/analysis/reporters/console.py +222 -0
  9. mcp_vector_search/cli/commands/analyze.py +408 -0
  10. mcp_vector_search/cli/commands/chat.py +1262 -0
  11. mcp_vector_search/cli/commands/index.py +21 -3
  12. mcp_vector_search/cli/commands/init.py +13 -0
  13. mcp_vector_search/cli/commands/install.py +597 -335
  14. mcp_vector_search/cli/commands/install_old.py +8 -4
  15. mcp_vector_search/cli/commands/mcp.py +78 -6
  16. mcp_vector_search/cli/commands/reset.py +68 -26
  17. mcp_vector_search/cli/commands/search.py +30 -7
  18. mcp_vector_search/cli/commands/setup.py +1133 -0
  19. mcp_vector_search/cli/commands/status.py +37 -2
  20. mcp_vector_search/cli/commands/uninstall.py +276 -357
  21. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  22. mcp_vector_search/cli/commands/visualize/cli.py +276 -0
  23. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  24. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  25. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +29 -0
  26. mcp_vector_search/cli/commands/visualize/graph_builder.py +714 -0
  27. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  28. mcp_vector_search/cli/commands/visualize/server.py +311 -0
  29. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  30. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  31. mcp_vector_search/cli/commands/visualize/templates/base.py +180 -0
  32. mcp_vector_search/cli/commands/visualize/templates/scripts.py +2507 -0
  33. mcp_vector_search/cli/commands/visualize/templates/styles.py +1313 -0
  34. mcp_vector_search/cli/commands/visualize.py.original +2536 -0
  35. mcp_vector_search/cli/didyoumean.py +22 -2
  36. mcp_vector_search/cli/main.py +115 -159
  37. mcp_vector_search/cli/output.py +24 -8
  38. mcp_vector_search/config/__init__.py +4 -0
  39. mcp_vector_search/config/default_thresholds.yaml +52 -0
  40. mcp_vector_search/config/settings.py +12 -0
  41. mcp_vector_search/config/thresholds.py +185 -0
  42. mcp_vector_search/core/auto_indexer.py +3 -3
  43. mcp_vector_search/core/boilerplate.py +186 -0
  44. mcp_vector_search/core/config_utils.py +394 -0
  45. mcp_vector_search/core/database.py +369 -94
  46. mcp_vector_search/core/exceptions.py +11 -0
  47. mcp_vector_search/core/git_hooks.py +4 -4
  48. mcp_vector_search/core/indexer.py +221 -4
  49. mcp_vector_search/core/llm_client.py +751 -0
  50. mcp_vector_search/core/models.py +3 -0
  51. mcp_vector_search/core/project.py +17 -0
  52. mcp_vector_search/core/scheduler.py +11 -11
  53. mcp_vector_search/core/search.py +179 -29
  54. mcp_vector_search/mcp/server.py +24 -5
  55. mcp_vector_search/utils/__init__.py +2 -0
  56. mcp_vector_search/utils/gitignore_updater.py +212 -0
  57. mcp_vector_search/utils/monorepo.py +66 -4
  58. mcp_vector_search/utils/timing.py +10 -6
  59. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/METADATA +182 -52
  60. mcp_vector_search-1.0.3.dist-info/RECORD +97 -0
  61. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/WHEEL +1 -1
  62. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/entry_points.txt +1 -0
  63. mcp_vector_search/cli/commands/visualize.py +0 -1467
  64. mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
  65. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/licenses/LICENSE +0 -0
mcp_vector_search/cli/commands/chat.py (new file)
@@ -0,0 +1,1262 @@
+"""Chat command for LLM-powered intelligent code search."""
+
+import asyncio
+import os
+from fnmatch import fnmatch
+from pathlib import Path
+from typing import Any
+
+import typer
+from loguru import logger
+from rich.live import Live
+from rich.markdown import Markdown
+from rich.panel import Panel
+
+from ...core.database import ChromaVectorDatabase
+from ...core.embeddings import create_embedding_function
+from ...core.exceptions import ProjectNotFoundError, SearchError
+from ...core.llm_client import LLMClient
+from ...core.project import ProjectManager
+from ...core.search import SemanticSearchEngine
+from ..didyoumean import create_enhanced_typer
+from ..output import (
+    console,
+    print_error,
+    print_info,
+    print_success,
+    print_warning,
+)
+
+
+def show_api_key_help() -> None:
+    """Display helpful error message when API key is missing."""
+    message = """[bold yellow]⚠️ No LLM API Key Found[/bold yellow]
+
+The chat feature requires an API key for an LLM provider.
+
+[bold cyan]Set one of these environment variables:[/bold cyan]
+• [green]OPENAI_API_KEY[/green] - For OpenAI (GPT-4, etc.) [dim](recommended)[/dim]
+• [green]OPENROUTER_API_KEY[/green] - For OpenRouter (Claude, GPT, etc.)
+
+[bold cyan]Example:[/bold cyan]
+[yellow]export OPENAI_API_KEY="sk-..."[/yellow]
+[yellow]export OPENROUTER_API_KEY="sk-or-..."[/yellow]
+
+[bold cyan]Get API keys at:[/bold cyan]
+• OpenAI: [link=https://platform.openai.com/api-keys]https://platform.openai.com/api-keys[/link]
+• OpenRouter: [link=https://openrouter.ai/keys]https://openrouter.ai/keys[/link]
+
+[dim]Alternatively, run: [cyan]mcp-vector-search setup[/cyan] for interactive setup[/dim]"""
+
+    panel = Panel(
+        message,
+        border_style="yellow",
+        padding=(1, 2),
+    )
+    console.print(panel)
+
+
+class ChatSession:
+    """Manages conversation history with automatic compaction.
+
+    Keeps system prompt intact, compacts older messages when history grows large,
+    and maintains recent exchanges for context.
+    """
+
+    # Threshold for compaction (estimated tokens, ~4 chars per token)
+    COMPACTION_THRESHOLD = 8000 * 4  # ~32000 chars
+    RECENT_EXCHANGES_TO_KEEP = 3  # Keep last N user/assistant pairs
+
+    def __init__(self, system_prompt: str) -> None:
+        """Initialize session with system prompt.
+
+        Args:
+            system_prompt: Initial system message
+        """
+        self.system_prompt = system_prompt
+        self.messages: list[dict[str, str]] = [
+            {"role": "system", "content": system_prompt}
+        ]
+
+    def add_message(self, role: str, content: str) -> None:
+        """Add message to history and compact if needed.
+
+        Args:
+            role: Message role (user/assistant)
+            content: Message content
+        """
+        self.messages.append({"role": role, "content": content})
+
+        # Check if compaction needed
+        total_chars = sum(len(msg["content"]) for msg in self.messages)
+        if total_chars > self.COMPACTION_THRESHOLD:
+            self._compact_history()
+
+    def _compact_history(self) -> None:
+        """Compact conversation history by summarizing older exchanges.
+
+        Strategy:
+        1. Keep system prompt intact
+        2. Summarize older exchanges into brief context
+        3. Keep recent N exchanges verbatim
+        """
+        logger.debug("Compacting conversation history")
+
+        # Separate system prompt and conversation
+        system_msg = self.messages[0]
+        conversation = self.messages[1:]
+
+        # Keep recent exchanges (last N user/assistant pairs)
+        recent_start = max(0, len(conversation) - (self.RECENT_EXCHANGES_TO_KEEP * 2))
+        older_messages = conversation[:recent_start]
+        recent_messages = conversation[recent_start:]
+
+        # Summarize older messages
+        if older_messages:
+            summary_parts = []
+            for msg in older_messages:
+                role = msg["role"].capitalize()
+                content_preview = msg["content"][:100].replace("\n", " ")
+                summary_parts.append(f"{role}: {content_preview}...")
+
+            summary = "\n".join(summary_parts)
+            summary_msg = {
+                "role": "system",
+                "content": f"[Previous conversation summary]\n{summary}\n[End summary]",
+            }
+
+            # Rebuild messages: system + summary + recent
+            self.messages = [system_msg, summary_msg] + recent_messages
+
+            logger.debug(
+                f"Compacted {len(older_messages)} old messages, kept {len(recent_messages)} recent"
+            )
+
+    def get_messages(self) -> list[dict[str, str]]:
+        """Get current message history.
+
+        Returns:
+            List of message dictionaries
+        """
+        return self.messages.copy()
+
+    def clear(self) -> None:
+        """Clear conversation history, keeping only system prompt."""
+        self.messages = [{"role": "system", "content": self.system_prompt}]
+
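For illustration (not part of the diff): the compaction budget above is an estimate of 8000 tokens at roughly 4 characters per token, i.e. about 32,000 characters of accumulated message content. A minimal sketch of the resulting behaviour, assuming the wheel is installed so the module is importable as mcp_vector_search.cli.commands.chat:

    from mcp_vector_search.cli.commands.chat import ChatSession

    session = ChatSession("You are a code assistant.")
    # Push well past COMPACTION_THRESHOLD (8000 * 4 chars) to trigger compaction.
    for i in range(20):
        session.add_message("user", f"question {i}: " + "x" * 2000)
        session.add_message("assistant", f"answer {i}: " + "y" * 2000)

    messages = session.get_messages()
    # Once a compaction has happened, messages[0] is the original system prompt and
    # messages[1] is a system message starting with "[Previous conversation summary]";
    # immediately after a compaction only the last RECENT_EXCHANGES_TO_KEEP * 2 = 6
    # exchanges are kept verbatim after that summary.
    print(len(messages), messages[1]["content"][:35])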
+
+# Create chat subcommand app with "did you mean" functionality
+chat_app = create_enhanced_typer(
+    help="🤖 LLM-powered intelligent code search",
+    invoke_without_command=True,
+)
+
+
+@chat_app.callback(invoke_without_command=True)
+def chat_main(
+    ctx: typer.Context,
+    query: str | None = typer.Argument(
+        None,
+        help="Natural language query about your code",
+    ),
+    project_root: Path | None = typer.Option(
+        None,
+        "--project-root",
+        "-p",
+        help="Project root directory (auto-detected if not specified)",
+        exists=True,
+        file_okay=False,
+        dir_okay=True,
+        readable=True,
+        rich_help_panel="🔧 Global Options",
+    ),
+    limit: int = typer.Option(
+        5,
+        "--limit",
+        "-l",
+        help="Maximum number of results to return",
+        min=1,
+        max=20,
+        rich_help_panel="📊 Result Options",
+    ),
+    model: str | None = typer.Option(
+        None,
+        "--model",
+        "-m",
+        help="Model to use (defaults based on provider: gpt-4o-mini for OpenAI, claude-3-haiku for OpenRouter)",
+        rich_help_panel="🤖 LLM Options",
+    ),
+    provider: str | None = typer.Option(
+        None,
+        "--provider",
+        help="LLM provider to use: 'openai' or 'openrouter' (auto-detect if not specified)",
+        rich_help_panel="🤖 LLM Options",
+    ),
+    timeout: float | None = typer.Option(
+        30.0,
+        "--timeout",
+        help="API timeout in seconds",
+        min=5.0,
+        max=120.0,
+        rich_help_panel="🤖 LLM Options",
+    ),
+    json_output: bool = typer.Option(
+        False,
+        "--json",
+        help="Output results in JSON format",
+        rich_help_panel="📊 Result Options",
+    ),
+    files: str | None = typer.Option(
+        None,
+        "--files",
+        "-f",
+        help="Filter by file glob patterns (e.g., '*.py', 'src/*.js'). Matches basename or relative path.",
+        rich_help_panel="🔍 Filters",
+    ),
+    think: bool = typer.Option(
+        False,
+        "--think",
+        "-t",
+        help="Use advanced model for complex queries (gpt-4o / claude-sonnet-4). Better reasoning, higher cost.",
+        rich_help_panel="🤖 LLM Options",
+    ),
+) -> None:
+    """🤖 Ask questions about your code in natural language.
+
+    Uses LLM (OpenAI or OpenRouter) to intelligently search your codebase and answer
+    questions like "where is X defined?", "how does Y work?", etc.
+
+    [bold cyan]Setup:[/bold cyan]
+
+    [green]Option A - OpenAI (recommended):[/green]
+        $ export OPENAI_API_KEY="your-key-here"
+        Get a key at: [cyan]https://platform.openai.com/api-keys[/cyan]
+
+    [green]Option B - OpenRouter:[/green]
+        $ export OPENROUTER_API_KEY="your-key-here"
+        Get a key at: [cyan]https://openrouter.ai/keys[/cyan]
+
+    [dim]Provider is auto-detected. OpenAI is preferred if both keys are set.[/dim]
+
+    [bold cyan]Examples:[/bold cyan]
+
+    [green]Ask where a parameter is set:[/green]
+        $ mcp-vector-search chat "where is similarity_threshold set?"
+
+    [green]Ask how something works:[/green]
+        $ mcp-vector-search chat "how does the indexing process work?"
+
+    [green]Find implementation details:[/green]
+        $ mcp-vector-search chat "show me the search ranking algorithm"
+
+    [green]Force specific provider:[/green]
+        $ mcp-vector-search chat "question" --provider openai
+        $ mcp-vector-search chat "question" --provider openrouter
+
+    [green]Use custom model:[/green]
+        $ mcp-vector-search chat "question" --model gpt-4o
+        $ mcp-vector-search chat "question" --model anthropic/claude-3.5-sonnet
+
+    [bold cyan]Advanced:[/bold cyan]
+
+    [green]Filter by file pattern:[/green]
+        $ mcp-vector-search chat "how does validation work?" --files "*.py"
+        $ mcp-vector-search chat "find React components" --files "src/*.tsx"
+
+    [green]Limit results:[/green]
+        $ mcp-vector-search chat "find auth code" --limit 3
+
+    [green]Custom timeout:[/green]
+        $ mcp-vector-search chat "complex question" --timeout 60
+
+    [green]Use advanced model for complex queries:[/green]
+        $ mcp-vector-search chat "explain the authentication flow" --think
+
+    [dim]💡 Tip: Use --think for complex architectural questions. It uses gpt-4o or
+    claude-sonnet-4 for better reasoning at higher cost.[/dim]
+    """
+    # If no query provided and no subcommand invoked, exit (show help)
+    if query is None:
+        if ctx.invoked_subcommand is None:
+            # No query and no subcommand - show help
+            raise typer.Exit()
+        else:
+            # A subcommand was invoked - let it handle the request
+            return
+
+    try:
+        project_root = project_root or ctx.obj.get("project_root") or Path.cwd()
+
+        # Validate provider if specified
+        if provider and provider not in ("openai", "openrouter"):
+            print_error(
+                f"Invalid provider: {provider}. Must be 'openai' or 'openrouter'"
+            )
+            raise typer.Exit(1)
+
+        # Run the chat with intent detection and routing
+        asyncio.run(
+            run_chat_with_intent(
+                project_root=project_root,
+                query=query,
+                limit=limit,
+                model=model,
+                provider=provider,
+                timeout=timeout,
+                json_output=json_output,
+                files=files,
+                think=think,
+            )
+        )
+
+    except (typer.Exit, SystemExit):
+        # Re-raise exit exceptions without printing additional error messages
+        # The error message has already been shown to the user
+        raise
+    except Exception as e:
+        # Log real exceptions (not typer.Exit)
+        if not isinstance(e, typer.Exit | SystemExit):
+            logger.error(f"Chat failed: {e}")
+            print_error(f"Chat failed: {e}")
+        raise typer.Exit(
+            1
+        ) from None  # Suppress exception chain to avoid double-printing
+
+
+async def run_chat_with_intent(
+    project_root: Path,
+    query: str,
+    limit: int = 5,
+    model: str | None = None,
+    provider: str | None = None,
+    timeout: float = 30.0,
+    json_output: bool = False,
+    files: str | None = None,
+    think: bool = False,
+) -> None:
+    """Route to appropriate chat mode based on detected intent.
+
+    Args:
+        project_root: Project root directory
+        query: User's natural language query
+        limit: Maximum results to return
+        model: Model to use (optional)
+        provider: LLM provider
+        timeout: API timeout
+        json_output: Whether to output JSON
+        files: File pattern filter
+        think: Use advanced model
+    """
+    # Initialize LLM client for intent detection
+    from ...core.config_utils import (
+        get_openai_api_key,
+        get_openrouter_api_key,
+        get_preferred_llm_provider,
+    )
+
+    config_dir = project_root / ".mcp-vector-search"
+    openai_key = get_openai_api_key(config_dir)
+    openrouter_key = get_openrouter_api_key(config_dir)
+
+    # Determine provider (same logic as before)
+    if not provider:
+        preferred_provider = get_preferred_llm_provider(config_dir)
+        if preferred_provider == "openai" and openai_key:
+            provider = "openai"
+        elif preferred_provider == "openrouter" and openrouter_key:
+            provider = "openrouter"
+        elif openai_key:
+            provider = "openai"
+        elif openrouter_key:
+            provider = "openrouter"
+        else:
+            console.print()  # Blank line for spacing
+            show_api_key_help()
+            raise typer.Exit(1)
+
+    # Create temporary client for intent detection (use fast model)
+    try:
+        intent_client = LLMClient(
+            openai_api_key=openai_key,
+            openrouter_api_key=openrouter_key,
+            provider=provider,
+            timeout=timeout,
+            think=False,  # Use fast model for intent detection
+        )
+
+        # Detect intent
+        intent = await intent_client.detect_intent(query)
+
+        # Show intent to user
+        if intent == "find":
+            console.print("\n[cyan]🔍 Intent: Find[/cyan] - Searching codebase\n")
+            await run_chat_search(
+                project_root=project_root,
+                query=query,
+                limit=limit,
+                model=model,
+                provider=provider,
+                timeout=timeout,
+                json_output=json_output,
+                files=files,
+                think=think,
+            )
+        else:
+            # Answer mode - force think mode and enter interactive session
+            console.print(
+                "\n[cyan]💬 Intent: Answer[/cyan] - Entering interactive mode\n"
+            )
+            await run_chat_answer(
+                project_root=project_root,
+                initial_query=query,
+                limit=limit,
+                model=model,
+                provider=provider,
+                timeout=timeout,
+                files=files,
+            )
+
+    except Exception as e:
+        logger.error(f"Intent detection failed: {e}")
+        # Default to find mode on error
+        console.print("\n[yellow]⚠ Using default search mode[/yellow]\n")
+        await run_chat_search(
+            project_root=project_root,
+            query=query,
+            limit=limit,
+            model=model,
+            provider=provider,
+            timeout=timeout,
+            json_output=json_output,
+            files=files,
+            think=think,
+        )
+
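For illustration (not part of the diff): the provider fallback above (and the same chain repeated in run_chat_search below) reduces to a fixed precedence: an explicit --provider wins, then the configured preferred provider if its key is available, then whichever key exists with OpenAI checked first. A standalone sketch of that precedence, where resolve_provider is a hypothetical helper and plain strings stand in for the config lookups:

    def resolve_provider(
        explicit: str | None,
        preferred: str | None,
        openai_key: str | None,
        openrouter_key: str | None,
    ) -> str | None:
        # Mirrors the fallback chain in this file; None means "no usable key".
        if explicit:
            return explicit  # chat_main has already validated the value
        if preferred == "openai" and openai_key:
            return "openai"
        if preferred == "openrouter" and openrouter_key:
            return "openrouter"
        if openai_key:
            return "openai"
        if openrouter_key:
            return "openrouter"
        return None  # caller shows show_api_key_help() and exits

    # Preferred provider is skipped when its key is missing; OpenAI wins the tie.
    assert resolve_provider(None, "openrouter", "sk-a", None) == "openai"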
+
+async def run_chat_answer(
+    project_root: Path,
+    initial_query: str,
+    limit: int = 5,
+    model: str | None = None,
+    provider: str | None = None,
+    timeout: float = 30.0,
+    files: str | None = None,
+) -> None:
+    """Run interactive answer mode with streaming responses.
+
+    Args:
+        project_root: Project root directory
+        initial_query: Initial user question
+        limit: Max search results for context
+        model: Model to use (optional, defaults to advanced model)
+        provider: LLM provider
+        timeout: API timeout
+        files: File pattern filter
+    """
+    from ...core.config_utils import get_openai_api_key, get_openrouter_api_key
+
+    config_dir = project_root / ".mcp-vector-search"
+    openai_key = get_openai_api_key(config_dir)
+    openrouter_key = get_openrouter_api_key(config_dir)
+
+    # Load project configuration
+    project_manager = ProjectManager(project_root)
+    if not project_manager.is_initialized():
+        raise ProjectNotFoundError(
+            f"Project not initialized at {project_root}. Run 'mcp-vector-search init' first."
+        )
+
+    config = project_manager.load_config()
+
+    # Initialize LLM client with advanced model (force think mode)
+    try:
+        llm_client = LLMClient(
+            openai_api_key=openai_key,
+            openrouter_api_key=openrouter_key,
+            model=model,
+            provider=provider,
+            timeout=timeout,
+            think=True,  # Always use advanced model for answer mode
+        )
+        provider_display = llm_client.provider.capitalize()
+        model_info = f"{llm_client.model} [bold magenta](thinking mode)[/bold magenta]"
+        print_success(f"Connected to {provider_display}: {model_info}")
+    except ValueError as e:
+        print_error(str(e))
+        raise typer.Exit(1)
+
+    # Initialize search engine
+    embedding_function, _ = create_embedding_function(config.embedding_model)
+    database = ChromaVectorDatabase(
+        persist_directory=config.index_path,
+        embedding_function=embedding_function,
+    )
+    search_engine = SemanticSearchEngine(
+        database=database,
+        project_root=project_root,
+        similarity_threshold=config.similarity_threshold,
+    )
+
+    # Initialize session (cleared on startup)
+    system_prompt = """You are a helpful code assistant analyzing a codebase. Answer questions based on provided code context.
+
+Guidelines:
+- Be concise but thorough
+- Reference specific functions, classes, or files
+- Use code examples when helpful
+- If context is insufficient, say so
+- Use markdown formatting"""
+
+    session = ChatSession(system_prompt)
+
+    # Process initial query
+    await _process_answer_query(
+        query=initial_query,
+        llm_client=llm_client,
+        search_engine=search_engine,
+        database=database,
+        session=session,
+        project_root=project_root,
+        limit=limit,
+        files=files,
+        config=config,
+    )
+
+    # Interactive loop
+    console.print("\n[dim]Type your questions or '/exit' to quit[/dim]\n")
+
+    while True:
+        try:
+            # Get user input
+            user_input = console.input("\n[bold cyan]You:[/bold cyan] ").strip()
+
+            if not user_input:
+                continue
+
+            # Check for exit command
+            if user_input.lower() in ("/exit", "/quit", "exit", "quit"):
+                console.print("\n[cyan]👋 Session ended.[/cyan]")
+                break
+
+            # Process query
+            await _process_answer_query(
+                query=user_input,
+                llm_client=llm_client,
+                search_engine=search_engine,
+                database=database,
+                session=session,
+                project_root=project_root,
+                limit=limit,
+                files=files,
+                config=config,
+            )
+
+        except KeyboardInterrupt:
+            console.print("\n\n[cyan]👋 Session ended.[/cyan]")
+            break
+        except EOFError:
+            console.print("\n\n[cyan]👋 Session ended.[/cyan]")
+            break
+        except Exception as e:
+            logger.error(f"Error processing query: {e}")
+            print_error(f"Error: {e}")
+
+
+async def _process_answer_query(
+    query: str,
+    llm_client: LLMClient,
+    search_engine: SemanticSearchEngine,
+    database: ChromaVectorDatabase,
+    session: ChatSession,
+    project_root: Path,
+    limit: int,
+    files: str | None,
+    config: Any,
+) -> None:
+    """Process a single answer query with agentic tool use.
+
+    Args:
+        query: User query
+        llm_client: LLM client instance
+        search_engine: Search engine instance
+        database: Vector database
+        session: Chat session
+        project_root: Project root path
+        limit: Max results
+        files: File pattern filter
+        config: Project config
+    """
+    # Define search tools for the LLM
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "search_code",
+                "description": "Search the codebase for relevant code snippets using semantic search",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "query": {
+                            "type": "string",
+                            "description": "Search query to find relevant code (e.g., 'authentication logic', 'database connection', 'error handling')",
+                        },
+                        "limit": {
+                            "type": "integer",
+                            "description": "Maximum number of results to return (default: 5, max: 10)",
+                            "default": 5,
+                        },
+                    },
+                    "required": ["query"],
+                },
+            },
+        },
+        {
+            "type": "function",
+            "function": {
+                "name": "read_file",
+                "description": "Read the full content of a specific file",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "file_path": {
+                            "type": "string",
+                            "description": "Relative path to the file from project root",
+                        }
+                    },
+                    "required": ["file_path"],
+                },
+            },
+        },
+        {
+            "type": "function",
+            "function": {
+                "name": "list_files",
+                "description": "List files in the codebase matching a pattern",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "pattern": {
+                            "type": "string",
+                            "description": "Glob pattern to match files (e.g., '*.py', 'src/**/*.ts', 'tests/')",
+                        }
+                    },
+                    "required": ["pattern"],
+                },
+            },
+        },
+    ]
+
+    # System prompt for tool use
+    system_prompt = """You are a helpful code assistant with access to search tools. Use these tools to find and analyze code in the codebase.
+
+Available tools:
+- search_code: Search for relevant code using semantic search
+- read_file: Read the full content of a specific file
+- list_files: List files matching a pattern
+
+Guidelines:
+1. Use search_code to find relevant code snippets
+2. Use read_file when you need to see the full file context
+3. Use list_files to understand the project structure
+4. Make multiple searches if needed to gather enough context
+5. After gathering sufficient information, provide your analysis
+
+Always base your answers on actual code from the tools. If you can't find relevant code, say so."""
+
+    # Tool execution functions
+    async def execute_search_code(query_str: str, limit_val: int = 5) -> str:
+        """Execute search_code tool."""
+        try:
+            limit_val = min(limit_val, 10)  # Cap at 10
+            async with database:
+                results = await search_engine.search(
+                    query=query_str,
+                    limit=limit_val,
+                    similarity_threshold=config.similarity_threshold,
+                    include_context=True,
+                )
+
+            # Post-filter by file pattern if specified
+            if files and results:
+                filtered_results = []
+                for result in results:
+                    try:
+                        rel_path = str(result.file_path.relative_to(project_root))
+                    except ValueError:
+                        rel_path = str(result.file_path)
+
+                    if fnmatch(rel_path, files) or fnmatch(
+                        os.path.basename(rel_path), files
+                    ):
+                        filtered_results.append(result)
+                results = filtered_results
+
+            if not results:
+                return "No results found for this query."
+
+            # Format results
+            result_parts = []
+            for i, result in enumerate(results, 1):
+                try:
+                    rel_path = str(result.file_path.relative_to(project_root))
+                except ValueError:
+                    rel_path = str(result.file_path)
+
+                result_parts.append(
+                    f"[Result {i}: {rel_path}]\n"
+                    f"Location: {result.location}\n"
+                    f"Lines {result.start_line}-{result.end_line}\n"
+                    f"Similarity: {result.similarity_score:.3f}\n"
+                    f"```\n{result.content}\n```\n"
+                )
+            return "\n".join(result_parts)
+
+        except Exception as e:
+            logger.error(f"search_code tool failed: {e}")
+            return f"Error searching code: {e}"
+
+    async def execute_read_file(file_path: str) -> str:
+        """Execute read_file tool."""
+        try:
+            # Normalize path
+            if file_path.startswith("/"):
+                full_path = Path(file_path)
+            else:
+                full_path = project_root / file_path
+
+            # Security check: file must be within project
+            try:
+                full_path.relative_to(project_root)
+            except ValueError:
+                return f"Error: File must be within project root: {project_root}"
+
+            if not full_path.exists():
+                return f"Error: File not found: {file_path}"
+
+            if not full_path.is_file():
+                return f"Error: Not a file: {file_path}"
+
+            # Read file with size limit
+            max_size = 100_000  # 100KB
+            file_size = full_path.stat().st_size
+            if file_size > max_size:
+                return f"Error: File too large ({file_size} bytes). Use search_code instead."
+
+            content = full_path.read_text(errors="replace")
+            return f"File: {file_path}\n```\n{content}\n```"
+
+        except Exception as e:
+            logger.error(f"read_file tool failed: {e}")
+            return f"Error reading file: {e}"
+
+    async def execute_list_files(pattern: str) -> str:
+        """Execute list_files tool."""
+        try:
+            from glob import glob
+
+            # Use glob to find matching files
+            matches = glob(str(project_root / pattern), recursive=True)
+
+            if not matches:
+                return f"No files found matching pattern: {pattern}"
+
+            # Get relative paths and limit results
+            rel_paths = []
+            for match in matches[:50]:  # Limit to 50 files
+                try:
+                    rel_path = Path(match).relative_to(project_root)
+                    rel_paths.append(str(rel_path))
+                except ValueError:
+                    continue
+
+            if not rel_paths:
+                return f"No files found matching pattern: {pattern}"
+
+            return f"Files matching '{pattern}':\n" + "\n".join(
+                f"- {p}" for p in sorted(rel_paths)
+            )
+
+        except Exception as e:
+            logger.error(f"list_files tool failed: {e}")
+            return f"Error listing files: {e}"
+
+    # Get conversation history
+    conversation_history = session.get_messages()[1:]  # Skip system prompt
+
+    # Build messages: system + history + current query
+    messages = [{"role": "system", "content": system_prompt}]
+    messages.extend(conversation_history)
+    messages.append({"role": "user", "content": query})
+
+    # Agentic loop
+    max_iterations = 25
+    for _iteration in range(max_iterations):
+        try:
+            response = await llm_client.chat_with_tools(messages, tools)
+
+            # Extract message from response
+            choice = response.get("choices", [{}])[0]
+            message = choice.get("message", {})
+
+            # Check for tool calls
+            tool_calls = message.get("tool_calls", [])
+
+            if tool_calls:
+                # Add assistant message with tool calls
+                messages.append(message)
+
+                # Execute each tool call
+                for tool_call in tool_calls:
+                    tool_id = tool_call.get("id")
+                    function = tool_call.get("function", {})
+                    function_name = function.get("name")
+                    arguments_str = function.get("arguments", "{}")
+
+                    # Parse arguments
+                    try:
+                        import json
+
+                        arguments = json.loads(arguments_str)
+                    except json.JSONDecodeError:
+                        arguments = {}
+
+                    # Display tool usage
+                    console.print(
+                        f"\n[dim]🔧 Using tool: {function_name}({', '.join(f'{k}={repr(v)}' for k, v in arguments.items())})[/dim]"
+                    )
+
+                    # Execute tool
+                    if function_name == "search_code":
+                        result = await execute_search_code(
+                            arguments.get("query", ""),
+                            arguments.get("limit", 5),
+                        )
+                        console.print(
+                            f"[dim] Found {len(result.split('[Result')) - 1} results[/dim]"
+                        )
+                    elif function_name == "read_file":
+                        result = await execute_read_file(arguments.get("file_path", ""))
+                        console.print("[dim] Read file[/dim]")
+                    elif function_name == "list_files":
+                        result = await execute_list_files(arguments.get("pattern", ""))
+                        console.print("[dim] Listed files[/dim]")
+                    else:
+                        result = f"Error: Unknown tool: {function_name}"
+
+                    # Add tool result to messages
+                    messages.append(
+                        {
+                            "role": "tool",
+                            "tool_call_id": tool_id,
+                            "content": result,
+                        }
+                    )
+
+            else:
+                # No tool calls - final response
+                final_content = message.get("content", "")
+
+                if not final_content:
+                    print_error("LLM returned empty response")
+                    return
+
+                # Stream the final response
+                console.print("\n[bold cyan]🤖 Assistant:[/bold cyan]\n")
+
+                # Use Rich Live for rendering
+                with Live("", console=console, auto_refresh=True) as live:
+                    live.update(Markdown(final_content))
+
+                # Add to session history
+                session.add_message("user", query)
+                session.add_message("assistant", final_content)
+
+                return
+
+        except Exception as e:
+            logger.error(f"Tool execution loop failed: {e}")
+            print_error(f"Error: {e}")
+            return
+
+    # Max iterations reached
+    print_warning(
+        "\n⚠ Maximum iterations reached. The assistant may not have gathered enough information."
+    )
+
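For illustration (not part of the diff): the agentic loop above follows the OpenAI-style tool-calling message protocol. The assistant message carrying tool_calls is appended as-is, and each tool's output goes back as a role "tool" message keyed by tool_call_id. A minimal sketch of what the messages list looks like after one search_code round trip; all values are illustrative, not captured output:

    messages = [
        {"role": "system", "content": "You are a helpful code assistant with access to search tools..."},
        {"role": "user", "content": "where is the similarity threshold applied?"},
        # Assistant turn returned by chat_with_tools(): no text content, one tool call,
        # with arguments encoded as a JSON string (parsed via json.loads above).
        {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": "call_1",
                    "type": "function",
                    "function": {
                        "name": "search_code",
                        "arguments": '{"query": "similarity threshold", "limit": 5}',
                    },
                }
            ],
        },
        # Tool result appended by the loop, matched to the call via tool_call_id.
        {
            "role": "tool",
            "tool_call_id": "call_1",
            "content": "[Result 1: mcp_vector_search/core/search.py]\n...",
        },
    ]

The next chat_with_tools() call sees this full transcript and either issues more tool calls or returns a plain assistant message, which ends the loop.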
+
+async def run_chat_search(
+    project_root: Path,
+    query: str,
+    limit: int = 5,
+    model: str | None = None,
+    provider: str | None = None,
+    timeout: float = 30.0,
+    json_output: bool = False,
+    files: str | None = None,
+    think: bool = False,
+) -> None:
+    """Run LLM-powered chat search.
+
+    Implementation Flow:
+    1. Initialize LLM client and validate API key
+    2. Generate 2-3 targeted search queries from natural language
+    3. Execute each search query against vector database
+    4. Have LLM analyze all results and select most relevant ones
+    5. Display results with explanations
+
+    Args:
+        project_root: Project root directory
+        query: Natural language query from user
+        limit: Maximum number of results to return
+        model: Model to use (optional, defaults based on provider)
+        provider: LLM provider ('openai' or 'openrouter', auto-detect if None)
+        timeout: API timeout in seconds
+        json_output: Whether to output JSON format
+        files: Optional glob pattern to filter files (e.g., '*.py', 'src/*.js')
+        think: Use advanced "thinking" model for complex queries
+    """
+    # Check for API keys (environment variable or config file)
+    from ...core.config_utils import (
+        get_openai_api_key,
+        get_openrouter_api_key,
+        get_preferred_llm_provider,
+    )
+
+    config_dir = project_root / ".mcp-vector-search"
+    openai_key = get_openai_api_key(config_dir)
+    openrouter_key = get_openrouter_api_key(config_dir)
+
+    # Determine which provider to use
+    if provider:
+        # Explicit provider specified
+        if provider == "openai" and not openai_key:
+            print_error("OpenAI API key not found.")
+            print_info("\n[bold]To use OpenAI:[/bold]")
+            print_info(
+                "1. Get an API key from [cyan]https://platform.openai.com/api-keys[/cyan]"
+            )
+            print_info("2. Set environment variable:")
+            print_info(" [yellow]export OPENAI_API_KEY='your-key'[/yellow]")
+            print_info("")
+            print_info("Or run: [cyan]mcp-vector-search setup[/cyan]")
+            raise typer.Exit(1)
+        elif provider == "openrouter" and not openrouter_key:
+            print_error("OpenRouter API key not found.")
+            print_info("\n[bold]To use OpenRouter:[/bold]")
+            print_info("1. Get an API key from [cyan]https://openrouter.ai/keys[/cyan]")
+            print_info("2. Set environment variable:")
+            print_info(" [yellow]export OPENROUTER_API_KEY='your-key'[/yellow]")
+            print_info("")
+            print_info("Or run: [cyan]mcp-vector-search setup[/cyan]")
+            raise typer.Exit(1)
+    else:
+        # Auto-detect provider
+        preferred_provider = get_preferred_llm_provider(config_dir)
+
+        if preferred_provider == "openai" and openai_key:
+            provider = "openai"
+        elif preferred_provider == "openrouter" and openrouter_key:
+            provider = "openrouter"
+        elif openai_key:
+            provider = "openai"
+        elif openrouter_key:
+            provider = "openrouter"
+        else:
+            console.print()  # Blank line for spacing
+            show_api_key_help()
+            raise typer.Exit(1)
+
+    # Load project configuration
+    project_manager = ProjectManager(project_root)
+
+    if not project_manager.is_initialized():
+        raise ProjectNotFoundError(
+            f"Project not initialized at {project_root}. Run 'mcp-vector-search init' first."
+        )
+
+    config = project_manager.load_config()
+
+    # Initialize LLM client
+    try:
+        llm_client = LLMClient(
+            openai_api_key=openai_key,
+            openrouter_api_key=openrouter_key,
+            model=model,
+            provider=provider,
+            timeout=timeout,
+            think=think,
+        )
+        provider_display = llm_client.provider.capitalize()
+        model_info = f"{llm_client.model}"
+        if think:
+            model_info += " [bold magenta](thinking mode)[/bold magenta]"
+        print_success(f"Connected to {provider_display}: {model_info}")
+    except ValueError as e:
+        print_error(str(e))
+        raise typer.Exit(1)
+
+    # Step 1: Generate search queries from natural language
+    console.print(f"\n[cyan]💭 Analyzing query:[/cyan] [white]{query}[/white]")
+
+    try:
+        search_queries = await llm_client.generate_search_queries(query, limit=3)
+
+        if not search_queries:
+            print_error("Failed to generate search queries from your question.")
+            raise typer.Exit(1)
+
+        console.print(
+            f"\n[cyan]🔍 Generated {len(search_queries)} search queries:[/cyan]"
+        )
+        for i, sq in enumerate(search_queries, 1):
+            console.print(f" {i}. [yellow]{sq}[/yellow]")
+
+    except SearchError as e:
+        print_error(f"Failed to generate queries: {e}")
+        raise typer.Exit(1)
+
+    # Step 2: Execute each search query
+    console.print("\n[cyan]🔎 Searching codebase...[/cyan]")
+
+    embedding_function, _ = create_embedding_function(config.embedding_model)
+    database = ChromaVectorDatabase(
+        persist_directory=config.index_path,
+        embedding_function=embedding_function,
+    )
+
+    search_engine = SemanticSearchEngine(
+        database=database,
+        project_root=project_root,
+        similarity_threshold=config.similarity_threshold,
+    )
+
+    # Execute all searches
+    search_results = {}
+    total_results = 0
+
+    try:
+        async with database:
+            for search_query in search_queries:
+                results = await search_engine.search(
+                    query=search_query,
+                    limit=limit * 2,  # Get more results for LLM to analyze
+                    similarity_threshold=config.similarity_threshold,
+                    include_context=True,
+                )
+
+                # Post-filter results by file pattern if specified
+                if files and results:
+                    filtered_results = []
+                    for result in results:
+                        # Get relative path from project root
+                        try:
+                            rel_path = str(result.file_path.relative_to(project_root))
+                        except ValueError:
+                            # If file is outside project root, use absolute path
+                            rel_path = str(result.file_path)
+
+                        # Match against glob pattern (both full path and basename)
+                        if fnmatch(rel_path, files) or fnmatch(
+                            os.path.basename(rel_path), files
+                        ):
+                            filtered_results.append(result)
+                    results = filtered_results
+
+                search_results[search_query] = results
+                total_results += len(results)
+
+                console.print(
+                    f" • [yellow]{search_query}[/yellow]: {len(results)} results"
+                )
+
+    except Exception as e:
+        logger.error(f"Search execution failed: {e}")
+        print_error(f"Search failed: {e}")
+        raise typer.Exit(1)
+
+    if total_results == 0:
+        print_warning("\n⚠️ No results found for any search query.")
+        print_info("\n[bold]Suggestions:[/bold]")
+        print_info(" • Try rephrasing your question")
+        print_info(" • Use more general terms")
+        print_info(
+            " • Check if relevant files are indexed with [cyan]mcp-vector-search status[/cyan]"
+        )
+        raise typer.Exit(0)
+
+    # Step 3: Have LLM analyze and rank results
+    console.print(f"\n[cyan]🤖 Analyzing {total_results} results...[/cyan]")
+
+    try:
+        ranked_results = await llm_client.analyze_and_rank_results(
+            original_query=query,
+            search_results=search_results,
+            top_n=limit,
+        )
+
+        if not ranked_results:
+            print_warning("\n⚠️ LLM could not identify relevant results.")
+            raise typer.Exit(0)
+
+    except SearchError as e:
+        print_error(f"Result analysis failed: {e}")
+        # Fallback: show raw search results
+        print_warning("\nShowing raw search results instead...")
+        await _show_fallback_results(search_results, limit)
+        raise typer.Exit(1)
+
+    # Step 4: Display results with explanations
+    if json_output:
+        await _display_json_results(ranked_results)
+    else:
+        await _display_rich_results(ranked_results, query)
+
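For illustration (not part of the diff): the --files filter above is applied after the vector search, by testing each result with fnmatch against both its path relative to the project root and its basename. Note that fnmatch's * also matches across path separators, so a pattern like "src/*.tsx" covers nested paths as well. A small standard-library illustration; matches_filter is a hypothetical name, not a function in the package:

    import os
    from fnmatch import fnmatch

    def matches_filter(rel_path: str, pattern: str) -> bool:
        # Same test the post-filter applies to each search result.
        return fnmatch(rel_path, pattern) or fnmatch(os.path.basename(rel_path), pattern)

    assert matches_filter("src/components/Button.tsx", "src/*.tsx")      # * spans directories
    assert matches_filter("mcp_vector_search/core/search.py", "*.py")    # matches via basename too
    assert not matches_filter("docs/readme.md", "*.py")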
+
+async def _display_rich_results(
+    ranked_results: list[dict],
+    original_query: str,
+) -> None:
+    """Display results in rich formatted output.
+
+    Args:
+        ranked_results: List of ranked results with explanations
+        original_query: Original user query
+    """
+    from rich.panel import Panel
+    from rich.syntax import Syntax
+
+    console.print(
+        f"\n[bold cyan]🎯 Top Results for:[/bold cyan] [white]{original_query}[/white]\n"
+    )
+
+    for i, item in enumerate(ranked_results, 1):
+        result = item["result"]
+        relevance = item["relevance"]
+        explanation = item["explanation"]
+        query = item["query"]
+
+        # Determine relevance emoji and color
+        if relevance == "High":
+            relevance_emoji = "🟢"
+            relevance_color = "green"
+        elif relevance == "Medium":
+            relevance_emoji = "🟡"
+            relevance_color = "yellow"
+        else:
+            relevance_emoji = "🔴"
+            relevance_color = "red"
+
+        # Header with result number and file
+        console.print(f"[bold]📍 Result {i} of {len(ranked_results)}[/bold]")
+        console.print(
+            f"[cyan]📂 {result.file_path.relative_to(result.file_path.parent.parent)}[/cyan]"
+        )
+
+        # Relevance and explanation
+        console.print(
+            f"\n{relevance_emoji} [bold {relevance_color}]Relevance: {relevance}[/bold {relevance_color}]"
+        )
+        console.print(f"[dim]Search query: {query}[/dim]")
+        console.print(f"\n💡 [italic]{explanation}[/italic]\n")
+
+        # Code snippet with syntax highlighting
+        file_ext = result.file_path.suffix.lstrip(".")
+        code_syntax = Syntax(
+            result.content,
+            lexer=file_ext or "python",
+            theme="monokai",
+            line_numbers=True,
+            start_line=result.start_line,
+        )
+
+        panel = Panel(
+            code_syntax,
+            title=f"[bold]{result.function_name or result.class_name or 'Code'}[/bold]",
+            border_style="cyan",
+        )
+        console.print(panel)
+
+        # Metadata
+        metadata = []
+        if result.function_name:
+            metadata.append(f"Function: [cyan]{result.function_name}[/cyan]")
+        if result.class_name:
+            metadata.append(f"Class: [cyan]{result.class_name}[/cyan]")
+        metadata.append(f"Lines: [cyan]{result.start_line}-{result.end_line}[/cyan]")
+        metadata.append(f"Similarity: [cyan]{result.similarity_score:.3f}[/cyan]")
+
+        console.print("[dim]" + " | ".join(metadata) + "[/dim]")
+        console.print()  # Blank line between results
+
+    # Footer with tips
+    console.print("[dim]─" * 80 + "[/dim]")
+    console.print(
+        "\n[dim]💡 Tip: Try different phrasings or add more specific terms for better results[/dim]"
+    )
+
+
+async def _display_json_results(ranked_results: list[dict]) -> None:
+    """Display results in JSON format.
+
+    Args:
+        ranked_results: List of ranked results with explanations
+    """
+    from ..output import print_json
+
+    json_data = []
+    for item in ranked_results:
+        result = item["result"]
+        json_data.append(
+            {
+                "file": str(result.file_path),
+                "start_line": result.start_line,
+                "end_line": result.end_line,
+                "function_name": result.function_name,
+                "class_name": result.class_name,
+                "content": result.content,
+                "similarity_score": result.similarity_score,
+                "relevance": item["relevance"],
+                "explanation": item["explanation"],
+                "search_query": item["query"],
+            }
+        )
+
+    print_json(json_data, title="Chat Search Results")
+
+
+async def _show_fallback_results(
+    search_results: dict[str, list],
+    limit: int,
+) -> None:
+    """Show fallback results when LLM analysis fails.
+
+    Args:
+        search_results: Dictionary of search queries to results
+        limit: Number of results to show
+    """
+    from ..output import print_search_results
+
+    # Flatten and deduplicate results
+    all_results = []
+    seen_files = set()
+
+    for results in search_results.values():
+        for result in results:
+            file_key = (result.file_path, result.start_line)
+            if file_key not in seen_files:
+                all_results.append(result)
+                seen_files.add(file_key)
+
+    # Sort by similarity score
+    all_results.sort(key=lambda r: r.similarity_score, reverse=True)
+
+    # Show top N
+    print_search_results(
+        results=all_results[:limit],
+        query="Combined search results",
+        show_content=True,
+    )
+
+
+if __name__ == "__main__":
+    chat_app()