mcp_vector_search-0.15.7-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.

Note: this release of mcp-vector-search has been flagged as potentially problematic.
Files changed (86)
  1. mcp_vector_search/__init__.py +10 -0
  2. mcp_vector_search/cli/__init__.py +1 -0
  3. mcp_vector_search/cli/commands/__init__.py +1 -0
  4. mcp_vector_search/cli/commands/auto_index.py +397 -0
  5. mcp_vector_search/cli/commands/chat.py +534 -0
  6. mcp_vector_search/cli/commands/config.py +393 -0
  7. mcp_vector_search/cli/commands/demo.py +358 -0
  8. mcp_vector_search/cli/commands/index.py +762 -0
  9. mcp_vector_search/cli/commands/init.py +658 -0
  10. mcp_vector_search/cli/commands/install.py +869 -0
  11. mcp_vector_search/cli/commands/install_old.py +700 -0
  12. mcp_vector_search/cli/commands/mcp.py +1254 -0
  13. mcp_vector_search/cli/commands/reset.py +393 -0
  14. mcp_vector_search/cli/commands/search.py +796 -0
  15. mcp_vector_search/cli/commands/setup.py +1133 -0
  16. mcp_vector_search/cli/commands/status.py +584 -0
  17. mcp_vector_search/cli/commands/uninstall.py +404 -0
  18. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  19. mcp_vector_search/cli/commands/visualize/cli.py +265 -0
  20. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  21. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  22. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +29 -0
  23. mcp_vector_search/cli/commands/visualize/graph_builder.py +709 -0
  24. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  25. mcp_vector_search/cli/commands/visualize/server.py +201 -0
  26. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  27. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  28. mcp_vector_search/cli/commands/visualize/templates/base.py +218 -0
  29. mcp_vector_search/cli/commands/visualize/templates/scripts.py +3670 -0
  30. mcp_vector_search/cli/commands/visualize/templates/styles.py +779 -0
  31. mcp_vector_search/cli/commands/visualize.py.original +2536 -0
  32. mcp_vector_search/cli/commands/watch.py +287 -0
  33. mcp_vector_search/cli/didyoumean.py +520 -0
  34. mcp_vector_search/cli/export.py +320 -0
  35. mcp_vector_search/cli/history.py +295 -0
  36. mcp_vector_search/cli/interactive.py +342 -0
  37. mcp_vector_search/cli/main.py +484 -0
  38. mcp_vector_search/cli/output.py +414 -0
  39. mcp_vector_search/cli/suggestions.py +375 -0
  40. mcp_vector_search/config/__init__.py +1 -0
  41. mcp_vector_search/config/constants.py +24 -0
  42. mcp_vector_search/config/defaults.py +200 -0
  43. mcp_vector_search/config/settings.py +146 -0
  44. mcp_vector_search/core/__init__.py +1 -0
  45. mcp_vector_search/core/auto_indexer.py +298 -0
  46. mcp_vector_search/core/config_utils.py +394 -0
  47. mcp_vector_search/core/connection_pool.py +360 -0
  48. mcp_vector_search/core/database.py +1237 -0
  49. mcp_vector_search/core/directory_index.py +318 -0
  50. mcp_vector_search/core/embeddings.py +294 -0
  51. mcp_vector_search/core/exceptions.py +89 -0
  52. mcp_vector_search/core/factory.py +318 -0
  53. mcp_vector_search/core/git_hooks.py +345 -0
  54. mcp_vector_search/core/indexer.py +1002 -0
  55. mcp_vector_search/core/llm_client.py +453 -0
  56. mcp_vector_search/core/models.py +294 -0
  57. mcp_vector_search/core/project.py +350 -0
  58. mcp_vector_search/core/scheduler.py +330 -0
  59. mcp_vector_search/core/search.py +952 -0
  60. mcp_vector_search/core/watcher.py +322 -0
  61. mcp_vector_search/mcp/__init__.py +5 -0
  62. mcp_vector_search/mcp/__main__.py +25 -0
  63. mcp_vector_search/mcp/server.py +752 -0
  64. mcp_vector_search/parsers/__init__.py +8 -0
  65. mcp_vector_search/parsers/base.py +296 -0
  66. mcp_vector_search/parsers/dart.py +605 -0
  67. mcp_vector_search/parsers/html.py +413 -0
  68. mcp_vector_search/parsers/javascript.py +643 -0
  69. mcp_vector_search/parsers/php.py +694 -0
  70. mcp_vector_search/parsers/python.py +502 -0
  71. mcp_vector_search/parsers/registry.py +223 -0
  72. mcp_vector_search/parsers/ruby.py +678 -0
  73. mcp_vector_search/parsers/text.py +186 -0
  74. mcp_vector_search/parsers/utils.py +265 -0
  75. mcp_vector_search/py.typed +1 -0
  76. mcp_vector_search/utils/__init__.py +42 -0
  77. mcp_vector_search/utils/gitignore.py +250 -0
  78. mcp_vector_search/utils/gitignore_updater.py +212 -0
  79. mcp_vector_search/utils/monorepo.py +339 -0
  80. mcp_vector_search/utils/timing.py +338 -0
  81. mcp_vector_search/utils/version.py +47 -0
  82. mcp_vector_search-0.15.7.dist-info/METADATA +884 -0
  83. mcp_vector_search-0.15.7.dist-info/RECORD +86 -0
  84. mcp_vector_search-0.15.7.dist-info/WHEEL +4 -0
  85. mcp_vector_search-0.15.7.dist-info/entry_points.txt +3 -0
  86. mcp_vector_search-0.15.7.dist-info/licenses/LICENSE +21 -0
mcp_vector_search/cli/commands/chat.py
@@ -0,0 +1,534 @@
+ """Chat command for LLM-powered intelligent code search."""
+
+ import asyncio
+ from pathlib import Path
+
+ import typer
+ from loguru import logger
+
+ from ...core.database import ChromaVectorDatabase
+ from ...core.embeddings import create_embedding_function
+ from ...core.exceptions import ProjectNotFoundError, SearchError
+ from ...core.llm_client import LLMClient
+ from ...core.project import ProjectManager
+ from ...core.search import SemanticSearchEngine
+ from ..didyoumean import create_enhanced_typer
+ from ..output import (
+     console,
+     print_error,
+     print_info,
+     print_success,
+     print_warning,
+ )
+
+ # Create chat subcommand app with "did you mean" functionality
+ chat_app = create_enhanced_typer(
+     help="šŸ¤– LLM-powered intelligent code search",
+     invoke_without_command=True,
+ )
+
+
+ @chat_app.callback(invoke_without_command=True)
+ def chat_main(
+     ctx: typer.Context,
+     query: str | None = typer.Argument(
+         None,
+         help="Natural language query about your code",
+     ),
+     project_root: Path | None = typer.Option(
+         None,
+         "--project-root",
+         "-p",
+         help="Project root directory (auto-detected if not specified)",
+         exists=True,
+         file_okay=False,
+         dir_okay=True,
+         readable=True,
+         rich_help_panel="šŸ”§ Global Options",
+     ),
+     limit: int = typer.Option(
+         5,
+         "--limit",
+         "-l",
+         help="Maximum number of results to return",
+         min=1,
+         max=20,
+         rich_help_panel="šŸ“Š Result Options",
+     ),
+     model: str | None = typer.Option(
+         None,
+         "--model",
+         "-m",
+         help="Model to use (defaults based on provider: gpt-4o-mini for OpenAI, claude-3-haiku for OpenRouter)",
+         rich_help_panel="šŸ¤– LLM Options",
+     ),
+     provider: str | None = typer.Option(
+         None,
+         "--provider",
+         help="LLM provider to use: 'openai' or 'openrouter' (auto-detect if not specified)",
+         rich_help_panel="šŸ¤– LLM Options",
+     ),
+     timeout: float | None = typer.Option(
+         30.0,
+         "--timeout",
+         help="API timeout in seconds",
+         min=5.0,
+         max=120.0,
+         rich_help_panel="šŸ¤– LLM Options",
+     ),
+     json_output: bool = typer.Option(
+         False,
+         "--json",
+         help="Output results in JSON format",
+         rich_help_panel="šŸ“Š Result Options",
+     ),
+ ) -> None:
+     """šŸ¤– Ask questions about your code in natural language.
+
+     Uses LLM (OpenAI or OpenRouter) to intelligently search your codebase and answer
+     questions like "where is X defined?", "how does Y work?", etc.
+
+     [bold cyan]Setup:[/bold cyan]
+
+     [green]Option A - OpenAI (recommended):[/green]
+         $ export OPENAI_API_KEY="your-key-here"
+         Get a key at: [cyan]https://platform.openai.com/api-keys[/cyan]
+
+     [green]Option B - OpenRouter:[/green]
+         $ export OPENROUTER_API_KEY="your-key-here"
+         Get a key at: [cyan]https://openrouter.ai/keys[/cyan]
+
+     [dim]Provider is auto-detected. OpenAI is preferred if both keys are set.[/dim]
+
+     [bold cyan]Examples:[/bold cyan]
+
+     [green]Ask where a parameter is set:[/green]
+         $ mcp-vector-search chat "where is similarity_threshold set?"
+
+     [green]Ask how something works:[/green]
+         $ mcp-vector-search chat "how does the indexing process work?"
+
+     [green]Find implementation details:[/green]
+         $ mcp-vector-search chat "show me the search ranking algorithm"
+
+     [green]Force specific provider:[/green]
+         $ mcp-vector-search chat "question" --provider openai
+         $ mcp-vector-search chat "question" --provider openrouter
+
+     [green]Use custom model:[/green]
+         $ mcp-vector-search chat "question" --model gpt-4o
+         $ mcp-vector-search chat "question" --model anthropic/claude-3.5-sonnet
+
+     [bold cyan]Advanced:[/bold cyan]
+
+     [green]Limit results:[/green]
+         $ mcp-vector-search chat "find auth code" --limit 3
+
+     [green]Custom timeout:[/green]
+         $ mcp-vector-search chat "complex question" --timeout 60
+
+     [dim]šŸ’” Tip: More specific questions get better answers. The LLM generates multiple
+     search queries and analyzes results to find the most relevant code.[/dim]
+     """
+     # If no query provided and no subcommand invoked, exit (show help)
+     if query is None:
+         if ctx.invoked_subcommand is None:
+             # No query and no subcommand - show help
+             raise typer.Exit()
+         else:
+             # A subcommand was invoked - let it handle the request
+             return
+
+     try:
+         project_root = project_root or ctx.obj.get("project_root") or Path.cwd()
+
+         # Validate provider if specified
+         if provider and provider not in ("openai", "openrouter"):
+             print_error(
+                 f"Invalid provider: {provider}. Must be 'openai' or 'openrouter'"
+             )
+             raise typer.Exit(1)
+
+         # Run the chat search
+         asyncio.run(
+             run_chat_search(
+                 project_root=project_root,
+                 query=query,
+                 limit=limit,
+                 model=model,
+                 provider=provider,
+                 timeout=timeout,
+                 json_output=json_output,
+             )
+         )
+
+     except Exception as e:
+         logger.error(f"Chat search failed: {e}")
+         print_error(f"Chat search failed: {e}")
+         raise typer.Exit(1)
+
+
+ async def run_chat_search(
+     project_root: Path,
+     query: str,
+     limit: int = 5,
+     model: str | None = None,
+     provider: str | None = None,
+     timeout: float = 30.0,
+     json_output: bool = False,
+ ) -> None:
+     """Run LLM-powered chat search.
+
+     Implementation Flow:
+     1. Initialize LLM client and validate API key
+     2. Generate 2-3 targeted search queries from natural language
+     3. Execute each search query against vector database
+     4. Have LLM analyze all results and select most relevant ones
+     5. Display results with explanations
+
+     Args:
+         project_root: Project root directory
+         query: Natural language query from user
+         limit: Maximum number of results to return
+         model: Model to use (optional, defaults based on provider)
+         provider: LLM provider ('openai' or 'openrouter', auto-detect if None)
+         timeout: API timeout in seconds
+         json_output: Whether to output JSON format
+     """
+     # Check for API keys (environment variable or config file)
+     from ...core.config_utils import (
+         get_config_file_path,
+         get_openai_api_key,
+         get_openrouter_api_key,
+         get_preferred_llm_provider,
+     )
+
+     config_dir = project_root / ".mcp-vector-search"
+     openai_key = get_openai_api_key(config_dir)
+     openrouter_key = get_openrouter_api_key(config_dir)
+
+     # Determine which provider to use
+     if provider:
+         # Explicit provider specified
+         if provider == "openai" and not openai_key:
+             print_error("OpenAI API key not found.")
+             print_info("\n[bold]To use OpenAI:[/bold]")
+             print_info(
+                 "1. Get an API key from [cyan]https://platform.openai.com/api-keys[/cyan]"
+             )
+             print_info("2. Set environment variable:")
+             print_info(" [yellow]export OPENAI_API_KEY='your-key'[/yellow]")
+             print_info("")
+             print_info("Or run: [cyan]mcp-vector-search setup[/cyan]")
+             raise typer.Exit(1)
+         elif provider == "openrouter" and not openrouter_key:
+             print_error("OpenRouter API key not found.")
+             print_info("\n[bold]To use OpenRouter:[/bold]")
+             print_info("1. Get an API key from [cyan]https://openrouter.ai/keys[/cyan]")
+             print_info("2. Set environment variable:")
+             print_info(" [yellow]export OPENROUTER_API_KEY='your-key'[/yellow]")
+             print_info("")
+             print_info("Or run: [cyan]mcp-vector-search setup[/cyan]")
+             raise typer.Exit(1)
+     else:
+         # Auto-detect provider
+         preferred_provider = get_preferred_llm_provider(config_dir)
+
+         if preferred_provider == "openai" and openai_key:
+             provider = "openai"
+         elif preferred_provider == "openrouter" and openrouter_key:
+             provider = "openrouter"
+         elif openai_key:
+             provider = "openai"
+         elif openrouter_key:
+             provider = "openrouter"
+         else:
+             print_error("No LLM API key found.")
+             print_info("\n[bold]To use the chat command, set up an API key:[/bold]")
+             print_info("")
+             print_info("[cyan]Option A - OpenAI (recommended):[/cyan]")
+             print_info(
+                 "1. Get a key from [cyan]https://platform.openai.com/api-keys[/cyan]"
+             )
+             print_info("2. [yellow]export OPENAI_API_KEY='your-key'[/yellow]")
+             print_info("")
+             print_info("[cyan]Option B - OpenRouter:[/cyan]")
+             print_info("1. Get a key from [cyan]https://openrouter.ai/keys[/cyan]")
+             print_info("2. [yellow]export OPENROUTER_API_KEY='your-key'[/yellow]")
+             print_info("")
+             print_info("Or run: [cyan]mcp-vector-search setup[/cyan]")
+             config_path = get_config_file_path(config_dir)
+             print_info(f"\n[dim]Config file location: {config_path}[/dim]\n")
+             raise typer.Exit(1)
+
+     # Load project configuration
+     project_manager = ProjectManager(project_root)
+
+     if not project_manager.is_initialized():
+         raise ProjectNotFoundError(
+             f"Project not initialized at {project_root}. Run 'mcp-vector-search init' first."
+         )
+
+     config = project_manager.load_config()
+
+     # Initialize LLM client
+     try:
+         llm_client = LLMClient(
+             openai_api_key=openai_key,
+             openrouter_api_key=openrouter_key,
+             model=model,
+             provider=provider,
+             timeout=timeout,
+         )
+         provider_display = llm_client.provider.capitalize()
+         print_success(f"Connected to {provider_display}: {llm_client.model}")
+     except ValueError as e:
+         print_error(str(e))
+         raise typer.Exit(1)
+
+     # Step 1: Generate search queries from natural language
+     console.print(f"\n[cyan]šŸ’­ Analyzing query:[/cyan] [white]{query}[/white]")
+
+     try:
+         search_queries = await llm_client.generate_search_queries(query, limit=3)
+
+         if not search_queries:
+             print_error("Failed to generate search queries from your question.")
+             raise typer.Exit(1)
+
+         console.print(
+             f"\n[cyan]šŸ” Generated {len(search_queries)} search queries:[/cyan]"
+         )
+         for i, sq in enumerate(search_queries, 1):
+             console.print(f" {i}. [yellow]{sq}[/yellow]")
+
+     except SearchError as e:
+         print_error(f"Failed to generate queries: {e}")
+         raise typer.Exit(1)
+
+     # Step 2: Execute each search query
+     console.print("\n[cyan]šŸ”Ž Searching codebase...[/cyan]")
+
+     embedding_function, _ = create_embedding_function(config.embedding_model)
+     database = ChromaVectorDatabase(
+         persist_directory=config.index_path,
+         embedding_function=embedding_function,
+     )
+
+     search_engine = SemanticSearchEngine(
+         database=database,
+         project_root=project_root,
+         similarity_threshold=config.similarity_threshold,
+     )
+
+     # Execute all searches
+     search_results = {}
+     total_results = 0
+
+     try:
+         async with database:
+             for search_query in search_queries:
+                 results = await search_engine.search(
+                     query=search_query,
+                     limit=limit * 2,  # Get more results for LLM to analyze
+                     similarity_threshold=config.similarity_threshold,
+                     include_context=True,
+                 )
+                 search_results[search_query] = results
+                 total_results += len(results)
+
+                 console.print(
+                     f" • [yellow]{search_query}[/yellow]: {len(results)} results"
+                 )
+
+     except Exception as e:
+         logger.error(f"Search execution failed: {e}")
+         print_error(f"Search failed: {e}")
+         raise typer.Exit(1)
+
+     if total_results == 0:
+         print_warning("\nāš ļø No results found for any search query.")
+         print_info("\n[bold]Suggestions:[/bold]")
+         print_info(" • Try rephrasing your question")
+         print_info(" • Use more general terms")
+         print_info(
+             " • Check if relevant files are indexed with [cyan]mcp-vector-search status[/cyan]"
+         )
+         raise typer.Exit(0)
+
+     # Step 3: Have LLM analyze and rank results
+     console.print(f"\n[cyan]šŸ¤– Analyzing {total_results} results...[/cyan]")
+
+     try:
+         ranked_results = await llm_client.analyze_and_rank_results(
+             original_query=query,
+             search_results=search_results,
+             top_n=limit,
+         )
+
+         if not ranked_results:
+             print_warning("\nāš ļø LLM could not identify relevant results.")
+             raise typer.Exit(0)
+
+     except SearchError as e:
+         print_error(f"Result analysis failed: {e}")
+         # Fallback: show raw search results
+         print_warning("\nShowing raw search results instead...")
+         await _show_fallback_results(search_results, limit)
+         raise typer.Exit(1)
+
+     # Step 4: Display results with explanations
+     if json_output:
+         await _display_json_results(ranked_results)
+     else:
+         await _display_rich_results(ranked_results, query)
+
+
+ async def _display_rich_results(
+     ranked_results: list[dict],
+     original_query: str,
+ ) -> None:
+     """Display results in rich formatted output.
+
+     Args:
+         ranked_results: List of ranked results with explanations
+         original_query: Original user query
+     """
+     from rich.panel import Panel
+     from rich.syntax import Syntax
+
+     console.print(
+         f"\n[bold cyan]šŸŽÆ Top Results for:[/bold cyan] [white]{original_query}[/white]\n"
+     )
+
+     for i, item in enumerate(ranked_results, 1):
+         result = item["result"]
+         relevance = item["relevance"]
+         explanation = item["explanation"]
+         query = item["query"]
+
+         # Determine relevance emoji and color
+         if relevance == "High":
+             relevance_emoji = "🟢"
+             relevance_color = "green"
+         elif relevance == "Medium":
+             relevance_emoji = "🟔"
+             relevance_color = "yellow"
+         else:
+             relevance_emoji = "šŸ”“"
+             relevance_color = "red"
+
+         # Header with result number and file
+         console.print(f"[bold]šŸ“ Result {i} of {len(ranked_results)}[/bold]")
+         console.print(
+             f"[cyan]šŸ“‚ {result.file_path.relative_to(result.file_path.parent.parent)}[/cyan]"
+         )
+
+         # Relevance and explanation
+         console.print(
+             f"\n{relevance_emoji} [bold {relevance_color}]Relevance: {relevance}[/bold {relevance_color}]"
+         )
+         console.print(f"[dim]Search query: {query}[/dim]")
+         console.print(f"\nšŸ’” [italic]{explanation}[/italic]\n")
+
+         # Code snippet with syntax highlighting
+         file_ext = result.file_path.suffix.lstrip(".")
+         code_syntax = Syntax(
+             result.content,
+             lexer=file_ext or "python",
+             theme="monokai",
+             line_numbers=True,
+             start_line=result.start_line,
+         )
+
+         panel = Panel(
+             code_syntax,
+             title=f"[bold]{result.function_name or result.class_name or 'Code'}[/bold]",
+             border_style="cyan",
+         )
+         console.print(panel)
+
+         # Metadata
+         metadata = []
+         if result.function_name:
+             metadata.append(f"Function: [cyan]{result.function_name}[/cyan]")
+         if result.class_name:
+             metadata.append(f"Class: [cyan]{result.class_name}[/cyan]")
+         metadata.append(f"Lines: [cyan]{result.start_line}-{result.end_line}[/cyan]")
+         metadata.append(f"Similarity: [cyan]{result.similarity_score:.3f}[/cyan]")
+
+         console.print("[dim]" + " | ".join(metadata) + "[/dim]")
+         console.print()  # Blank line between results
+
+     # Footer with tips
+     console.print("[dim]─" * 80 + "[/dim]")
+     console.print(
+         "\n[dim]šŸ’” Tip: Try different phrasings or add more specific terms for better results[/dim]"
+     )
+
+
+ async def _display_json_results(ranked_results: list[dict]) -> None:
+     """Display results in JSON format.
+
+     Args:
+         ranked_results: List of ranked results with explanations
+     """
+     from ..output import print_json
+
+     json_data = []
+     for item in ranked_results:
+         result = item["result"]
+         json_data.append(
+             {
+                 "file": str(result.file_path),
+                 "start_line": result.start_line,
+                 "end_line": result.end_line,
+                 "function_name": result.function_name,
+                 "class_name": result.class_name,
+                 "content": result.content,
+                 "similarity_score": result.similarity_score,
+                 "relevance": item["relevance"],
+                 "explanation": item["explanation"],
+                 "search_query": item["query"],
+             }
+         )
+
+     print_json(json_data, title="Chat Search Results")
+
+
+ async def _show_fallback_results(
+     search_results: dict[str, list],
+     limit: int,
+ ) -> None:
+     """Show fallback results when LLM analysis fails.
+
+     Args:
+         search_results: Dictionary of search queries to results
+         limit: Number of results to show
+     """
+     from ..output import print_search_results
+
+     # Flatten and deduplicate results
+     all_results = []
+     seen_files = set()
+
+     for results in search_results.values():
+         for result in results:
+             file_key = (result.file_path, result.start_line)
+             if file_key not in seen_files:
+                 all_results.append(result)
+                 seen_files.add(file_key)
+
+     # Sort by similarity score
+     all_results.sort(key=lambda r: r.similarity_score, reverse=True)
+
+     # Show top N
+     print_search_results(
+         results=all_results[:limit],
+         query="Combined search results",
+         show_content=True,
+     )
+
+
+ if __name__ == "__main__":
+     chat_app()
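
For orientation, below is a minimal sketch of driving the chat flow above programmatically instead of through the CLI. The import path and the run_chat_search signature are taken from the diff; the query string is one of the docstring examples, while the working directory, installed wheel, a project already initialized via `mcp-vector-search init`, and an OPENAI_API_KEY in the environment are assumptions made for illustration.

# Sketch only: calls the chat flow from chat.py directly, not an official API.
# Assumes the wheel is installed, the project was initialized/indexed with
# `mcp-vector-search init`, and OPENAI_API_KEY is set in the environment.
import asyncio
from pathlib import Path

from mcp_vector_search.cli.commands.chat import run_chat_search

asyncio.run(
    run_chat_search(
        project_root=Path.cwd(),  # assumed: an initialized project root
        query="where is similarity_threshold set?",  # example from the docstring
        limit=5,  # top-N results after LLM ranking
        provider="openai",  # or "openrouter"; auto-detected when None
        timeout=30.0,
        json_output=False,
    )
)

Note that the error paths in run_chat_search raise typer.Exit, so a standalone script like this sees that exception directly rather than a clean process exit code.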