cicada-mcp 0.2.0-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. cicada/_version_hash.py +4 -0
  2. cicada/cli.py +6 -748
  3. cicada/commands.py +1255 -0
  4. cicada/dead_code/__init__.py +1 -0
  5. cicada/{find_dead_code.py → dead_code/finder.py} +2 -1
  6. cicada/dependency_analyzer.py +147 -0
  7. cicada/entry_utils.py +92 -0
  8. cicada/extractors/base.py +9 -9
  9. cicada/extractors/call.py +17 -20
  10. cicada/extractors/common.py +64 -0
  11. cicada/extractors/dependency.py +117 -235
  12. cicada/extractors/doc.py +2 -49
  13. cicada/extractors/function.py +10 -14
  14. cicada/extractors/keybert.py +228 -0
  15. cicada/extractors/keyword.py +191 -0
  16. cicada/extractors/module.py +6 -10
  17. cicada/extractors/spec.py +8 -56
  18. cicada/format/__init__.py +20 -0
  19. cicada/{ascii_art.py → format/ascii_art.py} +1 -1
  20. cicada/format/formatter.py +1145 -0
  21. cicada/git_helper.py +134 -7
  22. cicada/indexer.py +322 -89
  23. cicada/interactive_setup.py +251 -323
  24. cicada/interactive_setup_helpers.py +302 -0
  25. cicada/keyword_expander.py +437 -0
  26. cicada/keyword_search.py +208 -422
  27. cicada/keyword_test.py +383 -16
  28. cicada/mcp/__init__.py +10 -0
  29. cicada/mcp/entry.py +17 -0
  30. cicada/mcp/filter_utils.py +107 -0
  31. cicada/mcp/pattern_utils.py +118 -0
  32. cicada/{mcp_server.py → mcp/server.py} +819 -73
  33. cicada/mcp/tools.py +473 -0
  34. cicada/pr_finder.py +2 -3
  35. cicada/pr_indexer/indexer.py +3 -2
  36. cicada/setup.py +167 -35
  37. cicada/tier.py +225 -0
  38. cicada/utils/__init__.py +9 -2
  39. cicada/utils/fuzzy_match.py +54 -0
  40. cicada/utils/index_utils.py +9 -0
  41. cicada/utils/path_utils.py +18 -0
  42. cicada/utils/text_utils.py +52 -1
  43. cicada/utils/tree_utils.py +47 -0
  44. cicada/version_check.py +99 -0
  45. cicada/watch_manager.py +320 -0
  46. cicada/watcher.py +431 -0
  47. cicada_mcp-0.3.0.dist-info/METADATA +541 -0
  48. cicada_mcp-0.3.0.dist-info/RECORD +70 -0
  49. cicada_mcp-0.3.0.dist-info/entry_points.txt +4 -0
  50. cicada/formatter.py +0 -864
  51. cicada/keybert_extractor.py +0 -286
  52. cicada/lightweight_keyword_extractor.py +0 -290
  53. cicada/mcp_entry.py +0 -683
  54. cicada/mcp_tools.py +0 -291
  55. cicada_mcp-0.2.0.dist-info/METADATA +0 -735
  56. cicada_mcp-0.2.0.dist-info/RECORD +0 -53
  57. cicada_mcp-0.2.0.dist-info/entry_points.txt +0 -4
  58. /cicada/{dead_code_analyzer.py → dead_code/analyzer.py} +0 -0
  59. /cicada/{colors.py → format/colors.py} +0 -0
  60. {cicada_mcp-0.2.0.dist-info → cicada_mcp-0.3.0.dist-info}/WHEEL +0 -0
  61. {cicada_mcp-0.2.0.dist-info → cicada_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
  62. {cicada_mcp-0.2.0.dist-info → cicada_mcp-0.3.0.dist-info}/top_level.txt +0 -0
cicada/{mcp_server.py → mcp/server.py}
@@ -7,10 +7,11 @@ Provides an MCP tool to search for Elixir modules and their functions.
7
7
  Author: Cursor(Auto)
8
8
  """
9
9
 
10
- import contextlib
11
10
  import os
11
+ import subprocess
12
12
  import sys
13
13
  import time
14
+ from datetime import datetime, timedelta, timezone
14
15
  from pathlib import Path
15
16
  from typing import Any, cast
16
17
 
@@ -20,11 +21,18 @@ from mcp.server.stdio import stdio_server
20
21
  from mcp.types import TextContent, Tool
21
22
 
22
23
  from cicada.command_logger import get_logger
23
- from cicada.formatter import ModuleFormatter
24
+ from cicada.format import ModuleFormatter
24
25
  from cicada.git_helper import GitHelper
25
- from cicada.mcp_tools import get_tool_definitions
26
+ from cicada.mcp.pattern_utils import (
27
+ FunctionPattern,
28
+ has_wildcards,
29
+ match_any_pattern,
30
+ parse_function_patterns,
31
+ split_or_patterns,
32
+ )
33
+ from cicada.mcp.tools import get_tool_definitions
26
34
  from cicada.pr_finder import PRFinder
27
- from cicada.utils import get_config_path, get_pr_index_path, load_index
35
+ from cicada.utils import find_similar_names, get_config_path, get_pr_index_path, load_index
28
36
 
29
37
 
30
38
  class CicadaServer:
@@ -73,32 +81,31 @@ class CicadaServer:
73
81
  Returns:
74
82
  Path to the config file
75
83
  """
76
- # Check if CICADA_CONFIG_DIR is set (new temp directory approach)
84
+ # Check if CICADA_CONFIG_DIR is set (direct path to storage directory)
77
85
  config_dir = os.environ.get("CICADA_CONFIG_DIR")
78
86
  if config_dir:
79
87
  return str(Path(config_dir) / "config.yaml")
80
88
 
81
89
  # Determine repository path from environment or current directory
82
- repo_path = os.environ.get("CICADA_REPO_PATH")
90
+ repo_path = None
83
91
 
84
92
  # Check if WORKSPACE_FOLDER_PATHS is available (Cursor-specific)
85
- if not repo_path:
86
- workspace_paths = os.environ.get("WORKSPACE_FOLDER_PATHS")
87
- if workspace_paths:
88
- # WORKSPACE_FOLDER_PATHS might be a single path or multiple paths
89
- # Take the first one if multiple
90
- # Use os.pathsep for platform-aware splitting (';' on Windows, ':' on Unix)
91
- repo_path = (
92
- workspace_paths.split(os.pathsep)[0]
93
- if os.pathsep in workspace_paths
94
- else workspace_paths
95
- )
93
+ workspace_paths = os.environ.get("WORKSPACE_FOLDER_PATHS")
94
+ if workspace_paths:
95
+ # WORKSPACE_FOLDER_PATHS might be a single path or multiple paths
96
+ # Take the first one if multiple
97
+ # Use os.pathsep for platform-aware splitting (';' on Windows, ':' on Unix)
98
+ repo_path = (
99
+ workspace_paths.split(os.pathsep)[0]
100
+ if os.pathsep in workspace_paths
101
+ else workspace_paths
102
+ )
96
103
 
97
104
  # Fall back to current working directory
98
105
  if not repo_path:
99
106
  repo_path = str(Path.cwd().resolve())
100
107
 
101
- # Use new storage structure only
108
+ # Calculate config path from repository path
102
109
  config_path = get_config_path(repo_path)
103
110
  return str(config_path)
104
111
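The resolution above takes the first entry of WORKSPACE_FOLDER_PATHS, splitting on `os.pathsep` so the behaviour is platform-aware. A standalone sketch of that splitting (the paths are invented for illustration):

```python
import os

def first_workspace_path(workspace_paths: str) -> str:
    """Return the first entry of a pathsep-separated list (';' on Windows, ':' on Unix)."""
    if os.pathsep in workspace_paths:
        return workspace_paths.split(os.pathsep)[0]
    return workspace_paths

# Illustrative values only:
print(first_workspace_path("/home/alice/proj_a:/home/alice/proj_b"))  # "/home/alice/proj_a" on Unix
print(first_workspace_path("/home/alice/proj_a"))                     # a single path is returned unchanged
```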
 
@@ -193,6 +200,77 @@ class CicadaServer:
193
200
  return True
194
201
  return False
195
202
 
203
+ def _check_index_staleness(self) -> dict[str, Any] | None:
204
+ """
205
+ Check if the index is stale by comparing file modification times.
206
+
207
+ Returns:
208
+ Dictionary with staleness info (is_stale, index_age, newest_file_age) or None
209
+ """
210
+ try:
211
+ import os
212
+ import random
213
+ from datetime import datetime
214
+
215
+ # Get index file path and modification time
216
+ index_path = Path(self.config["storage"]["index_path"])
217
+ if not index_path.exists():
218
+ return None
219
+
220
+ index_mtime = os.path.getmtime(index_path)
221
+ index_age = datetime.now().timestamp() - index_mtime
222
+
223
+ # Get repo path
224
+ repo_path = Path(self.config.get("repository", {}).get("path", "."))
225
+
226
+ # Check a sample of indexed files to see if any are newer than the index
227
+ # Use random sampling for better coverage
228
+ max_files_to_check = 50
229
+ all_modules = list(self.index.get("modules", {}).values())
230
+
231
+ if len(all_modules) > max_files_to_check:
232
+ modules_to_check = random.sample(all_modules, max_files_to_check)
233
+ else:
234
+ modules_to_check = all_modules
235
+
236
+ newest_file_mtime = 0
237
+
238
+ for module_data in modules_to_check:
239
+ file_path = repo_path / module_data["file"]
240
+ if file_path.exists():
241
+ file_mtime = os.path.getmtime(file_path)
242
+ newest_file_mtime = max(newest_file_mtime, file_mtime)
243
+
244
+ # Check if any files are newer than the index
245
+ is_stale = newest_file_mtime > index_mtime
246
+
247
+ if is_stale:
248
+ # Calculate how old the index is in human-readable format
249
+ hours_old = index_age / 3600
250
+ if hours_old < 1:
251
+ age_str = f"{int(index_age / 60)} minutes"
252
+ elif hours_old < 24:
253
+ age_str = f"{int(hours_old)} hours"
254
+ else:
255
+ age_str = f"{int(hours_old / 24)} days"
256
+
257
+ return {
258
+ "is_stale": True,
259
+ "age_str": age_str,
260
+ }
261
+
262
+ return None
263
+ except (OSError, KeyError):
264
+ # Expected errors - file permissions, disk issues, config issues
265
+ # Silently ignore these as staleness check is non-critical
266
+ return None
267
+ except Exception as e:
268
+ # Unexpected error - log for debugging but don't break functionality
269
+ import sys
270
+
271
+ print(f"Warning: Unexpected error checking index staleness: {e}", file=sys.stderr)
272
+ return None
273
+
196
274
  async def list_tools(self) -> list[Tool]:
197
275
  """List available MCP tools."""
198
276
  return get_tool_definitions()
@@ -259,6 +337,8 @@ class CicadaServer:
259
337
  include_usage_examples = arguments.get("include_usage_examples", False)
260
338
  max_examples = arguments.get("max_examples", 5)
261
339
  test_files_only = arguments.get("test_files_only", False)
340
+ changed_since = arguments.get("changed_since")
341
+ show_relationships = arguments.get("show_relationships", True)
262
342
 
263
343
  if not function_name:
264
344
  error_msg = "'function_name' is required"
@@ -270,16 +350,23 @@ class CicadaServer:
270
350
  include_usage_examples,
271
351
  max_examples,
272
352
  test_files_only,
353
+ changed_since,
354
+ show_relationships,
273
355
  )
274
356
  elif name == "search_module_usage":
275
357
  module_name = arguments.get("module_name")
276
358
  output_format = arguments.get("format", "markdown")
359
+ usage_type = arguments.get("usage_type", "all")
277
360
 
278
361
  if not module_name:
279
362
  error_msg = "'module_name' is required"
280
363
  return [TextContent(type="text", text=error_msg)]
281
364
 
282
- return await self._search_module_usage(module_name, output_format)
365
+ if usage_type not in ("all", "test_only", "production_only"):
366
+ error_msg = "'usage_type' must be one of: 'all', 'test_only', 'production_only'"
367
+ return [TextContent(type="text", text=error_msg)]
368
+
369
+ return await self._search_module_usage(module_name, output_format, usage_type)
283
370
  elif name == "find_pr_for_line":
284
371
  file_path = arguments.get("file_path")
285
372
  line_number = arguments.get("line_number")
@@ -302,6 +389,10 @@ class CicadaServer:
302
389
  precise_tracking = arguments.get("precise_tracking", False)
303
390
  show_evolution = arguments.get("show_evolution", False)
304
391
  max_commits = arguments.get("max_commits", 10)
392
+ since_date = arguments.get("since_date")
393
+ until_date = arguments.get("until_date")
394
+ author = arguments.get("author")
395
+ min_changes = arguments.get("min_changes", 0)
305
396
 
306
397
  if not file_path:
307
398
  error_msg = "'file_path' is required"
@@ -320,6 +411,10 @@ class CicadaServer:
320
411
  precise_tracking,
321
412
  show_evolution,
322
413
  max_commits,
414
+ since_date,
415
+ until_date,
416
+ author,
417
+ min_changes,
323
418
  )
324
419
  elif name == "get_blame":
325
420
  file_path = arguments.get("file_path")
@@ -343,8 +438,12 @@ class CicadaServer:
343
438
  return [TextContent(type="text", text=error_msg)]
344
439
 
345
440
  return await self._get_file_pr_history(file_path)
346
- elif name == "search_by_keywords":
441
+ elif name == "search_by_features" or name == "search_by_keywords":
442
+ # Support both names for backward compatibility
443
+ # search_by_keywords is deprecated but still functional
347
444
  keywords = arguments.get("keywords")
445
+ filter_type = arguments.get("filter_type", "all")
446
+ min_score = arguments.get("min_score", 0.0)
348
447
 
349
448
  if not keywords:
350
449
  error_msg = "'keywords' is required"
@@ -354,15 +453,76 @@ class CicadaServer:
354
453
  error_msg = "'keywords' must be a list of strings"
355
454
  return [TextContent(type="text", text=error_msg)]
356
455
 
357
- return await self._search_by_keywords(keywords)
456
+ if filter_type not in ("all", "modules", "functions"):
457
+ error_msg = "'filter_type' must be one of: 'all', 'modules', 'functions'"
458
+ return [TextContent(type="text", text=error_msg)]
459
+
460
+ if not isinstance(min_score, (int, float)) or min_score < 0.0 or min_score > 1.0:
461
+ error_msg = "'min_score' must be a number between 0.0 and 1.0"
462
+ return [TextContent(type="text", text=error_msg)]
463
+
464
+ return await self._search_by_keywords(keywords, filter_type, min_score)
358
465
  elif name == "find_dead_code":
359
466
  min_confidence = arguments.get("min_confidence", "high")
360
467
  output_format = arguments.get("format", "markdown")
361
468
 
362
469
  return await self._find_dead_code(min_confidence, output_format)
470
+ elif name == "get_module_dependencies":
471
+ module_name = arguments.get("module_name")
472
+ if not module_name:
473
+ raise ValueError("module_name is required")
474
+ output_format = arguments.get("format", "markdown")
475
+ depth = arguments.get("depth", 1)
476
+ granular = arguments.get("granular", False)
477
+
478
+ return await self._get_module_dependencies(module_name, output_format, depth, granular)
479
+ elif name == "get_function_dependencies":
480
+ module_name = arguments.get("module_name")
481
+ function_name = arguments.get("function_name")
482
+ arity = arguments.get("arity")
483
+ if not module_name:
484
+ raise ValueError("module_name is required")
485
+ if not function_name:
486
+ raise ValueError("function_name is required")
487
+ if arity is None:
488
+ raise ValueError("arity is required")
489
+ output_format = arguments.get("format", "markdown")
490
+ include_context = arguments.get("include_context", False)
491
+
492
+ return await self._get_function_dependencies(
493
+ module_name, function_name, arity, output_format, include_context
494
+ )
363
495
  else:
364
496
  raise ValueError(f"Unknown tool: {name}")
365
497
 
498
+ def _lookup_module_with_error(
499
+ self, module_name: str, include_suggestions: bool = True
500
+ ) -> tuple[dict | None, str | None]:
501
+ """
502
+ Look up a module in the index with error handling.
503
+
504
+ Args:
505
+ module_name: Module name to look up
506
+ include_suggestions: Whether to include similar module suggestions in error
507
+
508
+ Returns:
509
+ Tuple of (module_data, error_message). If found, returns (data, None).
510
+ If not found, returns (None, error_message).
511
+ """
512
+ module_data = self.index["modules"].get(module_name)
513
+ if module_data:
514
+ return module_data, None
515
+
516
+ # Module not found - create error message
517
+ error_msg = f"Module not found: {module_name}"
518
+ if include_suggestions:
519
+ similar = find_similar_names(module_name, list(self.index["modules"].keys()))
520
+ if similar:
521
+ error_msg += "\n\nDid you mean one of these?\n" + "\n".join(
522
+ f" - {name}" for name in similar[:5]
523
+ )
524
+ return None, error_msg
525
+
366
526
  def _resolve_file_to_module(self, file_path: str) -> str | None:
367
527
  """Resolve a file path to a module name by searching the index."""
368
528
  # Normalize the file path (remove leading ./ and trailing whitespace)
@@ -394,27 +554,99 @@ class CicadaServer:
394
554
  output_format: str = "markdown",
395
555
  private_functions: str = "exclude",
396
556
  ) -> list[TextContent]:
397
- """Search for a module and return its information."""
398
- # Exact match lookup
557
+ """
558
+ Search for a module and return its information.
559
+
560
+ Supports wildcards (*) and OR patterns (|) for both module names and file paths.
561
+ Examples:
562
+ - "MyApp.*" - matches all modules starting with MyApp.
563
+ - "*User*" - matches all modules containing User
564
+ - "lib/my_app/*.ex" - matches all modules in that directory
565
+ - "MyApp.User|MyApp.Post" - matches either module
566
+ - "*User*|*Post*" - matches modules containing User OR Post
567
+ """
568
+ # Check for wildcard or OR patterns
569
+ if has_wildcards(module_name):
570
+ # Split by OR patterns
571
+ patterns = split_or_patterns(module_name)
572
+
573
+ # Find all matching modules
574
+ matching_modules = []
575
+ for mod_name, mod_data in self.index["modules"].items():
576
+ # Check if module name or file path matches any pattern
577
+ if match_any_pattern(patterns, mod_name) or match_any_pattern(
578
+ patterns, mod_data["file"]
579
+ ):
580
+ matching_modules.append((mod_name, mod_data))
581
+
582
+ # If no matches found, return error
583
+ if not matching_modules:
584
+ total_modules = self.index["metadata"]["total_modules"]
585
+ if output_format == "json":
586
+ error_result = ModuleFormatter.format_error_json(module_name, total_modules)
587
+ else:
588
+ error_result = ModuleFormatter.format_error_markdown(module_name, total_modules)
589
+ return [TextContent(type="text", text=error_result)]
590
+
591
+ # Format all matching modules
592
+ results: list[str] = []
593
+ for mod_name, mod_data in matching_modules:
594
+ if output_format == "json":
595
+ result = ModuleFormatter.format_module_json(
596
+ mod_name, mod_data, private_functions
597
+ )
598
+ else:
599
+ result = ModuleFormatter.format_module_markdown(
600
+ mod_name, mod_data, private_functions
601
+ )
602
+ results.append(result)
603
+
604
+ # Combine results with separator for markdown, or as array for JSON
605
+ if output_format == "json":
606
+ # For JSON, wrap in array notation
607
+ combined = "[\n" + ",\n".join(results) + "\n]"
608
+ else:
609
+ # For markdown, separate with horizontal rules
610
+ header = (
611
+ f"Found {len(matching_modules)} module(s) matching pattern '{module_name}':\n\n"
612
+ )
613
+ combined = header + "\n\n---\n\n".join(results)
614
+
615
+ return [TextContent(type="text", text=combined)]
616
+
617
+ # Exact match lookup (no wildcards)
399
618
  if module_name in self.index["modules"]:
400
619
  data = self.index["modules"][module_name]
401
620
 
621
+ # Get PR context for the file
622
+ pr_info = self._get_recent_pr_info(data["file"])
623
+
624
+ # Check index staleness
625
+ staleness_info = self._check_index_staleness()
626
+
402
627
  if output_format == "json":
403
628
  result = ModuleFormatter.format_module_json(module_name, data, private_functions)
404
629
  else:
405
630
  result = ModuleFormatter.format_module_markdown(
406
- module_name, data, private_functions
631
+ module_name, data, private_functions, pr_info, staleness_info
407
632
  )
408
633
 
409
634
  return [TextContent(type="text", text=result)]
410
635
 
411
- # Module not found
636
+ # Module not found - compute suggestions and provide helpful error message
412
637
  total_modules = self.index["metadata"]["total_modules"]
413
638
 
414
639
  if output_format == "json":
415
640
  error_result = ModuleFormatter.format_error_json(module_name, total_modules)
416
641
  else:
417
- error_result = ModuleFormatter.format_error_markdown(module_name, total_modules)
642
+ # Compute fuzzy match suggestions
643
+ available_modules = list(self.index["modules"].keys())
644
+ similar_matches = find_similar_names(module_name, available_modules, max_suggestions=3)
645
+ suggestions = [name for name, _score in similar_matches]
646
+
647
+ error_result = ModuleFormatter.format_error_markdown(
648
+ module_name, total_modules, suggestions
649
+ )
418
650
 
419
651
  return [TextContent(type="text", text=error_result)]
420
652
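The docstring above describes the wildcard (*) and OR (|) syntax, but the matching itself is delegated to `has_wildcards`, `split_or_patterns`, and `match_any_pattern` in `cicada/mcp/pattern_utils.py`, which are not shown in this diff. A minimal sketch of equivalent behaviour using `fnmatch`, assuming glob-style, case-sensitive matching (the real helpers may differ, e.g. on case handling; `has_wildcards` presumably just detects "*" or "|"):

```python
from fnmatch import fnmatchcase

def split_or_patterns(pattern: str) -> list[str]:
    # "MyApp.User|MyApp.Post" -> ["MyApp.User", "MyApp.Post"]
    return [p.strip() for p in pattern.split("|") if p.strip()]

def match_any_pattern(patterns: list[str], candidate: str) -> bool:
    # "*" behaves like a glob wildcard; names without wildcards must match exactly.
    return any(fnmatchcase(candidate, p) for p in patterns)

patterns = split_or_patterns("*User*|*Post*")
print(match_any_pattern(patterns, "MyApp.User"))       # True
print(match_any_pattern(patterns, "MyApp.Repo"))       # False
print(match_any_pattern(patterns, "MyApp.Blog.Post"))  # True
# The server also runs these patterns against each module's file path, as shown above.
```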
 
@@ -425,46 +657,60 @@ class CicadaServer:
425
657
  include_usage_examples: bool = False,
426
658
  max_examples: int = 5,
427
659
  test_files_only: bool = False,
660
+ changed_since: str | None = None,
661
+ show_relationships: bool = True,
428
662
  ) -> list[TextContent]:
429
- """Search for a function across all modules and return matches with call sites."""
430
- # Parse the function name - supports multiple formats:
431
- # - "func_name" or "func_name/arity" (search all modules)
432
- # - "Module.func_name" or "Module.func_name/arity" (search specific module)
433
- target_module = None
434
- target_name = function_name
435
- target_arity = None
436
-
437
- # Check for Module.function format
438
- if "." in function_name:
439
- # Split on last dot to separate module from function
440
- parts = function_name.rsplit(".", 1)
441
- if len(parts) == 2:
442
- target_module = parts[0]
443
- target_name = parts[1]
444
-
445
- # Check for arity
446
- if "/" in target_name:
447
- parts = target_name.split("/")
448
- target_name = parts[0]
449
- with contextlib.suppress(ValueError, IndexError):
450
- target_arity = int(parts[1])
663
+ """
664
+ Search for a function across all modules and return matches with call sites.
665
+
666
+ Supports wildcards (*) and OR patterns (|) for function names, module names, and file paths.
667
+ Examples:
668
+ - "create*" - matches all functions starting with create
669
+ - "*user*" - matches all functions containing user
670
+ - "MyApp.User.create*" - matches create* functions in MyApp.User module
671
+ - "create*|update*" - matches functions starting with create OR update
672
+ - "MyApp.*.create/1" - matches create/1 in any module under MyApp
673
+ - "lib/*/user.ex:create*" - matches create* functions in files matching path pattern
674
+ """
675
+ # Support OR syntax by splitting first, then parsing each component individually
676
+ parsed_patterns: list[FunctionPattern] = parse_function_patterns(function_name)
451
677
 
452
678
  # Search across all modules for function definitions
453
679
  results = []
454
- for module_name, module_data in self.index["modules"].items():
455
- # If target_module is specified, only search in that module
456
- if target_module and module_name != target_module:
457
- continue
680
+ seen_functions: set[tuple[str, str, int]] = set()
681
+ # Parse changed_since filter if provided
682
+ cutoff_date = None
683
+ if changed_since:
684
+ cutoff_date = self._parse_changed_since(changed_since)
458
685
 
686
+ for module_name, module_data in self.index["modules"].items():
459
687
  for func in module_data["functions"]:
460
- # Match by name and optionally arity
461
- if func["name"] == target_name and (
462
- target_arity is None or func["arity"] == target_arity
688
+ if any(
689
+ pattern.matches(module_name, module_data["file"], func)
690
+ for pattern in parsed_patterns
463
691
  ):
692
+ # Filter by changed_since if provided
693
+ if cutoff_date:
694
+ func_modified = func.get("last_modified_at")
695
+ if not func_modified:
696
+ continue # Skip functions without timestamp
697
+
698
+ func_modified_dt = datetime.fromisoformat(func_modified)
699
+ # Ensure timezone-aware for comparison
700
+ if func_modified_dt.tzinfo is None:
701
+ func_modified_dt = func_modified_dt.replace(tzinfo=timezone.utc)
702
+
703
+ if func_modified_dt < cutoff_date:
704
+ continue # Function too old, skip
705
+
706
+ key = (module_name, func["name"], func["arity"])
707
+ if key in seen_functions:
708
+ continue
709
+ seen_functions.add(key)
464
710
  # Find call sites for this function
465
711
  call_sites = self._find_call_sites(
466
712
  target_module=module_name,
467
- target_function=target_name,
713
+ target_function=func["name"],
468
714
  target_arity=func["arity"],
469
715
  )
470
716
 
@@ -482,6 +728,14 @@ class CicadaServer:
482
728
  # Extract code lines for each call site
483
729
  self._add_code_examples(call_sites_with_examples)
484
730
 
731
+ # Get PR context for this function
732
+ pr_info = self._get_recent_pr_info(module_data["file"])
733
+
734
+ # Get function dependencies if show_relationships is enabled
735
+ dependencies = []
736
+ if show_relationships:
737
+ dependencies = func.get("dependencies", [])
738
+
485
739
  results.append(
486
740
  {
487
741
  "module": module_name,
@@ -490,19 +744,26 @@ class CicadaServer:
490
744
  "file": module_data["file"],
491
745
  "call_sites": call_sites,
492
746
  "call_sites_with_examples": call_sites_with_examples,
747
+ "pr_info": pr_info,
748
+ "dependencies": dependencies,
493
749
  }
494
750
  )
495
751
 
752
+ # Check index staleness
753
+ staleness_info = self._check_index_staleness()
754
+
496
755
  # Format results
497
756
  if output_format == "json":
498
757
  result = ModuleFormatter.format_function_results_json(function_name, results)
499
758
  else:
500
- result = ModuleFormatter.format_function_results_markdown(function_name, results)
759
+ result = ModuleFormatter.format_function_results_markdown(
760
+ function_name, results, staleness_info, show_relationships
761
+ )
501
762
 
502
763
  return [TextContent(type="text", text=result)]
503
764
 
504
765
  async def _search_module_usage(
505
- self, module_name: str, output_format: str = "markdown"
766
+ self, module_name: str, output_format: str = "markdown", usage_type: str = "all"
506
767
  ) -> list[TextContent]:
507
768
  """
508
769
  Search for all locations where a module is used (aliased/imported and called).
@@ -510,6 +771,7 @@ class CicadaServer:
510
771
  Args:
511
772
  module_name: The module to search for (e.g., "MyApp.User")
512
773
  output_format: Output format ('markdown' or 'json')
774
+ usage_type: Filter by file type ('all', 'test_only', 'production_only')
513
775
 
514
776
  Returns:
515
777
  TextContent with usage information
@@ -624,6 +886,21 @@ class CicadaServer:
624
886
  }
625
887
  )
626
888
 
889
+ # Apply usage type filter if not 'all'
890
+ if usage_type != "all":
891
+ from cicada.mcp.filter_utils import filter_by_file_type
892
+
893
+ # Filter each category that has file information
894
+ for category in [
895
+ "aliases",
896
+ "imports",
897
+ "requires",
898
+ "uses",
899
+ "value_mentions",
900
+ "function_calls",
901
+ ]:
902
+ usage_results[category] = filter_by_file_type(usage_results[category], usage_type)
903
+
627
904
  # Format results
628
905
  if output_format == "json":
629
906
  result = ModuleFormatter.format_module_usage_json(module_name, usage_results)
@@ -801,6 +1078,131 @@ class CicadaServer:
801
1078
 
802
1079
  return call_sites
803
1080
 
1081
+ def _parse_changed_since(self, changed_since: str) -> datetime:
1082
+ """
1083
+ Parse changed_since parameter into datetime.
1084
+
1085
+ Supports:
1086
+ - ISO dates: '2024-01-15'
1087
+ - Relative: '7d', '2w', '3m', '1y'
1088
+ - Git refs: 'HEAD~10', 'v1.0.0' (if git_helper available)
1089
+
1090
+ Returns:
1091
+ datetime object (timezone-aware) representing the cutoff date
1092
+
1093
+ Raises:
1094
+ ValueError: If format is invalid or amount is negative/zero
1095
+ """
1096
+ # ISO date format (YYYY-MM-DD)
1097
+ if "-" in changed_since and len(changed_since) >= 10:
1098
+ try:
1099
+ dt = datetime.fromisoformat(changed_since)
1100
+ # Ensure timezone-aware - if naive, assume UTC
1101
+ if dt.tzinfo is None:
1102
+ dt = dt.replace(tzinfo=timezone.utc)
1103
+ return dt
1104
+ except ValueError:
1105
+ pass
1106
+
1107
+ # Relative format (7d, 2w, 3m, 1y)
1108
+ if len(changed_since) >= 2 and changed_since[-1] in "dwmy":
1109
+ try:
1110
+ amount = int(changed_since[:-1])
1111
+ unit = changed_since[-1]
1112
+
1113
+ # Validate positive amount
1114
+ if amount <= 0:
1115
+ raise ValueError(f"Time amount must be positive, got: {amount}{unit}")
1116
+
1117
+ now = datetime.now(timezone.utc)
1118
+ if unit == "d":
1119
+ return now - timedelta(days=amount)
1120
+ elif unit == "w":
1121
+ return now - timedelta(weeks=amount)
1122
+ elif unit == "m":
1123
+ return now - timedelta(days=amount * 30)
1124
+ elif unit == "y":
1125
+ return now - timedelta(days=amount * 365)
1126
+ except ValueError as e:
1127
+ # Re-raise if it's our validation error
1128
+ if "Time amount must be positive" in str(e):
1129
+ raise
1130
+ # Otherwise, try next format (likely invalid int parsing)
1131
+
1132
+ # Git ref format (requires git_helper)
1133
+ if self.git_helper:
1134
+ try:
1135
+ # Validate git ref format to prevent command injection
1136
+ # Refs should not start with - or -- (could be flags)
1137
+ if changed_since.startswith("-"):
1138
+ raise ValueError(f"Invalid git ref format (starts with '-'): {changed_since}")
1139
+
1140
+ # Get timestamp of the ref using git show
1141
+ repo_path = self.git_helper.repo_path
1142
+ result = subprocess.run(
1143
+ ["git", "show", "-s", "--format=%ai", changed_since],
1144
+ cwd=repo_path,
1145
+ capture_output=True,
1146
+ text=True,
1147
+ check=True,
1148
+ )
1149
+ dt = datetime.fromisoformat(result.stdout.strip())
1150
+ # Git returns timezone-aware datetime, ensure it has tzinfo
1151
+ if dt.tzinfo is None:
1152
+ dt = dt.replace(tzinfo=timezone.utc)
1153
+ return dt
1154
+ except subprocess.CalledProcessError:
1155
+ # Git command failed - invalid ref or other git error
1156
+ pass
1157
+ except ValueError:
1158
+ # Re-raise validation errors
1159
+ raise
1160
+ except Exception:
1161
+ # Other errors (e.g., datetime parsing) - try next format
1162
+ pass
1163
+
1164
+ raise ValueError(f"Invalid changed_since format: {changed_since}")
1165
+
1166
+ def _get_recent_pr_info(self, file_path: str) -> dict | None:
1167
+ """
1168
+ Get the most recent PR that modified a file.
1169
+
1170
+ Args:
1171
+ file_path: Relative path to the file
1172
+
1173
+ Returns:
1174
+ Dictionary with PR info (number, title, date, comment_count) or None
1175
+ """
1176
+ if not self.pr_index:
1177
+ return None
1178
+
1179
+ # Look up PRs for this file
1180
+ file_to_prs = self.pr_index.get("file_to_prs", {})
1181
+ pr_numbers = file_to_prs.get(file_path, [])
1182
+
1183
+ if not pr_numbers:
1184
+ return None
1185
+
1186
+ # Get the most recent PR (last in list)
1187
+ prs_data = self.pr_index.get("prs", {})
1188
+ most_recent_pr_num = pr_numbers[-1]
1189
+ pr = prs_data.get(str(most_recent_pr_num))
1190
+
1191
+ if not pr:
1192
+ return None
1193
+
1194
+ # Count comments for this file
1195
+ comments = pr.get("comments", [])
1196
+ file_comments = [c for c in comments if c.get("path") == file_path]
1197
+
1198
+ return {
1199
+ "number": pr["number"],
1200
+ "title": pr["title"],
1201
+ "author": pr.get("author", "unknown"),
1202
+ "comment_count": len(file_comments),
1203
+ "url": pr.get("url", ""),
1204
+ }
1205
+
804
1206
  def _find_function_at_line(self, module_name: str, line: int) -> dict | None:
805
1207
  """
806
1208
  Find the function that contains a specific line number.
@@ -951,6 +1353,10 @@ class CicadaServer:
951
1353
  _precise_tracking: bool = False,
952
1354
  show_evolution: bool = False,
953
1355
  max_commits: int = 10,
1356
+ since_date: str | None = None,
1357
+ until_date: str | None = None,
1358
+ author: str | None = None,
1359
+ min_changes: int = 0,
954
1360
  ) -> list[TextContent]:
955
1361
  """
956
1362
  Get git commit history for a file or function.
@@ -963,6 +1369,10 @@ class CicadaServer:
963
1369
  precise_tracking: Deprecated (function tracking is always used when function_name provided)
964
1370
  show_evolution: Include function evolution metadata
965
1371
  max_commits: Maximum number of commits to return
1372
+ since_date: Only include commits after this date (ISO format or relative like '7d', '2w')
1373
+ until_date: Only include commits before this date (ISO format or relative)
1374
+ author: Filter by author name (substring match)
1375
+ min_changes: Minimum number of lines changed
966
1376
 
967
1377
  Returns:
968
1378
  TextContent with formatted commit history
@@ -972,11 +1382,27 @@ class CicadaServer:
972
1382
  - Function tracking works even as the function moves in the file
973
1383
  - Line numbers are used as fallback if function tracking fails
974
1384
  - Requires .gitattributes with "*.ex diff=elixir" for function tracking
1385
+ - Date filters only work with file-level history (not function/line tracking)
975
1386
  """
976
1387
  if not self.git_helper:
977
1388
  error_msg = "Git history is not available (repository may not be a git repo)"
978
1389
  return [TextContent(type="text", text=error_msg)]
979
1390
 
1391
+ # Parse date filters if provided
1392
+ since_datetime = None
1393
+ until_datetime = None
1394
+ if since_date:
1395
+ since_datetime = self._parse_changed_since(since_date)
1396
+ if until_date:
1397
+ until_datetime = self._parse_changed_since(until_date)
1398
+
1399
+ # Check if any filters are being used (only supported for file-level history)
1400
+ has_filters = since_date or until_date or author or min_changes > 0
1401
+ if has_filters and (function_name or (start_line and end_line)):
1402
+ warning_msg = "⚠️ Date/author/min_changes filters only work with file-level history (without function_name or line range)\n\n"
1403
+ else:
1404
+ warning_msg = ""
1405
+
980
1406
  try:
981
1407
  evolution = None
982
1408
  tracking_method = "file"
@@ -1021,7 +1447,17 @@ class CicadaServer:
1021
1447
  )
1022
1448
  else:
1023
1449
  # File-level history
1024
- commits = self.git_helper.get_file_history(file_path, max_commits)
1450
+ if has_filters:
1451
+ commits = self.git_helper.get_file_history_filtered(
1452
+ file_path,
1453
+ max_commits=max_commits,
1454
+ since_date=since_datetime,
1455
+ until_date=until_datetime,
1456
+ author=author,
1457
+ min_changes=min_changes,
1458
+ )
1459
+ else:
1460
+ commits = self.git_helper.get_file_history(file_path, max_commits)
1025
1461
  title = f"Git History for {file_path}"
1026
1462
 
1027
1463
  if not commits:
@@ -1031,6 +1467,23 @@ class CicadaServer:
1031
1467
  # Format the results as markdown
1032
1468
  lines = [f"# {title}\n"]
1033
1469
 
1470
+ # Add warning if filters were specified but not used
1471
+ if warning_msg:
1472
+ lines.append(warning_msg)
1473
+
1474
+ # Add filter information if filters were used
1475
+ if has_filters and not (function_name or (start_line and end_line)):
1476
+ filter_parts = []
1477
+ if since_date:
1478
+ filter_parts.append(f"since {since_date}")
1479
+ if until_date:
1480
+ filter_parts.append(f"until {until_date}")
1481
+ if author:
1482
+ filter_parts.append(f"author: {author}")
1483
+ if min_changes > 0:
1484
+ filter_parts.append(f"min changes: {min_changes}")
1485
+ lines.append(f"*Filters: {', '.join(filter_parts)}*\n")
1486
+
1034
1487
  # Add tracking method info
1035
1488
  if tracking_method == "function":
1036
1489
  lines.append(
@@ -1256,40 +1709,56 @@ class CicadaServer:
1256
1709
  result = "\n".join(lines)
1257
1710
  return [TextContent(type="text", text=result)]
1258
1711
 
1259
- async def _search_by_keywords(self, keywords: list[str]) -> list[TextContent]:
1712
+ async def _search_by_keywords(
1713
+ self, keywords: list[str], filter_type: str = "all", min_score: float = 0.0
1714
+ ) -> list[TextContent]:
1260
1715
  """
1261
1716
  Search for modules and functions by keywords.
1262
1717
 
1263
1718
  Args:
1264
1719
  keywords: List of keywords to search for
1720
+ filter_type: Filter results by type ('all', 'modules', 'functions'). Defaults to 'all'.
1721
+ min_score: Minimum relevance score threshold (0.0 to 1.0). Defaults to 0.0.
1265
1722
 
1266
1723
  Returns:
1267
1724
  TextContent with formatted search results
1268
1725
  """
1269
1726
  from cicada.keyword_search import KeywordSearcher
1727
+ from cicada.mcp.filter_utils import filter_by_score_threshold
1270
1728
 
1271
1729
  # Check if keywords are available (cached at initialization)
1272
1730
  if not self._has_keywords:
1273
1731
  error_msg = (
1274
1732
  "No keywords found in index. Please rebuild the index with keyword extraction:\n\n"
1275
- " cicada index --nlp # NLP-based extraction (lemminflect)\n"
1276
- " cicada index --rag # BERT-based extraction\n\n"
1733
+ " cicada index # Default: reuse configured tier\n"
1734
+ " cicada index --force --regular # BERT + GloVe (regular tier)\n"
1735
+ " cicada index --force --fast # Fast: Token-based + lemminflect\n"
1736
+ " cicada index --force --max # Max: BERT + FastText\n\n"
1277
1737
  "This will extract keywords from documentation for semantic search."
1278
1738
  )
1279
1739
  return [TextContent(type="text", text=error_msg)]
1280
1740
 
1281
1741
  # Perform the search
1282
1742
  searcher = KeywordSearcher(self.index)
1283
- results = searcher.search(keywords, top_n=5)
1743
+ results = searcher.search(keywords, top_n=20, filter_type=filter_type)
1744
+
1745
+ # Apply score threshold filter
1746
+ if min_score > 0.0:
1747
+ results = filter_by_score_threshold(results, min_score)
1284
1748
 
1285
1749
  if not results:
1286
- result = f"No results found for keywords: {', '.join(keywords)}"
1750
+ if min_score > 0.0:
1751
+ result = f"No results found for keywords: {', '.join(keywords)} with min_score >= {min_score}"
1752
+ else:
1753
+ result = f"No results found for keywords: {', '.join(keywords)}"
1287
1754
  return [TextContent(type="text", text=result)]
1288
1755
 
1289
1756
  # Format results
1290
- from cicada.formatter import ModuleFormatter
1757
+ from cicada.format import ModuleFormatter
1291
1758
 
1292
- formatted_result = ModuleFormatter.format_keyword_search_results_markdown(keywords, results)
1759
+ formatted_result = ModuleFormatter.format_keyword_search_results_markdown(
1760
+ keywords, results, show_scores=True
1761
+ )
1293
1762
 
1294
1763
  return [TextContent(type="text", text=formatted_result)]
1295
1764
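`filter_by_score_threshold` comes from `cicada/mcp/filter_utils.py` and is not shown here; given the earlier validation that `min_score` lies between 0.0 and 1.0, it presumably just drops results scoring below the threshold. A minimal sketch under that assumption, with an invented "score" field on each result:

```python
def filter_by_score_threshold(results: list[dict], min_score: float) -> list[dict]:
    # Keep only results whose relevance score meets the threshold.
    return [r for r in results if r.get("score", 0.0) >= min_score]

results = [
    {"name": "MyApp.Accounts", "score": 0.82},
    {"name": "MyApp.Mailer", "score": 0.31},
]
print(filter_by_score_threshold(results, 0.5))  # [{'name': 'MyApp.Accounts', 'score': 0.82}]
```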
 
@@ -1304,8 +1773,8 @@ class CicadaServer:
1304
1773
  Returns:
1305
1774
  TextContent with formatted dead code analysis
1306
1775
  """
1307
- from cicada.dead_code_analyzer import DeadCodeAnalyzer
1308
- from cicada.find_dead_code import (
1776
+ from cicada.dead_code.analyzer import DeadCodeAnalyzer
1777
+ from cicada.dead_code.finder import (
1309
1778
  filter_by_confidence,
1310
1779
  format_json,
1311
1780
  format_markdown,
@@ -1323,6 +1792,276 @@ class CicadaServer:
1323
1792
 
1324
1793
  return [TextContent(type="text", text=output)]
1325
1794
 
1795
+ async def _get_module_dependencies(
1796
+ self, module_name: str, output_format: str, depth: int, granular: bool = False
1797
+ ) -> list[TextContent]:
1798
+ """
1799
+ Get all modules that a given module depends on.
1800
+
1801
+ Args:
1802
+ module_name: Module name to analyze
1803
+ output_format: Output format ('markdown' or 'json')
1804
+ depth: Depth for transitive dependencies (1 = direct only, 2 = include dependencies of dependencies)
1805
+ granular: Show which specific functions use which dependencies
1806
+
1807
+ Returns:
1808
+ TextContent with formatted dependency information
1809
+ """
1810
+ import json
1811
+
1812
+ # Look up the module in the index
1813
+ module_data, error_msg = self._lookup_module_with_error(module_name)
1814
+ if error_msg:
1815
+ return [TextContent(type="text", text=error_msg)]
1816
+
1817
+ # module_data is guaranteed to be non-None here
1818
+ assert module_data is not None
1819
+
1820
+ # Get dependencies from the index
1821
+ dependencies = module_data.get("dependencies", {})
1822
+ direct_modules = dependencies.get("modules", [])
1823
+
1824
+ # Collect granular dependency information if requested
1825
+ granular_info: dict[str, list[dict[str, Any]]] = {}
1826
+ if granular:
1827
+ # Build a mapping of dependency_module -> [functions that use it]
1828
+ for func in module_data.get("functions", []):
1829
+ func_deps = func.get("dependencies", [])
1830
+ for dep in func_deps:
1831
+ dep_module = dep.get("module", "")
1832
+ if dep_module in direct_modules:
1833
+ if dep_module not in granular_info:
1834
+ granular_info[dep_module] = []
1835
+ granular_info[dep_module].append(
1836
+ {
1837
+ "function": func.get("name"),
1838
+ "arity": func.get("arity"),
1839
+ "line": func.get("line"),
1840
+ "calls": f"{dep.get('function')}/{dep.get('arity')}",
1841
+ "call_line": dep.get("line"),
1842
+ }
1843
+ )
1844
+
1845
+ # If depth > 1, collect transitive dependencies
1846
+ all_modules = set(direct_modules)
1847
+ if depth > 1:
1848
+ visited = {module_name} # Avoid circular dependencies
1849
+ to_visit = list(direct_modules)
1850
+
1851
+ for _ in range(depth - 1):
1852
+ next_level = []
1853
+ for dep_module in to_visit:
1854
+ if dep_module in visited:
1855
+ continue
1856
+ visited.add(dep_module)
1857
+
1858
+ dep_data = self.index["modules"].get(dep_module)
1859
+ if dep_data:
1860
+ dep_dependencies = dep_data.get("dependencies", {})
1861
+ dep_modules = dep_dependencies.get("modules", [])
1862
+ all_modules.update(dep_modules)
1863
+ next_level.extend(dep_modules)
1864
+
1865
+ to_visit = next_level
1866
+
1867
+ # Format output
1868
+ if output_format == "json":
1869
+ result = {
1870
+ "module": module_name,
1871
+ "dependencies": {
1872
+ "direct": sorted(direct_modules),
1873
+ "all": sorted(all_modules) if depth > 1 else sorted(direct_modules),
1874
+ "depth": depth,
1875
+ },
1876
+ }
1877
+ if granular:
1878
+ result["granular"] = granular_info # type: ignore
1879
+ output = json.dumps(result, indent=2)
1880
+ else:
1881
+ # Markdown format
1882
+ lines = [f"# Dependencies for {module_name}\n"]
1883
+
1884
+ if direct_modules:
1885
+ lines.append(f"## Direct Dependencies ({len(direct_modules)})\n")
1886
+ for dep in sorted(direct_modules):
1887
+ lines.append(f"- {dep}")
1888
+ # Add granular information if available
1889
+ if granular and dep in granular_info:
1890
+ uses = granular_info[dep]
1891
+ lines.append(f" Used by {len(uses)} function(s):")
1892
+ for use in uses[:3]: # Limit to 3 examples
1893
+ lines.append(
1894
+ f" • {use['function']}/{use['arity']} (line {use['line']}) → calls {use['calls']} (line {use['call_line']})"
1895
+ )
1896
+ if len(uses) > 3:
1897
+ lines.append(f" ... and {len(uses) - 3} more")
1898
+ lines.append("")
1899
+
1900
+ if depth > 1 and len(all_modules) > len(direct_modules):
1901
+ transitive = sorted(all_modules - set(direct_modules))
1902
+ lines.append(f"## Transitive Dependencies ({len(transitive)})\n")
1903
+ for dep in transitive:
1904
+ lines.append(f"- {dep}")
1905
+ lines.append("")
1906
+
1907
+ if not direct_modules:
1908
+ lines.append("*No dependencies found*")
1909
+
1910
+ output = "\n".join(lines)
1911
+
1912
+ return [TextContent(type="text", text=output)]
1913
+
1914
+ def _format_dependency_with_context(
1915
+ self,
1916
+ dep: dict,
1917
+ context_lines: dict,
1918
+ include_context: bool,
1919
+ include_module: bool = False,
1920
+ ) -> list[str]:
1921
+ """
1922
+ Format a single dependency with optional code context.
1923
+
1924
+ Args:
1925
+ dep: Dependency dict with module, function, arity, line
1926
+ context_lines: Dict mapping line numbers to code context
1927
+ include_context: Whether to include code context
1928
+ include_module: Whether to include module name in output
1929
+
1930
+ Returns:
1931
+ List of formatted lines
1932
+ """
1933
+ lines = []
1934
+ line_info = f"(line {dep['line']})"
1935
+
1936
+ if include_module:
1937
+ lines.append(f"- {dep['module']}.{dep['function']}/{dep['arity']} {line_info}")
1938
+ else:
1939
+ lines.append(f"- {dep['function']}/{dep['arity']} {line_info}")
1940
+
1941
+ if include_context and dep["line"] in context_lines:
1942
+ lines.append(" ```elixir")
1943
+ lines.append(f" {context_lines[dep['line']]}")
1944
+ lines.append(" ```")
1945
+
1946
+ return lines
1947
+
1948
+ async def _get_function_dependencies(
1949
+ self,
1950
+ module_name: str,
1951
+ function_name: str,
1952
+ arity: int,
1953
+ output_format: str,
1954
+ include_context: bool,
1955
+ ) -> list[TextContent]:
1956
+ """
1957
+ Get all functions that a given function calls.
1958
+
1959
+ Args:
1960
+ module_name: Module name containing the function
1961
+ function_name: Function name to analyze
1962
+ arity: Function arity
1963
+ output_format: Output format ('markdown' or 'json')
1964
+ include_context: Whether to include code context
1965
+
1966
+ Returns:
1967
+ TextContent with formatted dependency information
1968
+ """
1969
+ import json
1970
+
1971
+ # Look up the module in the index (no suggestions for function lookup)
1972
+ module_data, error_msg = self._lookup_module_with_error(
1973
+ module_name, include_suggestions=False
1974
+ )
1975
+ if error_msg:
1976
+ return [TextContent(type="text", text=error_msg)]
1977
+
1978
+ # module_data is guaranteed to be non-None here
1979
+ assert module_data is not None
1980
+
1981
+ # Find the function
1982
+ functions = module_data.get("functions", [])
1983
+ target_func = None
1984
+ for func in functions:
1985
+ if func["name"] == function_name and func["arity"] == arity:
1986
+ target_func = func
1987
+ break
1988
+
1989
+ if not target_func:
1990
+ error_msg = (
1991
+ f"Function not found: {module_name}.{function_name}/{arity}\n\n"
1992
+ f"Available functions in {module_name}:\n"
1993
+ )
1994
+ available = [f" - {f['name']}/{f['arity']}" for f in functions[:10]]
1995
+ error_msg += "\n".join(available)
1996
+ return [TextContent(type="text", text=error_msg)]
1997
+
1998
+ # Get function dependencies
1999
+ dependencies = target_func.get("dependencies", [])
2000
+
2001
+ # If include_context is True, fetch the source code
2002
+ context_lines = {}
2003
+ if include_context and dependencies:
2004
+ # Read the source file
2005
+ repo_path = self.config.get("repository", {}).get("path", ".")
2006
+ file_path = Path(repo_path) / module_data["file"]
2007
+ try:
2008
+ with open(file_path) as f:
2009
+ source_lines = f.readlines()
2010
+ # Get context for each dependency call
2011
+ for dep in dependencies:
2012
+ line_num = dep["line"]
2013
+ if 1 <= line_num <= len(source_lines):
2014
+ # Get 3 lines of context (before, current, after)
2015
+ start = max(0, line_num - 2)
2016
+ end = min(len(source_lines), line_num + 1)
2017
+ context = "".join(source_lines[start:end])
2018
+ context_lines[line_num] = context.rstrip()
2019
+ except OSError:
2020
+ pass # If we can't read the file, just skip context
2021
+
2022
+ # Format output
2023
+ if output_format == "json":
2024
+ result = {
2025
+ "module": module_name,
2026
+ "function": f"{function_name}/{arity}",
2027
+ "dependencies": dependencies,
2028
+ }
2029
+ output = json.dumps(result, indent=2)
2030
+ else:
2031
+ # Markdown format
2032
+ lines = [f"# Dependencies for {module_name}.{function_name}/{arity}\n"]
2033
+
2034
+ if dependencies:
2035
+ # Group by internal vs external
2036
+ internal = [d for d in dependencies if d["module"] == module_name]
2037
+ external = [d for d in dependencies if d["module"] != module_name]
2038
+
2039
+ if internal:
2040
+ lines.append(f"## Internal Calls ({len(internal)})\n")
2041
+ for dep in internal:
2042
+ lines.extend(
2043
+ self._format_dependency_with_context(
2044
+ dep, context_lines, include_context, include_module=False
2045
+ )
2046
+ )
2047
+ lines.append("")
2048
+
2049
+ if external:
2050
+ lines.append(f"## External Calls ({len(external)})\n")
2051
+ for dep in external:
2052
+ lines.extend(
2053
+ self._format_dependency_with_context(
2054
+ dep, context_lines, include_context, include_module=True
2055
+ )
2056
+ )
2057
+ lines.append("")
2058
+ else:
2059
+ lines.append("*No dependencies found*")
2060
+
2061
+ output = "\n".join(lines)
2062
+
2063
+ return [TextContent(type="text", text=output)]
2064
+
1326
2065
  async def run(self):
1327
2066
  """Run the MCP server."""
1328
2067
  async with stdio_server() as (read_stream, write_stream):
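The `depth` handling in `_get_module_dependencies` above walks one extra level of the index per unit of depth and uses a `visited` set to avoid cycles. The same idea on a toy dependency map (module names invented):

```python
def transitive_deps(dep_map: dict[str, list[str]], root: str, depth: int) -> set[str]:
    """Collect dependencies of `root` up to `depth` levels, skipping cycles."""
    collected = set(dep_map.get(root, []))
    visited = {root}
    to_visit = list(dep_map.get(root, []))
    for _ in range(depth - 1):
        next_level = []
        for mod in to_visit:
            if mod in visited:
                continue
            visited.add(mod)
            deps = dep_map.get(mod, [])
            collected.update(deps)
            next_level.extend(deps)
        to_visit = next_level
    return collected

# Toy index: A -> B -> C, and C depends back on A (a cycle).
dep_map = {"A": ["B"], "B": ["C"], "C": ["A"]}
print(sorted(transitive_deps(dep_map, "A", depth=1)))  # ['B']
print(sorted(transitive_deps(dep_map, "A", depth=3)))  # ['A', 'B', 'C']  (the cycle is visited once, then stopped)
```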
@@ -1365,9 +2104,12 @@ def _auto_setup_if_needed():
1365
2104
  )
1366
2105
 
1367
2106
  # Determine repository path from environment or current directory
1368
- repo_path_str = os.environ.get("CICADA_REPO_PATH")
2107
+ repo_path_str = None
2108
+
2109
+ # First check if repo path was provided via positional argument (internal env var)
2110
+ repo_path_str = os.environ.get("_CICADA_REPO_PATH_ARG")
1369
2111
 
1370
- # Check if WORKSPACE_FOLDER_PATHS is available (Cursor-specific)
2112
+ # Fall back to WORKSPACE_FOLDER_PATHS (Cursor-specific)
1371
2113
  if not repo_path_str:
1372
2114
  workspace_paths = os.environ.get("WORKSPACE_FOLDER_PATHS")
1373
2115
  if workspace_paths:
@@ -1420,15 +2162,19 @@ def main():
1420
2162
  import sys
1421
2163
 
1422
2164
  # Accept optional positional argument for repo path
1423
- # Usage: cicada-server [repo_path]
1424
2165
  if len(sys.argv) > 1:
1425
2166
  repo_path = sys.argv[1]
1426
2167
  # Convert to absolute path
1427
2168
  from pathlib import Path
1428
2169
 
2170
+ from cicada.utils.storage import get_storage_dir
2171
+
1429
2172
  abs_path = Path(repo_path).resolve()
1430
- # Set environment variable to override default
1431
- os.environ["CICADA_REPO_PATH"] = str(abs_path)
2173
+ # Set environment variables for both storage directory and repo path
2174
+ # The repo path is needed by _auto_setup_if_needed() for first-time setup
2175
+ storage_dir = get_storage_dir(abs_path)
2176
+ os.environ["CICADA_CONFIG_DIR"] = str(storage_dir)
2177
+ os.environ["_CICADA_REPO_PATH_ARG"] = str(abs_path)
1432
2178
 
1433
2179
  asyncio.run(async_main())
1434
2180