mcp-code-indexer 4.2.16.tar.gz → 4.2.18.tar.gz

This diff shows the differences between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
Files changed (71)
  1. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/PKG-INFO +3 -3
  2. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/README.md +2 -2
  3. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/pyproject.toml +1 -1
  4. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/database.py +136 -33
  5. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/database_factory.py +1 -1
  6. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/exceptions.py +1 -1
  7. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/logging_config.py +16 -9
  8. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/server/mcp_server.py +10 -6
  9. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/LICENSE +0 -0
  10. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/__init__.py +0 -0
  11. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/__main__.py +0 -0
  12. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/ask_handler.py +0 -0
  13. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/claude_api_handler.py +0 -0
  14. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/cleanup_manager.py +0 -0
  15. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/commands/__init__.py +0 -0
  16. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/commands/makelocal.py +0 -0
  17. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/data/stop_words_english.txt +0 -0
  18. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/__init__.py +0 -0
  19. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/connection_health.py +0 -0
  20. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/models.py +0 -0
  21. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/path_resolver.py +0 -0
  22. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/database/retry_executor.py +0 -0
  23. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/deepask_handler.py +0 -0
  24. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/error_handler.py +0 -0
  25. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/file_scanner.py +0 -0
  26. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/git_hook_handler.py +0 -0
  27. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/main.py +0 -0
  28. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/middleware/__init__.py +0 -0
  29. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/middleware/auth.py +0 -0
  30. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/middleware/error_middleware.py +0 -0
  31. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/middleware/logging.py +0 -0
  32. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/middleware/security.py +0 -0
  33. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/migrations/001_initial.sql +0 -0
  34. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/migrations/002_performance_indexes.sql +0 -0
  35. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/migrations/003_project_overviews.sql +0 -0
  36. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/migrations/004_remove_branch_dependency.sql +0 -0
  37. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/migrations/005_remove_git_remotes.sql +0 -0
  38. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/migrations/006_vector_mode.sql +0 -0
  39. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/query_preprocessor.py +0 -0
  40. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/server/__init__.py +0 -0
  41. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/tiktoken_cache/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 +0 -0
  42. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/token_counter.py +0 -0
  43. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/tools/__init__.py +0 -0
  44. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/transport/__init__.py +0 -0
  45. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/transport/base.py +0 -0
  46. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/transport/http_transport.py +0 -0
  47. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/transport/stdio_transport.py +0 -0
  48. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/__init__.py +0 -0
  49. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/chunking/__init__.py +0 -0
  50. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/chunking/ast_chunker.py +0 -0
  51. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/chunking/chunk_optimizer.py +0 -0
  52. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/chunking/language_handlers.py +0 -0
  53. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/config.py +0 -0
  54. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/const.py +0 -0
  55. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/daemon.py +0 -0
  56. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/monitoring/__init__.py +0 -0
  57. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/monitoring/change_detector.py +0 -0
  58. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/monitoring/file_watcher.py +0 -0
  59. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/monitoring/merkle_tree.py +0 -0
  60. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/providers/__init__.py +0 -0
  61. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/providers/turbopuffer_client.py +0 -0
  62. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/providers/voyage_client.py +0 -0
  63. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/security/__init__.py +0 -0
  64. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/security/patterns.py +0 -0
  65. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/security/redactor.py +0 -0
  66. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/services/__init__.py +0 -0
  67. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/services/embedding_service.py +0 -0
  68. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/services/vector_mode_tools_service.py +0 -0
  69. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/services/vector_storage_service.py +0 -0
  70. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/types.py +0 -0
  71. {mcp_code_indexer-4.2.16 → mcp_code_indexer-4.2.18}/src/mcp_code_indexer/vector_mode/utils.py +0 -0
--- mcp_code_indexer-4.2.16/PKG-INFO
+++ mcp_code_indexer-4.2.18/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-code-indexer
-Version: 4.2.16
+Version: 4.2.18
 Summary: MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews.
 License: MIT
 License-File: LICENSE
@@ -49,8 +49,8 @@ Description-Content-Type: text/markdown
 
 # MCP Code Indexer 🚀
 
-[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?63)](https://badge.fury.io/py/mcp-code-indexer)
-[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?63)](https://pypi.org/project/mcp-code-indexer/)
+[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?65)](https://badge.fury.io/py/mcp-code-indexer)
+[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?65)](https://pypi.org/project/mcp-code-indexer/)
 [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 
 A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
--- mcp_code_indexer-4.2.16/README.md
+++ mcp_code_indexer-4.2.18/README.md
@@ -1,7 +1,7 @@
 # MCP Code Indexer 🚀
 
-[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?63)](https://badge.fury.io/py/mcp-code-indexer)
-[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?63)](https://pypi.org/project/mcp-code-indexer/)
+[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?65)](https://badge.fury.io/py/mcp-code-indexer)
+[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?65)](https://pypi.org/project/mcp-code-indexer/)
 [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 
 A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
--- mcp_code_indexer-4.2.16/pyproject.toml
+++ mcp_code_indexer-4.2.18/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "mcp-code-indexer"
-version = "4.2.16"
+version = "4.2.18"
 description = "MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews."
 authors = ["MCP Code Indexer Contributors"]
 maintainers = ["MCP Code Indexer Contributors"]
--- mcp_code_indexer-4.2.16/src/mcp_code_indexer/database/database.py
+++ mcp_code_indexer-4.2.18/src/mcp_code_indexer/database/database.py
@@ -35,7 +35,10 @@ from mcp_code_indexer.database.models import (
     WordFrequencyResult,
     WordFrequencyTerm,
 )
-from mcp_code_indexer.database.retry_executor import create_retry_executor
+from mcp_code_indexer.database.retry_executor import (
+    create_retry_executor,
+    DatabaseLockError,
+)
 from mcp_code_indexer.query_preprocessor import preprocess_search_query
 
 logger = logging.getLogger(__name__)
@@ -54,7 +57,7 @@ class DatabaseManager:
         db_path: Path,
         pool_size: int = 3,
         retry_count: int = 5,
-        timeout: float = 10.0,
+        timeout: float = 30.0,
         enable_wal_mode: bool = True,
         health_check_interval: float = 30.0,
         retry_min_wait: float = 0.1,
@@ -222,7 +225,7 @@ class DatabaseManager:
                 "PRAGMA cache_size = -64000",  # 64MB cache
                 "PRAGMA temp_store = MEMORY",  # Use memory for temp tables
                 "PRAGMA mmap_size = 268435456",  # 256MB memory mapping
-                "PRAGMA busy_timeout = 10000",  # 10s timeout (reduced from 30s)
+                f"PRAGMA busy_timeout = {int(self.timeout * 1000)}",  # Use configured timeout
                 "PRAGMA optimize",  # Enable query planner optimizations
             ]
         )
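The busy-handler timeout previously hard-coded 10 s regardless of the configured `timeout`; it now scales with it (30 s by default, per the constructor change above). A minimal sketch of the same pattern on a bare `aiosqlite` connection; `open_with_busy_timeout` is an illustrative name, not an API of this package:

```python
import aiosqlite


async def open_with_busy_timeout(db_path: str, timeout: float = 30.0) -> aiosqlite.Connection:
    """Hypothetical helper mirroring the change above."""
    # SQLite expects busy_timeout in milliseconds, so the configured
    # float seconds are scaled and truncated exactly as in the diff.
    conn = await aiosqlite.connect(db_path)
    await conn.execute(f"PRAGMA busy_timeout = {int(timeout * 1000)}")
    return conn  # caller is responsible for closing
```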
@@ -406,14 +409,30 @@ class DatabaseManager:
             operation_name: Name of the operation for monitoring
             timeout_seconds: Transaction timeout in seconds
         """
+        import time
+        acquire_start = time.monotonic()
         async with self.get_write_connection_with_retry(operation_name) as conn:
+            write_lock_time = time.monotonic() - acquire_start
+            logger.debug(
+                f"[{operation_name}] Write lock acquired in {write_lock_time*1000:.1f}ms"
+            )
             try:
                 # Start immediate transaction with timeout
+                begin_start = time.monotonic()
                 await asyncio.wait_for(
                     conn.execute("BEGIN IMMEDIATE"), timeout=timeout_seconds
                 )
+                begin_time = time.monotonic() - begin_start
+                logger.debug(
+                    f"[{operation_name}] BEGIN IMMEDIATE completed in {begin_time*1000:.1f}ms"
+                )
                 yield conn
+                commit_start = time.monotonic()
                 await conn.commit()
+                commit_time = time.monotonic() - commit_start
+                logger.debug(
+                    f"[{operation_name}] COMMIT completed in {commit_time*1000:.1f}ms"
+                )
             except asyncio.TimeoutError:
                 logger.warning(
                     (
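The new instrumentation brackets each phase (write-lock acquisition, `BEGIN IMMEDIATE`, `COMMIT`) with `time.monotonic()` and logs the elapsed milliseconds at DEBUG. A self-contained sketch of that pattern; `timed_begin_immediate` is a hypothetical helper, not part of the package:

```python
import asyncio
import time

import aiosqlite


async def timed_begin_immediate(conn: aiosqlite.Connection, timeout_s: float) -> float:
    """Hypothetical helper: time one BEGIN IMMEDIATE, bounded by a timeout."""
    # time.monotonic() brackets the phase; asyncio.wait_for bounds how long
    # BEGIN IMMEDIATE may wait for SQLite's write lock before raising
    # asyncio.TimeoutError (handled by the caller, as in the hunk above).
    start = time.monotonic()
    await asyncio.wait_for(conn.execute("BEGIN IMMEDIATE"), timeout=timeout_s)
    return (time.monotonic() - start) * 1000  # elapsed milliseconds
```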
@@ -473,13 +492,34 @@ class DatabaseManager:
 
         async def execute_transaction() -> Any:
             """Inner function to execute transaction - retried by executor."""
+            import time
+            start_time = time.monotonic()
+            logger.debug(
+                f"[{operation_name}] Starting transaction "
+                f"(timeout={timeout_seconds}s, pool_size={len(self._connection_pool)})"
+            )
             try:
                 async with self.get_immediate_transaction(
                     operation_name, timeout_seconds
                 ) as conn:
+                    lock_acquired_time = time.monotonic()
+                    logger.debug(
+                        f"[{operation_name}] Lock acquired in "
+                        f"{(lock_acquired_time - start_time)*1000:.1f}ms"
+                    )
                     result = await operation_func(conn)
+                    exec_time = time.monotonic()
+                    logger.debug(
+                        f"[{operation_name}] Operation executed in "
+                        f"{(exec_time - lock_acquired_time)*1000:.1f}ms"
+                    )
 
                 # Record successful operation metrics
+                total_time = time.monotonic() - start_time
+                logger.debug(
+                    f"[{operation_name}] Transaction completed successfully "
+                    f"in {total_time*1000:.1f}ms"
+                )
                 if self._metrics_collector:
                     self._metrics_collector.record_operation(
                         operation_name,
@@ -490,26 +530,52 @@ class DatabaseManager:
 
                 return result
 
-            except (aiosqlite.OperationalError, asyncio.TimeoutError) as e:
-                # Record locking event for metrics
-                if self._metrics_collector and "locked" in str(e).lower():
+            except aiosqlite.OperationalError as e:
+                elapsed = time.monotonic() - start_time
+                error_msg = str(e).lower()
+                logger.debug(
+                    f"[{operation_name}] OperationalError after {elapsed*1000:.1f}ms: {e}"
+                )
+                if self._metrics_collector and "locked" in error_msg:
                     self._metrics_collector.record_locking_event(operation_name, str(e))
 
-                # Classify the error for better handling
-                classified_error = classify_sqlite_error(e, operation_name)
-
-                # Record failed operation metrics for non-retryable errors
-                if not is_retryable_error(classified_error):
-                    if self._metrics_collector:
-                        self._metrics_collector.record_operation(
-                            operation_name,
-                            timeout_seconds * 1000,
-                            False,
-                            len(self._connection_pool),
-                        )
+                # For retryable errors (locked/busy), re-raise the ORIGINAL error
+                # so tenacity can retry. Only classify non-retryable errors.
+                if "locked" in error_msg or "busy" in error_msg:
+                    logger.debug(
+                        f"[{operation_name}] Retryable error, will retry: {e}"
+                    )
+                    raise  # Let tenacity retry this
 
+                # Non-retryable OperationalError - classify and raise
+                logger.warning(
+                    f"[{operation_name}] Non-retryable OperationalError: {e}"
+                )
+                classified_error = classify_sqlite_error(e, operation_name)
+                if self._metrics_collector:
+                    self._metrics_collector.record_operation(
+                        operation_name,
+                        timeout_seconds * 1000,
+                        False,
+                        len(self._connection_pool),
+                    )
                 raise classified_error
 
+            except asyncio.TimeoutError as e:
+                elapsed = time.monotonic() - start_time
+                logger.warning(
+                    f"[{operation_name}] Timeout after {elapsed*1000:.1f}ms "
+                    f"waiting for database lock (timeout={timeout_seconds}s)"
+                )
+                if self._metrics_collector:
+                    self._metrics_collector.record_locking_event(
+                        operation_name, "timeout waiting for lock"
+                    )
+                # Re-raise as OperationalError so tenacity can retry
+                raise aiosqlite.OperationalError(
+                    f"Timeout waiting for database lock: {e}"
+                ) from e
+
         try:
             # Create a temporary retry executor with custom max_retries if different
             # from default
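The behavioral core of this hunk: "database is locked/busy" errors are re-raised unchanged so the retry layer (tenacity-based, per the comments) sees the original `OperationalError`, while everything else is classified once and raised. A standalone sketch of that split using tenacity directly; the package routes this through its own `retry_executor`, so the names and policy values below are assumptions:

```python
import aiosqlite
from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential


def _is_lock_error(exc: BaseException) -> bool:
    # Mirrors the substring test above: only "database is locked"/"busy"
    # style OperationalErrors are worth retrying.
    msg = str(exc).lower()
    return isinstance(exc, aiosqlite.OperationalError) and (
        "locked" in msg or "busy" in msg
    )


@retry(
    retry=retry_if_exception(_is_lock_error),
    stop=stop_after_attempt(5),  # assumed policy, cf. retry_count=5 above
    wait=wait_exponential(multiplier=0.1, max=2.0),
    reraise=True,
)
async def write_with_retry(db_path: str, sql: str, params: tuple) -> None:
    # Any non-lock OperationalError escapes immediately (no retry),
    # matching the classify-and-raise branch in the hunk above.
    async with aiosqlite.connect(db_path) as conn:
        await conn.execute("BEGIN IMMEDIATE")
        await conn.execute(sql, params)
        await conn.commit()
```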
@@ -534,8 +600,27 @@ class DatabaseManager:
                 execute_transaction, operation_name
             )
 
+        except DatabaseLockError as e:
+            # Retries exhausted - record metrics and convert to DatabaseError
+            if self._metrics_collector:
+                self._metrics_collector.record_operation(
+                    operation_name,
+                    timeout_seconds * 1000,
+                    False,
+                    len(self._connection_pool),
+                )
+            # Convert to a proper DatabaseError for consistent error handling
+            raise DatabaseError(
+                f"Database operation failed after retries: {e.message}",
+                error_context={
+                    "operation": operation_name,
+                    "retry_count": e.retry_count,
+                    "retryable": False,  # Retries already exhausted
+                },
+            ) from e
+
         except DatabaseError:
-            # Record failed operation metrics for final failure
+            # Non-retryable DatabaseError from classification
             if self._metrics_collector:
                 self._metrics_collector.record_operation(
                     operation_name,
@@ -721,7 +806,7 @@ class DatabaseManager:
             cursor = await db.execute("SELECT changes()")
             changes = await cursor.fetchone()
             if changes[0] == 0:
-                raise ValueError(f"Project not found: {project_id}")
+                raise DatabaseError(f"Project not found: {project_id}")
 
             await db.commit()
             logger.debug(f"Set vector_mode={enabled} for project: {project_id}")
@@ -737,12 +822,18 @@ class DatabaseManager:
             projects = []
             for row in rows:
                 aliases = json.loads(row[2]) if row[2] else []
+                created = row[3]
+                last_accessed = row[4]
+                if isinstance(created, str):
+                    created = datetime.fromisoformat(created)
+                if isinstance(last_accessed, str):
+                    last_accessed = datetime.fromisoformat(last_accessed)
                 project = Project(
                     id=row[0],
                     name=row[1],
                     aliases=aliases,
-                    created=row[3],
-                    last_accessed=row[4],
+                    created=created,
+                    last_accessed=last_accessed,
                     vector_mode=bool(row[5]),
                 )
                 projects.append(project)
@@ -760,12 +851,18 @@ class DatabaseManager:
             projects = []
             for row in rows:
                 aliases = json.loads(row[2]) if row[2] else []
+                created = row[3]
+                last_accessed = row[4]
+                if isinstance(created, str):
+                    created = datetime.fromisoformat(created)
+                if isinstance(last_accessed, str):
+                    last_accessed = datetime.fromisoformat(last_accessed)
                 project = Project(
                     id=row[0],
                     name=row[1],
                     aliases=aliases,
-                    created=row[3],
-                    last_accessed=row[4],
+                    created=created,
+                    last_accessed=last_accessed,
                     vector_mode=bool(row[5]),
                 )
                 projects.append(project)
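Both row-mapping loops gain the same guard because SQLite stores TIMESTAMP columns as ISO-8601 text, and rows read without a registered converter come back as `str`. The normalization in isolation; `ensure_datetime` is an illustrative name, not a helper from the package:

```python
from datetime import datetime
from typing import Optional, Union


def ensure_datetime(value: Union[str, datetime, None]) -> Optional[datetime]:
    # Parse ISO-8601 text into datetime before handing the value to the
    # Project model; datetime (or None) values pass through unchanged.
    if isinstance(value, str):
        return datetime.fromisoformat(value)
    return value


print(ensure_datetime("2025-01-15T09:30:00"))  # datetime(2025, 1, 15, 9, 30)
print(ensure_datetime(None))                   # None
```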
@@ -776,9 +873,7 @@ class DatabaseManager:
 
     async def create_file_description(self, file_desc: FileDescription) -> None:
         """Create or update a file description."""
-        async with self.get_write_connection_with_retry(
-            "create_file_description"
-        ) as db:
+        async def operation(db: aiosqlite.Connection) -> None:
             await db.execute(
                 """
                 INSERT INTO file_descriptions
@@ -806,8 +901,12 @@ class DatabaseManager:
                     file_desc.to_be_cleaned,
                 ),
             )
-            await db.commit()
-            logger.debug(f"Saved file description: {file_desc.file_path}")
+
+        await self.execute_transaction_with_retry(
+            operation,
+            "create_file_description"
+        )
+        logger.debug(f"Saved file description: {file_desc.file_path}")
 
     async def get_file_description(
         self, project_id: str, file_path: str
@@ -1018,7 +1117,7 @@ class DatabaseManager:
 
     async def create_project_overview(self, overview: ProjectOverview) -> None:
         """Create or update a project overview."""
-        async with self.get_write_connection() as db:
+        async def operation(db: aiosqlite.Connection) -> None:
             await db.execute(
                 """
                 INSERT OR REPLACE INTO project_overviews
@@ -1033,8 +1132,12 @@ class DatabaseManager:
                     overview.total_tokens,
                 ),
             )
-            await db.commit()
-            logger.debug(f"Created/updated overview for project {overview.project_id}")
+
+        await self.execute_transaction_with_retry(
+            operation,
+            "create_project_overview"
+        )
+        logger.debug(f"Created/updated overview for project {overview.project_id}")
 
     async def get_project_overview(self, project_id: str) -> Optional[ProjectOverview]:
         """Get project overview by ID."""
@@ -1373,7 +1476,7 @@ class DatabaseManager:
             cursor = await db.execute("SELECT changes()")
             changes = await cursor.fetchone()
             if changes[0] == 0:
-                raise ValueError(
+                raise DatabaseError(
                     f"Index metadata not found for project: {index_meta.project_id}"
                 )
 
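Stepping back: with the two write-path hunks above, `create_file_description` and `create_project_overview` stop committing on a raw connection and instead hand a callback to `execute_transaction_with_retry`, which owns the transaction, commit, and retry policy. A pared-down sketch of that shape; `SketchManager`, `upsert_note`, and the `notes` table are hypothetical, and the real method also layers in tenacity retries and metrics:

```python
from typing import Awaitable, Callable

import aiosqlite


class SketchManager:
    """Hypothetical, pared-down stand-in for DatabaseManager."""

    def __init__(self, db_path: str) -> None:
        self.db_path = db_path

    async def execute_transaction_with_retry(
        self,
        operation: Callable[[aiosqlite.Connection], Awaitable[None]],
        operation_name: str,
    ) -> None:
        # Only the shape is kept here: the callback runs inside a single
        # BEGIN IMMEDIATE transaction and the commit happens centrally.
        async with aiosqlite.connect(self.db_path) as db:
            await db.execute("BEGIN IMMEDIATE")
            await operation(db)
            await db.commit()

    async def upsert_note(self, note_id: str, body: str) -> None:
        # Same refactor as in the hunks above: the SQL moves into a
        # callback; commit/retry handling lives in one place.
        async def operation(db: aiosqlite.Connection) -> None:
            await db.execute(
                "CREATE TABLE IF NOT EXISTS notes (id TEXT PRIMARY KEY, body TEXT)"
            )
            await db.execute(
                "INSERT OR REPLACE INTO notes (id, body) VALUES (?, ?)",
                (note_id, body),
            )

        await self.execute_transaction_with_retry(operation, "upsert_note")


# Usage: asyncio.run(SketchManager("sketch.db").upsert_note("n1", "hello"))
```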
--- mcp_code_indexer-4.2.16/src/mcp_code_indexer/database/database_factory.py
+++ mcp_code_indexer-4.2.18/src/mcp_code_indexer/database/database_factory.py
@@ -28,7 +28,7 @@ class DatabaseFactory:
         global_db_path: Path,
         pool_size: int = 3,
         retry_count: int = 5,
-        timeout: float = 10.0,
+        timeout: float = 30.0,
         enable_wal_mode: bool = True,
         health_check_interval: float = 30.0,
         retry_min_wait: float = 0.1,
--- mcp_code_indexer-4.2.16/src/mcp_code_indexer/database/exceptions.py
+++ mcp_code_indexer-4.2.18/src/mcp_code_indexer/database/exceptions.py
@@ -236,7 +236,7 @@ def classify_sqlite_error(error: Exception, operation_name: str = "") -> Databas
         for msg in [
             "no such table",
             "no such column",
-            "table already exists",
+            "already exists",
             "syntax error",
         ]
     ):
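Dropping the `table ` prefix lets the schema-error branch also match "index ... already exists", "trigger ... already exists", and similar messages. A toy version of the substring test, assuming (as the lowercase literals suggest) that the classifier compares against a lowercased error message:

```python
def is_schema_error(message: str) -> bool:
    # Lowercased substring match over known schema-related fragments;
    # the broadened "already exists" now covers more than tables.
    msg = message.lower()
    return any(
        needle in msg
        for needle in ("no such table", "no such column", "already exists", "syntax error")
    )


print(is_schema_error("index idx_files already exists"))  # True (was False before)
print(is_schema_error("database is locked"))              # False (retryable, not schema)
```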
--- mcp_code_indexer-4.2.16/src/mcp_code_indexer/logging_config.py
+++ mcp_code_indexer-4.2.18/src/mcp_code_indexer/logging_config.py
@@ -81,22 +81,29 @@ def setup_logging(
         root_logger.warning(f"Failed to set up file logging: {e}")
 
     # Configure specific loggers
+    effective_level = getattr(logging, log_level.upper())
 
-    # Quiet down noisy libraries
+    # Quiet down noisy libraries (always WARNING+)
     logging.getLogger("aiosqlite").setLevel(logging.WARNING)
     logging.getLogger("tiktoken").setLevel(logging.WARNING)
 
-    # MCP specific loggers
+    # MCP specific loggers - respect the configured log level
     mcp_logger = logging.getLogger("mcp")
-    mcp_logger.setLevel(logging.INFO)
+    mcp_logger.setLevel(effective_level)
 
-    # Database logger
-    db_logger = logging.getLogger("src.database")
-    db_logger.setLevel(logging.INFO)
+    # Database logger - respect the configured log level
+    db_logger = logging.getLogger("mcp_code_indexer.database")
+    db_logger.setLevel(effective_level)
 
-    # Server logger
-    server_logger = logging.getLogger("src.server")
-    server_logger.setLevel(logging.INFO)
+    # Also set the old logger names for backwards compatibility
+    logging.getLogger("src.database").setLevel(effective_level)
+
+    # Server logger - respect the configured log level
+    server_logger = logging.getLogger("mcp_code_indexer.server")
+    server_logger.setLevel(effective_level)
+
+    # Also set the old logger names for backwards compatibility
+    logging.getLogger("src.server").setLevel(effective_level)
 
     return root_logger
 
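The package loggers now follow the configured level instead of being pinned at INFO (which would have swallowed the new DEBUG timing logs), and the legacy `src.*` names are kept in sync. A condensed sketch of the resulting behavior; `apply_levels` is illustrative, not the module's actual function:

```python
import logging


def apply_levels(log_level: str = "DEBUG") -> None:
    # Noisy third-party loggers stay at WARNING; package loggers (and the
    # legacy src.* aliases) follow the configured level.
    effective = getattr(logging, log_level.upper())
    for noisy in ("aiosqlite", "tiktoken"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
    for name in ("mcp", "mcp_code_indexer.database", "mcp_code_indexer.server",
                 "src.database", "src.server"):
        logging.getLogger(name).setLevel(effective)


apply_levels("DEBUG")
assert logging.getLogger("mcp_code_indexer.database").level == logging.DEBUG
assert logging.getLogger("aiosqlite").level == logging.WARNING
```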
--- mcp_code_indexer-4.2.16/src/mcp_code_indexer/server/mcp_server.py
+++ mcp_code_indexer-4.2.18/src/mcp_code_indexer/server/mcp_server.py
@@ -13,7 +13,7 @@ import random
 import re
 import time
 import uuid
-from datetime import datetime
+from datetime import datetime, timedelta
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Callable, cast
 
@@ -56,7 +56,7 @@ class MCPCodeIndexServer:
         cache_dir: Optional[Path] = None,
         db_pool_size: int = 3,
         db_retry_count: int = 5,
-        db_timeout: float = 10.0,
+        db_timeout: float = 30.0,
         enable_wal_mode: bool = True,
         health_check_interval: float = 30.0,
         retry_min_wait: float = 0.1,
@@ -882,8 +882,11 @@ class MCPCodeIndexServer:
             all_projects = await db_manager.get_all_projects()
             if all_projects:
                 project = all_projects[0]  # Use the first (and should be only) project
-                # Update last accessed time
-                await db_manager.update_project_access_time(project.id)
+
+                # Update last accessed time only if older than 5 minutes
+                if datetime.utcnow() - project.last_accessed > timedelta(minutes=5):
+                    await db_manager.update_project_access_time(project.id)
+
                 logger.info(
                     f"Using existing local project: {project.name} (ID: {project.id})"
                 )
@@ -1040,8 +1043,9 @@ class MCPCodeIndexServer:
         db_manager: DatabaseManager,
     ) -> None:
         """Update an existing project with new metadata and folder alias."""
-        # Update last accessed time
-        await db_manager.update_project_access_time(project.id)
+        # Update last accessed time only if older than 5 minutes
+        if datetime.utcnow() - project.last_accessed > timedelta(minutes=5):
+            await db_manager.update_project_access_time(project.id)
 
         should_update = False
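Both call sites apply the same five-minute debounce so that routine lookups no longer turn every request into a write transaction (each access-time update previously competed for the write lock). The check in isolation, with a hypothetical project stub; the server compares naive-UTC values from `datetime.utcnow()`:

```python
from dataclasses import dataclass
from datetime import datetime, timedelta

ACCESS_REFRESH_INTERVAL = timedelta(minutes=5)  # matches the diff


@dataclass
class ProjectStub:
    """Hypothetical stand-in for the Project model."""
    id: str
    last_accessed: datetime


def needs_access_refresh(project: ProjectStub, now: datetime) -> bool:
    # Skip the UPDATE (a write transaction) unless the stored
    # timestamp is more than five minutes old.
    return now - project.last_accessed > ACCESS_REFRESH_INTERVAL


stale = ProjectStub("p1", datetime.utcnow() - timedelta(minutes=10))
fresh = ProjectStub("p2", datetime.utcnow() - timedelta(seconds=30))
print(needs_access_refresh(stale, datetime.utcnow()))  # True  -> update row
print(needs_access_refresh(fresh, datetime.utcnow()))  # False -> skip write
```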