claude-self-reflect 2.5.12 → 2.5.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,59 @@
+ log_level: INFO
+
+ storage:
+ # Where to store all the data
+ storage_path: ./storage
+
+ # Where to store snapshots
+ snapshots_path: ./snapshots
+
+ # CRITICAL: Store payloads on disk to save memory
+ on_disk_payload: true
+
+ performance:
+ # Reduce number of search threads to save memory
+ max_search_threads: 2
+
+ # Conservative CPU budget for optimization
+ optimizer_cpu_budget: 1
+
+ optimizers:
+ # Memory-optimized settings
+ deleted_threshold: 0.2
+ vacuum_min_vector_number: 1000
+ default_segment_number: 1
+ # Reduce max segment size to prevent memory spikes
+ max_segment_size_kb: 50000 # 50MB max
+ # Lower indexing threshold to use disk sooner
+ indexing_threshold_kb: 1000 # 1MB
+ flush_interval_sec: 5
+ max_optimization_threads: 1
+
+ # CRITICAL: Store HNSW indexes on disk
+ hnsw_index:
+ m: 16
+ ef_construct: 100
+ full_scan_threshold_kb: 1000
+ max_indexing_threads: 2
+ # Store HNSW index on disk - CRITICAL for memory savings
+ on_disk: true
+
+ collection:
+ # Default vectors storage on disk
+ vectors:
+ on_disk: true
+
+ # Single replica to save memory
+ replication_factor: 1
+ write_consistency_factor: 1
+
+ service:
+ max_request_size_mb: 32
+ max_workers: 2
+ host: 0.0.0.0
+ http_port: 6333
+ grpc_port: 6334
+ enable_cors: true
+
+ # Disable telemetry
+ telemetry_disabled: true
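The new file above is a memory-optimized Qdrant configuration: payloads, vectors, and HNSW indexes are kept on disk, segments are capped at roughly 50MB, and search/optimization thread counts are reduced. As a quick sanity check against a Qdrant instance started with this config, a minimal sketch (assuming the qdrant-client Python package; the URL simply matches the http_port declared above):

from qdrant_client import QdrantClient

# Connect on the HTTP port from the config above (http_port: 6333).
client = QdrantClient(url="http://localhost:6333")

# List the collections the running instance currently holds.
print([c.name for c in client.get_collections().collections])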
@@ -168,11 +168,13 @@ async def update_indexing_status():
  if imported_files_path and imported_files_path.exists():
  with open(imported_files_path, 'r') as f:
  imported_data = json.load(f)
- # The file has nested structure: {stream_position: {file: position}, imported_files: {file: lines}}
- # Handle new nested structure
- stream_position = imported_data.get("stream_position", {})
- imported_files_list = stream_position.get("imported_files", [])
- file_metadata = stream_position.get("file_metadata", {})
+ # The actual structure has imported_files and file_metadata at the top level
+ # NOT nested under stream_position as previously assumed
+ imported_files_dict = imported_data.get("imported_files", {})
+ file_metadata = imported_data.get("file_metadata", {})
+
+ # Convert dict keys to list for compatibility with existing logic
+ imported_files_list = list(imported_files_dict.keys())

  # Count files that have been imported
  for file_path in jsonl_files:
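For reference, the corrected parsing assumes an imported-files.json shaped roughly like the sketch below, with imported_files and file_metadata as top-level keys rather than nested under stream_position. The paths and values are made up; only the top-level layout matters here:

# Hypothetical imported-files.json contents, expressed as a Python dict.
imported_data = {
    "imported_files": {"/logs/-Users-me-myproject/session-1.jsonl": 420},
    "file_metadata": {"/logs/-Users-me-myproject/session-2.jsonl": {"position": 1024}},
    "stream_position": {},
}

# Mirrors the new logic: read both maps from the top level, then expose the
# imported files as a list for the existing counting code.
imported_files_dict = imported_data.get("imported_files", {})
file_metadata = imported_data.get("file_metadata", {})
imported_files_list = list(imported_files_dict.keys())
print(imported_files_list)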
@@ -714,18 +716,15 @@ async def reflect_on_past(
  # Add upfront summary for immediate visibility (before collapsible XML)
  upfront_summary = ""

- # Show indexing status prominently
- if indexing_status["percentage"] < 95.0:
- upfront_summary += f"📊 INDEXING: {indexing_status['indexed_conversations']}/{indexing_status['total_conversations']} conversations ({indexing_status['percentage']:.1f}% complete, {indexing_status['backlog_count']} pending)\n"
-
  # Show result summary
  if all_results:
  score_info = "high" if all_results[0].score >= 0.85 else "good" if all_results[0].score >= 0.75 else "partial"
  upfront_summary += f"🎯 RESULTS: {len(all_results)} matches ({score_info} relevance, top score: {all_results[0].score:.3f})\n"

- # Show performance
+ # Show performance with indexing status inline
  total_time = time.time() - start_time
- upfront_summary += f" PERFORMANCE: {int(total_time * 1000)}ms total ({len(collections_to_search)} collections searched)\n"
+ indexing_info = f" | 📊 {indexing_status['indexed_conversations']}/{indexing_status['total_conversations']} indexed" if indexing_status["percentage"] < 100.0 else ""
+ upfront_summary += f"⚡ PERFORMANCE: {int(total_time * 1000)}ms ({len(collections_to_search)} collections searched{indexing_info})\n"
  else:
  upfront_summary += f"❌ NO RESULTS: No conversations found matching '{query}'\n"

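The net effect of this hunk is that indexing progress no longer gets its own summary line below 95%; it is now appended to the performance line whenever indexing is below 100%. A standalone sketch of the new formatting, with made-up numbers:

# Illustrative values only; mirrors the inline formatting added above.
indexing_status = {"indexed_conversations": 180, "total_conversations": 200, "percentage": 90.0}
total_time, collections_searched = 0.042, 3

indexing_info = (
    f" | 📊 {indexing_status['indexed_conversations']}/{indexing_status['total_conversations']} indexed"
    if indexing_status["percentage"] < 100.0
    else ""
)
print(f"⚡ PERFORMANCE: {int(total_time * 1000)}ms ({collections_searched} collections searched{indexing_info})")
# ⚡ PERFORMANCE: 42ms (3 collections searched | 📊 180/200 indexed)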
@@ -39,6 +39,20 @@ def extract_project_name_from_path(file_path: str) -> str:
  return dir_name.lstrip('-')


+ def normalize_file_path(file_path: str) -> str:
+ """Normalize file paths to handle Docker vs local path differences.
+
+ Converts:
+ - /logs/PROJECT_DIR/file.jsonl -> ~/.claude/projects/PROJECT_DIR/file.jsonl
+ - Already normalized paths remain unchanged
+ """
+ if file_path.startswith("/logs/"):
+ # Convert Docker path to local path
+ projects_dir = str(Path.home() / ".claude" / "projects")
+ return file_path.replace("/logs/", projects_dir + "/", 1)
+ return file_path
+
+
  def get_status() -> dict:
  """Get indexing status with overall stats and per-project breakdown.

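A quick usage sketch for the new helper; its logic is repeated here so the example runs standalone, and the project directory name is made up:

from pathlib import Path

def normalize_file_path(file_path: str) -> str:
    # Same logic as the helper added above.
    if file_path.startswith("/logs/"):
        projects_dir = str(Path.home() / ".claude" / "projects")
        return file_path.replace("/logs/", projects_dir + "/", 1)
    return file_path

# Docker-side path is rewritten to the local ~/.claude/projects directory.
print(normalize_file_path("/logs/-Users-me-myproject/chat.jsonl"))
# Already-local paths pass through unchanged.
print(normalize_file_path("/Users/me/.claude/projects/-Users-me-myproject/chat.jsonl"))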
@@ -48,11 +62,16 @@ def get_status() -> dict:
  projects_dir = Path.home() / ".claude" / "projects"
  project_stats = defaultdict(lambda: {"indexed": 0, "total": 0})

+ # Build a mapping of normalized file paths to project names
+ file_to_project = {}
+
  # Count total JSONL files per project
  if projects_dir.exists():
  for jsonl_file in projects_dir.glob("**/*.jsonl"):
- project_name = extract_project_name_from_path(str(jsonl_file))
+ file_str = str(jsonl_file)
+ project_name = extract_project_name_from_path(file_str)
  project_stats[project_name]["total"] += 1
+ file_to_project[file_str] = project_name

  # Read imported-files.json to count indexed files per project
  config_paths = [
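The file_to_project map built here is what later lets imported paths (possibly Docker-style) be matched back to a project via their normalized local path. A standalone sketch of the same pass, using a stand-in for extract_project_name_from_path (the real helper may derive the name differently):

from collections import defaultdict
from pathlib import Path

projects_dir = Path.home() / ".claude" / "projects"
project_stats = defaultdict(lambda: {"indexed": 0, "total": 0})
file_to_project = {}

def project_name_for(path: Path) -> str:
    # Stand-in: the project is the directory holding the .jsonl file,
    # with leading dashes stripped (mirrors dir_name.lstrip('-') above).
    return path.parent.name.lstrip("-")

if projects_dir.exists():
    for jsonl_file in projects_dir.glob("**/*.jsonl"):
        file_str = str(jsonl_file)
        name = project_name_for(jsonl_file)
        project_stats[name]["total"] += 1
        file_to_project[file_str] = name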
@@ -72,33 +91,42 @@ def get_status() -> dict:
  with open(imported_files_path, 'r') as f:
  data = json.load(f)

- # Handle both old and new config file formats
- if "stream_position" in data:
- # New format with stream_position
- stream_pos = data.get("stream_position", {})
- imported_files = stream_pos.get("imported_files", [])
- file_metadata = stream_pos.get("file_metadata", {})
-
- # Count fully imported files
- for file_path in imported_files:
- project_name = extract_project_name_from_path(file_path)
+ # The actual structure has imported_files at the top level
+ imported_files = data.get("imported_files", {})
+
+ # Count all files in imported_files object (they are all fully imported)
+ for file_path in imported_files.keys():
+ normalized_path = normalize_file_path(file_path)
+ if normalized_path in file_to_project:
+ project_name = file_to_project[normalized_path]
  project_stats[project_name]["indexed"] += 1
-
- # Count partially imported files (files with position > 0)
- for file_path, metadata in file_metadata.items():
- if isinstance(metadata, dict) and metadata.get("position", 0) > 0:
- # Only count if not already in imported_files
- if file_path not in imported_files:
- project_name = extract_project_name_from_path(file_path)
+
+ # Also check file_metadata for partially imported files
+ file_metadata = data.get("file_metadata", {})
+ for file_path, metadata in file_metadata.items():
+ if isinstance(metadata, dict) and metadata.get("position", 0) > 0:
+ # Only count if not already in imported_files
+ if file_path not in imported_files:
+ normalized_path = normalize_file_path(file_path)
+ if normalized_path in file_to_project:
+ project_name = file_to_project[normalized_path]
  project_stats[project_name]["indexed"] += 1
- else:
- # Legacy format with imported_files as top-level object
- imported_files = data.get("imported_files", {})
-
- # Count all files in imported_files object (they are all fully imported)
- for file_path in imported_files.keys():
- project_name = extract_project_name_from_path(file_path)
- project_stats[project_name]["indexed"] += 1
+
+ # Also check stream_position if it contains file paths
+ stream_position = data.get("stream_position", {})
+ if isinstance(stream_position, dict):
+ for file_path in stream_position.keys():
+ # Skip non-file entries
+ if file_path in ["imported_files", "file_metadata"]:
+ continue
+ # Only count if not already counted
+ if file_path not in imported_files:
+ normalized_path = normalize_file_path(file_path)
+ if normalized_path in file_to_project:
+ project_name = file_to_project[normalized_path]
+ # Only increment if not already counted
+ if project_stats[project_name]["indexed"] < project_stats[project_name]["total"]:
+ project_stats[project_name]["indexed"] += 1
  except (json.JSONDecodeError, KeyError, OSError):
  # If config file is corrupted or unreadable, continue with zero indexed counts
  pass
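Taken together, the rewritten counting pass treats every entry in imported_files as fully imported, adds file_metadata entries with a non-zero position as partial imports, and finally falls back to any file paths found directly under stream_position, capping each project at its total. A sketch of how the per-project tallies could be rolled up into the overall status fields that reflect_on_past reads (the field names match the usage above; the exact return shape of get_status is otherwise an assumption):

# Illustrative per-project tallies as produced by the counting pass above.
project_stats = {
    "myproject": {"indexed": 18, "total": 20},
    "otherproject": {"indexed": 5, "total": 5},
}

indexed = sum(s["indexed"] for s in project_stats.values())
total = sum(s["total"] for s in project_stats.values())

indexing_status = {
    "indexed_conversations": indexed,
    "total_conversations": total,
    "backlog_count": total - indexed,
    "percentage": (indexed / total * 100) if total else 100.0,
}
print(indexing_status)
# {'indexed_conversations': 23, 'total_conversations': 25, 'backlog_count': 2, 'percentage': 92.0}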
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "claude-self-reflect",
- "version": "2.5.12",
+ "version": "2.5.14",
  "description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
  "keywords": [
  "claude",
@@ -35,6 +35,7 @@
  "mcp-server/run-mcp-docker.sh",
  "scripts/import-*.py",
  ".claude/agents/*.md",
+ "config/qdrant-config.yaml",
  "docker-compose.yaml",
  "Dockerfile.*",
  ".env.example",