claude-self-reflect 2.5.12 → 2.5.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
config/qdrant-config.yaml ADDED
@@ -0,0 +1,59 @@
+log_level: INFO
+
+storage:
+  # Where to store all the data
+  storage_path: ./storage
+
+  # Where to store snapshots
+  snapshots_path: ./snapshots
+
+  # CRITICAL: Store payloads on disk to save memory
+  on_disk_payload: true
+
+  performance:
+    # Reduce number of search threads to save memory
+    max_search_threads: 2
+
+    # Conservative CPU budget for optimization
+    optimizer_cpu_budget: 1
+
+  optimizers:
+    # Memory-optimized settings
+    deleted_threshold: 0.2
+    vacuum_min_vector_number: 1000
+    default_segment_number: 1
+    # Reduce max segment size to prevent memory spikes
+    max_segment_size_kb: 50000  # 50MB max
+    # Lower indexing threshold to use disk sooner
+    indexing_threshold_kb: 1000  # 1MB
+    flush_interval_sec: 5
+    max_optimization_threads: 1
+
+  # CRITICAL: Store HNSW indexes on disk
+  hnsw_index:
+    m: 16
+    ef_construct: 100
+    full_scan_threshold_kb: 1000
+    max_indexing_threads: 2
+    # Store HNSW index on disk - CRITICAL for memory savings
+    on_disk: true
+
+collection:
+  # Default vectors storage on disk
+  vectors:
+    on_disk: true
+
+  # Single replica to save memory
+  replication_factor: 1
+  write_consistency_factor: 1
+
+service:
+  max_request_size_mb: 32
+  max_workers: 2
+  host: 0.0.0.0
+  http_port: 6333
+  grpc_port: 6334
+  enable_cors: true
+
+# Disable telemetry
+telemetry_disabled: true
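The theme of this config is trading a little latency for a much smaller resident set: payloads, raw vectors, and the HNSW graph all live on disk, and thread counts are capped. A quick sanity check of the three disk-offload flags, as a minimal sketch assuming PyYAML is installed and the file sits at config/qdrant-config.yaml relative to the working directory (both assumptions, not part of the diff):

```python
# Minimal sanity check for the memory-critical flags above (a sketch,
# not part of the package). Assumes PyYAML and the path below.
import yaml

with open("config/qdrant-config.yaml") as f:
    cfg = yaml.safe_load(f)

# The three settings that move Qdrant's big allocations to disk:
assert cfg["storage"]["on_disk_payload"] is True
assert cfg["storage"]["hnsw_index"]["on_disk"] is True
assert cfg["collection"]["vectors"]["on_disk"] is True
print("disk-offload flags OK")
```

In a Docker setup, a file like this is typically mounted over /qdrant/config/production.yaml, which the official Qdrant image reads at startup.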
@@ -39,6 +39,20 @@ def extract_project_name_from_path(file_path: str) -> str:
     return dir_name.lstrip('-')
 
 
+def normalize_file_path(file_path: str) -> str:
+    """Normalize file paths to handle Docker vs local path differences.
+
+    Converts:
+    - /logs/PROJECT_DIR/file.jsonl -> ~/.claude/projects/PROJECT_DIR/file.jsonl
+    - Already normalized paths remain unchanged
+    """
+    if file_path.startswith("/logs/"):
+        # Convert Docker path to local path
+        projects_dir = str(Path.home() / ".claude" / "projects")
+        return file_path.replace("/logs/", projects_dir + "/", 1)
+    return file_path
+
+
 def get_status() -> dict:
     """Get indexing status with overall stats and per-project breakdown.
 
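The importer runs inside Docker and records transcripts under /logs, while get_status walks the local ~/.claude/projects tree, so the same file can appear under two different paths. normalize_file_path maps the Docker form onto the local one so both sides agree on a single key. A standalone sketch of the behavior (the function body is copied from the diff above; the project directory name is hypothetical):

```python
from pathlib import Path

def normalize_file_path(file_path: str) -> str:
    """Copied from the diff above so this sketch runs on its own."""
    if file_path.startswith("/logs/"):
        # Convert Docker path to local path
        projects_dir = str(Path.home() / ".claude" / "projects")
        return file_path.replace("/logs/", projects_dir + "/", 1)
    return file_path

# "-Users-alice-demo" stands in for a real project directory name.
docker_path = "/logs/-Users-alice-demo/session.jsonl"
local_path = normalize_file_path(docker_path)
print(local_path)  # e.g. /Users/alice/.claude/projects/-Users-alice-demo/session.jsonl

# Already-local paths pass through unchanged, so applying it twice is safe.
assert normalize_file_path(local_path) == local_path
```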
@@ -48,11 +62,16 @@ def get_status() -> dict:
     projects_dir = Path.home() / ".claude" / "projects"
     project_stats = defaultdict(lambda: {"indexed": 0, "total": 0})
 
+    # Build a mapping of normalized file paths to project names
+    file_to_project = {}
+
     # Count total JSONL files per project
     if projects_dir.exists():
         for jsonl_file in projects_dir.glob("**/*.jsonl"):
-            project_name = extract_project_name_from_path(str(jsonl_file))
+            file_str = str(jsonl_file)
+            project_name = extract_project_name_from_path(file_str)
             project_stats[project_name]["total"] += 1
+            file_to_project[file_str] = project_name
 
     # Read imported-files.json to count indexed files per project
     config_paths = [
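Note the design choice in this hunk: instead of re-deriving a project name from every path recorded in imported-files.json, the status code now builds file_to_project from the files that actually exist on disk and counts an imported entry only when its normalized path hits that map. Stale entries for deleted transcripts are therefore skipped rather than inflating the indexed count. A toy illustration of the lookup (all paths hypothetical):

```python
from collections import defaultdict

# Hypothetical on-disk files found by the glob above.
file_to_project = {
    "/Users/alice/.claude/projects/-Users-alice-demo/a.jsonl": "demo",
    "/Users/alice/.claude/projects/-Users-alice-demo/b.jsonl": "demo",
}
project_stats = defaultdict(lambda: {"indexed": 0, "total": len(file_to_project)})

# One recorded path still exists, one points at a deleted file.
recorded = [
    "/Users/alice/.claude/projects/-Users-alice-demo/a.jsonl",
    "/Users/alice/.claude/projects/-Users-alice-demo/gone.jsonl",
]
for path in recorded:
    if path in file_to_project:  # the deleted file is simply skipped
        project_stats[file_to_project[path]]["indexed"] += 1

print(dict(project_stats))  # {'demo': {'indexed': 1, 'total': 2}}
```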
@@ -72,33 +91,42 @@ def get_status() -> dict:
                 with open(imported_files_path, 'r') as f:
                     data = json.load(f)
 
-                # Handle both old and new config file formats
-                if "stream_position" in data:
-                    # New format with stream_position
-                    stream_pos = data.get("stream_position", {})
-                    imported_files = stream_pos.get("imported_files", [])
-                    file_metadata = stream_pos.get("file_metadata", {})
-
-                    # Count fully imported files
-                    for file_path in imported_files:
-                        project_name = extract_project_name_from_path(file_path)
+                # The actual structure has imported_files at the top level
+                imported_files = data.get("imported_files", {})
+
+                # Count all files in imported_files object (they are all fully imported)
+                for file_path in imported_files.keys():
+                    normalized_path = normalize_file_path(file_path)
+                    if normalized_path in file_to_project:
+                        project_name = file_to_project[normalized_path]
                         project_stats[project_name]["indexed"] += 1
-
-                    # Count partially imported files (files with position > 0)
-                    for file_path, metadata in file_metadata.items():
-                        if isinstance(metadata, dict) and metadata.get("position", 0) > 0:
-                            # Only count if not already in imported_files
-                            if file_path not in imported_files:
-                                project_name = extract_project_name_from_path(file_path)
+
+                # Also check file_metadata for partially imported files
+                file_metadata = data.get("file_metadata", {})
+                for file_path, metadata in file_metadata.items():
+                    if isinstance(metadata, dict) and metadata.get("position", 0) > 0:
+                        # Only count if not already in imported_files
+                        if file_path not in imported_files:
+                            normalized_path = normalize_file_path(file_path)
+                            if normalized_path in file_to_project:
+                                project_name = file_to_project[normalized_path]
                                 project_stats[project_name]["indexed"] += 1
-                else:
-                    # Legacy format with imported_files as top-level object
-                    imported_files = data.get("imported_files", {})
-
-                    # Count all files in imported_files object (they are all fully imported)
-                    for file_path in imported_files.keys():
-                        project_name = extract_project_name_from_path(file_path)
-                        project_stats[project_name]["indexed"] += 1
+
+                # Also check stream_position if it contains file paths
+                stream_position = data.get("stream_position", {})
+                if isinstance(stream_position, dict):
+                    for file_path in stream_position.keys():
+                        # Skip non-file entries
+                        if file_path in ["imported_files", "file_metadata"]:
+                            continue
+                        # Only count if not already counted
+                        if file_path not in imported_files:
+                            normalized_path = normalize_file_path(file_path)
+                            if normalized_path in file_to_project:
+                                project_name = file_to_project[normalized_path]
+                                # Only increment if not already counted
+                                if project_stats[project_name]["indexed"] < project_stats[project_name]["total"]:
+                                    project_stats[project_name]["indexed"] += 1
             except (json.JSONDecodeError, KeyError, OSError):
                 # If config file is corrupted or unreadable, continue with zero indexed counts
                 pass
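The rewritten block reads three keys from imported-files.json: imported_files (fully imported files), file_metadata (partial imports with a byte position), and stream_position (checked defensively for stray path keys, with a cap so indexed never exceeds total). The exact schema is not shown in this diff; the fixture below is only an assumption consistent with what the code reads:

```python
import json

# Hypothetical imported-files.json consistent with the keys the code
# above consumes; the real importer may store more (or different) fields.
fixture = {
    "imported_files": {
        # get_status only looks at the keys; the value here is made up.
        "/logs/-Users-alice-demo/a.jsonl": "2025-01-01T00:00:00Z",
    },
    "file_metadata": {
        # position > 0 marks a partially imported file.
        "/logs/-Users-alice-demo/b.jsonl": {"position": 2048},
    },
    "stream_position": {},
}
print(json.dumps(fixture, indent=2))
```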
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "claude-self-reflect",
-  "version": "2.5.12",
+  "version": "2.5.13",
   "description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
   "keywords": [
     "claude",
@@ -35,6 +35,7 @@
     "mcp-server/run-mcp-docker.sh",
     "scripts/import-*.py",
     ".claude/agents/*.md",
+    "config/qdrant-config.yaml",
     "docker-compose.yaml",
     "Dockerfile.*",
     ".env.example",