claude-self-reflect 5.0.2 → 5.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docker-compose.yaml +3 -1
- package/installer/setup-wizard-docker.js +64 -9
- package/package.json +1 -1
- package/scripts/ast_grep_final_analyzer.py +16 -6
- package/scripts/csr-status +120 -17
- package/scripts/debug-august-parsing.py +5 -1
- package/scripts/debug-project-resolver.py +3 -3
- package/scripts/import-conversations-unified.py +292 -821
- package/scripts/session_quality_tracker.py +10 -0
- package/scripts/unified_state_manager.py +7 -4
- package/mcp-server/src/test_quality.py +0 -153
package/docker-compose.yaml
CHANGED
@@ -18,7 +18,9 @@ services:
       - "${QDRANT_PORT:-6333}:6333"
     volumes:
       - qdrant_data:/qdrant/storage
-
+      # Note: Using CONFIG_PATH variable to support global npm installs (fixes #71)
+      # macOS Docker Desktop restricts mounts to /Users, /Volumes, /private, /tmp
+      - ${CONFIG_PATH:-~/.claude-self-reflect/config}/qdrant-config.yaml:/qdrant/config/config.yaml:ro
     environment:
      - QDRANT__LOG_LEVEL=INFO
      - QDRANT__SERVICE__HTTP_PORT=6333
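The new volume entry reads Qdrant's config from a host directory chosen by CONFIG_PATH, defaulting to ~/.claude-self-reflect/config. A minimal sketch of how that default resolves when CONFIG_PATH is unset (illustrative only; the paths come from the compose line above, the helper name is hypothetical):

import os
from pathlib import Path

def resolve_mount_source() -> str:
    # Mirrors ${CONFIG_PATH:-~/.claude-self-reflect/config} from docker-compose.yaml
    config_dir = os.environ.get("CONFIG_PATH") or str(Path.home() / ".claude-self-reflect" / "config")
    return str(Path(config_dir).expanduser() / "qdrant-config.yaml")

print(resolve_mount_source() + ":/qdrant/config/config.yaml:ro")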
package/installer/setup-wizard-docker.js
CHANGED

@@ -71,23 +71,64 @@ async function checkDocker() {
   try {
     safeExec('docker', ['info'], { stdio: 'ignore' });
     console.log('✅ Docker is installed and running');
-
+
     // Check docker compose
     try {
       safeExec('docker', ['compose', 'version'], { stdio: 'ignore' });
-      console.log('✅ Docker Compose
+      console.log('✅ Docker Compose is available');
       return true;
     } catch {
-      console.log('❌ Docker Compose
-      console.log(' Please update Docker Desktop to
+      console.log('❌ Docker Compose not found');
+      console.log(' Please update Docker Desktop to include Compose v2');
       return false;
     }
   } catch {
-    console.log('❌ Docker is not running or not installed');
-    console.log('
-    console.log('
-
-
+    console.log('❌ Docker is not running or not installed\n');
+    console.log('📋 Claude Self-Reflect requires Docker Desktop');
+    console.log(' (Includes Docker Engine + Compose - everything you need)\n');
+
+    const platform = process.platform;
+    const arch = process.arch;
+
+    if (platform === 'darwin') {
+      const archType = arch === 'arm64' ? 'Apple Silicon (M1/M2/M3/M4)' : 'Intel';
+      console.log(`🍎 macOS (${archType}) Installation:\n`);
+
+      if (arch === 'arm64') {
+        console.log(' Download: https://desktop.docker.com/mac/main/arm64/Docker.dmg');
+      } else {
+        console.log(' Download: https://desktop.docker.com/mac/main/amd64/Docker.dmg');
+      }
+
+      console.log(' 1. Open the downloaded Docker.dmg');
+      console.log(' 2. Drag Docker.app to Applications folder');
+      console.log(' 3. Launch Docker Desktop from Applications');
+      console.log(' 4. Wait for Docker to start (whale icon in menu bar)');
+      console.log(' 5. Re-run: claude-self-reflect setup\n');
+
+    } else if (platform === 'win32') {
+      console.log('🪟 Windows Installation:\n');
+      console.log(' Download: https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe');
+      console.log(' 1. Run the installer');
+      console.log(' 2. Follow installation prompts');
+      console.log(' 3. Restart computer if prompted');
+      console.log(' 4. Launch Docker Desktop');
+      console.log(' 5. Re-run: claude-self-reflect setup\n');
+
+    } else {
+      console.log('🐧 Linux Installation:\n');
+      console.log(' Install Docker Engine (includes Compose):');
+      console.log(' • Ubuntu/Debian: https://docs.docker.com/engine/install/ubuntu/');
+      console.log(' • Fedora: https://docs.docker.com/engine/install/fedora/');
+      console.log(' • Arch: https://wiki.archlinux.org/title/docker');
+      console.log(' • CentOS: https://docs.docker.com/engine/install/centos/\n');
+    }
+
+    console.log('ℹ️ Docker Desktop is free for:');
+    console.log(' • Personal use');
+    console.log(' • Small businesses (<250 employees, <$10M revenue)');
+    console.log(' • Education and open source projects\n');
+
     return false;
   }
 }
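The expanded checkDocker() now probes both the Docker daemon and Compose v2 before printing platform-specific install instructions. A rough Python equivalent of the probe for readers following along outside the installer (assumes the docker CLI is on PATH; the real installer is JavaScript and uses safeExec as shown above):

import subprocess

def docker_and_compose_available() -> bool:
    # Same two checks the wizard performs: `docker info` and `docker compose version`
    for cmd in (["docker", "info"], ["docker", "compose", "version"]):
        try:
            subprocess.run(cmd, check=True,
                           stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        except (OSError, subprocess.CalledProcessError):
            return False
    return True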
@@ -123,6 +164,20 @@ async function configureEnvironment() {
     } catch {
       // No old config directory, nothing to migrate
     }
+
+    // Copy qdrant-config.yaml from npm package to user config directory
+    // This is critical for global npm installs where Docker cannot mount from /opt/homebrew
+    const sourceQdrantConfig = join(projectRoot, 'config', 'qdrant-config.yaml');
+    const targetQdrantConfig = join(userConfigDir, 'qdrant-config.yaml');
+    try {
+      await fs.copyFile(sourceQdrantConfig, targetQdrantConfig);
+      console.log('✅ Qdrant config copied to user directory');
+    } catch (err) {
+      if (err.code !== 'ENOENT') {
+        console.log('⚠️ Could not copy qdrant-config.yaml:', err.message);
+        console.log(' Docker may have issues starting Qdrant service');
+      }
+    }
   } catch (error) {
     console.log(`❌ Could not create config directory: ${error.message}`);
     console.log(' This may cause Docker mount issues. Please check permissions.');
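configureEnvironment() now copies the packaged qdrant-config.yaml into the user's config directory so Docker can mount it even when the package lives under a global npm prefix such as /opt/homebrew. A condensed Python sketch of the same copy-and-tolerate-missing-source behaviour (illustrative; the function name and argument are assumptions, not package API):

import shutil
from pathlib import Path

def copy_qdrant_config(project_root: Path) -> None:
    source = project_root / "config" / "qdrant-config.yaml"
    target = Path.home() / ".claude-self-reflect" / "config" / "qdrant-config.yaml"
    target.parent.mkdir(parents=True, exist_ok=True)
    try:
        shutil.copyfile(source, target)
        print("Qdrant config copied to user directory")
    except FileNotFoundError:
        pass  # a missing source is tolerated, matching the ENOENT check above
    except OSError as err:
        print(f"Could not copy qdrant-config.yaml: {err}")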
package/package.json
CHANGED
package/scripts/ast_grep_final_analyzer.py
CHANGED

@@ -257,15 +257,18 @@ class FinalASTGrepAnalyzer:
     return '\n'.join(report)
 
 
-def run_final_analysis():
+def run_final_analysis(file_path=None):
     """Run final AST-GREP analysis with unified registry."""
     print("🚀 FINAL AST-GREP Analysis with Unified Registry")
     print("=" * 60)
 
     analyzer = FinalASTGrepAnalyzer()
 
-    #
-
+    # Use provided path or default
+    # Use relative path from script location
+    script_dir = Path(__file__).parent
+    default_path = script_dir.parent / "mcp-server" / "src" / "server.py"
+    server_path = file_path if file_path else str(default_path)
 
     print(f"\nAnalyzing: {server_path}")
     print("-" * 40)

@@ -299,14 +302,14 @@ def run_final_analysis():
 
     # Generate and save report
     report = analyzer.generate_report(result)
-    report_path = "
+    report_path = script_dir / "final_analysis_report.md"
     with open(report_path, 'w') as f:
         f.write(report)
 
     print(f"\n📝 Full report saved to: {report_path}")
 
     # Save JSON results
-    json_path = "
+    json_path = script_dir / "final_analysis_result.json"
     with open(json_path, 'w') as f:
         json.dump(result, f, indent=2)
 

@@ -325,4 +328,11 @@ def run_final_analysis():
 
 
 if __name__ == "__main__":
-
+    import sys
+    if len(sys.argv) > 1:
+        # Use provided file path
+        file_path = sys.argv[1]
+    else:
+        # Default to server.py
+        file_path = str(default_path) # Use the same default path from above
+    run_final_analysis(file_path)
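run_final_analysis() now accepts an optional file path and resolves its default target and report locations relative to the script instead of hard-coded absolute paths. Hypothetical usage of the new signature (names taken from the diff above; how the script is invoked in the packaged tooling may differ):

from ast_grep_final_analyzer import run_final_analysis

# Analyze an explicit file...
run_final_analysis("mcp-server/src/server.py")

# ...or fall back to the script-relative default,
# ../mcp-server/src/server.py next to the scripts directory.
run_final_analysis()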
package/scripts/csr-status
CHANGED
@@ -6,6 +6,7 @@ Standalone script that doesn't require venv activation.
 
 import json
 import time
+import os
 from pathlib import Path
 from datetime import datetime, timedelta
 import sys

@@ -147,6 +148,51 @@ def format_statusline_quality(critical=0, medium=0, low=0):
 
 def get_session_health():
     """Get cached session health with icon-based quality display."""
+    # Get project-specific cache based on current directory
+    project_dir = os.getcwd()
+    project_name = os.path.basename(project_dir) if project_dir else "default"
+
+    # Check project-specific realtime cache first
+    quality_dir = Path.home() / ".claude-self-reflect" / "quality_by_project"
+    realtime_cache = quality_dir / f"{project_name}.json"
+
+    # Fallback to global cache if project-specific doesn't exist
+    if not realtime_cache.exists():
+        realtime_cache = Path.home() / ".claude-self-reflect" / "realtime_quality.json"
+
+    if realtime_cache.exists():
+        try:
+            # Check realtime cache age
+            mtime = datetime.fromtimestamp(realtime_cache.stat().st_mtime)
+            age = datetime.now() - mtime
+
+            if age < timedelta(minutes=5):  # Fresh realtime data
+                with open(realtime_cache, 'r') as f:
+                    realtime_data = json.load(f)
+
+                if "session_aggregate" in realtime_data:
+                    agg = realtime_data["session_aggregate"]
+                    issues = agg.get("total_issues", {})
+                    critical = issues.get("critical", 0)
+                    medium = issues.get("medium", 0)
+                    low = issues.get("low", 0)
+
+                    # Include score in display if significantly below threshold
+                    score = agg.get("average_score", 100)
+                    if critical > 0 or medium > 0 or low > 0:
+                        # Show issue counts when there are any issues
+                        return format_statusline_quality(critical, medium, low)
+                    elif score < 70:
+                        # Use red icon for scores below threshold (no issues but poor score)
+                        icon = "🔴"
+                        return f"{icon} {score:.0f}%"
+                    else:
+                        # Good quality, no issues
+                        return format_statusline_quality(critical, medium, low)
+        except Exception:
+            pass  # Fall back to old cache system
+
+    # Fall back to old cache system
     # Check for session edit tracker to show appropriate label
     tracker_file = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
 

@@ -363,8 +409,63 @@ def get_compact_status():
     except:
         pass
 
-    # Get
-
+    # Get project-specific cache based on current directory
+    project_name = os.path.basename(os.getcwd()) if os.getcwd() else "default"
+
+    # Check project-specific realtime cache first
+    quality_dir = Path.home() / ".claude-self-reflect" / "quality_by_project"
+    realtime_cache = quality_dir / f"{project_name}.json"
+
+    # Fallback to global cache if project-specific doesn't exist
+    if not realtime_cache.exists():
+        realtime_cache = Path.home() / ".claude-self-reflect" / "realtime_quality.json"
+
+    grade_str = ""
+    quality_valid = False
+
+    if realtime_cache.exists():
+        try:
+            mtime = datetime.fromtimestamp(realtime_cache.stat().st_mtime)
+            age = datetime.now() - mtime
+            if age < timedelta(minutes=5):  # Fresh realtime data
+                with open(realtime_cache, 'r') as f:
+                    realtime_data = json.load(f)
+
+                if "session_aggregate" in realtime_data:
+                    agg = realtime_data["session_aggregate"]
+                    issues = agg.get("total_issues", {})
+                    critical = issues.get("critical", 0)
+                    medium = issues.get("medium", 0)
+                    low = issues.get("low", 0)
+                    score = agg.get("average_score", 100)
+
+                    # Get icon based on score and issues
+                    if score < 70:
+                        icon = "🔴"  # Red for below threshold
+                    else:
+                        icon = get_quality_icon(critical, medium, low)
+
+                    # Build compact display
+                    if critical > 0 or medium > 0 or low > 0:
+                        # Show issue counts when there are any issues
+                        colored_parts = []
+                        if critical > 0:
+                            colored_parts.append(f"C:{critical}")
+                        if medium > 0:
+                            colored_parts.append(f"M:{medium}")
+                        if low > 0:
+                            colored_parts.append(f"L:{low}")
+                        grade_str = f"[{icon}:{'·'.join(colored_parts)}]"
+                    elif score < 70:
+                        # Show score when below threshold but no specific issues
+                        grade_str = f"[{icon}:{score:.0f}%]"
+                    else:
+                        grade_str = f"[{icon}]"
+                    quality_valid = True
+        except:
+            pass
+
+    # Setup cache file path for fallback
     project_name = os.path.basename(os.getcwd())
     # Secure sanitization with whitelist approach
     import re

@@ -372,21 +473,23 @@ def get_compact_status():
     cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
     cache_file = cache_dir / f"{safe_project_name}.json"
 
-    #
-
-
-    #
-
-    if
-
-
-
-
-
-
-
-
+    # Fall back to old cache if no realtime data
+    if not quality_valid:
+
+        # If the exact cache file doesn't exist, try to find one that ends with this project name
+        # This handles cases like "metafora-Atlas-gold.json" for project "Atlas-gold"
+        if not cache_file.exists():
+            # Look for files ending with the project name
+            possible_files = list(cache_dir.glob(f"*-{safe_project_name}.json"))
+            if possible_files:
+                cache_file = possible_files[0]  # Use the first match
+
+        # Validate cache file path stays within cache directory
+        if cache_file.exists() and not str(cache_file.resolve()).startswith(str(cache_dir.resolve())):
+            # Security issue - return placeholder
+            grade_str = "[...]"
+        else:
+            cache_file.parent.mkdir(exist_ok=True, parents=True)
 
     # Try to get quality data (regenerate if too old or missing)
     quality_valid = False
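csr-status now prefers a per-project realtime quality cache keyed by the current directory's basename, and only falls back to the global realtime cache (and then the older quality_cache files) when that is missing or stale. The lookup order, condensed into a small sketch (derived from the diff above; not a separate API in the package):

import os
from pathlib import Path

def resolve_quality_cache() -> Path:
    base = Path.home() / ".claude-self-reflect"
    project_name = os.path.basename(os.getcwd()) or "default"
    per_project = base / "quality_by_project" / f"{project_name}.json"
    # Project-specific realtime cache wins; otherwise fall back to the global one.
    return per_project if per_project.exists() else base / "realtime_quality.json"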
package/scripts/debug-august-parsing.py
CHANGED

@@ -59,7 +59,11 @@ def parse_jsonl_file(file_path):
     return messages
 
 if __name__ == "__main__":
-
+    # Use home directory path
+    from pathlib import Path
+    home = Path.home()
+    file_path = home / ".claude" / "projects" / f"-{home}-projects-claude-self-reflect" / "7b3354ed-d6d2-4eab-b328-1fced4bb63bb.jsonl"
+    file_path = str(file_path)
 
     print(f"Parsing: {file_path}")
     print("=" * 60)
package/scripts/debug-project-resolver.py
CHANGED

@@ -48,9 +48,9 @@ from shared.normalization import normalize_project_name
 import hashlib
 
 test_paths = [
-    "
-    "
-    "
+    str(Path.home() / "projects" / "claude-self-reflect"),
+    str(Path.home() / "projects" / "memento"),
+    str(Path.home() / "projects" / "cc-enhance")
 ]
 
 for path in test_paths: