claude-self-reflect 3.3.0 → 3.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/claude-self-reflect-test.md +426 -11
- package/installer/cli.js +16 -0
- package/installer/postinstall.js +14 -0
- package/installer/statusline-setup.js +289 -0
- package/mcp-server/run-mcp.sh +28 -4
- package/mcp-server/src/parallel_search.py +16 -82
- package/mcp-server/src/reflection_tools.py +13 -8
- package/mcp-server/src/search_tools.py +90 -42
- package/mcp-server/src/temporal_tools.py +10 -3
- package/package.json +6 -1
- package/scripts/ast_grep_final_analyzer.py +325 -0
- package/scripts/ast_grep_unified_registry.py +556 -0
- package/scripts/csr-status +366 -0
- package/scripts/import-conversations-unified.py +104 -23
- package/scripts/session_quality_tracker.py +481 -0
- package/scripts/streaming-watcher.py +140 -5
- package/scripts/update_patterns.py +334 -0
|
@@ -0,0 +1,481 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Session Quality Tracker
|
|
4
|
+
Analyzes code quality for files edited in the current Claude session.
|
|
5
|
+
MANDATORY AST-GREP analysis for real-time quality feedback.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from datetime import datetime, timedelta
|
|
12
|
+
from typing import Dict, List, Any, Optional, Set
|
|
13
|
+
import logging
|
|
14
|
+
import sys
|
|
15
|
+
|
|
16
|
+
# Configure logging exactly once at import time. The file previously called
# logging.basicConfig() twice and rebound `logger` twice; the second
# basicConfig() is a no-op once the root logger has handlers, so only the
# first (which carries the intended message-only format) is kept.
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)

# Add this scripts directory to sys.path so the sibling analyzer modules
# resolve regardless of the working directory the script is launched from.
sys.path.append(str(Path(__file__).parent))

from ast_grep_final_analyzer import FinalASTGrepAnalyzer
from update_patterns import check_and_update_patterns
|
|
28
|
+
|
|
29
|
+
class SessionQualityTracker:
    """
    Tracks code quality for the current Claude session and provides
    real-time feedback on code quality issues.
    """

    def __init__(self):
        # Single analyzer instance reused for every file we score.
        self.analyzer = FinalASTGrepAnalyzer()
        # Paths touched during the session.
        self.session_files = set()
        # file path -> list of {'timestamp', 'score'} snapshots for trends.
        self.quality_history = {}
        # Stem of the active session JSONL, or None until detected.
        self.current_session_id = None
|
|
40
|
+
|
|
41
|
+
def find_active_session(self, project_path: Optional[str] = None) -> Optional[Path]:
    """Return the most recently modified session JSONL file, or None.

    Claude stores transcripts under
    ``~/.claude/projects/<path-with-'/'-replaced-by-'-'>/*.jsonl``.
    Only files modified within the last 2 hours count as "active".

    Args:
        project_path: Absolute project path to look up. When omitted, the
            current working directory is used. (Previously this fell back to
            a hard-coded developer-specific directory, which broke the
            lookup for everyone else.)

    Returns:
        Path to the newest recent JSONL, or None if the project directory is
        missing or no file was modified recently.
    """
    claude_projects = Path.home() / ".claude" / "projects"

    # Flatten the target path using Claude's '/'-to-'-' naming convention;
    # default to the current working directory instead of a fixed user path.
    target = project_path if project_path else os.getcwd()
    project_dir = claude_projects / target.replace('/', '-')

    if not project_dir.exists():
        logger.warning(f"Project directory not found: {project_dir}")
        return None

    # Collect JSONL files modified in the last 2 hours, statting each file
    # only once (the previous version called stat() twice per file).
    cutoff_ts = (datetime.now() - timedelta(hours=2)).timestamp()
    recent_files = []
    for jsonl_file in project_dir.glob("*.jsonl"):
        mtime = jsonl_file.stat().st_mtime
        if mtime > cutoff_ts:
            recent_files.append((jsonl_file, mtime))

    if not recent_files:
        return None

    # Most recently modified file wins; max() avoids a full sort.
    return max(recent_files, key=lambda pair: pair[1])[0]
|
|
70
|
+
|
|
71
|
+
def extract_edited_files(self, session_file: Path) -> Set[str]:
|
|
72
|
+
"""Extract files edited in the current session."""
|
|
73
|
+
edited_files = set()
|
|
74
|
+
|
|
75
|
+
try:
|
|
76
|
+
with open(session_file, 'r') as f:
|
|
77
|
+
for line in f:
|
|
78
|
+
try:
|
|
79
|
+
entry = json.loads(line.strip())
|
|
80
|
+
|
|
81
|
+
# Look for messages with tool usage
|
|
82
|
+
if 'messages' in entry:
|
|
83
|
+
for msg in entry['messages']:
|
|
84
|
+
if msg.get('role') == 'assistant' and 'content' in msg:
|
|
85
|
+
content = msg['content']
|
|
86
|
+
|
|
87
|
+
# Handle different content formats
|
|
88
|
+
if isinstance(content, list):
|
|
89
|
+
for item in content:
|
|
90
|
+
if isinstance(item, dict) and item.get('type') == 'tool_use':
|
|
91
|
+
tool_name = item.get('name', '')
|
|
92
|
+
if tool_name in ['Edit', 'Write', 'MultiEdit', 'NotebookEdit']:
|
|
93
|
+
input_data = item.get('input', {})
|
|
94
|
+
if 'file_path' in input_data:
|
|
95
|
+
file_path = input_data['file_path']
|
|
96
|
+
if file_path and os.path.exists(file_path):
|
|
97
|
+
edited_files.add(file_path)
|
|
98
|
+
|
|
99
|
+
except json.JSONDecodeError:
|
|
100
|
+
continue
|
|
101
|
+
except Exception as e:
|
|
102
|
+
logger.debug(f"Error processing line: {e}")
|
|
103
|
+
|
|
104
|
+
except Exception as e:
|
|
105
|
+
logger.error(f"Error reading session file: {e}")
|
|
106
|
+
|
|
107
|
+
return edited_files
|
|
108
|
+
|
|
109
|
+
def analyze_session_quality(self, session_file: Optional[Path] = None) -> Dict[str, Any]:
    """
    Analyze code quality for every file edited in the current session.

    Falls back to analyze_recent_files() when no active session (or no
    session edits) can be found. On success returns a report dict with
    'status', 'summary', per-file 'file_reports', 'actionable_items' and
    'quality_trend' keys.
    """
    # Refresh the pattern registry first (24h cache, <100ms).
    check_and_update_patterns()

    # Locate the active session transcript unless one was supplied.
    if not session_file:
        session_file = self.find_active_session()
        if not session_file:
            return self.analyze_recent_files()

    self.current_session_id = session_file.stem

    edited_files = self.extract_edited_files(session_file)
    if not edited_files:
        logger.info("No files edited in current session, analyzing recent project files...")
        return self.analyze_recent_files()

    code_suffixes = ('.py', '.ts', '.js', '.tsx', '.jsx')
    file_reports = {}
    total_issues = 0
    total_good_patterns = 0
    quality_scores = []

    for file_path in edited_files:
        # Only source files the analyzer understands are scored.
        if not file_path.endswith(code_suffixes):
            continue
        try:
            result = self.analyzer.analyze_file(file_path)
            metrics = result['quality_metrics']

            file_reports[file_path] = {
                'quality_score': metrics['quality_score'],
                'good_patterns': metrics['good_patterns_found'],
                'issues': metrics['total_issues'],
                'recommendations': result.get('recommendations', [])[:3],  # Top 3
                'top_issues': self._get_top_issues(result)
            }

            total_issues += metrics['total_issues']
            total_good_patterns += metrics['good_patterns_found']
            quality_scores.append(metrics['quality_score'])

            # Remember the score so trend reporting can compare runs.
            self.quality_history.setdefault(file_path, []).append({
                'timestamp': datetime.now().isoformat(),
                'score': metrics['quality_score']
            })
        except Exception as e:
            logger.error(f"Failed to analyze {file_path}: {e}")

    if not file_reports:
        return {
            'status': 'no_code_files',
            'session_id': self.current_session_id,
            'message': 'No analyzable code files in session'
        }

    avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0

    return {
        'status': 'success',
        'session_id': self.current_session_id,
        'scope_label': 'Sess',  # Session scope - files edited in current session
        'timestamp': datetime.now().isoformat(),
        'summary': {
            'files_analyzed': len(file_reports),
            'avg_quality_score': round(avg_quality, 3),
            'total_issues': total_issues,
            'total_good_patterns': total_good_patterns,
            'quality_grade': self._get_quality_grade(avg_quality)
        },
        'file_reports': file_reports,
        'actionable_items': self._generate_actionable_items(file_reports),
        'quality_trend': self._calculate_quality_trend()
    }
|
|
197
|
+
|
|
198
|
+
def _get_top_issues(self, analysis_result: Dict) -> List[Dict]:
|
|
199
|
+
"""Extract top issues from analysis result."""
|
|
200
|
+
top_issues = []
|
|
201
|
+
|
|
202
|
+
for match in analysis_result.get('all_matches', []):
|
|
203
|
+
if match['quality'] == 'bad' and match['count'] > 0:
|
|
204
|
+
top_issues.append({
|
|
205
|
+
'id': match['id'],
|
|
206
|
+
'description': match['description'],
|
|
207
|
+
'count': match['count'],
|
|
208
|
+
'severity': 'high' if match['weight'] <= -3 else 'medium'
|
|
209
|
+
})
|
|
210
|
+
|
|
211
|
+
# Sort by count * weight (impact)
|
|
212
|
+
top_issues.sort(key=lambda x: x['count'], reverse=True)
|
|
213
|
+
return top_issues[:5] # Top 5 issues
|
|
214
|
+
|
|
215
|
+
def _get_quality_grade(self, score: float) -> str:
|
|
216
|
+
"""Convert quality score to letter grade."""
|
|
217
|
+
if score >= 0.9:
|
|
218
|
+
return 'A+'
|
|
219
|
+
elif score >= 0.8:
|
|
220
|
+
return 'A'
|
|
221
|
+
elif score >= 0.7:
|
|
222
|
+
return 'B'
|
|
223
|
+
elif score >= 0.6:
|
|
224
|
+
return 'C'
|
|
225
|
+
elif score >= 0.5:
|
|
226
|
+
return 'D'
|
|
227
|
+
else:
|
|
228
|
+
return 'F'
|
|
229
|
+
|
|
230
|
+
def _generate_actionable_items(self, file_reports: Dict) -> List[str]:
|
|
231
|
+
"""Generate actionable recommendations for the user."""
|
|
232
|
+
actions = []
|
|
233
|
+
|
|
234
|
+
# Collect all issues
|
|
235
|
+
all_issues = {}
|
|
236
|
+
for file_path, report in file_reports.items():
|
|
237
|
+
for issue in report.get('top_issues', []):
|
|
238
|
+
issue_id = issue['id']
|
|
239
|
+
if issue_id not in all_issues:
|
|
240
|
+
all_issues[issue_id] = {
|
|
241
|
+
'description': issue['description'],
|
|
242
|
+
'total_count': 0,
|
|
243
|
+
'files': []
|
|
244
|
+
}
|
|
245
|
+
all_issues[issue_id]['total_count'] += issue['count']
|
|
246
|
+
all_issues[issue_id]['files'].append(Path(file_path).name)
|
|
247
|
+
|
|
248
|
+
# Sort by total impact
|
|
249
|
+
sorted_issues = sorted(all_issues.items(), key=lambda x: x[1]['total_count'], reverse=True)
|
|
250
|
+
|
|
251
|
+
# Generate actions
|
|
252
|
+
for issue_id, issue_data in sorted_issues[:3]: # Top 3 issues
|
|
253
|
+
if issue_id == 'print-call':
|
|
254
|
+
actions.append(f"🔧 Replace {issue_data['total_count']} print statements with logger in {', '.join(issue_data['files'][:2])}")
|
|
255
|
+
elif issue_id == 'sync-open':
|
|
256
|
+
actions.append(f"⚡ Use async file operations ({issue_data['total_count']} sync opens found)")
|
|
257
|
+
elif issue_id == 'broad-except':
|
|
258
|
+
actions.append(f"🎯 Fix {issue_data['total_count']} bare except clauses for better error handling")
|
|
259
|
+
elif issue_id == 'no-console-log':
|
|
260
|
+
actions.append(f"🚫 Remove {issue_data['total_count']} console.log statements")
|
|
261
|
+
else:
|
|
262
|
+
actions.append(f"📝 Fix {issue_data['total_count']} instances of {issue_data['description']}")
|
|
263
|
+
|
|
264
|
+
return actions
|
|
265
|
+
|
|
266
|
+
def analyze_recent_files(self) -> Dict[str, Any]:
    """Analyze core project files when no active session can be found.

    Builds the candidate set from a fixed list of core source files plus any
    scripts/*.py modified in the last 30 minutes (excluding test_/verify_
    helpers), analyzes up to 10 of them, and returns the same report shape
    as analyze_session_quality() with session_id 'recent_files'.

    Note: a previous "security" check here compared project_root.resolve()
    against the identical expression — always true, and its unreachable
    branch returned {} (missing the 'status' key callers rely on). It has
    been removed, along with a redundant local datetime import that
    shadowed the module-level one.
    """
    project_root = Path(__file__).parent.parent

    # Core project files to analyze (deliberately excludes test files).
    core_files = [
        "scripts/session_quality_tracker.py",
        "scripts/cc-statusline-unified.py",
        "scripts/pattern_registry_enhanced.py",
        "scripts/simplified_metadata_extractor.py",
        "scripts/streaming-watcher.py",
        "scripts/quality-report.py",
        "mcp-server/src/server.py",
        "mcp-server/src/search_tools.py",
        "mcp-server/src/temporal_tools.py",
        "mcp-server/src/reflection_tools.py",
    ]

    edited_files = set()
    for file_path in core_files:
        full_path = project_root / file_path
        if full_path.exists():
            edited_files.add(str(full_path))

    # Also pick up scripts modified in the last 30 minutes to catch actual
    # work in progress.
    try:
        scripts_dir = project_root / "scripts"
        if scripts_dir.exists():
            cutoff_time = datetime.now() - timedelta(minutes=30)
            for py_file in scripts_dir.glob("*.py"):
                if py_file.stat().st_mtime > cutoff_time.timestamp():
                    # Skip test files and temporary files
                    if "test_" not in py_file.name and "verify_" not in py_file.name:
                        edited_files.add(str(py_file))
    except Exception as e:
        logger.debug(f"Error checking recent files: {e}")

    if not edited_files:
        return {
            'status': 'no_edits',
            'session_id': 'recent_files',
            'message': 'No recently modified code files'
        }

    self.current_session_id = "recent_files"
    file_reports = {}
    total_issues = 0
    total_good_patterns = 0
    quality_scores = []

    for file_path in list(edited_files)[:10]:  # Limit to 10 files for performance
        try:
            result = self.analyzer.analyze_file(file_path)
            metrics = result['quality_metrics']

            file_reports[file_path] = {
                'quality_score': metrics['quality_score'],
                'good_patterns': metrics['good_patterns_found'],
                'issues': metrics['total_issues'],
                'recommendations': result.get('recommendations', [])[:3],
                'top_issues': self._get_top_issues(result)
            }

            total_issues += metrics['total_issues']
            total_good_patterns += metrics['good_patterns_found']
            quality_scores.append(metrics['quality_score'])
        except Exception as e:
            logger.error(f"Failed to analyze {file_path}: {e}")

    if not file_reports:
        return {
            'status': 'no_code_files',
            'session_id': 'recent_files',
            'message': 'No analyzable code files'
        }

    avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0

    # Label the scope for the statusline: 'Fix' when our own tracker file is
    # in the set, 'Recent' when anything changed in the last 30 minutes,
    # otherwise the default 'Core'.
    scope_label = 'Core'
    if any('session_quality_tracker' in str(f) for f in edited_files):
        scope_label = 'Fix'
    elif any(Path(f).stat().st_mtime > (datetime.now().timestamp() - 1800) for f in edited_files if Path(f).exists()):
        scope_label = 'Recent'

    return {
        'status': 'success',
        'session_id': 'recent_files',
        'scope_label': scope_label,
        'timestamp': datetime.now().isoformat(),
        'summary': {
            'files_analyzed': len(file_reports),
            'avg_quality_score': round(avg_quality, 3),
            'total_issues': total_issues,
            'total_good_patterns': total_good_patterns,
            'quality_grade': self._get_quality_grade(avg_quality)
        },
        'file_reports': file_reports,
        'actionable_items': self._generate_actionable_items(file_reports),
        'quality_trend': self._calculate_quality_trend()
    }
|
|
376
|
+
|
|
377
|
+
def _calculate_quality_trend(self) -> str:
|
|
378
|
+
"""Calculate quality trend across session."""
|
|
379
|
+
if not self.quality_history:
|
|
380
|
+
return "No trend data"
|
|
381
|
+
|
|
382
|
+
# Look at average change
|
|
383
|
+
improvements = 0
|
|
384
|
+
degradations = 0
|
|
385
|
+
|
|
386
|
+
for file_path, history in self.quality_history.items():
|
|
387
|
+
if len(history) >= 2:
|
|
388
|
+
delta = history[-1]['score'] - history[0]['score']
|
|
389
|
+
if delta > 0.05:
|
|
390
|
+
improvements += 1
|
|
391
|
+
elif delta < -0.05:
|
|
392
|
+
degradations += 1
|
|
393
|
+
|
|
394
|
+
if improvements > degradations:
|
|
395
|
+
return f"📈 Improving ({improvements} files better)"
|
|
396
|
+
elif degradations > improvements:
|
|
397
|
+
return f"📉 Degrading ({degradations} files worse)"
|
|
398
|
+
else:
|
|
399
|
+
return "➡️ Stable"
|
|
400
|
+
|
|
401
|
+
def generate_report(self, analysis: Dict) -> str:
|
|
402
|
+
"""Generate human-readable report."""
|
|
403
|
+
if analysis['status'] != 'success':
|
|
404
|
+
return f"❌ {analysis['message']}"
|
|
405
|
+
|
|
406
|
+
summary = analysis['summary']
|
|
407
|
+
grade = summary['quality_grade']
|
|
408
|
+
emoji = '🟢' if grade in ['A+', 'A'] else '🟡' if grade in ['B', 'C'] else '🔴'
|
|
409
|
+
|
|
410
|
+
report = []
|
|
411
|
+
report.append("=" * 60)
|
|
412
|
+
report.append("SESSION CODE QUALITY REPORT")
|
|
413
|
+
report.append("=" * 60)
|
|
414
|
+
report.append(f"Session: {analysis['session_id'][:8]}...")
|
|
415
|
+
report.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
|
416
|
+
report.append("")
|
|
417
|
+
report.append(f"{emoji} Overall Grade: {grade} ({summary['avg_quality_score']:.1%})")
|
|
418
|
+
report.append(f"Files Analyzed: {summary['files_analyzed']}")
|
|
419
|
+
report.append(f"Total Issues: {summary['total_issues']}")
|
|
420
|
+
report.append(f"Good Patterns: {summary['total_good_patterns']}")
|
|
421
|
+
report.append(f"Trend: {analysis['quality_trend']}")
|
|
422
|
+
report.append("")
|
|
423
|
+
|
|
424
|
+
if analysis['actionable_items']:
|
|
425
|
+
report.append("ACTIONS NEEDED:")
|
|
426
|
+
for action in analysis['actionable_items']:
|
|
427
|
+
report.append(f" {action}")
|
|
428
|
+
report.append("")
|
|
429
|
+
|
|
430
|
+
report.append("FILE DETAILS:")
|
|
431
|
+
for file_path, file_report in analysis['file_reports'].items():
|
|
432
|
+
file_name = Path(file_path).name
|
|
433
|
+
score = file_report['quality_score']
|
|
434
|
+
emoji = '✅' if score > 0.7 else '⚠️' if score > 0.5 else '❌'
|
|
435
|
+
report.append(f" {emoji} {file_name}: {score:.1%} ({file_report['issues']} issues)")
|
|
436
|
+
|
|
437
|
+
report.append("")
|
|
438
|
+
report.append("💡 Ask Claude: 'Please fix the code quality issues in this session'")
|
|
439
|
+
report.append("=" * 60)
|
|
440
|
+
|
|
441
|
+
return '\n'.join(report)
|
|
442
|
+
|
|
443
|
+
|
|
444
|
+
def main():
    """Run session quality analysis and cache the result for the statusline."""
    tracker = SessionQualityTracker()

    logger.info("🔍 Analyzing current session code quality...")
    logger.info("")

    analysis = tracker.analyze_session_quality()
    report = tracker.generate_report(analysis)
    logger.info(report)

    # Persist the full report per-project so the watcher/statusline can read
    # it; fallback analyses are cached too.
    if analysis.get('status') not in ['success', 'fallback']:
        return

    import re
    # Whitelist-sanitize the project (cwd) name before using it as a filename.
    project_name = os.path.basename(os.getcwd())
    safe_project_name = re.sub(r'[^a-zA-Z0-9_-]', '_', project_name)[:100]

    cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
    cache_dir.mkdir(exist_ok=True, parents=True)
    report_path = cache_dir / f"{safe_project_name}.json"

    # Defense in depth: refuse to write outside the cache directory.
    if not str(report_path.resolve()).startswith(str(cache_dir.resolve())):
        logger.error(f"Security: Invalid cache path for {project_name}")
        return

    with open(report_path, 'w') as f:
        json.dump(analysis, f, indent=2)
    logger.info(f"\n📊 Full report saved to: {report_path}")


if __name__ == "__main__":
    main()
|
|
@@ -484,7 +484,20 @@ class QdrantService:
|
|
|
484
484
|
|
|
485
485
|
def __init__(self, config: Config, embedding_provider: EmbeddingProvider):
    """Create the async Qdrant client, enforcing TLS for remote hosts."""
    self.config = config

    # Security: refuse plain-HTTP URLs for non-local Qdrant hosts unless the
    # operator explicitly opted out via the require_tls_for_remote setting.
    from urllib.parse import urlparse
    parsed = urlparse(config.qdrant_url)
    host = (parsed.hostname or "").lower()

    local_hosts = ("localhost", "127.0.0.1", "qdrant")
    if config.require_tls_for_remote and host not in local_hosts and parsed.scheme != "https":
        raise ValueError(f"Insecure QDRANT_URL for remote host: {config.qdrant_url} (use https:// or set QDRANT_REQUIRE_TLS_FOR_REMOTE=false)")

    # Pass the API key through only when the config object carries one.
    self.client = AsyncQdrantClient(
        url=config.qdrant_url,
        api_key=config.qdrant_api_key if hasattr(config, 'qdrant_api_key') else None
    )
    self.embedding_provider = embedding_provider
    # collection name -> last-verified timestamp, used to skip re-checks.
    self._collection_cache: Dict[str, float] = {}
    # Caps concurrent Qdrant requests at the configured limit.
    self.request_semaphore = asyncio.Semaphore(config.max_concurrent_qdrant)
|
|
@@ -979,7 +992,66 @@ class StreamingWatcher:
|
|
|
979
992
|
text_parts.append(item.get('text', ''))
|
|
980
993
|
return ' '.join(text_parts)
|
|
981
994
|
return str(content) if content else ''
|
|
982
|
-
|
|
995
|
+
|
|
996
|
+
def _update_quality_cache(self, pattern_analysis: Dict, avg_score: float, project_name=None):
    """Write the per-project quality summary consumed by the statusline.

    Args:
        pattern_analysis: Analysis dict; 'files' (list of analyzed paths)
            and 'issues' (list of dicts with a 'count' key) are read if
            present.
        avg_score: Average quality score in [0, 1] for the analyzed files.
        project_name: Project to cache under; when falsy it is inferred by
            walking up from the first analyzed file to a .git directory,
            defaulting to 'unknown'. (The previous ``: str = None``
            annotation was incorrect — None is an accepted default — so the
            annotation was dropped.)

    Never raises: any failure is logged at debug level and swallowed, since
    the cache is purely cosmetic.
    """
    try:
        if not project_name:
            # Infer the project from the first analyzed file's path.
            files = pattern_analysis.get('files', [])
            if files:
                first_file = Path(files[0])
                # Walk up until a .git directory marks the repo root.
                for parent in first_file.parents:
                    if (parent / '.git').exists():
                        project_name = parent.name
                        break
                else:
                    project_name = 'unknown'
            else:
                project_name = 'unknown'

        # Sanitize project name for filename
        safe_project_name = project_name.replace('/', '-').replace(' ', '_')
        cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
        cache_dir.mkdir(exist_ok=True, parents=True)
        cache_file = cache_dir / f"{safe_project_name}.json"

        # Total issue count across all reported patterns.
        total_issues = sum(p.get('count', 0) for p in pattern_analysis.get('issues', []))

        # Grade bands are intentionally coarser than the session tracker's;
        # A+ additionally requires fewer than 10 total issues.
        if avg_score >= 0.95:
            grade = 'A+' if total_issues < 10 else 'A'
        elif avg_score >= 0.8:
            grade = 'B'
        elif avg_score >= 0.6:
            grade = 'C'
        else:
            grade = 'D'

        cache_data = {
            'status': 'success',
            'session_id': 'watcher',
            'timestamp': datetime.now().isoformat(),
            'summary': {
                'files_analyzed': len(pattern_analysis.get('files', [])),
                'avg_quality_score': round(avg_score, 3),
                'total_issues': total_issues,
                'quality_grade': grade
            }
        }

        with open(cache_file, 'w') as f:
            json.dump(cache_data, f, indent=2)

        logger.debug(f"Updated quality cache: {grade}/{total_issues}")

    except Exception as e:
        logger.debug(f"Failed to update quality cache: {e}")
|
|
983
1055
|
async def process_file(self, file_path: Path) -> bool:
|
|
984
1056
|
"""Process a single file."""
|
|
985
1057
|
try:
|
|
@@ -1034,7 +1106,70 @@ class StreamingWatcher:
|
|
|
1034
1106
|
|
|
1035
1107
|
# Extract metadata
|
|
1036
1108
|
tool_usage = extract_tool_usage_from_conversation(all_messages)
|
|
1037
|
-
|
|
1109
|
+
|
|
1110
|
+
# MANDATORY AST-GREP Analysis for HOT files
|
|
1111
|
+
pattern_analysis = {}
|
|
1112
|
+
avg_quality_score = 0.0
|
|
1113
|
+
freshness_level, _ = self.categorize_freshness(file_path)
|
|
1114
|
+
|
|
1115
|
+
# Analyze code quality for HOT files (current session)
|
|
1116
|
+
if freshness_level == FreshnessLevel.HOT and (tool_usage.get('files_edited') or tool_usage.get('files_analyzed')):
|
|
1117
|
+
try:
|
|
1118
|
+
# Import analyzer (lazy import to avoid startup overhead)
|
|
1119
|
+
from ast_grep_final_analyzer import FinalASTGrepAnalyzer
|
|
1120
|
+
from update_patterns import check_and_update_patterns
|
|
1121
|
+
|
|
1122
|
+
# Update patterns (24h cache, <100ms)
|
|
1123
|
+
check_and_update_patterns()
|
|
1124
|
+
|
|
1125
|
+
# Create analyzer
|
|
1126
|
+
if not hasattr(self, '_ast_analyzer'):
|
|
1127
|
+
self._ast_analyzer = FinalASTGrepAnalyzer()
|
|
1128
|
+
|
|
1129
|
+
# Analyze edited files from this session
|
|
1130
|
+
files_to_analyze = list(set(
|
|
1131
|
+
tool_usage.get('files_edited', [])[:5] +
|
|
1132
|
+
tool_usage.get('files_analyzed', [])[:5]
|
|
1133
|
+
))
|
|
1134
|
+
|
|
1135
|
+
quality_scores = []
|
|
1136
|
+
for file_ref in files_to_analyze:
|
|
1137
|
+
if file_ref and any(file_ref.endswith(ext) for ext in ['.py', '.ts', '.js', '.tsx', '.jsx']):
|
|
1138
|
+
try:
|
|
1139
|
+
if os.path.exists(file_ref):
|
|
1140
|
+
result = self._ast_analyzer.analyze_file(file_ref)
|
|
1141
|
+
metrics = result['quality_metrics']
|
|
1142
|
+
pattern_analysis[file_ref] = {
|
|
1143
|
+
'score': metrics['quality_score'],
|
|
1144
|
+
'good_patterns': metrics['good_patterns_found'],
|
|
1145
|
+
'bad_patterns': metrics['bad_patterns_found'],
|
|
1146
|
+
'issues': metrics['total_issues']
|
|
1147
|
+
}
|
|
1148
|
+
quality_scores.append(metrics['quality_score'])
|
|
1149
|
+
|
|
1150
|
+
# Log quality issues for HOT files
|
|
1151
|
+
if metrics['quality_score'] < 0.6:
|
|
1152
|
+
logger.warning(f"⚠️ Quality issue in {os.path.basename(file_ref)}: {metrics['quality_score']:.1%} ({metrics['total_issues']} issues)")
|
|
1153
|
+
except Exception as e:
|
|
1154
|
+
logger.debug(f"Could not analyze {file_ref}: {e}")
|
|
1155
|
+
|
|
1156
|
+
if quality_scores:
|
|
1157
|
+
avg_quality_score = sum(quality_scores) / len(quality_scores)
|
|
1158
|
+
logger.info(f"📊 Session quality: {avg_quality_score:.1%} for {len(quality_scores)} files")
|
|
1159
|
+
|
|
1160
|
+
# Update quality cache for statusline - watcher handles this automatically!
|
|
1161
|
+
# Pass project name from current file being processed
|
|
1162
|
+
project_name = file_path.parent.name if file_path else None
|
|
1163
|
+
self._update_quality_cache(pattern_analysis, avg_quality_score, project_name)
|
|
1164
|
+
|
|
1165
|
+
except Exception as e:
|
|
1166
|
+
logger.debug(f"AST analysis not available: {e}")
|
|
1167
|
+
|
|
1168
|
+
# Add pattern analysis to tool_usage metadata
|
|
1169
|
+
if pattern_analysis:
|
|
1170
|
+
tool_usage['pattern_analysis'] = pattern_analysis
|
|
1171
|
+
tool_usage['avg_quality_score'] = round(avg_quality_score, 3)
|
|
1172
|
+
|
|
1038
1173
|
# Build text
|
|
1039
1174
|
text_parts = []
|
|
1040
1175
|
for msg in all_messages:
|
|
@@ -1043,7 +1178,7 @@ class StreamingWatcher:
|
|
|
1043
1178
|
text = self._extract_message_text(content)
|
|
1044
1179
|
if text:
|
|
1045
1180
|
text_parts.append(f"{role}: {text}")
|
|
1046
|
-
|
|
1181
|
+
|
|
1047
1182
|
combined_text = "\n\n".join(text_parts)
|
|
1048
1183
|
if not combined_text.strip():
|
|
1049
1184
|
logger.warning(f"No textual content in {file_path}, marking as processed")
|
|
@@ -1057,7 +1192,7 @@ class StreamingWatcher:
|
|
|
1057
1192
|
}
|
|
1058
1193
|
self.stats["files_processed"] += 1
|
|
1059
1194
|
return True
|
|
1060
|
-
|
|
1195
|
+
|
|
1061
1196
|
concepts = extract_concepts(combined_text, tool_usage)
|
|
1062
1197
|
|
|
1063
1198
|
# Now we know we have content, ensure collection exists
|