claude-self-reflect 5.0.7 → 6.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/open-source-maintainer.md +1 -1
- package/.claude/agents/reflection-specialist.md +2 -2
- package/Dockerfile.async-importer +6 -4
- package/Dockerfile.importer +6 -6
- package/Dockerfile.safe-watcher +8 -8
- package/Dockerfile.streaming-importer +8 -1
- package/Dockerfile.watcher +8 -16
- package/README.md +0 -3
- package/docker-compose.yaml +2 -6
- package/installer/.claude/agents/README.md +138 -0
- package/package.json +5 -26
- package/src/__init__.py +0 -0
- package/src/cli/__init__.py +0 -0
- package/src/runtime/__init__.py +0 -0
- package/src/runtime/import-latest.py +124 -0
- package/{scripts → src/runtime}/precompact-hook.sh +1 -1
- package/src/runtime/streaming-importer.py +995 -0
- package/{scripts → src/runtime}/watcher-loop.sh +1 -1
- package/.claude/agents/claude-self-reflect-test.md +0 -1274
- package/.claude/agents/reflect-tester.md +0 -300
- package/scripts/add-timestamp-indexes.py +0 -134
- package/scripts/ast_grep_final_analyzer.py +0 -338
- package/scripts/ast_grep_unified_registry.py +0 -710
- package/scripts/check-collections.py +0 -29
- package/scripts/debug-august-parsing.py +0 -80
- package/scripts/debug-import-single.py +0 -91
- package/scripts/debug-project-resolver.py +0 -82
- package/scripts/debug-temporal-tools.py +0 -135
- package/scripts/import-conversations-enhanced.py +0 -672
- package/scripts/migrate-to-unified-state.py +0 -426
- package/scripts/session_quality_tracker.py +0 -671
- package/scripts/update_patterns.py +0 -334
- /package/{scripts → src}/importer/__init__.py +0 -0
- /package/{scripts → src}/importer/__main__.py +0 -0
- /package/{scripts → src}/importer/core/__init__.py +0 -0
- /package/{scripts → src}/importer/core/config.py +0 -0
- /package/{scripts → src}/importer/core/exceptions.py +0 -0
- /package/{scripts → src}/importer/core/models.py +0 -0
- /package/{scripts → src}/importer/embeddings/__init__.py +0 -0
- /package/{scripts → src}/importer/embeddings/base.py +0 -0
- /package/{scripts → src}/importer/embeddings/fastembed_provider.py +0 -0
- /package/{scripts → src}/importer/embeddings/validator.py +0 -0
- /package/{scripts → src}/importer/embeddings/voyage_provider.py +0 -0
- /package/{scripts → src}/importer/main.py +0 -0
- /package/{scripts → src}/importer/processors/__init__.py +0 -0
- /package/{scripts → src}/importer/processors/ast_extractor.py +0 -0
- /package/{scripts → src}/importer/processors/chunker.py +0 -0
- /package/{scripts → src}/importer/processors/concept_extractor.py +0 -0
- /package/{scripts → src}/importer/processors/conversation_parser.py +0 -0
- /package/{scripts → src}/importer/processors/tool_extractor.py +0 -0
- /package/{scripts → src}/importer/state/__init__.py +0 -0
- /package/{scripts → src}/importer/state/state_manager.py +0 -0
- /package/{scripts → src}/importer/storage/__init__.py +0 -0
- /package/{scripts → src}/importer/storage/qdrant_storage.py +0 -0
- /package/{scripts → src}/importer/utils/__init__.py +0 -0
- /package/{scripts → src}/importer/utils/logger.py +0 -0
- /package/{scripts → src}/importer/utils/project_normalizer.py +0 -0
- /package/{scripts → src/runtime}/delta-metadata-update-safe.py +0 -0
- /package/{scripts → src/runtime}/delta-metadata-update.py +0 -0
- /package/{scripts → src/runtime}/doctor.py +0 -0
- /package/{scripts → src/runtime}/embedding_service.py +0 -0
- /package/{scripts → src/runtime}/force-metadata-recovery.py +0 -0
- /package/{scripts → src/runtime}/import-conversations-unified.py +0 -0
- /package/{scripts → src/runtime}/import_strategies.py +0 -0
- /package/{scripts → src/runtime}/message_processors.py +0 -0
- /package/{scripts → src/runtime}/metadata_extractor.py +0 -0
- /package/{scripts → src/runtime}/streaming-watcher.py +0 -0
- /package/{scripts → src/runtime}/unified_state_manager.py +0 -0
- /package/{scripts → src/runtime}/utils.py +0 -0
|
@@ -1,671 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Session Quality Tracker
|
|
4
|
-
Analyzes code quality for files edited in the current Claude session.
|
|
5
|
-
MANDATORY AST-GREP analysis for real-time quality feedback.
|
|
6
|
-
"""
|
|
7
|
-
|
|
8
|
-
import json
|
|
9
|
-
import os
|
|
10
|
-
from pathlib import Path
|
|
11
|
-
from datetime import datetime, timedelta
|
|
12
|
-
from typing import Dict, List, Any, Optional, Set
|
|
13
|
-
import logging
|
|
14
|
-
import sys
|
|
15
|
-
|
|
16
|
-
# Setup logger
|
|
17
|
-
logger = logging.getLogger(__name__)
|
|
18
|
-
logging.basicConfig(level=logging.INFO, format='%(message)s')
|
|
19
|
-
|
|
20
|
-
# Add scripts directory to path
|
|
21
|
-
sys.path.append(str(Path(__file__).parent))
|
|
22
|
-
|
|
23
|
-
from ast_grep_final_analyzer import FinalASTGrepAnalyzer
|
|
24
|
-
from update_patterns import check_and_update_patterns
|
|
25
|
-
|
|
26
|
-
logging.basicConfig(level=logging.INFO)
|
|
27
|
-
logger = logging.getLogger(__name__)
|
|
28
|
-
|
|
29
|
-
class SessionQualityTracker:
|
|
30
|
-
"""
|
|
31
|
-
Tracks code quality for the current Claude session.
|
|
32
|
-
Provides real-time feedback on code quality issues.
|
|
33
|
-
"""
|
|
34
|
-
|
|
35
|
-
def __init__(self):
    """Set up the AST-grep analyzer and empty per-session tracking state."""
    # Analyzer instance used for every per-file quality scan.
    self.analyzer = FinalASTGrepAnalyzer()
    # Paths of files touched during the tracked session.
    self.session_files = set()
    # Maps file path -> list of {'timestamp', 'score'} history entries.
    self.quality_history = {}
    # Stem of the active session's JSONL file; assigned lazily.
    self.current_session_id = None
|
|
40
|
-
|
|
41
|
-
def find_active_session(self, project_path: Optional[str] = None) -> Optional[Path]:
    """Return the most recently modified session JSONL (last 2 hours), or None."""
    base = Path.home() / ".claude" / "projects"

    if project_path:
        # Claude stores each project under a dash-separated directory name.
        project_dir = base / project_path.replace('/', '-')
    else:
        # Default to the claude-self-reflect project directory.
        project_dir = base / "-Users-ramakrishnanannaswamy-projects-claude-self-reflect"

    if not project_dir.exists():
        logger.warning(f"Project directory not found: {project_dir}")
        return None

    # Only sessions touched within the last two hours count as "active".
    threshold = (datetime.now() - timedelta(hours=2)).timestamp()
    candidates = [
        (candidate, candidate.stat().st_mtime)
        for candidate in project_dir.glob("*.jsonl")
        if candidate.stat().st_mtime > threshold
    ]
    if not candidates:
        return None

    # The newest file wins.
    return max(candidates, key=lambda pair: pair[1])[0]
|
|
70
|
-
|
|
71
|
-
def extract_edited_files(self, session_file: Path) -> Set[str]:
    """Collect paths of existing files modified by editing tools in a session log."""
    # Tool invocations that represent a file modification.
    editing_tools = {'Edit', 'Write', 'MultiEdit', 'NotebookEdit'}
    edited = set()

    try:
        with open(session_file, 'r') as handle:
            for raw_line in handle:
                try:
                    record = json.loads(raw_line.strip())

                    # Only assistant messages carry tool_use blocks.
                    for message in record.get('messages', []):
                        if message.get('role') != 'assistant' or 'content' not in message:
                            continue
                        body = message['content']
                        # Content may be a plain string or a list of blocks;
                        # only the list form can hold tool calls.
                        if not isinstance(body, list):
                            continue
                        for block in body:
                            if not (isinstance(block, dict) and block.get('type') == 'tool_use'):
                                continue
                            if block.get('name', '') not in editing_tools:
                                continue
                            target = block.get('input', {}).get('file_path')
                            # Only record paths that still exist on disk.
                            if target and os.path.exists(target):
                                edited.add(target)
                except json.JSONDecodeError:
                    # Partial/corrupt lines are expected in live JSONL logs.
                    continue
                except Exception as exc:
                    logger.debug(f"Error processing line: {exc}")
    except Exception as exc:
        logger.error(f"Error reading session file: {exc}")

    return edited
|
|
108
|
-
|
|
109
|
-
def analyze_session_quality(self, session_file: Optional[Path] = None, use_tracker: bool = False) -> Dict[str, Any]:
    """
    Analyze code quality for all files edited in the current session.

    Resolution order:
      1. The session edit tracker file (when requested or no session given).
      2. The provided / most recently active session JSONL file.
      3. Fallback: recently modified project files.

    Returns a report dict with a summary, per-file reports, actionable
    items, and a quality trend.
    """
    # Refresh AST-grep patterns (cached; <100ms).
    check_and_update_patterns()

    # Priority mode: a tracker file records edits for the live session.
    if use_tracker or not session_file:
        tracker_file = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
        if tracker_file.exists():
            try:
                with open(tracker_file, 'r') as f:
                    tracker_data = json.load(f)
                edited_files = set(tracker_data.get('edited_files', []))
                if edited_files:
                    logger.info(f"Using session tracker: {len(edited_files)} files edited in session")
                    self.current_session_id = 'active_session'
                    # 'Session' labels edits captured by the tracker.
                    return self._analyze_files_with_scope(edited_files, scope_label='Session')
            except Exception as e:
                logger.debug(f"Error reading tracker file: {e}")

    # Locate the active session JSONL when none was supplied.
    if not session_file:
        session_file = self.find_active_session()
        if not session_file:
            # No active session at all: analyze recent project files instead.
            return self.analyze_recent_files()

    self.current_session_id = session_file.stem

    edited_files = self.extract_edited_files(session_file)
    if not edited_files:
        logger.info("No files edited in current session, analyzing recent project files...")
        return self.analyze_recent_files()

    # Delegate to the shared analysis helper instead of duplicating its
    # per-file loop here (the original inline copy was identical).
    # 'Sess' labels files edited in the current session.
    return self._analyze_files_with_scope(edited_files, scope_label='Sess')
|
|
213
|
-
|
|
214
|
-
def _get_top_issues(self, analysis_result: Dict) -> List[Dict]:
|
|
215
|
-
"""Extract top issues from analysis result."""
|
|
216
|
-
top_issues = []
|
|
217
|
-
|
|
218
|
-
for match in analysis_result.get('all_matches', []):
|
|
219
|
-
if match['quality'] == 'bad' and match['count'] > 0:
|
|
220
|
-
top_issues.append({
|
|
221
|
-
'id': match['id'],
|
|
222
|
-
'description': match['description'],
|
|
223
|
-
'count': match['count'],
|
|
224
|
-
'severity': 'high' if match['weight'] <= -3 else 'medium'
|
|
225
|
-
})
|
|
226
|
-
|
|
227
|
-
# Sort by count * weight (impact)
|
|
228
|
-
top_issues.sort(key=lambda x: x['count'], reverse=True)
|
|
229
|
-
return top_issues[:5] # Top 5 issues
|
|
230
|
-
|
|
231
|
-
def _get_quality_grade(self, score: float, total_issues: int = 0) -> str:
|
|
232
|
-
"""
|
|
233
|
-
Convert quality score to letter grade.
|
|
234
|
-
Based on consensus: issues should dominate grading.
|
|
235
|
-
|
|
236
|
-
Grade boundaries (adjusted for issue count):
|
|
237
|
-
- A+: score >= 0.97 AND issues <= 5
|
|
238
|
-
- A: score >= 0.93 AND issues <= 20
|
|
239
|
-
- B: score >= 0.83 AND issues <= 50
|
|
240
|
-
- C: score >= 0.73 AND issues <= 100
|
|
241
|
-
- D: score >= 0.60
|
|
242
|
-
- F: score < 0.60
|
|
243
|
-
"""
|
|
244
|
-
# Hard caps based on issue count (industry standard)
|
|
245
|
-
if total_issues > 200:
|
|
246
|
-
return 'F'
|
|
247
|
-
elif total_issues > 100:
|
|
248
|
-
# Many issues - max grade is C
|
|
249
|
-
if score >= 0.77:
|
|
250
|
-
return 'C+'
|
|
251
|
-
elif score >= 0.73:
|
|
252
|
-
return 'C'
|
|
253
|
-
elif score >= 0.70:
|
|
254
|
-
return 'C-'
|
|
255
|
-
elif score >= 0.60:
|
|
256
|
-
return 'D'
|
|
257
|
-
else:
|
|
258
|
-
return 'F'
|
|
259
|
-
elif total_issues > 50:
|
|
260
|
-
# Moderate issues - max grade is B
|
|
261
|
-
if score >= 0.87:
|
|
262
|
-
return 'B+'
|
|
263
|
-
elif score >= 0.83:
|
|
264
|
-
return 'B'
|
|
265
|
-
elif score >= 0.80:
|
|
266
|
-
return 'B-'
|
|
267
|
-
elif score >= 0.73:
|
|
268
|
-
return 'C'
|
|
269
|
-
elif score >= 0.60:
|
|
270
|
-
return 'D'
|
|
271
|
-
else:
|
|
272
|
-
return 'F'
|
|
273
|
-
elif total_issues > 20:
|
|
274
|
-
# Some issues - max grade is A-
|
|
275
|
-
if score >= 0.90:
|
|
276
|
-
return 'A-'
|
|
277
|
-
elif score >= 0.87:
|
|
278
|
-
return 'B+'
|
|
279
|
-
elif score >= 0.83:
|
|
280
|
-
return 'B'
|
|
281
|
-
elif score >= 0.73:
|
|
282
|
-
return 'C'
|
|
283
|
-
elif score >= 0.60:
|
|
284
|
-
return 'D'
|
|
285
|
-
else:
|
|
286
|
-
return 'F'
|
|
287
|
-
elif total_issues > 5:
|
|
288
|
-
# Few issues - max grade is A
|
|
289
|
-
if score >= 0.93:
|
|
290
|
-
return 'A'
|
|
291
|
-
elif score >= 0.90:
|
|
292
|
-
return 'A-'
|
|
293
|
-
elif score >= 0.83:
|
|
294
|
-
return 'B'
|
|
295
|
-
elif score >= 0.73:
|
|
296
|
-
return 'C'
|
|
297
|
-
elif score >= 0.60:
|
|
298
|
-
return 'D'
|
|
299
|
-
else:
|
|
300
|
-
return 'F'
|
|
301
|
-
else:
|
|
302
|
-
# Very few issues (0-5) - can achieve A+
|
|
303
|
-
if score >= 0.97:
|
|
304
|
-
return 'A+'
|
|
305
|
-
elif score >= 0.93:
|
|
306
|
-
return 'A'
|
|
307
|
-
elif score >= 0.90:
|
|
308
|
-
return 'A-'
|
|
309
|
-
elif score >= 0.83:
|
|
310
|
-
return 'B'
|
|
311
|
-
elif score >= 0.73:
|
|
312
|
-
return 'C'
|
|
313
|
-
elif score >= 0.60:
|
|
314
|
-
return 'D'
|
|
315
|
-
else:
|
|
316
|
-
return 'F'
|
|
317
|
-
|
|
318
|
-
def _generate_actionable_items(self, file_reports: Dict) -> List[str]:
|
|
319
|
-
"""Generate actionable recommendations for the user."""
|
|
320
|
-
actions = []
|
|
321
|
-
|
|
322
|
-
# Collect all issues
|
|
323
|
-
all_issues = {}
|
|
324
|
-
for file_path, report in file_reports.items():
|
|
325
|
-
for issue in report.get('top_issues', []):
|
|
326
|
-
issue_id = issue['id']
|
|
327
|
-
if issue_id not in all_issues:
|
|
328
|
-
all_issues[issue_id] = {
|
|
329
|
-
'description': issue['description'],
|
|
330
|
-
'total_count': 0,
|
|
331
|
-
'files': []
|
|
332
|
-
}
|
|
333
|
-
all_issues[issue_id]['total_count'] += issue['count']
|
|
334
|
-
all_issues[issue_id]['files'].append(Path(file_path).name)
|
|
335
|
-
|
|
336
|
-
# Sort by total impact
|
|
337
|
-
sorted_issues = sorted(all_issues.items(), key=lambda x: x[1]['total_count'], reverse=True)
|
|
338
|
-
|
|
339
|
-
# Generate actions
|
|
340
|
-
for issue_id, issue_data in sorted_issues[:3]: # Top 3 issues
|
|
341
|
-
if issue_id == 'print-call':
|
|
342
|
-
actions.append(f"🔧 Replace {issue_data['total_count']} print statements with logger in {', '.join(issue_data['files'][:2])}")
|
|
343
|
-
elif issue_id == 'sync-open':
|
|
344
|
-
actions.append(f"⚡ Use async file operations ({issue_data['total_count']} sync opens found)")
|
|
345
|
-
elif issue_id == 'broad-except':
|
|
346
|
-
actions.append(f"🎯 Fix {issue_data['total_count']} bare except clauses for better error handling")
|
|
347
|
-
elif issue_id == 'no-console-log':
|
|
348
|
-
actions.append(f"🚫 Remove {issue_data['total_count']} console.log statements")
|
|
349
|
-
else:
|
|
350
|
-
actions.append(f"📝 Fix {issue_data['total_count']} instances of {issue_data['description']}")
|
|
351
|
-
|
|
352
|
-
return actions
|
|
353
|
-
|
|
354
|
-
def _analyze_files_with_scope(self, edited_files: set, scope_label: str = 'Session') -> Dict[str, Any]:
    """
    Run quality analysis over a set of files and tag the report with a
    scope label. Shared by session tracking and fallback modes.
    """
    code_suffixes = ('.py', '.ts', '.js', '.tsx', '.jsx')
    file_reports = {}
    total_issues = 0
    total_good_patterns = 0
    quality_scores = []

    for file_path in edited_files:
        # Skip anything that is not an analyzable code file.
        if not str(file_path).endswith(code_suffixes):
            continue
        try:
            result = self.analyzer.analyze_file(file_path)
            metrics = result['quality_metrics']

            file_reports[file_path] = {
                'quality_score': metrics['quality_score'],
                'good_patterns': metrics['good_patterns_found'],
                'issues': metrics['total_issues'],
                'recommendations': result.get('recommendations', [])[:3],  # Top 3
                'top_issues': self._get_top_issues(result),
            }

            total_issues += metrics['total_issues']
            total_good_patterns += metrics['good_patterns_found']
            quality_scores.append(metrics['quality_score'])

            # Record score history so trends can be computed later.
            self.quality_history.setdefault(file_path, []).append({
                'timestamp': datetime.now().isoformat(),
                'score': metrics['quality_score'],
            })
        except Exception as e:
            logger.error(f"Failed to analyze {file_path}: {e}")

    if not file_reports:
        return {
            'status': 'no_code_files',
            'session_id': self.current_session_id,
            'scope_label': scope_label,
            'message': 'No analyzable code files in session',
        }

    avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0

    return {
        'status': 'success',
        'session_id': self.current_session_id,
        'scope_label': scope_label,
        'timestamp': datetime.now().isoformat(),
        'summary': {
            'files_analyzed': len(file_reports),
            'avg_quality_score': round(avg_quality, 3),
            'total_issues': total_issues,
            'total_good_patterns': total_good_patterns,
            'quality_grade': self._get_quality_grade(avg_quality, total_issues),
        },
        'file_reports': file_reports,
        'actionable_items': self._generate_actionable_items(file_reports),
        'quality_trend': self._calculate_quality_trend(),
    }
|
|
423
|
-
|
|
424
|
-
def analyze_recent_files(self) -> Dict[str, Any]:
    """Fallback analysis over core project files when no session is found."""
    project_root = Path.cwd()

    # Extensions that count as source code for the fallback scan.
    code_extensions = {'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.cpp', '.c',
                       '.h', '.hpp', '.rs', '.go', '.rb', '.php'}
    # Common vendored / generated directories we never want to grade.
    skip_dirs = ['venv', '.venv', 'node_modules', '.git',
                 '__pycache__', '.pytest_cache', 'dist',
                 'build', 'target', '.idea', '.vscode']

    core_files = []
    for ext in code_extensions:
        matches = [
            f for f in project_root.rglob(f'*{ext}')
            if not any(part in f.parts for part in skip_dirs)
        ]
        core_files.extend(matches[:20])  # Up to 20 files per extension
        if len(core_files) >= 50:  # Cap total for coverage vs. speed
            break

    # Normalize to relative names, then back to absolute existing paths.
    relative_names = [str(f.relative_to(project_root)) for f in core_files[:50]]

    edited_files = set()
    for name in relative_names:
        candidate = project_root / name
        if candidate.exists():
            edited_files.add(str(candidate))

    # Also pick up scripts modified in the last 30 minutes (active work).
    try:
        scripts_dir = project_root / "scripts"
        if scripts_dir.exists():
            from datetime import datetime, timedelta
            threshold = (datetime.now() - timedelta(minutes=30)).timestamp()
            for py_file in scripts_dir.glob("*.py"):
                if py_file.stat().st_mtime > threshold:
                    # Ignore test and verification scripts.
                    if "test_" not in py_file.name and "verify_" not in py_file.name:
                        edited_files.add(str(py_file))
    except Exception as e:
        logger.debug(f"Error checking recent files: {e}")

    if not edited_files:
        return {
            'status': 'no_edits',
            'session_id': 'recent_files',
            'message': 'No recently modified code files',
        }

    self.current_session_id = "recent_files"
    file_reports = {}
    total_issues = 0
    total_good_patterns = 0
    quality_scores = []

    for file_path in list(edited_files)[:50]:  # Analyze up to 50 files
        try:
            result = self.analyzer.analyze_file(file_path)
            metrics = result['quality_metrics']

            file_reports[file_path] = {
                'quality_score': metrics['quality_score'],
                'good_patterns': metrics['good_patterns_found'],
                'issues': metrics['total_issues'],
                'recommendations': result.get('recommendations', [])[:3],
                'top_issues': self._get_top_issues(result),
            }

            total_issues += metrics['total_issues']
            total_good_patterns += metrics['good_patterns_found']
            quality_scores.append(metrics['quality_score'])
        except Exception as e:
            logger.error(f"Failed to analyze {file_path}: {e}")

    if not file_reports:
        return {
            'status': 'no_code_files',
            'session_id': 'recent_files',
            'message': 'No analyzable code files',
        }

    avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0

    # Label the scope by what was actually analyzed.
    scope_label = 'Core'  # Default: core project files
    if any('session_quality_tracker' in str(f) for f in edited_files):
        scope_label = 'Fix'  # This tool's own files were just touched
    elif any(Path(f).stat().st_mtime > (datetime.now().timestamp() - 1800)
             for f in edited_files if Path(f).exists()):
        scope_label = 'Recent'  # Modified within the last 30 minutes

    return {
        'status': 'success',
        'session_id': 'recent_files',
        'scope_label': scope_label,
        'timestamp': datetime.now().isoformat(),
        'summary': {
            'files_analyzed': len(file_reports),
            'avg_quality_score': round(avg_quality, 3),
            'total_issues': total_issues,
            'total_good_patterns': total_good_patterns,
            'quality_grade': self._get_quality_grade(avg_quality, total_issues),
        },
        'file_reports': file_reports,
        'actionable_items': self._generate_actionable_items(file_reports),
        'quality_trend': self._calculate_quality_trend(),
    }
|
|
539
|
-
|
|
540
|
-
def _calculate_quality_trend(self) -> str:
|
|
541
|
-
"""Calculate quality trend across session."""
|
|
542
|
-
if not self.quality_history:
|
|
543
|
-
return "No trend data"
|
|
544
|
-
|
|
545
|
-
# Look at average change
|
|
546
|
-
improvements = 0
|
|
547
|
-
degradations = 0
|
|
548
|
-
|
|
549
|
-
for file_path, history in self.quality_history.items():
|
|
550
|
-
if len(history) >= 2:
|
|
551
|
-
delta = history[-1]['score'] - history[0]['score']
|
|
552
|
-
if delta > 0.05:
|
|
553
|
-
improvements += 1
|
|
554
|
-
elif delta < -0.05:
|
|
555
|
-
degradations += 1
|
|
556
|
-
|
|
557
|
-
if improvements > degradations:
|
|
558
|
-
return f"📈 Improving ({improvements} files better)"
|
|
559
|
-
elif degradations > improvements:
|
|
560
|
-
return f"📉 Degrading ({degradations} files worse)"
|
|
561
|
-
else:
|
|
562
|
-
return "➡️ Stable"
|
|
563
|
-
|
|
564
|
-
def generate_report(self, analysis: Dict) -> str:
    """Render an analysis dict as a human-readable text report."""
    if analysis['status'] != 'success':
        return f"❌ {analysis['message']}"

    summary = analysis['summary']
    grade = summary['quality_grade']
    # Traffic-light indicator for the overall grade.
    grade_emoji = '🟢' if grade in ['A+', 'A'] else '🟡' if grade in ['B', 'C'] else '🔴'

    lines = [
        "=" * 60,
        "SESSION CODE QUALITY REPORT",
        "=" * 60,
        f"Session: {analysis['session_id'][:8]}...",
        f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        "",
        f"{grade_emoji} Overall Grade: {grade} ({summary['avg_quality_score']:.1%})",
        f"Files Analyzed: {summary['files_analyzed']}",
        f"Total Issues: {summary['total_issues']}",
        f"Good Patterns: {summary['total_good_patterns']}",
        f"Trend: {analysis['quality_trend']}",
        "",
    ]

    if analysis['actionable_items']:
        lines.append("ACTIONS NEEDED:")
        for action in analysis['actionable_items']:
            lines.append(f"  {action}")
        lines.append("")

    lines.append("FILE DETAILS:")
    for file_path, file_report in analysis['file_reports'].items():
        score = file_report['quality_score']
        status_emoji = '✅' if score > 0.7 else '⚠️' if score > 0.5 else '❌'
        lines.append(f"  {status_emoji} {Path(file_path).name}: {score:.1%} ({file_report['issues']} issues)")

    lines.append("")
    lines.append("💡 Ask Claude: 'Please fix the code quality issues in this session'")
    lines.append("=" * 60)

    return '\n'.join(lines)
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
def main(use_tracker=False):
    """Run the session quality analysis and cache the report per project."""
    tracker = SessionQualityTracker()

    logger.info("🔍 Analyzing current session code quality...")
    logger.info("")

    analysis = tracker.analyze_session_quality(use_tracker=use_tracker)
    logger.info(tracker.generate_report(analysis))

    # Persist the full report per project so the watcher can pick it up.
    # The cache is written even when the fallback analysis ran.
    if analysis.get('status') in ['success', 'fallback']:
        # Project name comes from the environment, else the working directory.
        project_name = os.environ.get('QUALITY_PROJECT_NAME', os.path.basename(os.getcwd()))
        # Whitelist sanitization keeps the cache filename safe.
        import re
        safe_project_name = re.sub(r'[^a-zA-Z0-9_-]', '_', project_name)[:100]

        cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
        cache_dir.mkdir(exist_ok=True, parents=True)
        report_path = cache_dir / f"{safe_project_name}.json"

        # Defense in depth: refuse any path that escapes the cache directory.
        if not str(report_path.resolve()).startswith(str(cache_dir.resolve())):
            logger.error(f"Security: Invalid cache path for {project_name}")
            return

        with open(report_path, 'w') as f:
            json.dump(analysis, f, indent=2)
        logger.info(f"\n📊 Full report saved to: {report_path}")
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Analyze code quality for projects')
    parser.add_argument('--project-path', help='Path to the project to analyze')
    parser.add_argument('--project-name', help='Name of the project for cache file')
    parser.add_argument('--use-tracker', action='store_true',
                        help='Use session edit tracker for analysis')
    parser.add_argument('--update-cache-only', action='store_true',
                        help='Only update cache without printing report')
    args = parser.parse_args()

    # Analyze an external project by switching the working directory.
    if args.project_path:
        os.chdir(args.project_path)

    # main() reads this env var when naming the per-project cache file.
    if args.project_name:
        os.environ['QUALITY_PROJECT_NAME'] = args.project_name

    if args.update_cache_only:
        # Suppress all console output. The streams stay redirected for the
        # remainder of the process, so the handles are deliberately never
        # closed. (os and sys are already imported at module level; the
        # redundant re-imports that used to live here were removed.)
        sys.stdout = open(os.devnull, 'w')
        sys.stderr = open(os.devnull, 'w')

    main(use_tracker=args.use_tracker)
|