claude-self-reflect 3.2.4 → 3.3.1
This diff represents the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/.claude/agents/claude-self-reflect-test.md +992 -510
- package/.claude/agents/reflection-specialist.md +59 -3
- package/README.md +14 -5
- package/installer/cli.js +16 -0
- package/installer/postinstall.js +14 -0
- package/installer/statusline-setup.js +289 -0
- package/mcp-server/run-mcp.sh +73 -5
- package/mcp-server/src/app_context.py +64 -0
- package/mcp-server/src/config.py +57 -0
- package/mcp-server/src/connection_pool.py +286 -0
- package/mcp-server/src/decay_manager.py +106 -0
- package/mcp-server/src/embedding_manager.py +64 -40
- package/mcp-server/src/embeddings_old.py +141 -0
- package/mcp-server/src/models.py +64 -0
- package/mcp-server/src/parallel_search.py +305 -0
- package/mcp-server/src/project_resolver.py +5 -0
- package/mcp-server/src/reflection_tools.py +211 -0
- package/mcp-server/src/rich_formatting.py +196 -0
- package/mcp-server/src/search_tools.py +874 -0
- package/mcp-server/src/server.py +127 -1720
- package/mcp-server/src/temporal_design.py +132 -0
- package/mcp-server/src/temporal_tools.py +604 -0
- package/mcp-server/src/temporal_utils.py +384 -0
- package/mcp-server/src/utils.py +150 -67
- package/package.json +15 -1
- package/scripts/add-timestamp-indexes.py +134 -0
- package/scripts/ast_grep_final_analyzer.py +325 -0
- package/scripts/ast_grep_unified_registry.py +556 -0
- package/scripts/check-collections.py +29 -0
- package/scripts/csr-status +366 -0
- package/scripts/debug-august-parsing.py +76 -0
- package/scripts/debug-import-single.py +91 -0
- package/scripts/debug-project-resolver.py +82 -0
- package/scripts/debug-temporal-tools.py +135 -0
- package/scripts/delta-metadata-update.py +547 -0
- package/scripts/import-conversations-unified.py +157 -25
- package/scripts/precompact-hook.sh +33 -0
- package/scripts/session_quality_tracker.py +481 -0
- package/scripts/streaming-watcher.py +1578 -0
- package/scripts/update_patterns.py +334 -0
- package/scripts/utils.py +39 -0
package/scripts/session_quality_tracker.py (new file)

@@ -0,0 +1,481 @@

```python
#!/usr/bin/env python3
"""
Session Quality Tracker
Analyzes code quality for files edited in the current Claude session.
MANDATORY AST-GREP analysis for real-time quality feedback.
"""

import json
import os
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Set
import logging
import sys

# Setup logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(message)s')

# Add scripts directory to path
sys.path.append(str(Path(__file__).parent))

from ast_grep_final_analyzer import FinalASTGrepAnalyzer
from update_patterns import check_and_update_patterns

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class SessionQualityTracker:
    """
    Tracks code quality for the current Claude session.
    Provides real-time feedback on code quality issues.
    """

    def __init__(self):
        self.analyzer = FinalASTGrepAnalyzer()
        self.session_files = set()
        self.quality_history = {}
        self.current_session_id = None

    def find_active_session(self, project_path: Optional[str] = None) -> Optional[Path]:
        """Find the most recently modified JSONL file (active session)."""
        claude_projects = Path.home() / ".claude" / "projects"

        if project_path:
            # Look for specific project
            project_dir = claude_projects / project_path.replace('/', '-')
        else:
            # Look for claude-self-reflect project
            project_dir = claude_projects / "-Users-ramakrishnanannaswamy-projects-claude-self-reflect"

        if not project_dir.exists():
            logger.warning(f"Project directory not found: {project_dir}")
            return None

        # Find most recent JSONL file (modified in last 2 hours)
        recent_files = []
        cutoff_time = datetime.now() - timedelta(hours=2)

        for jsonl_file in project_dir.glob("*.jsonl"):
            if jsonl_file.stat().st_mtime > cutoff_time.timestamp():
                recent_files.append((jsonl_file, jsonl_file.stat().st_mtime))

        if not recent_files:
            return None

        # Return most recent file
        recent_files.sort(key=lambda x: x[1], reverse=True)
        return recent_files[0][0]

    def extract_edited_files(self, session_file: Path) -> Set[str]:
        """Extract files edited in the current session."""
        edited_files = set()

        try:
            with open(session_file, 'r') as f:
                for line in f:
                    try:
                        entry = json.loads(line.strip())

                        # Look for messages with tool usage
                        if 'messages' in entry:
                            for msg in entry['messages']:
                                if msg.get('role') == 'assistant' and 'content' in msg:
                                    content = msg['content']

                                    # Handle different content formats
                                    if isinstance(content, list):
                                        for item in content:
                                            if isinstance(item, dict) and item.get('type') == 'tool_use':
                                                tool_name = item.get('name', '')
                                                if tool_name in ['Edit', 'Write', 'MultiEdit', 'NotebookEdit']:
                                                    input_data = item.get('input', {})
                                                    if 'file_path' in input_data:
                                                        file_path = input_data['file_path']
                                                        if file_path and os.path.exists(file_path):
                                                            edited_files.add(file_path)

                    except json.JSONDecodeError:
                        continue
                    except Exception as e:
                        logger.debug(f"Error processing line: {e}")

        except Exception as e:
            logger.error(f"Error reading session file: {e}")

        return edited_files

    def analyze_session_quality(self, session_file: Optional[Path] = None) -> Dict[str, Any]:
        """
        Analyze code quality for all files edited in current session.
        Returns quality report with actionable insights.
        """
        # Update patterns (uses cache, <100ms)
        check_and_update_patterns()

        # Find active session if not provided
        if not session_file:
            session_file = self.find_active_session()
            if not session_file:
                # Fallback: analyze recently modified files in project
                return self.analyze_recent_files()

        self.current_session_id = session_file.stem

        # Extract edited files
        edited_files = self.extract_edited_files(session_file)

        if not edited_files:
            # Fallback to analyzing recent files when no edits in session
            logger.info("No files edited in current session, analyzing recent project files...")
            return self.analyze_recent_files()

        # Analyze each edited file
        file_reports = {}
        total_issues = 0
        total_good_patterns = 0
        quality_scores = []

        for file_path in edited_files:
            # Only analyze code files
            if any(file_path.endswith(ext) for ext in ['.py', '.ts', '.js', '.tsx', '.jsx']):
                try:
                    result = self.analyzer.analyze_file(file_path)
                    metrics = result['quality_metrics']

                    file_reports[file_path] = {
                        'quality_score': metrics['quality_score'],
                        'good_patterns': metrics['good_patterns_found'],
                        'issues': metrics['total_issues'],
                        'recommendations': result.get('recommendations', [])[:3],  # Top 3
                        'top_issues': self._get_top_issues(result)
                    }

                    total_issues += metrics['total_issues']
                    total_good_patterns += metrics['good_patterns_found']
                    quality_scores.append(metrics['quality_score'])

                    # Track quality history
                    if file_path not in self.quality_history:
                        self.quality_history[file_path] = []
                    self.quality_history[file_path].append({
                        'timestamp': datetime.now().isoformat(),
                        'score': metrics['quality_score']
                    })

                except Exception as e:
                    logger.error(f"Failed to analyze {file_path}: {e}")

        if not file_reports:
            return {
                'status': 'no_code_files',
                'session_id': self.current_session_id,
                'message': 'No analyzable code files in session'
            }

        # Calculate session average
        avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0

        # Generate session report
        return {
            'status': 'success',
            'session_id': self.current_session_id,
            'scope_label': 'Sess',  # Session scope - files edited in current session
            'timestamp': datetime.now().isoformat(),
            'summary': {
                'files_analyzed': len(file_reports),
                'avg_quality_score': round(avg_quality, 3),
                'total_issues': total_issues,
                'total_good_patterns': total_good_patterns,
                'quality_grade': self._get_quality_grade(avg_quality)
            },
            'file_reports': file_reports,
            'actionable_items': self._generate_actionable_items(file_reports),
            'quality_trend': self._calculate_quality_trend()
        }

    def _get_top_issues(self, analysis_result: Dict) -> List[Dict]:
        """Extract top issues from analysis result."""
        top_issues = []

        for match in analysis_result.get('all_matches', []):
            if match['quality'] == 'bad' and match['count'] > 0:
                top_issues.append({
                    'id': match['id'],
                    'description': match['description'],
                    'count': match['count'],
                    'severity': 'high' if match['weight'] <= -3 else 'medium'
                })

        # Sort by count * weight (impact)
        top_issues.sort(key=lambda x: x['count'], reverse=True)
        return top_issues[:5]  # Top 5 issues

    def _get_quality_grade(self, score: float) -> str:
        """Convert quality score to letter grade."""
        if score >= 0.9:
            return 'A+'
        elif score >= 0.8:
            return 'A'
        elif score >= 0.7:
            return 'B'
        elif score >= 0.6:
            return 'C'
        elif score >= 0.5:
            return 'D'
        else:
            return 'F'

    def _generate_actionable_items(self, file_reports: Dict) -> List[str]:
        """Generate actionable recommendations for the user."""
        actions = []

        # Collect all issues
        all_issues = {}
        for file_path, report in file_reports.items():
            for issue in report.get('top_issues', []):
                issue_id = issue['id']
                if issue_id not in all_issues:
                    all_issues[issue_id] = {
                        'description': issue['description'],
                        'total_count': 0,
                        'files': []
                    }
                all_issues[issue_id]['total_count'] += issue['count']
                all_issues[issue_id]['files'].append(Path(file_path).name)

        # Sort by total impact
        sorted_issues = sorted(all_issues.items(), key=lambda x: x[1]['total_count'], reverse=True)

        # Generate actions
        for issue_id, issue_data in sorted_issues[:3]:  # Top 3 issues
            if issue_id == 'print-call':
                actions.append(f"🔧 Replace {issue_data['total_count']} print statements with logger in {', '.join(issue_data['files'][:2])}")
            elif issue_id == 'sync-open':
                actions.append(f"⚡ Use async file operations ({issue_data['total_count']} sync opens found)")
            elif issue_id == 'broad-except':
                actions.append(f"🎯 Fix {issue_data['total_count']} bare except clauses for better error handling")
            elif issue_id == 'no-console-log':
                actions.append(f"🚫 Remove {issue_data['total_count']} console.log statements")
            else:
                actions.append(f"📝 Fix {issue_data['total_count']} instances of {issue_data['description']}")

        return actions

    def analyze_recent_files(self) -> Dict[str, Any]:
        """Analyze core project files when no session is found."""
        project_root = Path(__file__).parent.parent

        # Define core project files to analyze (not test files)
        core_files = [
            "scripts/session_quality_tracker.py",
            "scripts/cc-statusline-unified.py",
            "scripts/pattern_registry_enhanced.py",
            "scripts/simplified_metadata_extractor.py",
            "scripts/streaming-watcher.py",
            "scripts/quality-report.py",
            "mcp-server/src/server.py",
            "mcp-server/src/search_tools.py",
            "mcp-server/src/temporal_tools.py",
            "mcp-server/src/reflection_tools.py",
        ]

        edited_files = set()
        for file_path in core_files:
            full_path = project_root / file_path
            if full_path.exists():
                edited_files.add(str(full_path))

        # Also check for recently modified files (last 30 minutes) to catch actual work
        try:
            # Validate project_root is within expected bounds
            if not str(project_root.resolve()).startswith(str(Path(__file__).parent.parent.resolve())):
                logger.error("Security: Invalid project root path")
                return {}

            # Use pathlib instead of subprocess for safer file discovery
            scripts_dir = project_root / "scripts"
            if scripts_dir.exists():
                from datetime import datetime, timedelta
                cutoff_time = datetime.now() - timedelta(minutes=30)
                for py_file in scripts_dir.glob("*.py"):
                    if py_file.stat().st_mtime > cutoff_time.timestamp():
                        # Skip test files and temporary files
                        if "test_" not in py_file.name and "verify_" not in py_file.name:
                            edited_files.add(str(py_file))
        except Exception as e:
            logger.debug(f"Error checking recent files: {e}")

        if not edited_files:
            return {
                'status': 'no_edits',
                'session_id': 'recent_files',
                'message': 'No recently modified code files'
            }

        # Analyze the files
        self.current_session_id = "recent_files"
        file_reports = {}
        total_issues = 0
        total_good_patterns = 0
        quality_scores = []

        for file_path in list(edited_files)[:10]:  # Limit to 10 files for performance
            try:
                result = self.analyzer.analyze_file(file_path)
                metrics = result['quality_metrics']

                file_reports[file_path] = {
                    'quality_score': metrics['quality_score'],
                    'good_patterns': metrics['good_patterns_found'],
                    'issues': metrics['total_issues'],
                    'recommendations': result.get('recommendations', [])[:3],
                    'top_issues': self._get_top_issues(result)
                }

                total_issues += metrics['total_issues']
                total_good_patterns += metrics['good_patterns_found']
                quality_scores.append(metrics['quality_score'])

            except Exception as e:
                logger.error(f"Failed to analyze {file_path}: {e}")

        if not file_reports:
            return {
                'status': 'no_code_files',
                'session_id': 'recent_files',
                'message': 'No analyzable code files'
            }

        avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0

        # Determine scope label based on what files we're analyzing
        scope_label = 'Core'  # Default to core project files
        if any('session_quality_tracker' in str(f) for f in edited_files):
            scope_label = 'Fix'  # Files we just fixed
        elif any(Path(f).stat().st_mtime > (datetime.now().timestamp() - 1800) for f in edited_files if Path(f).exists()):
            scope_label = 'Recent'  # Recently modified

        return {
            'status': 'success',
            'session_id': 'recent_files',
            'scope_label': scope_label,
            'timestamp': datetime.now().isoformat(),
            'summary': {
                'files_analyzed': len(file_reports),
                'avg_quality_score': round(avg_quality, 3),
                'total_issues': total_issues,
                'total_good_patterns': total_good_patterns,
                'quality_grade': self._get_quality_grade(avg_quality)
            },
            'file_reports': file_reports,
            'actionable_items': self._generate_actionable_items(file_reports),
            'quality_trend': self._calculate_quality_trend()
        }

    def _calculate_quality_trend(self) -> str:
        """Calculate quality trend across session."""
        if not self.quality_history:
            return "No trend data"

        # Look at average change
        improvements = 0
        degradations = 0

        for file_path, history in self.quality_history.items():
            if len(history) >= 2:
                delta = history[-1]['score'] - history[0]['score']
                if delta > 0.05:
                    improvements += 1
                elif delta < -0.05:
                    degradations += 1

        if improvements > degradations:
            return f"📈 Improving ({improvements} files better)"
        elif degradations > improvements:
            return f"📉 Degrading ({degradations} files worse)"
        else:
            return "➡️ Stable"

    def generate_report(self, analysis: Dict) -> str:
        """Generate human-readable report."""
        if analysis['status'] != 'success':
            return f"❌ {analysis['message']}"

        summary = analysis['summary']
        grade = summary['quality_grade']
        emoji = '🟢' if grade in ['A+', 'A'] else '🟡' if grade in ['B', 'C'] else '🔴'

        report = []
        report.append("=" * 60)
        report.append("SESSION CODE QUALITY REPORT")
        report.append("=" * 60)
        report.append(f"Session: {analysis['session_id'][:8]}...")
        report.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report.append("")
        report.append(f"{emoji} Overall Grade: {grade} ({summary['avg_quality_score']:.1%})")
        report.append(f"Files Analyzed: {summary['files_analyzed']}")
        report.append(f"Total Issues: {summary['total_issues']}")
        report.append(f"Good Patterns: {summary['total_good_patterns']}")
        report.append(f"Trend: {analysis['quality_trend']}")
        report.append("")

        if analysis['actionable_items']:
            report.append("ACTIONS NEEDED:")
            for action in analysis['actionable_items']:
                report.append(f"  {action}")
            report.append("")

        report.append("FILE DETAILS:")
        for file_path, file_report in analysis['file_reports'].items():
            file_name = Path(file_path).name
            score = file_report['quality_score']
            emoji = '✅' if score > 0.7 else '⚠️' if score > 0.5 else '❌'
            report.append(f"  {emoji} {file_name}: {score:.1%} ({file_report['issues']} issues)")

        report.append("")
        report.append("💡 Ask Claude: 'Please fix the code quality issues in this session'")
        report.append("=" * 60)

        return '\n'.join(report)


def main():
    """Run session quality analysis."""
    tracker = SessionQualityTracker()

    logger.info("🔍 Analyzing current session code quality...")
    logger.info("")

    analysis = tracker.analyze_session_quality()
    report = tracker.generate_report(analysis)

    logger.info(report)

    # Save report for watcher integration - PER PROJECT
    # Always save cache, even with fallback analysis
    if analysis.get('status') in ['success', 'fallback']:
        # Get project name from current directory
        project_name = os.path.basename(os.getcwd())
        # Secure sanitization with whitelist approach
        import re
        safe_project_name = re.sub(r'[^a-zA-Z0-9_-]', '_', project_name)[:100]

        # Save to per-project cache directory
        cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
        cache_dir.mkdir(exist_ok=True, parents=True)
        report_path = cache_dir / f"{safe_project_name}.json"

        # Validate the report path stays within cache directory
        if not str(report_path.resolve()).startswith(str(cache_dir.resolve())):
            logger.error(f"Security: Invalid cache path for {project_name}")
            return

        with open(report_path, 'w') as f:
            json.dump(analysis, f, indent=2)
        logger.info(f"\n📊 Full report saved to: {report_path}")


if __name__ == "__main__":
    main()
```
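For orientation, here is a minimal sketch of how a downstream consumer (for example, a statusline script) might read the per-project cache that `main()` writes under `~/.claude-self-reflect/quality_cache/<project>.json`. The reader below is illustrative and not part of the package; only the cache path, the filename sanitization, and the JSON keys (`status`, `scope_label`, `summary`, `quality_grade`, `avg_quality_score`) are taken from the code above.

```python
#!/usr/bin/env python3
"""Illustrative reader for the quality cache written by session_quality_tracker.py."""

import json
import os
import re
from pathlib import Path


def read_quality_cache(project_dir: str = ".") -> str:
    """Return a one-line summary such as 'Sess B 74.0%' from the cached report, if any."""
    # Mirror the tracker's whitelist sanitization of the project name.
    project_name = os.path.basename(os.path.abspath(project_dir))
    safe_name = re.sub(r'[^a-zA-Z0-9_-]', '_', project_name)[:100]

    cache_file = Path.home() / ".claude-self-reflect" / "quality_cache" / f"{safe_name}.json"
    if not cache_file.exists():
        return "no quality data"

    analysis = json.loads(cache_file.read_text())
    if analysis.get("status") != "success":
        return analysis.get("message", "no quality data")

    summary = analysis["summary"]
    return (f"{analysis.get('scope_label', 'Sess')} "
            f"{summary['quality_grade']} "
            f"{summary['avg_quality_score']:.1%}")


if __name__ == "__main__":
    print(read_quality_cache())
```

Running `python scripts/session_quality_tracker.py` inside a project should populate the cache before a reader like this consults it.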