claude-self-reflect 3.3.0 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
```diff
@@ -135,13 +135,15 @@ def get_status() -> dict:
     # The actual structure has imported_files at the top level
     imported_files = data.get("imported_files", {})

-    # Count all files in imported_files object (they are all fully imported)
+    # Count all files in imported_files object (only if they still exist on disk)
     for file_path in imported_files.keys():
         normalized_path = normalize_file_path(file_path)
         if normalized_path in file_to_project and normalized_path not in counted_files:
-            project_name = file_to_project[normalized_path]
-            project_stats[project_name]["indexed"] += 1
-            counted_files.add(normalized_path)
+            # Verify file actually exists before counting it as indexed
+            if Path(normalized_path).exists():
+                project_name = file_to_project[normalized_path]
+                project_stats[project_name]["indexed"] += 1
+                counted_files.add(normalized_path)

     # Also check file_metadata for partially imported files
     file_metadata = data.get("file_metadata", {})
```
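Without the existence check, state entries for conversation files that have since been deleted would still count as indexed, so the status totals could exceed what is actually on disk. A minimal sketch of the gating pattern, assuming simplified inputs (`normalize_file_path` is omitted and the dict shapes are inferred from the diff):

```python
from collections import defaultdict
from pathlib import Path

def count_indexed(imported_files: dict, file_to_project: dict) -> dict:
    """Count a state entry as indexed only if its file still exists on disk."""
    project_stats = defaultdict(lambda: {"indexed": 0})
    counted_files = set()
    for file_path in imported_files:
        if file_path in file_to_project and file_path not in counted_files:
            if Path(file_path).exists():  # skip stale entries for deleted files
                project_stats[file_to_project[file_path]]["indexed"] += 1
                counted_files.add(file_path)
    return dict(project_stats)
```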
```diff
@@ -180,14 +182,17 @@ def get_status() -> dict:
         with open(watcher_state_file, 'r') as f:
             watcher_data = json.load(f)

-        # Count files imported by the watcher
+        # Count files imported by the watcher (only if they still exist on disk)
         watcher_imports = watcher_data.get("imported_files", {})
         for file_path in watcher_imports.keys():
             normalized_path = normalize_file_path(file_path)
+            # CRITICAL: Only count if file exists on disk AND is in our project list
             if normalized_path in file_to_project and normalized_path not in counted_files:
-                project_name = file_to_project[normalized_path]
-                project_stats[project_name]["indexed"] += 1
-                counted_files.add(normalized_path)
+                # Verify file actually exists before counting it as indexed
+                if Path(normalized_path).exists():
+                    project_name = file_to_project[normalized_path]
+                    project_stats[project_name]["indexed"] += 1
+                    counted_files.add(normalized_path)
    except (json.JSONDecodeError, KeyError, OSError):
        # If watcher file is corrupted or unreadable, continue
        pass
```
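The surrounding `try/except` keeps status reporting alive when the watcher state file is corrupted: a truncated JSON write or a missing key degrades to "no watcher counts" rather than an exception. A sketch of that defensive load under the same assumptions (the state-file argument is illustrative):

```python
import json
from pathlib import Path

def load_watcher_imports(state_file: Path) -> dict:
    """Return the watcher's imported_files map, or {} if the state is unreadable."""
    try:
        with open(state_file, 'r') as f:
            return json.load(f).get("imported_files", {})
    except (json.JSONDecodeError, KeyError, OSError):
        # Corrupted, half-written, or unreadable state: report nothing
        return {}
```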
```diff
@@ -82,10 +82,17 @@ class TemporalTools:

         # Filter collections by project
         if target_project != 'all':
+            # Use asyncio.to_thread to avoid blocking the event loop
+            import asyncio
             from qdrant_client import QdrantClient as SyncQdrantClient
-            sync_client = SyncQdrantClient(url=self.qdrant_url)
-            resolver = ProjectResolver(sync_client)
-            project_collections = resolver.find_collections_for_project(target_project)
+
+            def get_project_collections():
+                sync_client = SyncQdrantClient(url=self.qdrant_url)
+                resolver = ProjectResolver(sync_client)
+                return resolver.find_collections_for_project(target_project)
+
+            # Run sync client in thread pool to avoid blocking
+            project_collections = await asyncio.to_thread(get_project_collections)

         if not project_collections:
             normalized_name = self.normalize_project_name(target_project)
```
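The fix here targets event-loop starvation: the synchronous `QdrantClient` blocks on network I/O, and calling it directly inside an async tool handler would freeze every other coroutine until the lookup returns. `asyncio.to_thread` (Python 3.9+) runs the callable on the default executor thread and makes the result awaitable. A self-contained sketch of the pattern, where the blocking function stands in for the Qdrant lookup:

```python
import asyncio
import time

def blocking_lookup(project: str) -> list:
    """Stand-in for the synchronous Qdrant collection lookup."""
    time.sleep(0.5)  # simulated network round-trip
    return [f"conv_{project}_local"]

async def handler(project: str) -> list:
    # The event loop keeps serving other coroutines during the sleep
    return await asyncio.to_thread(blocking_lookup, project)

print(asyncio.run(handler("demo")))  # ['conv_demo_local']
```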
```diff
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+"""
+Test file with intentional quality issues for testing quality-fixer agent.
+This file contains patterns that should be fixed:
+- sync file operations that should be async
+- global variables
+- print statements
+- long functions
+"""
+
+import os
+import json
+import asyncio
+import logging
+import aiofiles
+from typing import List, Dict, Any
+
+# Set up logger instead of print statements
+logger = logging.getLogger(__name__)
+
+# Configuration management class instead of global variables
+class ConfigManager:
+    def __init__(self):
+        self.config = None
+        self.counter = 0
+
+async def load_config(config_manager: ConfigManager) -> Dict[str, Any]:
+    """Load config using async file operations."""
+    # Async file operation using aiofiles
+    async with aiofiles.open("config.json", "r") as f:
+        content = await f.read()
+        config_manager.config = json.loads(content)
+
+    logger.info(f"Config loaded: {config_manager.config}")
+    return config_manager.config
+
+async def save_data(data: Dict[str, Any], config_manager: ConfigManager) -> None:
+    """Save data using async operations."""
+    config_manager.counter += 1
+
+    # Async file operation using aiofiles
+    async with aiofiles.open("data.json", "w") as f:
+        await f.write(json.dumps(data))
+
+    logger.info(f"Data saved, counter: {config_manager.counter}")
+
+def validate_items(items: List[str]) -> List[str]:
+    """Validate input items."""
+    valid_items = []
+    for item in items:
+        if not item:
+            logger.warning(f"Invalid item: {item}")
+            continue
+        valid_items.append(item)
+    return valid_items
+
+def process_items(items: List[str]) -> List[str]:
+    """Process each item."""
+    return [item.upper() for item in items]
+
+def filter_results(results: List[str]) -> List[str]:
+    """Filter results by length."""
+    return [result for result in results if len(result) > 3]
+
+def create_summary(items: List[str], results: List[str], filtered: List[str]) -> Dict[str, int]:
+    """Create processing summary."""
+    return {
+        "total": len(items),
+        "processed": len(results),
+        "filtered": len(filtered)
+    }
+
+async def save_results(filtered: List[str]) -> None:
+    """Save results to file asynchronously."""
+    async with aiofiles.open("results.txt", "w") as f:
+        for item in filtered:
+            await f.write(f"{item}\n")
+
+async def process_items_improved(items: List[str], config_manager: ConfigManager) -> Dict[str, Any]:
+    """Improved function broken down into smaller functions."""
+    # Step 1: Validate items
+    valid_items = validate_items(items)
+
+    # Step 2: Process each item
+    results = process_items(valid_items)
+
+    # Step 3: Filter results
+    filtered = filter_results(results)
+
+    # Step 4: Sort results
+    filtered.sort()
+
+    # Step 5: Create summary
+    summary = create_summary(items, results, filtered)
+
+    # Step 6: Log summary
+    logger.info(f"Processing complete: {summary}")
+
+    # Step 7: Save results asynchronously
+    await save_results(filtered)
+
+    # Step 8: Update counter
+    config_manager.counter += len(filtered)
+
+    # Step 9: Create report
+    report = {
+        "summary": summary,
+        "results": filtered,
+        "counter": config_manager.counter
+    }
+
+    return report
+
+async def debug_function() -> None:
+    """Function with debug statements."""
+    logger.debug("Debug: Starting function")
+
+    # Reading file asynchronously
+    if os.path.exists("debug.log"):
+        async with aiofiles.open("debug.log", "r") as f:
+            log_data = await f.read()
+            logger.debug(f"Log data: {log_data}")
+
+    logger.debug("Debug: Function complete")
+
+# Using var instead of let/const (for JS patterns if analyzed)
+var_example = "This would be flagged in JS"
+
+async def main() -> None:
+    """Main execution function."""
+    # Set up logging
+    logging.basicConfig(level=logging.INFO)
+
+    # Initialize config manager
+    config_manager = ConfigManager()
+
+    logger.info("Starting application...")
+
+    try:
+        # Note: These operations would fail without actual files, but structure is correct
+        await load_config(config_manager)
+        await process_items_improved(["test", "data", "example"], config_manager)
+        await debug_function()
+    except FileNotFoundError:
+        logger.warning("Required files not found - this is expected in test context")
+    except Exception as e:
+        logger.error(f"Application error: {e}")
+
+    logger.info("Application complete!")
+
+if __name__ == "__main__":
+    # Run async main function
+    asyncio.run(main())
```
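Note that the module docstring still advertises the pre-fix issues (sync file operations, globals, prints, long functions) while the body already ships the remediated forms, so the fixture reads as the quality-fixer's "after" state. A hypothetical sketch of the before/after contrast the docstring describes:

```python
# Before: the shape the docstring describes (hypothetical example)
def load_config_sync():
    with open("config.json") as f:  # blocking I/O on the event loop
        print("loading config")     # print instead of a logger
        return f.read()

# After: the shape the fixture actually ships, per the diff above
import logging
import aiofiles

logger = logging.getLogger(__name__)

async def load_config_async() -> str:
    async with aiofiles.open("config.json") as f:  # non-blocking read
        logger.info("loading config")
        return await f.read()
```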
package/package.json CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "claude-self-reflect",
-  "version": "3.3.0",
+  "version": "4.0.0",
   "description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
   "keywords": [
     "claude",
@@ -35,6 +35,11 @@
   },
   "files": [
     "installer/*.js",
+    "scripts/csr-status",
+    "scripts/session_quality_tracker.py",
+    "scripts/ast_grep_final_analyzer.py",
+    "scripts/ast_grep_unified_registry.py",
+    "scripts/update_patterns.py",
     "mcp-server/src/**/*.py",
     "mcp-server/pyproject.toml",
     "mcp-server/run-mcp.sh",
```
@@ -0,0 +1,328 @@
```diff
@@ -0,0 +1,328 @@
+#!/usr/bin/env python3
+"""
+FINAL AST-GREP Analyzer with Unified Registry
+MANDATORY: Uses ast-grep-py + unified pattern registry
+NO regex fallbacks, NO simplifications
+"""
+
+import ast_grep_py as sg
+from pathlib import Path
+from typing import Dict, List, Any, Optional
+from datetime import datetime
+import json
+import sys
+
+# Import the unified registry
+sys.path.append(str(Path(__file__).parent))
+from ast_grep_unified_registry import get_unified_registry
+
+class FinalASTGrepAnalyzer:
+    """
+    Final production-ready AST-GREP analyzer.
+    MANDATORY components:
+    - ast-grep-py for AST matching
+    - Unified pattern registry (custom + catalog)
+    - NO regex patterns
+    - NO fallbacks
+    """
+
+    def __init__(self):
+        """Initialize with unified registry."""
+        self.registry = get_unified_registry()
+        all_patterns = self.registry.get_all_patterns()
+
+        print(f"āœ… Loaded unified registry with {len(all_patterns)} patterns")
+        print(f"   Languages: Python, TypeScript, JavaScript")
+        print(f"   Good patterns: {len(self.registry.get_good_patterns())}")
+        print(f"   Bad patterns: {len(self.registry.get_bad_patterns())}")
+
+    def analyze_file(self, file_path: str) -> Dict[str, Any]:
+        """
+        Analyze a file using unified AST-GREP patterns.
+        Returns detailed quality metrics and pattern matches.
+        """
+        if not Path(file_path).exists():
+            raise FileNotFoundError(f"File not found: {file_path}")
+
+        # Detect language from file extension
+        language = self._detect_language(file_path)
+
+        with open(file_path, 'r', encoding='utf-8') as f:
+            content = f.read()
+
+        # Count lines of code for normalization
+        lines_of_code = len(content.splitlines())
+
+        # Create SgRoot for the detected language
+        sg_language = self._get_sg_language(language)
+        root = sg.SgRoot(content, sg_language)
+        node = root.root()
+
+        # Get patterns for this language
+        language_patterns = self.registry.get_patterns_by_language(language)
+
+        # Track all matches
+        all_matches = []
+        pattern_errors = []
+        matches_by_category = {}
+
+        # Process each pattern
+        for pattern_def in language_patterns:
+            try:
+                pattern_str = pattern_def.get("pattern", "")
+                if not pattern_str:
+                    continue
+
+                # Find matches using ast-grep-py
+                matches = node.find_all(pattern=pattern_str)
+
+                if matches:
+                    category = pattern_def.get('category', 'unknown')
+                    if category not in matches_by_category:
+                        matches_by_category[category] = []
+
+                    match_info = {
+                        'category': category,
+                        'id': pattern_def['id'],
+                        'description': pattern_def.get('description', ''),
+                        'quality': pattern_def.get('quality', 'neutral'),
+                        'weight': pattern_def.get('weight', 0),
+                        'count': len(matches),
+                        'locations': [
+                            {
+                                'line': m.range().start.line + 1,
+                                'column': m.range().start.column,
+                                'text': m.text()[:80]
+                            } for m in matches[:5]  # First 5 examples
+                        ]
+                    }
+
+                    matches_by_category[category].append(match_info)
+                    all_matches.append(match_info)
+
+            except Exception as e:
+                # Record all pattern errors for debugging
+                pattern_errors.append({
+                    'pattern_id': pattern_def.get('id', '<unknown>'),
+                    'category': pattern_def.get('category', 'unknown'),
+                    'error': str(e)[:200]
+                })
+
+        # Calculate quality score with LOC normalization
+        quality_score = self.registry.calculate_quality_score(all_matches, loc=lines_of_code)
+
+        # Count good vs bad patterns
+        good_matches = [m for m in all_matches if m['quality'] == 'good']
+        bad_matches = [m for m in all_matches if m['quality'] == 'bad']
+
+        good_count = sum(m['count'] for m in good_matches)
+        bad_count = sum(m['count'] for m in bad_matches)
+
+        return {
+            'file': file_path,
+            'timestamp': datetime.now().isoformat(),
+            'language': language,
+            'engine': 'ast-grep-py + unified registry',
+            'registry_info': {
+                'total_patterns_available': len(language_patterns),
+                'patterns_matched': len(all_matches),
+                'patterns_errored': len(pattern_errors),
+                'categories_found': list(matches_by_category.keys())
+            },
+            'matches_by_category': matches_by_category,
+            'all_matches': all_matches,
+            'errors': pattern_errors[:5],  # First 5 errors only
+            'quality_metrics': {
+                'quality_score': round(quality_score, 3),
+                'good_patterns_found': good_count,
+                'bad_patterns_found': bad_count,
+                'unique_patterns_matched': len(all_matches),
+                'total_issues': bad_count,
+                'total_good_practices': good_count
+            },
+            'recommendations': self._generate_recommendations(matches_by_category, quality_score)
+        }
+
+    def _detect_language(self, file_path: str) -> str:
+        """Detect language from file extension."""
+        ext = Path(file_path).suffix.lower()
+        lang_map = {
+            '.py': 'python',
+            '.ts': 'typescript',
+            '.tsx': 'tsx',
+            '.js': 'javascript',
+            '.jsx': 'jsx'
+        }
+        return lang_map.get(ext, 'python')
+
+    def _get_sg_language(self, language: str) -> str:
+        """Get ast-grep language identifier."""
+        # ast-grep-py uses different language identifiers
+        sg_map = {
+            'python': 'python',
+            'typescript': 'typescript',
+            'tsx': 'tsx',
+            'javascript': 'javascript',
+            'jsx': 'jsx'
+        }
+        return sg_map.get(language, 'python')
+
+    def _generate_recommendations(self, matches: Dict, score: float) -> List[str]:
+        """Generate actionable recommendations based on matches."""
+        recommendations = []
+
+        if score < 0.3:
+            recommendations.append("šŸ”“ Critical: Code quality needs immediate attention")
+        elif score < 0.6:
+            recommendations.append("🟔 Warning: Several anti-patterns detected")
+        else:
+            recommendations.append("🟢 Good: Code follows most best practices")
+
+        # Check for specific issues
+        for category, category_matches in matches.items():
+            if 'antipatterns' in category:
+                total = sum(m['count'] for m in category_matches)
+                if total > 0:
+                    recommendations.append(f"Fix {total} anti-patterns in {category}")
+
+            if 'logging' in category:
+                prints = sum(m['count'] for m in category_matches if 'print' in m['id'])
+                if prints > 0:
+                    recommendations.append(f"Replace {prints} print statements with logger")
+
+            if 'error' in category:
+                bare = sum(m['count'] for m in category_matches if 'broad' in m['id'] or 'bare' in m['id'])
+                if bare > 0:
+                    recommendations.append(f"Fix {bare} bare except clauses")
+
+        return recommendations
+
+    def generate_report(self, result: Dict[str, Any]) -> str:
+        """Generate a comprehensive analysis report."""
+        report = []
+        report.append("# AST-GREP Pattern Analysis Report")
+        report.append(f"\n**File**: {result['file']}")
+        report.append(f"**Language**: {result['language']}")
+        report.append(f"**Timestamp**: {result['timestamp']}")
+        report.append(f"**Engine**: {result['engine']}")
+
+        # Quality overview
+        metrics = result['quality_metrics']
+        score = metrics['quality_score']
+        emoji = "🟢" if score > 0.7 else "🟔" if score > 0.4 else "šŸ”“"
+
+        report.append("\n## Quality Overview")
+        report.append(f"- **Quality Score**: {emoji} {score:.1%}")
+        report.append(f"- **Good Practices**: {metrics['good_patterns_found']}")
+        report.append(f"- **Issues Found**: {metrics['total_issues']}")
+        report.append(f"- **Unique Patterns Matched**: {metrics['unique_patterns_matched']}")
+
+        # Recommendations
+        if result['recommendations']:
+            report.append("\n## Recommendations")
+            for rec in result['recommendations']:
+                report.append(f"- {rec}")
+
+        # Pattern matches by category
+        report.append("\n## Pattern Matches by Category")
+        for category, matches in result['matches_by_category'].items():
+            if matches:
+                total = sum(m['count'] for m in matches)
+                report.append(f"\n### {category} ({len(matches)} patterns, {total} matches)")
+
+                # Sort by count descending
+                sorted_matches = sorted(matches, key=lambda x: x['count'], reverse=True)
+
+                for match in sorted_matches[:5]:  # Top 5 per category
+                    quality_emoji = "āœ…" if match['quality'] == 'good' else "āŒ" if match['quality'] == 'bad' else "⚪"
+                    report.append(f"- {quality_emoji} **{match['id']}**: {match['count']} instances")
+                    report.append(f"  - {match['description']}")
+                    if match['locations']:
+                        loc = match['locations'][0]
+                        report.append(f"  - Example (line {loc['line']}): `{loc['text'][:50]}...`")
+
+        # Registry info
+        report.append("\n## Pattern Registry Statistics")
+        info = result['registry_info']
+        report.append(f"- **Patterns Available**: {info['total_patterns_available']}")
+        report.append(f"- **Patterns Matched**: {info['patterns_matched']}")
+        report.append(f"- **Categories Found**: {', '.join(info['categories_found'])}")
+
+        report.append("\n## Compliance")
+        report.append("āœ… Using unified AST-GREP registry (custom + catalog)")
+        report.append("āœ… Using ast-grep-py for AST matching")
+        report.append("āœ… NO regex patterns or fallbacks")
+        report.append("āœ… Production-ready pattern analysis")
+
+        return '\n'.join(report)
+
+
+def run_final_analysis():
+    """Run final AST-GREP analysis with unified registry."""
+    print("šŸš€ FINAL AST-GREP Analysis with Unified Registry")
+    print("=" * 60)
+
+    analyzer = FinalASTGrepAnalyzer()
+
+    # Analyze server.py
+    server_path = "/Users/ramakrishnanannaswamy/projects/claude-self-reflect/mcp-server/src/server.py"
+
+    print(f"\nAnalyzing: {server_path}")
+    print("-" * 40)
+
+    try:
+        result = analyzer.analyze_file(server_path)
+
+        # Display results
+        metrics = result['quality_metrics']
+        score = metrics['quality_score']
+
+        print(f"\nšŸ“Š Analysis Results:")
+        print(f"   Language: {result['language']}")
+        print(f"   Quality Score: {score:.1%}")
+        print(f"   Good Practices: {metrics['good_patterns_found']}")
+        print(f"   Issues: {metrics['total_issues']}")
+        print(f"   Patterns Matched: {metrics['unique_patterns_matched']}")
+
+        print(f"\nšŸ’” Recommendations:")
+        for rec in result['recommendations']:
+            print(f"   {rec}")
+
+        # Top issues
+        bad_patterns = [m for m in result['all_matches'] if m['quality'] == 'bad']
+        if bad_patterns:
+            print(f"\nāš ļø Top Issues to Fix:")
+            sorted_bad = sorted(bad_patterns, key=lambda x: x['count'] * abs(x['weight']), reverse=True)
+            for pattern in sorted_bad[:5]:
+                print(f"   - {pattern['id']}: {pattern['count']} instances")
+                print(f"     {pattern['description']}")
+
+        # Generate and save report
+        report = analyzer.generate_report(result)
+        report_path = "/Users/ramakrishnanannaswamy/projects/claude-self-reflect/scripts/final_analysis_report.md"
+        with open(report_path, 'w') as f:
+            f.write(report)
+
+        print(f"\nšŸ“ Full report saved to: {report_path}")
+
+        # Save JSON results
+        json_path = "/Users/ramakrishnanannaswamy/projects/claude-self-reflect/scripts/final_analysis_result.json"
+        with open(json_path, 'w') as f:
+            json.dump(result, f, indent=2)
+
+        print(f"šŸ“Š JSON results saved to: {json_path}")
+
+        print("\nāœ… Final AST-GREP analysis complete!")
+        print("   - Unified registry with 41 patterns")
+        print("   - Support for Python, TypeScript, JavaScript")
+        print("   - Ready for production integration")
+
+        return result

+    except Exception as e:
+        print(f"\nāŒ Analysis failed: {e}")
+        raise
+
+
+if __name__ == "__main__":
+    run_final_analysis()
```
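The analyzer's core loop (presumably shipped as `scripts/ast_grep_final_analyzer.py`, given the `files` entry above) is plain ast-grep-py: parse the source into an `SgRoot`, then call `find_all` with a pattern string whose `$VARS` are metavariables. A minimal standalone sketch of that API (the snippet and pattern are illustrative):

```python
import ast_grep_py as sg

source = 'print("debug")\nlogger.info("kept")\nprint(value)\n'

root = sg.SgRoot(source, "python").root()
for match in root.find_all(pattern="print($ARG)"):
    start = match.range().start  # 0-based line/column, as used in the diff
    print(f"line {start.line + 1}: {match.text()}")
# -> line 1: print("debug")
#    line 3: print(value)
```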