claude-self-reflect 3.2.4 → 3.3.1

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (41)
  1. package/.claude/agents/claude-self-reflect-test.md +992 -510
  2. package/.claude/agents/reflection-specialist.md +59 -3
  3. package/README.md +14 -5
  4. package/installer/cli.js +16 -0
  5. package/installer/postinstall.js +14 -0
  6. package/installer/statusline-setup.js +289 -0
  7. package/mcp-server/run-mcp.sh +73 -5
  8. package/mcp-server/src/app_context.py +64 -0
  9. package/mcp-server/src/config.py +57 -0
  10. package/mcp-server/src/connection_pool.py +286 -0
  11. package/mcp-server/src/decay_manager.py +106 -0
  12. package/mcp-server/src/embedding_manager.py +64 -40
  13. package/mcp-server/src/embeddings_old.py +141 -0
  14. package/mcp-server/src/models.py +64 -0
  15. package/mcp-server/src/parallel_search.py +305 -0
  16. package/mcp-server/src/project_resolver.py +5 -0
  17. package/mcp-server/src/reflection_tools.py +211 -0
  18. package/mcp-server/src/rich_formatting.py +196 -0
  19. package/mcp-server/src/search_tools.py +874 -0
  20. package/mcp-server/src/server.py +127 -1720
  21. package/mcp-server/src/temporal_design.py +132 -0
  22. package/mcp-server/src/temporal_tools.py +604 -0
  23. package/mcp-server/src/temporal_utils.py +384 -0
  24. package/mcp-server/src/utils.py +150 -67
  25. package/package.json +15 -1
  26. package/scripts/add-timestamp-indexes.py +134 -0
  27. package/scripts/ast_grep_final_analyzer.py +325 -0
  28. package/scripts/ast_grep_unified_registry.py +556 -0
  29. package/scripts/check-collections.py +29 -0
  30. package/scripts/csr-status +366 -0
  31. package/scripts/debug-august-parsing.py +76 -0
  32. package/scripts/debug-import-single.py +91 -0
  33. package/scripts/debug-project-resolver.py +82 -0
  34. package/scripts/debug-temporal-tools.py +135 -0
  35. package/scripts/delta-metadata-update.py +547 -0
  36. package/scripts/import-conversations-unified.py +157 -25
  37. package/scripts/precompact-hook.sh +33 -0
  38. package/scripts/session_quality_tracker.py +481 -0
  39. package/scripts/streaming-watcher.py +1578 -0
  40. package/scripts/update_patterns.py +334 -0
  41. package/scripts/utils.py +39 -0
package/scripts/add-timestamp-indexes.py
@@ -0,0 +1,134 @@
+ #!/usr/bin/env python3
+ """Add timestamp indexes to all collections for OrderBy support."""
+
+ import asyncio
+ import os
+ from pathlib import Path
+ import sys
+
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ from qdrant_client import AsyncQdrantClient
+ from qdrant_client.models import PayloadSchemaType, OrderBy
+
+ QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
+
+ async def add_timestamp_indexes():
+     """Add timestamp indexes to all collections that need them."""
+     client = AsyncQdrantClient(url=QDRANT_URL)
+
+     print("Adding timestamp indexes for temporal query support...")
+     print("=" * 60)
+
+     # Get all collections
+     collections = await client.get_collections()
+     total = len(collections.collections)
+     print(f"Found {total} collections")
+
+     success_count = 0
+     skip_count = 0
+     error_count = 0
+
+     for i, col in enumerate(collections.collections, 1):
+         col_name = col.name
+         print(f"\n[{i}/{total}] Processing {col_name}...")
+
+         try:
+             # Check if collection has points
+             info = await client.get_collection(col_name)
+             if info.points_count == 0:
+                 print("  ⏭️ Skipped (empty collection)")
+                 skip_count += 1
+                 continue
+
+             # Check if timestamp field exists
+             points, _ = await client.scroll(
+                 collection_name=col_name,
+                 limit=1,
+                 with_payload=["timestamp"]
+             )
+
+             if not points or not points[0].payload.get('timestamp'):
+                 print("  ⏭️ Skipped (no timestamp field)")
+                 skip_count += 1
+                 continue
+
+             # Try an OrderBy query to check whether the index already exists
+             try:
+                 await client.scroll(
+                     collection_name=col_name,
+                     order_by=OrderBy(key="timestamp", direction="desc"),
+                     limit=1
+                 )
+                 print("  ✅ Already has timestamp index")
+                 skip_count += 1
+             except Exception as e:
+                 if "No range index" in str(e):
+                     # Need to create index
+                     print("  🔧 Creating timestamp index...")
+                     try:
+                         await client.create_payload_index(
+                             collection_name=col_name,
+                             field_name="timestamp",
+                             field_schema=PayloadSchemaType.DATETIME
+                         )
+                         print("  ✅ Index created successfully")
+                         success_count += 1
+                     except Exception as create_error:
+                         print(f"  ❌ Failed to create index: {create_error}")
+                         error_count += 1
+                 else:
+                     print(f"  ⚠️ Unexpected error: {e}")
+                     error_count += 1
+
+         except Exception as e:
+             print(f"  ❌ Error: {e}")
+             error_count += 1
+
+     print("\n" + "=" * 60)
+     print("SUMMARY")
+     print("=" * 60)
+     print(f"✅ Indexes created: {success_count}")
+     print(f"⏭️ Skipped: {skip_count}")
+     print(f"❌ Errors: {error_count}")
+     print(f"📊 Total collections: {total}")
+
+     # Verify temporal queries work
+     if success_count > 0:
+         print("\n" + "=" * 60)
+         print("VERIFYING TEMPORAL QUERIES")
+         print("=" * 60)
+
+         # Find a collection with data to test
+         test_collection = None
+         for col in collections.collections:
+             try:
+                 info = await client.get_collection(col.name)
+                 if info.points_count > 100:  # Find one with a decent amount of data
+                     test_collection = col.name
+                     break
+             except Exception:
+                 pass
+
+         if test_collection:
+             print(f"Testing on {test_collection}...")
+             try:
+                 # Test OrderBy
+                 results, _ = await client.scroll(
+                     collection_name=test_collection,
+                     order_by=OrderBy(key="timestamp", direction="desc"),
+                     limit=3,
+                     with_payload=["timestamp", "text"]
+                 )
+
+                 print(f"✅ OrderBy works! Found {len(results)} recent conversations:")
+                 for r in results:
+                     ts = r.payload.get('timestamp', 'N/A')
+                     text = r.payload.get('text', '')[:60] + '...'
+                     print(f"  - {ts}: {text}")
+
+             except Exception as e:
+                 print(f"❌ OrderBy test failed: {e}")
+
+ if __name__ == "__main__":
+     asyncio.run(add_timestamp_indexes())
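The probe-then-create flow above relies on a Qdrant behavior worth noting: order_by in scroll only works on payload fields with a range-capable index, and querying an unindexed field raises the "No range index" error the script catches. A minimal standalone sketch of the resulting query, using the same client calls as the script; the collection name "conv_example" is hypothetical, not from the package:

import asyncio

from qdrant_client import AsyncQdrantClient
from qdrant_client.models import OrderBy

async def newest(collection: str = "conv_example") -> None:
    # Assumes a DATETIME payload index already exists on "timestamp",
    # e.g. created by the script above via create_payload_index().
    client = AsyncQdrantClient(url="http://localhost:6333")
    points, _ = await client.scroll(
        collection_name=collection,
        order_by=OrderBy(key="timestamp", direction="desc"),
        limit=3,
        with_payload=["timestamp"],
    )
    for p in points:
        print(p.payload.get("timestamp"))

if __name__ == "__main__":
    asyncio.run(newest())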
package/scripts/ast_grep_final_analyzer.py
@@ -0,0 +1,325 @@
+ #!/usr/bin/env python3
+ """
+ FINAL AST-GREP Analyzer with Unified Registry
+ MANDATORY: Uses ast-grep-py + unified pattern registry
+ NO regex fallbacks, NO simplifications
+ """
+
+ import ast_grep_py as sg
+ from pathlib import Path
+ from typing import Dict, List, Any
+ from datetime import datetime
+ import json
+ import sys
+
+ # Import the unified registry
+ sys.path.append(str(Path(__file__).parent))
+ from ast_grep_unified_registry import get_unified_registry
+
+ class FinalASTGrepAnalyzer:
+     """
+     Final production-ready AST-GREP analyzer.
+     MANDATORY components:
+     - ast-grep-py for AST matching
+     - Unified pattern registry (custom + catalog)
+     - NO regex patterns
+     - NO fallbacks
+     """
+
+     def __init__(self):
+         """Initialize with unified registry."""
+         self.registry = get_unified_registry()
+         all_patterns = self.registry.get_all_patterns()
+
+         print(f"✅ Loaded unified registry with {len(all_patterns)} patterns")
+         print("   Languages: Python, TypeScript, JavaScript")
+         print(f"   Good patterns: {len(self.registry.get_good_patterns())}")
+         print(f"   Bad patterns: {len(self.registry.get_bad_patterns())}")
+
+     def analyze_file(self, file_path: str) -> Dict[str, Any]:
+         """
+         Analyze a file using unified AST-GREP patterns.
+         Returns detailed quality metrics and pattern matches.
+         """
+         if not Path(file_path).exists():
+             raise FileNotFoundError(f"File not found: {file_path}")
+
+         # Detect language from file extension
+         language = self._detect_language(file_path)
+
+         with open(file_path, 'r', encoding='utf-8') as f:
+             content = f.read()
+
+         # Create SgRoot for the detected language
+         sg_language = self._get_sg_language(language)
+         root = sg.SgRoot(content, sg_language)
+         node = root.root()
+
+         # Get patterns for this language
+         language_patterns = self.registry.get_patterns_by_language(language)
+
+         # Track all matches
+         all_matches = []
+         pattern_errors = []
+         matches_by_category = {}
+
+         # Process each pattern
+         for pattern_def in language_patterns:
+             try:
+                 pattern_str = pattern_def.get("pattern", "")
+                 if not pattern_str:
+                     continue
+
+                 # Find matches using ast-grep-py
+                 matches = node.find_all(pattern=pattern_str)
+
+                 if matches:
+                     category = pattern_def.get('category', 'unknown')
+                     if category not in matches_by_category:
+                         matches_by_category[category] = []
+
+                     match_info = {
+                         'category': category,
+                         'id': pattern_def['id'],
+                         'description': pattern_def.get('description', ''),
+                         'quality': pattern_def.get('quality', 'neutral'),
+                         'weight': pattern_def.get('weight', 0),
+                         'count': len(matches),
+                         'locations': [
+                             {
+                                 'line': m.range().start.line + 1,
+                                 'column': m.range().start.column,
+                                 'text': m.text()[:80]
+                             } for m in matches[:5]  # First 5 examples
+                         ]
+                     }
+
+                     matches_by_category[category].append(match_info)
+                     all_matches.append(match_info)
+
+             except Exception as e:
+                 # Record all pattern errors for debugging
+                 pattern_errors.append({
+                     'pattern_id': pattern_def.get('id', '<unknown>'),
+                     'category': pattern_def.get('category', 'unknown'),
+                     'error': str(e)[:200]
+                 })
+
+         # Calculate quality score
+         quality_score = self.registry.calculate_quality_score(all_matches)
+
+         # Count good vs bad patterns
+         good_matches = [m for m in all_matches if m['quality'] == 'good']
+         bad_matches = [m for m in all_matches if m['quality'] == 'bad']
+
+         good_count = sum(m['count'] for m in good_matches)
+         bad_count = sum(m['count'] for m in bad_matches)
+
+         return {
+             'file': file_path,
+             'timestamp': datetime.now().isoformat(),
+             'language': language,
+             'engine': 'ast-grep-py + unified registry',
+             'registry_info': {
+                 'total_patterns_available': len(language_patterns),
+                 'patterns_matched': len(all_matches),
+                 'patterns_errored': len(pattern_errors),
+                 'categories_found': list(matches_by_category.keys())
+             },
+             'matches_by_category': matches_by_category,
+             'all_matches': all_matches,
+             'errors': pattern_errors[:5],  # First 5 errors only
+             'quality_metrics': {
+                 'quality_score': round(quality_score, 3),
+                 'good_patterns_found': good_count,
+                 'bad_patterns_found': bad_count,
+                 'unique_patterns_matched': len(all_matches),
+                 'total_issues': bad_count,
+                 'total_good_practices': good_count
+             },
+             'recommendations': self._generate_recommendations(matches_by_category, quality_score)
+         }
+
+     def _detect_language(self, file_path: str) -> str:
+         """Detect language from file extension."""
+         ext = Path(file_path).suffix.lower()
+         lang_map = {
+             '.py': 'python',
+             '.ts': 'typescript',
+             '.tsx': 'tsx',
+             '.js': 'javascript',
+             '.jsx': 'jsx'
+         }
+         return lang_map.get(ext, 'python')
+
+     def _get_sg_language(self, language: str) -> str:
+         """Get ast-grep language identifier."""
+         # ast-grep-py uses different language identifiers
+         sg_map = {
+             'python': 'python',
+             'typescript': 'typescript',
+             'tsx': 'tsx',
+             'javascript': 'javascript',
+             'jsx': 'jsx'
+         }
+         return sg_map.get(language, 'python')
+
+     def _generate_recommendations(self, matches: Dict, score: float) -> List[str]:
+         """Generate actionable recommendations based on matches."""
+         recommendations = []
+
+         if score < 0.3:
+             recommendations.append("🔴 Critical: Code quality needs immediate attention")
+         elif score < 0.6:
+             recommendations.append("🟡 Warning: Several anti-patterns detected")
+         else:
+             recommendations.append("🟢 Good: Code follows most best practices")
+
+         # Check for specific issues
+         for category, category_matches in matches.items():
+             if 'antipatterns' in category:
+                 total = sum(m['count'] for m in category_matches)
+                 if total > 0:
+                     recommendations.append(f"Fix {total} anti-patterns in {category}")
+
+             if 'logging' in category:
+                 prints = sum(m['count'] for m in category_matches if 'print' in m['id'])
+                 if prints > 0:
+                     recommendations.append(f"Replace {prints} print statements with logger")
+
+             if 'error' in category:
+                 bare = sum(m['count'] for m in category_matches if 'broad' in m['id'] or 'bare' in m['id'])
+                 if bare > 0:
+                     recommendations.append(f"Fix {bare} bare except clauses")
+
+         return recommendations
+
+     def generate_report(self, result: Dict[str, Any]) -> str:
+         """Generate a comprehensive analysis report."""
+         report = []
+         report.append("# AST-GREP Pattern Analysis Report")
+         report.append(f"\n**File**: {result['file']}")
+         report.append(f"**Language**: {result['language']}")
+         report.append(f"**Timestamp**: {result['timestamp']}")
+         report.append(f"**Engine**: {result['engine']}")
+
+         # Quality overview
+         metrics = result['quality_metrics']
+         score = metrics['quality_score']
+         emoji = "🟢" if score > 0.7 else "🟡" if score > 0.4 else "🔴"
+
+         report.append("\n## Quality Overview")
+         report.append(f"- **Quality Score**: {emoji} {score:.1%}")
+         report.append(f"- **Good Practices**: {metrics['good_patterns_found']}")
+         report.append(f"- **Issues Found**: {metrics['total_issues']}")
+         report.append(f"- **Unique Patterns Matched**: {metrics['unique_patterns_matched']}")
+
+         # Recommendations
+         if result['recommendations']:
+             report.append("\n## Recommendations")
+             for rec in result['recommendations']:
+                 report.append(f"- {rec}")
+
+         # Pattern matches by category
+         report.append("\n## Pattern Matches by Category")
+         for category, matches in result['matches_by_category'].items():
+             if matches:
+                 total = sum(m['count'] for m in matches)
+                 report.append(f"\n### {category} ({len(matches)} patterns, {total} matches)")
+
+                 # Sort by count descending
+                 sorted_matches = sorted(matches, key=lambda x: x['count'], reverse=True)
+
+                 for match in sorted_matches[:5]:  # Top 5 per category
+                     quality_emoji = "✅" if match['quality'] == 'good' else "❌" if match['quality'] == 'bad' else "⚪"
+                     report.append(f"- {quality_emoji} **{match['id']}**: {match['count']} instances")
+                     report.append(f"  - {match['description']}")
+                     if match['locations']:
+                         loc = match['locations'][0]
+                         report.append(f"  - Example (line {loc['line']}): `{loc['text'][:50]}...`")
+
+         # Registry info
+         report.append("\n## Pattern Registry Statistics")
+         info = result['registry_info']
+         report.append(f"- **Patterns Available**: {info['total_patterns_available']}")
+         report.append(f"- **Patterns Matched**: {info['patterns_matched']}")
+         report.append(f"- **Categories Found**: {', '.join(info['categories_found'])}")
+
+         report.append("\n## Compliance")
+         report.append("✅ Using unified AST-GREP registry (custom + catalog)")
+         report.append("✅ Using ast-grep-py for AST matching")
+         report.append("✅ NO regex patterns or fallbacks")
+         report.append("✅ Production-ready pattern analysis")
+
+         return '\n'.join(report)
+
+
+ def run_final_analysis():
+     """Run final AST-GREP analysis with unified registry."""
+     print("🚀 FINAL AST-GREP Analysis with Unified Registry")
+     print("=" * 60)
+
+     analyzer = FinalASTGrepAnalyzer()
+
+     # Analyze server.py
+     server_path = "/Users/ramakrishnanannaswamy/projects/claude-self-reflect/mcp-server/src/server.py"
+
+     print(f"\nAnalyzing: {server_path}")
+     print("-" * 40)
+
+     try:
+         result = analyzer.analyze_file(server_path)
+
+         # Display results
+         metrics = result['quality_metrics']
+         score = metrics['quality_score']
+
+         print("\n📊 Analysis Results:")
+         print(f"   Language: {result['language']}")
+         print(f"   Quality Score: {score:.1%}")
+         print(f"   Good Practices: {metrics['good_patterns_found']}")
+         print(f"   Issues: {metrics['total_issues']}")
+         print(f"   Patterns Matched: {metrics['unique_patterns_matched']}")
+
+         print("\n💡 Recommendations:")
+         for rec in result['recommendations']:
+             print(f"   {rec}")
+
+         # Top issues
+         bad_patterns = [m for m in result['all_matches'] if m['quality'] == 'bad']
+         if bad_patterns:
+             print("\n⚠️ Top Issues to Fix:")
+             sorted_bad = sorted(bad_patterns, key=lambda x: x['count'] * abs(x['weight']), reverse=True)
+             for pattern in sorted_bad[:5]:
+                 print(f"   - {pattern['id']}: {pattern['count']} instances")
+                 print(f"     {pattern['description']}")
+
+         # Generate and save report
+         report = analyzer.generate_report(result)
+         report_path = "/Users/ramakrishnanannaswamy/projects/claude-self-reflect/scripts/final_analysis_report.md"
+         with open(report_path, 'w') as f:
+             f.write(report)
+
+         print(f"\n📝 Full report saved to: {report_path}")
+
+         # Save JSON results
+         json_path = "/Users/ramakrishnanannaswamy/projects/claude-self-reflect/scripts/final_analysis_result.json"
+         with open(json_path, 'w') as f:
+             json.dump(result, f, indent=2)
+
+         print(f"📊 JSON results saved to: {json_path}")
+
+         print("\n✅ Final AST-GREP analysis complete!")
+         print("   - Unified registry with 41 patterns")
+         print("   - Support for Python, TypeScript, JavaScript")
+         print("   - Ready for production integration")
+
+         return result
+
+     except Exception as e:
+         print(f"\n❌ Analysis failed: {e}")
+         raise
+
+
+ if __name__ == "__main__":
+     run_final_analysis()
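For readers unfamiliar with ast-grep-py, the analyzer's core calls (SgRoot, root(), find_all(pattern=...), range(), text()) can be exercised in isolation. A minimal sketch against the same API; the source snippet and the print($A) pattern are illustrative, not part of the package's registry:

import ast_grep_py as sg

source = "def f():\n    print('hello')\n    print('world')\n"
root = sg.SgRoot(source, "python")  # parse the snippet as Python
node = root.root()

# $A is an ast-grep metavariable matching a single node, so this
# finds every print(...) call with exactly one argument.
matches = node.find_all(pattern="print($A)")
for m in matches:
    pos = m.range().start
    # Positions are 0-based, which is why the analyzer adds 1 to line.
    print(f"line {pos.line + 1}, col {pos.column}: {m.text()}")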