claude-self-reflect 5.0.7 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/.claude/agents/open-source-maintainer.md +1 -1
  2. package/.claude/agents/reflection-specialist.md +2 -2
  3. package/Dockerfile.async-importer +6 -4
  4. package/Dockerfile.importer +6 -6
  5. package/Dockerfile.safe-watcher +8 -8
  6. package/Dockerfile.streaming-importer +8 -1
  7. package/Dockerfile.watcher +8 -16
  8. package/docker-compose.yaml +12 -6
  9. package/installer/.claude/agents/README.md +138 -0
  10. package/package.json +5 -26
  11. package/src/__init__.py +0 -0
  12. package/src/cli/__init__.py +0 -0
  13. package/src/runtime/__init__.py +0 -0
  14. package/src/runtime/import-latest.py +124 -0
  15. package/{scripts → src/runtime}/precompact-hook.sh +1 -1
  16. package/src/runtime/streaming-importer.py +995 -0
  17. package/{scripts → src/runtime}/watcher-loop.sh +1 -1
  18. package/.claude/agents/claude-self-reflect-test.md +0 -1274
  19. package/.claude/agents/reflect-tester.md +0 -300
  20. package/scripts/add-timestamp-indexes.py +0 -134
  21. package/scripts/ast_grep_final_analyzer.py +0 -338
  22. package/scripts/ast_grep_unified_registry.py +0 -710
  23. package/scripts/check-collections.py +0 -29
  24. package/scripts/debug-august-parsing.py +0 -80
  25. package/scripts/debug-import-single.py +0 -91
  26. package/scripts/debug-project-resolver.py +0 -82
  27. package/scripts/debug-temporal-tools.py +0 -135
  28. package/scripts/import-conversations-enhanced.py +0 -672
  29. package/scripts/migrate-to-unified-state.py +0 -426
  30. package/scripts/session_quality_tracker.py +0 -671
  31. package/scripts/update_patterns.py +0 -334
  32. package/{scripts → src}/importer/__init__.py +0 -0
  33. package/{scripts → src}/importer/__main__.py +0 -0
  34. package/{scripts → src}/importer/core/__init__.py +0 -0
  35. package/{scripts → src}/importer/core/config.py +0 -0
  36. package/{scripts → src}/importer/core/exceptions.py +0 -0
  37. package/{scripts → src}/importer/core/models.py +0 -0
  38. package/{scripts → src}/importer/embeddings/__init__.py +0 -0
  39. package/{scripts → src}/importer/embeddings/base.py +0 -0
  40. package/{scripts → src}/importer/embeddings/fastembed_provider.py +0 -0
  41. package/{scripts → src}/importer/embeddings/validator.py +0 -0
  42. package/{scripts → src}/importer/embeddings/voyage_provider.py +0 -0
  43. package/{scripts → src}/importer/main.py +0 -0
  44. package/{scripts → src}/importer/processors/__init__.py +0 -0
  45. package/{scripts → src}/importer/processors/ast_extractor.py +0 -0
  46. package/{scripts → src}/importer/processors/chunker.py +0 -0
  47. package/{scripts → src}/importer/processors/concept_extractor.py +0 -0
  48. package/{scripts → src}/importer/processors/conversation_parser.py +0 -0
  49. package/{scripts → src}/importer/processors/tool_extractor.py +0 -0
  50. package/{scripts → src}/importer/state/__init__.py +0 -0
  51. package/{scripts → src}/importer/state/state_manager.py +0 -0
  52. package/{scripts → src}/importer/storage/__init__.py +0 -0
  53. package/{scripts → src}/importer/storage/qdrant_storage.py +0 -0
  54. package/{scripts → src}/importer/utils/__init__.py +0 -0
  55. package/{scripts → src}/importer/utils/logger.py +0 -0
  56. package/{scripts → src}/importer/utils/project_normalizer.py +0 -0
  57. package/{scripts → src/runtime}/delta-metadata-update-safe.py +0 -0
  58. package/{scripts → src/runtime}/delta-metadata-update.py +0 -0
  59. package/{scripts → src/runtime}/doctor.py +0 -0
  60. package/{scripts → src/runtime}/embedding_service.py +0 -0
  61. package/{scripts → src/runtime}/force-metadata-recovery.py +0 -0
  62. package/{scripts → src/runtime}/import-conversations-unified.py +0 -0
  63. package/{scripts → src/runtime}/import_strategies.py +0 -0
  64. package/{scripts → src/runtime}/message_processors.py +0 -0
  65. package/{scripts → src/runtime}/metadata_extractor.py +0 -0
  66. package/{scripts → src/runtime}/streaming-watcher.py +0 -0
  67. package/{scripts → src/runtime}/unified_state_manager.py +0 -0
  68. package/{scripts → src/runtime}/utils.py +0 -0
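The bulk of this release is a layout change: the importer package moves from package/scripts/importer/ to package/src/importer/, the operational scripts move from package/scripts/ to package/src/runtime/, and a set of debug and one-off analysis scripts is removed entirely. For integrations that shell out to these scripts by path, a minimal resolver sketch is shown below; the helper and its fallback order are illustrative assumptions based only on the listing above and are not shipped by claude-self-reflect.

    from pathlib import Path

    # Known locations of doctor.py across claude-self-reflect layouts,
    # newest first (paths taken from the file listing above). This resolver
    # is a hypothetical upgrade shim, not part of the package itself.
    _CANDIDATES = [
        Path("src/runtime/doctor.py"),  # 6.0.0 layout
        Path("scripts/doctor.py"),      # 5.x layout
    ]

    def resolve_doctor(package_root: Path) -> Path:
        """Return the first existing location of doctor.py under package_root."""
        for candidate in _CANDIDATES:
            path = package_root / candidate
            if path.exists():
                return path
        raise FileNotFoundError(f"doctor.py not found under {package_root}")

Called with the installed package root, resolve_doctor() lets an external wrapper keep working across the 5.0.7 to 6.0.0 upgrade without hard-coding either layout.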
package/scripts/ast_grep_final_analyzer.py
@@ -1,338 +0,0 @@
- #!/usr/bin/env python3
- """
- FINAL AST-GREP Analyzer with Unified Registry
- MANDATORY: Uses ast-grep-py + unified pattern registry
- NO regex fallbacks, NO simplifications
- """
-
- import ast_grep_py as sg
- from pathlib import Path
- from typing import Dict, List, Any, Optional
- from datetime import datetime
- import json
- import sys
-
- # Import the unified registry
- sys.path.append(str(Path(__file__).parent))
- from ast_grep_unified_registry import get_unified_registry
-
- class FinalASTGrepAnalyzer:
-     """
-     Final production-ready AST-GREP analyzer.
-     MANDATORY components:
-     - ast-grep-py for AST matching
-     - Unified pattern registry (custom + catalog)
-     - NO regex patterns
-     - NO fallbacks
-     """
-
-     def __init__(self):
-         """Initialize with unified registry."""
-         self.registry = get_unified_registry()
-         all_patterns = self.registry.get_all_patterns()
-
-         print(f"✅ Loaded unified registry with {len(all_patterns)} patterns")
-         print(f" Languages: Python, TypeScript, JavaScript")
-         print(f" Good patterns: {len(self.registry.get_good_patterns())}")
-         print(f" Bad patterns: {len(self.registry.get_bad_patterns())}")
-
-     def analyze_file(self, file_path: str) -> Dict[str, Any]:
-         """
-         Analyze a file using unified AST-GREP patterns.
-         Returns detailed quality metrics and pattern matches.
-         """
-         if not Path(file_path).exists():
-             raise FileNotFoundError(f"File not found: {file_path}")
-
-         # Detect language from file extension
-         language = self._detect_language(file_path)
-
-         with open(file_path, 'r', encoding='utf-8') as f:
-             content = f.read()
-
-         # Count lines of code for normalization
-         lines_of_code = len(content.splitlines())
-
-         # Create SgRoot for the detected language
-         sg_language = self._get_sg_language(language)
-         root = sg.SgRoot(content, sg_language)
-         node = root.root()
-
-         # Get patterns for this language
-         language_patterns = self.registry.get_patterns_by_language(language)
-
-         # Track all matches
-         all_matches = []
-         pattern_errors = []
-         matches_by_category = {}
-
-         # Process each pattern
-         for pattern_def in language_patterns:
-             try:
-                 pattern_str = pattern_def.get("pattern", "")
-                 if not pattern_str:
-                     continue
-
-                 # Find matches using ast-grep-py
-                 matches = node.find_all(pattern=pattern_str)
-
-                 if matches:
-                     category = pattern_def.get('category', 'unknown')
-                     if category not in matches_by_category:
-                         matches_by_category[category] = []
-
-                     match_info = {
-                         'category': category,
-                         'id': pattern_def['id'],
-                         'description': pattern_def.get('description', ''),
-                         'quality': pattern_def.get('quality', 'neutral'),
-                         'weight': pattern_def.get('weight', 0),
-                         'count': len(matches),
-                         'locations': [
-                             {
-                                 'line': m.range().start.line + 1,
-                                 'column': m.range().start.column,
-                                 'text': m.text()[:80]
-                             } for m in matches[:5] # First 5 examples
-                         ]
-                     }
-
-                     matches_by_category[category].append(match_info)
-                     all_matches.append(match_info)
-
-             except Exception as e:
-                 # Record all pattern errors for debugging
-                 pattern_errors.append({
-                     'pattern_id': pattern_def.get('id', '<unknown>'),
-                     'category': pattern_def.get('category', 'unknown'),
-                     'error': str(e)[:200]
-                 })
-
-         # Calculate quality score with LOC normalization
-         quality_score = self.registry.calculate_quality_score(all_matches, loc=lines_of_code)
-
-         # Count good vs bad patterns
-         good_matches = [m for m in all_matches if m['quality'] == 'good']
-         bad_matches = [m for m in all_matches if m['quality'] == 'bad']
-
-         good_count = sum(m['count'] for m in good_matches)
-         bad_count = sum(m['count'] for m in bad_matches)
-
-         return {
-             'file': file_path,
-             'timestamp': datetime.now().isoformat(),
-             'language': language,
-             'engine': 'ast-grep-py + unified registry',
-             'registry_info': {
-                 'total_patterns_available': len(language_patterns),
-                 'patterns_matched': len(all_matches),
-                 'patterns_errored': len(pattern_errors),
-                 'categories_found': list(matches_by_category.keys())
-             },
-             'matches_by_category': matches_by_category,
-             'all_matches': all_matches,
-             'errors': pattern_errors[:5], # First 5 errors only
-             'quality_metrics': {
-                 'quality_score': round(quality_score, 3),
-                 'good_patterns_found': good_count,
-                 'bad_patterns_found': bad_count,
-                 'unique_patterns_matched': len(all_matches),
-                 'total_issues': bad_count,
-                 'total_good_practices': good_count
-             },
-             'recommendations': self._generate_recommendations(matches_by_category, quality_score)
-         }
-
-     def _detect_language(self, file_path: str) -> str:
-         """Detect language from file extension."""
-         ext = Path(file_path).suffix.lower()
-         lang_map = {
-             '.py': 'python',
-             '.ts': 'typescript',
-             '.tsx': 'tsx',
-             '.js': 'javascript',
-             '.jsx': 'jsx'
-         }
-         return lang_map.get(ext, 'python')
-
-     def _get_sg_language(self, language: str) -> str:
-         """Get ast-grep language identifier."""
-         # ast-grep-py uses different language identifiers
-         sg_map = {
-             'python': 'python',
-             'typescript': 'typescript',
-             'tsx': 'tsx',
-             'javascript': 'javascript',
-             'jsx': 'jsx'
-         }
-         return sg_map.get(language, 'python')
-
-     def _generate_recommendations(self, matches: Dict, score: float) -> List[str]:
-         """Generate actionable recommendations based on matches."""
-         recommendations = []
-
-         if score < 0.3:
-             recommendations.append("🔴 Critical: Code quality needs immediate attention")
-         elif score < 0.6:
-             recommendations.append("🟡 Warning: Several anti-patterns detected")
-         else:
-             recommendations.append("🟢 Good: Code follows most best practices")
-
-         # Check for specific issues
-         for category, category_matches in matches.items():
-             if 'antipatterns' in category:
-                 total = sum(m['count'] for m in category_matches)
-                 if total > 0:
-                     recommendations.append(f"Fix {total} anti-patterns in {category}")
-
-             if 'logging' in category:
-                 prints = sum(m['count'] for m in category_matches if 'print' in m['id'])
-                 if prints > 0:
-                     recommendations.append(f"Replace {prints} print statements with logger")
-
-             if 'error' in category:
-                 bare = sum(m['count'] for m in category_matches if 'broad' in m['id'] or 'bare' in m['id'])
-                 if bare > 0:
-                     recommendations.append(f"Fix {bare} bare except clauses")
-
-         return recommendations
-
-     def generate_report(self, result: Dict[str, Any]) -> str:
-         """Generate a comprehensive analysis report."""
-         report = []
-         report.append("# AST-GREP Pattern Analysis Report")
-         report.append(f"\n**File**: {result['file']}")
-         report.append(f"**Language**: {result['language']}")
-         report.append(f"**Timestamp**: {result['timestamp']}")
-         report.append(f"**Engine**: {result['engine']}")
-
-         # Quality overview
-         metrics = result['quality_metrics']
-         score = metrics['quality_score']
-         emoji = "🟢" if score > 0.7 else "🟡" if score > 0.4 else "🔴"
-
-         report.append("\n## Quality Overview")
-         report.append(f"- **Quality Score**: {emoji} {score:.1%}")
-         report.append(f"- **Good Practices**: {metrics['good_patterns_found']}")
-         report.append(f"- **Issues Found**: {metrics['total_issues']}")
-         report.append(f"- **Unique Patterns Matched**: {metrics['unique_patterns_matched']}")
-
-         # Recommendations
-         if result['recommendations']:
-             report.append("\n## Recommendations")
-             for rec in result['recommendations']:
-                 report.append(f"- {rec}")
-
-         # Pattern matches by category
-         report.append("\n## Pattern Matches by Category")
-         for category, matches in result['matches_by_category'].items():
-             if matches:
-                 total = sum(m['count'] for m in matches)
-                 report.append(f"\n### {category} ({len(matches)} patterns, {total} matches)")
-
-                 # Sort by count descending
-                 sorted_matches = sorted(matches, key=lambda x: x['count'], reverse=True)
-
-                 for match in sorted_matches[:5]: # Top 5 per category
-                     quality_emoji = "✅" if match['quality'] == 'good' else "❌" if match['quality'] == 'bad' else "⚪"
-                     report.append(f"- {quality_emoji} **{match['id']}**: {match['count']} instances")
-                     report.append(f" - {match['description']}")
-                     if match['locations']:
-                         loc = match['locations'][0]
-                         report.append(f" - Example (line {loc['line']}): `{loc['text'][:50]}...`")
-
-         # Registry info
-         report.append("\n## Pattern Registry Statistics")
-         info = result['registry_info']
-         report.append(f"- **Patterns Available**: {info['total_patterns_available']}")
-         report.append(f"- **Patterns Matched**: {info['patterns_matched']}")
-         report.append(f"- **Categories Found**: {', '.join(info['categories_found'])}")
-
-         report.append("\n## Compliance")
-         report.append("✅ Using unified AST-GREP registry (custom + catalog)")
-         report.append("✅ Using ast-grep-py for AST matching")
-         report.append("✅ NO regex patterns or fallbacks")
-         report.append("✅ Production-ready pattern analysis")
-
-         return '\n'.join(report)
-
-
- def run_final_analysis(file_path=None):
-     """Run final AST-GREP analysis with unified registry."""
-     print("🚀 FINAL AST-GREP Analysis with Unified Registry")
-     print("=" * 60)
-
-     analyzer = FinalASTGrepAnalyzer()
-
-     # Use provided path or default
-     # Use relative path from script location
-     script_dir = Path(__file__).parent
-     default_path = script_dir.parent / "mcp-server" / "src" / "server.py"
-     server_path = file_path if file_path else str(default_path)
-
-     print(f"\nAnalyzing: {server_path}")
-     print("-" * 40)
-
-     try:
-         result = analyzer.analyze_file(server_path)
-
-         # Display results
-         metrics = result['quality_metrics']
-         score = metrics['quality_score']
-
-         print(f"\n📊 Analysis Results:")
-         print(f" Language: {result['language']}")
-         print(f" Quality Score: {score:.1%}")
-         print(f" Good Practices: {metrics['good_patterns_found']}")
-         print(f" Issues: {metrics['total_issues']}")
-         print(f" Patterns Matched: {metrics['unique_patterns_matched']}")
-
-         print(f"\n💡 Recommendations:")
-         for rec in result['recommendations']:
-             print(f" {rec}")
-
-         # Top issues
-         bad_patterns = [m for m in result['all_matches'] if m['quality'] == 'bad']
-         if bad_patterns:
-             print(f"\n⚠️ Top Issues to Fix:")
-             sorted_bad = sorted(bad_patterns, key=lambda x: x['count'] * abs(x['weight']), reverse=True)
-             for pattern in sorted_bad[:5]:
-                 print(f" - {pattern['id']}: {pattern['count']} instances")
-                 print(f" {pattern['description']}")
-
-         # Generate and save report
-         report = analyzer.generate_report(result)
-         report_path = script_dir / "final_analysis_report.md"
-         with open(report_path, 'w') as f:
-             f.write(report)
-
-         print(f"\n📝 Full report saved to: {report_path}")
-
-         # Save JSON results
-         json_path = script_dir / "final_analysis_result.json"
-         with open(json_path, 'w') as f:
-             json.dump(result, f, indent=2)
-
-         print(f"📊 JSON results saved to: {json_path}")
-
-         print("\n✅ Final AST-GREP analysis complete!")
-         print(" - Unified registry with 41 patterns")
-         print(" - Support for Python, TypeScript, JavaScript")
-         print(" - Ready for production integration")
-
-         return result
-
-     except Exception as e:
-         print(f"\n❌ Analysis failed: {e}")
-         raise
-
-
- if __name__ == "__main__":
-     import sys
-     if len(sys.argv) > 1:
-         # Use provided file path
-         file_path = sys.argv[1]
-     else:
-         # Default to server.py
-         file_path = str(default_path) # Use the same default path from above
-     run_final_analysis(file_path)
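The removed ast_grep_final_analyzer.py shown above depended only on ast-grep-py and its companion ast_grep_unified_registry module, which this release also deletes (item 22 in the listing). Per its __main__ block it was invoked as python ast_grep_final_analyzer.py <file>. A minimal sketch of the equivalent programmatic use under 5.x follows; it assumes the old scripts/ directory is still on disk and importable, and none of it applies to 6.0.0.

    import sys

    # Make the 5.x scripts directory importable; adjust this path to wherever
    # the 5.0.7 package is installed. Illustrative only.
    sys.path.append("node_modules/claude-self-reflect/scripts")

    # Both modules were removed in 6.0.0.
    from ast_grep_final_analyzer import FinalASTGrepAnalyzer

    analyzer = FinalASTGrepAnalyzer()
    result = analyzer.analyze_file("mcp-server/src/server.py")  # the script's default target

    print(result["quality_metrics"]["quality_score"])
    print(analyzer.generate_report(result))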