claude-self-reflect 3.2.4 → 3.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/.claude/agents/claude-self-reflect-test.md +992 -510
  2. package/.claude/agents/reflection-specialist.md +59 -3
  3. package/README.md +14 -5
  4. package/installer/cli.js +16 -0
  5. package/installer/postinstall.js +14 -0
  6. package/installer/statusline-setup.js +289 -0
  7. package/mcp-server/run-mcp.sh +73 -5
  8. package/mcp-server/src/app_context.py +64 -0
  9. package/mcp-server/src/config.py +57 -0
  10. package/mcp-server/src/connection_pool.py +286 -0
  11. package/mcp-server/src/decay_manager.py +106 -0
  12. package/mcp-server/src/embedding_manager.py +64 -40
  13. package/mcp-server/src/embeddings_old.py +141 -0
  14. package/mcp-server/src/models.py +64 -0
  15. package/mcp-server/src/parallel_search.py +305 -0
  16. package/mcp-server/src/project_resolver.py +5 -0
  17. package/mcp-server/src/reflection_tools.py +211 -0
  18. package/mcp-server/src/rich_formatting.py +196 -0
  19. package/mcp-server/src/search_tools.py +874 -0
  20. package/mcp-server/src/server.py +127 -1720
  21. package/mcp-server/src/temporal_design.py +132 -0
  22. package/mcp-server/src/temporal_tools.py +604 -0
  23. package/mcp-server/src/temporal_utils.py +384 -0
  24. package/mcp-server/src/utils.py +150 -67
  25. package/package.json +15 -1
  26. package/scripts/add-timestamp-indexes.py +134 -0
  27. package/scripts/ast_grep_final_analyzer.py +325 -0
  28. package/scripts/ast_grep_unified_registry.py +556 -0
  29. package/scripts/check-collections.py +29 -0
  30. package/scripts/csr-status +366 -0
  31. package/scripts/debug-august-parsing.py +76 -0
  32. package/scripts/debug-import-single.py +91 -0
  33. package/scripts/debug-project-resolver.py +82 -0
  34. package/scripts/debug-temporal-tools.py +135 -0
  35. package/scripts/delta-metadata-update.py +547 -0
  36. package/scripts/import-conversations-unified.py +157 -25
  37. package/scripts/precompact-hook.sh +33 -0
  38. package/scripts/session_quality_tracker.py +481 -0
  39. package/scripts/streaming-watcher.py +1578 -0
  40. package/scripts/update_patterns.py +334 -0
  41. package/scripts/utils.py +39 -0
package/scripts/update_patterns.py (new file)
@@ -0,0 +1,334 @@
+#!/usr/bin/env python3
+"""
+Auto-update AST-GREP patterns from official catalog.
+MANDATORY feature - runs on every import to ensure latest patterns.
+Fast execution: <1 second with caching.
+"""
+
+import os
+import json
+import yaml
+import re
+import hashlib
+from pathlib import Path
+from datetime import datetime, timedelta
+from typing import Dict, List, Any, Optional
+import subprocess
+import tempfile
+import shutil
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Configuration
+CACHE_DIR = Path.home() / ".claude-self-reflect" / "cache" / "patterns"
+CACHE_FILE = CACHE_DIR / "pattern_cache.json"
+REGISTRY_FILE = Path(__file__).parent / "unified_registry.json"
+CATALOG_REPO = "https://github.com/ast-grep/ast-grep.github.io.git"
+CATALOG_PATH = "website/catalog"
+CACHE_HOURS = 24  # Check for updates once per day
+
+# Ensure cache directory exists
+CACHE_DIR.mkdir(parents=True, exist_ok=True)
+
+
+class PatternUpdater:
+    """Updates AST-GREP patterns from official catalog."""
+
+    def __init__(self):
+        self.patterns = {}
+        self.stats = {
+            'total_patterns': 0,
+            'new_patterns': 0,
+            'updated_patterns': 0,
+            'languages': set()
+        }
+
+    def should_update(self) -> bool:
+        """Check if patterns need updating based on cache age."""
+        if not CACHE_FILE.exists():
+            return True
+
+        try:
+            with open(CACHE_FILE, 'r') as f:
+                cache = json.load(f)
+
+            cached_time = datetime.fromisoformat(cache.get('timestamp', '2000-01-01'))
+            if datetime.now() - cached_time > timedelta(hours=CACHE_HOURS):
+                return True
+
+            # Also check if registry file is missing
+            if not REGISTRY_FILE.exists():
+                return True
+
+            return False
+        except:
+            return True
+
+    def fetch_catalog_patterns(self) -> Dict[str, List[Dict]]:
+        """Fetch latest patterns from AST-GREP GitHub catalog."""
+        patterns_by_lang = {}
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            repo_path = Path(tmpdir) / "ast-grep-catalog"
+
+            try:
+                # Clone or pull the repository (shallow clone for speed)
+                logger.info("Fetching latest AST-GREP patterns from GitHub...")
+                # Use shorter timeout to avoid blocking analysis
+                timeout = int(os.environ.get("AST_GREP_CATALOG_TIMEOUT", "10"))
+                subprocess.run(
+                    ["git", "clone", "--depth", "1", "--single-branch",
+                     CATALOG_REPO, str(repo_path)],
+                    check=True,
+                    capture_output=True,
+                    timeout=timeout
+                )
+
+                catalog_dir = repo_path / CATALOG_PATH
+
+                # Process each language directory
+                for lang_dir in catalog_dir.iterdir():
+                    if lang_dir.is_dir() and not lang_dir.name.startswith('.'):
+                        language = lang_dir.name
+                        patterns_by_lang[language] = []
+                        self.stats['languages'].add(language)
+
+                        # Process each pattern file
+                        for pattern_file in lang_dir.glob("*.md"):
+                            if pattern_file.name == "index.md":
+                                continue
+
+                            pattern = self._parse_pattern_file(pattern_file, language)
+                            if pattern:
+                                patterns_by_lang[language].append(pattern)
+                                self.stats['total_patterns'] += 1
+
+                logger.info(f"Fetched {self.stats['total_patterns']} patterns for {len(self.stats['languages'])} languages")
+
+            except subprocess.TimeoutExpired:
+                logger.warning("GitHub fetch timed out, using cached patterns")
+                return {}
+            except Exception as e:
+                logger.warning(f"Failed to fetch from GitHub: {e}, using cached patterns")
+                return {}
+
+        return patterns_by_lang
+
+    def _parse_pattern_file(self, file_path: Path, language: str) -> Optional[Dict]:
+        """Parse a single pattern file from the catalog."""
+        try:
+            content = file_path.read_text()
+
+            # Extract YAML block
+            yaml_match = re.search(r'```yaml\n(.*?)\n```', content, re.DOTALL)
+            if not yaml_match:
+                return None
+
+            yaml_content = yaml_match.group(1)
+            pattern_data = yaml.safe_load(yaml_content)
+
+            # Extract metadata
+            title_match = re.search(r'^## (.+?)(?:\s*<Badge.*?>)?$', content, re.MULTILINE)
+            title = title_match.group(1).strip() if title_match else file_path.stem
+
+            # Extract description
+            desc_match = re.search(r'### Description\n\n(.+?)(?=\n###|\n```|\Z)', content, re.DOTALL)
+            description = desc_match.group(1).strip() if desc_match else ""
+
+            # Build pattern object
+            pattern = {
+                'id': pattern_data.get('id', file_path.stem),
+                'title': title,
+                'description': description,
+                'language': pattern_data.get('language', language),
+                'file': file_path.name,
+                'has_fix': 'fix' in pattern_data
+            }
+
+            # Extract rule
+            if 'rule' in pattern_data:
+                rule = pattern_data['rule']
+                if isinstance(rule, dict):
+                    if 'pattern' in rule:
+                        pattern['pattern'] = rule['pattern']
+                    if 'any' in rule:
+                        pattern['patterns'] = rule['any']
+                        pattern['match_type'] = 'any'
+                    if 'all' in rule:
+                        pattern['patterns'] = rule['all']
+                        pattern['match_type'] = 'all'
+                    if 'inside' in rule:
+                        pattern['inside'] = rule['inside']
+
+            # Add fix if present
+            if 'fix' in pattern_data:
+                pattern['fix'] = pattern_data['fix']
+
+            # Determine quality based on type
+            pattern['quality'] = self._determine_quality(pattern)
+            pattern['weight'] = self._calculate_weight(pattern)
+
+            return pattern
+
+        except Exception as e:
+            logger.debug(f"Failed to parse {file_path}: {e}")
+            return None
+
+    def _determine_quality(self, pattern: Dict) -> str:
+        """Determine pattern quality."""
+        if pattern.get('has_fix'):
+            return 'good'
+
+        # Patterns that detect issues are "bad" (they find bad code)
+        if any(word in pattern.get('id', '').lower()
+               for word in ['no-', 'missing-', 'avoid-', 'deprecated']):
+            return 'bad'
+
+        return 'neutral'
+
+    def _calculate_weight(self, pattern: Dict) -> int:
+        """Calculate pattern weight for scoring."""
+        quality = pattern.get('quality', 'neutral')
+        weights = {
+            'good': 3,
+            'neutral': 1,
+            'bad': -3
+        }
+        return weights.get(quality, 1)
+
+    def merge_with_custom_patterns(self, catalog_patterns: Dict) -> Dict:
+        """Merge catalog patterns with custom local patterns."""
+        # Load existing registry if it exists
+        existing_patterns = {}
+        if REGISTRY_FILE.exists():
+            try:
+                with open(REGISTRY_FILE, 'r') as f:
+                    registry = json.load(f)
+                    existing_patterns = registry.get('patterns', {})
+            except:
+                pass
+
+        # Keep custom Python patterns (our manual additions)
+        custom_categories = [
+            'python_async', 'python_error_handling', 'python_logging',
+            'python_typing', 'python_antipatterns', 'python_qdrant', 'python_mcp'
+        ]
+
+        merged = {}
+        for category in custom_categories:
+            if category in existing_patterns:
+                merged[category] = existing_patterns[category]
+
+        # Add catalog patterns
+        for language, patterns in catalog_patterns.items():
+            category_name = f"{language}_catalog"
+            merged[category_name] = patterns
+
+        return merged
+
+    def save_registry(self, patterns: Dict):
+        """Save updated pattern registry."""
+        registry = {
+            'source': 'unified-ast-grep-auto-updated',
+            'version': '3.0.0',
+            'timestamp': datetime.now().isoformat(),
+            'patterns': patterns,
+            'stats': {
+                'total_patterns': sum(len(p) for p in patterns.values()),
+                'categories': list(patterns.keys()),
+                'languages': list(self.stats['languages']),
+                'last_update': datetime.now().isoformat()
+            }
+        }
+
+        with open(REGISTRY_FILE, 'w') as f:
+            json.dump(registry, f, indent=2)
+
+        logger.info(f"Saved {registry['stats']['total_patterns']} patterns to {REGISTRY_FILE}")
+
+    def update_cache(self):
+        """Update cache file with timestamp."""
+        cache_data = {
+            'timestamp': datetime.now().isoformat(),
+            'stats': {
+                'total_patterns': self.stats['total_patterns'],
+                'languages': list(self.stats['languages'])
+            }
+        }
+
+        with open(CACHE_FILE, 'w') as f:
+            json.dump(cache_data, f)
+
+    def update_patterns(self, force: bool = False) -> bool:
+        """Main update function - FAST with caching."""
+        # Check if update needed (< 10ms)
+        if not force and not self.should_update():
+            logger.debug("Patterns are up to date (cached)")
+            return False
+
+        logger.info("Updating AST-GREP patterns...")
+
+        # Fetch from GitHub (only when cache expired)
+        catalog_patterns = self.fetch_catalog_patterns()
+
+        if catalog_patterns:
+            # Merge with custom patterns
+            merged_patterns = self.merge_with_custom_patterns(catalog_patterns)
+
+            # Save updated registry
+            self.save_registry(merged_patterns)
+
+            # Update cache timestamp
+            self.update_cache()
+
+            logger.info(f"✅ Pattern update complete: {self.stats['total_patterns']} patterns")
+            return True
+        else:
+            logger.info("Using existing patterns (GitHub unavailable)")
+            return False
+
+
+def check_and_update_patterns(force: bool = False) -> bool:
+    """
+    Quick pattern update check - MANDATORY but FAST.
+    Called on every import, uses 24-hour cache.
+    """
+    updater = PatternUpdater()
+    return updater.update_patterns(force=force)
+
+
+def install_time_update():
+    """Run during package installation - forces update."""
+    logger.info("Installing AST-GREP patterns...")
+    updater = PatternUpdater()
+    updater.update_patterns(force=True)
+
+
+if __name__ == "__main__":
+    import sys
+
+    # Allow --force flag for manual updates
+    force = "--force" in sys.argv
+
+    if force:
+        print("Forcing pattern update from GitHub...")
+    else:
+        print("Checking for pattern updates (24-hour cache)...")
+
+    success = check_and_update_patterns(force=force)
+
+    if success:
+        print("✅ Patterns updated successfully")
+    else:
+        print("✅ Patterns are up to date")
+
+    # Show stats
+    if REGISTRY_FILE.exists():
+        with open(REGISTRY_FILE, 'r') as f:
+            registry = json.load(f)
+        stats = registry.get('stats', {})
+        print(f"  Total patterns: {stats.get('total_patterns', 0)}")
+        print(f"  Languages: {', '.join(stats.get('languages', []))}")
+        print(f"  Last update: {stats.get('last_update', 'Unknown')}")
package/scripts/utils.py (new file)
@@ -0,0 +1,39 @@
+"""Shared utilities for claude-self-reflect MCP server and scripts."""
+
+from pathlib import Path
+
+
+def normalize_project_name(project_path: str, _depth: int = 0) -> str:
+    """
+    Simplified project name normalization for consistent hashing.
+
+    Examples:
+        '/Users/name/.claude/projects/-Users-name-projects-myproject' -> 'myproject'
+        '-Users-name-projects-myproject' -> 'myproject'
+        '/path/to/myproject' -> 'myproject'
+        'myproject' -> 'myproject'
+
+    Args:
+        project_path: Project path or name in any format
+        _depth: Internal recursion depth counter (for backwards compatibility)
+
+    Returns:
+        Normalized project name suitable for consistent hashing
+    """
+    if not project_path:
+        return ""
+
+    path = Path(project_path.rstrip('/'))
+
+    # Extract the final directory name
+    final_component = path.name
+
+    # If it's Claude's dash-separated format, extract project name
+    if final_component.startswith('-') and 'projects' in final_component:
+        # Find the last occurrence of 'projects-' to handle edge cases
+        idx = final_component.rfind('projects-')
+        if idx != -1:
+            return final_component[idx + len('projects-'):]
+
+    # For regular paths, just return the directory name
+    return final_component if final_component else path.parent.name
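Note: normalize_project_name() collapses the various path formats Claude writes into a single project name so that hashing stays consistent; the _depth parameter is retained only for backwards compatibility with older call sites, per its docstring. A quick illustrative check of the documented mappings, assuming scripts/utils.py is importable as utils (this check is not part of the package):

from utils import normalize_project_name

assert normalize_project_name("/Users/name/.claude/projects/-Users-name-projects-myproject") == "myproject"
assert normalize_project_name("-Users-name-projects-myproject") == "myproject"
assert normalize_project_name("/path/to/myproject") == "myproject"
assert normalize_project_name("myproject") == "myproject"
assert normalize_project_name("") == ""
print("normalize_project_name mappings hold")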