claude-self-reflect 3.3.1 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/claude-self-reflect-test.md +107 -8
- package/.claude/agents/quality-fixer.md +314 -0
- package/.claude/agents/reflection-specialist.md +40 -1
- package/mcp-server/run-mcp.sh +20 -6
- package/mcp-server/src/code_reload_tool.py +271 -0
- package/mcp-server/src/embedding_manager.py +60 -26
- package/mcp-server/src/enhanced_tool_registry.py +407 -0
- package/mcp-server/src/mode_switch_tool.py +181 -0
- package/mcp-server/src/parallel_search.py +8 -3
- package/mcp-server/src/project_resolver.py +20 -2
- package/mcp-server/src/reflection_tools.py +50 -8
- package/mcp-server/src/rich_formatting.py +103 -0
- package/mcp-server/src/search_tools.py +90 -37
- package/mcp-server/src/security_patches.py +555 -0
- package/mcp-server/src/server.py +318 -240
- package/mcp-server/src/status.py +13 -8
- package/mcp-server/src/test_quality.py +153 -0
- package/package.json +1 -1
- package/scripts/ast_grep_final_analyzer.py +5 -2
- package/scripts/ast_grep_unified_registry.py +170 -16
- package/scripts/csr-status +190 -45
- package/scripts/import-conversations-unified.py +10 -5
- package/scripts/session_quality_tracker.py +221 -41
package/mcp-server/src/code_reload_tool.py
@@ -0,0 +1,271 @@
+"""Runtime code reloading tool for MCP server development."""
+
+import os
+import sys
+import importlib
+import logging
+from pathlib import Path
+from typing import Dict, List, Optional, Literal
+from fastmcp import Context
+from pydantic import Field
+import hashlib
+import json
+
+logger = logging.getLogger(__name__)
+
+
+class CodeReloader:
+    """Handles runtime code reloading for the MCP server."""
+
+    def __init__(self):
+        """Initialize the code reloader."""
+        self.module_hashes: Dict[str, str] = {}
+        self.reload_history: List[Dict] = []
+        self.cache_dir = Path.home() / '.claude-self-reflect' / 'reload_cache'
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+        # Test comment: Hot reload test at 2025-09-15
+        logger.info("CodeReloader initialized with hot reload support")
+
+    def _get_file_hash(self, filepath: Path) -> str:
+        """Get SHA256 hash of a file."""
+        with open(filepath, 'rb') as f:
+            return hashlib.sha256(f.read()).hexdigest()
+
+    def _get_changed_modules(self) -> List[str]:
+        """Detect which modules have changed since last check."""
+        changed = []
+        src_dir = Path(__file__).parent
+
+        for py_file in src_dir.glob("*.py"):
+            if py_file.name == "__pycache__":
+                continue
+
+            module_name = f"src.{py_file.stem}"
+            current_hash = self._get_file_hash(py_file)
+
+            if module_name in self.module_hashes:
+                if self.module_hashes[module_name] != current_hash:
+                    changed.append(module_name)
+
+            self.module_hashes[module_name] = current_hash
+
+        return changed
+
+    async def reload_modules(
+        self,
+        ctx: Context,
+        modules: Optional[List[str]] = None,
+        auto_detect: bool = True
+    ) -> str:
+        """Reload Python modules at runtime without restarting the MCP server."""
+
+        await ctx.debug("Starting code reload process...")
+
+        try:
+            # Track what we're reloading
+            reload_targets = []
+
+            if auto_detect:
+                # Detect changed modules
+                changed = self._get_changed_modules()
+                if changed:
+                    reload_targets.extend(changed)
+                    await ctx.debug(f"Auto-detected changes in: {changed}")
+
+            if modules:
+                # Add explicitly requested modules
+                reload_targets.extend(modules)
+
+            if not reload_targets:
+                return "📊 No modules to reload. All code is up to date!"
+
+            # Perform the reload
+            reloaded = []
+            failed = []
+
+            for module_name in reload_targets:
+                try:
+                    # SECURITY FIX: Validate module is in whitelist
+                    from .security_patches import ModuleWhitelist
+                    if not ModuleWhitelist.is_allowed_module(module_name):
+                        logger.warning(f"Module not in whitelist, skipping: {module_name}")
+                        failed.append((module_name, "Module not in whitelist"))
+                        continue
+
+                    if module_name in sys.modules:
+                        # Store old module reference for rollback
+                        old_module = sys.modules[module_name]
+
+                        # Reload the module
+                        logger.info(f"Reloading module: {module_name}")
+                        reloaded_module = importlib.reload(sys.modules[module_name])
+
+                        # Update any global references if needed
+                        self._update_global_references(module_name, reloaded_module)
+
+                        reloaded.append(module_name)
+                        await ctx.debug(f"✅ Reloaded: {module_name}")
+                    else:
+                        # Module not loaded yet, import it
+                        importlib.import_module(module_name)
+                        reloaded.append(module_name)
+                        await ctx.debug(f"✅ Imported: {module_name}")
+
+                except Exception as e:
+                    logger.error(f"Failed to reload {module_name}: {e}", exc_info=True)
+                    failed.append((module_name, str(e)))
+                    await ctx.debug(f"❌ Failed: {module_name} - {e}")
+
+            # Record reload history
+            self.reload_history.append({
+                "timestamp": os.environ.get('MCP_REQUEST_ID', 'unknown'),
+                "reloaded": reloaded,
+                "failed": failed
+            })
+
+            # Build response
+            response = "🔄 **Code Reload Results**\n\n"
+
+            if reloaded:
+                response += f"**Successfully Reloaded ({len(reloaded)}):**\n"
+                for module in reloaded:
+                    response += f"- ✅ {module}\n"
+                response += "\n"
+
+            if failed:
+                response += f"**Failed to Reload ({len(failed)}):**\n"
+                for module, error in failed:
+                    response += f"- ❌ {module}: {error}\n"
+                response += "\n"
+
+            response += "**Important Notes:**\n"
+            response += "- Class instances created before reload keep old code\n"
+            response += "- New requests will use the reloaded code\n"
+            response += "- Some changes may require full restart (e.g., new tools)\n"
+
+            return response
+
+        except Exception as e:
+            logger.error(f"Code reload failed: {e}", exc_info=True)
+            return f"❌ Code reload failed: {str(e)}"
+
+    def _update_global_references(self, module_name: str, new_module):
+        """Update global references after module reload."""
+        # This is where we'd update any global singleton references
+        # For example, if we reload embedding_manager, we might need to
+        # update the global embedding manager instance
+
+        if module_name == "src.embedding_manager":
+            # Update the global embedding manager if it exists
+            if hasattr(new_module, 'get_embedding_manager'):
+                # The singleton pattern should handle this automatically
+                pass
+
+        elif module_name == "src.search_tools":
+            # Search tools might need to refresh their references
+            pass
+
+        # Add more specific updates as needed
+
+    async def get_reload_status(self, ctx: Context) -> str:
+        """Get the current reload status and history."""
+
+        try:
+            # Check for changed files
+            changed = self._get_changed_modules()
+
+            response = "📊 **Code Reload Status**\n\n"
+
+            response += "**Module Status:**\n"
+            if changed:
+                response += f"⚠️ {len(changed)} modules have pending changes:\n"
+                for module in changed:
+                    response += f"  - {module}\n"
+            else:
+                response += "✅ All modules are up to date\n"
+
+            response += f"\n**Tracked Modules:** {len(self.module_hashes)}\n"
+
+            if self.reload_history:
+                response += f"\n**Recent Reloads:**\n"
+                for entry in self.reload_history[-5:]: # Last 5 reloads
+                    response += f"- {entry['timestamp']}: "
+                    response += f"{len(entry['reloaded'])} success, "
+                    response += f"{len(entry['failed'])} failed\n"
+
+            return response
+
+        except Exception as e:
+            logger.error(f"Failed to get reload status: {e}", exc_info=True)
+            return f"❌ Failed to get reload status: {str(e)}"
+
+    async def clear_python_cache(self, ctx: Context) -> str:
+        """Clear Python's module cache and bytecode."""
+
+        try:
+            await ctx.debug("Clearing Python cache...")
+
+            # Clear __pycache__ directories
+            src_dir = Path(__file__).parent
+            pycache_dirs = list(src_dir.rglob("__pycache__"))
+
+            for pycache in pycache_dirs:
+                if pycache.is_dir():
+                    import shutil
+                    shutil.rmtree(pycache)
+                    await ctx.debug(f"Removed: {pycache}")
+
+            # Clear import cache
+            importlib.invalidate_caches()
+
+            return f"✅ Cleared {len(pycache_dirs)} __pycache__ directories and invalidated import caches"
+
+        except Exception as e:
+            logger.error(f"Failed to clear cache: {e}", exc_info=True)
+            return f"❌ Failed to clear cache: {str(e)}"
+
+
+def register_code_reload_tool(mcp, get_embedding_manager):
+    """Register the code reloading tool with the MCP server."""
+
+    reloader = CodeReloader()
+
+    @mcp.tool()
+    async def reload_code(
+        ctx: Context,
+        modules: Optional[List[str]] = Field(
+            default=None,
+            description="Specific modules to reload (e.g., ['src.search_tools', 'src.embedding_manager'])"
+        ),
+        auto_detect: bool = Field(
+            default=True,
+            description="Automatically detect and reload changed modules"
+        )
+    ) -> str:
+        """Reload Python code at runtime without restarting the MCP server.
+
+        This allows hot-reloading of code changes during development, similar to
+        the mode switching capability. Changes take effect for new requests.
+
+        Note: Some changes (new tools, startup configuration) still require restart.
+        """
+        return await reloader.reload_modules(ctx, modules, auto_detect)
+
+    @mcp.tool()
+    async def reload_status(ctx: Context) -> str:
+        """Check which modules have pending changes and reload history.
+
+        Shows which files have been modified since last reload and
+        the history of recent reload operations.
+        """
+        return await reloader.get_reload_status(ctx)
+
+    @mcp.tool()
+    async def clear_module_cache(ctx: Context) -> str:
+        """Clear Python's module cache and __pycache__ directories.
+
+        Useful when reload isn't working due to cached bytecode.
+        """
+        return await reloader.clear_python_cache(ctx)
+
+    logger.info("Code reload tools registered successfully")
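The hunk above registers three MCP tools (reload_code, reload_status, clear_module_cache) through register_code_reload_tool. For orientation only, the sketch below shows one way this could be wired into a FastMCP server; it is not taken from this diff, and the get_embedding_manager import is an assumption based on the attribute that _update_global_references probes for.

# Hypothetical wiring sketch (not part of the package).
# Assumes a FastMCP server instance and an exported get_embedding_manager accessor.
from fastmcp import FastMCP

from src.code_reload_tool import register_code_reload_tool
from src.embedding_manager import get_embedding_manager  # assumed export

mcp = FastMCP("claude-self-reflect")
register_code_reload_tool(mcp, get_embedding_manager)
# The decorated functions become MCP tools:
# reload_code, reload_status, and clear_module_cache.

if __name__ == "__main__":
    mcp.run()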
package/mcp-server/src/embedding_manager.py
@@ -50,38 +50,60 @@ class EmbeddingManager:
             logger.warning(f"Error cleaning locks: {e}")
 
     def initialize(self) -> bool:
-        """Initialize
-        logger.info("Initializing embedding manager
+        """Initialize embedding models based on configuration."""
+        logger.info("Initializing embedding manager...")
 
         # Clean up any stale locks first
         self._clean_stale_locks()
 
-
-        local_success = self._try_initialize_local()
+        local_success = False
         voyage_success = False
 
-
+        # Only initialize models we actually need
+        if not self.prefer_local and self.voyage_key:
+            # Cloud mode: Skip local initialization to avoid error messages
+            logger.info("Cloud mode requested, skipping local model initialization")
             voyage_success = self._try_initialize_voyage()
-
-
-
-
-
-
-
-
-
-            self.model_type = 'local'
-            logger.info("Default model set to LOCAL embeddings (fallback)")
+            if voyage_success:
+                self.model_type = 'voyage'
+                logger.info("Using VOYAGE embeddings (1024 dimensions)")
+            else:
+                # Fallback to local if voyage fails
+                logger.warning("Voyage initialization failed, falling back to local")
+                local_success = self._try_initialize_local()
+                if local_success:
+                    self.model_type = 'local'
         else:
-
-
+            # Local mode or mixed mode support
+            local_success = self._try_initialize_local()
+
+            # Only initialize voyage if NOT preferring local
+            if self.voyage_key and not self.prefer_local:
+                voyage_success = self._try_initialize_voyage()
+
+            # Set default model type - prefer_local takes priority
+            if self.prefer_local and local_success:
+                self.model_type = 'local'
+                logger.info("Using LOCAL embeddings (384 dimensions) - preferred")
+            elif voyage_success:
+                self.model_type = 'voyage'
+                logger.info("Using VOYAGE embeddings (1024 dimensions)")
+            elif local_success:
+                self.model_type = 'local'
+                logger.info("Using LOCAL embeddings (fallback)")
+            else:
+                logger.error("Failed to initialize any embedding model")
+                return False
 
         logger.info(f"Embedding models available - Local: {local_success}, Voyage: {voyage_success}")
         return True
 
     def _try_initialize_local(self) -> bool:
         """Try to initialize local FastEmbed model with timeout and optimizations."""
+        return self.try_initialize_local()
+
+    def try_initialize_local(self) -> bool:
+        """Public method to initialize local FastEmbed model with timeout and optimizations."""
         try:
             logger.info(f"Attempting to load local model: {self.embedding_model}")
 
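The rewritten initialize() above selects a backend in a fixed order: cloud mode (Voyage key present and local not preferred) tries Voyage first and falls back to local, while local/mixed mode prefers local and only then considers Voyage. The sketch below restates that order as a standalone function for illustration; it is a simplification, not code from the package.

# Simplified restatement of the selection order in initialize() above.
from typing import Optional

def pick_model_type(prefer_local: bool, voyage_key: Optional[str],
                    local_ok: bool, voyage_ok: bool) -> Optional[str]:
    if not prefer_local and voyage_key:
        # Cloud mode: Voyage first, local only as a fallback
        if voyage_ok:
            return "voyage"
        return "local" if local_ok else None
    # Local or mixed mode: prefer_local takes priority
    if prefer_local and local_ok:
        return "local"
    if voyage_ok:
        return "voyage"
    if local_ok:
        return "local"
    return None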
@@ -137,16 +159,24 @@ class EmbeddingManager:
                     error = e
                     logger.error(f"Failed to initialize local model: {e}")
 
-            #
-
-
-
-
-
-
+            # SECURITY FIX: Use ThreadPoolExecutor with proper timeout handling
+            from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError
+
+            # Create executor and manage lifecycle explicitly to avoid blocking on timeout
+            executor = ThreadPoolExecutor(max_workers=1)
+            future = executor.submit(init_model)
+            try:
+                future.result(timeout=self.download_timeout)
+                executor.shutdown(wait=True)
+            except FuturesTimeoutError:
                 logger.error(f"Model initialization timed out after {self.download_timeout}s")
                 logger.info("Tip: Set FASTEMBED_SKIP_HUGGINGFACE=true to use alternative download sources")
-                #
+                # Don't wait for the hung task
+                executor.shutdown(wait=False)
+                return False
+            except Exception as e:
+                logger.error(f"Model initialization failed: {e}")
+                executor.shutdown(wait=True)
                 return False
 
             return success
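The timeout handling above submits the blocking model download to a single-worker executor, waits up to download_timeout, and abandons the worker with shutdown(wait=False) if it hangs, so the caller can report failure immediately instead of joining a stuck thread. A minimal standalone illustration of that pattern, not taken from the package:

# Minimal illustration of the submit/timeout/abandon pattern used above.
import time
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError

def slow_download() -> str:
    time.sleep(10)  # stands in for a hung model download
    return "done"

def load_with_timeout(timeout: float = 2.0) -> bool:
    executor = ThreadPoolExecutor(max_workers=1)
    future = executor.submit(slow_download)
    try:
        future.result(timeout=timeout)
        executor.shutdown(wait=True)
        return True
    except FuturesTimeoutError:
        # Don't join the hung worker; report failure right away.
        executor.shutdown(wait=False)
        return False

if __name__ == "__main__":
    # Prints False after ~2s; the abandoned worker finishes in the background.
    print(load_with_timeout())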
@@ -160,6 +190,10 @@ class EmbeddingManager:
 
     def _try_initialize_voyage(self) -> bool:
         """Try to initialize Voyage AI client."""
+        return self.try_initialize_voyage()
+
+    def try_initialize_voyage(self) -> bool:
+        """Public method to initialize Voyage AI client."""
         try:
             logger.info("Attempting to initialize Voyage AI...")
             import voyageai
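Both _try_initialize_local and _try_initialize_voyage now keep their private names as thin wrappers around new public methods, so other components can trigger re-initialization directly (plausibly the new mode_switch_tool.py, though that is an inference rather than something the diff states). A generic sketch of that delegation shape:

# Generic shape of the delegation introduced in the two hunks above.
class Manager:
    def _try_initialize_backend(self) -> bool:
        # Old private entry point kept for existing internal callers.
        return self.try_initialize_backend()

    def try_initialize_backend(self) -> bool:
        """Public method that external tools can call to (re)initialize."""
        # ... real initialization work would go here ...
        return True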