claude-self-reflect 2.4.6 → 2.4.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/Dockerfile.watcher
CHANGED
@@ -23,23 +23,9 @@ RUN useradd -m -u 1000 watcher
 # Create scripts directory and copy required files
 RUN mkdir -p /scripts

-# Copy
+# Copy all necessary scripts
 COPY scripts/import-conversations-unified.py /scripts/
-
-# Create a minimal watcher script
-RUN echo '#!/usr/bin/env python3\n\
-import time\n\
-import subprocess\n\
-import os\n\
-\n\
-while True:\n\
-    try:\n\
-        print("Running import...", flush=True)\n\
-        subprocess.run(["/usr/local/bin/python", "/scripts/import-conversations-unified.py"], check=True)\n\
-        print("Import complete. Sleeping for 60 seconds...", flush=True)\n\
-    except Exception as e:\n\
-        print(f"Error: {e}", flush=True)\n\
-    time.sleep(60)\n' > /scripts/import-watcher.py
+COPY scripts/import-watcher.py /scripts/

 RUN chmod +x /scripts/*.py

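The watcher now ships as a real file instead of being assembled from a `RUN echo` one-liner, which drops the brittle `\n\` escaping and makes the script testable outside the image. The new `scripts/import-watcher.py` itself is not shown in this diff; a minimal sketch, assuming it keeps the behavior of the inline version deleted above:

#!/usr/bin/env python3
# Sketch of scripts/import-watcher.py, reconstructed from the inline script
# removed from the Dockerfile above; the shipped file may differ.
import subprocess
import time

while True:
    try:
        print("Running import...", flush=True)
        subprocess.run(
            ["/usr/local/bin/python", "/scripts/import-conversations-unified.py"],
            check=True,
        )
        print("Import complete. Sleeping for 60 seconds...", flush=True)
    except Exception as e:
        print(f"Error: {e}", flush=True)
    time.sleep(60)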
@@ -262,7 +262,10 @@ async function configureClaude() {

 // Create a script that runs the MCP server in Docker
 const scriptContent = `#!/bin/bash
-
+# Run the MCP server in the Docker container with stdin attached
+# Using python -u for unbuffered output
+# Using the main module which properly supports local embeddings
+docker exec -i claude-reflection-mcp python -u -m src
 `;

 await fs.writeFile(mcpScript, scriptContent, { mode: 0o755 });
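The generated wrapper previously had an empty body; it now starts the server with `python -u -m src` (unbuffered output, and module execution keeps package-relative imports working, which the local-embeddings code path needs). Running `-m src` presupposes a `src/` package with a `__main__.py` inside the container; a hypothetical sketch of such an entry point, since the real file is not part of this diff:

# Hypothetical src/__main__.py (illustrative; not shown in this diff).
# `python -m src` executes this module with `src` importable as a package,
# so relative imports like the one below resolve correctly.
from .server import mcp  # assumed layout: FastMCP instance defined in src/server.py

if __name__ == "__main__":
    mcp.run(transport="stdio", show_banner=False)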
@@ -1,5 +1,5 @@
 #!/bin/bash
 # Run the MCP server in the Docker container with stdin attached
 # Using python -u for unbuffered output
-# Using
+# Using the main module which properly supports local embeddings
 docker exec -i claude-reflection-mcp python -u -m src
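Since the wrapper only bridges stdin/stdout into the container, it can be smoke-tested by piping a single MCP `initialize` request through the same command. A sketch, assuming the `claude-reflection-mcp` container is already running (the client name and protocol revision below are illustrative):

# Pipe one newline-delimited JSON-RPC initialize request to the MCP server
# running inside the container and print whatever it answers.
import json
import subprocess

request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "initialize",
    "params": {
        "protocolVersion": "2024-11-05",  # illustrative protocol revision
        "capabilities": {},
        "clientInfo": {"name": "smoke-test", "version": "0.0.1"},
    },
}

proc = subprocess.run(
    ["docker", "exec", "-i", "claude-reflection-mcp", "python", "-u", "-m", "src"],
    input=json.dumps(request) + "\n",
    capture_output=True,
    text=True,
    timeout=30,
)
print(proc.stdout or proc.stderr)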
package/package.json
CHANGED
@@ -1,254 +0,0 @@
-"""Claude Reflect MCP Server with Native Qdrant Memory Decay (v2.0.0)."""
-
-import os
-from pathlib import Path
-from typing import Any, Optional, List, Dict, Union
-from datetime import datetime
-import json
-
-from fastmcp import FastMCP, Context
-from pydantic import BaseModel, Field
-from qdrant_client import AsyncQdrantClient, models
-from qdrant_client.models import (
-    PointStruct, VectorParams, Distance
-)
-try:
-    from qdrant_client.models import (
-        Query, Formula, Expression, MultExpression,
-        ExpDecayExpression, DecayParamsExpression,
-        SearchRequest, NamedQuery
-    )
-    NATIVE_DECAY_AVAILABLE = True
-except ImportError:
-    # Fallback for older qdrant-client versions
-    NATIVE_DECAY_AVAILABLE = False
-    Query = Formula = Expression = MultExpression = None
-    ExpDecayExpression = DecayParamsExpression = None
-    SearchRequest = NamedQuery = None
-import voyageai
-from dotenv import load_dotenv
-
-# Load environment variables
-env_path = Path(__file__).parent.parent.parent / '.env'
-load_dotenv(env_path)
-
-# Configuration
-QDRANT_URL = os.getenv('QDRANT_URL', 'http://localhost:6333')
-VOYAGE_API_KEY = os.getenv('VOYAGE_KEY') or os.getenv('VOYAGE_KEY-2')
-ENABLE_MEMORY_DECAY = os.getenv('ENABLE_MEMORY_DECAY', 'false').lower() == 'true'
-DECAY_WEIGHT = float(os.getenv('DECAY_WEIGHT', '0.3'))
-DECAY_SCALE_DAYS = float(os.getenv('DECAY_SCALE_DAYS', '90'))
-
-# Initialize Voyage AI client
-voyage_client = voyageai.Client(api_key=VOYAGE_API_KEY) if VOYAGE_API_KEY else None
-
-# Debug environment loading (disabled for production)
-# print(f"[DEBUG] Qdrant Native Decay Server v2.0.0")
-# print(f"[DEBUG] ENABLE_MEMORY_DECAY: {ENABLE_MEMORY_DECAY}")
-# print(f"[DEBUG] DECAY_WEIGHT: {DECAY_WEIGHT}")
-# print(f"[DEBUG] DECAY_SCALE_DAYS: {DECAY_SCALE_DAYS}")
-
-
-class SearchResult(BaseModel):
-    """A single search result."""
-    id: str
-    score: float
-    timestamp: str
-    role: str
-    excerpt: str
-    project_name: str
-    conversation_id: Optional[str] = None
-    collection_name: str
-
-
-# Initialize FastMCP instance
-mcp = FastMCP(
-    name="claude-reflect",
-    instructions="Search past conversations and store reflections with time-based memory decay (v2.0.0 - Native Qdrant)"
-)
-
-# Create Qdrant client
-qdrant_client = AsyncQdrantClient(url=QDRANT_URL)
-
-async def get_voyage_collections() -> List[str]:
-    """Get all Voyage collections."""
-    collections = await qdrant_client.get_collections()
-    return [c.name for c in collections.collections if c.name.endswith('_voyage')]
-
-async def generate_embedding(text: str) -> List[float]:
-    """Generate embedding using Voyage AI."""
-    if not voyage_client:
-        raise ValueError("Voyage AI API key not configured")
-
-    result = voyage_client.embed(
-        texts=[text],
-        model="voyage-3-large",
-        input_type="query"
-    )
-    return result.embeddings[0]
-
-# Register tools
-@mcp.tool()
-async def reflect_on_past(
-    ctx: Context,
-    query: str = Field(description="The search query to find semantically similar conversations"),
-    limit: int = Field(default=5, description="Maximum number of results to return"),
-    min_score: float = Field(default=0.3, description="Minimum similarity score (0-1)"),
-    use_decay: Union[int, str] = Field(default=-1, description="Apply time-based decay: 1=enable, 0=disable, -1=use environment default (accepts int or str)")
-) -> str:
-    """Search for relevant past conversations using semantic search with optional time decay."""
-
-    # Normalize use_decay to integer
-    if isinstance(use_decay, str):
-        try:
-            use_decay = int(use_decay)
-        except ValueError:
-            raise ValueError("use_decay must be '1', '0', or '-1'")
-
-    # Parse decay parameter using integer approach
-    should_use_decay = (
-        True if use_decay == 1
-        else False if use_decay == 0
-        else ENABLE_MEMORY_DECAY  # -1 or any other value
-    )
-
-    await ctx.debug(f"Searching for: {query}")
-    await ctx.debug(f"Decay enabled: {should_use_decay}")
-    await ctx.debug(f"Using Qdrant Native Decay (v2.0.0)")
-
-    try:
-        # Generate embedding
-        query_embedding = await generate_embedding(query)
-
-        # Get all Voyage collections
-        voyage_collections = await get_voyage_collections()
-        if not voyage_collections:
-            return "No conversation collections found. Please import conversations first."
-
-        await ctx.debug(f"Searching across {len(voyage_collections)} collections")
-
-        all_results = []
-
-        # Search each collection with native Qdrant decay
-        for collection_name in voyage_collections:
-            try:
-                if should_use_decay and NATIVE_DECAY_AVAILABLE:
-                    # Build the query with native Qdrant decay formula
-                    query_obj = Query(
-                        nearest=query_embedding,
-                        formula=Formula(
-                            sum=[
-                                # Original similarity score
-                                Expression(variable="score"),
-                                # Decay boost term
-                                Expression(
-                                    mult=MultExpression(
-                                        mult=[
-                                            # Decay weight
-                                            Expression(constant=DECAY_WEIGHT),
-                                            # Exponential decay function
-                                            Expression(
-                                                exp_decay=DecayParamsExpression(
-                                                    # Use timestamp field for decay
-                                                    x=Expression(datetime_key="timestamp"),
-                                                    # Decay from current time (server-side)
-                                                    target=Expression(datetime="now"),
-                                                    # Scale in milliseconds
-                                                    scale=DECAY_SCALE_DAYS * 24 * 60 * 60 * 1000,
-                                                    # Standard exponential decay midpoint
-                                                    midpoint=0.5
-                                                )
-                                            )
-                                        ]
-                                    )
-                                )
-                            ]
-                        )
-                    )
-
-                    # Execute query with native decay
-                    results = await qdrant_client.query_points(
-                        collection_name=collection_name,
-                        query=query_obj,
-                        limit=limit,
-                        score_threshold=min_score,
-                        with_payload=True
-                    )
-
-                    await ctx.debug(f"Native decay search in {collection_name} returned {len(results.points)} results")
-                else:
-                    # Standard search without decay
-                    results = await qdrant_client.search(
-                        collection_name=collection_name,
-                        query_vector=query_embedding,
-                        limit=limit,
-                        score_threshold=min_score,
-                        with_payload=True
-                    )
-                    results = models.QueryResponse(points=results)
-
-                # Process results
-                for point in results.points:
-                    all_results.append(SearchResult(
-                        id=str(point.id),
-                        score=point.score,
-                        timestamp=point.payload.get('timestamp', datetime.now().isoformat()),
-                        role=point.payload.get('start_role', point.payload.get('role', 'unknown')),
-                        excerpt=(point.payload.get('text', '')[:500] + '...'),
-                        project_name=point.payload.get('project', collection_name.replace('conv_', '').replace('_voyage', '')),
-                        conversation_id=point.payload.get('conversation_id'),
-                        collection_name=collection_name
-                    ))
-
-            except Exception as e:
-                await ctx.debug(f"Error searching {collection_name}: {str(e)}")
-                continue
-
-        # Sort by score and limit
-        all_results.sort(key=lambda x: x.score, reverse=True)
-        all_results = all_results[:limit]
-
-        if not all_results:
-            return f"No conversations found matching '{query}'. Try different keywords or check if conversations have been imported."
-
-        # Format results
-        result_text = f"Found {len(all_results)} relevant conversation(s) for '{query}':\n\n"
-        for i, result in enumerate(all_results):
-            result_text += f"**Result {i+1}** (Score: {result.score:.3f})\n"
-            result_text += f"Time: {datetime.fromisoformat(result.timestamp).strftime('%Y-%m-%d %H:%M:%S')}\n"
-            result_text += f"Project: {result.project_name}\n"
-            result_text += f"Role: {result.role}\n"
-            result_text += f"Excerpt: {result.excerpt}\n"
-            result_text += "---\n\n"
-
-        return result_text
-
-    except Exception as e:
-        await ctx.error(f"Search failed: {str(e)}")
-        return f"Failed to search conversations: {str(e)}"
-
-@mcp.tool()
-async def store_reflection(
-    ctx: Context,
-    content: str = Field(description="The insight or reflection to store"),
-    tags: List[str] = Field(default=[], description="Tags to categorize this reflection")
-) -> str:
-    """Store an important insight or reflection for future reference."""
-
-    try:
-        # TODO: Implement actual storage in a dedicated reflections collection
-        # For now, just acknowledge the storage
-        tags_str = ', '.join(tags) if tags else 'none'
-        return f"Reflection stored successfully with tags: {tags_str}"
-
-    except Exception as e:
-        await ctx.error(f"Store failed: {str(e)}")
-        return f"Failed to store reflection: {str(e)}"
-
-
-# Debug output (disabled for production)
-# print(f"[DEBUG] FastMCP server v2.0.0 created with native Qdrant decay")
-
-# Run the server when executed as main module
-if __name__ == "__main__":
-    mcp.run(transport="stdio", show_banner=False)
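In the removed server, the native formula adds a recency boost on top of raw similarity: `exp_decay` with `midpoint=0.5` and `scale = DECAY_SCALE_DAYS` (converted to milliseconds) evaluates to 0.5 once a point is exactly `DECAY_SCALE_DAYS` old, so the re-ranked score is roughly `score + DECAY_WEIGHT * 0.5 ** (age / scale)`. A client-side sketch of the same arithmetic, useful for sanity-checking what the native query returns (the helper name is illustrative):

# Client-side equivalent of the native re-ranking (illustrative):
# final score = similarity + DECAY_WEIGHT * 0.5 ** (age / scale).
from datetime import datetime, timezone

DECAY_WEIGHT = 0.3        # matches the removed server's default
DECAY_SCALE_DAYS = 90.0   # matches the removed server's default

def decayed_score(similarity: float, timestamp: str) -> float:
    """Boost recent points the way the ExpDecayExpression(midpoint=0.5) formula does."""
    # Assumes an ISO-8601 timestamp that carries timezone information.
    age_days = (
        datetime.now(timezone.utc) - datetime.fromisoformat(timestamp)
    ).total_seconds() / 86400.0
    return similarity + DECAY_WEIGHT * 0.5 ** (age_days / DECAY_SCALE_DAYS)

# A 90-day-old hit with similarity 0.70 re-ranks to 0.70 + 0.3 * 0.5 = 0.85.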