claude-self-reflect 1.3.5 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/README.md +138 -0
- package/.claude/agents/docker-orchestrator.md +264 -0
- package/.claude/agents/documentation-writer.md +262 -0
- package/.claude/agents/import-debugger.md +203 -0
- package/.claude/agents/mcp-integration.md +286 -0
- package/.claude/agents/open-source-maintainer.md +150 -0
- package/.claude/agents/performance-tuner.md +276 -0
- package/.claude/agents/qdrant-specialist.md +138 -0
- package/.claude/agents/reflection-specialist.md +361 -0
- package/.claude/agents/search-optimizer.md +307 -0
- package/LICENSE +21 -0
- package/README.md +128 -0
- package/installer/cli.js +122 -0
- package/installer/postinstall.js +13 -0
- package/installer/setup-wizard.js +204 -0
- package/mcp-server/pyproject.toml +27 -0
- package/mcp-server/run-mcp.sh +21 -0
- package/mcp-server/src/__init__.py +1 -0
- package/mcp-server/src/__main__.py +23 -0
- package/mcp-server/src/server.py +316 -0
- package/mcp-server/src/server_v2.py +240 -0
- package/package.json +12 -36
- package/scripts/import-conversations-isolated.py +311 -0
- package/scripts/import-conversations-voyage-streaming.py +377 -0
- package/scripts/import-conversations-voyage.py +428 -0
- package/scripts/import-conversations.py +240 -0
- package/scripts/import-current-conversation.py +38 -0
- package/scripts/import-live-conversation.py +152 -0
- package/scripts/import-openai-enhanced.py +867 -0
- package/scripts/import-recent-only.py +29 -0
- package/scripts/import-single-project.py +278 -0
- package/scripts/import-watcher.py +169 -0
- package/config/claude-desktop-config.json +0 -12
- package/dist/cli.d.ts +0 -3
- package/dist/cli.d.ts.map +0 -1
- package/dist/cli.js +0 -55
- package/dist/cli.js.map +0 -1
- package/dist/embeddings-gemini.d.ts +0 -76
- package/dist/embeddings-gemini.d.ts.map +0 -1
- package/dist/embeddings-gemini.js +0 -158
- package/dist/embeddings-gemini.js.map +0 -1
- package/dist/embeddings.d.ts +0 -67
- package/dist/embeddings.d.ts.map +0 -1
- package/dist/embeddings.js +0 -252
- package/dist/embeddings.js.map +0 -1
- package/dist/index.d.ts +0 -3
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js +0 -439
- package/dist/index.js.map +0 -1
- package/dist/project-isolation.d.ts +0 -29
- package/dist/project-isolation.d.ts.map +0 -1
- package/dist/project-isolation.js +0 -78
- package/dist/project-isolation.js.map +0 -1
- package/scripts/install-agent.js +0 -70
- package/scripts/setup-wizard.js +0 -596
- package/src/cli.ts +0 -56
- package/src/embeddings-gemini.ts +0 -176
- package/src/embeddings.ts +0 -296
- package/src/index.ts +0 -513
- package/src/project-isolation.ts +0 -93
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
"""Claude Reflect MCP Server with Memory Decay."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any, Optional, List, Dict, Union
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
import json
|
|
8
|
+
import numpy as np
|
|
9
|
+
|
|
10
|
+
from fastmcp import FastMCP, Context
|
|
11
|
+
from pydantic import BaseModel, Field
|
|
12
|
+
from qdrant_client import AsyncQdrantClient, models
|
|
13
|
+
from qdrant_client.models import (
|
|
14
|
+
PointStruct, VectorParams, Distance,
|
|
15
|
+
FormulaQuery, DecayParamsExpression, SumExpression,
|
|
16
|
+
DatetimeExpression, DatetimeKeyExpression
|
|
17
|
+
)
|
|
18
|
+
import voyageai
|
|
19
|
+
from dotenv import load_dotenv
|
|
20
|
+
|
|
21
|
+
# Load environment variables
|
|
22
|
+
env_path = Path(__file__).parent.parent.parent / '.env'
|
|
23
|
+
load_dotenv(env_path)
|
|
24
|
+
|
|
25
|
+
# Configuration
|
|
26
|
+
QDRANT_URL = os.getenv('QDRANT_URL', 'http://localhost:6333')
|
|
27
|
+
VOYAGE_API_KEY = os.getenv('VOYAGE_KEY') or os.getenv('VOYAGE_KEY-2')
|
|
28
|
+
ENABLE_MEMORY_DECAY = os.getenv('ENABLE_MEMORY_DECAY', 'false').lower() == 'true'
|
|
29
|
+
DECAY_WEIGHT = float(os.getenv('DECAY_WEIGHT', '0.3'))
|
|
30
|
+
DECAY_SCALE_DAYS = float(os.getenv('DECAY_SCALE_DAYS', '90'))
|
|
31
|
+
USE_NATIVE_DECAY = os.getenv('USE_NATIVE_DECAY', 'false').lower() == 'true'
|
|
32
|
+
|
|
33
|
+
# Initialize Voyage AI client
|
|
34
|
+
voyage_client = voyageai.Client(api_key=VOYAGE_API_KEY) if VOYAGE_API_KEY else None
|
|
35
|
+
|
|
36
|
+
# Debug environment loading
|
|
37
|
+
print(f"[DEBUG] Environment variables loaded:")
|
|
38
|
+
print(f"[DEBUG] ENABLE_MEMORY_DECAY: {ENABLE_MEMORY_DECAY}")
|
|
39
|
+
print(f"[DEBUG] USE_NATIVE_DECAY: {USE_NATIVE_DECAY}")
|
|
40
|
+
print(f"[DEBUG] DECAY_WEIGHT: {DECAY_WEIGHT}")
|
|
41
|
+
print(f"[DEBUG] DECAY_SCALE_DAYS: {DECAY_SCALE_DAYS}")
|
|
42
|
+
print(f"[DEBUG] env_path: {env_path}")
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class SearchResult(BaseModel):
    """A single search result returned by reflect_on_past."""
    id: str                                # Qdrant point id (stringified)
    score: float                           # similarity score (decay-adjusted when decay is on)
    timestamp: str                         # ISO-8601 timestamp taken from the point payload
    role: str                              # conversation role of the excerpt
    excerpt: str                           # text excerpt (truncated to 500 chars)
    project_name: str                      # project the conversation belongs to
    conversation_id: Optional[str] = None  # source conversation, when known
    collection_name: str                   # Qdrant collection the point came from
+
|
|
56
|
+
|
|
57
|
+
# Initialize FastMCP instance (the MCP server object; tools register on it below)
mcp = FastMCP(
    name="claude-self-reflect",
    instructions="Search past conversations and store reflections with time-based memory decay"
)

# Create Qdrant client (async; shared by all tool handlers)
qdrant_client = AsyncQdrantClient(url=QDRANT_URL)
|
|
66
|
+
async def get_voyage_collections() -> List[str]:
    """Return the names of all Qdrant collections holding Voyage embeddings."""
    listing = await qdrant_client.get_collections()
    names = []
    for collection in listing.collections:
        if collection.name.endswith('_voyage'):
            names.append(collection.name)
    return names
|
|
71
|
+
async def generate_embedding(text: str) -> List[float]:
    """Generate a query embedding for *text* using Voyage AI.

    Returns:
        The embedding vector for the single input text.

    Raises:
        ValueError: if no Voyage API key was configured at startup.
    """
    if not voyage_client:
        raise ValueError("Voyage AI API key not configured")

    # BUG FIX: voyageai.Client.embed is a synchronous HTTP call; invoking it
    # directly inside this coroutine blocked the event loop for the duration
    # of the request. Run it in a worker thread instead.
    import asyncio
    result = await asyncio.to_thread(
        voyage_client.embed,
        texts=[text],
        model="voyage-3-large",
        input_type="query",
    )
    return result.embeddings[0]
83
|
+
# Register tools
|
|
84
|
+
def _make_search_result(point, collection_name: str, score: Optional[float] = None) -> SearchResult:
    """Build a SearchResult from a Qdrant point (shared by all three search branches)."""
    text = point.payload.get('text', '')
    # BUG FIX: only append an ellipsis when the excerpt was actually truncated.
    excerpt = text[:500] + '...' if len(text) > 500 else text
    return SearchResult(
        id=str(point.id),
        score=point.score if score is None else score,
        timestamp=point.payload.get('timestamp', datetime.now().isoformat()),
        role=point.payload.get('start_role', point.payload.get('role', 'unknown')),
        excerpt=excerpt,
        project_name=point.payload.get('project', collection_name.replace('conv_', '').replace('_voyage', '')),
        conversation_id=point.payload.get('conversation_id'),
        collection_name=collection_name
    )


@mcp.tool()
async def reflect_on_past(
    ctx: Context,
    query: str = Field(description="The search query to find semantically similar conversations"),
    limit: int = Field(default=5, description="Maximum number of results to return"),
    min_score: float = Field(default=0.7, description="Minimum similarity score (0-1)"),
    use_decay: Union[int, str] = Field(default=-1, description="Apply time-based decay: 1=enable, 0=disable, -1=use environment default (accepts int or str)")
) -> str:
    """Search for relevant past conversations using semantic search with optional time decay.

    Searches every Voyage-embedded collection. When decay is enabled, recent
    conversations are boosted either natively in Qdrant (USE_NATIVE_DECAY) or
    client-side; the top `limit` results are returned as formatted text.
    """
    from datetime import timezone

    # Normalize use_decay to an integer (the MCP layer may pass it as a string).
    if isinstance(use_decay, str):
        try:
            use_decay = int(use_decay)
        except ValueError:
            raise ValueError("use_decay must be '1', '0', or '-1'")

    # 1 forces decay on, 0 forces it off, anything else (-1) defers to the env.
    should_use_decay = (
        True if use_decay == 1
        else False if use_decay == 0
        else ENABLE_MEMORY_DECAY
    )

    await ctx.debug(f"Searching for: {query}")
    await ctx.debug(f"Decay enabled: {should_use_decay}")
    await ctx.debug(f"Native decay mode: {USE_NATIVE_DECAY}")
    await ctx.debug(f"ENABLE_MEMORY_DECAY env: {ENABLE_MEMORY_DECAY}")
    await ctx.debug(f"DECAY_WEIGHT: {DECAY_WEIGHT}, DECAY_SCALE_DAYS: {DECAY_SCALE_DAYS}")

    try:
        # Generate embedding for the query text.
        query_embedding = await generate_embedding(query)

        # Get all Voyage collections.
        voyage_collections = await get_voyage_collections()
        if not voyage_collections:
            return "No conversation collections found. Please import conversations first."

        await ctx.debug(f"Searching across {len(voyage_collections)} collections")

        all_results = []

        # Search each collection; failures in one collection are logged and skipped.
        for collection_name in voyage_collections:
            try:
                if should_use_decay and USE_NATIVE_DECAY:
                    # Server-side decay: final = score + DECAY_WEIGHT * exp_decay(age).
                    await ctx.debug(f"Using NATIVE Qdrant decay for {collection_name}")

                    query_obj = FormulaQuery(
                        nearest=query_embedding,
                        formula=SumExpression(
                            sum=[
                                # Original similarity score (variable expression as a string).
                                'score',
                                # Decay boost term.
                                {
                                    'mult': [
                                        # Decay weight (constant as float).
                                        DECAY_WEIGHT,
                                        # Exponential decay function.
                                        {
                                            'exp_decay': DecayParamsExpression(
                                                # Decay on the point's timestamp field,
                                                x=DatetimeKeyExpression(datetime_key='timestamp'),
                                                # relative to the server's current time.
                                                target=DatetimeExpression(datetime='now'),
                                                # Scale in milliseconds.
                                                scale=DECAY_SCALE_DAYS * 24 * 60 * 60 * 1000,
                                                # Standard exponential decay midpoint.
                                                midpoint=0.5
                                            )
                                        }
                                    ]
                                }
                            ]
                        )
                    )

                    results = await qdrant_client.query_points(
                        collection_name=collection_name,
                        query=query_obj,
                        limit=limit,
                        score_threshold=min_score,
                        with_payload=True
                    )

                    # Scores already include the decay boost.
                    for point in results.points:
                        all_results.append(_make_search_result(point, collection_name))

                elif should_use_decay:
                    # Client-side decay fallback.
                    await ctx.debug(f"Using CLIENT-SIDE decay for {collection_name}")

                    # Over-fetch without a score threshold: decay may promote
                    # candidates that would otherwise fall below min_score.
                    results = await qdrant_client.search(
                        collection_name=collection_name,
                        query_vector=query_embedding,
                        limit=limit * 3,
                        with_payload=True
                    )

                    # BUG FIX: the original used naive datetime.now(), and
                    # subtracting a tz-aware payload timestamp from it raised
                    # TypeError, which the per-point except swallowed — so decay
                    # was silently never applied. Use an aware UTC "now".
                    now = datetime.now(timezone.utc)
                    scale_ms = DECAY_SCALE_DAYS * 24 * 60 * 60 * 1000

                    decay_results = []
                    for point in results:
                        try:
                            timestamp_str = point.payload.get('timestamp')
                            if timestamp_str:
                                timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
                                # Treat naive payload timestamps as UTC.
                                if timestamp.tzinfo is None:
                                    timestamp = timestamp.replace(tzinfo=timezone.utc)
                                age_ms = (now - timestamp).total_seconds() * 1000

                                # score' = score + weight * e^(-age/scale)
                                decay_factor = np.exp(-age_ms / scale_ms)
                                adjusted_score = point.score + (DECAY_WEIGHT * decay_factor)

                                age_days = age_ms / (24 * 60 * 60 * 1000)
                                await ctx.debug(f"Point: age={age_days:.1f} days, original_score={point.score:.3f}, decay_factor={decay_factor:.3f}, adjusted_score={adjusted_score:.3f}")
                            else:
                                adjusted_score = point.score

                            # Apply min_score only after the decay adjustment.
                            if adjusted_score >= min_score:
                                decay_results.append((adjusted_score, point))

                        except Exception as e:
                            # Best-effort: keep the point with its raw score.
                            await ctx.debug(f"Error applying decay to point: {e}")
                            decay_results.append((point.score, point))

                    # Sort by adjusted score and keep the top `limit` per collection.
                    decay_results.sort(key=lambda x: x[0], reverse=True)
                    for adjusted_score, point in decay_results[:limit]:
                        all_results.append(_make_search_result(point, collection_name, score=adjusted_score))

                else:
                    # Standard search without decay.
                    results = await qdrant_client.search(
                        collection_name=collection_name,
                        query_vector=query_embedding,
                        limit=limit,
                        score_threshold=min_score,
                        with_payload=True
                    )
                    for point in results:
                        all_results.append(_make_search_result(point, collection_name))

            except Exception as e:
                await ctx.debug(f"Error searching {collection_name}: {str(e)}")
                continue

        # Merge across collections: sort by score and keep the global top `limit`.
        all_results.sort(key=lambda x: x.score, reverse=True)
        all_results = all_results[:limit]

        if not all_results:
            return f"No conversations found matching '{query}'. Try different keywords or check if conversations have been imported."

        # Format results as markdown-ish text.
        result_text = f"Found {len(all_results)} relevant conversation(s) for '{query}':\n\n"
        for i, result in enumerate(all_results):
            # BUG FIX: normalize a trailing 'Z' so fromisoformat accepts it on
            # Python < 3.11; previously one such timestamp aborted the response.
            ts = datetime.fromisoformat(result.timestamp.replace('Z', '+00:00'))
            result_text += f"**Result {i+1}** (Score: {result.score:.3f})\n"
            result_text += f"Time: {ts.strftime('%Y-%m-%d %H:%M:%S')}\n"
            result_text += f"Project: {result.project_name}\n"
            result_text += f"Role: {result.role}\n"
            result_text += f"Excerpt: {result.excerpt}\n"
            result_text += "---\n\n"

        return result_text

    except Exception as e:
        await ctx.error(f"Search failed: {str(e)}")
        return f"Failed to search conversations: {str(e)}"
|
|
296
|
+
@mcp.tool()
async def store_reflection(
    ctx: Context,
    content: str = Field(description="The insight or reflection to store"),
    tags: List[str] = Field(default=[], description="Tags to categorize this reflection")
) -> str:
    """Store an important insight or reflection for future reference."""
    try:
        # TODO: persist to a dedicated reflections collection; for now this
        # only acknowledges receipt of the reflection.
        if tags:
            tags_str = ', '.join(tags)
        else:
            tags_str = 'none'
        return f"Reflection stored successfully with tags: {tags_str}"

    except Exception as e:
        await ctx.error(f"Store failed: {str(e)}")
        return f"Failed to store reflection: {str(e)}"
|
|
314
|
+
|
|
315
|
+
# Debug output: confirms at import time that the FastMCP server was constructed.
print(f"[DEBUG] FastMCP server created with name: {mcp.name}")
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
"""Claude Reflect MCP Server with Native Qdrant Memory Decay (v2.0.0)."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any, Optional, List, Dict, Union
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
from fastmcp import FastMCP, Context
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
from qdrant_client import AsyncQdrantClient, models
|
|
12
|
+
from qdrant_client.models import (
|
|
13
|
+
PointStruct, VectorParams, Distance,
|
|
14
|
+
Query, Formula, Expression, MultExpression,
|
|
15
|
+
ExpDecayExpression, DecayParamsExpression,
|
|
16
|
+
SearchRequest, NamedQuery
|
|
17
|
+
)
|
|
18
|
+
import voyageai
|
|
19
|
+
from dotenv import load_dotenv
|
|
20
|
+
|
|
21
|
+
# Load environment variables
|
|
22
|
+
env_path = Path(__file__).parent.parent.parent / '.env'
|
|
23
|
+
load_dotenv(env_path)
|
|
24
|
+
|
|
25
|
+
# Configuration
|
|
26
|
+
QDRANT_URL = os.getenv('QDRANT_URL', 'http://localhost:6333')
|
|
27
|
+
VOYAGE_API_KEY = os.getenv('VOYAGE_KEY') or os.getenv('VOYAGE_KEY-2')
|
|
28
|
+
ENABLE_MEMORY_DECAY = os.getenv('ENABLE_MEMORY_DECAY', 'false').lower() == 'true'
|
|
29
|
+
DECAY_WEIGHT = float(os.getenv('DECAY_WEIGHT', '0.3'))
|
|
30
|
+
DECAY_SCALE_DAYS = float(os.getenv('DECAY_SCALE_DAYS', '90'))
|
|
31
|
+
|
|
32
|
+
# Initialize Voyage AI client
|
|
33
|
+
voyage_client = voyageai.Client(api_key=VOYAGE_API_KEY) if VOYAGE_API_KEY else None
|
|
34
|
+
|
|
35
|
+
# Debug environment loading
|
|
36
|
+
print(f"[DEBUG] Qdrant Native Decay Server v2.0.0")
|
|
37
|
+
print(f"[DEBUG] ENABLE_MEMORY_DECAY: {ENABLE_MEMORY_DECAY}")
|
|
38
|
+
print(f"[DEBUG] DECAY_WEIGHT: {DECAY_WEIGHT}")
|
|
39
|
+
print(f"[DEBUG] DECAY_SCALE_DAYS: {DECAY_SCALE_DAYS}")
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class SearchResult(BaseModel):
    """A single search result returned by reflect_on_past."""
    id: str                                # Qdrant point id (stringified)
    score: float                           # similarity score (includes native decay when enabled)
    timestamp: str                         # ISO-8601 timestamp taken from the point payload
    role: str                              # conversation role of the excerpt
    excerpt: str                           # text excerpt (truncated to 500 chars)
    project_name: str                      # project the conversation belongs to
    conversation_id: Optional[str] = None  # source conversation, when known
    collection_name: str                   # Qdrant collection the point came from
|
54
|
+
# Initialize FastMCP instance (the MCP server object; tools register on it below)
mcp = FastMCP(
    name="claude-reflect",
    instructions="Search past conversations and store reflections with time-based memory decay (v2.0.0 - Native Qdrant)"
)

# Create Qdrant client (async; shared by all tool handlers)
qdrant_client = AsyncQdrantClient(url=QDRANT_URL)
|
63
|
+
async def get_voyage_collections() -> List[str]:
    """Return the names of every Qdrant collection that stores Voyage embeddings."""
    listing = await qdrant_client.get_collections()
    suffix = '_voyage'
    return [entry.name for entry in listing.collections if entry.name.endswith(suffix)]
|
68
|
+
async def generate_embedding(text: str) -> List[float]:
    """Generate a query embedding for *text* using Voyage AI.

    Returns:
        The embedding vector for the single input text.

    Raises:
        ValueError: if no Voyage API key was configured at startup.
    """
    if not voyage_client:
        raise ValueError("Voyage AI API key not configured")

    # BUG FIX: voyageai.Client.embed is a synchronous HTTP call; invoking it
    # directly inside this coroutine blocked the event loop for the duration
    # of the request. Run it in a worker thread instead.
    import asyncio
    result = await asyncio.to_thread(
        voyage_client.embed,
        texts=[text],
        model="voyage-3-large",
        input_type="query",
    )
    return result.embeddings[0]
80
|
+
# Register tools
|
|
81
|
+
def _point_to_search_result(point, collection_name: str) -> SearchResult:
    """Build a SearchResult from a Qdrant point (shared by both search branches)."""
    text = point.payload.get('text', '')
    # BUG FIX: only append an ellipsis when the excerpt was actually truncated.
    excerpt = text[:500] + '...' if len(text) > 500 else text
    return SearchResult(
        id=str(point.id),
        score=point.score,
        timestamp=point.payload.get('timestamp', datetime.now().isoformat()),
        role=point.payload.get('start_role', point.payload.get('role', 'unknown')),
        excerpt=excerpt,
        project_name=point.payload.get('project', collection_name.replace('conv_', '').replace('_voyage', '')),
        conversation_id=point.payload.get('conversation_id'),
        collection_name=collection_name
    )


@mcp.tool()
async def reflect_on_past(
    ctx: Context,
    query: str = Field(description="The search query to find semantically similar conversations"),
    limit: int = Field(default=5, description="Maximum number of results to return"),
    min_score: float = Field(default=0.7, description="Minimum similarity score (0-1)"),
    use_decay: Union[int, str] = Field(default=-1, description="Apply time-based decay: 1=enable, 0=disable, -1=use environment default (accepts int or str)")
) -> str:
    """Search for relevant past conversations using semantic search with optional time decay.

    Searches every Voyage-embedded collection; decay, when enabled, is always
    computed natively by Qdrant (server-side formula). The top `limit` results
    are returned as formatted text.
    """
    # Normalize use_decay to an integer (the MCP layer may pass it as a string).
    if isinstance(use_decay, str):
        try:
            use_decay = int(use_decay)
        except ValueError:
            raise ValueError("use_decay must be '1', '0', or '-1'")

    # 1 forces decay on, 0 forces it off, anything else (-1) defers to the env.
    should_use_decay = (
        True if use_decay == 1
        else False if use_decay == 0
        else ENABLE_MEMORY_DECAY
    )

    await ctx.debug(f"Searching for: {query}")
    await ctx.debug(f"Decay enabled: {should_use_decay}")
    await ctx.debug(f"Using Qdrant Native Decay (v2.0.0)")

    try:
        # Generate embedding for the query text.
        query_embedding = await generate_embedding(query)

        # Get all Voyage collections.
        voyage_collections = await get_voyage_collections()
        if not voyage_collections:
            return "No conversation collections found. Please import conversations first."

        await ctx.debug(f"Searching across {len(voyage_collections)} collections")

        all_results = []

        # Search each collection; failures in one collection are logged and skipped.
        for collection_name in voyage_collections:
            try:
                if should_use_decay:
                    # Server-side decay: final = score + DECAY_WEIGHT * exp_decay(age).
                    query_obj = Query(
                        nearest=query_embedding,
                        formula=Formula(
                            sum=[
                                # Original similarity score.
                                Expression(variable="score"),
                                # Decay boost term.
                                Expression(
                                    mult=MultExpression(
                                        mult=[
                                            # Decay weight.
                                            Expression(constant=DECAY_WEIGHT),
                                            # Exponential decay function.
                                            Expression(
                                                exp_decay=DecayParamsExpression(
                                                    # Decay on the point's timestamp field,
                                                    x=Expression(datetime_key="timestamp"),
                                                    # relative to the server's current time.
                                                    target=Expression(datetime="now"),
                                                    # Scale in milliseconds.
                                                    scale=DECAY_SCALE_DAYS * 24 * 60 * 60 * 1000,
                                                    # Standard exponential decay midpoint.
                                                    midpoint=0.5
                                                )
                                            )
                                        ]
                                    )
                                )
                            ]
                        )
                    )

                    # Execute query with native decay.
                    results = await qdrant_client.query_points(
                        collection_name=collection_name,
                        query=query_obj,
                        limit=limit,
                        score_threshold=min_score,
                        with_payload=True
                    )

                    await ctx.debug(f"Native decay search in {collection_name} returned {len(results.points)} results")
                else:
                    # Standard search without decay.
                    results = await qdrant_client.search(
                        collection_name=collection_name,
                        query_vector=query_embedding,
                        limit=limit,
                        score_threshold=min_score,
                        with_payload=True
                    )
                    # Wrap the plain list in a QueryResponse so both branches
                    # expose `.points` below.
                    results = models.QueryResponse(points=results)

                # Process results (same shape for both branches).
                for point in results.points:
                    all_results.append(_point_to_search_result(point, collection_name))

            except Exception as e:
                await ctx.debug(f"Error searching {collection_name}: {str(e)}")
                continue

        # Merge across collections: sort by score and keep the global top `limit`.
        all_results.sort(key=lambda x: x.score, reverse=True)
        all_results = all_results[:limit]

        if not all_results:
            return f"No conversations found matching '{query}'. Try different keywords or check if conversations have been imported."

        # Format results as markdown-ish text.
        result_text = f"Found {len(all_results)} relevant conversation(s) for '{query}':\n\n"
        for i, result in enumerate(all_results):
            # BUG FIX: normalize a trailing 'Z' so fromisoformat accepts it on
            # Python < 3.11; previously one such timestamp aborted the response.
            ts = datetime.fromisoformat(result.timestamp.replace('Z', '+00:00'))
            result_text += f"**Result {i+1}** (Score: {result.score:.3f})\n"
            result_text += f"Time: {ts.strftime('%Y-%m-%d %H:%M:%S')}\n"
            result_text += f"Project: {result.project_name}\n"
            result_text += f"Role: {result.role}\n"
            result_text += f"Excerpt: {result.excerpt}\n"
            result_text += "---\n\n"

        return result_text

    except Exception as e:
        await ctx.error(f"Search failed: {str(e)}")
        return f"Failed to search conversations: {str(e)}"
|
|
220
|
+
@mcp.tool()
async def store_reflection(
    ctx: Context,
    content: str = Field(description="The insight or reflection to store"),
    tags: List[str] = Field(default=[], description="Tags to categorize this reflection")
) -> str:
    """Store an important insight or reflection for future reference."""
    try:
        # TODO: persist to a dedicated reflections collection; for now this
        # only acknowledges receipt of the reflection.
        label = ', '.join(tags) if tags else 'none'
        return f"Reflection stored successfully with tags: {label}"

    except Exception as e:
        await ctx.error(f"Store failed: {str(e)}")
        return f"Failed to store reflection: {str(e)}"
|
238
|
+
|
|
239
|
+
# Debug output: confirms at import time that the FastMCP server was constructed.
print(f"[DEBUG] FastMCP server v2.0.0 created with native Qdrant decay")
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "claude-self-reflect",
|
|
3
|
-
"version": "
|
|
4
|
-
"description": "Give Claude perfect memory of all your conversations -
|
|
3
|
+
"version": "2.2.1",
|
|
4
|
+
"description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"claude",
|
|
7
7
|
"mcp",
|
|
@@ -23,46 +23,22 @@
|
|
|
23
23
|
},
|
|
24
24
|
"license": "MIT",
|
|
25
25
|
"author": "Claude-Self-Reflect Contributors",
|
|
26
|
-
"main": "dist/index.js",
|
|
27
|
-
"types": "dist/index.d.ts",
|
|
28
26
|
"type": "module",
|
|
29
27
|
"bin": {
|
|
30
|
-
"claude-self-reflect": "./
|
|
28
|
+
"claude-self-reflect": "./installer/cli.js"
|
|
31
29
|
},
|
|
32
30
|
"files": [
|
|
33
|
-
"
|
|
34
|
-
"src",
|
|
35
|
-
"
|
|
36
|
-
"
|
|
37
|
-
"scripts/
|
|
31
|
+
"installer/*.js",
|
|
32
|
+
"mcp-server/src/**/*.py",
|
|
33
|
+
"mcp-server/pyproject.toml",
|
|
34
|
+
"mcp-server/run-mcp.sh",
|
|
35
|
+
"scripts/import-*.py",
|
|
36
|
+
".claude/agents/*.md",
|
|
38
37
|
"README.md",
|
|
39
|
-
"LICENSE"
|
|
40
|
-
"config/claude-desktop-config.json"
|
|
38
|
+
"LICENSE"
|
|
41
39
|
],
|
|
42
40
|
"scripts": {
|
|
43
|
-
"
|
|
44
|
-
"start": "node dist/index.js",
|
|
45
|
-
"dev": "tsx src/index.ts",
|
|
46
|
-
"test": "npm run test:integration",
|
|
47
|
-
"test:ci": "echo 'Integration tests require Qdrant - skipping in CI'",
|
|
48
|
-
"test:unit": "echo 'No unit tests yet'",
|
|
49
|
-
"test:integration": "tsx test/search-quality.test.ts",
|
|
50
|
-
"lint": "tsc --noEmit",
|
|
51
|
-
"postinstall": "node scripts/install-agent.js",
|
|
52
|
-
"prepublishOnly": "npm run build && npm run test:ci",
|
|
53
|
-
"version": "git add -A src",
|
|
54
|
-
"postversion": "git push && git push --tags"
|
|
55
|
-
},
|
|
56
|
-
"dependencies": {
|
|
57
|
-
"@modelcontextprotocol/sdk": "^0.5.0",
|
|
58
|
-
"@qdrant/js-client-rest": "^1.7.0",
|
|
59
|
-
"dotenv": "^17.2.1",
|
|
60
|
-
"node-fetch": "^3.3.2"
|
|
61
|
-
},
|
|
62
|
-
"devDependencies": {
|
|
63
|
-
"@types/node": "^20.0.0",
|
|
64
|
-
"tsx": "^4.0.0",
|
|
65
|
-
"typescript": "^5.0.0"
|
|
41
|
+
"postinstall": "node installer/postinstall.js"
|
|
66
42
|
},
|
|
67
43
|
"engines": {
|
|
68
44
|
"node": ">=18.0.0"
|
|
@@ -71,4 +47,4 @@
|
|
|
71
47
|
"access": "public",
|
|
72
48
|
"registry": "https://registry.npmjs.org/"
|
|
73
49
|
}
|
|
74
|
-
}
|
|
50
|
+
}
|