claude-self-reflect 3.2.4 → 3.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/claude-self-reflect-test.md +992 -510
- package/.claude/agents/reflection-specialist.md +59 -3
- package/README.md +14 -5
- package/installer/cli.js +16 -0
- package/installer/postinstall.js +14 -0
- package/installer/statusline-setup.js +289 -0
- package/mcp-server/run-mcp.sh +73 -5
- package/mcp-server/src/app_context.py +64 -0
- package/mcp-server/src/config.py +57 -0
- package/mcp-server/src/connection_pool.py +286 -0
- package/mcp-server/src/decay_manager.py +106 -0
- package/mcp-server/src/embedding_manager.py +64 -40
- package/mcp-server/src/embeddings_old.py +141 -0
- package/mcp-server/src/models.py +64 -0
- package/mcp-server/src/parallel_search.py +305 -0
- package/mcp-server/src/project_resolver.py +5 -0
- package/mcp-server/src/reflection_tools.py +211 -0
- package/mcp-server/src/rich_formatting.py +196 -0
- package/mcp-server/src/search_tools.py +874 -0
- package/mcp-server/src/server.py +127 -1720
- package/mcp-server/src/temporal_design.py +132 -0
- package/mcp-server/src/temporal_tools.py +604 -0
- package/mcp-server/src/temporal_utils.py +384 -0
- package/mcp-server/src/utils.py +150 -67
- package/package.json +15 -1
- package/scripts/add-timestamp-indexes.py +134 -0
- package/scripts/ast_grep_final_analyzer.py +325 -0
- package/scripts/ast_grep_unified_registry.py +556 -0
- package/scripts/check-collections.py +29 -0
- package/scripts/csr-status +366 -0
- package/scripts/debug-august-parsing.py +76 -0
- package/scripts/debug-import-single.py +91 -0
- package/scripts/debug-project-resolver.py +82 -0
- package/scripts/debug-temporal-tools.py +135 -0
- package/scripts/delta-metadata-update.py +547 -0
- package/scripts/import-conversations-unified.py +157 -25
- package/scripts/precompact-hook.sh +33 -0
- package/scripts/session_quality_tracker.py +481 -0
- package/scripts/streaming-watcher.py +1578 -0
- package/scripts/update_patterns.py +334 -0
- package/scripts/utils.py +39 -0
|
@@ -0,0 +1,874 @@
|
|
|
1
|
+
"""Search tools for Claude Self Reflect MCP server."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
import time
|
|
7
|
+
import html
|
|
8
|
+
from typing import Optional, List, Dict, Any
|
|
9
|
+
from datetime import datetime, timezone
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from fastmcp import Context
|
|
13
|
+
from pydantic import Field
|
|
14
|
+
from qdrant_client import AsyncQdrantClient
|
|
15
|
+
from qdrant_client.models import PointStruct
|
|
16
|
+
|
|
17
|
+
from .parallel_search import parallel_search_collections
|
|
18
|
+
from .rich_formatting import format_search_results_rich
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class SearchTools:
|
|
24
|
+
"""Handles all search operations for the MCP server."""
|
|
25
|
+
|
|
26
|
+
    def __init__(
        self,
        qdrant_client: AsyncQdrantClient,
        qdrant_url: str,
        get_embedding_manager,
        normalize_project_name,
        enable_memory_decay: bool,
        decay_weight: float,
        decay_scale_days: float,
        use_native_decay: bool,
        native_decay_available: bool,
        decay_manager=None,
        project_resolver=None  # resolves project names to Qdrant collection names
    ):
        """Initialize search tools with dependencies.

        Args:
            qdrant_client: Async Qdrant client used for all vector operations.
            qdrant_url: Base URL of the Qdrant instance.
            get_embedding_manager: Zero-arg callable returning the embedding
                manager (late-bound so the manager can be created lazily).
            normalize_project_name: Callable that normalizes a project name.
            enable_memory_decay: Whether time-based score decay is applied.
            decay_weight: Blend weight of the decay factor in final scores.
            decay_scale_days: Half-life (in days) of the decay curve.
            use_native_decay: Whether Qdrant-native decay scoring is requested.
            native_decay_available: Whether the server supports native decay.
            decay_manager: Optional helper for decay bookkeeping.
            project_resolver: Optional resolver mapping a project to its
                collections (used to scope searches).
        """
        # Dependencies are stored as-is; no I/O happens at construction time.
        self.qdrant_client = qdrant_client
        self.qdrant_url = qdrant_url
        self.get_embedding_manager = get_embedding_manager
        self.normalize_project_name = normalize_project_name
        self.enable_memory_decay = enable_memory_decay
        self.decay_weight = decay_weight
        self.decay_scale_days = decay_scale_days
        self.use_native_decay = use_native_decay
        self.native_decay_available = native_decay_available
        self.decay_manager = decay_manager
        self.project_resolver = project_resolver
|
|
52
|
+
|
|
53
|
+
# Helper functions will be implemented as methods
|
|
54
|
+
|
|
55
|
+
def get_project_from_cwd(self, cwd: str) -> Optional[str]:
|
|
56
|
+
"""Extract project name from current working directory."""
|
|
57
|
+
from pathlib import Path
|
|
58
|
+
|
|
59
|
+
path_parts = Path(cwd).parts
|
|
60
|
+
if 'projects' in path_parts:
|
|
61
|
+
idx = path_parts.index('projects')
|
|
62
|
+
if idx + 1 < len(path_parts):
|
|
63
|
+
return path_parts[idx + 1]
|
|
64
|
+
elif '.claude' in path_parts:
|
|
65
|
+
# If we're in a .claude directory, go up to find project
|
|
66
|
+
for i, part in enumerate(path_parts):
|
|
67
|
+
if part == '.claude' and i > 0:
|
|
68
|
+
return path_parts[i - 1]
|
|
69
|
+
|
|
70
|
+
# If still no project detected, use the last directory name
|
|
71
|
+
return Path(cwd).name
|
|
72
|
+
|
|
73
|
+
async def perform_search(
|
|
74
|
+
self,
|
|
75
|
+
ctx: Context,
|
|
76
|
+
query: str,
|
|
77
|
+
collection_name: str,
|
|
78
|
+
limit: int,
|
|
79
|
+
min_score: float
|
|
80
|
+
) -> List[Dict[str, Any]]:
|
|
81
|
+
"""Perform semantic search on a single collection."""
|
|
82
|
+
try:
|
|
83
|
+
# Generate embedding for query
|
|
84
|
+
embedding_manager = self.get_embedding_manager()
|
|
85
|
+
|
|
86
|
+
# Determine embedding type based on collection name
|
|
87
|
+
embedding_type = 'voyage' if collection_name.endswith('_voyage') else 'local'
|
|
88
|
+
query_embedding = await embedding_manager.generate_embedding(query, force_type=embedding_type)
|
|
89
|
+
|
|
90
|
+
# Search the collection
|
|
91
|
+
search_results = await self.qdrant_client.search(
|
|
92
|
+
collection_name=collection_name,
|
|
93
|
+
query_vector=query_embedding,
|
|
94
|
+
limit=limit,
|
|
95
|
+
score_threshold=min_score
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
# Convert results to dict format
|
|
99
|
+
results = []
|
|
100
|
+
for result in search_results:
|
|
101
|
+
results.append({
|
|
102
|
+
'conversation_id': result.payload.get('conversation_id'),
|
|
103
|
+
'timestamp': result.payload.get('timestamp'),
|
|
104
|
+
'content': result.payload.get('content', ''),
|
|
105
|
+
'score': result.score,
|
|
106
|
+
'collection': collection_name,
|
|
107
|
+
'payload': result.payload
|
|
108
|
+
})
|
|
109
|
+
|
|
110
|
+
return results
|
|
111
|
+
|
|
112
|
+
except Exception as e:
|
|
113
|
+
await ctx.debug(f"Error searching {collection_name}: {e}")
|
|
114
|
+
return []
|
|
115
|
+
|
|
116
|
+
def apply_decay_to_results(self, results: List[Dict], current_time: datetime) -> List[Dict]:
|
|
117
|
+
"""Apply time-based decay to search results."""
|
|
118
|
+
if not self.enable_memory_decay:
|
|
119
|
+
return results
|
|
120
|
+
|
|
121
|
+
for result in results:
|
|
122
|
+
try:
|
|
123
|
+
# Parse timestamp
|
|
124
|
+
timestamp_str = result.get('timestamp')
|
|
125
|
+
if timestamp_str:
|
|
126
|
+
from datetime import datetime, timezone
|
|
127
|
+
timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
|
|
128
|
+
|
|
129
|
+
# Calculate age in days
|
|
130
|
+
age = (current_time - timestamp).total_seconds() / 86400
|
|
131
|
+
|
|
132
|
+
# Apply exponential decay
|
|
133
|
+
decay_factor = pow(2, -age / self.decay_scale_days)
|
|
134
|
+
|
|
135
|
+
# Adjust score
|
|
136
|
+
original_score = result['score']
|
|
137
|
+
result['score'] = original_score * (1 - self.decay_weight) + decay_factor * self.decay_weight
|
|
138
|
+
result['original_score'] = original_score
|
|
139
|
+
result['decay_factor'] = decay_factor
|
|
140
|
+
|
|
141
|
+
except Exception as e:
|
|
142
|
+
logger.warning(f"Error applying decay to result: {e}")
|
|
143
|
+
|
|
144
|
+
return results
|
|
145
|
+
|
|
146
|
+
def format_search_results(
|
|
147
|
+
self,
|
|
148
|
+
results: List[Dict],
|
|
149
|
+
query: str,
|
|
150
|
+
brief: bool = False,
|
|
151
|
+
include_raw: bool = False,
|
|
152
|
+
response_format: str = "xml"
|
|
153
|
+
) -> str:
|
|
154
|
+
"""Format search results for display."""
|
|
155
|
+
if not results:
|
|
156
|
+
return "<search_results><message>No matching conversations found</message></search_results>"
|
|
157
|
+
|
|
158
|
+
if response_format == "markdown":
|
|
159
|
+
output = f"# Search Results for: {query}\n\n"
|
|
160
|
+
for i, result in enumerate(results, 1):
|
|
161
|
+
output += f"## Result {i}\n"
|
|
162
|
+
output += f"**Score:** {result['score']:.3f}\n"
|
|
163
|
+
output += f"**Timestamp:** {result.get('timestamp', 'N/A')}\n"
|
|
164
|
+
output += f"**Conversation ID:** {result.get('conversation_id', 'N/A')}\n\n"
|
|
165
|
+
if not brief:
|
|
166
|
+
# Handle both 'content' and 'excerpt' fields
|
|
167
|
+
content = result.get('content', result.get('excerpt', ''))
|
|
168
|
+
output += f"**Content:**\n```\n{content[:500]}{'...' if len(content) > 500 else ''}\n```\n\n"
|
|
169
|
+
if include_raw:
|
|
170
|
+
output += f"**Raw Payload:**\n```json\n{json.dumps(result.get('payload', {}), indent=2)}\n```\n\n"
|
|
171
|
+
else:
|
|
172
|
+
# XML format (default) with proper escaping
|
|
173
|
+
def _esc(x): return html.escape(str(x), quote=False)
|
|
174
|
+
|
|
175
|
+
output = f"<search_results>\n<query>{_esc(query)}</query>\n<count>{len(results)}</count>\n"
|
|
176
|
+
for i, result in enumerate(results, 1):
|
|
177
|
+
output += f"<result index=\"{i}\">\n"
|
|
178
|
+
output += f" <score>{result['score']:.3f}</score>\n"
|
|
179
|
+
output += f" <timestamp>{_esc(result.get('timestamp', 'N/A'))}</timestamp>\n"
|
|
180
|
+
output += f" <conversation_id>{_esc(result.get('conversation_id', 'N/A'))}</conversation_id>\n"
|
|
181
|
+
if not brief:
|
|
182
|
+
# Handle both 'content' and 'excerpt' fields
|
|
183
|
+
content = result.get('content', result.get('excerpt', result.get('text', '')))
|
|
184
|
+
truncated = content[:500] + ('...' if len(content) > 500 else '')
|
|
185
|
+
output += f" <content><![CDATA[{truncated}]]></content>\n"
|
|
186
|
+
if include_raw:
|
|
187
|
+
# Use CDATA for large JSON payloads
|
|
188
|
+
output += f" <raw_payload><![CDATA[{json.dumps(result.get('payload', {}), ensure_ascii=False)}]]></raw_payload>\n"
|
|
189
|
+
output += "</result>\n"
|
|
190
|
+
output += "</search_results>"
|
|
191
|
+
|
|
192
|
+
return output
|
|
193
|
+
|
|
194
|
+
    async def reflect_on_past(
        self,
        ctx: Context,
        query: str,
        limit: int = 5,
        min_score: float = 0.3,
        use_decay: int = -1,
        project: Optional[str] = None,
        mode: str = "full",
        brief: bool = False,
        include_raw: bool = False,
        response_format: str = "xml"
    ) -> str:
        """Search for relevant past conversations using semantic search with optional time decay.

        Args:
            ctx: MCP request context used for debug logging.
            query: Natural-language search query.
            limit: Maximum number of results to return.
            min_score: Minimum similarity score for a hit to be kept.
            use_decay: 1 enables decay in the parallel search; any other
                value disables it here.
            project: Project scope; inferred from the client CWD when None,
                'all' searches every project.
            mode: "full" (default), "quick" (delegates to quick_search) or
                "summary" (delegates to search_summary).
            brief: When True, omit result content from the output.
            include_raw: When True, include raw payloads in the output.
            response_format: "xml" (default, rich formatter) or "markdown".

        Returns:
            Formatted search results, or an XML error fragment on failure.
        """

        await ctx.debug(f"Searching for: {query}, project={project}, mode={mode}, decay={use_decay}")

        try:
            # Track timing for performance metrics
            start_time = time.time()
            timing_info = {}

            # Determine project scope (fall back to the MCP client's CWD,
            # exported by the launcher as MCP_CLIENT_CWD)
            target_project = project
            if project is None:
                cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
                target_project = self.get_project_from_cwd(cwd)
                await ctx.debug(f"Inferred project from CWD: {target_project}")

            # Handle special modes by delegating to the dedicated methods
            if mode == "quick":
                return await self.quick_search(ctx, query, min_score, target_project)
            elif mode == "summary":
                return await self.search_summary(ctx, query, target_project)

            # Get relevant collections based on project
            await ctx.debug(f"Project resolver: {self.project_resolver is not None}, Target project: '{target_project}'")
            if self.project_resolver and target_project and target_project != 'all':
                # Use ProjectResolver to find matching collections
                collection_names = self.project_resolver.find_collections_for_project(target_project)
                await ctx.debug(f"ProjectResolver found {len(collection_names)} collections for '{target_project}'")

                # Get collection objects
                collections_response = await self.qdrant_client.get_collections()
                all_collections = collections_response.collections
                filtered_collections = [
                    c for c in all_collections
                    if c.name in collection_names
                ]
                await ctx.debug(f"Filtered to {len(filtered_collections)} collections from {len(all_collections)} total")
            else:
                # Use all collections except reflections
                collections_response = await self.qdrant_client.get_collections()
                collections = collections_response.collections
                filtered_collections = [
                    c for c in collections
                    if not c.name.startswith('reflections')
                ]
                await ctx.debug(f"Searching across {len(filtered_collections)} collections")

            if not filtered_collections:
                return "<search_results><message>No collections found for the specified project</message></search_results>"

            # Perform PARALLEL search across collections to avoid freeze
            collection_names = [c.name for c in filtered_collections]
            await ctx.debug(f"Starting parallel search across {len(collection_names)} collections")

            # Create embedding function wrapper for parallel search
            embedding_manager = self.get_embedding_manager()
            async def generate_embedding_func(text: str, force_type: str = 'local'):
                return await embedding_manager.generate_embedding(text, force_type=force_type)

            # Track embedding generation timing
            timing_info['embedding_start'] = time.time()

            # Use parallel search to avoid sequential processing freeze
            all_results, search_timing = await parallel_search_collections(
                collections_to_search=collection_names,
                query=query,
                qdrant_client=self.qdrant_client,
                ctx=ctx,
                limit=limit * 2,  # Get more results initially
                min_score=min_score,
                should_use_decay=use_decay == 1,
                target_project=target_project,
                generate_embedding_func=generate_embedding_func,
                constants={'DECAY_SCALE_DAYS': self.decay_scale_days},
                max_concurrent=10  # Limit concurrent searches to avoid overload
            )

            # Update timing info with search timing
            timing_info['embedding_end'] = time.time()  # Embeddings are generated inside parallel_search
            timing_info['search_all_start'] = timing_info.get('embedding_start', time.time())
            timing_info['search_all_end'] = time.time()
            # search_timing is a list of collection timings, not a dict

            await ctx.debug(f"Parallel search completed with {len(all_results)} total results")

            # Debug: Log some details about results
            if all_results:
                await ctx.debug(f"Top result score: {all_results[0]['score']:.4f}")
            else:
                await ctx.debug(f"No results found. Timing info: {timing_info}")

            if not all_results:
                return "<search_results><message>No matching conversations found</message></search_results>"

            # Sort by score (descending) and keep only the requested number
            all_results.sort(key=lambda x: x['score'], reverse=True)
            final_results = all_results[:limit]

            # Use rich formatting for default XML format
            if response_format == "xml" and not brief:
                # Try to get indexing status for rich display
                indexing_status = None
                # TODO: Add indexing status retrieval here if needed

                return format_search_results_rich(
                    results=final_results,
                    query=query,
                    target_project=target_project,
                    collections_searched=len(collection_names),
                    timing_info=timing_info,
                    start_time=start_time,
                    brief=brief,
                    include_raw=include_raw,
                    indexing_status=indexing_status
                )
            else:
                # Fall back to standard formatting for markdown or brief mode
                return self.format_search_results(
                    final_results,
                    query,
                    brief=brief,
                    include_raw=include_raw,
                    response_format=response_format
                )

        except Exception as e:
            logger.error(f"Search failed: {e}", exc_info=True)
            return f"<search_results><error>Search failed: {str(e)}</error></search_results>"
|
|
335
|
+
|
|
336
|
+
    async def quick_search(
        self,
        ctx: Context,
        query: str,
        min_score: float = 0.3,
        project: Optional[str] = None
    ) -> str:
        """Quick search that returns only the count and top result for fast overview.

        Args:
            ctx: MCP request context used for debug logging.
            query: Natural-language search query.
            min_score: Minimum similarity score for a hit to be kept.
            project: Project scope; inferred from the client CWD when None,
                'all' searches every project.

        Returns:
            A ``<quick_search>`` XML fragment with the match count and the
            single best hit, or an error fragment on failure.

        NOTE(review): ``<count>`` currently carries the number of distinct
        collections that matched (the same value as
        ``<collections_with_matches>``), not the number of matching results
        — confirm this is intended.
        """

        await ctx.debug(f"Quick search for: {query}, project={project}")

        try:
            # Determine project scope (fall back to the MCP client's CWD)
            target_project = project
            if project is None:
                cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
                target_project = self.get_project_from_cwd(cwd)

            # Get collections based on project
            if self.project_resolver and target_project and target_project != 'all':
                # Use ProjectResolver to find matching collections
                collection_names = self.project_resolver.find_collections_for_project(target_project)
                collections_response = await self.qdrant_client.get_collections()
                all_collections = collections_response.collections
                filtered_collections = [
                    c for c in all_collections
                    if c.name in collection_names
                ]
            else:
                # Use all collections except reflections
                collections_response = await self.qdrant_client.get_collections()
                collections = collections_response.collections
                filtered_collections = [
                    c for c in collections
                    if not c.name.startswith('reflections')
                ]

            # Quick PARALLEL count across collections
            collection_names = [c.name for c in filtered_collections]

            # Create embedding function wrapper
            embedding_manager = self.get_embedding_manager()
            async def generate_embedding_func(text: str, force_type: str = 'local'):
                return await embedding_manager.generate_embedding(text, force_type=force_type)

            # Use parallel search for quick check
            all_results, _ = await parallel_search_collections(
                collections_to_search=collection_names,
                query=query,
                qdrant_client=self.qdrant_client,
                ctx=ctx,
                limit=1,  # Only need top result from each collection
                min_score=min_score,
                should_use_decay=False,  # Quick search doesn't use decay
                target_project=target_project,
                generate_embedding_func=generate_embedding_func,
                constants={'DECAY_SCALE_DAYS': self.decay_scale_days},
                max_concurrent=20  # Higher concurrency for quick search
            )

            # Count collections with results and find top result
            # (results may carry either 'collection_name' or 'collection')
            collections_with_matches = len(set(r.get('collection_name', r.get('collection', '')) for r in all_results))
            top_result = max(all_results, key=lambda x: x.get('score', 0)) if all_results else None
            top_score = top_result.get('score', 0) if top_result else 0  # NOTE(review): currently unused

            # Format quick search response with proper XML escaping
            def _esc(x): return html.escape(str(x), quote=False)

            if not top_result:
                return "<quick_search><count>0</count><message>No matches found</message></quick_search>"

            # Get preview text and ensure we have content fallbacks
            preview_text = top_result.get('excerpt', top_result.get('content', top_result.get('text', '')))[:200]

            return f"""<quick_search>
<count>{collections_with_matches}</count>
<collections_with_matches>{collections_with_matches}</collections_with_matches>
<top_result>
<score>{top_result['score']:.3f}</score>
<timestamp>{_esc(top_result.get('timestamp', 'N/A'))}</timestamp>
<preview><![CDATA[{preview_text}...]]></preview>
</top_result>
</quick_search>"""

        except Exception as e:
            logger.error(f"Quick search failed: {e}", exc_info=True)
            def _esc(x): return html.escape(str(x), quote=False)
            return f"<quick_search><error>Quick search failed: {_esc(str(e))}</error></quick_search>"
|
|
424
|
+
|
|
425
|
+
async def search_summary(
|
|
426
|
+
self,
|
|
427
|
+
ctx: Context,
|
|
428
|
+
query: str,
|
|
429
|
+
project: Optional[str] = None
|
|
430
|
+
) -> str:
|
|
431
|
+
"""Get aggregated insights from search results without individual result details."""
|
|
432
|
+
|
|
433
|
+
await ctx.debug(f"Getting search summary for: {query}, project={project}")
|
|
434
|
+
|
|
435
|
+
try:
|
|
436
|
+
# Determine project scope
|
|
437
|
+
target_project = project
|
|
438
|
+
if project is None:
|
|
439
|
+
cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
|
|
440
|
+
target_project = self.get_project_from_cwd(cwd)
|
|
441
|
+
|
|
442
|
+
# Get collections based on project
|
|
443
|
+
if self.project_resolver and target_project and target_project != 'all':
|
|
444
|
+
# Use ProjectResolver to find matching collections
|
|
445
|
+
collection_names = self.project_resolver.find_collections_for_project(target_project)
|
|
446
|
+
collections_response = await self.qdrant_client.get_collections()
|
|
447
|
+
all_collections = collections_response.collections
|
|
448
|
+
filtered_collections = [
|
|
449
|
+
c for c in all_collections
|
|
450
|
+
if c.name in collection_names
|
|
451
|
+
]
|
|
452
|
+
else:
|
|
453
|
+
# Use all collections except reflections
|
|
454
|
+
collections_response = await self.qdrant_client.get_collections()
|
|
455
|
+
collections = collections_response.collections
|
|
456
|
+
filtered_collections = [
|
|
457
|
+
c for c in collections
|
|
458
|
+
if not c.name.startswith('reflections')
|
|
459
|
+
]
|
|
460
|
+
|
|
461
|
+
# Gather results for summary using PARALLEL search
|
|
462
|
+
collection_names = [c.name for c in filtered_collections]
|
|
463
|
+
|
|
464
|
+
# Create embedding function wrapper
|
|
465
|
+
embedding_manager = self.get_embedding_manager()
|
|
466
|
+
async def generate_embedding_func(text: str, force_type: str = 'local'):
|
|
467
|
+
return await embedding_manager.generate_embedding(text, force_type=force_type)
|
|
468
|
+
|
|
469
|
+
# Use parallel search for summary
|
|
470
|
+
all_results, _ = await parallel_search_collections(
|
|
471
|
+
collections_to_search=collection_names,
|
|
472
|
+
query=query,
|
|
473
|
+
qdrant_client=self.qdrant_client,
|
|
474
|
+
ctx=ctx,
|
|
475
|
+
limit=10, # Get more results for summary
|
|
476
|
+
min_score=0.0, # Get all results for aggregation
|
|
477
|
+
should_use_decay=False, # Summary doesn't use decay
|
|
478
|
+
target_project=target_project,
|
|
479
|
+
generate_embedding_func=generate_embedding_func,
|
|
480
|
+
constants={'DECAY_SCALE_DAYS': self.decay_scale_days},
|
|
481
|
+
max_concurrent=15 # Balanced concurrency
|
|
482
|
+
)
|
|
483
|
+
|
|
484
|
+
if not all_results:
|
|
485
|
+
return "<search_summary><message>No matches found</message></search_summary>"
|
|
486
|
+
|
|
487
|
+
# Sort and get top results
|
|
488
|
+
all_results.sort(key=lambda x: x['score'], reverse=True)
|
|
489
|
+
top_results = all_results[:10]
|
|
490
|
+
|
|
491
|
+
# Analyze patterns
|
|
492
|
+
avg_score = sum(r['score'] for r in top_results) / len(top_results)
|
|
493
|
+
collections_found = len(set(r.get('collection_name', r.get('collection', '')) for r in top_results))
|
|
494
|
+
|
|
495
|
+
# Extract common concepts if available
|
|
496
|
+
all_concepts = []
|
|
497
|
+
for r in top_results:
|
|
498
|
+
if 'payload' in r and 'concepts' in r['payload']:
|
|
499
|
+
all_concepts.extend(r['payload']['concepts'])
|
|
500
|
+
|
|
501
|
+
from collections import Counter
|
|
502
|
+
concept_counts = Counter(all_concepts).most_common(5)
|
|
503
|
+
|
|
504
|
+
return f"""<search_summary>
|
|
505
|
+
<query>{query}</query>
|
|
506
|
+
<total_matches>{len(all_results)}</total_matches>
|
|
507
|
+
<average_score>{avg_score:.3f}</average_score>
|
|
508
|
+
<collections_matched>{collections_found}</collections_matched>
|
|
509
|
+
<top_concepts>{', '.join([c[0] for c in concept_counts]) if concept_counts else 'N/A'}</top_concepts>
|
|
510
|
+
<insight>Found {len(all_results)} matches across {collections_found} collections with average relevance of {avg_score:.3f}</insight>
|
|
511
|
+
</search_summary>"""
|
|
512
|
+
|
|
513
|
+
except Exception as e:
|
|
514
|
+
logger.error(f"Search summary failed: {e}", exc_info=True)
|
|
515
|
+
return f"<search_summary><error>Search summary failed: {str(e)}</error></search_summary>"
|
|
516
|
+
|
|
517
|
+
async def get_more_results(
|
|
518
|
+
self,
|
|
519
|
+
ctx: Context,
|
|
520
|
+
query: str,
|
|
521
|
+
offset: int = 3,
|
|
522
|
+
limit: int = 3,
|
|
523
|
+
min_score: float = 0.3,
|
|
524
|
+
project: Optional[str] = None
|
|
525
|
+
) -> str:
|
|
526
|
+
"""Get additional search results after an initial search (pagination support)."""
|
|
527
|
+
|
|
528
|
+
await ctx.debug(f"Getting more results for: {query}, offset={offset}, limit={limit}")
|
|
529
|
+
|
|
530
|
+
try:
|
|
531
|
+
# Determine project scope
|
|
532
|
+
target_project = project
|
|
533
|
+
if project is None:
|
|
534
|
+
cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
|
|
535
|
+
target_project = self.get_project_from_cwd(cwd)
|
|
536
|
+
|
|
537
|
+
# Get collections based on project
|
|
538
|
+
if self.project_resolver and target_project and target_project != 'all':
|
|
539
|
+
# Use ProjectResolver to find matching collections
|
|
540
|
+
collection_names = self.project_resolver.find_collections_for_project(target_project)
|
|
541
|
+
collections_response = await self.qdrant_client.get_collections()
|
|
542
|
+
all_collections = collections_response.collections
|
|
543
|
+
filtered_collections = [
|
|
544
|
+
c for c in all_collections
|
|
545
|
+
if c.name in collection_names
|
|
546
|
+
]
|
|
547
|
+
else:
|
|
548
|
+
# Use all collections except reflections
|
|
549
|
+
collections_response = await self.qdrant_client.get_collections()
|
|
550
|
+
collections = collections_response.collections
|
|
551
|
+
filtered_collections = [
|
|
552
|
+
c for c in collections
|
|
553
|
+
if not c.name.startswith('reflections')
|
|
554
|
+
]
|
|
555
|
+
|
|
556
|
+
# Gather all results using PARALLEL search
|
|
557
|
+
collection_names = [c.name for c in filtered_collections]
|
|
558
|
+
|
|
559
|
+
# Create embedding function wrapper
|
|
560
|
+
embedding_manager = self.get_embedding_manager()
|
|
561
|
+
async def generate_embedding_func(text: str, force_type: str = 'local'):
|
|
562
|
+
return await embedding_manager.generate_embedding(text, force_type=force_type)
|
|
563
|
+
|
|
564
|
+
# Use parallel search for pagination
|
|
565
|
+
all_results, _ = await parallel_search_collections(
|
|
566
|
+
collections_to_search=collection_names,
|
|
567
|
+
query=query,
|
|
568
|
+
qdrant_client=self.qdrant_client,
|
|
569
|
+
ctx=ctx,
|
|
570
|
+
limit=offset + limit, # Get more results than needed to handle offset
|
|
571
|
+
min_score=min_score,
|
|
572
|
+
should_use_decay=False, # Pagination doesn't use decay
|
|
573
|
+
target_project=target_project,
|
|
574
|
+
generate_embedding_func=generate_embedding_func,
|
|
575
|
+
constants={'DECAY_SCALE_DAYS': self.decay_scale_days},
|
|
576
|
+
max_concurrent=10 # Standard concurrency
|
|
577
|
+
)
|
|
578
|
+
|
|
579
|
+
if not all_results:
|
|
580
|
+
return "<more_results><message>No more results found</message></more_results>"
|
|
581
|
+
|
|
582
|
+
# Sort all results by score
|
|
583
|
+
all_results.sort(key=lambda x: x['score'], reverse=True)
|
|
584
|
+
|
|
585
|
+
# Apply offset and limit
|
|
586
|
+
paginated_results = all_results[offset:offset + limit]
|
|
587
|
+
|
|
588
|
+
if not paginated_results:
|
|
589
|
+
return f"<more_results><message>No results at offset {offset}</message></more_results>"
|
|
590
|
+
|
|
591
|
+
# Format paginated results
|
|
592
|
+
output = f"<more_results>\n<query>{query}</query>\n"
|
|
593
|
+
output += f"<offset>{offset}</offset>\n"
|
|
594
|
+
output += f"<limit>{limit}</limit>\n"
|
|
595
|
+
output += f"<total_available>{len(all_results)}</total_available>\n"
|
|
596
|
+
output += f"<results_returned>{len(paginated_results)}</results_returned>\n"
|
|
597
|
+
|
|
598
|
+
for i, result in enumerate(paginated_results, 1):
|
|
599
|
+
output += f"<result index=\"{offset + i}\">\n"
|
|
600
|
+
output += f" <score>{result['score']:.3f}</score>\n"
|
|
601
|
+
output += f" <timestamp>{result.get('timestamp', 'N/A')}</timestamp>\n"
|
|
602
|
+
output += f" <preview>{result.get('content', '')[:200]}...</preview>\n"
|
|
603
|
+
output += "</result>\n"
|
|
604
|
+
|
|
605
|
+
output += "</more_results>"
|
|
606
|
+
return output
|
|
607
|
+
|
|
608
|
+
except Exception as e:
|
|
609
|
+
logger.error(f"Get more results failed: {e}", exc_info=True)
|
|
610
|
+
return f"<more_results><error>Failed to get more results: {str(e)}</error></more_results>"
|
|
611
|
+
|
|
612
|
+
async def search_by_file(
|
|
613
|
+
self,
|
|
614
|
+
ctx: Context,
|
|
615
|
+
file_path: str,
|
|
616
|
+
limit: int = 10,
|
|
617
|
+
project: Optional[str] = None
|
|
618
|
+
) -> str:
|
|
619
|
+
"""Search for conversations that analyzed a specific file."""
|
|
620
|
+
|
|
621
|
+
await ctx.debug(f"Searching for file: {file_path}, project={project}")
|
|
622
|
+
|
|
623
|
+
try:
|
|
624
|
+
# Create multiple path variants to match how paths are stored
|
|
625
|
+
# Import uses normalize_file_path which replaces /Users/ with ~/
|
|
626
|
+
path_variants = set()
|
|
627
|
+
|
|
628
|
+
# Original path
|
|
629
|
+
path_variants.add(file_path)
|
|
630
|
+
|
|
631
|
+
# Basename only
|
|
632
|
+
path_variants.add(os.path.basename(file_path))
|
|
633
|
+
|
|
634
|
+
# Try to resolve if it's a valid path
|
|
635
|
+
try:
|
|
636
|
+
resolved_path = str(Path(file_path).resolve())
|
|
637
|
+
path_variants.add(resolved_path)
|
|
638
|
+
|
|
639
|
+
# Convert resolved path to ~/ format (matching how import stores it)
|
|
640
|
+
home_dir = str(Path.home())
|
|
641
|
+
if resolved_path.startswith(home_dir):
|
|
642
|
+
tilde_path = resolved_path.replace(home_dir, '~', 1)
|
|
643
|
+
path_variants.add(tilde_path)
|
|
644
|
+
|
|
645
|
+
# Also try with /Users/ replaced by ~/
|
|
646
|
+
if '/Users/' in resolved_path:
|
|
647
|
+
path_variants.add(resolved_path.replace('/Users/', '~/', 1))
|
|
648
|
+
except:
|
|
649
|
+
pass
|
|
650
|
+
|
|
651
|
+
# If path starts with ~, also try expanded version
|
|
652
|
+
if file_path.startswith('~'):
|
|
653
|
+
expanded = os.path.expanduser(file_path)
|
|
654
|
+
path_variants.add(expanded)
|
|
655
|
+
|
|
656
|
+
# Convert all to forward slashes for consistency
|
|
657
|
+
path_variants = {p.replace('\\', '/') for p in path_variants if p}
|
|
658
|
+
|
|
659
|
+
await ctx.debug(f"Searching with path variants: {list(path_variants)}")
|
|
660
|
+
|
|
661
|
+
# Search for file mentions in metadata
|
|
662
|
+
collections_response = await self.qdrant_client.get_collections()
|
|
663
|
+
collections = collections_response.collections
|
|
664
|
+
|
|
665
|
+
# Define async function to search a single collection using scroll
|
|
666
|
+
async def search_collection(collection_name: str):
|
|
667
|
+
try:
|
|
668
|
+
from qdrant_client import models
|
|
669
|
+
|
|
670
|
+
# Use scroll with proper filter for metadata-only search
|
|
671
|
+
results, _ = await self.qdrant_client.scroll(
|
|
672
|
+
collection_name=collection_name,
|
|
673
|
+
scroll_filter=models.Filter(
|
|
674
|
+
should=[
|
|
675
|
+
models.FieldCondition(
|
|
676
|
+
key="files_analyzed",
|
|
677
|
+
match=models.MatchValue(value=path_variant)
|
|
678
|
+
)
|
|
679
|
+
for path_variant in path_variants
|
|
680
|
+
]
|
|
681
|
+
),
|
|
682
|
+
limit=limit,
|
|
683
|
+
with_payload=True
|
|
684
|
+
)
|
|
685
|
+
|
|
686
|
+
formatted_results = []
|
|
687
|
+
for point in results:
|
|
688
|
+
formatted_results.append({
|
|
689
|
+
'conversation_id': point.payload.get('conversation_id'),
|
|
690
|
+
'timestamp': point.payload.get('timestamp'),
|
|
691
|
+
'content': point.payload.get('content', point.payload.get('text', '')),
|
|
692
|
+
'files_analyzed': point.payload.get('files_analyzed', []),
|
|
693
|
+
'score': 1.0 # No score in scroll, use 1.0 for found items
|
|
694
|
+
})
|
|
695
|
+
return formatted_results
|
|
696
|
+
|
|
697
|
+
except Exception as e:
|
|
698
|
+
await ctx.debug(f"Error searching {collection_name}: {e}")
|
|
699
|
+
return []
|
|
700
|
+
|
|
701
|
+
# Use asyncio.gather for PARALLEL search across all collections
|
|
702
|
+
import asyncio
|
|
703
|
+
search_tasks = [search_collection(c.name) for c in collections]
|
|
704
|
+
|
|
705
|
+
# Limit concurrent searches to avoid overload
|
|
706
|
+
batch_size = 20
|
|
707
|
+
all_results = []
|
|
708
|
+
for i in range(0, len(search_tasks), batch_size):
|
|
709
|
+
batch = search_tasks[i:i+batch_size]
|
|
710
|
+
batch_results = await asyncio.gather(*batch)
|
|
711
|
+
for results in batch_results:
|
|
712
|
+
all_results.extend(results)
|
|
713
|
+
|
|
714
|
+
# Format results
|
|
715
|
+
if not all_results:
|
|
716
|
+
return f"<file_search><message>No conversations found analyzing {file_path}</message></file_search>"
|
|
717
|
+
|
|
718
|
+
return self.format_search_results(all_results, f"file:{file_path}")
|
|
719
|
+
|
|
720
|
+
except Exception as e:
|
|
721
|
+
logger.error(f"File search failed: {e}", exc_info=True)
|
|
722
|
+
return f"<file_search><error>File search failed: {str(e)}</error></file_search>"
|
|
723
|
+
|
|
724
|
+
async def search_by_concept(
    self,
    ctx: Context,
    concept: str,
    limit: int = 10,
    project: Optional[str] = None,
    include_files: bool = True
) -> str:
    """Find past conversations related to a development concept.

    The concept text itself is used as the semantic query and the call is
    delegated to ``reflect_on_past``.  NOTE(review): ``include_files`` is
    accepted for interface compatibility but is not consulted in this
    simplified implementation — confirm before relying on it.
    """

    await ctx.debug(f"Searching for concept: {concept}, project={project}")

    try:
        # Reuse the general semantic search; the concept doubles as the
        # query string.  No concept-specific post-processing yet.
        semantic_results = await self.reflect_on_past(
            ctx, concept, limit=limit, project=project
        )
    except Exception as e:
        logger.error(f"Concept search failed: {e}", exc_info=True)
        return f"<concept_search><error>Concept search failed: {str(e)}</error></concept_search>"

    return semantic_results
|
|
749
|
+
|
|
750
|
+
async def get_next_results(
    self,
    ctx: Context,
    query: str,
    offset: int = 3,
    limit: int = 3,
    min_score: float = 0.3,
    project: Optional[str] = None
) -> str:
    """Fetch the next page of results for a previously issued query.

    Thin alias kept for backward compatibility: forwards every argument,
    unchanged, to ``get_more_results``.
    """
    return await self.get_more_results(
        ctx, query, offset, limit, min_score, project
    )
|
|
762
|
+
|
|
763
|
+
|
|
764
|
+
def register_search_tools(
    mcp,
    qdrant_client: AsyncQdrantClient,
    qdrant_url: str,
    get_embedding_manager,
    normalize_project_name,
    enable_memory_decay: bool,
    decay_weight: float,
    decay_scale_days: float,
    use_native_decay: bool,
    native_decay_available: bool,
    decay_manager=None,
    project_resolver=None  # optional helper for resolving project names/paths
):
    """Wire the search tool suite into the MCP server.

    A single ``SearchTools`` instance is built from the supplied
    dependencies, after which one thin ``@mcp.tool()`` wrapper is
    registered per public search operation.  Each wrapper only forwards
    its arguments to the shared instance, so all state (Qdrant client,
    decay configuration, project resolver) lives in one place.  Tool
    docstrings and ``Field`` descriptions are surfaced to MCP clients
    and are therefore part of the runtime contract.
    """

    toolset = SearchTools(
        qdrant_client,
        qdrant_url,
        get_embedding_manager,
        normalize_project_name,
        enable_memory_decay,
        decay_weight,
        decay_scale_days,
        use_native_decay,
        native_decay_available,
        decay_manager,
        project_resolver  # forwarded so per-project searches can resolve scopes
    )

    @mcp.tool()
    async def reflect_on_past(
        ctx: Context,
        query: str = Field(description="The search query to find semantically similar conversations"),
        limit: int = Field(default=5, description="Maximum number of results to return"),
        min_score: float = Field(default=0.3, description="Minimum similarity score (0-1)"),
        use_decay: int = Field(default=-1, description="Apply time-based decay: 1=enable, 0=disable, -1=use environment default (accepts int or str)"),
        project: Optional[str] = Field(default=None, description="Search specific project only. If not provided, searches current project based on working directory. Use 'all' to search across all projects."),
        mode: str = Field(default="full", description="Search mode: 'full' (all results with details), 'quick' (count + top result only), 'summary' (aggregated insights without individual results)"),
        brief: bool = Field(default=False, description="Brief mode: returns minimal information for faster response"),
        include_raw: bool = Field(default=False, description="Include raw Qdrant payload data for debugging (increases response size)"),
        response_format: str = Field(default="xml", description="Response format: 'xml' or 'markdown'")
    ) -> str:
        """Search for relevant past conversations using semantic search with optional time decay."""
        return await toolset.reflect_on_past(ctx, query, limit, min_score, use_decay, project, mode, brief, include_raw, response_format)

    @mcp.tool()
    async def quick_search(
        ctx: Context,
        query: str = Field(description="The search query to find semantically similar conversations"),
        min_score: float = Field(default=0.3, description="Minimum similarity score (0-1)"),
        project: Optional[str] = Field(default=None, description="Search specific project only. If not provided, searches current project based on working directory. Use 'all' to search across all projects.")
    ) -> str:
        """Quick search that returns only the count and top result for fast overview."""
        return await toolset.quick_search(ctx, query, min_score, project)

    @mcp.tool()
    async def search_summary(
        ctx: Context,
        query: str = Field(description="The search query to find semantically similar conversations"),
        project: Optional[str] = Field(default=None, description="Search specific project only. If not provided, searches current project based on working directory. Use 'all' to search across all projects.")
    ) -> str:
        """Get aggregated insights from search results without individual result details."""
        return await toolset.search_summary(ctx, query, project)

    @mcp.tool()
    async def get_more_results(
        ctx: Context,
        query: str = Field(description="The original search query"),
        offset: int = Field(default=3, description="Number of results to skip (for pagination)"),
        limit: int = Field(default=3, description="Number of additional results to return"),
        min_score: float = Field(default=0.3, description="Minimum similarity score (0-1)"),
        project: Optional[str] = Field(default=None, description="Search specific project only")
    ) -> str:
        """Get additional search results after an initial search (pagination support)."""
        return await toolset.get_more_results(ctx, query, offset, limit, min_score, project)

    @mcp.tool()
    async def search_by_file(
        ctx: Context,
        file_path: str = Field(description="The file path to search for in conversations"),
        limit: int = Field(default=10, description="Maximum number of results to return"),
        project: Optional[str] = Field(default=None, description="Search specific project only. Use 'all' to search across all projects.")
    ) -> str:
        """Search for conversations that analyzed a specific file."""
        return await toolset.search_by_file(ctx, file_path, limit, project)

    @mcp.tool()
    async def search_by_concept(
        ctx: Context,
        concept: str = Field(description="The concept to search for (e.g., 'security', 'docker', 'testing')"),
        limit: int = Field(default=10, description="Maximum number of results to return"),
        project: Optional[str] = Field(default=None, description="Search specific project only. Use 'all' to search across all projects."),
        include_files: bool = Field(default=True, description="Include file information in results")
    ) -> str:
        """Search for conversations about a specific development concept."""
        return await toolset.search_by_concept(ctx, concept, limit, project, include_files)

    @mcp.tool()
    async def get_next_results(
        ctx: Context,
        query: str = Field(description="The original search query"),
        offset: int = Field(default=3, description="Number of results to skip (for pagination)"),
        limit: int = Field(default=3, description="Number of additional results to return"),
        min_score: float = Field(default=0.3, description="Minimum similarity score (0-1)"),
        project: Optional[str] = Field(default=None, description="Search specific project only")
    ) -> str:
        """Get additional search results after an initial search (pagination support)."""
        return await toolset.get_next_results(ctx, query, offset, limit, min_score, project)

    logger.info("Search tools registered successfully")
|