claude-self-reflect 3.0.2 → 3.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/mcp-server/src/server.py +257 -4
- package/package.json +1 -1
package/mcp-server/src/server.py
CHANGED
```diff
@@ -576,12 +576,17 @@ async def reflect_on_past(
     project: Optional[str] = Field(default=None, description="Search specific project only. If not provided, searches current project based on working directory. Use 'all' to search across all projects."),
     include_raw: bool = Field(default=False, description="Include raw Qdrant payload data for debugging (increases response size)"),
     response_format: str = Field(default="xml", description="Response format: 'xml' or 'markdown'"),
-    brief: bool = Field(default=False, description="Brief mode: returns minimal information for faster response")
+    brief: bool = Field(default=False, description="Brief mode: returns minimal information for faster response"),
+    mode: str = Field(default="full", description="Search mode: 'full' (all results with details), 'quick' (count + top result only), 'summary' (aggregated insights without individual results)")
 ) -> str:
     """Search for relevant past conversations using semantic search with optional time decay."""
 
     logger.info(f"=== SEARCH START === Query: '{query}', Project: '{project}', Limit: {limit}")
 
+    # Validate mode parameter
+    if mode not in ['full', 'quick', 'summary']:
+        return f"<error>Invalid mode '{mode}'. Must be 'full', 'quick', or 'summary'</error>"
+
     # Start timing
     start_time = time.time()
     timing_info = {}
```
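The new `mode` parameter is validated before any search work starts, so an unsupported value fails fast with the server's XML-style error string. A minimal standalone sketch of that guard; `VALID_MODES` and `validate_mode` are hypothetical names introduced here for illustration, the package inlines the check instead:

```python
# Illustrative sketch only: VALID_MODES and validate_mode are hypothetical
# names; the package inlines this check at the top of reflect_on_past.
VALID_MODES = ("full", "quick", "summary")

def validate_mode(mode: str):
    """Return an XML-style error string (as the server does), or None if valid."""
    if mode not in VALID_MODES:
        return f"<error>Invalid mode '{mode}'. Must be 'full', 'quick', or 'summary'</error>"
    return None

assert validate_mode("quick") is None
assert validate_mode("brief") is not None  # 'brief' is a separate boolean flag, not a mode
```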
```diff
@@ -607,6 +612,7 @@ async def reflect_on_past(
 
     # Always get the working directory for logging purposes
     cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+    await ctx.debug(f"CWD: {cwd}, Project param: {project}")
 
     if project is None:
         # Use MCP_CLIENT_CWD environment variable set by run-mcp.sh
```
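The added `ctx.debug` call surfaces working-directory detection to the MCP client's log stream instead of only the server log. A minimal sketch of the same pattern in its own FastMCP tool; the tool name and import path are assumptions, not taken from this package:

```python
import os

from mcp.server.fastmcp import Context, FastMCP  # assumed SDK import path

mcp = FastMCP("demo")

@mcp.tool()
async def where_am_i(ctx: Context) -> str:
    # Same fallback chain as the diff: prefer the client-supplied CWD,
    # otherwise fall back to the server process's own working directory.
    cwd = os.environ.get("MCP_CLIENT_CWD", os.getcwd())
    await ctx.debug(f"CWD: {cwd}")  # emitted to the client as a debug log message
    return cwd
```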
```diff
@@ -628,6 +634,8 @@ async def reflect_on_past(
     # If still no project detected, use the last directory name
     if target_project is None:
         target_project = Path(cwd).name
+
+    await ctx.debug(f"Auto-detected project from path: {target_project}")
 
     # For project matching, we need to handle the dash-encoded format
     # Convert folder name to the format used in stored data
```
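Auto-detection falls back to the last path component of the client's working directory. A quick sketch of that fallback in isolation (the example path is invented):

```python
import os
from pathlib import Path

# Fallback shown above: with no explicit project, the last directory name
# of the client's working directory becomes the project name.
cwd = os.environ.get("MCP_CLIENT_CWD", "/home/user/projects/claude-self-reflect")
target_project = Path(cwd).name
print(target_project)  # -> claude-self-reflect
```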
```diff
@@ -1121,10 +1129,21 @@ async def reflect_on_past(
     # Sort by score and limit
     timing_info['sort_start'] = time.time()
     all_results.sort(key=lambda x: x.score, reverse=True)
-
+
+    # Apply mode-specific limits
+    if mode == "quick":
+        # For quick mode, only keep the top result
+        all_results = all_results[:1]
+    elif mode == "summary":
+        # For summary mode, we'll process all results but not return individual ones
+        pass  # Keep all for aggregation
+    else:
+        # For full mode, apply the normal limit
+        all_results = all_results[:limit]
+
     timing_info['sort_end'] = time.time()
 
-    logger.info(f"Total results: {len(all_results)}, Returning: {len(all_results[:limit])}")
+    logger.info(f"Total results: {len(all_results)}, Mode: {mode}, Returning: {len(all_results[:limit])}")
     for r in all_results[:3]:  # Log first 3
         logger.debug(f"Result: id={r.id}, has_patterns={bool(r.code_patterns)}, pattern_keys={list(r.code_patterns.keys()) if r.code_patterns else None}")
 
```
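The three modes diverge only in how the score-sorted list is truncated: 'quick' keeps one result, 'summary' keeps everything for aggregation, and 'full' honours the caller's `limit`. A toy demonstration, with plain floats standing in for the server's scored result objects:

```python
# Toy demonstration of the mode-specific limiting above; floats stand in
# for scored result objects.
def apply_mode_limit(results, mode, limit):
    if mode == "quick":
        return results[:1]      # top result only
    if mode == "summary":
        return results          # keep all for aggregation
    return results[:limit]      # full mode honours the normal limit

scores = [0.91, 0.87, 0.85, 0.72, 0.66]
assert apply_mode_limit(scores, "quick", 3) == [0.91]
assert apply_mode_limit(scores, "summary", 3) == scores
assert apply_mode_limit(scores, "full", 3) == [0.91, 0.87, 0.85]
```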
```diff
@@ -1137,9 +1156,97 @@ async def reflect_on_past(
     # Update indexing status before returning results
     await update_indexing_status()
 
-    # Format results based on response_format
+    # Format results based on response_format and mode
     timing_info['format_start'] = time.time()
 
+    # Handle mode-specific responses
+    if mode == "quick":
+        # Quick mode: return just count and top result
+        total_count = len(all_results)  # Before we limited to 1
+        if response_format == "xml":
+            result_text = f"<quick_search>\n"
+            result_text += f"  <count>{total_count}</count>\n"
+            if all_results:
+                top_result = all_results[0]
+                result_text += f"  <top_result>\n"
+                result_text += f"    <score>{top_result.score:.3f}</score>\n"
+                result_text += f"    <excerpt>{escape(top_result.excerpt[:200])}</excerpt>\n"
+                result_text += f"    <project>{escape(top_result.project_name)}</project>\n"
+                result_text += f"    <conversation_id>{escape(top_result.conversation_id or '')}</conversation_id>\n"
+                result_text += f"  </top_result>\n"
+            result_text += f"</quick_search>"
+            return result_text
+        else:
+            # Markdown format for quick mode
+            if all_results:
+                return f"**Found {total_count} matches**\n\nTop result (score: {all_results[0].score:.3f}):\n{all_results[0].excerpt[:200]}"
+            else:
+                return f"No matches found for '{query}'"
+
+    elif mode == "summary":
+        # Summary mode: return aggregated insights without individual results
+        if not all_results:
+            return f"No conversations found to summarize for '{query}'"
+
+        # Aggregate data
+        total_count = len(all_results)
+        avg_score = sum(r.score for r in all_results) / total_count
+
+        # Extract common concepts and tools
+        all_concepts = []
+        all_tools = []
+        all_files = []
+        projects = set()
+
+        for result in all_results:
+            if result.concepts:
+                all_concepts.extend(result.concepts)
+            if result.tools_used:
+                all_tools.extend(result.tools_used)
+            if result.files_analyzed:
+                all_files.extend(result.files_analyzed)
+            projects.add(result.project_name)
+
+        # Count frequencies
+        from collections import Counter
+        concept_counts = Counter(all_concepts).most_common(5)
+        tool_counts = Counter(all_tools).most_common(5)
+
+        if response_format == "xml":
+            result_text = f"<search_summary>\n"
+            result_text += f"  <query>{escape(query)}</query>\n"
+            result_text += f"  <total_matches>{total_count}</total_matches>\n"
+            result_text += f"  <average_score>{avg_score:.3f}</average_score>\n"
+            result_text += f"  <projects_involved>{len(projects)}</projects_involved>\n"
+            if concept_counts:
+                result_text += f"  <common_concepts>\n"
+                for concept, count in concept_counts:
+                    result_text += f"    <concept count=\"{count}\">{escape(concept)}</concept>\n"
+                result_text += f"  </common_concepts>\n"
+            if tool_counts:
+                result_text += f"  <common_tools>\n"
+                for tool, count in tool_counts:
+                    result_text += f"    <tool count=\"{count}\">{escape(tool)}</tool>\n"
+                result_text += f"  </common_tools>\n"
+            result_text += f"</search_summary>"
+            return result_text
+        else:
+            # Markdown format for summary
+            result_text = f"## Summary for: {query}\n\n"
+            result_text += f"- **Total matches**: {total_count}\n"
+            result_text += f"- **Average relevance**: {avg_score:.3f}\n"
+            result_text += f"- **Projects involved**: {len(projects)}\n\n"
+            if concept_counts:
+                result_text += "**Common concepts**:\n"
+                for concept, count in concept_counts:
+                    result_text += f"- {concept} ({count} occurrences)\n"
+            if tool_counts:
+                result_text += "\n**Common tools**:\n"
+                for tool, count in tool_counts:
+                    result_text += f"- {tool} ({count} uses)\n"
+            return result_text
+
+    # Continue with normal formatting for full mode
     if response_format == "xml":
         # Add upfront summary for immediate visibility (before collapsible XML)
         upfront_summary = ""
```
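Summary mode's aggregation is a plain frequency count: concepts, tools, and files are pooled across all results and ranked with `collections.Counter(...).most_common(5)`. A self-contained sketch with invented sample data, rendering the same markdown shape as the diff's summary branch:

```python
from collections import Counter

# Invented sample data; the server pools these lists from result metadata.
all_concepts = ["docker", "qdrant", "docker", "embeddings", "docker", "qdrant"]
all_tools = ["Read", "Bash", "Read"]

concept_counts = Counter(all_concepts).most_common(5)  # [('docker', 3), ('qdrant', 2), ('embeddings', 1)]
tool_counts = Counter(all_tools).most_common(5)

lines = ["**Common concepts**:"]
lines += [f"- {concept} ({count} occurrences)" for concept, count in concept_counts]
lines += ["", "**Common tools**:"]
lines += [f"- {tool} ({count} uses)" for tool, count in tool_counts]
print("\n".join(lines))
```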
```diff
@@ -1842,6 +1949,8 @@ async def search_by_concept(
                 })
 
         except Exception as e:
+            # Log unexpected errors but continue with other collections
+            logger.debug(f"Error searching collection {collection_name}: {e}")
             continue
 
     # If no results from metadata search OR no metadata exists, fall back to semantic search
```
```diff
@@ -1868,6 +1977,8 @@ async def search_by_concept(
                 })
 
         except Exception as e:
+            # Log unexpected errors but continue with other collections
+            logger.debug(f"Error searching collection {collection_name}: {e}")
             continue
 
     # Sort by score and limit
```
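Both `search_by_concept` hunks apply the same fix: a per-collection failure used to be swallowed silently by the bare `continue`; it is now logged at debug level before the loop moves on to the remaining collections. The pattern in isolation (the failing stand-in function is invented):

```python
import logging

logger = logging.getLogger(__name__)

def search_collection(name: str):
    raise RuntimeError(f"{name} unavailable")  # stand-in for the real Qdrant call

for collection_name in ["conv_ab12cd34_local", "conv_ef56ab78_local"]:
    try:
        search_collection(collection_name)
    except Exception as e:
        # Log unexpected errors but continue with other collections
        logger.debug(f"Error searching collection {collection_name}: {e}")
        continue
```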
```diff
@@ -2020,6 +2131,148 @@ This gives you access to:
 </full_conversation>"""
 
 
+@mcp.tool()
+async def get_next_results(
+    ctx: Context,
+    query: str = Field(description="The original search query"),
+    offset: int = Field(default=3, description="Number of results to skip (for pagination)"),
+    limit: int = Field(default=3, description="Number of additional results to return"),
+    min_score: float = Field(default=0.7, description="Minimum similarity score (0-1)"),
+    project: Optional[str] = Field(default=None, description="Search specific project only")
+) -> str:
+    """Get additional search results after an initial search (pagination support)."""
+    global qdrant_client, embedding_manager
+
+    try:
+        # Generate embedding for the query
+        embedding = await generate_embedding(query)
+
+        # Determine which collections to search
+        if project == "all" or not project:
+            # Search all collections if project is "all" or not specified
+            collections = await get_all_collections()
+        else:
+            # Search specific project
+            all_collections = await get_all_collections()
+            project_hash = hashlib.md5(project.encode()).hexdigest()[:8]
+            collections = [
+                c for c in all_collections
+                if c.startswith(f"conv_{project_hash}_")
+            ]
+            if not collections:
+                # Fall back to searching all collections
+                collections = all_collections
+
+        if not collections:
+            return """<next_results>
+<error>No collections available to search</error>
+</next_results>"""
+
+        # Collect all results from all collections
+        all_results = []
+        for collection_name in collections:
+            try:
+                # Check if collection exists
+                collection_info = await qdrant_client.get_collection(collection_name)
+                if not collection_info:
+                    continue
+
+                # Search with reasonable limit to account for offset
+                max_search_limit = 100  # Define a reasonable cap
+                search_limit = min(offset + limit + 10, max_search_limit)
+                results = await qdrant_client.search(
+                    collection_name=collection_name,
+                    query_vector=embedding,
+                    limit=search_limit,
+                    score_threshold=min_score
+                )
+
+                for point in results:
+                    payload = point.payload
+                    score = float(point.score)
+
+                    # Apply time-based decay if enabled
+                    use_decay_bool = ENABLE_MEMORY_DECAY  # Use global default
+                    if use_decay_bool and 'timestamp' in payload:
+                        try:
+                            timestamp = datetime.fromisoformat(payload['timestamp'].replace('Z', '+00:00'))
+                            age_days = (datetime.now(timezone.utc) - timestamp).days
+                            decay_factor = DECAY_WEIGHT + (1 - DECAY_WEIGHT) * math.exp(-age_days / DECAY_SCALE_DAYS)
+                            score = score * decay_factor
+                        except (ValueError, TypeError) as e:
+                            # Log but continue - timestamp format issue shouldn't break search
+                            logger.debug(f"Failed to apply decay for timestamp {payload.get('timestamp')}: {e}")
+
+                    all_results.append({
+                        'score': score,
+                        'payload': payload,
+                        'collection': collection_name
+                    })
+
+            except Exception as e:
+                # Log unexpected errors but continue with other collections
+                logger.debug(f"Error searching collection {collection_name}: {e}")
+                continue
+
+        # Sort by score
+        all_results.sort(key=lambda x: x['score'], reverse=True)
+
+        # Apply pagination
+        paginated_results = all_results[offset:offset + limit]
+
+        if not paginated_results:
+            return f"""<next_results>
+<query>{query}</query>
+<offset>{offset}</offset>
+<status>no_more_results</status>
+<message>No additional results found beyond offset {offset}</message>
+</next_results>"""
+
+        # Format results
+        results_text = []
+        for i, result in enumerate(paginated_results, start=offset + 1):
+            payload = result['payload']
+            score = result['score']
+            timestamp = payload.get('timestamp', 'Unknown')
+            conversation_id = payload.get('conversation_id', 'Unknown')
+            project = payload.get('project', 'Unknown')
+
+            # Get text preview (store text once to avoid multiple calls)
+            text = payload.get('text', '')
+            text_preview = text[:300] + '...' if len(text) > 300 else text
+
+            results_text.append(f"""
+<result index="{i}">
+  <score>{score:.3f}</score>
+  <timestamp>{timestamp}</timestamp>
+  <project>{project}</project>
+  <conversation_id>{conversation_id}</conversation_id>
+  <preview>{text_preview}</preview>
+</result>""")
+
+        # Check if there are more results available
+        has_more = len(all_results) > (offset + limit)
+        next_offset = offset + limit if has_more else None
+
+        return f"""<next_results>
+<query>{query}</query>
+<offset>{offset}</offset>
+<limit>{limit}</limit>
+<count>{len(paginated_results)}</count>
+<total_available>{len(all_results)}</total_available>
+<has_more>{has_more}</has_more>
+{f'<next_offset>{next_offset}</next_offset>' if next_offset else ''}
+<results>{''.join(results_text)}
+</results>
+</next_results>"""
+
+    except Exception as e:
+        await ctx.error(f"Pagination failed: {str(e)}")
+        return f"""<next_results>
+<error>Failed to get next results: {str(e)}</error>
+</next_results>"""
+
+
 # Run the server
 if __name__ == "__main__":
     import sys
```
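The new `get_next_results` tool makes pagination stateless: each call re-runs the search (capped at `min(offset + limit + 10, 100)` candidates per collection), re-sorts, and slices. The slice arithmetic, sketched with a toy result list; the defaults `offset=3, limit=3` match the tool's `Field` defaults:

```python
# Toy sketch of get_next_results' stateless pagination arithmetic.
all_results = list(range(10))   # pretend: 10 score-sorted results
offset, limit = 3, 3            # the tool's defaults

paginated = all_results[offset:offset + limit]
has_more = len(all_results) > (offset + limit)
next_offset = offset + limit if has_more else None

assert paginated == [3, 4, 5]   # results 4-6 of the original ranking
assert has_more and next_offset == 6
```

Since nothing is cached between calls, each page pays the full search cost again; the trade-off is a simpler server with no session state to expire.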