claude-self-reflect 3.3.1 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,407 @@
1
+ """Enhanced tool registry with improved descriptions for better tool selection.
2
+
3
+ This module provides enhanced tool registration with:
4
+ 1. csr_ namespace prefix for all tools
5
+ 2. Explicit "when to use" guidance in descriptions
6
+ 3. Response format flexibility (concise/detailed)
7
+ 4. Better tool grouping and discoverability
8
+ """
9
+
10
+ from typing import Optional, List, Literal
11
+ from fastmcp import Context
12
+ from pydantic import Field
13
+
14
+
15
+ def register_enhanced_search_tools(mcp, tools):
16
+ """Register search tools with enhanced descriptions for better selection rates."""
17
+
18
# Primary search tool - most commonly needed
@mcp.tool(name="csr_reflect_on_past")
async def csr_reflect_on_past(
    ctx: Context,
    query: str = Field(
        description="The search query to find semantically similar conversations"
    ),
    limit: int = Field(
        default=5,
        description="Maximum number of results to return"
    ),
    min_score: float = Field(
        default=0.3,
        description="Minimum similarity score (0-1)"
    ),
    use_decay: int = Field(
        default=-1,
        description="Apply time-based decay: 1=enable, 0=disable, -1=use environment default"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Search specific project only. Use 'all' to search across all projects"
    ),
    mode: str = Field(
        default="full",
        description="Search mode: 'full' (all results), 'quick' (count only), 'summary' (insights)"
    ),
    response_format: Literal["concise", "detailed", "xml"] = Field(
        default="xml",
        description="Output format: 'concise' for brief results, 'detailed' for full context, 'xml' for structured"
    )
) -> str:
    """Search past Claude conversations semantically to find relevant context.

    WHEN TO USE THIS TOOL:
    - User asks "what did we discuss about X?" or "find conversations about Y"
    - You need context from previous work on similar problems
    - User mentions "remember when" or "last time we"
    - Debugging issues that may have been solved before
    - Finding implementation patterns used in the project

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "What did we work on with Docker last week?"
    - "Find all conversations about authentication"
    - "How did we solve the memory leak issue?"
    - "Search for discussions about database optimization"

    This is the PRIMARY tool for conversation memory - use it liberally!
    """
    # Map response_format to existing parameters:
    # 'concise' -> brief output, 'detailed' -> include raw payloads.
    brief = response_format == "concise"
    include_raw = response_format == "detailed"

    # NOTE(review): response_format is forwarded as a hardcoded "xml" regardless of
    # the caller's choice; the user's selection only drives brief/include_raw above.
    # Presumably the underlying tool always uses XML framing — confirm intended.
    return await tools.reflect_on_past(
        ctx, query, limit, min_score, use_decay,
        project, mode, brief, include_raw, response_format="xml"
    )
75
+
76
# Quick existence check - for fast validation
@mcp.tool(name="csr_quick_check")
async def csr_quick_check(
    ctx: Context,
    query: str = Field(
        description="Topic or concept to check for existence"
    ),
    min_score: float = Field(
        default=0.3,
        description="Minimum similarity score (0-1)"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Search specific project only"
    )
) -> str:
    """Quick check if a topic was discussed before (returns count + top match only).

    WHEN TO USE THIS TOOL:
    - User asks "have we discussed X?" or "is there anything about Y?"
    - You need a yes/no answer about topic existence
    - Checking if a problem was encountered before
    - Validating if a concept is familiar to the project

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "Have we talked about WebSockets?"
    - "Is there any discussion about React hooks?"
    - "Did we ever implement caching?"

    Much faster than full search - use for existence checks!
    """
    # Thin namespaced wrapper: delegates directly to the underlying quick search.
    return await tools.quick_search(ctx, query, min_score, project)
108
+
109
# Time-based search - for recent work
@mcp.tool(name="csr_recent_work")
async def csr_recent_work(
    ctx: Context,
    limit: int = Field(
        default=10,
        description="Number of recent conversations to return"
    ),
    group_by: str = Field(
        default="conversation",
        description="Group by 'conversation', 'day', or 'session'"
    ),
    include_reflections: bool = Field(
        default=True,
        description="Include stored reflections"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Specific project or 'all' for cross-project"
    ),
    response_format: Literal["concise", "detailed"] = Field(
        default="concise",
        description="Output verbosity level"
    )
) -> str:
    """Get recent work conversations to understand current context.

    WHEN TO USE THIS TOOL:
    - User asks "what did we work on recently?" or "what were we doing?"
    - Starting a new session and need context
    - User says "continue from where we left off"
    - Reviewing progress over time periods

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "What did we work on yesterday?"
    - "Show me the last 5 things we discussed"
    - "What have I been working on this week?"
    - "Let's continue from last time"

    Essential for session continuity and context awareness!
    """
    # NOTE(review): response_format is declared above but never forwarded to
    # tools.get_recent_work — the underlying call ignores it. Confirm whether
    # it should be passed through or removed from the signature.
    return await tools.get_recent_work(ctx, group_by, limit, include_reflections, project)
151
+
152
# Time-constrained semantic search
@mcp.tool(name="csr_search_by_time")
async def csr_search_by_time(
    ctx: Context,
    query: str = Field(
        description="Semantic search query"
    ),
    time_range: Optional[str] = Field(
        default=None,
        description="Natural language time like 'last week', 'yesterday'"
    ),
    since: Optional[str] = Field(
        default=None,
        description="ISO timestamp or relative time"
    ),
    until: Optional[str] = Field(
        default=None,
        description="ISO timestamp or relative time"
    ),
    limit: int = Field(
        default=10,
        description="Maximum number of results"
    ),
    min_score: float = Field(
        default=0.3,
        description="Minimum similarity score"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Specific project or 'all'"
    )
) -> str:
    """Search with time constraints for time-specific queries.

    WHEN TO USE THIS TOOL:
    - Query includes time references like "last week", "yesterday", "this month"
    - User wants recent occurrences of a topic
    - Debugging issues that started at a specific time
    - Finding when something was first discussed

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "Docker errors from last week"
    - "What did we discuss about testing yesterday?"
    - "Authentication problems in the past 3 days"
    - "Recent conversations about performance"

    Combines semantic search with temporal filtering!
    """
    # Delegates to the recency search; note the forwarding order places the
    # temporal arguments (since, until, time_range) after the project filter.
    return await tools.search_by_recency(
        ctx, query, limit, min_score, project,
        since, until, time_range
    )
204
+
205
# File-based search - for code archaeology
@mcp.tool(name="csr_search_by_file")
async def csr_search_by_file(
    ctx: Context,
    file_path: str = Field(
        description="File path to search for (absolute or relative)"
    ),
    limit: int = Field(
        default=10,
        description="Maximum number of results"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Search specific project only"
    )
) -> str:
    """Find all conversations that analyzed or modified a specific file.

    WHEN TO USE THIS TOOL:
    - User asks "when did we modify X file?" or "who worked on Y?"
    - Investigating file history beyond git
    - Understanding why changes were made to a file
    - Finding discussions about specific code files

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "When did we last modify server.py?"
    - "Find all discussions about package.json"
    - "What changes were made to the auth module?"
    - "Who worked on the database schema?"

    Perfect for code archaeology and understanding file evolution!
    """
    # Thin namespaced wrapper: delegates directly to the file-based search.
    return await tools.search_by_file(ctx, file_path, limit, project)
238
+
239
# Concept-based search - for thematic queries
@mcp.tool(name="csr_search_by_concept")
async def csr_search_by_concept(
    ctx: Context,
    concept: str = Field(
        description="Development concept (e.g., 'security', 'testing', 'performance')"
    ),
    limit: int = Field(
        default=10,
        description="Maximum number of results"
    ),
    include_files: bool = Field(
        default=True,
        description="Include file information"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Search specific project only"
    )
) -> str:
    """Search for conversations about specific development concepts or themes.

    WHEN TO USE THIS TOOL:
    - User asks about broad topics like "security", "testing", "performance"
    - Looking for all discussions on a technical theme
    - Gathering knowledge about how a concept is handled
    - Finding patterns across multiple conversations

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "Show me all security-related discussions"
    - "Find conversations about testing strategies"
    - "What have we discussed about performance?"
    - "Look for Docker-related conversations"

    Ideal for thematic analysis and knowledge gathering!
    """
    # Delegates to the concept search; note include_files is forwarded last,
    # after the project filter (argument order differs from this signature).
    return await tools.search_by_concept(ctx, concept, limit, project, include_files)
276
+
277
# Insight storage - for knowledge persistence
@mcp.tool(name="csr_store_insight")
async def csr_store_insight(
    ctx: Context,
    content: str = Field(
        description="The insight, solution, or learning to store"
    ),
    tags: List[str] = Field(
        # FIX: default_factory instead of default=[] — avoids declaring one
        # shared mutable list object as the default (pydantic's recommended
        # form for mutable defaults). Behavior is unchanged: default is an
        # empty list.
        default_factory=list,
        description="Tags for categorization (e.g., ['docker', 'debugging'])"
    )
) -> str:
    """Store important insights, solutions, or learnings for future reference.

    WHEN TO USE THIS TOOL:
    - User says "remember this" or "store this solution"
    - After solving a complex problem
    - When discovering important patterns or gotchas
    - User provides valuable configuration or setup info
    - After successful debugging sessions

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "Remember this Docker configuration for next time"
    - "Store this solution for the auth problem"
    - "Save this debugging technique"
    - "This is important - the API key goes in .env"

    Critical for building institutional memory!
    """
    # Thin namespaced wrapper: delegates directly to reflection storage.
    return await tools.store_reflection(ctx, content, tags)
307
+
308
# Aggregated insights - for analysis
@mcp.tool(name="csr_search_insights")
async def csr_search_insights(
    ctx: Context,
    query: str = Field(
        description="Topic to analyze across conversations"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Search specific project only"
    )
) -> str:
    """Get aggregated insights and patterns from search results.

    WHEN TO USE THIS TOOL:
    - User wants patterns or trends, not individual results
    - Analyzing how a topic evolved over time
    - Understanding common themes across conversations
    - Getting a high-level view without details

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "What patterns do we see in error handling?"
    - "Summarize our authentication discussions"
    - "What are the common Docker issues we face?"
    - "Give me insights about our testing approach"

    Provides analysis, not just search results!
    """
    # Thin namespaced wrapper: delegates to the summary/aggregation search.
    return await tools.search_summary(ctx, query, project)
337
+
338
# Pagination support - for deep dives
@mcp.tool(name="csr_get_more")
async def csr_get_more(
    ctx: Context,
    query: str = Field(
        description="The original search query"
    ),
    offset: int = Field(
        default=3,
        description="Number of results to skip"
    ),
    limit: int = Field(
        default=3,
        description="Number of additional results"
    ),
    min_score: float = Field(
        default=0.3,
        description="Minimum similarity score"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Search specific project only"
    )
) -> str:
    """Get additional search results for paginated exploration.

    WHEN TO USE THIS TOOL:
    - User says "show me more" after a search
    - Initial results weren't sufficient
    - Deep diving into a topic
    - User wants comprehensive coverage

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "Show me more results"
    - "What else is there?"
    - "Keep searching"
    - "I need more examples"

    Use after initial search when more context is needed!
    """
    # Thin namespaced wrapper: delegates to the paginated-results helper.
    # Defaults (offset=3, limit=3) assume the initial search showed 3 results.
    return await tools.get_more_results(ctx, query, offset, limit, min_score, project)
379
+
380
# Full conversation retrieval
@mcp.tool(name="csr_get_full_conversation")
async def csr_get_full_conversation(
    ctx: Context,
    conversation_id: str = Field(
        description="Conversation ID from search results (cid field)"
    ),
    project: Optional[str] = Field(
        default=None,
        description="Optional project name to help locate the file"
    )
) -> str:
    """Get the full conversation file path to read complete context.

    WHEN TO USE THIS TOOL:
    - Search result was truncated but needs full context
    - User wants to see the entire conversation
    - Debugging requires complete conversation history
    - Following up on a specific conversation ID

    EXAMPLES THAT TRIGGER THIS TOOL:
    - "Show me the full conversation for cid_12345"
    - "I need to see everything from that discussion"
    - "Get the complete context for that result"

    Returns file path for agents to read complete conversations!
    """
    # Thin namespaced wrapper: delegates to the full-conversation lookup.
    return await tools.get_full_conversation(ctx, conversation_id, project)
@@ -0,0 +1,181 @@
1
+ """Runtime mode switching tool for embedding models."""
2
+
3
+ import os
4
+ import logging
5
+ from typing import Literal
6
+ from fastmcp import Context
7
+ from pydantic import Field
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
class ModeSwitcher:
    """Handles runtime switching between embedding modes.

    Mutates the embedding manager's configuration in place so that new
    operations pick up the selected mode without an MCP server restart.
    """

    def __init__(self, get_embedding_manager):
        """Initialize with embedding manager getter.

        Args:
            get_embedding_manager: Zero-arg callable returning the active
                embedding manager (late-bound so a reloaded manager is seen).
        """
        self.get_embedding_manager = get_embedding_manager

    async def switch_mode(
        self,
        ctx: Context,
        mode: Literal["local", "cloud"]
    ) -> str:
        """Switch between local and cloud embedding modes at runtime.

        Returns a human-readable status string. On initialization failure the
        manager's previous configuration is restored (both paths — previously
        only the cloud path restored state, leaving the manager mutated when
        local initialization failed).
        """

        await ctx.debug(f"Switching to {mode} mode...")

        try:
            # Get the current embedding manager
            manager = self.get_embedding_manager()

            # Store current state so it can be fully restored on failure
            old_mode = manager.model_type
            old_prefer_local = manager.prefer_local
            old_voyage_key = manager.voyage_key

            # Update configuration based on requested mode
            if mode == "local":
                # Switch to local mode
                manager.prefer_local = True
                # Clear voyage key to force local
                manager.voyage_key = None

                # Reinitialize with local preference
                if not manager.local_model:
                    success = manager.try_initialize_local()
                    if not success:
                        # FIX: restore previous state on failure (the cloud
                        # path already did this; the local path did not)
                        manager.prefer_local = old_prefer_local
                        manager.voyage_key = old_voyage_key
                        manager.model_type = old_mode
                        return "❌ Failed to initialize local model"

                # Update default model type
                manager.model_type = 'local'

                await ctx.debug("Switched to LOCAL mode (FastEmbed, 384 dimensions)")

            elif mode == "cloud":
                # Switch to cloud mode
                # First check if we have a Voyage key
                # NOTE(review): 'VOYAGE_KEY-2' is an unusual env var name
                # (hyphens are awkward to set in most shells) — confirm intended.
                voyage_key = os.getenv('VOYAGE_KEY') or os.getenv('VOYAGE_KEY-2')
                if not voyage_key:
                    # Try to load from .env file at the repository root
                    from pathlib import Path
                    from dotenv import load_dotenv
                    env_path = Path(__file__).parent.parent.parent / '.env'
                    load_dotenv(env_path, override=True)
                    voyage_key = os.getenv('VOYAGE_KEY') or os.getenv('VOYAGE_KEY-2')

                if not voyage_key:
                    return "❌ Cannot switch to cloud mode: VOYAGE_KEY not found in environment or .env file"

                manager.prefer_local = False
                manager.voyage_key = voyage_key

                # Reinitialize Voyage client
                if not manager.voyage_client:
                    success = manager.try_initialize_voyage()
                    if not success:
                        # Restore previous state
                        manager.prefer_local = old_prefer_local
                        manager.voyage_key = old_voyage_key
                        manager.model_type = old_mode
                        return "❌ Failed to initialize Voyage AI client"

                # Update default model type
                manager.model_type = 'voyage'

                await ctx.debug("Switched to CLOUD mode (Voyage AI, 1024 dimensions)")

            # Log the switch
            logger.info(f"Mode switched from {old_mode} to {manager.model_type}")

            # Prepare detailed response
            return f"""✅ Successfully switched to {mode.upper()} mode!

**Previous Configuration:**
- Mode: {old_mode}
- Prefer Local: {old_prefer_local}

**New Configuration:**
- Mode: {manager.model_type}
- Prefer Local: {manager.prefer_local}
- Vector Dimensions: {manager.get_vector_dimension()}
- Has Voyage Key: {bool(manager.voyage_key)}

**Important Notes:**
- New reflections will go to: reflections_{manager.model_type}
- Existing collections remain unchanged
- No restart required! 🎉

**Next Steps:**
- Use `store_reflection` to test the new mode
- Use `reflect_on_past` to search across all collections"""

        except Exception as e:
            logger.error(f"Failed to switch mode: {e}", exc_info=True)
            return f"❌ Failed to switch mode: {str(e)}"

    async def get_current_mode(self, ctx: Context) -> str:
        """Get the current embedding mode and configuration.

        Returns a human-readable report of the active mode, initialized
        models, derived collection names, and relevant environment state.
        """

        try:
            manager = self.get_embedding_manager()

            # Check actual model availability (initialized clients, not just config)
            local_available = manager.local_model is not None
            voyage_available = manager.voyage_client is not None

            return f"""📊 Current Embedding Configuration:

**Active Mode:** {manager.model_type.upper()}
**Vector Dimensions:** {manager.get_vector_dimension()}

**Configuration:**
- Prefer Local: {manager.prefer_local}
- Has Voyage Key: {bool(manager.voyage_key)}

**Available Models:**
- Local (FastEmbed): {'✅ Initialized' if local_available else '❌ Not initialized'}
- Cloud (Voyage AI): {'✅ Initialized' if voyage_available else '❌ Not initialized'}

**Collection Names:**
- Reflections: reflections_{manager.model_type}
- Conversations: [project]_{manager.model_type}

**Environment:**
- PREFER_LOCAL_EMBEDDINGS: {os.getenv('PREFER_LOCAL_EMBEDDINGS', 'not set')}
- VOYAGE_KEY: {'set' if manager.voyage_key else 'not set'}"""

        except Exception as e:
            logger.error(f"Failed to get current mode: {e}", exc_info=True)
            return f"❌ Failed to get current mode: {str(e)}"
148
+ return f"❌ Failed to get current mode: {str(e)}"
149
+
150
+
151
def register_mode_switch_tool(mcp, get_embedding_manager):
    """Register the mode switching tool with the MCP server."""

    # Single switcher instance shared by both tools via closure.
    switcher = ModeSwitcher(get_embedding_manager)

    @mcp.tool()
    async def switch_embedding_mode(
        ctx: Context,
        mode: Literal["local", "cloud"] = Field(
            description="Target embedding mode: 'local' for FastEmbed (384 dim), 'cloud' for Voyage AI (1024 dim)"
        )
    ) -> str:
        """Switch between local and cloud embedding modes at runtime without restarting the MCP server.

        This allows dynamic switching between:
        - LOCAL mode: FastEmbed with 384 dimensions (privacy-first, no API calls)
        - CLOUD mode: Voyage AI with 1024 dimensions (better quality, requires API key)

        No restart required! The change takes effect immediately for all new operations.
        """
        # Delegate to the shared switcher; it handles validation and rollback.
        return await switcher.switch_mode(ctx, mode)

    @mcp.tool()
    async def get_embedding_mode(ctx: Context) -> str:
        """Get the current embedding mode configuration and status.

        Shows which mode is active, available models, and collection naming.
        """
        # Read-only report; does not modify the embedding manager.
        return await switcher.get_current_mode(ctx)

    logger.info("Mode switching tools registered successfully")
@@ -70,11 +70,15 @@ async def search_single_collection(
70
70
  # This code path is intentionally disabled
71
71
  pass
72
72
  else:
73
+ # SECURITY FIX: Reduce memory multiplier to prevent OOM
74
+ from .security_patches import MemoryOptimizer
75
+ safe_limit = MemoryOptimizer.calculate_safe_limit(limit, 1.5) if should_use_decay else limit
76
+
73
77
  # Standard search without native decay or client-side decay
74
78
  search_results = await qdrant_client.search(
75
79
  collection_name=collection_name,
76
80
  query_vector=query_embedding,
77
- limit=limit * 3 if should_use_decay else limit, # Get more results for client-side decay
81
+ limit=safe_limit, # Use safe limit to prevent memory explosion
78
82
  score_threshold=min_score if not should_use_decay else 0.0,
79
83
  with_payload=True
80
84
  )
@@ -292,8 +296,9 @@ async def parallel_search_collections(
292
296
 
293
297
  for result in search_results:
294
298
  if isinstance(result, Exception):
295
- # Handle exceptions from gather
296
- logger.error(f"Search task failed: {result}")
299
+ # SECURITY FIX: Proper exception logging with context
300
+ from .security_patches import ExceptionLogger
301
+ ExceptionLogger.log_exception(result, "parallel_search_task")
297
302
  continue
298
303
 
299
304
  collection_name, results, timing = result
@@ -219,11 +219,27 @@ class ProjectResolver:
219
219
  logger.debug(f"Failed to scroll {coll_name}: {e}")
220
220
  continue
221
221
 
222
+ # Add appropriate reflection collections based on the found conversation collections
223
+ # If we found _local collections, add reflections_local
224
+ # If we found _voyage collections, add reflections_voyage
225
+ reflection_collections = set()
226
+ for coll in matching_collections:
227
+ if coll.endswith('_local') and 'reflections_local' in collection_names:
228
+ reflection_collections.add('reflections_local')
229
+ elif coll.endswith('_voyage') and 'reflections_voyage' in collection_names:
230
+ reflection_collections.add('reflections_voyage')
231
+
232
+ # Also check for the legacy 'reflections' collection
233
+ if 'reflections' in collection_names:
234
+ reflection_collections.add('reflections')
235
+
236
+ matching_collections.update(reflection_collections)
237
+
222
238
  # Cache the result with TTL
223
239
  result = list(matching_collections)
224
240
  self._cache[user_project_name] = matching_collections
225
241
  self._cache_ttl[user_project_name] = time()
226
-
242
+
227
243
  return result
228
244
 
229
245
  def _get_collection_names(self, force_refresh: bool = False) -> List[str]:
@@ -244,7 +260,9 @@ class ProjectResolver:
244
260
  # Fetch fresh collection list
245
261
  try:
246
262
  all_collections = self.client.get_collections().collections
247
- collection_names = [c.name for c in all_collections if c.name.startswith('conv_')]
263
+ # Include both conversation collections and reflection collections
264
+ collection_names = [c.name for c in all_collections
265
+ if c.name.startswith('conv_') or c.name.startswith('reflections')]
248
266
 
249
267
  # Update cache
250
268
  self._collections_cache = collection_names