claude-self-reflect 3.2.4 → 3.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/.claude/agents/claude-self-reflect-test.md +992 -510
  2. package/.claude/agents/reflection-specialist.md +59 -3
  3. package/README.md +14 -5
  4. package/installer/cli.js +16 -0
  5. package/installer/postinstall.js +14 -0
  6. package/installer/statusline-setup.js +289 -0
  7. package/mcp-server/run-mcp.sh +73 -5
  8. package/mcp-server/src/app_context.py +64 -0
  9. package/mcp-server/src/config.py +57 -0
  10. package/mcp-server/src/connection_pool.py +286 -0
  11. package/mcp-server/src/decay_manager.py +106 -0
  12. package/mcp-server/src/embedding_manager.py +64 -40
  13. package/mcp-server/src/embeddings_old.py +141 -0
  14. package/mcp-server/src/models.py +64 -0
  15. package/mcp-server/src/parallel_search.py +305 -0
  16. package/mcp-server/src/project_resolver.py +5 -0
  17. package/mcp-server/src/reflection_tools.py +211 -0
  18. package/mcp-server/src/rich_formatting.py +196 -0
  19. package/mcp-server/src/search_tools.py +874 -0
  20. package/mcp-server/src/server.py +127 -1720
  21. package/mcp-server/src/temporal_design.py +132 -0
  22. package/mcp-server/src/temporal_tools.py +604 -0
  23. package/mcp-server/src/temporal_utils.py +384 -0
  24. package/mcp-server/src/utils.py +150 -67
  25. package/package.json +15 -1
  26. package/scripts/add-timestamp-indexes.py +134 -0
  27. package/scripts/ast_grep_final_analyzer.py +325 -0
  28. package/scripts/ast_grep_unified_registry.py +556 -0
  29. package/scripts/check-collections.py +29 -0
  30. package/scripts/csr-status +366 -0
  31. package/scripts/debug-august-parsing.py +76 -0
  32. package/scripts/debug-import-single.py +91 -0
  33. package/scripts/debug-project-resolver.py +82 -0
  34. package/scripts/debug-temporal-tools.py +135 -0
  35. package/scripts/delta-metadata-update.py +547 -0
  36. package/scripts/import-conversations-unified.py +157 -25
  37. package/scripts/precompact-hook.sh +33 -0
  38. package/scripts/session_quality_tracker.py +481 -0
  39. package/scripts/streaming-watcher.py +1578 -0
  40. package/scripts/update_patterns.py +334 -0
  41. package/scripts/utils.py +39 -0
@@ -0,0 +1,211 @@
1
+ """Reflection tools for Claude Self Reflect MCP server."""
2
+
3
+ import os
4
+ import json
5
+ import hashlib
6
+ import logging
7
+ from typing import Optional, List, Dict, Any
8
+ from datetime import datetime, timezone
9
+ from pathlib import Path
10
+ import uuid
11
+
12
+ from fastmcp import Context
13
+ from pydantic import Field
14
+ from qdrant_client import AsyncQdrantClient
15
+ from qdrant_client.models import PointStruct, VectorParams, Distance
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ class ReflectionTools:
21
+ """Handles reflection storage and conversation retrieval operations."""
22
+
23
+ def __init__(
24
+ self,
25
+ qdrant_client: AsyncQdrantClient,
26
+ qdrant_url: str,
27
+ get_embedding_manager,
28
+ normalize_project_name
29
+ ):
30
+ """Initialize reflection tools with dependencies."""
31
+ self.qdrant_client = qdrant_client
32
+ self.qdrant_url = qdrant_url
33
+ self.get_embedding_manager = get_embedding_manager
34
+ self.normalize_project_name = normalize_project_name
35
+
36
+ async def store_reflection(
37
+ self,
38
+ ctx: Context,
39
+ content: str,
40
+ tags: List[str] = []
41
+ ) -> str:
42
+ """Store an important insight or reflection for future reference."""
43
+
44
+ await ctx.debug(f"Storing reflection with {len(tags)} tags")
45
+
46
+ try:
47
+ # Determine collection name based on active model type, not prefer_local
48
+ embedding_manager = self.get_embedding_manager()
49
+ # Use actual model_type to ensure consistency
50
+ embedding_type = embedding_manager.model_type or ("voyage" if embedding_manager.voyage_client else "local")
51
+ collection_name = f"reflections_{embedding_type}"
52
+
53
+ # Ensure reflections collection exists
54
+ try:
55
+ await self.qdrant_client.get_collection(collection_name)
56
+ await ctx.debug(f"Using existing {collection_name} collection")
57
+ except Exception:
58
+ # Collection doesn't exist, create it
59
+ await ctx.debug(f"Creating {collection_name} collection")
60
+
61
+ # Get embedding dimensions for the specific type
62
+ embedding_dim = embedding_manager.get_vector_dimension(force_type=embedding_type)
63
+
64
+ await self.qdrant_client.create_collection(
65
+ collection_name=collection_name,
66
+ vectors_config=VectorParams(
67
+ size=embedding_dim,
68
+ distance=Distance.COSINE
69
+ )
70
+ )
71
+
72
+ # Generate embedding with the same forced type for consistency
73
+ embedding = await embedding_manager.generate_embedding(content, force_type=embedding_type)
74
+
75
+ # Guard against failed embeddings
76
+ if not embedding:
77
+ await ctx.debug("Failed to generate embedding for reflection")
78
+ return "Failed to store reflection: embedding generation failed"
79
+
80
+ # Create unique ID
81
+ reflection_id = hashlib.md5(f"{content}{datetime.now().isoformat()}".encode()).hexdigest()
82
+
83
+ # Prepare metadata
84
+ metadata = {
85
+ "content": content,
86
+ "tags": tags,
87
+ "timestamp": datetime.now(timezone.utc).isoformat(),
88
+ "type": "reflection"
89
+ }
90
+
91
+ # Store in Qdrant
92
+ await self.qdrant_client.upsert(
93
+ collection_name=collection_name,
94
+ points=[
95
+ PointStruct(
96
+ id=reflection_id,
97
+ vector=embedding,
98
+ payload=metadata
99
+ )
100
+ ]
101
+ )
102
+
103
+ await ctx.debug(f"Stored reflection with ID {reflection_id}")
104
+
105
+ return f"""Reflection stored successfully.
106
+ ID: {reflection_id}
107
+ Tags: {', '.join(tags) if tags else 'none'}
108
+ Timestamp: {metadata['timestamp']}"""
109
+
110
+ except Exception as e:
111
+ logger.error(f"Failed to store reflection: {e}", exc_info=True)
112
+ return f"Failed to store reflection: {str(e)}"
113
+
114
+ async def get_full_conversation(
115
+ self,
116
+ ctx: Context,
117
+ conversation_id: str,
118
+ project: Optional[str] = None
119
+ ) -> str:
120
+ """Get the full JSONL conversation file path for a conversation ID.
121
+ This allows agents to read complete conversations instead of truncated excerpts."""
122
+
123
+ await ctx.debug(f"Getting full conversation for ID: {conversation_id}, project: {project}")
124
+
125
+ try:
126
+ # Base path for conversations
127
+ base_path = Path.home() / '.claude' / 'projects'
128
+
129
+ # If project is specified, try to find it in that project
130
+ if project:
131
+ # Normalize project name for path matching
132
+ project_normalized = self.normalize_project_name(project)
133
+
134
+ # Look for project directories that match
135
+ for project_dir in base_path.glob('*'):
136
+ if project_normalized in project_dir.name.lower():
137
+ # Look for JSONL files in this project
138
+ for jsonl_file in project_dir.glob('*.jsonl'):
139
+ # Check if filename matches conversation_id (with or without .jsonl)
140
+ if conversation_id in jsonl_file.stem or conversation_id == jsonl_file.stem:
141
+ await ctx.debug(f"Found conversation by filename in {jsonl_file}")
142
+ return f"""<conversation_file>
143
+ <conversation_id>{conversation_id}</conversation_id>
144
+ <file_path>{str(jsonl_file)}</file_path>
145
+ <project>{project_dir.name}</project>
146
+ <message>Use the Read tool with this file path to read the complete conversation.</message>
147
+ </conversation_file>"""
148
+
149
+ # If not found in specific project or no project specified, search all
150
+ await ctx.debug("Searching all projects for conversation")
151
+ for project_dir in base_path.glob('*'):
152
+ for jsonl_file in project_dir.glob('*.jsonl'):
153
+ # Check if filename matches conversation_id (with or without .jsonl)
154
+ if conversation_id in jsonl_file.stem or conversation_id == jsonl_file.stem:
155
+ await ctx.debug(f"Found conversation by filename in {jsonl_file}")
156
+ return f"""<conversation_file>
157
+ <conversation_id>{conversation_id}</conversation_id>
158
+ <file_path>{str(jsonl_file)}</file_path>
159
+ <project>{project_dir.name}</project>
160
+ <message>Use the Read tool with this file path to read the complete conversation.</message>
161
+ </conversation_file>"""
162
+
163
+ # Not found
164
+ return f"""<conversation_file>
165
+ <error>Conversation ID '{conversation_id}' not found in any project.</error>
166
+ <suggestion>The conversation may not have been imported yet, or the ID may be incorrect.</suggestion>
167
+ </conversation_file>"""
168
+
169
+ except Exception as e:
170
+ logger.error(f"Failed to get conversation file: {e}", exc_info=True)
171
+ return f"""<conversation_file>
172
+ <error>Failed to locate conversation: {str(e)}</error>
173
+ </conversation_file>"""
174
+
175
+
176
def register_reflection_tools(
    mcp,
    qdrant_client: AsyncQdrantClient,
    qdrant_url: str,
    get_embedding_manager,
    normalize_project_name
):
    """Register reflection tools with the MCP server."""

    # Single shared instance backs both tool closures below.
    reflection_tools = ReflectionTools(
        qdrant_client=qdrant_client,
        qdrant_url=qdrant_url,
        get_embedding_manager=get_embedding_manager,
        normalize_project_name=normalize_project_name,
    )

    # Thin MCP-facing wrappers: parameter schemas live here, logic lives in
    # ReflectionTools so it can be tested without an MCP server.
    @mcp.tool()
    async def store_reflection(
        ctx: Context,
        content: str = Field(description="The insight or reflection to store"),
        tags: List[str] = Field(default=[], description="Tags to categorize this reflection")
    ) -> str:
        """Store an important insight or reflection for future reference."""
        return await reflection_tools.store_reflection(ctx, content, tags)

    @mcp.tool()
    async def get_full_conversation(
        ctx: Context,
        conversation_id: str = Field(description="The conversation ID from search results (cid)"),
        project: Optional[str] = Field(default=None, description="Optional project name to help locate the file")
    ) -> str:
        """Get the full JSONL conversation file path for a conversation ID.
        This allows agents to read complete conversations instead of truncated excerpts."""
        return await reflection_tools.get_full_conversation(ctx, conversation_id, project)

    logger.info("Reflection tools registered successfully")
@@ -0,0 +1,196 @@
1
+ """Rich formatting for search results with emojis and enhanced display."""
2
+
3
+ import json
4
+ import time
5
+ from datetime import datetime, timezone
6
+ from typing import List, Dict, Any, Optional
7
+ import logging
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
def _relative_days(timestamp_str: str, now: datetime) -> Optional[int]:
    """Return whole days between *now* and an ISO-8601 timestamp string.

    Normalizes a trailing 'Z' suffix to '+00:00' and treats naive timestamps
    as UTC. Returns None when the string cannot be parsed (empty, malformed,
    or not a string).
    """
    try:
        cleaned = timestamp_str.replace('Z', '+00:00') if timestamp_str.endswith('Z') else timestamp_str
        parsed = datetime.fromisoformat(cleaned)
    except (ValueError, TypeError, AttributeError):
        # Narrow catch: only parse failures — a bare except here would also
        # swallow KeyboardInterrupt/SystemExit.
        return None
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=timezone.utc)
    return (now - parsed).days


def format_search_results_rich(
    results: List[Dict],
    query: str,
    target_project: str,
    collections_searched: int,
    timing_info: Dict[str, float],
    start_time: float,
    brief: bool = False,
    include_raw: bool = False,
    indexing_status: Optional[Dict] = None
) -> str:
    """Format search results with rich formatting including emojis and performance metrics.

    Args:
        results: Result dicts (expects 'score'; optionally 'timestamp',
            'excerpt'/'content', 'project_name', 'conversation_id',
            'raw_payload', 'files_analyzed', 'tools_used', 'concepts').
        query: The original search query (echoed in the output).
        target_project: Project scope searched, or 'all'.
        collections_searched: Number of Qdrant collections queried.
        timing_info: Phase timestamps ('embedding_start'/'embedding_end',
            'search_all_start'/'search_all_end') as time.time() values.
        start_time: time.time() at request start, for total latency.
        brief: When True, truncate excerpts instead of emitting full CDATA.
        include_raw: When True, embed the raw payload text per result.
        indexing_status: Optional indexing progress dict ('percentage',
            'indexed_conversations', 'total_conversations', 'backlog_count').

    Returns:
        An emoji summary line block followed by a ``<search>`` XML document.
    """

    # Emoji summary shown before the XML payload.
    upfront_summary = ""

    if results:
        top_score = results[0]['score']
        # Relevance bucket shared by the upfront line and the <summary> element.
        score_info = "high" if top_score >= 0.85 else "good" if top_score >= 0.75 else "partial"
        upfront_summary += f"🎯 RESULTS: {len(results)} matches ({score_info} relevance, top score: {top_score:.3f})\n"

        total_time = time.time() - start_time
        indexing_info = ""
        if indexing_status and indexing_status.get("percentage", 100) < 100.0:
            indexing_info = f" | 📊 {indexing_status['indexed_conversations']}/{indexing_status['total_conversations']} indexed"
        upfront_summary += f"⚡ PERFORMANCE: {int(total_time * 1000)}ms ({collections_searched} collections searched{indexing_info})\n"
    else:
        upfront_summary += f"❌ NO RESULTS: No conversations found matching '{query}'\n"

    # Start XML format with upfront summary.
    result_text = upfront_summary + "\n<search>\n"

    # Surface indexing progress when the index is not yet ~fully baselined.
    if indexing_status and indexing_status.get("percentage", 100) < 95.0:
        result_text += f' <info status="indexing" progress="{indexing_status["percentage"]:.1f}%" backlog="{indexing_status.get("backlog_count", 0)}">\n'
        result_text += f' <message>📊 Indexing: {indexing_status["indexed_conversations"]}/{indexing_status["total_conversations"]} conversations ({indexing_status["percentage"]:.1f}% complete)</message>\n'
        result_text += f" </info>\n"

    # High-level result summary with recency buckets.
    if results:
        now = datetime.now(timezone.utc)
        today_count = 0
        yesterday_count = 0
        week_count = 0

        for result in results:
            days_ago = _relative_days(result.get('timestamp', ''), now)
            if days_ago is None:
                continue
            if days_ago == 0:
                today_count += 1
            elif days_ago == 1:
                yesterday_count += 1
            # Note: week_count intentionally includes today and yesterday.
            if days_ago <= 7:
                week_count += 1

        # Most-recent bucket wins for the compact recency label.
        if today_count > 0:
            time_info = f"{today_count} today"
        elif yesterday_count > 0:
            time_info = f"{yesterday_count} yesterday"
        elif week_count > 0:
            time_info = f"{week_count} this week"
        else:
            time_info = "older results"

        result_text += f' <summary count="{len(results)}" relevance="{score_info}" recency="{time_info}" top-score="{results[0]["score"]:.3f}">\n'

        # Short preview of the top result, ellipsized unless already elided.
        top_excerpt = results[0].get('excerpt', results[0].get('content', ''))[:100].strip()
        if '...' not in top_excerpt:
            top_excerpt += "..."
        result_text += f' <preview>{top_excerpt}</preview>\n'
        result_text += f" </summary>\n"
    else:
        result_text += f" <result-summary>\n"
        result_text += f" <headline>No matches found</headline>\n"
        result_text += f" <relevance>No conversations matched your query</relevance>\n"
        result_text += f" </result-summary>\n"

    # Query/scope metadata.
    result_text += f" <meta>\n"
    result_text += f" <q>{query}</q>\n"
    result_text += f" <scope>{target_project if target_project != 'all' else 'all'}</scope>\n"
    result_text += f" <count>{len(results)}</count>\n"
    if results:
        result_text += f" <range>{results[-1]['score']:.3f}-{results[0]['score']:.3f}</range>\n"

    # Performance metadata (milliseconds).
    total_time = time.time() - start_time
    result_text += f" <perf>\n"
    result_text += f" <ttl>{int(total_time * 1000)}</ttl>\n"
    result_text += f" <emb>{int((timing_info.get('embedding_end', 0) - timing_info.get('embedding_start', 0)) * 1000)}</emb>\n"
    result_text += f" <srch>{int((timing_info.get('search_all_end', 0) - timing_info.get('search_all_start', 0)) * 1000)}</srch>\n"
    result_text += f" <cols>{collections_searched}</cols>\n"
    result_text += f" </perf>\n"
    result_text += f" </meta>\n"

    # Individual results, ranked.
    result_text += " <results>\n"
    for i, result in enumerate(results):
        result_text += f' <r rank="{i+1}">\n'
        result_text += f" <s>{result['score']:.3f}</s>\n"
        result_text += f" <p>{result.get('project_name', 'unknown')}</p>\n"

        # Relative-time tag; omitted entirely when no timestamp is present.
        timestamp_str = result.get('timestamp', '')
        if timestamp_str:
            days_ago = _relative_days(timestamp_str, datetime.now(timezone.utc))
            if days_ago is None:
                result_text += f" <t>unknown</t>\n"
            elif days_ago == 0:
                result_text += f" <t>today</t>\n"
            elif days_ago == 1:
                result_text += f" <t>yesterday</t>\n"
            else:
                result_text += f" <t>{days_ago}d</t>\n"

        # Get excerpt/content (excerpt preferred).
        excerpt = result.get('excerpt', result.get('content', ''))

        if not brief and excerpt:
            # Title from first line of the excerpt, truncated at 80 chars.
            excerpt_lines = excerpt.split('\n')
            title = excerpt_lines[0][:80] + "..." if len(excerpt_lines[0]) > 80 else excerpt_lines[0]
            result_text += f" <title>{title}</title>\n"

            # Key finding - summarize the main point.
            key_finding = excerpt[:100] + "..." if len(excerpt) > 100 else excerpt
            result_text += f" <key-finding>{key_finding.strip()}</key-finding>\n"

        # Always include an excerpt: truncated when brief, full CDATA otherwise.
        if brief:
            brief_excerpt = excerpt[:100] + "..." if len(excerpt) > 100 else excerpt
            result_text += f" <excerpt>{brief_excerpt.strip()}</excerpt>\n"
        else:
            result_text += f" <excerpt><![CDATA[{excerpt}]]></excerpt>\n"

        # Conversation ID, for follow-up with get_full_conversation.
        if result.get('conversation_id'):
            result_text += f" <cid>{result['conversation_id']}</cid>\n"

        # Raw payload text if requested.
        if include_raw and result.get('raw_payload'):
            result_text += " <raw>\n"
            payload = result['raw_payload']
            result_text += f" <txt><![CDATA[{payload.get('text', '')}]]></txt>\n"
            result_text += f" <id>{result.get('id', '')}</id>\n"
            result_text += " </raw>\n"

        # Optional metadata fields, capped at 5 entries each.
        if result.get('files_analyzed'):
            result_text += f" <files>{', '.join(result['files_analyzed'][:5])}</files>\n"
        if result.get('tools_used'):
            result_text += f" <tools>{', '.join(result['tools_used'][:5])}</tools>\n"
        if result.get('concepts'):
            result_text += f" <concepts>{', '.join(result['concepts'][:5])}</concepts>\n"

        result_text += " </r>\n"

    result_text += " </results>\n"
    result_text += "</search>\n"

    return result_text