claude-self-reflect 3.2.3 → 3.3.0

This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (40)
  1. package/.claude/agents/claude-self-reflect-test.md +595 -528
  2. package/.claude/agents/documentation-writer.md +1 -1
  3. package/.claude/agents/qdrant-specialist.md +2 -2
  4. package/.claude/agents/reflection-specialist.md +61 -5
  5. package/.claude/agents/search-optimizer.md +9 -7
  6. package/README.md +16 -9
  7. package/mcp-server/pyproject.toml +1 -1
  8. package/mcp-server/run-mcp.sh +49 -5
  9. package/mcp-server/src/app_context.py +64 -0
  10. package/mcp-server/src/config.py +57 -0
  11. package/mcp-server/src/connection_pool.py +286 -0
  12. package/mcp-server/src/decay_manager.py +106 -0
  13. package/mcp-server/src/embedding_manager.py +64 -40
  14. package/mcp-server/src/embeddings_old.py +141 -0
  15. package/mcp-server/src/models.py +64 -0
  16. package/mcp-server/src/parallel_search.py +371 -0
  17. package/mcp-server/src/project_resolver.py +33 -46
  18. package/mcp-server/src/reflection_tools.py +206 -0
  19. package/mcp-server/src/rich_formatting.py +196 -0
  20. package/mcp-server/src/search_tools.py +826 -0
  21. package/mcp-server/src/server.py +140 -1715
  22. package/mcp-server/src/temporal_design.py +132 -0
  23. package/mcp-server/src/temporal_tools.py +597 -0
  24. package/mcp-server/src/temporal_utils.py +384 -0
  25. package/mcp-server/src/utils.py +150 -67
  26. package/package.json +11 -1
  27. package/scripts/add-timestamp-indexes.py +134 -0
  28. package/scripts/check-collections.py +29 -0
  29. package/scripts/debug-august-parsing.py +76 -0
  30. package/scripts/debug-import-single.py +91 -0
  31. package/scripts/debug-project-resolver.py +82 -0
  32. package/scripts/debug-temporal-tools.py +135 -0
  33. package/scripts/delta-metadata-update.py +547 -0
  34. package/scripts/import-conversations-unified.py +65 -6
  35. package/scripts/importer/utils/project_normalizer.py +22 -9
  36. package/scripts/precompact-hook.sh +33 -0
  37. package/scripts/streaming-watcher.py +1443 -0
  38. package/scripts/utils.py +39 -0
  39. package/shared/__init__.py +5 -0
  40. package/shared/normalization.py +54 -0
package/scripts/add-timestamp-indexes.py
@@ -0,0 +1,134 @@
+ #!/usr/bin/env python3
+ """Add timestamp indexes to all collections for OrderBy support."""
+
+ import asyncio
+ import os
+ from pathlib import Path
+ import sys
+
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ from qdrant_client import AsyncQdrantClient
+ from qdrant_client.models import PayloadSchemaType, OrderBy
+
+ QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
+
+ async def add_timestamp_indexes():
+     """Add timestamp indexes to all collections that need them."""
+     client = AsyncQdrantClient(url=QDRANT_URL)
+
+     print("Adding timestamp indexes for temporal query support...")
+     print("="*60)
+
+     # Get all collections
+     collections = await client.get_collections()
+     total = len(collections.collections)
+     print(f"Found {total} collections")
+
+     success_count = 0
+     skip_count = 0
+     error_count = 0
+
+     for i, col in enumerate(collections.collections, 1):
+         col_name = col.name
+         print(f"\n[{i}/{total}] Processing {col_name}...")
+
+         try:
+             # Check if collection has points
+             info = await client.get_collection(col_name)
+             if info.points_count == 0:
+                 print(f" ⏭️ Skipped (empty collection)")
+                 skip_count += 1
+                 continue
+
+             # Check if timestamp field exists
+             points, _ = await client.scroll(
+                 collection_name=col_name,
+                 limit=1,
+                 with_payload=["timestamp"]
+             )
+
+             if not points or not points[0].payload.get('timestamp'):
+                 print(f" ⏭️ Skipped (no timestamp field)")
+                 skip_count += 1
+                 continue
+
+             # Try to use OrderBy to check if index exists
+             try:
+                 await client.scroll(
+                     collection_name=col_name,
+                     order_by=OrderBy(key="timestamp", direction="desc"),
+                     limit=1
+                 )
+                 print(f" ✅ Already has timestamp index")
+                 skip_count += 1
+             except Exception as e:
+                 if "No range index" in str(e):
+                     # Need to create index
+                     print(f" 🔧 Creating timestamp index...")
+                     try:
+                         await client.create_payload_index(
+                             collection_name=col_name,
+                             field_name="timestamp",
+                             field_schema=PayloadSchemaType.DATETIME
+                         )
+                         print(f" ✅ Index created successfully")
+                         success_count += 1
+                     except Exception as create_error:
+                         print(f" ❌ Failed to create index: {create_error}")
+                         error_count += 1
+                 else:
+                     print(f" ⚠️ Unexpected error: {e}")
+                     error_count += 1
+
+         except Exception as e:
+             print(f" ❌ Error: {e}")
+             error_count += 1
+
+     print("\n" + "="*60)
+     print("SUMMARY")
+     print("="*60)
+     print(f"✅ Indexes created: {success_count}")
+     print(f"⏭️ Skipped: {skip_count}")
+     print(f"❌ Errors: {error_count}")
+     print(f"📊 Total collections: {total}")
+
+     # Verify temporal queries work
+     if success_count > 0:
+         print("\n" + "="*60)
+         print("VERIFYING TEMPORAL QUERIES")
+         print("="*60)
+
+         # Find a collection with data to test
+         test_collection = None
+         for col in collections.collections:
+             try:
+                 info = await client.get_collection(col.name)
+                 if info.points_count > 100:  # Find one with decent amount of data
+                     test_collection = col.name
+                     break
+             except:
+                 pass
+
+         if test_collection:
+             print(f"Testing on {test_collection}...")
+             try:
+                 # Test OrderBy
+                 results, _ = await client.scroll(
+                     collection_name=test_collection,
+                     order_by=OrderBy(key="timestamp", direction="desc"),
+                     limit=3,
+                     with_payload=["timestamp", "text"]
+                 )
+
+                 print(f"✅ OrderBy works! Found {len(results)} recent conversations:")
+                 for r in results:
+                     ts = r.payload.get('timestamp', 'N/A')
+                     text = r.payload.get('text', '')[:60] + '...'
+                     print(f" - {ts}: {text}")
+
+             except Exception as e:
+                 print(f"❌ OrderBy test failed: {e}")
+
+ if __name__ == "__main__":
+     asyncio.run(add_timestamp_indexes())
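
Editor's note: the script above detects a missing index by provoking a "No range index" error from an order_by scroll and parsing the message. A quieter alternative, sketched below under the same qdrant-client API, is to read the collection's payload_schema directly; the collection name used here is a hypothetical placeholder.

import asyncio
import os

from qdrant_client import AsyncQdrantClient
from qdrant_client.models import PayloadSchemaType


async def has_timestamp_index(client: AsyncQdrantClient, collection: str) -> bool:
    """True if 'timestamp' already has a datetime payload index."""
    info = await client.get_collection(collection)
    field = (info.payload_schema or {}).get("timestamp")
    return field is not None and field.data_type == PayloadSchemaType.DATETIME


async def main():
    client = AsyncQdrantClient(url=os.getenv("QDRANT_URL", "http://localhost:6333"))
    # "conv_example_local" is a hypothetical collection name for illustration.
    print(await has_timestamp_index(client, "conv_example_local"))


if __name__ == "__main__":
    asyncio.run(main())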
package/scripts/check-collections.py
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python3
+ """Check Qdrant collections."""
+
+ import os
+ from qdrant_client import QdrantClient
+
+ # Configuration
+ QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
+
+ def main():
+     """List all collections."""
+     client = QdrantClient(url=QDRANT_URL)
+
+     # Get all collections
+     collections = client.get_collections()
+
+     print("Qdrant Collections:")
+     print("-" * 60)
+
+     voyage_collections = []
+     for collection in collections.collections:
+         print(f"- {collection.name}")
+         if collection.name.endswith("_voyage"):
+             voyage_collections.append(collection.name)
+
+     print(f"\nFound {len(voyage_collections)} Voyage collections")
+
+ if __name__ == "__main__":
+     main()
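
Editor's note: the listing above prints names only. A minimal extension sketch, using the same QdrantClient and the documented get_collection call, pairs each collection with its point count, which helps when triaging empty collections:

import os

from qdrant_client import QdrantClient

client = QdrantClient(url=os.getenv("QDRANT_URL", "http://localhost:6333"))
for collection in client.get_collections().collections:
    # get_collection returns per-collection info, including points_count.
    info = client.get_collection(collection.name)
    print(f"- {collection.name}: {info.points_count} points")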
package/scripts/debug-august-parsing.py
@@ -0,0 +1,76 @@
+ #!/usr/bin/env python3
+ """Debug why August files aren't parsing properly."""
+
+ import json
+ import sys
+
+ def parse_jsonl_file(file_path):
+     """Parse JSONL file and extract messages."""
+     messages = []
+
+     with open(file_path, 'r', encoding='utf-8') as f:
+         for line_num, line in enumerate(f, 1):
+             line = line.strip()
+             if not line:
+                 continue
+
+             try:
+                 data = json.loads(line)
+
+                 # Skip summary messages
+                 if data.get('type') == 'summary':
+                     print(f"Line {line_num}: Skipping summary")
+                     continue
+
+                 # Handle messages with type user/assistant at root level
+                 if data.get('type') in ['user', 'assistant']:
+                     if 'message' in data and data['message']:
+                         msg = data['message']
+                         if msg.get('role') and msg.get('content'):
+                             content = msg['content']
+                             if isinstance(content, list):
+                                 text_parts = []
+                                 for item in content:
+                                     if isinstance(item, dict) and item.get('type') == 'text':
+                                         text_parts.append(item.get('text', ''))
+                                     elif isinstance(item, str):
+                                         text_parts.append(item)
+                                 content = '\n'.join(text_parts)
+
+                             if content:
+                                 messages.append({
+                                     'role': msg['role'],
+                                     'content': content[:200] + '...' if len(content) > 200 else content,
+                                     'line': line_num
+                                 })
+                                 print(f"Line {line_num}: Extracted {msg['role']} message ({len(content)} chars)")
+                             else:
+                                 print(f"Line {line_num}: Empty content for {msg['role']}")
+                         else:
+                             print(f"Line {line_num}: Missing role or content in message field")
+                     else:
+                         print(f"Line {line_num}: No message field for type={data.get('type')}")
+                 else:
+                     print(f"Line {line_num}: Unknown type={data.get('type')}")
+
+             except Exception as e:
+                 print(f"Line {line_num}: Parse error - {e}")
+
+     return messages
+
+ if __name__ == "__main__":
+     file_path = "/Users/ramakrishnanannaswamy/.claude/projects/-Users-ramakrishnanannaswamy-projects-claude-self-reflect/7b3354ed-d6d2-4eab-b328-1fced4bb63bb.jsonl"
+
+     print(f"Parsing: {file_path}")
+     print("=" * 60)
+
+     messages = parse_jsonl_file(file_path)
+
+     print("\n" + "=" * 60)
+     print(f"Total messages extracted: {len(messages)}")
+
+     if messages:
+         print("\nFirst 5 messages:")
+         for i, msg in enumerate(messages[:5]):
+             print(f"\n{i+1}. Line {msg['line']}: {msg['role']}")
+             print(f" Content: {msg['content'][:100]}...")
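
Editor's note: a hedged usage sketch for parse_jsonl_file. The two records below are illustrative, inferred from the fields the parser inspects (type, message.role, message.content); they are not an official transcript specification.

import json
import tempfile

# Illustrative records only; shapes inferred from what the parser reads.
sample_records = [
    {"type": "summary", "summary": "Session recap"},
    {"type": "user", "message": {
        "role": "user",
        "content": [{"type": "text", "text": "Hello from a test record"}],
    }},
]

with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
    for record in sample_records:
        f.write(json.dumps(record) + "\n")
    tmp_path = f.name

messages = parse_jsonl_file(tmp_path)  # function defined in the script above
# The summary record is skipped; the user record yields one message.
assert len(messages) == 1 and messages[0]["role"] == "user"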
package/scripts/debug-import-single.py
@@ -0,0 +1,91 @@
+ #!/usr/bin/env python3
+ """
+ Debug import of a single file with summary messages
+ """
+
+ import json
+ from pathlib import Path
+
+ # Target file
+ test_file = Path.home() / '.claude/projects/-Users-ramakrishnanannaswamy-projects-claude-self-reflect/c072a61e-aebb-4c85-960b-c5ffeafa7115.jsonl'
+
+ print(f"Analyzing: {test_file.name}\n")
+
+ # Read and analyze the file
+ all_messages = []
+ summary_count = 0
+ user_count = 0
+ assistant_count = 0
+ other_count = 0
+
+ with open(test_file, 'r') as f:
+     for i, line in enumerate(f, 1):
+         if line.strip():
+             try:
+                 data = json.loads(line)
+                 msg_type = data.get('type', 'unknown')
+
+                 print(f"Line {i}: type={msg_type}", end="")
+
+                 # Check what would be extracted
+                 if msg_type == 'summary':
+                     summary_count += 1
+                     print(f" -> SKIPPED (summary)")
+                     continue
+
+                 # Check for messages with type user/assistant
+                 if msg_type in ['user', 'assistant']:
+                     if 'message' in data and data['message']:
+                         msg = data['message']
+                         if msg.get('role') and msg.get('content'):
+                             all_messages.append(msg)
+                             if msg_type == 'user':
+                                 user_count += 1
+                             else:
+                                 assistant_count += 1
+
+                             # Extract a preview of content
+                             content = msg.get('content', '')
+                             if isinstance(content, list) and len(content) > 0:
+                                 first_item = content[0]
+                                 if isinstance(first_item, dict):
+                                     preview = str(first_item.get('content', first_item.get('text', '')))[:50]
+                                 else:
+                                     preview = str(first_item)[:50]
+                             else:
+                                 preview = str(content)[:50]
+
+                             print(f" -> EXTRACTED (role={msg['role']}, preview: {preview}...)")
+                         else:
+                             print(f" -> NO role/content in message")
+                     else:
+                         print(f" -> NO message field")
+                 else:
+                     other_count += 1
+                     print(f" -> OTHER TYPE")
+
+             except json.JSONDecodeError as e:
+                 print(f"Line {i}: INVALID JSON - {e}")
+
+ print(f"\n=== SUMMARY ===")
+ print(f"Total lines: {i}")
+ print(f"Summaries (skipped): {summary_count}")
+ print(f"User messages: {user_count}")
+ print(f"Assistant messages: {assistant_count}")
+ print(f"Other types: {other_count}")
+ print(f"Total extracted messages: {len(all_messages)}")
+
+ # Check for Memento content
+ memento_found = False
+ for msg in all_messages:
+     content = str(msg.get('content', ''))
+     if 'memento' in content.lower():
+         memento_found = True
+         break
+
+ print(f"\nMemento content found in messages: {memento_found}")
+
+ if len(all_messages) > 0:
+     print(f"\n✅ File SHOULD be importable with {len(all_messages)} messages")
+ else:
+     print(f"\n❌ File would result in ZERO messages imported")
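
Editor's note: both this script and debug-august-parsing.py flatten list-shaped message content inline. A small helper, sketched here, factors that pattern out; treating non-text blocks as skippable is an assumption carried over from the scripts, not a documented rule.

def flatten_content(content) -> str:
    """Collapse string-or-list message content into plain text (sketch)."""
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts = []
        for item in content:
            # Keep text blocks and bare strings; skip other block types.
            if isinstance(item, dict) and item.get("type") == "text":
                parts.append(item.get("text", ""))
            elif isinstance(item, str):
                parts.append(item)
        return "\n".join(parts)
    return ""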
package/scripts/debug-project-resolver.py
@@ -0,0 +1,82 @@
+ #!/usr/bin/env python3
+ """Test ProjectResolver to see if it's finding collections correctly."""
+
+ import sys
+ from pathlib import Path
+ sys.path.insert(0, str(Path(__file__).parent.parent / 'mcp-server' / 'src'))
+
+ from qdrant_client import QdrantClient
+ from project_resolver import ProjectResolver
+
+ # Connect to Qdrant
+ client = QdrantClient(url="http://localhost:6333")
+
+ # Create resolver
+ resolver = ProjectResolver(client)
+
+ # Test projects
+ test_projects = [
+     "claude-self-reflect",
+     "memento",
+     "cc-enhance",
+     "all"
+ ]
+
+ print("=== Testing ProjectResolver ===\n")
+
+ for project in test_projects:
+     print(f"Project: '{project}'")
+     collections = resolver.find_collections_for_project(project)
+     print(f" Found {len(collections)} collections")
+
+     if collections:
+         # Show first 3 collections
+         for coll in collections[:3]:
+             try:
+                 info = client.get_collection(coll)
+                 suffix = "_local" if coll.endswith("_local") else "_voyage"
+                 print(f" - {coll}: {info.points_count} points ({suffix})")
+             except:
+                 print(f" - {coll}: <error getting info>")
+     else:
+         print(" - No collections found!")
+     print()
+
+ # Also test the normalization directly
+ print("\n=== Testing Direct Normalization ===")
+ from shared.normalization import normalize_project_name
+ import hashlib
+
+ test_paths = [
+     "/Users/ramakrishnanannaswamy/projects/claude-self-reflect",
+     "/Users/ramakrishnanannaswamy/projects/memento",
+     "/Users/ramakrishnanannaswamy/projects/cc-enhance"
+ ]
+
+ for path in test_paths:
+     normalized = normalize_project_name(path)
+     name_hash = hashlib.md5(normalized.encode()).hexdigest()[:8]
+     collection_local = f"conv_{name_hash}_local"
+     collection_voyage = f"conv_{name_hash}_voyage"
+
+     print(f"Path: {path}")
+     print(f" Normalized: {normalized}")
+     print(f" Hash: {name_hash}")
+     print(f" Expected collections:")
+     print(f" - {collection_local}")
+     print(f" - {collection_voyage}")
+
+     # Check if these exist
+     all_collections = [c.name for c in client.get_collections().collections]
+     if collection_local in all_collections:
+         info = client.get_collection(collection_local)
+         print(f" ✓ {collection_local} exists with {info.points_count} points")
+     else:
+         print(f" ✗ {collection_local} not found")
+
+     if collection_voyage in all_collections:
+         info = client.get_collection(collection_voyage)
+         print(f" ✓ {collection_voyage} exists with {info.points_count} points")
+     else:
+         print(f" ✗ {collection_voyage} not found")
+     print()
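
Editor's note: the naming convention exercised above condenses to one small function. This is a sketch assuming normalize_project_name from the package's shared module behaves as imported in the script; the conv_{hash}_{suffix} pattern is taken directly from the code above.

import hashlib

from shared.normalization import normalize_project_name


def expected_collections(project_path: str) -> list[str]:
    """Derive the expected Qdrant collection names for a project path."""
    normalized = normalize_project_name(project_path)
    # Collections are keyed by the first 8 hex chars of the MD5 of the
    # normalized project name, with an embedding-backend suffix.
    prefix = hashlib.md5(normalized.encode()).hexdigest()[:8]
    return [f"conv_{prefix}_local", f"conv_{prefix}_voyage"]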
package/scripts/debug-temporal-tools.py
@@ -0,0 +1,135 @@
+ #!/usr/bin/env python3
+ """
+ Debug script for testing temporal tools in Claude Self Reflect.
+ This script directly tests the temporal tools that should be available via MCP.
+ """
+
+ import os
+ import sys
+ import asyncio
+ import json
+ import traceback
+ from pathlib import Path
+
+ # Add the mcp-server source to Python path
+ sys.path.append(str(Path(__file__).parent.parent / "mcp-server" / "src"))
+
+ os.environ["QDRANT_URL"] = "http://localhost:6333"
+
+ async def test_temporal_tools():
+     """Test all temporal tools."""
+     print("=== TEMPORAL TOOLS DEBUG SCRIPT ===")
+
+     try:
+         # Import required modules
+         from server import (
+             get_recent_work, search_by_recency, get_timeline,
+             get_all_collections, QDRANT_URL
+         )
+         from fastmcp import Context
+
+         print(f"✅ Successfully imported temporal tools")
+         print(f"✅ Qdrant URL: {QDRANT_URL}")
+
+         # Check if Qdrant is available
+         collections = await get_all_collections()
+         print(f"✅ Found {len(collections)} collections: {collections[:5]}...")
+
+         # Create a mock context for testing
+         class MockContext(Context):
+             def __init__(self):
+                 pass
+             async def debug(self, message):
+                 print(f"DEBUG: {message}")
+             async def error(self, message):
+                 print(f"ERROR: {message}")
+
+         ctx = MockContext()
+
+         # Test 1: get_recent_work with default parameters
+         print("\n--- Test 1: get_recent_work (default) ---")
+         try:
+             result = await get_recent_work(ctx)
+             print(f"✅ get_recent_work succeeded")
+             print(f"Result length: {len(result) if result else 0} characters")
+             if result and len(result) < 500:
+                 print(f"Result: {result}")
+         except Exception as e:
+             print(f"❌ get_recent_work failed: {e}")
+             traceback.print_exc()
+
+         # Test 2: get_recent_work with project='all'
+         print("\n--- Test 2: get_recent_work (project=all) ---")
+         try:
+             result = await get_recent_work(ctx, project="all", limit=5)
+             print(f"✅ get_recent_work (project=all) succeeded")
+             print(f"Result length: {len(result) if result else 0} characters")
+         except Exception as e:
+             print(f"❌ get_recent_work (project=all) failed: {e}")
+             traceback.print_exc()
+
+         # Test 3: get_recent_work with different group_by options
+         for group_by in ["conversation", "day", "session"]:
+             print(f"\n--- Test 3.{group_by}: get_recent_work (group_by={group_by}) ---")
+             try:
+                 result = await get_recent_work(ctx, limit=3, group_by=group_by)
+                 print(f"✅ get_recent_work (group_by={group_by}) succeeded")
+                 print(f"Result length: {len(result) if result else 0} characters")
+             except Exception as e:
+                 print(f"❌ get_recent_work (group_by={group_by}) failed: {e}")
+                 traceback.print_exc()
+
+         # Test 4: search_by_recency with time_range
+         print("\n--- Test 4: search_by_recency (time_range) ---")
+         try:
+             result = await search_by_recency(
+                 ctx,
+                 query="testing debugging",
+                 time_range="last week",
+                 limit=5
+             )
+             print(f"✅ search_by_recency (time_range) succeeded")
+             print(f"Result length: {len(result) if result else 0} characters")
+         except Exception as e:
+             print(f"❌ search_by_recency (time_range) failed: {e}")
+             traceback.print_exc()
+
+         # Test 5: search_by_recency with since/until
+         print("\n--- Test 5: search_by_recency (since/until) ---")
+         try:
+             result = await search_by_recency(
+                 ctx,
+                 query="python script",
+                 since="yesterday",
+                 limit=3
+             )
+             print(f"✅ search_by_recency (since/until) succeeded")
+             print(f"Result length: {len(result) if result else 0} characters")
+         except Exception as e:
+             print(f"❌ search_by_recency (since/until) failed: {e}")
+             traceback.print_exc()
+
+         # Test 6: get_timeline with different granularities
+         for granularity in ["day", "week"]:
+             print(f"\n--- Test 6.{granularity}: get_timeline (granularity={granularity}) ---")
+             try:
+                 result = await get_timeline(
+                     ctx,
+                     time_range="last week",
+                     granularity=granularity,
+                     include_stats=True
+                 )
+                 print(f"✅ get_timeline (granularity={granularity}) succeeded")
+                 print(f"Result length: {len(result) if result else 0} characters")
+             except Exception as e:
+                 print(f"❌ get_timeline (granularity={granularity}) failed: {e}")
+                 traceback.print_exc()
+
+         print("\n=== TEMPORAL TOOLS TEST COMPLETE ===")
+
+     except Exception as e:
+         print(f"❌ Critical error during setup: {e}")
+         traceback.print_exc()
+
+ if __name__ == "__main__":
+     asyncio.run(test_temporal_tools())
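
Editor's note: the six test blocks above repeat one try/except/report pattern. A small harness, sketched below, would factor it out; run_test is hypothetical glue for this debug script, not part of the package, and assumes each tool returns a string as the tests above do.

import traceback


async def run_test(name: str, coro):
    """Await a tool call, report success or failure, and never raise (sketch)."""
    print(f"\n--- {name} ---")
    try:
        result = await coro
        print(f"✅ {name} succeeded ({len(result) if result else 0} characters)")
        return result
    except Exception as e:
        print(f"❌ {name} failed: {e}")
        traceback.print_exc()

# Usage inside test_temporal_tools, e.g.:
#   await run_test("get_recent_work (default)", get_recent_work(ctx))
#   await run_test("get_timeline (day)",
#                  get_timeline(ctx, time_range="last week", granularity="day"))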