claude-self-reflect 1.3.4 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/.claude/agents/README.md +138 -0
  2. package/.claude/agents/docker-orchestrator.md +264 -0
  3. package/.claude/agents/documentation-writer.md +262 -0
  4. package/.claude/agents/import-debugger.md +203 -0
  5. package/.claude/agents/mcp-integration.md +286 -0
  6. package/.claude/agents/open-source-maintainer.md +150 -0
  7. package/.claude/agents/performance-tuner.md +276 -0
  8. package/.claude/agents/qdrant-specialist.md +138 -0
  9. package/.claude/agents/reflection-specialist.md +361 -0
  10. package/.claude/agents/search-optimizer.md +307 -0
  11. package/LICENSE +21 -0
  12. package/README.md +363 -0
  13. package/installer/cli.js +122 -0
  14. package/installer/postinstall.js +13 -0
  15. package/installer/setup-wizard.js +204 -0
  16. package/mcp-server/pyproject.toml +27 -0
  17. package/mcp-server/run-mcp.sh +21 -0
  18. package/mcp-server/src/__init__.py +1 -0
  19. package/mcp-server/src/__main__.py +23 -0
  20. package/mcp-server/src/server.py +316 -0
  21. package/mcp-server/src/server_v2.py +240 -0
  22. package/package.json +12 -36
  23. package/scripts/import-conversations-isolated.py +311 -0
  24. package/scripts/import-conversations-voyage-streaming.py +377 -0
  25. package/scripts/import-conversations-voyage.py +428 -0
  26. package/scripts/import-conversations.py +240 -0
  27. package/scripts/import-current-conversation.py +38 -0
  28. package/scripts/import-live-conversation.py +152 -0
  29. package/scripts/import-openai-enhanced.py +867 -0
  30. package/scripts/import-recent-only.py +29 -0
  31. package/scripts/import-single-project.py +278 -0
  32. package/scripts/import-watcher.py +169 -0
  33. package/config/claude-desktop-config.json +0 -12
  34. package/dist/cli.d.ts +0 -3
  35. package/dist/cli.d.ts.map +0 -1
  36. package/dist/cli.js +0 -55
  37. package/dist/cli.js.map +0 -1
  38. package/dist/embeddings-gemini.d.ts +0 -76
  39. package/dist/embeddings-gemini.d.ts.map +0 -1
  40. package/dist/embeddings-gemini.js +0 -158
  41. package/dist/embeddings-gemini.js.map +0 -1
  42. package/dist/embeddings.d.ts +0 -67
  43. package/dist/embeddings.d.ts.map +0 -1
  44. package/dist/embeddings.js +0 -252
  45. package/dist/embeddings.js.map +0 -1
  46. package/dist/index.d.ts +0 -3
  47. package/dist/index.d.ts.map +0 -1
  48. package/dist/index.js +0 -439
  49. package/dist/index.js.map +0 -1
  50. package/dist/project-isolation.d.ts +0 -29
  51. package/dist/project-isolation.d.ts.map +0 -1
  52. package/dist/project-isolation.js +0 -78
  53. package/dist/project-isolation.js.map +0 -1
  54. package/scripts/install-agent.js +0 -70
  55. package/scripts/setup-wizard.js +0 -596
  56. package/src/cli.ts +0 -56
  57. package/src/embeddings-gemini.ts +0 -176
  58. package/src/embeddings.ts +0 -296
  59. package/src/index.ts +0 -513
  60. package/src/project-isolation.ts +0 -93
package/README.md ADDED
@@ -0,0 +1,363 @@
1
+ # Claude-Self-Reflect - Conversation Memory for Claude
2
+
3
+ Give Claude perfect memory across all conversations. Semantic search over your entire conversation history using a vector database and MCP (Model Context Protocol).
4
+
5
+ ## Motivation, Alternatives & Past Attempts
6
+
7
+ **Motivation**: Claude has no memory between conversations. Every chat starts from scratch, requiring you to re-explain context, repeat solutions, and manually search through conversation files.
8
+
9
+ **Our Solution**: A semantic memory layer that automatically indexes your conversations and provides instant search through Claude's native tools.
10
+
11
+ **Past Attempts**:
12
+ - Neo4j graph database - Too complex for simple conversation retrieval
13
+ - Keyword search - Missed semantically similar content
14
+ - Manual organization - Doesn't scale with hundreds of conversations
15
+
16
+ **Why Qdrant + Vectors**: Industry-standard approach used by LangChain, Dify, and others. Optimized for semantic similarity, not complex relationships.
17
+
18
+ ## Glimpse of the Future
19
+
20
+ Imagine asking Claude:
21
+ - "What did we discuss about database design last month?"
22
+ - "Find that debugging solution we discovered together"
23
+ - "Have we encountered this error before?"
24
+
25
+ And getting instant, accurate answers from your entire conversation history. That's Claude-Self-Reflect.
26
+
27
+ ## Quick Start
28
+
29
+ ```bash
30
+ # One command setup - handles everything interactively
31
+ npm install -g claude-self-reflect && claude-self-reflect setup
32
+ ```
33
+
34
+ **That's it!** The setup wizard will:
35
+ - ✅ Check Python 3.10+ installation
36
+ - ✅ Start Qdrant vector database
37
+ - ✅ Install the Python MCP server
38
+ - ✅ Configure your API keys
39
+ - ✅ Set up Claude Code integration
40
+
41
+ - **Need details?** See [Installation Guide](docs/installation-guide.md)
42
+ - **Embedding providers?** See [Embedding Provider Guide](docs/embedding-providers.md)
43
+ - **Manual setup?** See [Advanced Configuration](docs/installation-guide.md#manual-setup-advanced-users)
44
+
45
+ ## Architecture Overview
46
+
47
+ ![Architecture Diagram](docs/diagrams/architecture.png)
48
+
49
+ The system consists of four main components:
50
+ - **Claude Code/Desktop**: The MCP client that requests memory operations
51
+ - **MCP Server**: Python (FastMCP) service providing the search and store tools
52
+ - **Import Pipeline**: Python service that processes conversation logs
53
+ - **Qdrant Database**: Vector storage with semantic search capabilities
54
+
55
+ See also:
56
+ - [Data Flow Diagram](docs/diagrams/data-flow.png) - How data moves through the system
57
+ - [Import Process](docs/diagrams/import-process.png) - Detailed import workflow
58
+ - [Search Operation](docs/diagrams/search-operation.png) - How semantic search works
59
+
60
+ ## Why Qdrant Over Neo4j?
61
+
62
+ 1. **Simplicity**: Two tools (store/find) vs complex entity/relationship management
63
+ 2. **Performance**: Optimized for semantic search, no graph traversal overhead
64
+ 3. **Proven Pattern**: Industry standard for conversation memory (LangChain, Dify, etc.)
65
+ 4. **No Import Issues**: Direct vector storage without entity extraction complexity
66
+
67
+ ## Project Structure
68
+
69
+ ```
70
+ claude-self-reflect/
71
+ ├── mcp-server/ # Python MCP server using FastMCP
72
+ │ ├── src/ # Server source code
73
+ │ ├── pyproject.toml # Python package configuration
74
+ │ └── run-mcp.sh # MCP startup script
75
+ ├── scripts/ # Import and utility scripts
76
+ │ ├── import-*.py # Various import scripts for conversations
77
+ │ └── test-*.py # Test scripts for features
78
+ ├── .claude/agents/ # Claude sub-agents for specialized tasks
79
+ ├── config/ # Configuration files
80
+ ├── data/ # Qdrant vector database storage
81
+ └── docs/ # Documentation and guides
82
+ ```
83
+
84
+ ## Components
85
+
86
+ ### 1. Qdrant Vector Database
87
+ - Stores conversation embeddings with metadata
88
+ - Provides fast semantic similarity search
89
+ - Built-in vector indexing and retrieval
90
+
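+ As a rough illustration of what "stores conversation embeddings" looks like against Qdrant's Python client (a sketch only; the collection name and the 1024-dimension vector size are assumptions, not values taken from the import scripts):
+
+ ```python
+ from qdrant_client import QdrantClient
+ from qdrant_client.models import Distance, VectorParams
+
+ # Connect to the local Qdrant instance started by the setup wizard.
+ client = QdrantClient(url="http://localhost:6333")
+
+ # Hypothetical collection; the real import scripts derive per-project collection names.
+ client.recreate_collection(
+     collection_name="conversations_example",
+     vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
+ )
+ ```
+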
91
+ ### 2. MCP Server for Conversation Memory
92
+ - **Tool 1**: `store_reflection` - Store important insights and decisions
93
+ - **Tool 2**: `reflect_on_past` - Search through conversation history
94
+ - Simple semantic search without complex entity extraction
95
+ - Python-based using FastMCP framework
96
+
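+ A minimal sketch of how these two tools can be declared with FastMCP (the tool names match the ones above; the parameters and bodies are illustrative assumptions rather than the packaged server code):
+
+ ```python
+ from fastmcp import FastMCP
+
+ mcp = FastMCP("claude-self-reflect")
+
+ @mcp.tool()
+ def store_reflection(content: str, tags: list[str] | None = None) -> str:
+     """Store an important insight or decision for future reference."""
+     # Assumed behaviour: embed `content` and upsert it into Qdrant with metadata.
+     return "Reflection stored"
+
+ @mcp.tool()
+ def reflect_on_past(query: str, limit: int = 5, use_decay: bool = False) -> list[dict]:
+     """Semantic search over past conversation chunks."""
+     # Assumed behaviour: embed `query`, search Qdrant, optionally apply memory decay.
+     return []
+
+ if __name__ == "__main__":
+     mcp.run()
+ ```
+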
97
+ ### 3. Python Importer
98
+ - Reads JSONL files from Claude conversation logs
99
+ - Creates conversation chunks for context
100
+ - Generates embeddings using Voyage AI (voyage-3-large)
101
+ - Stores directly in Qdrant with metadata
102
+
103
+
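+ In outline, the import flow described above looks like this (a sketch under assumptions: the chunk size, JSONL message shape, payload fields, and collection name are illustrative; only the JSONL → chunks → Voyage embeddings → Qdrant flow comes from the description):
+
+ ```python
+ import json
+ import os
+
+ import voyageai
+ from qdrant_client import QdrantClient
+ from qdrant_client.models import PointStruct
+
+ def chunk_messages(path, size=10):
+     """Group consecutive messages from a Claude JSONL log into chunks."""
+     with open(path) as f:
+         messages = [json.loads(line) for line in f if line.strip()]
+     for i in range(0, len(messages), size):
+         yield messages[i:i + size]
+
+ vo = voyageai.Client(api_key=os.environ["VOYAGE_KEY"])
+ qdrant = QdrantClient(url="http://localhost:6333")
+
+ for idx, chunk in enumerate(chunk_messages("conversation.jsonl")):
+     text = "\n".join(str(m) for m in chunk)          # real scripts extract message text fields
+     embedding = vo.embed([text], model="voyage-3-large").embeddings[0]
+     qdrant.upsert(
+         collection_name="conversations_example",     # hypothetical collection name
+         points=[PointStruct(id=idx, vector=embedding, payload={"text": text})],
+     )
+ ```
+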
104
+ ## Using the Reflection Agent
105
+
106
+ ### In Claude Code
107
+ The reflection agent activates automatically when you ask about past conversations:
108
+
109
+ ![Reflection Agent in Action](docs/images/Reflection-specialist.png)
110
+
111
+ ```
112
+ "What did we discuss about database design?"
113
+ "Find our previous debugging session"
114
+ "Have we encountered this error before?"
115
+ ```
116
+
117
+ Or explicitly request it:
118
+ ```
119
+ "Use the reflection agent to search for our API discussions"
120
+ ```
121
+
122
+ ### Direct Tool Usage (Advanced)
123
+ You can also ask Claude to search directly:
124
+
125
+ ```
126
+ User: Can you check our past conversations about authentication?
127
+ Claude: I'll search through our conversation history about authentication...
128
+
129
+ User: Remember that we decided to use JWT tokens for the API
130
+ Claude: I'll store this decision for future reference...
131
+ ```
132
+
133
+ ## 🧪 Testing & Dry-Run Mode
134
+
135
+ ### Validate Your Setup
136
+
137
+ Before importing, validate that everything is configured correctly:
138
+
139
+ ```bash
140
+ # Run comprehensive validation
141
+ python scripts/validate-setup.py
142
+
143
+ # Example output:
144
+ # ✅ API Key [PASS] Voyage API key is valid
145
+ # ✅ Qdrant [PASS] Connected to http://localhost:6333
146
+ # ✅ Claude Logs [PASS] 24 projects, 265 files, 125.3 MB
147
+ # ✅ Disk Space [PASS] 45.2 GB free
148
+ ```
149
+
150
+ ### Dry-Run Mode
151
+
152
+ Test the import process without making any changes:
153
+
154
+ ```bash
155
+ # See what would be imported (no API calls, no database changes)
156
+ python scripts/import-openai-enhanced.py --dry-run
157
+
158
+ # Dry-run with preview of sample chunks
159
+ python scripts/import-openai-enhanced.py --dry-run --preview
160
+
161
+ # Validate setup only (checks connections, API keys, etc.)
162
+ python scripts/import-openai-enhanced.py --validate-only
163
+ ```
164
+
165
+ ### Example Dry-Run Output
166
+
167
+ ```
168
+ 🔍 Running in DRY-RUN mode...
169
+ ============================================================
170
+ 🚀 Initializing Claude-Self-Reflect Importer...
171
+
172
+ 📊 Import Summary:
173
+ • Total files: 265
174
+ • New files to import: 265
175
+ • Estimated chunks: ~2,650
176
+ • Estimated cost: FREE (within 200M token limit)
177
+ • Embedding model: voyage-3.5-lite
178
+
179
+ 🔍 DRY-RUN MODE - No changes will be made
180
+
181
+ ⏳ Starting import...
182
+
183
+ [DRY-RUN] Would ensure collection: conv_a1b2c3d4_voyage
184
+ [DRY-RUN] Would import 127 chunks to collection: conv_a1b2c3d4_voyage
185
+
186
+ 📊 Final Statistics:
187
+ • Time elapsed: 2 seconds
188
+ • Projects to import: 24
189
+ • Messages processed: 10,165
190
+ • Chunks created: 2,650
191
+ • Embeddings would be generated: 2,650
192
+ • API calls would be made: 133
193
+ • 💰 Estimated cost: FREE (within 200M token limit)
194
+ ```
195
+
196
+ ### Cost Estimation
197
+
198
+ The dry-run mode provides accurate cost estimates:
199
+
200
+ **Free Tiers:**
201
+ - Voyage AI: 200M tokens FREE, then $0.02 per 1M tokens
202
+ - Google Gemini: Unlimited FREE (data used for training)
203
+ - Local: Always FREE
204
+
205
+ **Paid Only:**
206
+ - OpenAI: $0.02 per 1M tokens (no free tier)
207
+
208
+ **Reality Check:** With 500 tokens per conversation chunk, 200M free tokens = ~400,000 conversation chunks. Most users never reach the paid tier.
209
+
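+ The arithmetic behind that estimate, as a back-of-the-envelope sketch (the 500-token chunk size and the prices are the figures quoted above):
+
+ ```python
+ tokens_per_chunk = 500
+ free_tokens = 200_000_000
+ price_per_million = 0.02              # USD per 1M tokens after the free tier
+
+ def estimated_cost(num_chunks: int) -> float:
+     billable = max(0, num_chunks * tokens_per_chunk - free_tokens)
+     return billable / 1_000_000 * price_per_million
+
+ print(free_tokens // tokens_per_chunk)   # 400000 chunks fit in the free tier
+ print(estimated_cost(2_650))             # 0.0, so a typical import stays free
+ ```
+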
210
+ ### Continuous Testing
211
+
212
+ ```bash
213
+ # Test import of a single project
214
+ python scripts/import-openai-enhanced.py ~/.claude/projects/my-project --dry-run
215
+
216
+ # Monitor import progress in real-time
217
+ python scripts/import-openai-enhanced.py --dry-run | tee import-test.log
218
+ ```
219
+
220
+ ## 🚀 Advanced Features
221
+
222
+ ### Memory Decay (v1.3.1)
223
+ Remember that brilliant debugging session from last week? Memory Decay ensures it stays at your fingertips. That random chat from 6 months ago? It gracefully fades into the background, just like human memory.
224
+
225
+ #### What is Memory Decay?
226
+
227
+ Memory Decay transforms your conversation search from a flat, time-agnostic system into an intelligent memory that understands recency matters. When you search for "React hooks debugging", you want last week's breakthrough solution, not that outdated approach from last year.
228
+
229
+ Here's the magic: Memory Decay applies an exponential decay function to search scores, blending semantic similarity with temporal relevance. The result? Recent conversations get a massive boost while older ones gradually diminish.
230
+
231
+ #### The Numbers That Matter
232
+
233
+ Without Memory Decay:
234
+ - Search: "qdrant implementation"
235
+ - Top result: 6-month-old conversation (Score: 0.361)
236
+ - All results: Scores range from 0.35 to 0.36
237
+ - No consideration of when discussions happened
238
+
239
+ With Memory Decay Enabled:
240
+ - Same search: "qdrant implementation"
241
+ - Top result: Last week's conversation (Score: 0.605)
242
+ - All results: Scores range from 0.59 to 0.61
243
+ - **That's a 68% score boost for recent content!**
244
+
245
+ #### How It Works - The Technical Deep Dive
246
+
247
+ The decay formula elegantly combines semantic similarity with time-based relevance:
248
+
249
+ ```
250
+ final_score = semantic_score × (1 - decay_weight) + decay_factor × decay_weight
251
+ ```
252
+
253
+ Where:
254
+ - `semantic_score`: How well the content matches your query (0.0 to 1.0)
255
+ - `decay_weight`: How much recency matters (default: 0.3 or 30%)
256
+ - `decay_factor`: Exponential decay based on age: `e^(-age_days / half_life)`
257
+ - `half_life`: Decay time scale in days, controlling how quickly older conversations fade (default: 90 days)
258
+
259
+ #### Real-World Example
260
+
261
+ Let's say you search for "authentication strategy":
262
+
263
+ **Identical content at different ages:**
264
+ - Today's discussion: Score 1.000 (100% fresh)
265
+ - 30 days old: Score 0.915 (still highly relevant)
266
+ - 90 days old: Score 0.810 (starting to fade)
267
+ - 180 days old: Score 0.741 (significantly diminished)
268
+ - 365 days old: Score 0.705 (barely relevant)
269
+
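+ These scores follow directly from the formula above; a minimal sketch that reproduces them with the default 0.3 decay weight and 90-day scale, assuming a perfect semantic match (`semantic_score = 1.0`):
+
+ ```python
+ import math
+
+ def decayed_score(semantic_score, age_days, decay_weight=0.3, half_life=90.0):
+     decay_factor = math.exp(-age_days / half_life)
+     return semantic_score * (1 - decay_weight) + decay_factor * decay_weight
+
+ for age in (0, 30, 90, 180, 365):
+     print(age, round(decayed_score(1.0, age), 3))
+ # 0 1.0, 30 0.915, 90 0.81, 180 0.741, 365 0.705
+ ```
+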
270
+ #### Configuration Options
271
+
272
+ ```env
273
+ # Enable/disable memory decay globally
274
+ ENABLE_MEMORY_DECAY=true # Default: false (opt-in feature)
275
+
276
+ # How much should recency affect scores? (0.0 to 1.0)
277
+ DECAY_WEIGHT=0.3 # 30% weight on recency, 70% on content
278
+
279
+ # How fast should memories fade?
280
+ DECAY_SCALE_DAYS=90 # 90-day half-life (3 months)
281
+ ```
282
+
283
+ #### Per-Search Control
284
+
285
+ You have complete control over decay on each search:
286
+
287
+ ```javascript
288
+ // Search with decay (prioritize recent)
289
+ await mcp.reflect_on_past({
290
+ query: "database optimization",
291
+ useDecay: true
292
+ });
293
+
294
+ // Search without decay (all time periods equal)
295
+ await mcp.reflect_on_past({
296
+ query: "foundational architecture decisions",
297
+ useDecay: false
298
+ });
299
+ ```
300
+
301
+ #### Performance Characteristics
302
+
303
+ We've optimized Memory Decay to be lightning fast:
304
+ - **Overhead**: Just 0.009 seconds for 1000 search results
305
+ - **Method**: Client-side calculation after vector search
306
+ - **Scalability**: Linear with result count, not database size
307
+
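+ Because decay is computed client-side, it is a single pass over the hits returned by Qdrant; a sketch of that re-scoring step (the dict-shaped hits and the `timestamp` payload field are assumptions about how results are represented):
+
+ ```python
+ import math
+ import time
+
+ def apply_decay(hits, decay_weight=0.3, half_life_days=90.0):
+     """Re-rank search hits by blending similarity with recency."""
+     now = time.time()
+     rescored = []
+     for hit in hits:
+         age_days = (now - hit["payload"]["timestamp"]) / 86400   # assumed payload field (epoch seconds)
+         decay_factor = math.exp(-age_days / half_life_days)
+         score = hit["score"] * (1 - decay_weight) + decay_factor * decay_weight
+         rescored.append({**hit, "score": score})
+     return sorted(rescored, key=lambda h: h["score"], reverse=True)
+ ```
+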
308
+ #### The Philosophy
309
+
310
+ Memory Decay isn't just a feature - it's a recognition that not all memories are equal. Your conversation history should work like your brain: keeping recent, relevant information readily accessible while letting older details fade naturally. This isn't about losing information - every conversation remains searchable. It's about surfacing what matters most, when it matters most.
311
+
312
+ See [Memory Decay Guide](docs/memory-decay.md) for advanced configuration and implementation details.
313
+
314
+ ## 🤝 Why Claude-Self-Reflect?
315
+
316
+ ### Key Advantages
317
+ - **Local-First**: Your conversations stay on your machine
318
+ - **Zero Configuration**: Works out of the box with sensible defaults
319
+ - **Claude-Native**: Built specifically for Claude Code & Desktop
320
+ - **Semantic Search**: Understands meaning, not just keywords
321
+ - **Continuous Import**: Automatically indexes new conversations
322
+ - **Privacy-Focused**: Vectors and metadata stay in your local Qdrant instance; with a local embedding provider, nothing leaves your machine
323
+
324
+
325
+ ### CLAUDE.md vs Claude-Self-Reflect
326
+
327
+ | Aspect | CLAUDE.md | Claude-Self-Reflect |
328
+ |--------|-----------|-------------------|
329
+ | **Purpose** | Project-specific instructions | Conversation memory across all projects |
330
+ | **Scope** | Single project context | Global conversation history |
331
+ | **Storage** | Text file in project | Vector database (Qdrant) |
332
+ | **Search** | Exact text matching | Semantic similarity search |
333
+ | **Updates** | Manual editing | Automatic indexing |
334
+ | **Best For** | Project rules & guidelines | Finding past discussions & decisions |
335
+
336
+ **Use both together**: CLAUDE.md for project-specific rules, Claude-Self-Reflect for conversation history.
337
+
338
+
339
+
340
+ ## Troubleshooting
341
+
342
+ Having issues? Check our [Troubleshooting Guide](docs/troubleshooting.md) or:
343
+
344
+ - Ask in [Discussions](https://github.com/ramakay/claude-self-reflect/discussions)
345
+ - Report bugs in [Issues](https://github.com/ramakay/claude-self-reflect/issues)
346
+
347
+ ## Roadmap
348
+
349
+ - **Q1 2025**: Conversation summarization, time-based filtering, export history
350
+ - **Q2 2025**: Multi-modal memory, analytics dashboard, team sharing
351
+ - **Long Term**: Active learning, conversation graphs, enterprise features
352
+
353
+ [Full Roadmap & Contributing](CONTRIBUTING.md)
354
+
355
+ ## License
356
+
357
+ MIT License - see [LICENSE](LICENSE) for details.
358
+
359
+ ---
360
+
361
+ <p align="center">
362
+ Built with ❤️ for the Claude community by <a href="https://github.com/ramakay">ramakay</a>
363
+ </p>
package/installer/cli.js ADDED
@@ -0,0 +1,122 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { spawn } from 'child_process';
4
+ import { fileURLToPath } from 'url';
5
+ import { dirname, join } from 'path';
6
+ import fs from 'fs/promises';
7
+
8
+ const __filename = fileURLToPath(import.meta.url);
9
+ const __dirname = dirname(__filename);
10
+
11
+ const commands = {
12
+ setup: 'Run the setup wizard to configure Claude Self-Reflect',
13
+ doctor: 'Check your installation and diagnose issues',
14
+ help: 'Show this help message'
15
+ };
16
+
17
+ async function setup() {
18
+ console.log('🚀 Claude Self-Reflect Setup Wizard\n');
19
+
20
+ const setupPath = join(__dirname, 'setup-wizard.js');
21
+ const child = spawn('node', [setupPath], { stdio: 'inherit' });
22
+
23
+ child.on('exit', (code) => {
24
+ process.exit(code || 0);
25
+ });
26
+ }
27
+
28
+ async function doctor() {
29
+ console.log('🔍 Checking Claude Self-Reflect installation...\n');
30
+
31
+ const checks = [
32
+ {
33
+ name: 'Python 3.10+',
34
+ check: async () => {
35
+ try {
36
+ const { execSync } = await import('child_process');
37
+ const version = execSync('python3 --version').toString().trim();
38
+ return { passed: true, message: version };
39
+ } catch {
40
+ return { passed: false, message: 'Python 3.10+ not found' };
41
+ }
42
+ }
43
+ },
44
+ {
45
+ name: 'Qdrant',
46
+ check: async () => {
47
+ try {
48
+ const response = await fetch('http://localhost:6333');
49
+ const data = await response.json();
50
+ if (data.title && data.title.includes('qdrant')) {
51
+ return { passed: true, message: `Qdrant ${data.version} is running on port 6333` };
52
+ }
53
+ } catch {}
54
+ return { passed: false, message: 'Qdrant not accessible on localhost:6333' };
55
+ }
56
+ },
57
+ {
58
+ name: 'MCP Server',
59
+ check: async () => {
60
+ const mcpPath = join(__dirname, '..', 'mcp-server', 'pyproject.toml');
61
+ try {
62
+ await fs.access(mcpPath);
63
+ return { passed: true, message: 'MCP server files found' };
64
+ } catch {
65
+ return { passed: false, message: 'MCP server files not found' };
66
+ }
67
+ }
68
+ },
69
+ {
70
+ name: 'Environment Variables',
71
+ check: async () => {
72
+ const envPath = join(__dirname, '..', '.env');
73
+ try {
74
+ const content = await fs.readFile(envPath, 'utf-8');
75
+ const hasVoyageKey = content.includes('VOYAGE_KEY=') && !content.includes('VOYAGE_KEY=your-');
76
+ if (hasVoyageKey) {
77
+ return { passed: true, message: '.env file configured with VOYAGE_KEY' };
78
+ }
79
+ return { passed: false, message: '.env file missing VOYAGE_KEY' };
80
+ } catch {
81
+ return { passed: false, message: '.env file not found' };
82
+ }
83
+ }
84
+ }
85
+ ];
86
+
87
+ for (const check of checks) {
88
+ const result = await check.check();
89
+ const icon = result.passed ? '✅' : '❌';
90
+ console.log(`${icon} ${check.name}: ${result.message}`);
91
+ }
92
+
93
+ console.log('\n💡 Run "claude-self-reflect setup" to fix any issues');
94
+ }
95
+
96
+ function help() {
97
+ console.log('Claude Self-Reflect - Perfect memory for Claude\n');
98
+ console.log('Usage: claude-self-reflect <command>\n');
99
+ console.log('Commands:');
100
+
101
+ for (const [cmd, desc] of Object.entries(commands)) {
102
+ console.log(` ${cmd.padEnd(10)} ${desc}`);
103
+ }
104
+
105
+ console.log('\nFor more information: https://github.com/ramakay/claude-self-reflect');
106
+ }
107
+
108
+ // Main
109
+ const command = process.argv[2] || 'help';
110
+
111
+ switch (command) {
112
+ case 'setup':
113
+ setup();
114
+ break;
115
+ case 'doctor':
116
+ doctor();
117
+ break;
118
+ case 'help':
119
+ default:
120
+ help();
121
+ break;
122
+ }
package/installer/postinstall.js ADDED
@@ -0,0 +1,13 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { fileURLToPath } from 'url';
4
+ import { dirname, join } from 'path';
5
+
6
+ const __filename = fileURLToPath(import.meta.url);
7
+ const __dirname = dirname(__filename);
8
+
9
+ // Only show message if not in development
10
+ if (!process.cwd().includes('claude-self-reflect')) {
11
+ console.log('\n🎉 Claude Self-Reflect installed!\n');
12
+ console.log('Run "claude-self-reflect setup" to configure your installation.\n');
13
+ }
package/installer/setup-wizard.js ADDED
@@ -0,0 +1,204 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { execSync, spawn } from 'child_process';
4
+ import { fileURLToPath } from 'url';
5
+ import { dirname, join } from 'path';
6
+ import fs from 'fs/promises';
7
+ import readline from 'readline';
8
+ import path from 'path';
9
+
10
+ const __filename = fileURLToPath(import.meta.url);
11
+ const __dirname = dirname(__filename);
12
+ const projectRoot = join(__dirname, '..');
13
+
14
+ const rl = readline.createInterface({
15
+ input: process.stdin,
16
+ output: process.stdout
17
+ });
18
+
19
+ const question = (query) => new Promise((resolve) => rl.question(query, resolve));
20
+
21
+ async function checkPython() {
22
+ console.log('\n📦 Checking Python installation...');
23
+ try {
24
+ const version = execSync('python3 --version').toString().trim();
25
+ console.log(`✅ Found ${version}`);
26
+ return true;
27
+ } catch {
28
+ console.log('❌ Python 3.10+ not found');
29
+ console.log(' Please install Python from https://python.org');
30
+ return false;
31
+ }
32
+ }
33
+
34
+ async function checkQdrant() {
35
+ console.log('\n🐳 Checking Qdrant...');
36
+ try {
37
+ const response = await fetch('http://localhost:6333/health');
38
+ if (response.ok) {
39
+ console.log('✅ Qdrant is already running');
40
+ return true;
41
+ }
42
+ } catch {}
43
+
44
+ console.log('❌ Qdrant not found');
45
+ const start = await question('Would you like to start Qdrant with Docker? (y/n): ');
46
+
47
+ if (start.toLowerCase() === 'y') {
48
+ try {
49
+ console.log('Starting Qdrant...');
50
+ execSync('docker run -d --name qdrant -p 6333:6333 qdrant/qdrant:latest', { stdio: 'inherit' });
51
+ console.log('✅ Qdrant started successfully');
52
+ return true;
53
+ } catch (error) {
54
+ console.log('❌ Failed to start Qdrant. Please install Docker first.');
55
+ return false;
56
+ }
57
+ }
58
+
59
+ return false;
60
+ }
61
+
62
+ async function setupPythonEnvironment() {
63
+ console.log('\n🐍 Setting up Python MCP server...');
64
+
65
+ const mcpPath = join(projectRoot, 'mcp-server');
66
+
67
+ try {
68
+ // Create virtual environment
69
+ console.log('Creating virtual environment...');
70
+ execSync(`cd "${mcpPath}" && python3 -m venv venv`, { stdio: 'inherit' });
71
+
72
+ // Install dependencies
73
+ console.log('Installing dependencies...');
74
+ const activateCmd = process.platform === 'win32'
75
+ ? 'venv\\Scripts\\activate'
76
+ : 'source venv/bin/activate';
77
+
78
+ execSync(`cd "${mcpPath}" && ${activateCmd} && pip install -e .`, {
79
+ stdio: 'inherit',
80
+ shell: true
81
+ });
82
+
83
+ console.log('✅ Python environment setup complete');
84
+ return true;
85
+ } catch (error) {
86
+ console.log('❌ Failed to setup Python environment:', error.message);
87
+ return false;
88
+ }
89
+ }
90
+
91
+ async function configureEnvironment() {
92
+ console.log('\n🔐 Configuring environment variables...');
93
+
94
+ const envPath = join(projectRoot, '.env');
95
+ let envContent = '';
96
+
97
+ try {
98
+ envContent = await fs.readFile(envPath, 'utf-8');
99
+ } catch {
100
+ // .env doesn't exist, create it
101
+ }
102
+
103
+ // Check for VOYAGE_KEY
104
+ if (!envContent.includes('VOYAGE_KEY=') || envContent.includes('VOYAGE_KEY=your-')) {
105
+ console.log('\nVoyage AI provides embeddings for semantic search.');
106
+ console.log('Get your free API key at: https://www.voyageai.com/');
107
+
108
+ const voyageKey = await question('Enter your Voyage AI API key (or press Enter to skip): ');
109
+
110
+ if (voyageKey) {
111
+ envContent = envContent.replace(/VOYAGE_KEY=.*/g, '');
112
+ envContent += `\nVOYAGE_KEY=${voyageKey}\n`;
113
+ }
114
+ }
115
+
116
+ // Set default Qdrant URL if not present
117
+ if (!envContent.includes('QDRANT_URL=')) {
118
+ envContent += 'QDRANT_URL=http://localhost:6333\n';
119
+ }
120
+
121
+ await fs.writeFile(envPath, envContent.trim() + '\n');
122
+ console.log('✅ Environment configured');
123
+ }
124
+
125
+ async function setupClaude() {
126
+ console.log('\n🤖 Claude Code MCP Configuration...');
127
+
128
+ const runScript = join(projectRoot, 'mcp-server', 'run-mcp.sh');
129
+
130
+ console.log('\nAdd this to your Claude Code settings:');
131
+ console.log('```bash');
132
+ console.log(`claude mcp add claude-self-reflect "${runScript}" -e VOYAGE_KEY="<your-key>" -e QDRANT_URL="http://localhost:6333"`);
133
+ console.log('```');
134
+
135
+ console.log('\nThen restart Claude Code for the changes to take effect.');
136
+ }
137
+
138
+ async function installAgents() {
139
+ console.log('\n🤖 Installing Claude agents...');
140
+
141
+ const agentsSource = join(projectRoot, '.claude', 'agents');
142
+ const agentsDest = join(process.cwd(), '.claude', 'agents');
143
+
144
+ if (agentsSource === agentsDest) {
145
+ console.log('📦 Skipping agent installation in package directory');
146
+ return;
147
+ }
148
+
149
+ try {
150
+ await fs.mkdir(path.dirname(agentsDest), { recursive: true });
151
+
152
+ // Check if already exists
153
+ try {
154
+ await fs.access(agentsDest);
155
+ console.log('✅ Agents already installed');
156
+ return;
157
+ } catch {
158
+ // Copy agents
159
+ await fs.cp(agentsSource, agentsDest, { recursive: true });
160
+ console.log('✅ Agents installed to .claude/agents/');
161
+ }
162
+ } catch (error) {
163
+ console.log('⚠️ Could not install agents:', error.message);
164
+ }
165
+ }
166
+
167
+ async function main() {
168
+ console.log('🚀 Welcome to Claude Self-Reflect Setup!\n');
169
+ console.log('This wizard will help you set up conversation memory for Claude.\n');
170
+
171
+ // Check prerequisites
172
+ const pythonOk = await checkPython();
173
+ if (!pythonOk) {
174
+ console.log('\n❌ Setup cannot continue without Python');
175
+ process.exit(1);
176
+ }
177
+
178
+ const qdrantOk = await checkQdrant();
179
+ if (!qdrantOk) {
180
+ console.log('\n⚠️ Qdrant is required for the vector database');
181
+ }
182
+
183
+ // Setup Python environment
184
+ await setupPythonEnvironment();
185
+
186
+ // Configure environment
187
+ await configureEnvironment();
188
+
189
+ // Install agents
190
+ await installAgents();
191
+
192
+ // Show Claude configuration
193
+ await setupClaude();
194
+
195
+ console.log('\n✅ Setup complete!');
196
+ console.log('\nNext steps:');
197
+ console.log('1. Import your conversations: cd scripts && python import-conversations-voyage.py');
198
+ console.log('2. Use the reflection tools in Claude Code');
199
+ console.log('\nFor more info: https://github.com/ramakay/claude-self-reflect');
200
+
201
+ rl.close();
202
+ }
203
+
204
+ main().catch(console.error);