claude-self-reflect 6.0.5 → 7.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -28,3 +28,37 @@ CHUNK_SIZE=50
28
28
  MAX_FILES_PER_CYCLE=10
29
29
  HOT_WINDOW_MINUTES=15
30
30
  MAX_COLD_FILES_PER_CYCLE=3
31
+
32
+ # ====================================================================================
33
+ # Batch Automation Configuration (OPTIONAL - Requires Anthropic API Key)
34
+ # ====================================================================================
35
+ # Batch automation provides 9.3x better search quality with automated narratives
36
+ # Disabled by default - enable via CLI during installation or manually
37
+
38
+ # Anthropic API Key (required for batch automation)
39
+ # Get your key from https://console.anthropic.com
40
+ ANTHROPIC_API_KEY=
41
+
42
+ # Qdrant API Key (optional for standalone mode, required for shared/multi-user)
43
+ # STANDALONE: Leave empty if running locally just for yourself
44
+ # SHARED: Set a strong API key if multiple people access this Qdrant instance
45
+ QDRANT_API_KEY=
46
+
47
+ # Batch Automation Directories
48
+ CSR_HOME=~/.claude-self-reflect
49
+ CSR_CONFIG_DIR=~/.claude-self-reflect/config
50
+ CSR_BATCH_STATE_DIR=~/.claude-self-reflect/batch_state
51
+ CSR_BATCH_QUEUE_DIR=~/.claude-self-reflect/batch_queue
52
+
53
+ # Batch Triggers
54
+ BATCH_SIZE_TRIGGER=10 # Trigger batch after N files
55
+ BATCH_TIME_TRIGGER_MINUTES=30 # Or after N minutes
56
+
57
+ # Subprocess Settings
58
+ SUBPROCESS_TIMEOUT_SECONDS=1800 # 30 minute timeout for batch operations
59
+
60
+ # Watcher Timing
61
+ HOT_CHECK_INTERVAL_S=2 # Check hot files every N seconds
62
+ NORMAL_CHECK_INTERVAL_S=60 # Normal check interval
63
+ WARM_WINDOW_HOURS=24 # Files < N hours are warm
64
+ MAX_COLD_FILES=5 # Max cold files per cycle
@@ -0,0 +1,36 @@
1
+ FROM python:3.11-slim
2
+
3
+ WORKDIR /app
4
+
5
+ # Install system dependencies
6
+ RUN apt-get update && apt-get install -y \
7
+ gcc \
8
+ g++ \
9
+ && rm -rf /var/lib/apt/lists/*
10
+
11
+ # Copy requirements
12
+ COPY requirements.txt .
13
+ RUN pip install --no-cache-dir -r requirements.txt
14
+
15
+ # Copy source code
16
+ COPY src/ ./src/
17
+ COPY docs/design/ ./docs/design/
18
+
19
+ # Create non-root user
20
+ RUN groupadd -r appuser && \
21
+ useradd -r -g appuser -u 1001 appuser
22
+
23
+ # Create directories for batch state and set ownership
24
+ RUN mkdir -p /home/appuser/.claude-self-reflect/batch_state && \
25
+ chown -R appuser:appuser /home/appuser/.claude-self-reflect && \
26
+ chown -R appuser:appuser /app
27
+
28
+ # Set Python path
29
+ ENV PYTHONPATH=/app
30
+ ENV HOME=/home/appuser
31
+
32
+ # Switch to non-root user
33
+ USER appuser
34
+
35
+ # Run batch monitor
36
+ CMD ["python", "/app/src/runtime/batch_monitor.py"]
@@ -0,0 +1,38 @@
1
+ FROM python:3.11-slim
2
+
3
+ WORKDIR /app
4
+
5
+ # Install system dependencies
6
+ RUN apt-get update && apt-get install -y \
7
+ gcc \
8
+ g++ \
9
+ && rm -rf /var/lib/apt/lists/*
10
+
11
+ # Copy requirements
12
+ COPY requirements.txt .
13
+ RUN pip install --no-cache-dir -r requirements.txt
14
+
15
+ # Copy source code
16
+ COPY src/ ./src/
17
+ COPY docs/design/ ./docs/design/
18
+
19
+ # Create non-root user
20
+ RUN groupadd -r appuser && \
21
+ useradd -r -g appuser -u 1001 appuser
22
+
23
+ # Create directories for state and queue and set ownership
24
+ RUN mkdir -p /home/appuser/.claude-self-reflect/config && \
25
+ mkdir -p /home/appuser/.claude-self-reflect/batch_queue && \
26
+ mkdir -p /home/appuser/.claude-self-reflect/batch_state && \
27
+ chown -R appuser:appuser /home/appuser/.claude-self-reflect && \
28
+ chown -R appuser:appuser /app
29
+
30
+ # Set Python path
31
+ ENV PYTHONPATH=/app
32
+ ENV HOME=/home/appuser
33
+
34
+ # Switch to non-root user
35
+ USER appuser
36
+
37
+ # Run batch watcher
38
+ CMD ["python", "/app/src/runtime/batch_watcher.py"]
package/README.md CHANGED
@@ -26,6 +26,8 @@ Give Claude perfect memory of all your conversations. Search past discussions in
26
26
 
27
27
  **100% Local by Default** • **20x Faster** • **Zero Configuration** • **Production Ready**
28
28
 
29
+ > **Latest: v7.0 Automated Narratives** - 9.3x better search quality via AI-powered summaries. [Learn more →](#v70-automated-narrative-generation)
30
+
29
31
  ## Why This Exists
30
32
 
31
33
  Claude starts fresh every conversation. You've solved complex bugs, designed architectures, made critical decisions - all forgotten. Until now.
@@ -63,7 +65,7 @@ claude-self-reflect setup
63
65
  ```
64
66
 
65
67
  > [!TIP]
66
- > **v4.0+ Auto-Migration**: Updates from v3.x automatically migrate during npm install - no manual steps needed!
68
+ > **Auto-Migration**: Updates automatically handle breaking changes. Simply run `npm update -g claude-self-reflect`.
67
69
 
68
70
  <details open>
69
71
  <summary>Cloud Mode (Better Search Accuracy)</summary>
@@ -84,18 +86,15 @@ claude-self-reflect setup --voyage-key=YOUR_ACTUAL_KEY_HERE
84
86
 
85
87
  ## Performance
86
88
 
87
- <details open>
88
- <summary><b>v4.0 Performance Improvements</b></summary>
89
-
90
- | Metric | v3.x | v4.0 | Improvement |
91
- |--------|------|------|-------------|
89
+ | Metric | Before | After | Improvement |
90
+ |--------|--------|-------|-------------|
92
91
  | **Status Check** | 119ms | 6ms | **20x faster** |
93
92
  | **Storage Usage** | 100MB | 50MB | **50% reduction** |
94
93
  | **Import Speed** | 10/sec | 100/sec | **10x faster** |
95
94
  | **Memory Usage** | 500MB | 50MB | **90% reduction** |
96
95
  | **Search Latency** | 15ms | 3ms | **5x faster** |
97
96
 
98
- ### How We Compare
97
+ ### Competitive Comparison
99
98
 
100
99
  | Feature | Claude Self-Reflect | MemGPT | LangChain Memory |
101
100
  |---------|---------------------|---------|------------------|
@@ -106,8 +105,6 @@ claude-self-reflect setup --voyage-key=YOUR_ACTUAL_KEY_HERE
106
105
  | **Setup time** | 5 min | 30+ min | 20+ min |
107
106
  | **Docker required** | Yes | Python | Python |
108
107
 
109
- </details>
110
-
111
108
  ## The Magic
112
109
 
113
110
  ![Self Reflection vs The Grind](docs/images/red-reflection.webp)
@@ -177,32 +174,115 @@ Your code quality displayed live as you work:
177
174
 
178
175
  </details>
179
176
 
177
+ ## v7.0 Automated Narrative Generation
178
+
179
+ **9.3x Better Search Quality** • **50% Cost Savings** • **Fully Automated**
180
+
181
+ v7.0 introduces AI-powered conversation narratives that transform raw conversation excerpts into rich problem-solution summaries with comprehensive metadata extraction.
182
+
183
+ ### Before/After Comparison
184
+
185
+ | Metric | v6.x (Raw Excerpts) | v7.0 (AI Narratives) | Improvement |
186
+ |--------|---------------------|----------------------|-------------|
187
+ | **Search Quality** | 0.074 | 0.691 | **9.3x better** |
188
+ | **Token Compression** | 100% | 18% | **82% reduction** |
189
+ | **Cost per Conversation** | $0.025 | $0.012 | **50% savings** |
190
+ | **Metadata Richness** | Basic | Tools + Concepts + Files | **Full context** |
191
+
192
+ ### What You Get
193
+
194
+ **Enhanced Search Results:**
195
+ - **Problem-Solution Patterns**: Conversations structured as challenges encountered and solutions implemented
196
+ - **Rich Metadata**: Automatic extraction of tools used, technical concepts, and files modified
197
+ - **Context Compression**: 82% token reduction while maintaining searchability
198
+ - **Better Relevance**: Search scores improved from 0.074 to 0.691 (9.3x)
199
+
200
+ **Cost-Effective Processing:**
201
+ - Anthropic Batch API: $0.012 per conversation (vs $0.025 standard)
202
+ - Automatic batch queuing and processing
203
+ - Progress monitoring via Docker containers
204
+ - Evaluation generation for quality assurance
205
+
206
+ **Fully Automated Workflow:**
207
+ ```bash
208
+ # 1. Watch for new conversations
209
+ docker compose up batch-watcher
210
+
211
+ # 2. Auto-trigger batch processing when threshold reached
212
+ # (Configurable: BATCH_SIZE_TRIGGER, default 10)
213
+
214
+ # 3. Monitor batch progress
215
+ docker compose logs batch-monitor -f
216
+
217
+ # 4. Enhanced narratives automatically imported to Qdrant
218
+ ```
219
+
220
+ ### Example: Raw Excerpt vs AI Narrative
221
+
222
+ **Before (v6.x)** - Raw excerpt showing basic conversation flow:
223
+ ```
224
+ User: How do I fix the Docker memory issue?
225
+ Assistant: The container was limited to 2GB but only using 266MB...
226
+ ```
227
+
228
+ **After (v7.0)** - Rich narrative with metadata:
229
+ ```
230
+ PROBLEM: Docker container memory consumption investigation revealed
231
+ discrepancy between limits (2GB) and actual usage (266MB). Analysis
232
+ required to determine if memory limit was appropriate.
233
+
234
+ SOLUTION: Discovered issue occurred with MAX_QUEUE_SIZE=1000 outside
235
+ Docker environment. Implemented proper Docker resource constraints
236
+ stabilizing memory at 341MB.
237
+
238
+ TOOLS USED: Docker, grep, Edit
239
+ CONCEPTS: container-memory, resource-limits, queue-sizing
240
+ FILES: docker-compose.yaml, batch_watcher.py
241
+ ```
242
+
243
+ ### Getting Started with Narratives
244
+
245
+ Narratives are automatically generated for new conversations. To process existing conversations:
246
+
247
+ ```bash
248
+ # Process all existing conversations in batch
249
+ python docs/design/batch_import_all_projects.py
250
+
251
+ # Monitor batch progress
252
+ docker compose logs batch-monitor -f
253
+
254
+ # Check completion status
255
+ curl http://localhost:6333/collections/csr_claude-self-reflect_local_384d
256
+ ```
257
+
258
+ For complete documentation, see [Batch Automation Guide](docs/testing/NARRATIVE_TESTING_SUMMARY.md).
259
+
180
260
  ## Key Features
181
261
 
182
262
  <details>
183
263
  <summary><b>MCP Tools Available to Claude</b></summary>
184
264
 
185
- **Search & Memory Tools:**
265
+ **Search & Memory:**
186
266
  - `reflect_on_past` - Search past conversations using semantic similarity with time decay (supports quick/summary modes)
187
267
  - `store_reflection` - Store important insights or learnings for future reference
188
268
  - `get_next_results` - Paginate through additional search results
189
269
  - `search_by_file` - Find conversations that analyzed specific files
190
270
  - `search_by_concept` - Search for conversations about development concepts
191
- - `get_full_conversation` - Retrieve complete JSONL conversation files (v2.8.8)
271
+ - `get_full_conversation` - Retrieve complete JSONL conversation files
192
272
 
193
- **NEW: Temporal Query Tools (v3.3.0):**
273
+ **Temporal Queries:**
194
274
  - `get_recent_work` - Answer "What did we work on last?" with session grouping
195
275
  - `search_by_recency` - Time-constrained search like "docker issues last week"
196
276
  - `get_timeline` - Activity timeline with statistics and patterns
197
277
 
198
- **Runtime Configuration Tools (v4.0):**
278
+ **Runtime Configuration:**
199
279
  - `switch_embedding_mode` - Switch between local/cloud modes without restart
200
280
  - `get_embedding_mode` - Check current embedding configuration
201
281
  - `reload_code` - Hot reload Python code changes
202
282
  - `reload_status` - Check reload state
203
283
  - `clear_module_cache` - Clear Python cache
204
284
 
205
- **Status & Monitoring Tools:**
285
+ **Status & Monitoring:**
206
286
  - `get_status` - Real-time import progress and system status
207
287
  - `get_health` - Comprehensive system health check
208
288
  - `collection_status` - Check Qdrant collection health and stats
@@ -296,9 +376,6 @@ Files are categorized by age and processed with priority queuing to ensure newes
296
376
 
297
377
  ## Requirements
298
378
 
299
- > [!WARNING]
300
- > **Breaking Change in v4.0**: Collections now use prefixed naming (e.g., `csr_project_local_384d`). Run migration automatically via `npm update`.
301
-
302
379
  <details>
303
380
  <summary><b>System Requirements</b></summary>
304
381
 
@@ -376,20 +453,44 @@ npm uninstall -g claude-self-reflect
376
453
 
377
454
  ## Keeping Up to Date
378
455
 
379
- > [!TIP]
380
- > **For Existing Users**: Simply run `npm update -g claude-self-reflect` to get the latest features and improvements. Updates are automatic and preserve your data.
381
-
382
- <details>
383
- <summary>Recent Improvements</summary>
456
+ ```bash
457
+ npm update -g claude-self-reflect
458
+ ```
384
459
 
385
- - **20x faster performance** - Status checks, search, and imports
386
- - **Runtime configuration** - Switch modes without restarting
387
- - **Unified state management** - Single source of truth
388
- - **AST-GREP integration** - Code quality analysis
389
- - **Temporal search tools** - Find recent work and time-based queries
390
- - **Auto-migration** - Updates handle breaking changes automatically
460
+ Updates are automatic and preserve your data. See [full changelog](docs/release-history.md) for details.
391
461
 
392
- [Full changelog](docs/release-history.md)
462
+ <details>
463
+ <summary><b>Release Evolution</b></summary>
464
+
465
+ ### v7.0 - Automated Narratives (Oct 2025)
466
+ - **9.3x better search quality** via AI-powered conversation summaries
467
+ - **50% cost savings** using Anthropic Batch API ($0.012 per conversation)
468
+ - **82% token compression** while maintaining searchability
469
+ - Rich metadata extraction (tools, concepts, files)
470
+ - Problem-solution narrative structure
471
+ - Automated batch processing with Docker monitoring
472
+
473
+ ### v4.0 - Performance Revolution (Sep 2025)
474
+ - **20x faster** status checks (119ms → 6ms)
475
+ - **50% storage reduction** via unified state management
476
+ - **10x faster imports** (10/sec → 100/sec)
477
+ - **90% memory reduction** (500MB → 50MB)
478
+ - Runtime mode switching (no restart required)
479
+ - Prefixed collection naming (breaking change)
480
+ - Code quality tracking with AST-GREP (100+ patterns)
481
+
482
+ ### v3.3 - Temporal Intelligence (Aug 2025)
483
+ - Time-based search: "docker issues last week"
484
+ - Session grouping: "What did we work on last?"
485
+ - Activity timelines with statistics
486
+ - Recency-aware queries
487
+
488
+ ### v2.8 - Full Context Access (Jul 2025)
489
+ - Complete conversation retrieval
490
+ - JSONL file access for deeper analysis
491
+ - Enhanced debugging capabilities
492
+
493
+ [View complete changelog →](docs/release-history.md)
393
494
 
394
495
  </details>
395
496
 
@@ -2,13 +2,15 @@ volumes:
2
2
  qdrant_data:
3
3
 
4
4
  services:
5
- # Fix permissions for config directory
5
+ # Fix permissions for config directory (UID 1001 matches appuser in Dockerfiles)
6
6
  init-permissions:
7
7
  image: alpine
8
- command: chown -R 1000:1000 /config
8
+ command: sh -c "chown -R 1001:1001 /config && chown -R 1001:1001 /batch_queue && chown -R 1001:1001 /batch_state"
9
9
  volumes:
10
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
11
- profiles: ["watch", "import", "async", "safe-watch"]
10
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/config
11
+ - ${CSR_BATCH_QUEUE_DIR:-${HOME}/.claude-self-reflect/batch_queue}:/batch_queue
12
+ - ${CSR_BATCH_STATE_DIR:-${HOME}/.claude-self-reflect/batch_state}:/batch_state
13
+ profiles: ["watch", "import", "async", "safe-watch", "batch-automation"]
12
14
 
13
15
  # Qdrant vector database - the heart of semantic search
14
16
  qdrant:
@@ -20,7 +22,7 @@ services:
20
22
  - qdrant_data:/qdrant/storage
21
23
  # Note: Using CONFIG_PATH variable to support global npm installs (fixes #71)
22
24
  # macOS Docker Desktop restricts mounts to /Users, /Volumes, /private, /tmp
23
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}/qdrant-config.yaml:/qdrant/config/config.yaml:ro
25
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}/qdrant-config.yaml:/qdrant/config/config.yaml:ro
24
26
  environment:
25
27
  - QDRANT__LOG_LEVEL=INFO
26
28
  - QDRANT__SERVICE__HTTP_PORT=6333
@@ -38,8 +40,8 @@ services:
38
40
  - init-permissions
39
41
  - qdrant
40
42
  volumes:
41
- - ${CLAUDE_LOGS_PATH:-~/.claude/projects}:/logs:ro
42
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
43
+ - ${CLAUDE_LOGS_PATH:-${HOME}/.claude/projects}:/logs:ro
44
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/config
43
45
  environment:
44
46
  - QDRANT_URL=http://qdrant:6333
45
47
  - STATE_FILE=/config/imported-files.json
@@ -67,8 +69,8 @@ services:
67
69
  - init-permissions
68
70
  - qdrant
69
71
  volumes:
70
- - ${CLAUDE_LOGS_PATH:-~/.claude/projects}:/logs:ro
71
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
72
+ - ${CLAUDE_LOGS_PATH:-${HOME}/.claude/projects}:/logs:ro
73
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/config
72
74
  - /tmp:/tmp
73
75
  environment:
74
76
  - QDRANT_URL=http://qdrant:6333
@@ -97,8 +99,8 @@ services:
97
99
  - init-permissions
98
100
  - qdrant
99
101
  volumes:
100
- - ${CLAUDE_LOGS_PATH:-~/.claude/projects}:/logs:ro
101
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
102
+ - ${CLAUDE_LOGS_PATH:-${HOME}/.claude/projects}:/logs:ro
103
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/config
102
104
  environment:
103
105
  - QDRANT_URL=http://qdrant:6333
104
106
  - STATE_FILE=/config/streaming-state.json # FIXED: Use streaming-specific state file
@@ -136,8 +138,8 @@ services:
136
138
  depends_on:
137
139
  - qdrant
138
140
  volumes:
139
- - ${CLAUDE_LOGS_PATH:-~/.claude/projects}:/logs:ro
140
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
141
+ - ${CLAUDE_LOGS_PATH:-${HOME}/.claude/projects}:/logs:ro
142
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/config
141
143
  environment:
142
144
  - QDRANT_URL=http://qdrant:6333
143
145
  - STATE_FILE=/config/imported-files.json
@@ -171,8 +173,8 @@ services:
171
173
  - init-permissions
172
174
  - qdrant
173
175
  volumes:
174
- - ${CLAUDE_LOGS_PATH:-~/.claude/projects}:/logs:ro
175
- - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
176
+ - ${CLAUDE_LOGS_PATH:-${HOME}/.claude/projects}:/logs:ro
177
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/config
176
178
  environment:
177
179
  - QDRANT_URL=http://qdrant:6333
178
180
  - STATE_FILE=/config/csr-watcher.json
@@ -221,6 +223,94 @@ services:
221
223
  tty: true
222
224
  profiles: ["mcp"]
223
225
 
226
+ # Batch watcher - Queues conversations and triggers batch narrative generation
227
+ # OPTIONAL: Requires ANTHROPIC_API_KEY. Disabled by default.
228
+ # Enable via CLI during installation or: docker compose --profile batch-automation up -d
229
+ batch-watcher:
230
+ build:
231
+ context: .
232
+ dockerfile: Dockerfile.batch-watcher
233
+ container_name: claude-reflection-batch-watcher
234
+ depends_on:
235
+ - init-permissions
236
+ - qdrant
237
+ volumes:
238
+ - ${CLAUDE_LOGS_PATH:-${HOME}/.claude/projects}:/logs:ro
239
+ - ${CONFIG_PATH:-${HOME}/.claude-self-reflect/config}:/home/appuser/.claude-self-reflect/config
240
+ - ${CSR_BATCH_QUEUE_DIR:-${HOME}/.claude-self-reflect/batch_queue}:/home/appuser/.claude-self-reflect/batch_queue
241
+ - ${CSR_BATCH_STATE_DIR:-${HOME}/.claude-self-reflect/batch_state}:/home/appuser/.claude-self-reflect/batch_state
242
+ environment:
243
+ - QDRANT_URL=http://qdrant:6333
244
+ - QDRANT_API_KEY=${QDRANT_API_KEY:-}
245
+ - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
246
+ - CSR_HOME=/home/appuser/.claude-self-reflect
247
+ - CSR_CONFIG_DIR=/home/appuser/.claude-self-reflect/config
248
+ - CSR_BATCH_QUEUE_DIR=/home/appuser/.claude-self-reflect/batch_queue
249
+ - CSR_BATCH_STATE_DIR=/home/appuser/.claude-self-reflect/batch_state
250
+ - CLAUDE_PROJECTS_DIR=/logs
251
+ - BATCH_SIZE_TRIGGER=${BATCH_SIZE_TRIGGER:-10}
252
+ - BATCH_TIME_TRIGGER_MINUTES=${BATCH_TIME_TRIGGER_MINUTES:-30}
253
+ - HOT_WINDOW_MINUTES=${HOT_WINDOW_MINUTES:-5}
254
+ - WARM_WINDOW_HOURS=${WARM_WINDOW_HOURS:-24}
255
+ - MAX_COLD_FILES=${MAX_COLD_FILES:-5}
256
+ - SUBPROCESS_TIMEOUT_SECONDS=${SUBPROCESS_TIMEOUT_SECONDS:-1800}
257
+ - PYTHONUNBUFFERED=1
258
+ - PYTHONPATH=/app
259
+ restart: unless-stopped
260
+ profiles: ["batch-automation"]
261
+ mem_limit: 2g
262
+ memswap_limit: 2g
263
+ healthcheck:
264
+ test: ["CMD-SHELL", "ps aux | grep -q '[p]ython.*batch_watcher' || exit 1"]
265
+ interval: 30s
266
+ timeout: 10s
267
+ retries: 3
268
+ start_period: 10s
269
+ logging:
270
+ driver: "json-file"
271
+ options:
272
+ max-size: "10m"
273
+ max-file: "3"
274
+ labels: "service=batch-watcher"
275
+
276
+ # Batch monitor - Monitors batch API jobs and triggers evaluations
277
+ # OPTIONAL: Requires ANTHROPIC_API_KEY. Disabled by default.
278
+ batch-monitor:
279
+ build:
280
+ context: .
281
+ dockerfile: Dockerfile.batch-monitor
282
+ container_name: claude-reflection-batch-monitor
283
+ depends_on:
284
+ - init-permissions
285
+ - qdrant
286
+ volumes:
287
+ - ${CSR_BATCH_STATE_DIR:-${HOME}/.claude-self-reflect/batch_state}:/home/appuser/.claude-self-reflect/batch_state
288
+ environment:
289
+ - QDRANT_URL=http://qdrant:6333
290
+ - QDRANT_API_KEY=${QDRANT_API_KEY:-}
291
+ - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
292
+ - CSR_HOME=/home/appuser/.claude-self-reflect
293
+ - CSR_BATCH_STATE_DIR=/home/appuser/.claude-self-reflect/batch_state
294
+ - CHECK_INTERVAL=${BATCH_MONITOR_INTERVAL:-60}
295
+ - PYTHONUNBUFFERED=1
296
+ - PYTHONPATH=/app
297
+ restart: unless-stopped
298
+ profiles: ["batch-automation"]
299
+ mem_limit: 512m
300
+ memswap_limit: 512m
301
+ healthcheck:
302
+ test: ["CMD-SHELL", "ps aux | grep -q '[p]ython.*batch_monitor' || exit 1"]
303
+ interval: 30s
304
+ timeout: 10s
305
+ retries: 3
306
+ start_period: 10s
307
+ logging:
308
+ driver: "json-file"
309
+ options:
310
+ max-size: "10m"
311
+ max-file: "3"
312
+ labels: "service=batch-monitor"
313
+
224
314
  networks:
225
315
  default:
226
316
  name: claude-reflection-network
@@ -557,6 +557,109 @@ async function enrichMetadata() {
557
557
  }
558
558
  }
559
559
 
560
+ async function setupBatchAutomation() {
561
+ console.log('\n🚀 AI-Powered Narratives (NEW in v7.0!)...');
562
+ console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
563
+ console.log('Transform your conversations into rich, searchable narratives.');
564
+ console.log('');
565
+ console.log('📊 Benefits:');
566
+ console.log(' • 9.3x better search quality (0.074 → 0.691 relevance score)');
567
+ console.log(' • 82% token compression while maintaining searchability');
568
+ console.log(' • 50% cost savings using Anthropic Batch API (~$0.012/conversation)');
569
+ console.log(' • Automatic extraction: tools used, files modified, concepts');
570
+ console.log('');
571
+ console.log('📝 What You Get:');
572
+ console.log(' • Problem-solution structured summaries');
573
+ console.log(' • Rich metadata (tools, concepts, files)');
574
+ console.log(' • Fully automated batch processing');
575
+ console.log('');
576
+ console.log('⚙️ How It Works:');
577
+ console.log(' 1. Background watcher queues new conversations');
578
+ console.log(' 2. Auto-triggers batch when threshold reached (default: 10)');
579
+ console.log(' 3. Anthropic Batch API generates narratives');
580
+ console.log(' 4. Enhanced narratives auto-imported to Qdrant');
581
+ console.log('');
582
+ console.log('🔐 Privacy: Conversations sent to Anthropic Batch API for narrative generation.');
583
+ console.log(' Review: https://www.anthropic.com/privacy');
584
+ console.log('');
585
+
586
+ const enableChoice = await question('Enable AI-powered narratives? (y/n) [recommended for best search]: ');
587
+
588
+ if (enableChoice.toLowerCase() === 'y') {
589
+ console.log('\n🔑 Anthropic API Key Required');
590
+ console.log(' Get your key: https://console.anthropic.com/settings/keys');
591
+ console.log(' Cost: ~$0.012 per conversation via Batch API');
592
+
593
+ const apiKey = await question('\nPaste your Anthropic API key (sk-ant-...): ');
594
+
595
+ if (apiKey && apiKey.trim().startsWith('sk-ant-')) {
596
+ // Read current .env
597
+ const envPath = path.join(projectRoot, '.env');
598
+ let envContent = '';
599
+ try {
600
+ envContent = await fs.readFile(envPath, 'utf8');
601
+ } catch {
602
+ // File doesn't exist yet
603
+ }
604
+
605
+ // Remove existing ANTHROPIC_API_KEY if present
606
+ envContent = envContent.replace(/ANTHROPIC_API_KEY=.*/g, '');
607
+
608
+ // Add new key
609
+ envContent += `\n# Batch Automation (v7.0 AI-Powered Narratives)\nANTHROPIC_API_KEY=${apiKey.trim()}\n`;
610
+
611
+ // Write back
612
+ await fs.writeFile(envPath, envContent.trim() + '\n');
613
+
614
+ console.log('✅ API key saved to .env');
615
+
616
+ // Start batch automation services
617
+ console.log('\n🚀 Starting batch automation services...');
618
+ try {
619
+ safeExec('docker', ['compose', '--profile', 'batch-automation', 'up', '-d'], {
620
+ cwd: projectRoot,
621
+ stdio: 'inherit'
622
+ });
623
+
624
+ console.log('\n✅ Batch automation enabled!');
625
+ console.log(' • batch-watcher: Monitors for new conversations');
626
+ console.log(' • batch-monitor: Processes narrative generation');
627
+ console.log('');
628
+ console.log('📊 Monitor Progress:');
629
+ console.log(' docker compose logs batch-watcher -f');
630
+ console.log(' docker compose logs batch-monitor -f');
631
+ console.log('');
632
+ console.log('🎯 Next: New conversations will be automatically enhanced with narratives');
633
+
634
+ } catch (error) {
635
+ console.log('\n⚠️ Could not start batch services automatically');
636
+ console.log(' Start manually: docker compose --profile batch-automation up -d');
637
+ }
638
+
639
+ } else if (apiKey && apiKey.trim()) {
640
+ console.log('\n❌ Invalid API key format. Anthropic keys start with "sk-ant-"');
641
+ console.log(' Skipping batch automation. You can enable it later by:');
642
+ console.log(' 1. Adding ANTHROPIC_API_KEY to .env');
643
+ console.log(' 2. Running: docker compose --profile batch-automation up -d');
644
+ } else {
645
+ console.log('\n📝 Skipping batch automation.');
646
+ console.log(' You can enable it later by:');
647
+ console.log(' 1. Get API key: https://console.anthropic.com/settings/keys');
648
+ console.log(' 2. Add to .env: ANTHROPIC_API_KEY=sk-ant-...');
649
+ console.log(' 3. Run: docker compose --profile batch-automation up -d');
650
+ }
651
+
652
+ } else {
653
+ console.log('\n📝 Skipping batch automation (staying with standard search).');
654
+ console.log(' You can enable AI narratives later by:');
655
+ console.log(' 1. Get API key: https://console.anthropic.com/settings/keys');
656
+ console.log(' 2. Add to .env: ANTHROPIC_API_KEY=sk-ant-...');
657
+ console.log(' 3. Run: docker compose --profile batch-automation up -d');
658
+ console.log('');
659
+ console.log('💡 Tip: Even without narratives, you still get excellent local search!');
660
+ }
661
+ }
662
+
560
663
  async function startWatcher() {
561
664
  console.log('\n🔄 Starting the streaming watcher...');
562
665
  console.log(' • HOT files (<5 min): 2-second processing');
@@ -687,10 +790,13 @@ async function main() {
687
790
 
688
791
  // Import conversations
689
792
  await importConversations();
690
-
793
+
691
794
  // Enrich metadata (new in v2.5.19)
692
795
  await enrichMetadata();
693
-
796
+
797
+ // Setup batch automation (new in v7.0)
798
+ await setupBatchAutomation();
799
+
694
800
  // Start the watcher
695
801
  await startWatcher();
696
802
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-self-reflect",
3
- "version": "6.0.5",
3
+ "version": "7.0.0",
4
4
  "description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
5
5
  "keywords": [
6
6
  "claude",