claude-self-reflect 2.8.5 → 2.8.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +112 -24
- package/mcp-server/src/health.py +190 -0
- package/mcp-server/src/project_resolver.py +45 -9
- package/package.json +1 -1
- package/scripts/import-conversations-unified.py +8 -5
- package/Dockerfile.mcp-server.bak +0 -20
- package/Dockerfile.watcher.bak +0 -50
- package/scripts/import-latest.py +0 -124
- package/scripts/import-old-format.py +0 -171
package/README.md
CHANGED
|
@@ -26,6 +26,21 @@ Give Claude perfect memory of all your conversations. Search past discussions in
|
|
|
26
26
|
|
|
27
27
|
**🔒 100% Local by Default** • **⚡ Blazing Fast Search** • **🚀 Zero Configuration** • **🏭 Production Ready**
|
|
28
28
|
|
|
29
|
+
## 📑 Table of Contents
|
|
30
|
+
|
|
31
|
+
- [🚀 Quick Install](#-quick-install)
|
|
32
|
+
- [✨ The Magic](#-the-magic)
|
|
33
|
+
- [📊 Before & After](#-before--after)
|
|
34
|
+
- [💬 Real Examples](#-real-examples)
|
|
35
|
+
- [🆕 NEW: Real-time Indexing Status](#-new-real-time-indexing-status-in-your-terminal)
|
|
36
|
+
- [🎯 Key Features](#-key-features)
|
|
37
|
+
- [🏗️ Architecture](#️-architecture)
|
|
38
|
+
- [🛠️ Requirements](#️-requirements)
|
|
39
|
+
- [📖 Documentation](#-documentation)
|
|
40
|
+
- [📦 What's New](#-whats-new)
|
|
41
|
+
- [🔧 Troubleshooting](#-troubleshooting)
|
|
42
|
+
- [👥 Contributors](#-contributors)
|
|
43
|
+
|
|
29
44
|
## 🚀 Quick Install
|
|
30
45
|
|
|
31
46
|
```bash
|
|
@@ -97,7 +112,9 @@ Works with [Claude Code Statusline](https://github.com/sirmalloc/ccstatusline) -
|
|
|
97
112
|
|
|
98
113
|
## 🎯 Key Features
|
|
99
114
|
|
|
100
|
-
|
|
115
|
+
<details>
|
|
116
|
+
<summary><b>📊 Statusline Integration</b></summary>
|
|
117
|
+
|
|
101
118
|
See your indexing progress right in your terminal! Works with [Claude Code Statusline](https://github.com/sirmalloc/ccstatusline):
|
|
102
119
|
- **Progress Bar** - Visual indicator `[████████ ] 91%`
|
|
103
120
|
- **Indexing Lag** - Shows backlog `• 7h behind`
|
|
@@ -106,7 +123,11 @@ See your indexing progress right in your terminal! Works with [Claude Code Statu
|
|
|
106
123
|
|
|
107
124
|
[Learn more about statusline integration →](docs/statusline-integration.md)
|
|
108
125
|
|
|
109
|
-
|
|
126
|
+
</details>
|
|
127
|
+
|
|
128
|
+
<details>
|
|
129
|
+
<summary><b>🔍 Project-Scoped Search</b></summary>
|
|
130
|
+
|
|
110
131
|
Searches are **project-aware by default**. Claude automatically searches within your current project:
|
|
111
132
|
|
|
112
133
|
```
|
|
@@ -119,21 +140,37 @@ You: "Search all projects for WebSocket implementations"
|
|
|
119
140
|
Claude: [Searches across ALL your projects]
|
|
120
141
|
```
|
|
121
142
|
|
|
122
|
-
|
|
143
|
+
</details>
|
|
144
|
+
|
|
145
|
+
<details>
|
|
146
|
+
<summary><b>⏱️ Memory Decay</b></summary>
|
|
147
|
+
|
|
123
148
|
Recent conversations matter more. Old ones fade. Like your brain, but reliable.
|
|
149
|
+
- **90-day half-life**: Recent memories stay strong
|
|
150
|
+
- **Graceful aging**: Old information fades naturally
|
|
151
|
+
- **Configurable**: Adjust decay rate to your needs
|
|
152
|
+
|
|
153
|
+
</details>
|
|
154
|
+
|
|
155
|
+
<details>
|
|
156
|
+
<summary><b>⚡ Performance at Scale</b></summary>
|
|
124
157
|
|
|
125
|
-
### ⚡ Performance at Scale
|
|
126
158
|
- **Search**: <3ms average response time
|
|
127
159
|
- **Scale**: 600+ conversations across 24 projects
|
|
128
160
|
- **Reliability**: 100% indexing success rate
|
|
129
161
|
- **Memory**: 96% reduction from v2.5.15
|
|
162
|
+
- **Real-time**: HOT/WARM/COLD intelligent prioritization
|
|
163
|
+
|
|
164
|
+
</details>
|
|
130
165
|
|
|
131
166
|
## 🏗️ Architecture
|
|
132
167
|
|
|
168
|
+
<details>
|
|
169
|
+
<summary><b>View Architecture Diagram & Details</b></summary>
|
|
170
|
+
|
|
133
171
|

|
|
134
172
|
|
|
135
|
-
|
|
136
|
-
<summary>🔥 HOT/WARM/COLD Intelligent Prioritization</summary>
|
|
173
|
+
### 🔥 HOT/WARM/COLD Intelligent Prioritization
|
|
137
174
|
|
|
138
175
|
- **🔥 HOT** (< 5 minutes): 2-second intervals for near real-time import
|
|
139
176
|
- **🌡️ WARM** (< 24 hours): Normal priority with starvation prevention
|
|
@@ -141,13 +178,37 @@ Recent conversations matter more. Old ones fade. Like your brain, but reliable.
|
|
|
141
178
|
|
|
142
179
|
Files are categorized by age and processed with priority queuing to ensure newest content gets imported quickly while preventing older files from being starved.
|
|
143
180
|
|
|
181
|
+
### Components
|
|
182
|
+
- **Vector Database**: Qdrant for semantic search
|
|
183
|
+
- **MCP Server**: Python-based using FastMCP
|
|
184
|
+
- **Embeddings**: Local (FastEmbed) or Cloud (Voyage AI)
|
|
185
|
+
- **Import Pipeline**: Docker-based with automatic monitoring
|
|
186
|
+
|
|
144
187
|
</details>
|
|
145
188
|
|
|
146
189
|
## 🛠️ Requirements
|
|
147
190
|
|
|
191
|
+
<details>
|
|
192
|
+
<summary><b>System Requirements</b></summary>
|
|
193
|
+
|
|
194
|
+
### Minimum Requirements
|
|
148
195
|
- **Docker Desktop** (macOS/Windows) or **Docker Engine** (Linux)
|
|
149
196
|
- **Node.js** 16+ (for the setup wizard)
|
|
150
197
|
- **Claude Code** CLI
|
|
198
|
+
- **4GB RAM** available for Docker
|
|
199
|
+
- **2GB disk space** for vector database
|
|
200
|
+
|
|
201
|
+
### Recommended
|
|
202
|
+
- **8GB RAM** for optimal performance
|
|
203
|
+
- **SSD storage** for faster indexing
|
|
204
|
+
- **Docker Desktop 4.0+** for best compatibility
|
|
205
|
+
|
|
206
|
+
### Operating Systems
|
|
207
|
+
- ✅ macOS 11+ (Intel & Apple Silicon)
|
|
208
|
+
- ✅ Windows 10/11 with WSL2
|
|
209
|
+
- ✅ Linux (Ubuntu 20.04+, Debian 11+)
|
|
210
|
+
|
|
211
|
+
</details>
|
|
151
212
|
|
|
152
213
|
## 📖 Documentation
|
|
153
214
|
|
|
@@ -254,9 +315,10 @@ docker compose run --rm importer python /app/scripts/delta-metadata-update-safe.
|
|
|
254
315
|
|
|
255
316
|
## 🔧 Troubleshooting
|
|
256
317
|
|
|
257
|
-
|
|
318
|
+
<details>
|
|
319
|
+
<summary><b>Common Issues and Solutions</b></summary>
|
|
258
320
|
|
|
259
|
-
|
|
321
|
+
### 1. "No collections created" after import
|
|
260
322
|
**Symptom**: Import runs but Qdrant shows no collections
|
|
261
323
|
**Cause**: Docker can't access Claude projects directory
|
|
262
324
|
**Solution**:
|
|
@@ -272,12 +334,12 @@ cat .env | grep CLAUDE_LOGS_PATH
|
|
|
272
334
|
# Should show: CLAUDE_LOGS_PATH=/Users/YOUR_NAME/.claude/projects
|
|
273
335
|
```
|
|
274
336
|
|
|
275
|
-
|
|
337
|
+
### 2. MCP server shows "ERROR" but it's actually INFO
|
|
276
338
|
**Symptom**: `[ERROR] MCP server "claude-self-reflect" Server stderr: INFO Starting MCP server`
|
|
277
339
|
**Cause**: Claude Code displays all stderr output as errors
|
|
278
340
|
**Solution**: This is not an actual error - the MCP is working correctly. The INFO message confirms successful startup.
|
|
279
341
|
|
|
280
|
-
|
|
342
|
+
### 3. "No JSONL files found"
|
|
281
343
|
**Symptom**: Setup can't find any conversation files
|
|
282
344
|
**Cause**: Claude Code hasn't been used yet or stores files elsewhere
|
|
283
345
|
**Solution**:
|
|
@@ -289,7 +351,7 @@ ls ~/.claude/projects/
|
|
|
289
351
|
# The watcher will import them automatically
|
|
290
352
|
```
|
|
291
353
|
|
|
292
|
-
|
|
354
|
+
### 4. Docker volume mount issues
|
|
293
355
|
**Symptom**: Import fails with permission errors
|
|
294
356
|
**Cause**: Docker can't access home directory
|
|
295
357
|
**Solution**:
|
|
@@ -303,7 +365,7 @@ docker compose down
|
|
|
303
365
|
claude-self-reflect setup
|
|
304
366
|
```
|
|
305
367
|
|
|
306
|
-
|
|
368
|
+
### 5. Qdrant not accessible
|
|
307
369
|
**Symptom**: Can't connect to localhost:6333
|
|
308
370
|
**Solution**:
|
|
309
371
|
```bash
|
|
@@ -317,9 +379,12 @@ docker compose ps
|
|
|
317
379
|
docker compose logs qdrant
|
|
318
380
|
```
|
|
319
381
|
|
|
320
|
-
|
|
382
|
+
</details>
|
|
321
383
|
|
|
322
|
-
|
|
384
|
+
<details>
|
|
385
|
+
<summary><b>Diagnostic Tools</b></summary>
|
|
386
|
+
|
|
387
|
+
### Run Comprehensive Diagnostics
|
|
323
388
|
```bash
|
|
324
389
|
claude-self-reflect doctor
|
|
325
390
|
```
|
|
@@ -331,18 +396,41 @@ This checks:
|
|
|
331
396
|
- Import status and collections
|
|
332
397
|
- Service health
|
|
333
398
|
|
|
334
|
-
###
|
|
399
|
+
### Check Logs
|
|
400
|
+
```bash
|
|
401
|
+
# View all service logs
|
|
402
|
+
docker compose logs -f
|
|
403
|
+
|
|
404
|
+
# View specific service
|
|
405
|
+
docker compose logs qdrant
|
|
406
|
+
docker compose logs watcher
|
|
407
|
+
```
|
|
408
|
+
|
|
409
|
+
### Generate Diagnostic Report
|
|
410
|
+
```bash
|
|
411
|
+
# Create diagnostic file for issue reporting
|
|
412
|
+
claude-self-reflect doctor > diagnostic.txt
|
|
413
|
+
```
|
|
335
414
|
|
|
336
|
-
|
|
337
|
-
```bash
|
|
338
|
-
docker compose logs -f
|
|
339
|
-
```
|
|
415
|
+
</details>
|
|
340
416
|
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
417
|
+
<details>
|
|
418
|
+
<summary><b>Getting Help</b></summary>
|
|
419
|
+
|
|
420
|
+
1. **Check Documentation**
|
|
421
|
+
- [Troubleshooting Guide](docs/troubleshooting.md)
|
|
422
|
+
- [FAQ](docs/faq.md)
|
|
423
|
+
- [Windows Setup](docs/windows-setup.md)
|
|
424
|
+
|
|
425
|
+
2. **Community Support**
|
|
426
|
+
- [GitHub Discussions](https://github.com/ramakay/claude-self-reflect/discussions)
|
|
427
|
+
- [Discord Community](https://discord.gg/claude-self-reflect)
|
|
428
|
+
|
|
429
|
+
3. **Report Issues**
|
|
430
|
+
- [GitHub Issues](https://github.com/ramakay/claude-self-reflect/issues)
|
|
431
|
+
- Include diagnostic output when reporting
|
|
432
|
+
|
|
433
|
+
</details>
|
|
346
434
|
|
|
347
435
|
## 👥 Contributors
|
|
348
436
|
|
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Health check endpoint for Claude Self-Reflect system
|
|
4
|
+
|
|
5
|
+
Provides a simple way to monitor system health and import status.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import sys
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from datetime import datetime, timedelta
|
|
13
|
+
from typing import Dict, Any
|
|
14
|
+
|
|
15
|
+
# No sys.path modification needed - using subprocess for imports
|
|
16
|
+
|
|
17
|
+
def check_qdrant_health() -> Dict[str, Any]:
|
|
18
|
+
"""Check if Qdrant is accessible and has data"""
|
|
19
|
+
try:
|
|
20
|
+
from qdrant_client import QdrantClient
|
|
21
|
+
from qdrant_client.http.exceptions import ResponseHandlingException
|
|
22
|
+
|
|
23
|
+
# Add timeout for network operations
|
|
24
|
+
client = QdrantClient(
|
|
25
|
+
'localhost',
|
|
26
|
+
port=6333,
|
|
27
|
+
timeout=5 # 5 second timeout
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
collections = client.get_collections().collections
|
|
31
|
+
|
|
32
|
+
return {
|
|
33
|
+
'status': 'healthy',
|
|
34
|
+
'collections': len(collections),
|
|
35
|
+
'accessible': True
|
|
36
|
+
}
|
|
37
|
+
except (ResponseHandlingException, ConnectionError, TimeoutError) as e:
|
|
38
|
+
# Sanitize error messages to avoid information disclosure
|
|
39
|
+
return {
|
|
40
|
+
'status': 'unhealthy',
|
|
41
|
+
'error': 'Connection failed',
|
|
42
|
+
'accessible': False
|
|
43
|
+
}
|
|
44
|
+
except Exception as e:
|
|
45
|
+
return {
|
|
46
|
+
'status': 'unhealthy',
|
|
47
|
+
'error': 'Service unavailable',
|
|
48
|
+
'accessible': False
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
def check_import_status() -> Dict[str, Any]:
|
|
52
|
+
"""Check import status from status.py"""
|
|
53
|
+
try:
|
|
54
|
+
# Run status.py and parse output
|
|
55
|
+
import subprocess
|
|
56
|
+
import json
|
|
57
|
+
|
|
58
|
+
result = subprocess.run(
|
|
59
|
+
[sys.executable, str(Path(__file__).parent / "status.py")],
|
|
60
|
+
capture_output=True,
|
|
61
|
+
text=True,
|
|
62
|
+
timeout=10
|
|
63
|
+
)
|
|
64
|
+
|
|
65
|
+
if result.returncode == 0:
|
|
66
|
+
status = json.loads(result.stdout)
|
|
67
|
+
return {
|
|
68
|
+
'percentage': status['overall']['percentage'],
|
|
69
|
+
'indexed': status['overall']['indexed'],
|
|
70
|
+
'total': status['overall']['total'],
|
|
71
|
+
'backlog': status['overall']['backlog']
|
|
72
|
+
}
|
|
73
|
+
else:
|
|
74
|
+
return {
|
|
75
|
+
'error': result.stderr,
|
|
76
|
+
'percentage': 0
|
|
77
|
+
}
|
|
78
|
+
except Exception as e:
|
|
79
|
+
return {
|
|
80
|
+
'error': str(e),
|
|
81
|
+
'percentage': 0
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
def check_watcher_status() -> Dict[str, Any]:
|
|
85
|
+
"""Check if Docker watcher is running"""
|
|
86
|
+
try:
|
|
87
|
+
import subprocess
|
|
88
|
+
result = subprocess.run(
|
|
89
|
+
["docker", "ps", "--filter", "name=claude-reflection-safe-watcher", "--format", "{{.Status}}"],
|
|
90
|
+
capture_output=True,
|
|
91
|
+
text=True,
|
|
92
|
+
timeout=5
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
if result.stdout and "Up" in result.stdout:
|
|
96
|
+
return {
|
|
97
|
+
'status': 'running',
|
|
98
|
+
'details': result.stdout.strip()
|
|
99
|
+
}
|
|
100
|
+
else:
|
|
101
|
+
return {
|
|
102
|
+
'status': 'stopped',
|
|
103
|
+
'details': 'Container not running'
|
|
104
|
+
}
|
|
105
|
+
except Exception as e:
|
|
106
|
+
return {
|
|
107
|
+
'status': 'unknown',
|
|
108
|
+
'error': 'Docker check failed'
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
def check_recent_imports() -> Dict[str, Any]:
|
|
112
|
+
"""Check for recent import activity"""
|
|
113
|
+
try:
|
|
114
|
+
import_file = Path.home() / ".claude-self-reflect" / "config" / "imported-files.json"
|
|
115
|
+
if import_file.exists():
|
|
116
|
+
mtime = datetime.fromtimestamp(import_file.stat().st_mtime)
|
|
117
|
+
age = datetime.now() - mtime
|
|
118
|
+
|
|
119
|
+
return {
|
|
120
|
+
'last_import': mtime.isoformat(),
|
|
121
|
+
'hours_ago': round(age.total_seconds() / 3600, 1),
|
|
122
|
+
'active': age < timedelta(hours=24)
|
|
123
|
+
}
|
|
124
|
+
return {
|
|
125
|
+
'last_import': None,
|
|
126
|
+
'active': False
|
|
127
|
+
}
|
|
128
|
+
except Exception as e:
|
|
129
|
+
return {
|
|
130
|
+
'error': 'Import check failed',
|
|
131
|
+
'active': False
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
def get_health_status() -> Dict[str, Any]:
|
|
135
|
+
"""Get comprehensive health status"""
|
|
136
|
+
|
|
137
|
+
# Collect all health checks
|
|
138
|
+
qdrant = check_qdrant_health()
|
|
139
|
+
imports = check_import_status()
|
|
140
|
+
watcher = check_watcher_status()
|
|
141
|
+
recent = check_recent_imports()
|
|
142
|
+
|
|
143
|
+
# Determine overall health
|
|
144
|
+
is_healthy = (
|
|
145
|
+
qdrant.get('accessible', False) and
|
|
146
|
+
imports.get('percentage', 0) > 95 and
|
|
147
|
+
watcher.get('status') == 'running'
|
|
148
|
+
)
|
|
149
|
+
|
|
150
|
+
return {
|
|
151
|
+
'timestamp': datetime.now().isoformat(),
|
|
152
|
+
'healthy': is_healthy,
|
|
153
|
+
'status': 'healthy' if is_healthy else 'degraded',
|
|
154
|
+
'components': {
|
|
155
|
+
'qdrant': qdrant,
|
|
156
|
+
'imports': imports,
|
|
157
|
+
'watcher': watcher,
|
|
158
|
+
'recent_activity': recent
|
|
159
|
+
},
|
|
160
|
+
'summary': {
|
|
161
|
+
'import_percentage': imports.get('percentage', 0),
|
|
162
|
+
'collections': qdrant.get('collections', 0),
|
|
163
|
+
'watcher_running': watcher.get('status') == 'running',
|
|
164
|
+
'recent_imports': recent.get('active', False)
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
def main():
|
|
169
|
+
"""Main entry point for CLI usage"""
|
|
170
|
+
try:
|
|
171
|
+
health = get_health_status()
|
|
172
|
+
|
|
173
|
+
# Pretty print
|
|
174
|
+
print(json.dumps(health, indent=2))
|
|
175
|
+
|
|
176
|
+
# Exit code based on health
|
|
177
|
+
sys.exit(0 if health['healthy'] else 1)
|
|
178
|
+
|
|
179
|
+
except Exception as e:
|
|
180
|
+
error_response = {
|
|
181
|
+
'timestamp': datetime.now().isoformat(),
|
|
182
|
+
'healthy': False,
|
|
183
|
+
'status': 'error',
|
|
184
|
+
'error': 'Health check failed'
|
|
185
|
+
}
|
|
186
|
+
print(json.dumps(error_response, indent=2))
|
|
187
|
+
sys.exit(2)
|
|
188
|
+
|
|
189
|
+
if __name__ == "__main__":
|
|
190
|
+
main()
|
|
@@ -76,31 +76,64 @@ class ProjectResolver:
|
|
|
76
76
|
return []
|
|
77
77
|
|
|
78
78
|
# Strategy 1: Direct hash of input (handles full paths)
|
|
79
|
-
|
|
79
|
+
# Try both MD5 (used by streaming-watcher) and SHA256 (legacy)
|
|
80
|
+
direct_hash_md5 = hashlib.md5(user_project_name.encode()).hexdigest()[:8]
|
|
81
|
+
direct_hash_sha256 = hashlib.sha256(user_project_name.encode()).hexdigest()[:16]
|
|
82
|
+
|
|
80
83
|
# Match exact hash segment between underscores, not substring
|
|
81
84
|
direct_matches = [c for c in collection_names
|
|
82
|
-
if f"_{
|
|
85
|
+
if f"_{direct_hash_md5}_" in c or c.endswith(f"_{direct_hash_md5}") or
|
|
86
|
+
f"_{direct_hash_sha256}_" in c or c.endswith(f"_{direct_hash_sha256}")]
|
|
83
87
|
matching_collections.update(direct_matches)
|
|
84
88
|
|
|
85
89
|
# Strategy 2: Try normalized version
|
|
86
90
|
normalized = self._normalize_project_name(user_project_name)
|
|
87
91
|
if normalized != user_project_name:
|
|
88
|
-
|
|
92
|
+
norm_hash_md5 = hashlib.md5(normalized.encode()).hexdigest()[:8]
|
|
93
|
+
norm_hash_sha256 = hashlib.sha256(normalized.encode()).hexdigest()[:16]
|
|
94
|
+
|
|
89
95
|
# Match exact hash segment between underscores, not substring
|
|
90
96
|
norm_matches = [c for c in collection_names
|
|
91
|
-
if f"_{
|
|
97
|
+
if f"_{norm_hash_md5}_" in c or c.endswith(f"_{norm_hash_md5}") or
|
|
98
|
+
f"_{norm_hash_sha256}_" in c or c.endswith(f"_{norm_hash_sha256}")]
|
|
92
99
|
matching_collections.update(norm_matches)
|
|
93
100
|
|
|
94
101
|
# Strategy 3: Case-insensitive normalized version
|
|
95
102
|
lower_normalized = normalized.lower()
|
|
96
103
|
if lower_normalized != normalized:
|
|
97
|
-
|
|
104
|
+
lower_hash_md5 = hashlib.md5(lower_normalized.encode()).hexdigest()[:8]
|
|
105
|
+
lower_hash_sha256 = hashlib.sha256(lower_normalized.encode()).hexdigest()[:16]
|
|
106
|
+
|
|
98
107
|
# Match exact hash segment between underscores, not substring
|
|
99
108
|
lower_matches = [c for c in collection_names
|
|
100
|
-
if f"_{
|
|
109
|
+
if f"_{lower_hash_md5}_" in c or c.endswith(f"_{lower_hash_md5}") or
|
|
110
|
+
f"_{lower_hash_sha256}_" in c or c.endswith(f"_{lower_hash_sha256}")]
|
|
101
111
|
matching_collections.update(lower_matches)
|
|
102
112
|
|
|
103
|
-
# Strategy 4:
|
|
113
|
+
# Strategy 4: ALWAYS try mapping project name to full directory path in .claude/projects/
|
|
114
|
+
# This ensures we find all related collections, not just the first match
|
|
115
|
+
# This handles the case where streaming-watcher uses full path but MCP uses short name
|
|
116
|
+
if not user_project_name.startswith('-'):
|
|
117
|
+
# Check if there's a matching directory in .claude/projects/
|
|
118
|
+
projects_dir = Path.home() / ".claude" / "projects"
|
|
119
|
+
if projects_dir.exists():
|
|
120
|
+
for proj_dir in projects_dir.iterdir():
|
|
121
|
+
if proj_dir.is_dir():
|
|
122
|
+
# Check if the directory name contains the project name
|
|
123
|
+
# This handles both "claude-self-reflect" and "-Users-...-projects-claude-self-reflect"
|
|
124
|
+
if (proj_dir.name.endswith(f"-{user_project_name}") or
|
|
125
|
+
f"-{user_project_name}" in proj_dir.name or
|
|
126
|
+
proj_dir.name == user_project_name):
|
|
127
|
+
# Found a matching directory - hash its name
|
|
128
|
+
dir_name = proj_dir.name
|
|
129
|
+
dir_hash_md5 = hashlib.md5(dir_name.encode()).hexdigest()[:8]
|
|
130
|
+
|
|
131
|
+
# Find collections with this hash
|
|
132
|
+
dir_matches = [c for c in collection_names
|
|
133
|
+
if f"_{dir_hash_md5}_" in c or c.endswith(f"_{dir_hash_md5}")]
|
|
134
|
+
matching_collections.update(dir_matches)
|
|
135
|
+
|
|
136
|
+
# Strategy 5: Use segment-based discovery for complex paths
|
|
104
137
|
if not matching_collections:
|
|
105
138
|
# Extract segments from the input
|
|
106
139
|
segments = self._extract_project_segments(user_project_name)
|
|
@@ -111,10 +144,13 @@ class ProjectResolver:
|
|
|
111
144
|
|
|
112
145
|
# Try each candidate
|
|
113
146
|
for candidate in candidates:
|
|
114
|
-
|
|
147
|
+
candidate_hash_md5 = hashlib.md5(candidate.encode()).hexdigest()[:8]
|
|
148
|
+
candidate_hash_sha256 = hashlib.sha256(candidate.encode()).hexdigest()[:16]
|
|
149
|
+
|
|
115
150
|
# Match exact hash segment between underscores, not substring
|
|
116
151
|
candidate_matches = [c for c in collection_names
|
|
117
|
-
if f"_{
|
|
152
|
+
if f"_{candidate_hash_md5}_" in c or c.endswith(f"_{candidate_hash_md5}") or
|
|
153
|
+
f"_{candidate_hash_sha256}_" in c or c.endswith(f"_{candidate_hash_sha256}")]
|
|
118
154
|
matching_collections.update(candidate_matches)
|
|
119
155
|
|
|
120
156
|
# Stop if we found matches
|
package/package.json
CHANGED
|
@@ -30,13 +30,16 @@ logger = logging.getLogger(__name__)
|
|
|
30
30
|
|
|
31
31
|
# Environment variables
|
|
32
32
|
QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
|
|
33
|
-
STATE_FILE = os.getenv("STATE_FILE", "/config/imported-files.json")
|
|
33
|
+
STATE_FILE = os.getenv("STATE_FILE", os.path.expanduser("~/.claude-self-reflect/config/imported-files.json"))
|
|
34
34
|
PREFER_LOCAL_EMBEDDINGS = os.getenv("PREFER_LOCAL_EMBEDDINGS", "true").lower() == "true"
|
|
35
35
|
VOYAGE_API_KEY = os.getenv("VOYAGE_KEY")
|
|
36
36
|
MAX_CHUNK_SIZE = int(os.getenv("MAX_CHUNK_SIZE", "50")) # Messages per chunk
|
|
37
37
|
|
|
38
|
-
# Initialize Qdrant client
|
|
39
|
-
client = QdrantClient(
|
|
38
|
+
# Initialize Qdrant client with timeout
|
|
39
|
+
client = QdrantClient(
|
|
40
|
+
url=QDRANT_URL,
|
|
41
|
+
timeout=30 # 30 second timeout for network operations
|
|
42
|
+
)
|
|
40
43
|
|
|
41
44
|
# Initialize embedding provider
|
|
42
45
|
embedding_provider = None
|
|
@@ -138,11 +141,11 @@ def process_and_upload_chunk(messages: List[Dict[str, Any]], chunk_index: int,
|
|
|
138
141
|
payload=payload
|
|
139
142
|
)
|
|
140
143
|
|
|
141
|
-
# Upload immediately
|
|
144
|
+
# Upload immediately (no wait for better throughput)
|
|
142
145
|
client.upsert(
|
|
143
146
|
collection_name=collection_name,
|
|
144
147
|
points=[point],
|
|
145
|
-
wait=
|
|
148
|
+
wait=False # Don't wait for indexing to complete
|
|
146
149
|
)
|
|
147
150
|
|
|
148
151
|
return 1
|
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
FROM python:3.13-slim
|
|
2
|
-
|
|
3
|
-
WORKDIR /app
|
|
4
|
-
|
|
5
|
-
# Update system packages for security
|
|
6
|
-
RUN apt-get update && apt-get upgrade -y && rm -rf /var/lib/apt/lists/*
|
|
7
|
-
|
|
8
|
-
# Copy the MCP server package files
|
|
9
|
-
COPY mcp-server/pyproject.toml ./
|
|
10
|
-
COPY mcp-server/src ./src
|
|
11
|
-
|
|
12
|
-
# Install the package in development mode
|
|
13
|
-
RUN pip install --no-cache-dir -e .
|
|
14
|
-
|
|
15
|
-
# Create a non-root user
|
|
16
|
-
RUN useradd -m -u 1000 mcpuser
|
|
17
|
-
USER mcpuser
|
|
18
|
-
|
|
19
|
-
# Keep the container running and wait for docker exec commands
|
|
20
|
-
CMD ["tail", "-f", "/dev/null"]
|
package/Dockerfile.watcher.bak
DELETED
|
@@ -1,50 +0,0 @@
|
|
|
1
|
-
FROM python:3.13-slim
|
|
2
|
-
|
|
3
|
-
# Update system packages for security and install build dependencies for psutil
|
|
4
|
-
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
|
5
|
-
gcc \
|
|
6
|
-
python3-dev \
|
|
7
|
-
&& rm -rf /var/lib/apt/lists/*
|
|
8
|
-
|
|
9
|
-
# Install Python dependencies
|
|
10
|
-
RUN pip install --no-cache-dir \
|
|
11
|
-
psutil==5.9.5 \
|
|
12
|
-
qdrant-client>=1.7.0 \
|
|
13
|
-
openai>=1.0.0 \
|
|
14
|
-
backoff>=2.2.0 \
|
|
15
|
-
requests>=2.31.0 \
|
|
16
|
-
tqdm>=4.66.0 \
|
|
17
|
-
voyageai>=0.2.0 \
|
|
18
|
-
fastembed>=0.4.0
|
|
19
|
-
|
|
20
|
-
# Create non-root user
|
|
21
|
-
RUN useradd -m -u 1000 watcher
|
|
22
|
-
|
|
23
|
-
# Pre-download FastEmbed model to avoid runtime downloads
|
|
24
|
-
RUN mkdir -p /home/watcher/.cache && \
|
|
25
|
-
FASTEMBED_CACHE_PATH=/home/watcher/.cache/fastembed python -c "from fastembed import TextEmbedding; import os; os.environ['FASTEMBED_CACHE_PATH']='/home/watcher/.cache/fastembed'; TextEmbedding('sentence-transformers/all-MiniLM-L6-v2')" && \
|
|
26
|
-
chown -R watcher:watcher /home/watcher/.cache
|
|
27
|
-
|
|
28
|
-
# Create scripts directory and copy required files
|
|
29
|
-
RUN mkdir -p /scripts
|
|
30
|
-
|
|
31
|
-
# Copy all necessary scripts
|
|
32
|
-
COPY scripts/import-conversations-unified.py /scripts/
|
|
33
|
-
COPY scripts/import-watcher.py /scripts/
|
|
34
|
-
COPY scripts/streaming-importer.py /scripts/
|
|
35
|
-
COPY scripts/utils.py /scripts/
|
|
36
|
-
COPY scripts/trigger-import.py /scripts/
|
|
37
|
-
|
|
38
|
-
# Copy MCP server directory for utils
|
|
39
|
-
COPY mcp-server/src/utils.py /mcp-server/src/utils.py
|
|
40
|
-
|
|
41
|
-
RUN chmod +x /scripts/*.py
|
|
42
|
-
|
|
43
|
-
# Set working directory
|
|
44
|
-
WORKDIR /app
|
|
45
|
-
|
|
46
|
-
# Switch to non-root user
|
|
47
|
-
USER watcher
|
|
48
|
-
|
|
49
|
-
# Default command - use streaming importer for low memory usage
|
|
50
|
-
CMD ["python", "/scripts/streaming-importer.py"]
|
package/scripts/import-latest.py
DELETED
|
@@ -1,124 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Quick import script for current project's latest conversations.
|
|
4
|
-
Designed for PreCompact hook integration - targets <10 second imports.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
import os
|
|
8
|
-
import sys
|
|
9
|
-
import json
|
|
10
|
-
import subprocess
|
|
11
|
-
from datetime import datetime, timedelta
|
|
12
|
-
from pathlib import Path
|
|
13
|
-
import logging
|
|
14
|
-
|
|
15
|
-
# Configuration
|
|
16
|
-
LOGS_DIR = os.getenv("LOGS_DIR", os.path.expanduser("~/.claude/projects"))
|
|
17
|
-
STATE_FILE = os.getenv("STATE_FILE", os.path.expanduser("~/.claude-self-reflect-state.json"))
|
|
18
|
-
HOURS_BACK = int(os.getenv("IMPORT_HOURS_BACK", "2")) # Only import last 2 hours by default
|
|
19
|
-
|
|
20
|
-
# Set up logging
|
|
21
|
-
logging.basicConfig(
|
|
22
|
-
level=logging.INFO,
|
|
23
|
-
format='%(asctime)s - %(levelname)s - %(message)s'
|
|
24
|
-
)
|
|
25
|
-
logger = logging.getLogger(__name__)
|
|
26
|
-
|
|
27
|
-
def load_state():
|
|
28
|
-
"""Load import state from file."""
|
|
29
|
-
if os.path.exists(STATE_FILE):
|
|
30
|
-
try:
|
|
31
|
-
with open(STATE_FILE, 'r') as f:
|
|
32
|
-
return json.load(f)
|
|
33
|
-
except:
|
|
34
|
-
return {}
|
|
35
|
-
return {}
|
|
36
|
-
|
|
37
|
-
def save_state(state):
|
|
38
|
-
"""Save import state to file."""
|
|
39
|
-
os.makedirs(os.path.dirname(STATE_FILE), exist_ok=True)
|
|
40
|
-
with open(STATE_FILE, 'w') as f:
|
|
41
|
-
json.dump(state, f, indent=2)
|
|
42
|
-
|
|
43
|
-
def get_project_from_cwd():
|
|
44
|
-
"""Detect project from current working directory."""
|
|
45
|
-
cwd = os.getcwd()
|
|
46
|
-
# Convert path to project name format used in logs
|
|
47
|
-
# Claude logs use format: -Users-username-path-to-project
|
|
48
|
-
project_name = cwd.replace('/', '-')
|
|
49
|
-
# Keep the leading dash as that's how Claude stores it
|
|
50
|
-
if not project_name.startswith('-'):
|
|
51
|
-
project_name = '-' + project_name
|
|
52
|
-
return project_name
|
|
53
|
-
|
|
54
|
-
def get_recent_files(project_path: Path, hours_back: int):
|
|
55
|
-
"""Get JSONL files modified in the last N hours."""
|
|
56
|
-
cutoff_time = datetime.now() - timedelta(hours=hours_back)
|
|
57
|
-
recent_files = []
|
|
58
|
-
|
|
59
|
-
for jsonl_file in project_path.glob("*.jsonl"):
|
|
60
|
-
mtime = datetime.fromtimestamp(jsonl_file.stat().st_mtime)
|
|
61
|
-
if mtime > cutoff_time:
|
|
62
|
-
recent_files.append(jsonl_file)
|
|
63
|
-
|
|
64
|
-
return sorted(recent_files, key=lambda f: f.stat().st_mtime, reverse=True)
|
|
65
|
-
|
|
66
|
-
def main():
|
|
67
|
-
"""Main quick import function."""
|
|
68
|
-
start_time = datetime.now()
|
|
69
|
-
|
|
70
|
-
# Detect current project
|
|
71
|
-
project_name = get_project_from_cwd()
|
|
72
|
-
project_path = Path(LOGS_DIR) / project_name
|
|
73
|
-
|
|
74
|
-
if not project_path.exists():
|
|
75
|
-
logger.warning(f"Project logs not found: {project_path}")
|
|
76
|
-
logger.info("Make sure you're in a project directory with Claude conversations.")
|
|
77
|
-
return
|
|
78
|
-
|
|
79
|
-
logger.info(f"Quick importing latest conversations for: {project_name}")
|
|
80
|
-
|
|
81
|
-
# Get recent files
|
|
82
|
-
recent_files = get_recent_files(project_path, HOURS_BACK)
|
|
83
|
-
logger.info(f"Found {len(recent_files)} files modified in last {HOURS_BACK} hours")
|
|
84
|
-
|
|
85
|
-
if not recent_files:
|
|
86
|
-
logger.info("No recent conversations to import")
|
|
87
|
-
return
|
|
88
|
-
|
|
89
|
-
# For now, just call the unified importer with the specific project
|
|
90
|
-
# This is a temporary solution until we implement incremental imports
|
|
91
|
-
script_dir = os.path.dirname(os.path.abspath(__file__))
|
|
92
|
-
unified_script = os.path.join(script_dir, "import-conversations-unified.py")
|
|
93
|
-
|
|
94
|
-
# Set environment to only process this project
|
|
95
|
-
env = os.environ.copy()
|
|
96
|
-
env['LOGS_DIR'] = str(project_path.parent)
|
|
97
|
-
env['IMPORT_PROJECT'] = project_name
|
|
98
|
-
|
|
99
|
-
try:
|
|
100
|
-
# Run the unified importer for just this project
|
|
101
|
-
result = subprocess.run(
|
|
102
|
-
[sys.executable, unified_script],
|
|
103
|
-
env=env,
|
|
104
|
-
capture_output=True,
|
|
105
|
-
text=True,
|
|
106
|
-
timeout=60 # 60 second timeout
|
|
107
|
-
)
|
|
108
|
-
|
|
109
|
-
if result.returncode == 0:
|
|
110
|
-
logger.info("Quick import completed successfully")
|
|
111
|
-
else:
|
|
112
|
-
logger.error(f"Import failed: {result.stderr}")
|
|
113
|
-
|
|
114
|
-
except subprocess.TimeoutExpired:
|
|
115
|
-
logger.warning("Import timed out after 60 seconds")
|
|
116
|
-
except Exception as e:
|
|
117
|
-
logger.error(f"Error during import: {e}")
|
|
118
|
-
|
|
119
|
-
# Report timing
|
|
120
|
-
elapsed = (datetime.now() - start_time).total_seconds()
|
|
121
|
-
logger.info(f"Quick import completed in {elapsed:.1f} seconds")
|
|
122
|
-
|
|
123
|
-
if __name__ == "__main__":
|
|
124
|
-
main()
|
|
@@ -1,171 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Import old format JSONL files from Claude conversations.
|
|
4
|
-
These files have a different structure with type/summary fields instead of messages.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
import json
|
|
8
|
-
import sys
|
|
9
|
-
from pathlib import Path
|
|
10
|
-
import hashlib
|
|
11
|
-
import uuid
|
|
12
|
-
from datetime import datetime
|
|
13
|
-
from qdrant_client import QdrantClient
|
|
14
|
-
from qdrant_client.models import Distance, VectorParams, PointStruct
|
|
15
|
-
from fastembed import TextEmbedding
|
|
16
|
-
import logging
|
|
17
|
-
|
|
18
|
-
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
19
|
-
logger = logging.getLogger(__name__)
|
|
20
|
-
|
|
21
|
-
def _build_chunk_point(chunk_text: str, embedding, project_path: str,
                       file_path: Path, chunk_index: int,
                       file_timestamp: float) -> PointStruct:
    """Create a Qdrant point for one conversation chunk.

    The payload stores a short preview (``content``) plus a longer window
    (``full_content``) so search hits can show context without re-reading
    the original JSONL file.
    """
    return PointStruct(
        id=str(uuid.uuid4()),
        vector=embedding.tolist(),
        payload={
            'content': chunk_text[:1000],       # preview shown in search results
            'full_content': chunk_text[:4000],  # larger window for context
            'project_path': project_path,
            'file_path': str(file_path),
            'file_name': file_path.name,
            'conversation_id': file_path.stem,
            'chunk_index': chunk_index,
            'timestamp': file_timestamp,
            'type': 'conversation_chunk'
        }
    )


def import_old_format_project(project_dir: Path, project_path: str = None):
    """Import old-format JSONL files from *project_dir* into Qdrant.

    Old-format lines carry ``type``/``summary`` fields instead of full
    message objects.  Extracted messages are grouped into chunks of five,
    embedded with a local MiniLM model, and upserted into a per-project
    collection named from an md5 hash of the project path.

    Args:
        project_dir: Directory containing ``*.jsonl`` conversation files.
        project_path: Original filesystem path of the project; derived from
            the flattened directory name when omitted.

    Returns:
        Tuple of ``(collection_name, total_points_added)``.
    """
    client = QdrantClient(url='http://localhost:6333')
    model = TextEmbedding(model_name='sentence-transformers/all-MiniLM-L6-v2', max_length=512)

    # Derive the project path from the flattened directory name, e.g.
    # "-Users-me-projects-app" -> "/Users/me/projects/app".
    if not project_path:
        dir_name = project_dir.name
        project_path = '/' + dir_name.strip('-').replace('-', '/')

    # md5 is used only as a short, stable collection-name hash (not security).
    project_hash = hashlib.md5(project_path.encode()).hexdigest()[:8]
    collection_name = f'conv_{project_hash}_local'

    logger.info(f'Project: {project_path}')
    logger.info(f'Collection: {collection_name}')

    # Create the collection on first use.  Narrowed from a bare "except:"
    # so KeyboardInterrupt/SystemExit still propagate to the caller.
    try:
        client.get_collection(collection_name)
        logger.info('Collection exists')
    except Exception:
        client.create_collection(
            collection_name=collection_name,
            vectors_config=VectorParams(size=384, distance=Distance.COSINE)
        )
        logger.info('Created collection')

    jsonl_files = list(project_dir.glob('*.jsonl'))
    logger.info(f'Found {len(jsonl_files)} files to import')

    total_points = 0
    for file_path in jsonl_files:
        logger.info(f'Processing {file_path.name}...')
        points_batch = []

        with open(file_path, 'r', encoding='utf-8') as f:
            conversation_text = []
            file_timestamp = file_path.stat().st_mtime

            for line_num, line in enumerate(f, 1):
                try:
                    data = json.loads(line)
                    msg_type = data.get('type', '')

                    # Extract displayable text depending on record type.
                    content = None
                    if msg_type == 'summary' and data.get('summary'):
                        content = f"[Conversation Summary] {data['summary']}"
                    elif msg_type == 'user' and data.get('summary'):
                        content = f"User: {data['summary']}"
                    elif msg_type == 'assistant' and data.get('summary'):
                        content = f"Assistant: {data['summary']}"
                    elif msg_type in ['user', 'assistant']:
                        # Fall back to raw content/text fields when no summary.
                        if 'content' in data:
                            content = f"{msg_type.title()}: {data['content']}"
                        elif 'text' in data:
                            content = f"{msg_type.title()}: {data['text']}"

                    if content:
                        conversation_text.append(content)

                    # Flush a chunk every five collected messages.
                    if len(conversation_text) >= 5:
                        chunk_text = '\n\n'.join(conversation_text)
                        if chunk_text.strip():
                            # Embed at most 2000 chars to respect model limits.
                            embedding = list(model.embed([chunk_text[:2000]]))[0]
                            points_batch.append(_build_chunk_point(
                                chunk_text, embedding, project_path,
                                file_path, len(points_batch), file_timestamp))
                            conversation_text = []

                except json.JSONDecodeError:
                    logger.warning(f'Invalid JSON at line {line_num} in {file_path.name}')
                except Exception as e:
                    logger.warning(f'Error processing line {line_num}: {e}')

            # Flush any trailing messages as a final, smaller chunk.
            if conversation_text:
                chunk_text = '\n\n'.join(conversation_text)
                if chunk_text.strip():
                    embedding = list(model.embed([chunk_text[:2000]]))[0]
                    points_batch.append(_build_chunk_point(
                        chunk_text, embedding, project_path,
                        file_path, len(points_batch), file_timestamp))

        # Upload the whole file's chunks in one batch.
        if points_batch:
            client.upsert(collection_name=collection_name, points=points_batch)
            logger.info(f'  Uploaded {len(points_batch)} chunks from {file_path.name}')
            total_points += len(points_batch)

    # Verify and report the final collection size.
    info = client.get_collection(collection_name)
    logger.info(f'\nImport complete!')
    logger.info(f'Collection {collection_name} now has {info.points_count} points')
    logger.info(f'Added {total_points} new points in this import')

    return collection_name, total_points
|
|
154
|
-
|
|
155
|
-
def main():
    """CLI entry point: validate arguments, then run the old-format importer."""
    args = sys.argv[1:]

    # Require at least the project directory argument.
    if not args:
        print("Usage: python import-old-format.py <project-directory> [project-path]")
        print("Example: python import-old-format.py ~/.claude/projects/-Users-me-projects-myapp /Users/me/projects/myapp")
        sys.exit(1)

    project_dir = Path(args[0]).expanduser()
    project_path = args[1] if len(args) > 1 else None

    # Bail out early when the directory is missing.
    if not project_dir.exists():
        print(f"Error: Directory {project_dir} does not exist")
        sys.exit(1)

    import_old_format_project(project_dir, project_path)
|
|
169
|
-
|
|
170
|
-
# Allow running this importer directly as a script.
if __name__ == "__main__":
    main()
|