claude-memory-agent 2.1.0 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.js +11 -1
- package/bin/lib/banner.js +39 -0
- package/bin/lib/environment.js +166 -0
- package/bin/lib/installer.js +291 -0
- package/bin/lib/models.js +95 -0
- package/bin/lib/steps/advanced.js +101 -0
- package/bin/lib/steps/confirm.js +87 -0
- package/bin/lib/steps/model.js +57 -0
- package/bin/lib/steps/provider.js +65 -0
- package/bin/lib/steps/scope.js +59 -0
- package/bin/lib/steps/server.js +74 -0
- package/bin/lib/ui.js +75 -0
- package/bin/onboarding.js +164 -0
- package/bin/postinstall.js +22 -257
- package/config.py +103 -4
- package/dashboard.html +697 -27
- package/hooks/extract_memories.py +439 -0
- package/hooks/pre_compact_hook.py +76 -0
- package/hooks/session_end_hook.py +149 -0
- package/hooks/stop_hook.py +372 -0
- package/install.py +91 -37
- package/main.py +1636 -892
- package/mcp_server.py +451 -0
- package/package.json +14 -3
- package/requirements.txt +12 -8
- package/services/adaptive_ranker.py +272 -0
- package/services/agent_catalog.json +153 -0
- package/services/agent_registry.py +245 -730
- package/services/claude_md_sync.py +320 -4
- package/services/consolidation.py +417 -0
- package/services/database.py +586 -105
- package/services/embedding_pipeline.py +262 -0
- package/services/embeddings.py +493 -85
- package/services/memory_decay.py +408 -0
- package/services/native_memory_paths.py +86 -0
- package/services/native_memory_sync.py +496 -0
- package/services/response_manager.py +183 -0
- package/services/terminal_ui.py +199 -0
- package/services/tier_manager.py +235 -0
- package/services/websocket.py +26 -6
- package/skills/search.py +136 -61
- package/skills/session_review.py +210 -23
- package/skills/store.py +125 -18
- package/terminal_dashboard.py +474 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/grounding-hook.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/curator.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/confidence_tracker.cpython-312.pyc +0 -0
- package/skills/__pycache__/context.cpython-312.pyc +0 -0
- package/skills/__pycache__/curator.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/session_review.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/test_automation.py +0 -221
- package/test_complete.py +0 -338
- package/test_full.py +0 -322
- package/verify_db.py +0 -134
|
@@ -0,0 +1,372 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Stop hook for Claude Code.
|
|
4
|
+
|
|
5
|
+
Fires after every Claude response. Unlike PreCompact/SessionEnd hooks which
|
|
6
|
+
scan the full transcript, this hook analyzes ONLY the latest assistant
|
|
7
|
+
response for high-signal content worth persisting immediately.
|
|
8
|
+
|
|
9
|
+
Design constraints:
|
|
10
|
+
- Runs after EVERY response -- must complete in < 2 seconds
|
|
11
|
+
- Extracts at most 2 memories per invocation
|
|
12
|
+
- Focuses only on explicit, high-confidence signals (decisions, error
|
|
13
|
+
resolutions, architecture notes)
|
|
14
|
+
- Shares the cursor dedup hash list with extract_memories.py so the
|
|
15
|
+
heavier hooks don't re-extract the same content
|
|
16
|
+
- Uses stdlib only (no pip dependencies)
|
|
17
|
+
- Always exits 0 -- never blocks the user
|
|
18
|
+
|
|
19
|
+
Stdin JSON schema (provided by Claude Code):
|
|
20
|
+
{
|
|
21
|
+
"session_id": "...",
|
|
22
|
+
"transcript_path": "...",
|
|
23
|
+
"hook_event_name": "Stop",
|
|
24
|
+
"cwd": "...",
|
|
25
|
+
"stop_hook_active": true,
|
|
26
|
+
... (assistant's last response in transcript)
|
|
27
|
+
}
|
|
28
|
+
"""
|
|
29
|
+
|
|
30
|
+
import os
|
|
31
|
+
import sys
|
|
32
|
+
import json
|
|
33
|
+
import re
|
|
34
|
+
import time
|
|
35
|
+
import hashlib
|
|
36
|
+
from pathlib import Path
|
|
37
|
+
from typing import Dict, Any, List, Optional
|
|
38
|
+
|
|
39
|
+
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------

# Base URL of the local memory-agent service; the A2A endpoint lives at /a2a.
MEMORY_AGENT_URL = os.getenv("MEMORY_AGENT_URL", "http://localhost:8102")
# Optional API key; when non-empty it is sent as the X-Memory-Key header.
API_KEY = os.getenv("MEMORY_API_KEY", "")
# Cursor state directory/file shared with extract_memories.py for dedup.
CURSOR_DIR = Path.home() / ".claude"
CURSOR_FILE = CURSOR_DIR / "memory-agent-cursor.json"
MAX_MEMORIES_PER_STOP = 2  # Hard cap -- stay fast
MAX_CONTENT_LENGTH = 500  # Truncate for storage
API_TIMEOUT_SECONDS = 1.5  # Tight timeout for API calls
TOTAL_TIME_BUDGET = 2.0  # Total wall-clock budget
|
|
51
|
+
|
|
52
|
+
# ---------------------------------------------------------------------------
|
|
53
|
+
# High-signal extraction patterns (intentionally narrow)
|
|
54
|
+
#
|
|
55
|
+
# These are stricter than the ones in extract_memories.py because the Stop
|
|
56
|
+
# hook runs on every response and must avoid false positives. The heavier
|
|
57
|
+
# PreCompact/SessionEnd hooks catch the rest.
|
|
58
|
+
# ---------------------------------------------------------------------------
|
|
59
|
+
|
|
60
|
+
# Explicit decisions -- strong first-person phrasing
DECISION_PATTERNS = [
    # Group 1 captures the decision clause: at least 20 chars, lazily up to
    # the first period or end of line (MULTILINE makes $ match per-line).
    re.compile(
        r"(?:^|\n)\s*(?:I decided to|I've decided to|Let's go with|The approach will be|"
        r"We(?:'ll| will) go with|The decision is to) (.{20,}?)(?:\.|$)",
        re.IGNORECASE | re.MULTILINE,
    ),
]

# Error resolutions -- explicit fix language
ERROR_RESOLUTION_PATTERNS = [
    # Same capture shape as DECISION_PATTERNS but keyed on fix/root-cause
    # phrasing; the leading (?:^|\n)\s* anchors matches to line starts.
    re.compile(
        r"(?:^|\n)\s*(?:The fix is|The fix was|Root cause was|Root cause:|"
        r"This was caused by|The bug was|The issue was|Resolution:) (.{20,}?)(?:\.|$)",
        re.IGNORECASE | re.MULTILINE,
    ),
]

# Architecture / convention notes
ARCHITECTURE_PATTERNS = [
    # Lower-importance class (scored 6 by _extract_high_signal vs 7 for the
    # two lists above).
    re.compile(
        r"(?:^|\n)\s*(?:The architecture|This pattern|Convention:|"
        r"The convention is|Key pattern:|Architecture note:) (.{20,}?)(?:\.|$)",
        re.IGNORECASE | re.MULTILINE,
    ),
]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
# ---------------------------------------------------------------------------
|
|
89
|
+
# Cursor interaction (reuses same file as extract_memories.py)
|
|
90
|
+
# ---------------------------------------------------------------------------
|
|
91
|
+
|
|
92
|
+
def _load_cursor_hashes(session_id: str) -> set:
    """Return the set of content hashes already extracted for *session_id*.

    Reads the shared cursor file (also written by extract_memories.py).
    Any read/parse failure -- or a missing file -- yields an empty set so
    the hook degrades to "no dedup" instead of failing.
    """
    try:
        if CURSOR_FILE.exists():
            payload = json.loads(CURSOR_FILE.read_text(encoding="utf-8"))
            entry = payload.get(session_id, {})
            return set(entry.get("extracted_hashes", []))
    except (json.JSONDecodeError, OSError):
        pass
    return set()
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def _save_cursor_hashes(session_id: str, new_hashes: List[str]) -> None:
    """Append new content hashes to the session's cursor entry.

    The hash list is kept in insertion order so that the 200-entry cap
    discards the OLDEST hashes first.  (The previous implementation merged
    through a set -- ``list(existing | set(new_hashes))`` -- whose arbitrary
    iteration order made ``merged[-200:]`` keep a random subset, so freshly
    stored hashes could be evicted immediately and re-extracted later.)

    All filesystem/JSON errors are swallowed: the cursor is a best-effort
    dedup cache and must never block the user (hook contract: exit 0).
    """
    try:
        CURSOR_DIR.mkdir(parents=True, exist_ok=True)

        data: Dict[str, Any] = {}
        if CURSOR_FILE.exists():
            try:
                data = json.loads(CURSOR_FILE.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError):
                data = {}  # Corrupt cursor -- start fresh rather than fail

        session = data.get(session_id, {"byte_offset": 0, "extracted_hashes": []})

        # Order-preserving merge: keep existing order, append unseen hashes.
        merged = list(session.get("extracted_hashes", []))
        seen = set(merged)
        for h in new_hashes:
            if h not in seen:
                seen.add(h)
                merged.append(h)

        # Cap to prevent unbounded growth -- keeps the most recent entries.
        if len(merged) > 200:
            merged = merged[-200:]
        session["extracted_hashes"] = merged
        data[session_id] = session

        CURSOR_FILE.write_text(json.dumps(data, indent=2), encoding="utf-8")
    except OSError:
        pass  # Fail silently
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _content_hash(text: str) -> str:
    """Short MD5 prefix for dedup -- matches extract_memories.content_hash.

    Whitespace is stripped and the text lowercased before hashing so that
    trivially different renderings of the same content collide.
    """
    normalized = text.strip().lower()
    return hashlib.md5(normalized.encode("utf-8")).hexdigest()[:12]
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
# ---------------------------------------------------------------------------
|
|
135
|
+
# Response extraction
|
|
136
|
+
# ---------------------------------------------------------------------------
|
|
137
|
+
|
|
138
|
+
def _get_latest_response(transcript_path: str) -> str:
    """
    Return only the last assistant response from the transcript file.

    Claude Code transcripts are JSONL (one message object per line).  For
    speed we read at most the trailing 32 KB and walk the lines back to
    front looking for the last ``{"role": "assistant", ...}`` entry.
    Returns "" when the file is missing, empty, or unreadable.
    """
    path = Path(transcript_path)
    if not path.exists():
        return ""

    try:
        size = path.stat().st_size
        if size == 0:
            return ""

        # Only the trailing 32 KB is needed -- the latest response is there.
        offset = max(0, size - 32768)
        with open(path, "r", encoding="utf-8", errors="replace") as fh:
            if offset > 0:
                fh.seek(offset)
                fh.readline()  # Discard the partial line at the seek point
            tail = fh.read()

        stripped = tail.strip()
        if not stripped:
            return ""

        # Scan back-to-front for the most recent assistant turn.
        for raw_line in reversed(stripped.split("\n")):
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            try:
                msg = json.loads(raw_line)
            except (json.JSONDecodeError, TypeError):
                continue
            if not isinstance(msg, dict) or msg.get("role") != "assistant":
                continue
            content = msg.get("content", "")
            if isinstance(content, str):
                return content
            if isinstance(content, list):
                # Multi-part content: join the text blocks.
                pieces = []
                for part in content:
                    if isinstance(part, dict) and part.get("type") == "text":
                        pieces.append(part.get("text", ""))
                    elif isinstance(part, str):
                        pieces.append(part)
                return "\n".join(pieces)
            # Unrecognized content shape -- keep scanning older lines.

        # Fallback: if JSONL parsing found nothing, return the last chunk of
        # raw text (transcript might be plain text rather than JSONL).
        return tail[-8192:] if len(tail) > 8192 else tail

    except OSError:
        return ""
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def _extract_high_signal(text: str, existing_hashes: set) -> List[Dict[str, Any]]:
    """
    Scan text for high-signal patterns. Returns at most MAX_MEMORIES_PER_STOP items.

    Matches are checked in priority order (decisions, error resolutions,
    architecture notes); once the cap is reached, later matches are dropped.
    Dedup is against *existing_hashes* (from the cursor file) plus anything
    added during this scan.
    """
    extractions: List[Dict[str, Any]] = []
    # Copy so we never mutate the caller's set; closures below share this.
    seen = set(existing_hashes)

    def _try_add(content: str, mem_type: str, importance: int, tags: List[str]) -> None:
        # Enforce the per-invocation cap first -- cheapest check.
        if len(extractions) >= MAX_MEMORIES_PER_STOP:
            return
        h = _content_hash(content)
        if h in seen:
            return
        seen.add(h)
        # Truncate AFTER hashing so the hash matches the full-text hash
        # other hooks would compute.
        if len(content) > MAX_CONTENT_LENGTH:
            content = content[:MAX_CONTENT_LENGTH] + "..."
        extractions.append({
            "content": content,
            "type": mem_type,
            "importance": importance,
            "tags": tags + ["auto-extracted", "stop-hook"],
            "hash": h,
        })

    def _context_around(match_obj, source_text: str, chars: int = 200) -> str:
        """Grab surrounding context aligned to line boundaries."""
        start = max(0, match_obj.start() - chars)
        end = min(len(source_text), match_obj.end() + chars)
        # Widen outward to the nearest newline so we never cut mid-line.
        while start > 0 and source_text[start] != '\n':
            start -= 1
        while end < len(source_text) and source_text[end] != '\n':
            end += 1
        return source_text[start:end].strip()

    # --- Decisions (importance 7 -- higher than extract_memories' 6 because
    # these patterns are narrower / higher confidence) ---
    for pat in DECISION_PATTERNS:
        for m in pat.finditer(text):
            ctx = _context_around(m, text)
            if len(ctx) > 30:  # Skip degenerate one-liner contexts
                _try_add(ctx, "decision", 7, ["decision"])

    # --- Error resolutions (importance 7) ---
    for pat in ERROR_RESOLUTION_PATTERNS:
        for m in pat.finditer(text):
            ctx = _context_around(m, text)
            if len(ctx) > 30:
                _try_add(ctx, "error", 7, ["error", "resolution"])

    # --- Architecture notes (importance 6) ---
    for pat in ARCHITECTURE_PATTERNS:
        for m in pat.finditer(text):
            ctx = _context_around(m, text)
            if len(ctx) > 30:
                _try_add(ctx, "decision", 6, ["architecture", "pattern"])

    return extractions
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
# ---------------------------------------------------------------------------
|
|
260
|
+
# API call (mirrors extract_memories.store_memory_sync, tighter timeout)
|
|
261
|
+
# ---------------------------------------------------------------------------
|
|
262
|
+
|
|
263
|
+
def _store_memory(extraction: Dict[str, Any], project_path: Optional[str] = None) -> bool:
    """POST a single extracted memory to the memory agent's A2A endpoint.

    Returns True on HTTP 200, False on any network, HTTP, or timeout
    failure.  The timeout is deliberately tight (API_TIMEOUT_SECONDS) so a
    down agent cannot stall the Stop hook.
    """
    # Deferred imports keep module import cheap for the no-op fast path.
    import urllib.request
    import urllib.error

    skill_params = {
        "content": extraction["content"],
        "type": extraction["type"],
        "importance": extraction["importance"],
        "tags": extraction["tags"],
        "project_path": project_path,
        "agent_type": "stop-hook",
        "outcome_status": "pending",
        "confidence": 0.45,  # Slightly above auto-extracted (0.4)
    }
    payload = {
        "jsonrpc": "2.0",
        "method": "tasks/send",
        "params": {
            "message": {"parts": [{"type": "text", "text": ""}]},
            "metadata": {
                "skill_id": "store_memory",
                "params": skill_params,
            },
        },
        "id": f"stop-{extraction['hash']}-{int(time.time())}",
    }

    headers = {"Content-Type": "application/json"}
    if API_KEY:
        headers["X-Memory-Key"] = API_KEY

    try:
        request = urllib.request.Request(
            f"{MEMORY_AGENT_URL}/a2a",
            data=json.dumps(payload).encode("utf-8"),
            headers=headers,
            method="POST",
        )
        with urllib.request.urlopen(request, timeout=API_TIMEOUT_SECONDS) as response:
            return response.status == 200
    except (urllib.error.URLError, urllib.error.HTTPError, OSError, TimeoutError):
        return False
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
# ---------------------------------------------------------------------------
|
|
309
|
+
# Main
|
|
310
|
+
# ---------------------------------------------------------------------------
|
|
311
|
+
|
|
312
|
+
def main() -> None:
    """Hook entry point: read stdin JSON, extract, store, always exit 0.

    Every early-out path uses sys.exit(0); SystemExit is not an Exception
    subclass, so those exits are NOT swallowed by the broad handler below.
    The handler exists so that any unexpected failure is logged to stderr
    and still exits 0 -- the hook must never block the user.
    """
    start = time.time()

    try:
        # --- Read stdin JSON (skipped when run interactively from a tty) ---
        hook_data: Dict[str, Any] = {}
        if not sys.stdin.isatty():
            raw = sys.stdin.read()
            if raw.strip():
                hook_data = json.loads(raw)

        session_id = hook_data.get("session_id", "")
        transcript_path = hook_data.get("transcript_path", "")
        # "cwd" is the documented key; "project_path" kept as a fallback.
        project_path = hook_data.get("cwd") or hook_data.get("project_path", "")

        if not transcript_path or not session_id:
            sys.exit(0)

        # --- Load existing hashes for dedup ---
        existing_hashes = _load_cursor_hashes(session_id)

        # --- Get only the latest assistant response ---
        response_text = _get_latest_response(transcript_path)
        if not response_text or len(response_text) < 40:
            sys.exit(0)

        # --- Extract high-signal content ---
        extractions = _extract_high_signal(response_text, existing_hashes)
        if not extractions:
            sys.exit(0)

        # --- Store via API (checked against the wall-clock budget) ---
        stored_hashes: List[str] = []
        for extraction in extractions:
            elapsed = time.time() - start
            if elapsed >= TOTAL_TIME_BUDGET:
                break
            if _store_memory(extraction, project_path):
                stored_hashes.append(extraction["hash"])

        # --- Persist new hashes to cursor file (only successful stores) ---
        if stored_hashes:
            _save_cursor_hashes(session_id, stored_hashes)

        elapsed_total = round(time.time() - start, 3)
        print(
            f"[Stop] session={session_id} "
            f"found={len(extractions)} stored={len(stored_hashes)} "
            f"elapsed={elapsed_total}s",
            file=sys.stderr,
        )

    except Exception as e:
        # Log and fall through to exit 0 -- never propagate to Claude Code.
        elapsed = round(time.time() - start, 3)
        print(f"[Stop] Error (non-fatal): {e} [{elapsed}s]", file=sys.stderr)

    sys.exit(0)
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
if __name__ == "__main__":
|
|
372
|
+
main()
|
package/install.py
CHANGED
|
@@ -36,7 +36,8 @@ DEFAULT_CONFIG = {
|
|
|
36
36
|
"HOST": "0.0.0.0",
|
|
37
37
|
"MEMORY_AGENT_URL": "http://localhost:8102",
|
|
38
38
|
"OLLAMA_HOST": "http://localhost:11434",
|
|
39
|
-
"EMBEDDING_MODEL": "
|
|
39
|
+
"EMBEDDING_MODEL": "Alibaba-NLP/gte-large-en-v1.5",
|
|
40
|
+
"EMBEDDING_PROVIDER": "sentence-transformers",
|
|
40
41
|
"LOG_LEVEL": "INFO",
|
|
41
42
|
"USE_VECTOR_INDEX": "true",
|
|
42
43
|
"DB_POOL_SIZE": "5",
|
|
@@ -47,12 +48,7 @@ DEFAULT_CONFIG = {
|
|
|
47
48
|
# Claude Code settings paths
|
|
48
49
|
def get_claude_settings_dir() -> Path:
|
|
49
50
|
"""Get the Claude Code settings directory."""
|
|
50
|
-
|
|
51
|
-
return Path.home() / ".claude"
|
|
52
|
-
elif sys.platform == "darwin":
|
|
53
|
-
return Path.home() / ".claude"
|
|
54
|
-
else: # Linux
|
|
55
|
-
return Path.home() / ".claude"
|
|
51
|
+
return Path.home() / ".claude"
|
|
56
52
|
|
|
57
53
|
def get_claude_settings_file() -> Path:
|
|
58
54
|
"""Get the Claude Code settings.json file path."""
|
|
@@ -280,17 +276,18 @@ def check_ollama() -> bool:
|
|
|
280
276
|
print_warning("Ollama not detected")
|
|
281
277
|
print("")
|
|
282
278
|
print(" " + "="*56)
|
|
283
|
-
print(" OLLAMA
|
|
279
|
+
print(" OLLAMA (OPTIONAL)")
|
|
284
280
|
print(" " + "="*56)
|
|
285
281
|
print("")
|
|
286
|
-
print("
|
|
287
|
-
print("
|
|
282
|
+
print(" Ollama is optional. The default provider (sentence-transformers)")
|
|
283
|
+
print(" runs locally without Ollama. Install Ollama only if you prefer")
|
|
284
|
+
print(" the Ollama provider.")
|
|
288
285
|
print("")
|
|
289
|
-
print(" To install Ollama:")
|
|
286
|
+
print(" To install Ollama (if desired):")
|
|
290
287
|
print(" 1. Download from: https://ollama.ai/download")
|
|
291
288
|
print(" 2. Install and run: ollama pull nomic-embed-text")
|
|
292
289
|
print(" 3. Start Ollama: ollama serve")
|
|
293
|
-
print(" 4.
|
|
290
|
+
print(" 4. Set EMBEDDING_PROVIDER=ollama in .env")
|
|
294
291
|
print("")
|
|
295
292
|
return False
|
|
296
293
|
|
|
@@ -357,10 +354,13 @@ def create_env_file(config: Dict[str, str], force: bool = False) -> bool:
|
|
|
357
354
|
f"PORT={config['PORT']}",
|
|
358
355
|
f"MEMORY_AGENT_URL={config['MEMORY_AGENT_URL']}",
|
|
359
356
|
"",
|
|
360
|
-
"#
|
|
361
|
-
f"
|
|
357
|
+
"# Embedding Configuration",
|
|
358
|
+
f"EMBEDDING_PROVIDER={config.get('EMBEDDING_PROVIDER', 'sentence-transformers')}",
|
|
362
359
|
f"EMBEDDING_MODEL={config['EMBEDDING_MODEL']}",
|
|
363
360
|
"",
|
|
361
|
+
"# Ollama Configuration (only needed if EMBEDDING_PROVIDER=ollama)",
|
|
362
|
+
f"OLLAMA_HOST={config['OLLAMA_HOST']}",
|
|
363
|
+
"",
|
|
364
364
|
"# Database Configuration",
|
|
365
365
|
f"DATABASE_PATH={AGENT_DIR / 'memories.db'}",
|
|
366
366
|
f"USE_VECTOR_INDEX={config['USE_VECTOR_INDEX']}",
|
|
@@ -452,10 +452,9 @@ echo "Memory Agent started (PID: $!)"
|
|
|
452
452
|
return False
|
|
453
453
|
|
|
454
454
|
|
|
455
|
-
def
|
|
456
|
-
"""
|
|
457
|
-
|
|
458
|
-
settings_dir = get_claude_settings_dir()
|
|
455
|
+
def _write_mcp_settings(settings_file: Path, config: Dict[str, str]) -> bool:
|
|
456
|
+
"""Write MCP settings to a given settings file."""
|
|
457
|
+
settings_dir = settings_file.parent
|
|
459
458
|
|
|
460
459
|
# Ensure settings directory exists
|
|
461
460
|
settings_dir.mkdir(parents=True, exist_ok=True)
|
|
@@ -465,7 +464,7 @@ def configure_claude_mcp(config: Dict[str, str]) -> bool:
|
|
|
465
464
|
try:
|
|
466
465
|
settings = json.loads(settings_file.read_text())
|
|
467
466
|
except json.JSONDecodeError:
|
|
468
|
-
print_warning("Existing
|
|
467
|
+
print_warning(f"Existing {settings_file.name} is invalid, creating backup")
|
|
469
468
|
shutil.copy(settings_file, settings_file.with_suffix(".json.bak"))
|
|
470
469
|
settings = {}
|
|
471
470
|
else:
|
|
@@ -478,7 +477,7 @@ def configure_claude_mcp(config: Dict[str, str]) -> bool:
|
|
|
478
477
|
# Add/update claude-memory server configuration
|
|
479
478
|
settings["mcpServers"]["claude-memory"] = {
|
|
480
479
|
"command": sys.executable,
|
|
481
|
-
"args": [str(AGENT_DIR / "
|
|
480
|
+
"args": [str(AGENT_DIR / "mcp_server.py")],
|
|
482
481
|
"env": {
|
|
483
482
|
"MEMORY_AGENT_URL": config["MEMORY_AGENT_URL"],
|
|
484
483
|
"PORT": config["PORT"],
|
|
@@ -494,6 +493,35 @@ def configure_claude_mcp(config: Dict[str, str]) -> bool:
|
|
|
494
493
|
return False
|
|
495
494
|
|
|
496
495
|
|
|
496
|
+
def configure_claude_mcp(config: Dict[str, str], scope: str = "global", project_path: Optional[str] = None) -> bool:
    """Configure Claude Code MCP settings.

    Args:
        config: Configuration dictionary with PORT, MEMORY_AGENT_URL, etc.
        scope: Installation scope - 'global', 'project', or 'both'.
        project_path: Project directory path for project-specific installation.

    Returns:
        True only if every settings file the scope requires was written.
        With scope='both' and no project_path, the missing project write is
        warned about but only counts as failure for scope='project'.
    """
    success = True

    if scope in ("global", "both"):
        settings_file = get_claude_settings_file()
        if not _write_mcp_settings(settings_file, config):
            success = False

    if scope in ("project", "both"):
        if project_path:
            # Project-level settings live under <project>/.claude/.
            project_settings_dir = Path(project_path) / ".claude"
            project_settings_file = project_settings_dir / "settings.local.json"
            if not _write_mcp_settings(project_settings_file, config):
                success = False
        else:
            print_warning("Project path not specified, skipping project-level MCP settings")
            # Fatal only when the user asked for project-only scope.
            if scope == "project":
                success = False

    return success
|
|
523
|
+
|
|
524
|
+
|
|
497
525
|
def setup_hooks(config: Dict[str, str]) -> bool:
|
|
498
526
|
"""Set up Claude Code hooks for auto-start and context injection."""
|
|
499
527
|
hooks_dir = get_hooks_dir()
|
|
@@ -535,7 +563,7 @@ def setup_hooks(config: Dict[str, str]) -> bool:
|
|
|
535
563
|
return True
|
|
536
564
|
|
|
537
565
|
|
|
538
|
-
def configure_hooks_json() -> bool:
|
|
566
|
+
def configure_hooks_json(auto: bool = False) -> bool:
|
|
539
567
|
"""Configure hooks.json to enable the hooks."""
|
|
540
568
|
hooks_file = get_claude_settings_dir() / "hooks.json"
|
|
541
569
|
|
|
@@ -568,8 +596,9 @@ def configure_hooks_json() -> bool:
|
|
|
568
596
|
if hooks_file.exists():
|
|
569
597
|
try:
|
|
570
598
|
existing = json.loads(hooks_file.read_text())
|
|
571
|
-
#
|
|
572
|
-
|
|
599
|
+
# In auto mode, always merge; otherwise ask
|
|
600
|
+
should_update = auto or prompt_yes_no("hooks.json exists. Update with memory agent hooks?", default=True)
|
|
601
|
+
if should_update:
|
|
573
602
|
if "hooks" not in existing:
|
|
574
603
|
existing["hooks"] = {}
|
|
575
604
|
existing["hooks"].update(hooks_config["hooks"])
|
|
@@ -679,9 +708,10 @@ def print_post_install_instructions(config: Dict[str, str]):
|
|
|
679
708
|
|
|
680
709
|
print("Next steps:")
|
|
681
710
|
print("")
|
|
682
|
-
print("1.
|
|
683
|
-
print(f" ollama pull
|
|
711
|
+
print("1. (Optional) If using Ollama provider, make sure Ollama is running:")
|
|
712
|
+
print(f" ollama pull nomic-embed-text")
|
|
684
713
|
print(f" ollama serve")
|
|
714
|
+
print(f" Then set EMBEDDING_PROVIDER=ollama in .env")
|
|
685
715
|
print("")
|
|
686
716
|
print("2. Start the Memory Agent:")
|
|
687
717
|
print(f" cd \"{AGENT_DIR}\"")
|
|
@@ -770,6 +800,28 @@ def main():
|
|
|
770
800
|
action="store_true",
|
|
771
801
|
help="Skip Claude Code installation check (for standalone use)"
|
|
772
802
|
)
|
|
803
|
+
parser.add_argument(
|
|
804
|
+
"--skip-env",
|
|
805
|
+
action="store_true",
|
|
806
|
+
help="Skip .env file creation (already created by Node.js wizard)"
|
|
807
|
+
)
|
|
808
|
+
parser.add_argument(
|
|
809
|
+
"--scope",
|
|
810
|
+
choices=["global", "project", "both"],
|
|
811
|
+
default="global",
|
|
812
|
+
help="Installation scope for Claude Code settings"
|
|
813
|
+
)
|
|
814
|
+
parser.add_argument(
|
|
815
|
+
"--project-path",
|
|
816
|
+
type=str,
|
|
817
|
+
default=None,
|
|
818
|
+
help="Project path for project-specific installation"
|
|
819
|
+
)
|
|
820
|
+
parser.add_argument(
|
|
821
|
+
"--no-start",
|
|
822
|
+
action="store_true",
|
|
823
|
+
help="Don't auto-start the agent after installation"
|
|
824
|
+
)
|
|
773
825
|
|
|
774
826
|
args = parser.parse_args()
|
|
775
827
|
|
|
@@ -810,7 +862,7 @@ def main():
|
|
|
810
862
|
if not install_claude_code():
|
|
811
863
|
print_error("Could not install Claude Code automatically.")
|
|
812
864
|
print("Please install manually: npm install -g @anthropic-ai/claude-code")
|
|
813
|
-
if not prompt_yes_no("Continue anyway (memory agent only)?", default=False):
|
|
865
|
+
if not args.auto and not prompt_yes_no("Continue anyway (memory agent only)?", default=False):
|
|
814
866
|
return 1
|
|
815
867
|
else:
|
|
816
868
|
claude_ok = True
|
|
@@ -841,7 +893,7 @@ def main():
|
|
|
841
893
|
config["OLLAMA_HOST"]
|
|
842
894
|
)
|
|
843
895
|
|
|
844
|
-
if prompt_yes_no("Use default embedding model (
|
|
896
|
+
if prompt_yes_no("Use default embedding model (gte-large-en-v1.5 via sentence-transformers)?"):
|
|
845
897
|
pass
|
|
846
898
|
else:
|
|
847
899
|
config["EMBEDDING_MODEL"] = prompt_value(
|
|
@@ -861,8 +913,11 @@ def main():
|
|
|
861
913
|
|
|
862
914
|
# Step 4: Create .env file
|
|
863
915
|
print_step(4, total_steps, "Creating configuration file...")
|
|
864
|
-
if not
|
|
865
|
-
|
|
916
|
+
if not args.skip_env:
|
|
917
|
+
if not create_env_file(config, force=args.auto):
|
|
918
|
+
return 1
|
|
919
|
+
else:
|
|
920
|
+
print_success("Skipped .env creation (--skip-env)")
|
|
866
921
|
|
|
867
922
|
# Step 5: Fix hardcoded values
|
|
868
923
|
print_step(5, total_steps, "Fixing hardcoded values...")
|
|
@@ -879,11 +934,11 @@ def main():
|
|
|
879
934
|
|
|
880
935
|
if claude_ok:
|
|
881
936
|
if args.auto or prompt_yes_no("Configure Claude Code MCP settings?"):
|
|
882
|
-
configure_claude_mcp(config)
|
|
937
|
+
configure_claude_mcp(config, scope=args.scope, project_path=args.project_path)
|
|
883
938
|
|
|
884
939
|
if args.auto or prompt_yes_no("Install Claude Code hooks?"):
|
|
885
940
|
setup_hooks(config)
|
|
886
|
-
configure_hooks_json()
|
|
941
|
+
configure_hooks_json(auto=args.auto)
|
|
887
942
|
else:
|
|
888
943
|
print_warning("Skipping Claude Code configuration (Claude Code not installed)")
|
|
889
944
|
print(" Run 'python install.py' again after installing Claude Code")
|
|
@@ -892,9 +947,11 @@ def main():
|
|
|
892
947
|
print_step(8, total_steps, "Verifying installation...")
|
|
893
948
|
verify_installation()
|
|
894
949
|
|
|
895
|
-
# Step 9: Auto-start agent
|
|
950
|
+
# Step 9: Auto-start agent
|
|
896
951
|
print_step(9, total_steps, "Starting Memory Agent...")
|
|
897
|
-
if
|
|
952
|
+
if args.no_start:
|
|
953
|
+
print_success("Skipped auto-start (--no-start)")
|
|
954
|
+
else:
|
|
898
955
|
try:
|
|
899
956
|
subprocess.run(
|
|
900
957
|
[sys.executable, str(AGENT_DIR / "memory-agent"), "start"],
|
|
@@ -904,10 +961,7 @@ def main():
|
|
|
904
961
|
print_success("Memory Agent started!")
|
|
905
962
|
except Exception as e:
|
|
906
963
|
print_warning(f"Could not auto-start agent: {e}")
|
|
907
|
-
print(" Start manually with:
|
|
908
|
-
else:
|
|
909
|
-
print_warning("Skipping auto-start (Ollama not running)")
|
|
910
|
-
print(" After installing Ollama, run: claude-memory-agent start")
|
|
964
|
+
print(" Start manually with: python main.py")
|
|
911
965
|
|
|
912
966
|
# Done!
|
|
913
967
|
print_post_install_instructions(config)
|