claude-memory-agent 2.0.1 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +206 -206
- package/agent_card.py +186 -0
- package/bin/cli.js +327 -185
- package/bin/lib/banner.js +39 -0
- package/bin/lib/environment.js +166 -0
- package/bin/lib/installer.js +291 -0
- package/bin/lib/models.js +95 -0
- package/bin/lib/steps/advanced.js +101 -0
- package/bin/lib/steps/confirm.js +87 -0
- package/bin/lib/steps/model.js +57 -0
- package/bin/lib/steps/provider.js +65 -0
- package/bin/lib/steps/scope.js +59 -0
- package/bin/lib/steps/server.js +74 -0
- package/bin/lib/ui.js +75 -0
- package/bin/onboarding.js +164 -0
- package/bin/postinstall.js +35 -270
- package/config.py +103 -4
- package/dashboard.html +4902 -2689
- package/hooks/extract_memories.py +439 -0
- package/hooks/grounding-hook.py +422 -348
- package/hooks/pre_compact_hook.py +76 -0
- package/hooks/session_end.py +293 -192
- package/hooks/session_end_hook.py +149 -0
- package/hooks/session_start.py +227 -227
- package/hooks/stop_hook.py +372 -0
- package/install.py +972 -902
- package/main.py +5240 -2859
- package/mcp_server.py +451 -0
- package/package.json +58 -47
- package/requirements.txt +12 -8
- package/services/__init__.py +50 -50
- package/services/adaptive_ranker.py +272 -0
- package/services/agent_catalog.json +153 -0
- package/services/agent_registry.py +245 -730
- package/services/claude_md_sync.py +320 -4
- package/services/consolidation.py +417 -0
- package/services/curator.py +1606 -0
- package/services/database.py +4118 -2485
- package/services/embedding_pipeline.py +262 -0
- package/services/embeddings.py +493 -85
- package/services/memory_decay.py +408 -0
- package/services/native_memory_paths.py +86 -0
- package/services/native_memory_sync.py +496 -0
- package/services/response_manager.py +183 -0
- package/services/terminal_ui.py +199 -0
- package/services/tier_manager.py +235 -0
- package/services/websocket.py +26 -6
- package/skills/__init__.py +21 -1
- package/skills/confidence_tracker.py +441 -0
- package/skills/context.py +675 -0
- package/skills/curator.py +348 -0
- package/skills/search.py +444 -213
- package/skills/session_review.py +605 -0
- package/skills/store.py +484 -179
- package/terminal_dashboard.py +474 -0
- package/update_system.py +829 -817
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/test_automation.py +0 -221
- package/test_complete.py +0 -338
- package/test_full.py +0 -322
- package/verify_db.py +0 -134
package/hooks/grounding-hook.py
CHANGED
|
@@ -1,348 +1,422 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Grounding Hook for Claude Code - Automatic Context Injection
|
|
4
|
-
|
|
5
|
-
This script is called by Claude Code's UserPromptSubmit hook.
|
|
6
|
-
It fetches the current session context and outputs it to stdout,
|
|
7
|
-
which Claude Code automatically injects into Claude's context.
|
|
8
|
-
|
|
9
|
-
This is the REAL anti-hallucination layer - automatic, not relying on Claude to call tools.
|
|
10
|
-
|
|
11
|
-
Moltbot-inspired additions:
|
|
12
|
-
- Checks flush conditions (events > 50 or time > 30min)
|
|
13
|
-
- Loads MEMORY.md content into grounding context
|
|
14
|
-
- Loads today's daily log highlights
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
import os
|
|
18
|
-
import sys
|
|
19
|
-
import json
|
|
20
|
-
import logging
|
|
21
|
-
import requests
|
|
22
|
-
from pathlib import Path
|
|
23
|
-
from typing import Any, Optional
|
|
24
|
-
|
|
25
|
-
# Configure logging to stderr (important for Claude Code hooks)
|
|
26
|
-
logging.basicConfig(
|
|
27
|
-
level=logging.INFO,
|
|
28
|
-
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
|
29
|
-
stream=sys.stderr
|
|
30
|
-
)
|
|
31
|
-
logger = logging.getLogger(__name__)
|
|
32
|
-
|
|
33
|
-
# Configuration from environment
|
|
34
|
-
MEMORY_AGENT_URL = os.getenv("MEMORY_AGENT_URL", "http://localhost:8102")
|
|
35
|
-
API_TIMEOUT = int(os.getenv("API_TIMEOUT", "30"))
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
def safe_get(data: Any, *keys, default: Any = None) -> Any:
|
|
39
|
-
"""
|
|
40
|
-
Safely navigate nested data structures (dicts and lists).
|
|
41
|
-
|
|
42
|
-
Args:
|
|
43
|
-
data: The data structure to navigate
|
|
44
|
-
*keys: Keys (str for dict) or indices (int for list) to traverse
|
|
45
|
-
default: Value to return if path doesn't exist
|
|
46
|
-
|
|
47
|
-
Returns:
|
|
48
|
-
The value at the path, or default if not found
|
|
49
|
-
|
|
50
|
-
Example:
|
|
51
|
-
safe_get(result, "result", "artifacts", 0, "parts", 0, "text")
|
|
52
|
-
"""
|
|
53
|
-
for key in keys:
|
|
54
|
-
if data is None:
|
|
55
|
-
return default
|
|
56
|
-
if isinstance(data, dict):
|
|
57
|
-
data = data.get(key, default)
|
|
58
|
-
elif isinstance(data, list) and isinstance(key, int):
|
|
59
|
-
if 0 <= key < len(data):
|
|
60
|
-
data = data[key]
|
|
61
|
-
else:
|
|
62
|
-
return default
|
|
63
|
-
else:
|
|
64
|
-
return default
|
|
65
|
-
return data
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def get_project_path():
|
|
69
|
-
"""Get current working directory as project path."""
|
|
70
|
-
return os.getcwd()
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
def load_session_data():
|
|
74
|
-
"""Load session data from JSON file."""
|
|
75
|
-
session_file = Path(get_project_path()) / ".claude_session"
|
|
76
|
-
if session_file.exists():
|
|
77
|
-
try:
|
|
78
|
-
content = session_file.read_text().strip()
|
|
79
|
-
# Try JSON format first
|
|
80
|
-
return json.loads(content)
|
|
81
|
-
except json.JSONDecodeError as e:
|
|
82
|
-
logger.debug(f"JSON decode error, trying legacy format: {e}")
|
|
83
|
-
# Fall back to legacy plain text format (just session_id)
|
|
84
|
-
try:
|
|
85
|
-
content = session_file.read_text().strip()
|
|
86
|
-
return {"session_id": content}
|
|
87
|
-
except (IOError, OSError) as read_err:
|
|
88
|
-
logger.warning(f"Failed to read session file: {read_err}")
|
|
89
|
-
return None
|
|
90
|
-
except (IOError, OSError) as e:
|
|
91
|
-
logger.warning(f"Failed to read session file: {e}")
|
|
92
|
-
return None
|
|
93
|
-
return None
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
def save_session_data(data: dict):
|
|
97
|
-
"""Save session data to JSON file."""
|
|
98
|
-
session_file = Path(get_project_path()) / ".claude_session"
|
|
99
|
-
try:
|
|
100
|
-
session_file.write_text(json.dumps(data, indent=2))
|
|
101
|
-
except (IOError, OSError) as e:
|
|
102
|
-
logger.warning(f"Failed to save session data: {e}")
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
def get_session_id():
|
|
106
|
-
"""Get or create session ID from environment or file."""
|
|
107
|
-
# Try environment variable first
|
|
108
|
-
session_id = os.getenv("CLAUDE_SESSION_ID")
|
|
109
|
-
if session_id:
|
|
110
|
-
return session_id
|
|
111
|
-
|
|
112
|
-
# Try session file in project
|
|
113
|
-
data = load_session_data()
|
|
114
|
-
return data.get("session_id") if data else None
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
def call_memory_agent(skill_id: str, params: dict) -> Optional[dict]:
|
|
118
|
-
"""Call the memory agent API."""
|
|
119
|
-
try:
|
|
120
|
-
response = requests.post(
|
|
121
|
-
f"{MEMORY_AGENT_URL}/a2a",
|
|
122
|
-
json={
|
|
123
|
-
"jsonrpc": "2.0",
|
|
124
|
-
"id": "grounding-hook",
|
|
125
|
-
"method": "tasks/send",
|
|
126
|
-
"params": {
|
|
127
|
-
"message": {"parts": [{"type": "text", "text": ""}]},
|
|
128
|
-
"metadata": {
|
|
129
|
-
"skill_id": skill_id,
|
|
130
|
-
"params": params
|
|
131
|
-
}
|
|
132
|
-
}
|
|
133
|
-
},
|
|
134
|
-
timeout=API_TIMEOUT
|
|
135
|
-
)
|
|
136
|
-
result = response.json()
|
|
137
|
-
|
|
138
|
-
# Safely extract the artifact text using safe_get
|
|
139
|
-
artifact_text = safe_get(result, "result", "artifacts", 0, "parts", 0, "text")
|
|
140
|
-
if artifact_text:
|
|
141
|
-
try:
|
|
142
|
-
return json.loads(artifact_text)
|
|
143
|
-
except json.JSONDecodeError as e:
|
|
144
|
-
logger.debug(f"Failed to parse artifact text as JSON for skill '{skill_id}': {e}")
|
|
145
|
-
return None
|
|
146
|
-
return None
|
|
147
|
-
|
|
148
|
-
except requests.RequestException as e:
|
|
149
|
-
# Silently fail - don't break Claude Code if memory agent is down
|
|
150
|
-
logger.debug(f"Memory agent request failed for skill '{skill_id}': {e}")
|
|
151
|
-
return None
|
|
152
|
-
except json.JSONDecodeError as e:
|
|
153
|
-
logger.debug(f"Failed to decode memory agent response for skill '{skill_id}': {e}")
|
|
154
|
-
return None
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
def format_grounding_context(context: dict) -> str:
|
|
158
|
-
"""Format the grounding context for injection."""
|
|
159
|
-
if not context or not context.get("success"):
|
|
160
|
-
return ""
|
|
161
|
-
|
|
162
|
-
grounding = context.get("grounding", {})
|
|
163
|
-
|
|
164
|
-
lines = ["[GROUNDING CONTEXT - VERIFY BEFORE RESPONDING]"]
|
|
165
|
-
|
|
166
|
-
# Current goal
|
|
167
|
-
if grounding.get("current_goal"):
|
|
168
|
-
lines.append(f"CURRENT GOAL: {grounding['current_goal']}")
|
|
169
|
-
|
|
170
|
-
# Entity registry
|
|
171
|
-
registry = grounding.get("entity_registry", {})
|
|
172
|
-
if registry:
|
|
173
|
-
lines.append("ENTITY REGISTRY (use these exact references):")
|
|
174
|
-
for key, value in list(registry.items())[:5]:
|
|
175
|
-
lines.append(f" - {key}: {value}")
|
|
176
|
-
|
|
177
|
-
# Anchors (verified facts)
|
|
178
|
-
anchors = grounding.get("anchors", [])
|
|
179
|
-
if anchors:
|
|
180
|
-
lines.append("ANCHORS (verified facts - DO NOT CONTRADICT):")
|
|
181
|
-
for anchor in anchors[:5]:
|
|
182
|
-
lines.append(f" - {anchor}")
|
|
183
|
-
|
|
184
|
-
# Recent decisions
|
|
185
|
-
decisions = grounding.get("decisions", [])
|
|
186
|
-
if decisions:
|
|
187
|
-
lines.append("RECENT DECISIONS:")
|
|
188
|
-
for decision in decisions[:3]:
|
|
189
|
-
lines.append(f" - {decision}")
|
|
190
|
-
|
|
191
|
-
# Recent events
|
|
192
|
-
events = grounding.get("recent_events", [])
|
|
193
|
-
if events:
|
|
194
|
-
lines.append("RECENT EVENTS:")
|
|
195
|
-
for event in events[:5]:
|
|
196
|
-
lines.append(f" - [{event.get('type', '?')}] {event.get('summary', '')}")
|
|
197
|
-
|
|
198
|
-
# Contradictions warning
|
|
199
|
-
contradictions = grounding.get("contradictions", [])
|
|
200
|
-
if contradictions:
|
|
201
|
-
lines.append("WARNING - POTENTIAL CONTRADICTIONS DETECTED:")
|
|
202
|
-
for c in contradictions[:3]:
|
|
203
|
-
lines.append(f" - {c.get('content', '')[:100]}")
|
|
204
|
-
|
|
205
|
-
# Pending questions
|
|
206
|
-
pending = grounding.get("pending_questions", [])
|
|
207
|
-
if pending:
|
|
208
|
-
lines.append("PENDING QUESTIONS:")
|
|
209
|
-
for q in pending[:3]:
|
|
210
|
-
lines.append(f" - {q}")
|
|
211
|
-
|
|
212
|
-
lines.append("[/GROUNDING CONTEXT]")
|
|
213
|
-
lines.append("") # Empty line after
|
|
214
|
-
|
|
215
|
-
return "\n".join(lines)
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
def format_memory_md_context(memory_md: dict) -> str:
|
|
219
|
-
"""Format MEMORY.md content for injection."""
|
|
220
|
-
if not memory_md or not memory_md.get("exists"):
|
|
221
|
-
return ""
|
|
222
|
-
|
|
223
|
-
summary = memory_md.get("summary", "")
|
|
224
|
-
if not summary:
|
|
225
|
-
return ""
|
|
226
|
-
|
|
227
|
-
lines = ["[CORE FACTS from MEMORY.md]"]
|
|
228
|
-
lines.append(summary)
|
|
229
|
-
lines.append("[/CORE FACTS]")
|
|
230
|
-
lines.append("")
|
|
231
|
-
|
|
232
|
-
return "\n".join(lines)
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
def format_daily_highlights(highlights: dict) -> str:
|
|
236
|
-
"""Format daily log highlights for injection."""
|
|
237
|
-
if not highlights or not highlights.get("highlights"):
|
|
238
|
-
return ""
|
|
239
|
-
|
|
240
|
-
entries = highlights.get("highlights", [])
|
|
241
|
-
if not entries:
|
|
242
|
-
return ""
|
|
243
|
-
|
|
244
|
-
lines = ["[TODAY'S HIGHLIGHTS from Daily Log]"]
|
|
245
|
-
for entry in entries[:5]:
|
|
246
|
-
lines.append(f" - {entry}")
|
|
247
|
-
lines.append("[/TODAY'S HIGHLIGHTS]")
|
|
248
|
-
lines.append("")
|
|
249
|
-
|
|
250
|
-
return "\n".join(lines)
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
def
|
|
254
|
-
"""
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
"
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
"
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
if
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Grounding Hook for Claude Code - Automatic Context Injection
|
|
4
|
+
|
|
5
|
+
This script is called by Claude Code's UserPromptSubmit hook.
|
|
6
|
+
It fetches the current session context and outputs it to stdout,
|
|
7
|
+
which Claude Code automatically injects into Claude's context.
|
|
8
|
+
|
|
9
|
+
This is the REAL anti-hallucination layer - automatic, not relying on Claude to call tools.
|
|
10
|
+
|
|
11
|
+
Moltbot-inspired additions:
|
|
12
|
+
- Checks flush conditions (events > 50 or time > 30min)
|
|
13
|
+
- Loads MEMORY.md content into grounding context
|
|
14
|
+
- Loads today's daily log highlights
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import os
|
|
18
|
+
import sys
|
|
19
|
+
import json
|
|
20
|
+
import logging
|
|
21
|
+
import requests
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
from typing import Any, Optional
|
|
24
|
+
|
|
25
|
+
# Configure logging to stderr (important for Claude Code hooks)
|
|
26
|
+
logging.basicConfig(
|
|
27
|
+
level=logging.INFO,
|
|
28
|
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
|
29
|
+
stream=sys.stderr
|
|
30
|
+
)
|
|
31
|
+
logger = logging.getLogger(__name__)
|
|
32
|
+
|
|
33
|
+
# Configuration from environment
|
|
34
|
+
MEMORY_AGENT_URL = os.getenv("MEMORY_AGENT_URL", "http://localhost:8102")
|
|
35
|
+
API_TIMEOUT = int(os.getenv("API_TIMEOUT", "30"))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def safe_get(data: Any, *keys, default: Any = None) -> Any:
    """
    Safely navigate nested data structures (dicts and lists).

    Args:
        data: The data structure to navigate
        *keys: Keys (str for dict) or indices (int for list) to traverse
        default: Value to return if path doesn't exist

    Returns:
        The value at the path, or default if not found

    Example:
        safe_get(result, "result", "artifacts", 0, "parts", 0, "text")
    """
    # Sentinel distinguishes "key missing" from a stored None (or stored default).
    _missing = object()
    for key in keys:
        if data is None:
            return default
        if isinstance(data, dict):
            data = data.get(key, _missing)
            # Bail out immediately on a missing key. The previous version
            # substituted `default` and kept navigating *into* it, which gave
            # wrong results whenever default was itself a dict or list.
            if data is _missing:
                return default
        elif isinstance(data, list) and isinstance(key, int):
            if 0 <= key < len(data):
                data = data[key]
            else:
                return default
        else:
            # Non-container (or non-int index into a list): path doesn't exist.
            return default
    return data
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def get_project_path():
    """Return the project root, taken to be the current working directory."""
    cwd = os.getcwd()
    return cwd
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def load_session_data():
    """Load session data from the project's `.claude_session` file.

    The file is preferred in JSON format; a legacy plain-text format
    (containing only the session id) is accepted as a fallback.

    Returns:
        dict of session data, or None if the file is absent or unreadable.
    """
    session_file = Path(get_project_path()) / ".claude_session"
    if not session_file.exists():
        return None
    try:
        # Read once and reuse: the previous version re-read the file in the
        # legacy fallback path for no benefit.
        content = session_file.read_text().strip()
    except (IOError, OSError) as e:
        logger.warning(f"Failed to read session file: {e}")
        return None
    try:
        # Try JSON format first
        return json.loads(content)
    except json.JSONDecodeError as e:
        logger.debug(f"JSON decode error, trying legacy format: {e}")
        # Fall back to legacy plain text format (just session_id)
        return {"session_id": content}
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def save_session_data(data: dict):
    """Persist session data as pretty-printed JSON in `.claude_session`."""
    target = Path(get_project_path()) / ".claude_session"
    try:
        payload = json.dumps(data, indent=2)
        target.write_text(payload)
    except (IOError, OSError) as e:
        # Non-fatal: the hook must never break Claude Code over a write error.
        logger.warning(f"Failed to save session data: {e}")
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def get_session_id():
    """Get or create session ID from environment or file."""
    # The CLAUDE_SESSION_ID environment variable wins over the on-disk file.
    env_sid = os.getenv("CLAUDE_SESSION_ID")
    if env_sid:
        return env_sid

    # Otherwise consult the per-project session file.
    session = load_session_data()
    if not session:
        return None
    return session.get("session_id")
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def call_memory_agent(skill_id: str, params: dict) -> Optional[dict]:
    """Invoke a skill on the memory agent over its A2A JSON-RPC endpoint.

    Returns the parsed artifact payload as a dict, or None on any failure
    (agent down, malformed response, non-JSON artifact text). Failures are
    only logged at debug level so the hook never disrupts Claude Code.
    """
    request_body = {
        "jsonrpc": "2.0",
        "id": "grounding-hook",
        "method": "tasks/send",
        "params": {
            "message": {"parts": [{"type": "text", "text": ""}]},
            "metadata": {
                "skill_id": skill_id,
                "params": params,
            },
        },
    }
    try:
        response = requests.post(
            f"{MEMORY_AGENT_URL}/a2a",
            json=request_body,
            timeout=API_TIMEOUT,
        )
        result = response.json()

        # Safely extract the artifact text using safe_get
        artifact_text = safe_get(result, "result", "artifacts", 0, "parts", 0, "text")
        if not artifact_text:
            return None
        try:
            return json.loads(artifact_text)
        except json.JSONDecodeError as e:
            logger.debug(f"Failed to parse artifact text as JSON for skill '{skill_id}': {e}")
            return None
    except requests.RequestException as e:
        # Silently fail - don't break Claude Code if memory agent is down
        logger.debug(f"Memory agent request failed for skill '{skill_id}': {e}")
        return None
    except json.JSONDecodeError as e:
        logger.debug(f"Failed to decode memory agent response for skill '{skill_id}': {e}")
        return None
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def format_grounding_context(context: dict) -> str:
    """Render the session grounding context as an injectable text block."""
    if not context or not context.get("success"):
        return ""

    grounding = context.get("grounding", {})
    out = ["[GROUNDING CONTEXT - VERIFY BEFORE RESPONDING]"]

    # What the session is currently working toward.
    goal = grounding.get("current_goal")
    if goal:
        out.append(f"CURRENT GOAL: {goal}")

    # Canonical names for entities referenced in this session.
    entity_map = grounding.get("entity_registry", {})
    if entity_map:
        out.append("ENTITY REGISTRY (use these exact references):")
        out.extend(f" - {name}: {ref}" for name, ref in list(entity_map.items())[:5])

    # Verified facts the model must not contradict.
    facts = grounding.get("anchors", [])
    if facts:
        out.append("ANCHORS (verified facts - DO NOT CONTRADICT):")
        out.extend(f" - {fact}" for fact in facts[:5])

    choices = grounding.get("decisions", [])
    if choices:
        out.append("RECENT DECISIONS:")
        out.extend(f" - {choice}" for choice in choices[:3])

    history = grounding.get("recent_events", [])
    if history:
        out.append("RECENT EVENTS:")
        out.extend(
            f" - [{item.get('type', '?')}] {item.get('summary', '')}"
            for item in history[:5]
        )

    # Flag possible contradictions (content clipped to 100 chars each).
    conflicts = grounding.get("contradictions", [])
    if conflicts:
        out.append("WARNING - POTENTIAL CONTRADICTIONS DETECTED:")
        out.extend(f" - {c.get('content', '')[:100]}" for c in conflicts[:3])

    open_questions = grounding.get("pending_questions", [])
    if open_questions:
        out.append("PENDING QUESTIONS:")
        out.extend(f" - {q}" for q in open_questions[:3])

    out.append("[/GROUNDING CONTEXT]")
    out.append("")  # trailing blank line separates this block from what follows

    return "\n".join(out)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def format_memory_md_context(memory_md: dict) -> str:
    """Wrap the MEMORY.md summary in CORE FACTS markers for injection."""
    # Nothing to inject when the file is absent or its summary is empty.
    if not memory_md or not memory_md.get("exists"):
        return ""
    summary = memory_md.get("summary", "")
    if not summary:
        return ""

    return "\n".join(
        ["[CORE FACTS from MEMORY.md]", summary, "[/CORE FACTS]", ""]
    )
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def format_daily_highlights(highlights: dict) -> str:
    """Format daily log highlights for injection.

    Args:
        highlights: Skill response expected to carry a "highlights" list.

    Returns:
        The formatted block, or "" when there is nothing to show.
    """
    # The original checked highlights.get("highlights") twice (once in the
    # guard, once after re-fetching); a single fetch covers both cases.
    if not highlights:
        return ""
    entries = highlights.get("highlights") or []
    if not entries:
        return ""

    lines = ["[TODAY'S HIGHLIGHTS from Daily Log]"]
    # Cap at five entries to keep the injected context small.
    for entry in entries[:5]:
        lines.append(f" - {entry}")
    lines.append("[/TODAY'S HIGHLIGHTS]")
    lines.append("")

    return "\n".join(lines)
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def format_curator_context(curator_summary: dict, curator_status: dict) -> str:
    """Render curator knowledge, pending reviews and health warnings."""
    if not curator_summary and not curator_status:
        return ""

    parts = ["[CURATOR CONTEXT]"]

    if curator_summary:
        # Curated knowledge relevant to the current prompt (max 10 lines).
        knowledge = curator_summary.get("context", "")
        if knowledge:
            parts.append("Relevant Knowledge:")
            for row in knowledge.split("\n")[:10]:
                if row.strip():
                    parts.append(f" {row}")

        # One-line summary of graph relationships, when available.
        graph = curator_summary.get("graph_context")
        if graph and graph.get("summary"):
            parts.extend(["", f"Graph: {graph['summary']}"])

        # Outstanding curation work awaiting review.
        reviews = curator_summary.get("pending_reviews", {})
        if reviews.get("total_pending", 0) > 0:
            parts.extend(["", "Pending Reviews:"])
            if reviews.get("duplicate_clusters", 0) > 0:
                parts.append(f" - {reviews['duplicate_clusters']} duplicate clusters")
            if reviews.get("suggested_links", 0) > 0:
                parts.append(f" - {reviews['suggested_links']} suggested links")
            if reviews.get("orphan_memories", 0) > 0:
                parts.append(f" - {reviews['orphan_memories']} orphan memories")

    if curator_status:
        # Surface graph-health warnings from the curator status report.
        orphans = curator_status.get("orphan_count", 0)
        ratio = curator_status.get("connection_ratio", 0)
        if orphans > 10:
            parts.append(f"Warning: {orphans} orphan memories need linking")
        if ratio < 0.5:
            parts.append(f"Note: Low graph connectivity ({ratio:.1%})")

    parts.append("[/CURATOR CONTEXT]")
    parts.append("")

    return "\n".join(parts)
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
def check_and_trigger_flush(session_id: str, project_path: str):
    """Ask the agent whether a memory flush is due and run it if so."""
    # First query whether any flush condition is met (event count, elapsed time).
    status = call_memory_agent("check_flush_needed", {"session_id": session_id})
    if not status or not status.get("flush_needed"):
        return

    why = status.get("reasons", [])
    logger.info(f"Flush needed: {', '.join(why)}")

    # Conditions met: run the pre-compaction flush for this project/session.
    outcome = call_memory_agent("pre_compaction_flush", {
        "project_path": project_path,
        "session_id": session_id,
    })

    if outcome and outcome.get("success"):
        logger.info(f"Flush completed: {outcome.get('file_path')}")
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
def main():
    """Main entry point for the hook.

    Gathers every available context source (MEMORY.md summary, daily-log
    highlights, session grounding, curator knowledge), concatenates the
    non-empty pieces and prints them to stdout, where Claude Code injects
    them into the conversation. Always exits 0 so the hook never blocks
    the user's prompt, even when the memory agent is unreachable.
    """
    project_path = get_project_path()
    session_id = get_session_id()

    # If no session, try to init one
    if not session_id:
        init_result = call_memory_agent("state_init_session", {
            "project_path": project_path
        })
        if init_result and init_result.get("session_id"):
            session_id = init_result["session_id"]
            # Save session data as JSON
            save_session_data({"session_id": session_id})

    if not session_id:
        # No session, no grounding - exit silently
        sys.exit(0)

    # ============================================================
    # MOLTBOT-INSPIRED: Check flush conditions
    # ============================================================
    check_and_trigger_flush(session_id, project_path)

    # ============================================================
    # MOLTBOT-INSPIRED: Load MEMORY.md summary
    # ============================================================
    memory_md = call_memory_agent("get_memory_md_summary", {
        "project_path": project_path
    })

    memory_md_context = format_memory_md_context(memory_md) if memory_md else ""

    # ============================================================
    # MOLTBOT-INSPIRED: Load today's daily log highlights
    # ============================================================
    daily_highlights = call_memory_agent("daily_log_highlights", {
        "project_path": project_path
    })

    daily_context = format_daily_highlights(daily_highlights) if daily_highlights else ""

    # ============================================================
    # ORIGINAL: Get grounding context
    # ============================================================
    context = call_memory_agent("context_refresh", {
        "session_id": session_id,
        "include_recent_events": 5,
        "include_state": True,
        "include_checkpoint": True,
        "check_contradictions": True
    })

    grounding_context = format_grounding_context(context) if context else ""

    # ============================================================
    # CURATOR: Get curated context and status
    # ============================================================
    # Get curator summary for current context (lightweight)
    curator_summary = None
    curator_status = None

    # Only fetch curator context if there's user input to contextualize
    # (the 10-char minimum skips trivial prompts).
    user_input = os.getenv("CLAUDE_USER_INPUT", "")
    if user_input and len(user_input) > 10:
        curator_summary = call_memory_agent("curator_get_summary", {
            "query": user_input[:500],  # Limit query length
            "project_path": project_path,
            "max_memories": 5,
            "include_graph": True
        })

    # Always get curator status for warnings
    curator_status = call_memory_agent("curator_get_status", {})

    curator_context = format_curator_context(curator_summary, curator_status)

    # Combine all context; each formatter already ends with a blank line,
    # so a simple newline join keeps the sections visually separated.
    output_parts = []

    if memory_md_context:
        output_parts.append(memory_md_context)

    if daily_context:
        output_parts.append(daily_context)

    if grounding_context:
        output_parts.append(grounding_context)

    if curator_context:
        output_parts.append(curator_context)

    if output_parts:
        print("\n".join(output_parts))

    sys.exit(0)
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
# Run only when invoked directly (by Claude Code's hook runner), not on import.
if __name__ == "__main__":
    main()
|