claude-memory-agent 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +107 -0
- package/README.md +200 -0
- package/agent_card.py +512 -0
- package/bin/cli.js +181 -0
- package/bin/postinstall.js +216 -0
- package/config.py +104 -0
- package/dashboard.html +2689 -0
- package/hooks/README.md +196 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/hooks/auto-detect-response.py +348 -0
- package/hooks/auto_capture.py +255 -0
- package/hooks/detect-correction.py +173 -0
- package/hooks/grounding-hook.py +348 -0
- package/hooks/log-tool-use.py +234 -0
- package/hooks/log-user-request.py +208 -0
- package/hooks/pre-tool-decision.py +218 -0
- package/hooks/problem-detector.py +343 -0
- package/hooks/session_end.py +192 -0
- package/hooks/session_start.py +227 -0
- package/install.py +887 -0
- package/main.py +2859 -0
- package/manager.py +997 -0
- package/package.json +55 -0
- package/requirements.txt +8 -0
- package/run_server.py +136 -0
- package/services/__init__.py +50 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/services/agent_registry.py +753 -0
- package/services/auth.py +331 -0
- package/services/auto_inject.py +250 -0
- package/services/claude_md_sync.py +275 -0
- package/services/cleanup.py +667 -0
- package/services/compaction_flush.py +447 -0
- package/services/confidence.py +301 -0
- package/services/daily_log.py +333 -0
- package/services/database.py +2485 -0
- package/services/embeddings.py +358 -0
- package/services/insights.py +632 -0
- package/services/llm_analyzer.py +595 -0
- package/services/memory_md_sync.py +409 -0
- package/services/retry_queue.py +453 -0
- package/services/timeline.py +579 -0
- package/services/vector_index.py +398 -0
- package/services/websocket.py +257 -0
- package/skills/__init__.py +6 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/skills/admin.py +469 -0
- package/skills/checkpoint.py +198 -0
- package/skills/claude_md.py +363 -0
- package/skills/cleanup.py +241 -0
- package/skills/grounding.py +801 -0
- package/skills/insights.py +231 -0
- package/skills/natural_language.py +277 -0
- package/skills/retrieve.py +67 -0
- package/skills/search.py +213 -0
- package/skills/state.py +182 -0
- package/skills/store.py +179 -0
- package/skills/summarize.py +588 -0
- package/skills/timeline.py +387 -0
- package/skills/verification.py +391 -0
- package/start_daemon.py +155 -0
- package/test_automation.py +221 -0
- package/test_complete.py +338 -0
- package/test_full.py +322 -0
- package/update_system.py +817 -0
- package/verify_db.py +134 -0
|
@@ -0,0 +1,348 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Grounding Hook for Claude Code - Automatic Context Injection
|
|
4
|
+
|
|
5
|
+
This script is called by Claude Code's UserPromptSubmit hook.
|
|
6
|
+
It fetches the current session context and outputs it to stdout,
|
|
7
|
+
which Claude Code automatically injects into Claude's context.
|
|
8
|
+
|
|
9
|
+
This is the REAL anti-hallucination layer - automatic, not relying on Claude to call tools.
|
|
10
|
+
|
|
11
|
+
Moltbot-inspired additions:
|
|
12
|
+
- Checks flush conditions (events > 50 or time > 30min)
|
|
13
|
+
- Loads MEMORY.md content into grounding context
|
|
14
|
+
- Loads today's daily log highlights
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import os
|
|
18
|
+
import sys
|
|
19
|
+
import json
|
|
20
|
+
import logging
|
|
21
|
+
import requests
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
from typing import Any, Optional
|
|
24
|
+
|
|
25
|
+
# Configure logging to stderr (important for Claude Code hooks: stdout is
# reserved for the injected context, so diagnostics must not pollute it)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    stream=sys.stderr
)
logger = logging.getLogger(__name__)

# Configuration from environment
# Base URL of the memory agent HTTP API (the A2A endpoint lives at /a2a).
MEMORY_AGENT_URL = os.getenv("MEMORY_AGENT_URL", "http://localhost:8102")
# Per-request timeout in seconds for calls to the memory agent.
API_TIMEOUT = int(os.getenv("API_TIMEOUT", "30"))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def safe_get(data: Any, *keys, default: Any = None) -> Any:
    """Walk a nested dict/list structure along *keys*, yielding *default*
    whenever the path cannot be followed.

    Each key is a dict key (for dicts) or an int index (for lists).

    Example:
        safe_get(result, "result", "artifacts", 0, "parts", 0, "text")
    """
    current = data
    for step in keys:
        if current is None:
            return default
        if isinstance(current, dict):
            current = current.get(step, default)
            continue
        if isinstance(current, list) and isinstance(step, int) and 0 <= step < len(current):
            current = current[step]
            continue
        # Wrong container type or index out of range: path is broken.
        return default
    return current
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def get_project_path():
    """Return the project root, taken to be the current working directory."""
    cwd = os.getcwd()
    return cwd
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def load_session_data():
    """Load session data from the project's .claude_session file.

    Supports two on-disk formats:
      * JSON object (current format), returned as-is.
      * Legacy plain text holding just the session id, wrapped as
        {"session_id": <content>}.

    Returns None when the file is missing or unreadable.
    """
    session_file = Path(get_project_path()) / ".claude_session"
    if not session_file.exists():
        return None
    # Read once and reuse the content for both format attempts (the
    # previous version re-read the file for the legacy fallback).
    try:
        content = session_file.read_text().strip()
    except (IOError, OSError) as e:
        logger.warning(f"Failed to read session file: {e}")
        return None
    try:
        return json.loads(content)
    except json.JSONDecodeError as e:
        # Fall back to legacy plain-text format (file holds only the session id).
        logger.debug(f"JSON decode error, treating as legacy format: {e}")
        return {"session_id": content}
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def save_session_data(data: dict):
    """Persist session *data* as pretty-printed JSON in .claude_session."""
    target = Path(get_project_path()) / ".claude_session"
    try:
        target.write_text(json.dumps(data, indent=2))
    except (IOError, OSError) as err:
        logger.warning(f"Failed to save session data: {err}")
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def get_session_id():
    """Resolve the session id: CLAUDE_SESSION_ID env var wins, then the session file."""
    env_id = os.getenv("CLAUDE_SESSION_ID")
    if env_id:
        return env_id

    # Fall back to the per-project session file.
    data = load_session_data()
    if not data:
        return None
    return data.get("session_id")
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def call_memory_agent(skill_id: str, params: dict) -> Optional[dict]:
    """Invoke a skill on the memory agent via its A2A JSON-RPC endpoint.

    Returns the JSON payload parsed from the first artifact part, or None
    on any failure (agent down, HTTP error, malformed response). Failures
    are logged at debug level so a missing agent never breaks Claude Code.
    """
    try:
        response = requests.post(
            f"{MEMORY_AGENT_URL}/a2a",
            json={
                "jsonrpc": "2.0",
                "id": "grounding-hook",
                "method": "tasks/send",
                "params": {
                    "message": {"parts": [{"type": "text", "text": ""}]},
                    "metadata": {
                        "skill_id": skill_id,
                        "params": params
                    }
                }
            },
            timeout=API_TIMEOUT
        )
        # Treat HTTP error statuses as failures instead of trying to parse
        # an error page as a JSON-RPC result. HTTPError is a subclass of
        # RequestException, so it is handled by the except below.
        response.raise_for_status()
        result = response.json()

        # Safely extract the artifact text using safe_get
        artifact_text = safe_get(result, "result", "artifacts", 0, "parts", 0, "text")
        if artifact_text:
            try:
                return json.loads(artifact_text)
            except json.JSONDecodeError as e:
                logger.debug(f"Failed to parse artifact text as JSON for skill '{skill_id}': {e}")
                return None
        return None

    except requests.RequestException as e:
        # Silently fail - don't break Claude Code if memory agent is down
        logger.debug(f"Memory agent request failed for skill '{skill_id}': {e}")
        return None
    except json.JSONDecodeError as e:
        logger.debug(f"Failed to decode memory agent response for skill '{skill_id}': {e}")
        return None
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def format_grounding_context(context: dict) -> str:
    """Render the grounding payload returned by the memory agent as a
    bracketed text section ready for injection; "" when unavailable."""
    if not context or not context.get("success"):
        return ""

    grounding = context.get("grounding", {})
    out = ["[GROUNDING CONTEXT - VERIFY BEFORE RESPONDING]"]

    goal = grounding.get("current_goal")
    if goal:
        out.append(f"CURRENT GOAL: {goal}")

    registry = grounding.get("entity_registry", {})
    if registry:
        out.append("ENTITY REGISTRY (use these exact references):")
        out.extend(f" - {k}: {v}" for k, v in list(registry.items())[:5])

    anchors = grounding.get("anchors", [])
    if anchors:
        out.append("ANCHORS (verified facts - DO NOT CONTRADICT):")
        out.extend(f" - {a}" for a in anchors[:5])

    decisions = grounding.get("decisions", [])
    if decisions:
        out.append("RECENT DECISIONS:")
        out.extend(f" - {d}" for d in decisions[:3])

    events = grounding.get("recent_events", [])
    if events:
        out.append("RECENT EVENTS:")
        out.extend(
            f" - [{ev.get('type', '?')}] {ev.get('summary', '')}" for ev in events[:5]
        )

    contradictions = grounding.get("contradictions", [])
    if contradictions:
        out.append("WARNING - POTENTIAL CONTRADICTIONS DETECTED:")
        out.extend(f" - {c.get('content', '')[:100]}" for c in contradictions[:3])

    pending = grounding.get("pending_questions", [])
    if pending:
        out.append("PENDING QUESTIONS:")
        out.extend(f" - {q}" for q in pending[:3])

    out.append("[/GROUNDING CONTEXT]")
    out.append("")  # trailing blank line after the section

    return "\n".join(out)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def format_memory_md_context(memory_md: dict) -> str:
    """Render the MEMORY.md summary as a bracketed section; "" if absent/empty."""
    if not memory_md or not memory_md.get("exists"):
        return ""

    summary = memory_md.get("summary", "")
    if not summary:
        return ""

    return "\n".join(["[CORE FACTS from MEMORY.md]", summary, "[/CORE FACTS]", ""])
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def format_daily_highlights(highlights: dict) -> str:
    """Render up to five daily-log highlight entries; "" when none exist."""
    entries = (highlights or {}).get("highlights") or []
    if not entries:
        return ""

    block = ["[TODAY'S HIGHLIGHTS from Daily Log]"]
    block.extend(f" - {entry}" for entry in entries[:5])
    block.append("[/TODAY'S HIGHLIGHTS]")
    block.append("")
    return "\n".join(block)
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def check_and_trigger_flush(session_id: str, project_path: str):
    """Ask the agent whether a flush is due and, if so, run it.

    The agent decides (e.g. by event count / elapsed time); this hook only
    orchestrates the two skill calls and logs the outcome.
    """
    status = call_memory_agent("check_flush_needed", {"session_id": session_id})
    if not status or not status.get("flush_needed"):
        return

    logger.info(f"Flush needed: {', '.join(status.get('reasons', []))}")

    outcome = call_memory_agent("pre_compaction_flush", {
        "project_path": project_path,
        "session_id": session_id
    })
    if outcome and outcome.get("success"):
        logger.info(f"Flush completed: {outcome.get('file_path')}")
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def main():
    """Hook entry point: gather context and print it to stdout.

    Claude Code injects whatever this hook prints into the model's context,
    so stdout must contain only the grounding text (diagnostics go to the
    stderr logger). Every path exits 0 so a failure here never blocks the
    user's prompt.
    """
    project_path = get_project_path()
    session_id = get_session_id()

    # If no session, try to init one
    if not session_id:
        init_result = call_memory_agent("state_init_session", {
            "project_path": project_path
        })
        if init_result and init_result.get("session_id"):
            session_id = init_result["session_id"]
            # Save session data as JSON so sibling hooks can reuse the id
            save_session_data({"session_id": session_id})

    if not session_id:
        # No session, no grounding - exit silently
        sys.exit(0)

    # ============================================================
    # MOLTBOT-INSPIRED: Check flush conditions
    # ============================================================
    check_and_trigger_flush(session_id, project_path)

    # ============================================================
    # MOLTBOT-INSPIRED: Load MEMORY.md summary
    # ============================================================
    memory_md = call_memory_agent("get_memory_md_summary", {
        "project_path": project_path
    })

    memory_md_context = format_memory_md_context(memory_md) if memory_md else ""

    # ============================================================
    # MOLTBOT-INSPIRED: Load today's daily log highlights
    # ============================================================
    daily_highlights = call_memory_agent("daily_log_highlights", {
        "project_path": project_path
    })

    daily_context = format_daily_highlights(daily_highlights) if daily_highlights else ""

    # ============================================================
    # ORIGINAL: Get grounding context
    # ============================================================
    context = call_memory_agent("context_refresh", {
        "session_id": session_id,
        "include_recent_events": 5,
        "include_state": True,
        "include_checkpoint": True,
        "check_contradictions": True
    })

    grounding_context = format_grounding_context(context) if context else ""

    # Combine all context sections; skip any that came back empty
    output_parts = []

    if memory_md_context:
        output_parts.append(memory_md_context)

    if daily_context:
        output_parts.append(daily_context)

    if grounding_context:
        output_parts.append(grounding_context)

    if output_parts:
        print("\n".join(output_parts))

    sys.exit(0)
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
# Hook entry point: Claude Code executes this script directly.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Tool Use Logger Hook for Claude Code
|
|
4
|
+
|
|
5
|
+
This script logs tool calls to the session timeline.
|
|
6
|
+
Called via PostToolUse hook - logs the action after it completes.
|
|
7
|
+
|
|
8
|
+
Reads current_request_id from .claude_session to link actions to the root user request.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import sys
|
|
13
|
+
import json
|
|
14
|
+
import re
|
|
15
|
+
import logging
|
|
16
|
+
import requests
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Optional
|
|
19
|
+
|
|
20
|
+
# Configure logging to stderr (important for Claude Code hooks: stdout is
# parsed by Claude Code, so diagnostics must go to stderr)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    stream=sys.stderr
)
logger = logging.getLogger(__name__)

# Configuration from environment
MEMORY_AGENT_URL = os.getenv("MEMORY_AGENT_URL", "http://localhost:8102")
API_TIMEOUT = int(os.getenv("API_TIMEOUT", "30"))

# Tools that represent meaningful actions to track.
# Maps the Claude Code tool name to the verb used in timeline summaries.
TRACKABLE_TOOLS = {
    "Edit": "edited file",
    "Write": "wrote file",
    "Bash": "ran command",
    "Read": "read file",
    "Grep": "searched code",
    "Glob": "searched files"
}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def load_session_data():
    """Load session data from .claude_session in the working directory.

    Supports the current JSON format and the legacy plain-text format
    (file content is just the session id, wrapped as {"session_id": ...}).
    Returns None when the file is missing or unreadable.
    """
    session_file = Path(os.getcwd()) / ".claude_session"
    if not session_file.exists():
        return None
    # Read once and reuse the content for both format attempts (the
    # previous version re-read the file for the legacy fallback).
    try:
        content = session_file.read_text().strip()
    except (IOError, OSError) as e:
        logger.warning(f"Failed to read session file: {e}")
        return None
    try:
        return json.loads(content)
    except json.JSONDecodeError as e:
        # Legacy fallback: treat the whole file as a bare session id.
        logger.debug(f"JSON decode error, treating as legacy format: {e}")
        return {"session_id": content}
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def save_session_data(data: dict):
    """Write session *data* to .claude_session as indented JSON; log failures."""
    target = Path(os.getcwd()) / ".claude_session"
    try:
        target.write_text(json.dumps(data, indent=2))
    except (IOError, OSError) as err:
        logger.warning(f"Failed to save session data: {err}")
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def get_session_id():
    """Return the session id stored in the session file, or None."""
    data = load_session_data()
    if not data:
        return None
    return data.get("session_id")
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def call_memory_agent(skill_id: str, params: dict) -> Optional[dict]:
    """POST a skill invocation to the memory agent's A2A JSON-RPC endpoint.

    Returns the raw JSON-RPC response dict, or None on any failure so a
    missing agent never breaks the hook.
    """
    try:
        response = requests.post(
            f"{MEMORY_AGENT_URL}/a2a",
            json={
                "jsonrpc": "2.0",
                "id": "tool-hook",
                "method": "tasks/send",
                "params": {
                    "message": {"parts": [{"type": "text", "text": ""}]},
                    "metadata": {
                        "skill_id": skill_id,
                        "params": params
                    }
                }
            },
            timeout=API_TIMEOUT
        )
        # Surface HTTP error statuses as RequestException instead of trying
        # to JSON-decode an error page.
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        logger.debug(f"Memory agent request failed for skill '{skill_id}': {e}")
        return None
    except json.JSONDecodeError as e:
        logger.debug(f"Failed to decode memory agent response for skill '{skill_id}': {e}")
        return None
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def extract_entities(tool_name: str, tool_input: dict) -> Optional[dict]:
    """Extract entity references (files, search patterns) from tool input.

    Returns a dict like {"files": [...]} and/or {"patterns": [...]}, or
    None when nothing recognizable was found.
    """
    entities = {}

    if tool_name in ["Edit", "Write", "Read"]:
        file_path = tool_input.get("file_path") or tool_input.get("path")
        if file_path:
            entities["files"] = [file_path]

    if tool_name == "Bash":
        command = tool_input.get("command", "")
        # Extract file paths from command (simple heuristic).
        # BUG FIX: the extension alternation must be non-capturing — with a
        # capturing group, re.findall returns only the extensions ("py"),
        # not the full matched paths.
        paths = re.findall(r'[\w\-./\\]+\.(?:py|js|ts|json|md|yaml|yml)', command)
        if paths:
            entities["files"] = paths

    if tool_name == "Grep":
        pattern = tool_input.get("pattern")
        if pattern:
            entities["patterns"] = [pattern]

    return entities if entities else None
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def main():
    """Log the completed tool use to the session timeline.

    Reads the PostToolUse hook payload from stdin, builds a short summary,
    and records an "action" event linked into the causal chain
    (user_request -> decision -> action). Always exits 0 so this hook can
    never block Claude Code.
    """
    # Read hook input from stdin
    try:
        hook_input = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        logger.debug(f"Failed to parse hook input JSON: {e}")
        sys.exit(0)
    except (IOError, OSError) as e:
        logger.debug(f"Failed to read stdin: {e}")
        sys.exit(0)

    tool_name = hook_input.get("tool_name") or hook_input.get("tool")
    if not tool_name:
        sys.exit(0)

    # Only track meaningful tools
    if tool_name not in TRACKABLE_TOOLS:
        sys.exit(0)

    # Load session data (includes current_request_id for causal chain)
    session_data = load_session_data()
    if not session_data:
        sys.exit(0)

    session_id = session_data.get("session_id")
    if not session_id:
        sys.exit(0)

    # Get the current request ID for causal chain linking
    root_event_id = session_data.get("current_request_id")

    # Get decision event ID (from PreToolUse hook) for proper chain linking
    # Chain: user_request -> decision -> action
    decision_event_id = session_data.get("current_decision_id")
    pending_tool = session_data.get("pending_tool")

    tool_input = hook_input.get("tool_input") or hook_input.get("input") or {}
    tool_output = hook_input.get("tool_output") or hook_input.get("output") or ""

    # Build summary
    action_verb = TRACKABLE_TOOLS.get(tool_name, "used tool")

    if tool_name in ["Edit", "Write", "Read"]:
        file_path = tool_input.get("file_path") or tool_input.get("path") or "unknown"
        # Get just filename
        filename = Path(file_path).name if file_path else "unknown"
        # BUG FIX: previously emitted the literal "(unknown)" and discarded
        # the filename computed above.
        summary = f"{action_verb}: {filename}"
    elif tool_name == "Bash":
        command = tool_input.get("command", "")[:50]
        summary = f"{action_verb}: {command}"
    elif tool_name == "Grep":
        pattern = tool_input.get("pattern", "")[:30]
        summary = f"{action_verb} for: {pattern}"
    elif tool_name == "Glob":
        pattern = tool_input.get("pattern", "")[:30]
        summary = f"{action_verb}: {pattern}"
    else:
        summary = f"{action_verb}"

    # Check if successful (heuristic: error markers in the text output)
    success = True
    if isinstance(tool_output, str):
        if "error" in tool_output.lower() or "failed" in tool_output.lower():
            success = False

    # Extract entities
    entities = extract_entities(tool_name, tool_input)

    # Log to timeline with causal chain linking
    log_params = {
        "session_id": session_id,
        "event_type": "action",
        "summary": summary[:200],
        "details": json.dumps({"tool": tool_name, "input": tool_input})[:500] if tool_input else None,
        "entities": entities,
        "outcome": "success" if success else "failed",
        "project_path": os.getcwd()
    }

    # Add causal chain links
    # Chain: user_request -> decision -> action
    if root_event_id:
        log_params["root_event_id"] = root_event_id

    # Link to decision event if this is the tool that was pre-logged
    if decision_event_id and pending_tool == tool_name:
        log_params["parent_event_id"] = decision_event_id
        # Clear the pending decision after linking
        session_data.pop("current_decision_id", None)
        session_data.pop("pending_tool", None)
        save_session_data(session_data)
    elif root_event_id:
        log_params["parent_event_id"] = root_event_id

    call_memory_agent("timeline_log", log_params)

    sys.exit(0)
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
# Hook entry point: Claude Code executes this script directly.
if __name__ == "__main__":
    main()
|