omni-cortex 1.17.1__py3-none-any.whl → 1.17.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omni_cortex/__init__.py +3 -0
- omni_cortex/_bundled/dashboard/backend/.env.example +12 -0
- omni_cortex/_bundled/dashboard/backend/backfill_summaries.py +280 -0
- omni_cortex/_bundled/dashboard/backend/chat_service.py +631 -0
- omni_cortex/_bundled/dashboard/backend/database.py +1773 -0
- omni_cortex/_bundled/dashboard/backend/image_service.py +552 -0
- omni_cortex/_bundled/dashboard/backend/logging_config.py +122 -0
- omni_cortex/_bundled/dashboard/backend/main.py +1888 -0
- omni_cortex/_bundled/dashboard/backend/models.py +472 -0
- omni_cortex/_bundled/dashboard/backend/project_config.py +170 -0
- omni_cortex/_bundled/dashboard/backend/project_scanner.py +164 -0
- omni_cortex/_bundled/dashboard/backend/prompt_security.py +111 -0
- omni_cortex/_bundled/dashboard/backend/pyproject.toml +23 -0
- omni_cortex/_bundled/dashboard/backend/security.py +104 -0
- omni_cortex/_bundled/dashboard/backend/uv.lock +1110 -0
- omni_cortex/_bundled/dashboard/backend/websocket_manager.py +104 -0
- omni_cortex/_bundled/hooks/post_tool_use.py +497 -0
- omni_cortex/_bundled/hooks/pre_tool_use.py +277 -0
- omni_cortex/_bundled/hooks/session_utils.py +186 -0
- omni_cortex/_bundled/hooks/stop.py +219 -0
- omni_cortex/_bundled/hooks/subagent_stop.py +120 -0
- omni_cortex/_bundled/hooks/user_prompt.py +220 -0
- omni_cortex/categorization/__init__.py +9 -0
- omni_cortex/categorization/auto_tags.py +166 -0
- omni_cortex/categorization/auto_type.py +165 -0
- omni_cortex/config.py +141 -0
- omni_cortex/dashboard.py +238 -0
- omni_cortex/database/__init__.py +24 -0
- omni_cortex/database/connection.py +137 -0
- omni_cortex/database/migrations.py +210 -0
- omni_cortex/database/schema.py +212 -0
- omni_cortex/database/sync.py +421 -0
- omni_cortex/decay/__init__.py +7 -0
- omni_cortex/decay/importance.py +147 -0
- omni_cortex/embeddings/__init__.py +35 -0
- omni_cortex/embeddings/local.py +442 -0
- omni_cortex/models/__init__.py +20 -0
- omni_cortex/models/activity.py +265 -0
- omni_cortex/models/agent.py +144 -0
- omni_cortex/models/memory.py +395 -0
- omni_cortex/models/relationship.py +206 -0
- omni_cortex/models/session.py +290 -0
- omni_cortex/resources/__init__.py +1 -0
- omni_cortex/search/__init__.py +22 -0
- omni_cortex/search/hybrid.py +197 -0
- omni_cortex/search/keyword.py +204 -0
- omni_cortex/search/ranking.py +127 -0
- omni_cortex/search/semantic.py +232 -0
- omni_cortex/server.py +360 -0
- omni_cortex/setup.py +284 -0
- omni_cortex/tools/__init__.py +13 -0
- omni_cortex/tools/activities.py +453 -0
- omni_cortex/tools/memories.py +536 -0
- omni_cortex/tools/sessions.py +311 -0
- omni_cortex/tools/utilities.py +477 -0
- omni_cortex/utils/__init__.py +13 -0
- omni_cortex/utils/formatting.py +282 -0
- omni_cortex/utils/ids.py +72 -0
- omni_cortex/utils/timestamps.py +129 -0
- omni_cortex/utils/truncation.py +111 -0
- {omni_cortex-1.17.1.dist-info → omni_cortex-1.17.3.dist-info}/METADATA +1 -1
- omni_cortex-1.17.3.dist-info/RECORD +86 -0
- omni_cortex-1.17.1.dist-info/RECORD +0 -26
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/.env.example +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/database.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/image_service.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/main.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/models.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/prompt_security.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/security.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/hooks/session_utils.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/hooks/stop.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.17.1.data → omni_cortex-1.17.3.data}/data/share/omni-cortex/hooks/user_prompt.py +0 -0
- {omni_cortex-1.17.1.dist-info → omni_cortex-1.17.3.dist-info}/WHEEL +0 -0
- {omni_cortex-1.17.1.dist-info → omni_cortex-1.17.3.dist-info}/entry_points.txt +0 -0
- {omni_cortex-1.17.1.dist-info → omni_cortex-1.17.3.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""WebSocket manager for real-time updates."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import Any
|
|
7
|
+
from uuid import uuid4
|
|
8
|
+
|
|
9
|
+
from fastapi import WebSocket
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class WebSocketManager:
    """Manages WebSocket connections and broadcasts.

    Access to the connection registry is serialized through an
    asyncio.Lock so concurrent connect/disconnect/broadcast calls from
    different tasks cannot corrupt the dict.
    """

    def __init__(self):
        # client_id -> live WebSocket
        self.connections: dict[str, WebSocket] = {}
        self._lock = asyncio.Lock()

    async def connect(self, websocket: WebSocket, client_id: str | None = None) -> str:
        """Accept a new WebSocket connection.

        Args:
            websocket: The incoming (not yet accepted) WebSocket.
            client_id: Optional stable identifier; a uuid4 string is
                generated when none is supplied.

        Returns:
            The client id under which the connection was registered.
        """
        await websocket.accept()
        client_id = client_id or str(uuid4())
        async with self._lock:
            self.connections[client_id] = websocket
        print(f"[WS] Client connected: {client_id} (total: {len(self.connections)})")
        return client_id

    async def disconnect(self, client_id: str):
        """Remove a WebSocket connection (no-op if already removed)."""
        async with self._lock:
            # pop() tolerates a double-disconnect race; bare `del` could
            # raise KeyError if another task removed the entry first.
            if self.connections.pop(client_id, None) is not None:
                print(f"[WS] Client disconnected: {client_id} (total: {len(self.connections)})")

    async def broadcast(self, event_type: str, data: dict[str, Any]):
        """Broadcast a message to all connected clients.

        Clients whose send fails are dropped from the registry.
        """
        if not self.connections:
            return

        message = json.dumps({
            "event_type": event_type,
            "data": data,
            "timestamp": datetime.now().isoformat(),
        }, default=str)

        async with self._lock:
            disconnected = []
            # Iterate a snapshot so registry mutations cannot break the loop.
            for client_id, websocket in list(self.connections.items()):
                try:
                    await websocket.send_text(message)
                except Exception as e:
                    print(f"[WS] Failed to send to {client_id}: {e}")
                    disconnected.append(client_id)

            # Clean up disconnected clients while still holding the lock so a
            # concurrent connect/disconnect cannot race these removals.
            for client_id in disconnected:
                self.connections.pop(client_id, None)

    async def send_to_client(self, client_id: str, event_type: str, data: dict[str, Any]):
        """Send a message to a specific client; drops the client on failure."""
        message = json.dumps({
            "event_type": event_type,
            "data": data,
            "timestamp": datetime.now().isoformat(),
        }, default=str)

        async with self._lock:
            websocket = self.connections.get(client_id)
            if websocket is None:
                return
            try:
                await websocket.send_text(message)
            except Exception as e:
                print(f"[WS] Failed to send to {client_id}: {e}")
                self.connections.pop(client_id, None)

    @property
    def connection_count(self) -> int:
        """Get the number of active connections."""
        return len(self.connections)

    # Typed broadcast methods (IndyDevDan pattern)
    async def broadcast_activity_logged(self, project: str, activity: dict[str, Any]):
        """Broadcast when a new activity is logged."""
        await self.broadcast("activity_logged", {
            "project": project,
            "activity": activity,
        })

    async def broadcast_session_updated(self, project: str, session: dict[str, Any]):
        """Broadcast when a session is updated."""
        await self.broadcast("session_updated", {
            "project": project,
            "session": session,
        })

    async def broadcast_stats_updated(self, project: str, stats: dict[str, Any]):
        """Broadcast when stats change (for charts/panels)."""
        await self.broadcast("stats_updated", {
            "project": project,
            "stats": stats,
        })
|
101
|
+
|
|
102
|
+
|
|
103
|
+
# Global manager instance (module-level singleton shared by importers).
manager = WebSocketManager()
|
|
@@ -0,0 +1,497 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""PostToolUse hook - logs tool result after execution.
|
|
3
|
+
|
|
4
|
+
This hook is called by Claude Code after each tool completes.
|
|
5
|
+
It logs the tool output, duration, and success/error status.
|
|
6
|
+
|
|
7
|
+
Hook configuration for settings.json:
|
|
8
|
+
{
|
|
9
|
+
"hooks": {
|
|
10
|
+
"PostToolUse": [
|
|
11
|
+
{
|
|
12
|
+
"type": "command",
|
|
13
|
+
"command": "python hooks/post_tool_use.py"
|
|
14
|
+
}
|
|
15
|
+
]
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import json
|
|
21
|
+
import re
|
|
22
|
+
import sys
|
|
23
|
+
import os
|
|
24
|
+
import sqlite3
|
|
25
|
+
from datetime import datetime, timezone
|
|
26
|
+
from pathlib import Path
|
|
27
|
+
from typing import Optional, Tuple
|
|
28
|
+
|
|
29
|
+
# Import shared session management
|
|
30
|
+
from session_utils import get_or_create_session
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# === Tool Timing Management ===
|
|
34
|
+
# Read tool start timestamps and calculate duration
|
|
35
|
+
|
|
36
|
+
def get_timing_file_path() -> Path:
    """Return the location of the tool-timing scratch file.

    The file lives under `<project>/.omni-cortex/`, where the project root
    comes from CLAUDE_PROJECT_DIR (falling back to the current directory).
    """
    root = Path(os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd()))
    return root / ".omni-cortex" / "tool_timing.json"
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def load_timing_data() -> dict:
    """Read the timing file, returning {} when missing or unreadable."""
    path = get_timing_file_path()
    if not path.exists():
        return {}
    try:
        with open(path, "r") as fh:
            return json.load(fh)
    except (json.JSONDecodeError, IOError):
        # Corrupt or unreadable file: treat as no timing data.
        return {}
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def save_timing_data(data: dict) -> None:
    """Persist timing data as JSON, creating the parent directory if needed."""
    path = get_timing_file_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w") as fh:
        json.dump(data, fh)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def get_tool_duration(tool_name: str, agent_id: Optional[str] = None) -> Tuple[Optional[int], Optional[str]]:
    """Get the duration for a tool execution and clean up.

    Looks up the start timestamp recorded by pre_tool_use under the key
    "<tool_name>_<agent_id or 'main'>", computes elapsed wall-clock
    milliseconds, and removes the consumed entry from the timing file.

    Args:
        tool_name: Name of the tool that finished
        agent_id: Optional agent ID

    Returns:
        Tuple of (duration_ms, activity_id) or (None, None) if not found
    """
    timing_data = load_timing_data()
    key = f"{tool_name}_{agent_id or 'main'}"

    if key not in timing_data:
        return None, None

    entry = timing_data[key]
    start_time_ms = entry.get("start_time_ms")
    activity_id = entry.get("activity_id")

    if not start_time_ms:
        # Entry exists but carries no start marker: duration is unknowable,
        # but the activity id (if any) is still useful to the caller.
        return None, activity_id

    # Calculate duration
    end_time_ms = int(datetime.now(timezone.utc).timestamp() * 1000)
    duration_ms = end_time_ms - start_time_ms

    # Remove the entry (tool call complete)
    del timing_data[key]
    save_timing_data(timing_data)

    return duration_ms, activity_id
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
# Patterns for sensitive field names that should be redacted
SENSITIVE_FIELD_PATTERNS = [
    r'(?i)(api[_-]?key|apikey)',
    r'(?i)(password|passwd|pwd)',
    r'(?i)(secret|token|credential)',
    r'(?i)(auth[_-]?token|access[_-]?token)',
    r'(?i)(private[_-]?key|ssh[_-]?key)',
]


def redact_sensitive_fields(data: dict) -> dict:
    """Redact sensitive fields from a dictionary for safe logging.

    Recursively processes nested dicts and lists; non-dict input is
    returned unchanged.
    """
    if not isinstance(data, dict):
        return data

    def _is_sensitive(name) -> bool:
        # A key is sensitive when any pattern matches anywhere within it.
        return any(re.search(p, str(name)) for p in SENSITIVE_FIELD_PATTERNS)

    redacted = {}
    for field, value in data.items():
        if _is_sensitive(field):
            redacted[field] = '[REDACTED]'
        elif isinstance(value, dict):
            redacted[field] = redact_sensitive_fields(value)
        elif isinstance(value, list):
            redacted[field] = [
                redact_sensitive_fields(entry) if isinstance(entry, dict) else entry
                for entry in value
            ]
        else:
            redacted[field] = value
    return redacted
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def get_db_path() -> Path:
    """Return `<project>/.omni-cortex/cortex.db` for the active project.

    The project root is taken from CLAUDE_PROJECT_DIR, defaulting to the
    current working directory.
    """
    root = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
    return Path(root, ".omni-cortex", "cortex.db")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def ensure_database(db_path: Path) -> sqlite3.Connection:
    """Ensure database exists and is initialized.

    Auto-creates the database file and a minimal `activities` schema when
    missing. This enables 'out of the box' functionality; the full schema
    is applied later by the MCP server.
    """
    db_path.parent.mkdir(parents=True, exist_ok=True)
    conn = sqlite3.connect(str(db_path))

    # Check if schema exists
    row = conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='activities'"
    ).fetchone()

    if row is None:
        # Apply minimal schema for activities (full schema applied by MCP)
        conn.executescript("""
            CREATE TABLE IF NOT EXISTS activities (
                id TEXT PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                timestamp TEXT NOT NULL,
                event_type TEXT NOT NULL,
                tool_name TEXT,
                tool_input TEXT,
                tool_output TEXT,
                duration_ms INTEGER,
                success INTEGER DEFAULT 1,
                error_message TEXT,
                project_path TEXT,
                file_path TEXT,
                metadata TEXT
            );
            CREATE INDEX IF NOT EXISTS idx_activities_timestamp ON activities(timestamp DESC);
            CREATE INDEX IF NOT EXISTS idx_activities_tool ON activities(tool_name);
        """)
        conn.commit()

    return conn
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def generate_id() -> str:
    """Return a unique activity ID: act_<epoch-ms>_<8 random hex chars>."""
    now_ms = int(datetime.now().timestamp() * 1000)
    suffix = os.urandom(4).hex()
    return "_".join(("act", str(now_ms), suffix))
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def truncate(text: str, max_length: int = 10000) -> str:
    """Clip `text` to at most `max_length` characters, marking the cut."""
    if len(text) > max_length:
        # Reserve room so the marker never pushes past max_length.
        return text[:max_length - 20] + "\n... [truncated]"
    return text
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def extract_skill_info(tool_input: dict, project_path: str) -> tuple:
    """Extract skill name and scope from Skill tool input.

    Scope is "project" when `<project>/.claude/commands/<skill>.md` exists,
    "universal" when `~/.claude/commands/<skill>.md` exists, and "unknown"
    otherwise.

    Returns:
        Tuple of (skill_name, command_scope); (None, None) when no skill is
        present or anything goes wrong.
    """
    try:
        skill = tool_input.get("skill", "")
        if not skill:
            return None, None

        # Probe file locations in priority order to determine the scope.
        candidates = (
            (Path(project_path) / ".claude" / "commands" / f"{skill}.md", "project"),
            (Path.home() / ".claude" / "commands" / f"{skill}.md", "universal"),
        )
        for md_file, scope in candidates:
            if md_file.exists():
                return skill, scope
        return skill, "unknown"
    except Exception:
        return None, None
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def extract_mcp_server(tool_name: str) -> Optional[str]:
    """Extract MCP server name from tool name pattern mcp__servername__toolname.

    Args:
        tool_name: Full tool name reported by the hook (may be None/empty).

    Returns:
        The server segment (e.g. "omni-cortex" for
        "mcp__omni-cortex__cortex_remember"), or None when the name does
        not follow the MCP naming pattern.  (Fixed the return annotation:
        the original claimed `str` but returns None on several paths.)
    """
    # Guard against None/empty names and non-MCP tools.
    if not tool_name or not tool_name.startswith("mcp__"):
        return None

    parts = tool_name.split("__")
    # Need at least the "mcp", server, and tool segments.
    if len(parts) >= 3:
        return parts[1]
    return None
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def ensure_analytics_columns(conn: sqlite3.Connection) -> None:
    """Ensure command analytics columns exist in activities table.

    Idempotent: only columns not already present are added.
    """
    cursor = conn.cursor()
    existing = {row[1] for row in cursor.execute("PRAGMA table_info(activities)")}

    required = {
        "command_name": "TEXT",
        "command_scope": "TEXT",
        "mcp_server": "TEXT",
        "skill_name": "TEXT",
        "summary": "TEXT",
        "summary_detail": "TEXT",
    }

    for name, sql_type in required.items():
        if name not in existing:
            cursor.execute(f"ALTER TABLE activities ADD COLUMN {name} {sql_type}")

    conn.commit()
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def generate_summary(tool_name: str, tool_input: dict, success: bool) -> tuple:
    """Generate short and detailed summaries for an activity.

    Args:
        tool_name: Name of the tool that ran (falsy yields (None, None)).
        tool_input: The tool's input payload (non-dicts treated as empty).
        success: When False, both summaries get a "[FAILED] " prefix.

    Returns:
        Tuple of (summary, summary_detail)
    """
    if not tool_name:
        return None, None

    input_data = tool_input if isinstance(tool_input, dict) else {}
    short = ""
    detail = ""

    if tool_name == "Read":
        path = input_data.get("file_path", "unknown")
        filename = Path(path).name if path else "file"
        # BUG FIX: the computed filename was previously discarded and a
        # hard-coded "(unknown)" placeholder was shown instead.
        short = f"Read file: {filename}"
        detail = f"Reading contents of {path}"

    elif tool_name == "Write":
        path = input_data.get("file_path", "unknown")
        filename = Path(path).name if path else "file"
        short = f"Write file: {filename}"
        detail = f"Writing/creating file at {path}"

    elif tool_name == "Edit":
        path = input_data.get("file_path", "unknown")
        filename = Path(path).name if path else "file"
        short = f"Edit file: {filename}"
        detail = f"Editing {path}"

    elif tool_name == "Bash":
        cmd = str(input_data.get("command", ""))[:50]
        short = f"Run: {cmd}..."
        detail = f"Executing: {input_data.get('command', 'unknown')}"

    elif tool_name == "Grep":
        pattern = input_data.get("pattern", "")
        short = f"Search: {pattern[:30]}"
        detail = f"Searching for pattern: {pattern}"

    elif tool_name == "Glob":
        pattern = input_data.get("pattern", "")
        short = f"Find files: {pattern[:30]}"
        detail = f"Finding files matching: {pattern}"

    elif tool_name == "Skill":
        skill = input_data.get("skill", "unknown")
        short = f"Run skill: /{skill}"
        detail = f"Executing slash command /{skill}"

    elif tool_name == "Task":
        desc = input_data.get("description", "task")
        short = f"Spawn agent: {desc[:30]}"
        detail = f"Launching sub-agent: {desc}"

    elif tool_name == "TodoWrite":
        todos = input_data.get("todos", [])
        count = len(todos) if isinstance(todos, list) else 0
        short = f"Update todo: {count} items"
        detail = f"Managing task list with {count} items"

    elif tool_name.startswith("mcp__"):
        parts = tool_name.split("__")
        server = parts[1] if len(parts) > 1 else "unknown"
        tool = parts[2] if len(parts) > 2 else tool_name
        short = f"MCP: {server}/{tool}"
        detail = f"Calling {tool} from MCP server {server}"

    else:
        short = f"Tool: {tool_name}"
        detail = f"Using tool {tool_name}"

    if not success:
        short = f"[FAILED] {short}"
        detail = f"[FAILED] {detail}"

    return short, detail
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def main() -> None:
    """Process PostToolUse hook.

    Reads the hook payload from stdin, classifies success/failure from the
    tool response, records one activity row in the project's SQLite DB,
    and always prints a JSON hook response and exits 0 so the hook can
    never block Claude Code.
    """
    try:
        # Read all input at once (more reliable than json.load on stdin)
        raw_input = sys.stdin.read()
        if not raw_input or not raw_input.strip():
            # Empty payload: nothing to log, emit an empty hook response.
            print(json.dumps({}))
            return

        input_data = json.loads(raw_input)

        # Extract data from hook input
        # Note: Claude Code uses 'tool_response' not 'tool_output'
        tool_name = input_data.get("tool_name")
        tool_input = input_data.get("tool_input", {})
        tool_response = input_data.get("tool_response", {})  # Correct field name
        agent_id = input_data.get("agent_id")

        # Determine success/error from response content
        # Claude Code doesn't send 'is_error' - we must detect from response
        is_error = False
        error_message = None

        if isinstance(tool_response, dict):
            # Check for explicit error field
            if "error" in tool_response:
                is_error = True
                error_message = str(tool_response.get("error", ""))[:500]

            # For Bash: check stderr or error patterns in stdout
            elif tool_name == "Bash":
                stderr = tool_response.get("stderr", "")
                stdout = tool_response.get("stdout", "")

                # Check stderr for content (excluding common non-errors)
                if stderr and stderr.strip():
                    # Filter out common non-error stderr output
                    stderr_lower = stderr.lower()
                    non_error_patterns = ["warning:", "note:", "info:"]
                    if not any(p in stderr_lower for p in non_error_patterns):
                        is_error = True
                        error_message = stderr[:500]

                # Check stdout for common error patterns
                if not is_error and stdout:
                    error_patterns = [
                        "command not found",
                        "No such file or directory",
                        "Permission denied",
                        "fatal:",
                        "error:",
                        "Error:",
                        "FAILED",
                        "Cannot find",
                        "not recognized",
                        "Exit code 1",
                    ]
                    stdout_check = stdout[:1000]  # Check first 1000 chars
                    for pattern in error_patterns:
                        if pattern in stdout_check:
                            is_error = True
                            error_message = f"Error pattern detected: {pattern}"
                            break

            # For Read: check for file errors
            elif tool_name == "Read":
                if "error" in str(tool_response).lower():
                    is_error = True
                    error_message = "File read error"

        # Legacy fallback: also check tool_output for backwards compatibility
        # NOTE(review): tool_output is never referenced below; the logged
        # output comes from tool_response only — confirm the fallback is
        # still needed.
        tool_output = tool_response if tool_response else input_data.get("tool_output", {})

        # Skip logging our own tools to prevent recursion
        # MCP tools are named like "mcp__omni-cortex__cortex_remember"
        if tool_name and ("cortex_" in tool_name or "omni-cortex" in tool_name):
            print(json.dumps({}))
            return

        project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())

        # Auto-initialize database (creates if not exists)
        db_path = get_db_path()
        conn = ensure_database(db_path)

        # Ensure analytics columns exist
        ensure_analytics_columns(conn)

        # Get or create session (auto-manages session lifecycle)
        session_id = get_or_create_session(conn, project_path)

        # Redact sensitive fields before logging
        safe_input = redact_sensitive_fields(tool_input) if isinstance(tool_input, dict) else tool_input
        safe_output = redact_sensitive_fields(tool_response) if isinstance(tool_response, dict) else tool_response

        # Extract command analytics
        skill_name = None
        command_scope = None
        mcp_server = None

        # Extract skill info from Skill tool calls
        if tool_name == "Skill" and isinstance(tool_input, dict):
            skill_name, command_scope = extract_skill_info(tool_input, project_path)

        # Extract MCP server from tool name (mcp__servername__toolname pattern)
        if tool_name and tool_name.startswith("mcp__"):
            mcp_server = extract_mcp_server(tool_name)

        # Generate summary for activity
        summary = None
        summary_detail = None
        try:
            summary, summary_detail = generate_summary(tool_name, safe_input, not is_error)
        except Exception:
            # Summaries are best-effort; never fail the hook over them.
            pass

        # Get tool duration from pre_tool_use timing data
        duration_ms, _ = get_tool_duration(tool_name, agent_id)

        # Insert activity record with analytics columns and duration
        cursor = conn.cursor()
        cursor.execute(
            """
            INSERT INTO activities (
                id, session_id, agent_id, timestamp, event_type,
                tool_name, tool_input, tool_output, duration_ms, success, error_message, project_path,
                skill_name, command_scope, mcp_server, summary, summary_detail
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                generate_id(),
                session_id,
                agent_id,
                datetime.now(timezone.utc).isoformat(),
                "post_tool_use",
                tool_name,
                truncate(json.dumps(safe_input, default=str)),
                truncate(json.dumps(safe_output, default=str)),
                duration_ms,
                0 if is_error else 1,
                error_message,
                project_path,
                skill_name,
                command_scope,
                mcp_server,
                summary,
                summary_detail,
            ),
        )
        conn.commit()
        conn.close()

        # Return empty response (no modification)
        print(json.dumps({}))

    except Exception as e:
        # Hooks should never block - log error but continue
        print(json.dumps({"systemMessage": f"Cortex post_tool_use: {e}"}))

    # Always exit 0 so Claude Code never treats the hook as a failure.
    sys.exit(0)
|
|
494
|
+
|
|
495
|
+
|
|
496
|
+
# Script entry point: invoked by Claude Code as a PostToolUse hook command.
if __name__ == "__main__":
    main()
|