omni-cortex 1.3.0__py3-none-any.whl → 1.11.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/.env.example +12 -0
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +280 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +19 -10
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/database.py +97 -18
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/image_service.py +21 -12
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +34 -4
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/main.py +390 -13
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/models.py +64 -12
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/prompt_security.py +111 -0
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/security.py +104 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +24 -2
- omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/post_tool_use.py +429 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/pre_tool_use.py +52 -2
- omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/session_utils.py +186 -0
- {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/METADATA +237 -8
- omni_cortex-1.11.3.dist-info/RECORD +25 -0
- omni_cortex-1.3.0.data/data/share/omni-cortex/hooks/post_tool_use.py +0 -160
- omni_cortex-1.3.0.dist-info/RECORD +0 -20
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/stop.py +0 -0
- {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/WHEEL +0 -0
- {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/entry_points.txt +0 -0
- {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
"""Prompt injection protection for Omni-Cortex."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
import logging
|
|
5
|
+
from html import escape as html_escape
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger(__name__)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def xml_escape(text: str) -> str:
|
|
12
|
+
"""Escape text for safe inclusion in XML-structured prompts.
|
|
13
|
+
|
|
14
|
+
Converts special characters to prevent prompt injection via
|
|
15
|
+
XML/HTML-like delimiters.
|
|
16
|
+
"""
|
|
17
|
+
return html_escape(text, quote=True)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def build_safe_prompt(
|
|
21
|
+
system_instruction: str,
|
|
22
|
+
user_data: dict[str, str],
|
|
23
|
+
user_question: str
|
|
24
|
+
) -> str:
|
|
25
|
+
"""Build a prompt with clear instruction/data separation.
|
|
26
|
+
|
|
27
|
+
Uses XML tags to separate trusted instructions from untrusted data,
|
|
28
|
+
making it harder for injected content to be interpreted as instructions.
|
|
29
|
+
|
|
30
|
+
Args:
|
|
31
|
+
system_instruction: Trusted system prompt (not escaped)
|
|
32
|
+
user_data: Dict of data sections to include (escaped)
|
|
33
|
+
user_question: User's question (escaped)
|
|
34
|
+
|
|
35
|
+
Returns:
|
|
36
|
+
Safely structured prompt string
|
|
37
|
+
"""
|
|
38
|
+
parts = [system_instruction, ""]
|
|
39
|
+
|
|
40
|
+
# Add data sections with XML escaping
|
|
41
|
+
for section_name, content in user_data.items():
|
|
42
|
+
if content:
|
|
43
|
+
parts.append(f"<{section_name}>")
|
|
44
|
+
parts.append(xml_escape(content))
|
|
45
|
+
parts.append(f"</{section_name}>")
|
|
46
|
+
parts.append("")
|
|
47
|
+
|
|
48
|
+
# Add user question
|
|
49
|
+
parts.append("<user_question>")
|
|
50
|
+
parts.append(xml_escape(user_question))
|
|
51
|
+
parts.append("</user_question>")
|
|
52
|
+
|
|
53
|
+
return "\n".join(parts)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# Known prompt injection patterns
|
|
57
|
+
INJECTION_PATTERNS = [
|
|
58
|
+
(r'(?i)(ignore|disregard|forget)\s+(all\s+)?(previous|prior|above)\s+instructions?',
|
|
59
|
+
'instruction override attempt'),
|
|
60
|
+
(r'(?i)(new\s+)?system\s+(prompt|instruction|message)',
|
|
61
|
+
'system prompt manipulation'),
|
|
62
|
+
(r'(?i)you\s+(must|should|will|are\s+required\s+to)\s+now',
|
|
63
|
+
'imperative command injection'),
|
|
64
|
+
(r'(?i)(hidden|secret|special)\s+instruction',
|
|
65
|
+
'hidden instruction claim'),
|
|
66
|
+
(r'(?i)\[/?system\]|\[/?inst\]|<\/?system>|<\/?instruction>',
|
|
67
|
+
'fake delimiter injection'),
|
|
68
|
+
(r'(?i)bypass|jailbreak|DAN|GODMODE',
|
|
69
|
+
'known jailbreak signature'),
|
|
70
|
+
]
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def detect_injection_patterns(content: str) -> list[str]:
|
|
74
|
+
"""Detect potential prompt injection patterns in content.
|
|
75
|
+
|
|
76
|
+
Returns list of detected patterns (empty if clean).
|
|
77
|
+
"""
|
|
78
|
+
detected = []
|
|
79
|
+
for pattern, description in INJECTION_PATTERNS:
|
|
80
|
+
if re.search(pattern, content):
|
|
81
|
+
detected.append(description)
|
|
82
|
+
|
|
83
|
+
return detected
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def sanitize_memory_content(content: str, warn_on_detection: bool = True) -> tuple[str, list[str]]:
|
|
87
|
+
"""Sanitize memory content and detect injection attempts.
|
|
88
|
+
|
|
89
|
+
Args:
|
|
90
|
+
content: Raw memory content
|
|
91
|
+
warn_on_detection: If True, log warnings for detected patterns
|
|
92
|
+
|
|
93
|
+
Returns:
|
|
94
|
+
Tuple of (sanitized_content, list_of_detected_patterns)
|
|
95
|
+
"""
|
|
96
|
+
detected = detect_injection_patterns(content)
|
|
97
|
+
|
|
98
|
+
if detected and warn_on_detection:
|
|
99
|
+
logger.warning(f"Potential injection patterns detected: {detected}")
|
|
100
|
+
|
|
101
|
+
# Content is still returned - we sanitize via XML escaping when used in prompts
|
|
102
|
+
return content, detected
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def sanitize_context_data(data: str) -> str:
|
|
106
|
+
"""Escape context data for safe inclusion in prompts.
|
|
107
|
+
|
|
108
|
+
This is the primary defense - all user-supplied data should be
|
|
109
|
+
escaped before inclusion in prompts to prevent injection.
|
|
110
|
+
"""
|
|
111
|
+
return xml_escape(data)
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""Security utilities for Omni-Cortex Dashboard."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import re
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class PathValidator:
|
|
10
|
+
"""Validate and sanitize file paths to prevent traversal attacks."""
|
|
11
|
+
|
|
12
|
+
# Pattern for valid omni-cortex database paths
|
|
13
|
+
VALID_DB_PATTERN = re.compile(r'^.*[/\\]\.omni-cortex[/\\]cortex\.db$')
|
|
14
|
+
GLOBAL_DB_PATTERN = re.compile(r'^.*[/\\]\.omni-cortex[/\\]global\.db$')
|
|
15
|
+
|
|
16
|
+
@staticmethod
|
|
17
|
+
def is_valid_project_db(path: str) -> bool:
|
|
18
|
+
"""Check if path is a valid omni-cortex project database."""
|
|
19
|
+
try:
|
|
20
|
+
resolved = Path(path).resolve()
|
|
21
|
+
path_str = str(resolved)
|
|
22
|
+
|
|
23
|
+
# Must match expected patterns
|
|
24
|
+
if PathValidator.VALID_DB_PATTERN.match(path_str):
|
|
25
|
+
return resolved.exists() and resolved.is_file()
|
|
26
|
+
if PathValidator.GLOBAL_DB_PATTERN.match(path_str):
|
|
27
|
+
return resolved.exists() and resolved.is_file()
|
|
28
|
+
|
|
29
|
+
return False
|
|
30
|
+
except (ValueError, OSError):
|
|
31
|
+
return False
|
|
32
|
+
|
|
33
|
+
@staticmethod
|
|
34
|
+
def validate_project_path(path: str) -> Path:
|
|
35
|
+
"""Validate and return resolved path, or raise ValueError."""
|
|
36
|
+
if not PathValidator.is_valid_project_db(path):
|
|
37
|
+
raise ValueError(f"Invalid project database path: {path}")
|
|
38
|
+
return Path(path).resolve()
|
|
39
|
+
|
|
40
|
+
@staticmethod
|
|
41
|
+
def is_safe_static_path(base_dir: Path, requested_path: str) -> Optional[Path]:
|
|
42
|
+
"""Validate static file path is within base directory.
|
|
43
|
+
|
|
44
|
+
Returns resolved path if safe, None if traversal detected.
|
|
45
|
+
"""
|
|
46
|
+
try:
|
|
47
|
+
# Resolve both paths to absolute
|
|
48
|
+
base_resolved = base_dir.resolve()
|
|
49
|
+
requested = (base_dir / requested_path).resolve()
|
|
50
|
+
|
|
51
|
+
# Check if requested path is under base directory
|
|
52
|
+
if base_resolved in requested.parents or requested == base_resolved:
|
|
53
|
+
if requested.exists() and requested.is_file():
|
|
54
|
+
return requested
|
|
55
|
+
|
|
56
|
+
return None
|
|
57
|
+
except (ValueError, OSError):
|
|
58
|
+
return None
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def sanitize_log_input(value: str, max_length: int = 200) -> str:
|
|
62
|
+
"""Sanitize user input for safe logging.
|
|
63
|
+
|
|
64
|
+
Prevents log injection by:
|
|
65
|
+
- Escaping newlines
|
|
66
|
+
- Limiting length
|
|
67
|
+
- Removing control characters
|
|
68
|
+
"""
|
|
69
|
+
if not isinstance(value, str):
|
|
70
|
+
value = str(value)
|
|
71
|
+
|
|
72
|
+
# Remove control characters except spaces
|
|
73
|
+
sanitized = ''.join(c if c.isprintable() or c == ' ' else '?' for c in value)
|
|
74
|
+
|
|
75
|
+
# Escape potential log injection patterns
|
|
76
|
+
sanitized = sanitized.replace('\n', '\\n').replace('\r', '\\r')
|
|
77
|
+
|
|
78
|
+
# Truncate
|
|
79
|
+
if len(sanitized) > max_length:
|
|
80
|
+
sanitized = sanitized[:max_length] + '...'
|
|
81
|
+
|
|
82
|
+
return sanitized
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
# Environment-based configuration
|
|
86
|
+
IS_PRODUCTION = os.getenv("ENVIRONMENT", "development") == "production"
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def get_cors_config():
|
|
90
|
+
"""Get CORS configuration based on environment."""
|
|
91
|
+
if IS_PRODUCTION:
|
|
92
|
+
origins = os.getenv("CORS_ORIGINS", "").split(",")
|
|
93
|
+
origins = [o.strip() for o in origins if o.strip()]
|
|
94
|
+
return {
|
|
95
|
+
"allow_origins": origins,
|
|
96
|
+
"allow_methods": ["GET", "POST", "PUT", "DELETE"],
|
|
97
|
+
"allow_headers": ["Content-Type", "Authorization", "X-API-Key"],
|
|
98
|
+
}
|
|
99
|
+
else:
|
|
100
|
+
return {
|
|
101
|
+
"allow_origins": ["http://localhost:5173", "http://127.0.0.1:5173"],
|
|
102
|
+
"allow_methods": ["*"],
|
|
103
|
+
"allow_headers": ["*"],
|
|
104
|
+
}
|
|
@@ -41,7 +41,7 @@ class WebSocketManager:
|
|
|
41
41
|
"event_type": event_type,
|
|
42
42
|
"data": data,
|
|
43
43
|
"timestamp": datetime.now().isoformat(),
|
|
44
|
-
})
|
|
44
|
+
}, default=str)
|
|
45
45
|
|
|
46
46
|
disconnected = []
|
|
47
47
|
async with self._lock:
|
|
@@ -62,7 +62,7 @@ class WebSocketManager:
|
|
|
62
62
|
"event_type": event_type,
|
|
63
63
|
"data": data,
|
|
64
64
|
"timestamp": datetime.now().isoformat(),
|
|
65
|
-
})
|
|
65
|
+
}, default=str)
|
|
66
66
|
|
|
67
67
|
async with self._lock:
|
|
68
68
|
if client_id in self.connections:
|
|
@@ -77,6 +77,28 @@ class WebSocketManager:
|
|
|
77
77
|
"""Get the number of active connections."""
|
|
78
78
|
return len(self.connections)
|
|
79
79
|
|
|
80
|
+
# Typed broadcast methods (IndyDevDan pattern)
|
|
81
|
+
async def broadcast_activity_logged(self, project: str, activity: dict[str, Any]):
|
|
82
|
+
"""Broadcast when a new activity is logged."""
|
|
83
|
+
await self.broadcast("activity_logged", {
|
|
84
|
+
"project": project,
|
|
85
|
+
"activity": activity,
|
|
86
|
+
})
|
|
87
|
+
|
|
88
|
+
async def broadcast_session_updated(self, project: str, session: dict[str, Any]):
|
|
89
|
+
"""Broadcast when a session is updated."""
|
|
90
|
+
await self.broadcast("session_updated", {
|
|
91
|
+
"project": project,
|
|
92
|
+
"session": session,
|
|
93
|
+
})
|
|
94
|
+
|
|
95
|
+
async def broadcast_stats_updated(self, project: str, stats: dict[str, Any]):
|
|
96
|
+
"""Broadcast when stats change (for charts/panels)."""
|
|
97
|
+
await self.broadcast("stats_updated", {
|
|
98
|
+
"project": project,
|
|
99
|
+
"stats": stats,
|
|
100
|
+
})
|
|
101
|
+
|
|
80
102
|
|
|
81
103
|
# Global manager instance
|
|
82
104
|
manager = WebSocketManager()
|
|
@@ -0,0 +1,429 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""PostToolUse hook - logs tool result after execution.
|
|
3
|
+
|
|
4
|
+
This hook is called by Claude Code after each tool completes.
|
|
5
|
+
It logs the tool output, duration, and success/error status.
|
|
6
|
+
|
|
7
|
+
Hook configuration for settings.json:
|
|
8
|
+
{
|
|
9
|
+
"hooks": {
|
|
10
|
+
"PostToolUse": [
|
|
11
|
+
{
|
|
12
|
+
"type": "command",
|
|
13
|
+
"command": "python hooks/post_tool_use.py"
|
|
14
|
+
}
|
|
15
|
+
]
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import json
|
|
21
|
+
import re
|
|
22
|
+
import sys
|
|
23
|
+
import os
|
|
24
|
+
import sqlite3
|
|
25
|
+
from datetime import datetime, timezone
|
|
26
|
+
from pathlib import Path
|
|
27
|
+
|
|
28
|
+
# Import shared session management
|
|
29
|
+
from session_utils import get_or_create_session
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# Patterns for sensitive field names that should be redacted
|
|
33
|
+
SENSITIVE_FIELD_PATTERNS = [
|
|
34
|
+
r'(?i)(api[_-]?key|apikey)',
|
|
35
|
+
r'(?i)(password|passwd|pwd)',
|
|
36
|
+
r'(?i)(secret|token|credential)',
|
|
37
|
+
r'(?i)(auth[_-]?token|access[_-]?token)',
|
|
38
|
+
r'(?i)(private[_-]?key|ssh[_-]?key)',
|
|
39
|
+
]
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def redact_sensitive_fields(data: dict) -> dict:
|
|
43
|
+
"""Redact sensitive fields from a dictionary for safe logging.
|
|
44
|
+
|
|
45
|
+
Recursively processes nested dicts and lists.
|
|
46
|
+
"""
|
|
47
|
+
if not isinstance(data, dict):
|
|
48
|
+
return data
|
|
49
|
+
|
|
50
|
+
result = {}
|
|
51
|
+
for key, value in data.items():
|
|
52
|
+
# Check if key matches sensitive patterns
|
|
53
|
+
is_sensitive = any(
|
|
54
|
+
re.search(pattern, str(key))
|
|
55
|
+
for pattern in SENSITIVE_FIELD_PATTERNS
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
if is_sensitive:
|
|
59
|
+
result[key] = '[REDACTED]'
|
|
60
|
+
elif isinstance(value, dict):
|
|
61
|
+
result[key] = redact_sensitive_fields(value)
|
|
62
|
+
elif isinstance(value, list):
|
|
63
|
+
result[key] = [
|
|
64
|
+
redact_sensitive_fields(item) if isinstance(item, dict) else item
|
|
65
|
+
for item in value
|
|
66
|
+
]
|
|
67
|
+
else:
|
|
68
|
+
result[key] = value
|
|
69
|
+
|
|
70
|
+
return result
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def get_db_path() -> Path:
|
|
74
|
+
"""Get the database path for the current project."""
|
|
75
|
+
project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
|
76
|
+
return Path(project_path) / ".omni-cortex" / "cortex.db"
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def ensure_database(db_path: Path) -> sqlite3.Connection:
|
|
80
|
+
"""Ensure database exists and is initialized.
|
|
81
|
+
|
|
82
|
+
Auto-creates the database and schema if it doesn't exist.
|
|
83
|
+
This enables 'out of the box' functionality.
|
|
84
|
+
"""
|
|
85
|
+
db_path.parent.mkdir(parents=True, exist_ok=True)
|
|
86
|
+
conn = sqlite3.connect(str(db_path))
|
|
87
|
+
|
|
88
|
+
# Check if schema exists
|
|
89
|
+
cursor = conn.cursor()
|
|
90
|
+
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities'")
|
|
91
|
+
if cursor.fetchone() is None:
|
|
92
|
+
# Apply minimal schema for activities (full schema applied by MCP)
|
|
93
|
+
conn.executescript("""
|
|
94
|
+
CREATE TABLE IF NOT EXISTS activities (
|
|
95
|
+
id TEXT PRIMARY KEY,
|
|
96
|
+
session_id TEXT,
|
|
97
|
+
agent_id TEXT,
|
|
98
|
+
timestamp TEXT NOT NULL,
|
|
99
|
+
event_type TEXT NOT NULL,
|
|
100
|
+
tool_name TEXT,
|
|
101
|
+
tool_input TEXT,
|
|
102
|
+
tool_output TEXT,
|
|
103
|
+
duration_ms INTEGER,
|
|
104
|
+
success INTEGER DEFAULT 1,
|
|
105
|
+
error_message TEXT,
|
|
106
|
+
project_path TEXT,
|
|
107
|
+
file_path TEXT,
|
|
108
|
+
metadata TEXT
|
|
109
|
+
);
|
|
110
|
+
CREATE INDEX IF NOT EXISTS idx_activities_timestamp ON activities(timestamp DESC);
|
|
111
|
+
CREATE INDEX IF NOT EXISTS idx_activities_tool ON activities(tool_name);
|
|
112
|
+
""")
|
|
113
|
+
conn.commit()
|
|
114
|
+
|
|
115
|
+
return conn
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def generate_id() -> str:
|
|
119
|
+
"""Generate a unique activity ID."""
|
|
120
|
+
timestamp_ms = int(datetime.now().timestamp() * 1000)
|
|
121
|
+
random_hex = os.urandom(4).hex()
|
|
122
|
+
return f"act_{timestamp_ms}_{random_hex}"
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def truncate(text: str, max_length: int = 10000) -> str:
|
|
126
|
+
"""Truncate text to max length."""
|
|
127
|
+
if len(text) <= max_length:
|
|
128
|
+
return text
|
|
129
|
+
return text[:max_length - 20] + "\n... [truncated]"
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def extract_skill_info(tool_input: dict, project_path: str) -> tuple:
|
|
133
|
+
"""Extract skill name and scope from Skill tool input.
|
|
134
|
+
|
|
135
|
+
Returns:
|
|
136
|
+
Tuple of (skill_name, command_scope)
|
|
137
|
+
"""
|
|
138
|
+
try:
|
|
139
|
+
skill_name = tool_input.get("skill", "")
|
|
140
|
+
if not skill_name:
|
|
141
|
+
return None, None
|
|
142
|
+
|
|
143
|
+
# Determine scope by checking file locations
|
|
144
|
+
project_cmd = Path(project_path) / ".claude" / "commands" / f"{skill_name}.md"
|
|
145
|
+
if project_cmd.exists():
|
|
146
|
+
return skill_name, "project"
|
|
147
|
+
|
|
148
|
+
universal_cmd = Path.home() / ".claude" / "commands" / f"{skill_name}.md"
|
|
149
|
+
if universal_cmd.exists():
|
|
150
|
+
return skill_name, "universal"
|
|
151
|
+
|
|
152
|
+
return skill_name, "unknown"
|
|
153
|
+
except Exception:
|
|
154
|
+
return None, None
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def extract_mcp_server(tool_name: str) -> str:
|
|
158
|
+
"""Extract MCP server name from tool name pattern mcp__servername__toolname."""
|
|
159
|
+
if not tool_name or not tool_name.startswith("mcp__"):
|
|
160
|
+
return None
|
|
161
|
+
|
|
162
|
+
parts = tool_name.split("__")
|
|
163
|
+
if len(parts) >= 3:
|
|
164
|
+
return parts[1]
|
|
165
|
+
return None
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def ensure_analytics_columns(conn: sqlite3.Connection) -> None:
|
|
169
|
+
"""Ensure command analytics columns exist in activities table."""
|
|
170
|
+
cursor = conn.cursor()
|
|
171
|
+
columns = cursor.execute("PRAGMA table_info(activities)").fetchall()
|
|
172
|
+
column_names = [col[1] for col in columns]
|
|
173
|
+
|
|
174
|
+
new_columns = [
|
|
175
|
+
("command_name", "TEXT"),
|
|
176
|
+
("command_scope", "TEXT"),
|
|
177
|
+
("mcp_server", "TEXT"),
|
|
178
|
+
("skill_name", "TEXT"),
|
|
179
|
+
("summary", "TEXT"),
|
|
180
|
+
("summary_detail", "TEXT"),
|
|
181
|
+
]
|
|
182
|
+
|
|
183
|
+
for col_name, col_type in new_columns:
|
|
184
|
+
if col_name not in column_names:
|
|
185
|
+
cursor.execute(f"ALTER TABLE activities ADD COLUMN {col_name} {col_type}")
|
|
186
|
+
|
|
187
|
+
conn.commit()
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def generate_summary(tool_name: str, tool_input: dict, success: bool) -> tuple:
|
|
191
|
+
"""Generate short and detailed summaries for an activity.
|
|
192
|
+
|
|
193
|
+
Returns:
|
|
194
|
+
Tuple of (summary, summary_detail)
|
|
195
|
+
"""
|
|
196
|
+
if not tool_name:
|
|
197
|
+
return None, None
|
|
198
|
+
|
|
199
|
+
input_data = tool_input if isinstance(tool_input, dict) else {}
|
|
200
|
+
short = ""
|
|
201
|
+
detail = ""
|
|
202
|
+
|
|
203
|
+
if tool_name == "Read":
|
|
204
|
+
path = input_data.get("file_path", "unknown")
|
|
205
|
+
filename = Path(path).name if path else "file"
|
|
206
|
+
short = f"Read file: {filename}"
|
|
207
|
+
detail = f"Reading contents of {path}"
|
|
208
|
+
|
|
209
|
+
elif tool_name == "Write":
|
|
210
|
+
path = input_data.get("file_path", "unknown")
|
|
211
|
+
filename = Path(path).name if path else "file"
|
|
212
|
+
short = f"Write file: {filename}"
|
|
213
|
+
detail = f"Writing/creating file at {path}"
|
|
214
|
+
|
|
215
|
+
elif tool_name == "Edit":
|
|
216
|
+
path = input_data.get("file_path", "unknown")
|
|
217
|
+
filename = Path(path).name if path else "file"
|
|
218
|
+
short = f"Edit file: {filename}"
|
|
219
|
+
detail = f"Editing {path}"
|
|
220
|
+
|
|
221
|
+
elif tool_name == "Bash":
|
|
222
|
+
cmd = str(input_data.get("command", ""))[:50]
|
|
223
|
+
short = f"Run: {cmd}..."
|
|
224
|
+
detail = f"Executing: {input_data.get('command', 'unknown')}"
|
|
225
|
+
|
|
226
|
+
elif tool_name == "Grep":
|
|
227
|
+
pattern = input_data.get("pattern", "")
|
|
228
|
+
short = f"Search: {pattern[:30]}"
|
|
229
|
+
detail = f"Searching for pattern: {pattern}"
|
|
230
|
+
|
|
231
|
+
elif tool_name == "Glob":
|
|
232
|
+
pattern = input_data.get("pattern", "")
|
|
233
|
+
short = f"Find files: {pattern[:30]}"
|
|
234
|
+
detail = f"Finding files matching: {pattern}"
|
|
235
|
+
|
|
236
|
+
elif tool_name == "Skill":
|
|
237
|
+
skill = input_data.get("skill", "unknown")
|
|
238
|
+
short = f"Run skill: /{skill}"
|
|
239
|
+
detail = f"Executing slash command /{skill}"
|
|
240
|
+
|
|
241
|
+
elif tool_name == "Task":
|
|
242
|
+
desc = input_data.get("description", "task")
|
|
243
|
+
short = f"Spawn agent: {desc[:30]}"
|
|
244
|
+
detail = f"Launching sub-agent: {desc}"
|
|
245
|
+
|
|
246
|
+
elif tool_name == "TodoWrite":
|
|
247
|
+
todos = input_data.get("todos", [])
|
|
248
|
+
count = len(todos) if isinstance(todos, list) else 0
|
|
249
|
+
short = f"Update todo: {count} items"
|
|
250
|
+
detail = f"Managing task list with {count} items"
|
|
251
|
+
|
|
252
|
+
elif tool_name.startswith("mcp__"):
|
|
253
|
+
parts = tool_name.split("__")
|
|
254
|
+
server = parts[1] if len(parts) > 1 else "unknown"
|
|
255
|
+
tool = parts[2] if len(parts) > 2 else tool_name
|
|
256
|
+
short = f"MCP: {server}/{tool}"
|
|
257
|
+
detail = f"Calling {tool} from MCP server {server}"
|
|
258
|
+
|
|
259
|
+
else:
|
|
260
|
+
short = f"Tool: {tool_name}"
|
|
261
|
+
detail = f"Using tool {tool_name}"
|
|
262
|
+
|
|
263
|
+
if not success:
|
|
264
|
+
short = f"[FAILED] {short}"
|
|
265
|
+
detail = f"[FAILED] {detail}"
|
|
266
|
+
|
|
267
|
+
return short, detail
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def main():
|
|
271
|
+
"""Process PostToolUse hook."""
|
|
272
|
+
try:
|
|
273
|
+
# Read all input at once (more reliable than json.load on stdin)
|
|
274
|
+
raw_input = sys.stdin.read()
|
|
275
|
+
if not raw_input or not raw_input.strip():
|
|
276
|
+
print(json.dumps({}))
|
|
277
|
+
return
|
|
278
|
+
|
|
279
|
+
input_data = json.loads(raw_input)
|
|
280
|
+
|
|
281
|
+
# Extract data from hook input
|
|
282
|
+
# Note: Claude Code uses 'tool_response' not 'tool_output'
|
|
283
|
+
tool_name = input_data.get("tool_name")
|
|
284
|
+
tool_input = input_data.get("tool_input", {})
|
|
285
|
+
tool_response = input_data.get("tool_response", {}) # Correct field name
|
|
286
|
+
agent_id = input_data.get("agent_id")
|
|
287
|
+
|
|
288
|
+
# Determine success/error from response content
|
|
289
|
+
# Claude Code doesn't send 'is_error' - we must detect from response
|
|
290
|
+
is_error = False
|
|
291
|
+
error_message = None
|
|
292
|
+
|
|
293
|
+
if isinstance(tool_response, dict):
|
|
294
|
+
# Check for explicit error field
|
|
295
|
+
if "error" in tool_response:
|
|
296
|
+
is_error = True
|
|
297
|
+
error_message = str(tool_response.get("error", ""))[:500]
|
|
298
|
+
|
|
299
|
+
# For Bash: check stderr or error patterns in stdout
|
|
300
|
+
elif tool_name == "Bash":
|
|
301
|
+
stderr = tool_response.get("stderr", "")
|
|
302
|
+
stdout = tool_response.get("stdout", "")
|
|
303
|
+
|
|
304
|
+
# Check stderr for content (excluding common non-errors)
|
|
305
|
+
if stderr and stderr.strip():
|
|
306
|
+
# Filter out common non-error stderr output
|
|
307
|
+
stderr_lower = stderr.lower()
|
|
308
|
+
non_error_patterns = ["warning:", "note:", "info:"]
|
|
309
|
+
if not any(p in stderr_lower for p in non_error_patterns):
|
|
310
|
+
is_error = True
|
|
311
|
+
error_message = stderr[:500]
|
|
312
|
+
|
|
313
|
+
# Check stdout for common error patterns
|
|
314
|
+
if not is_error and stdout:
|
|
315
|
+
error_patterns = [
|
|
316
|
+
"command not found",
|
|
317
|
+
"No such file or directory",
|
|
318
|
+
"Permission denied",
|
|
319
|
+
"fatal:",
|
|
320
|
+
"error:",
|
|
321
|
+
"Error:",
|
|
322
|
+
"FAILED",
|
|
323
|
+
"Cannot find",
|
|
324
|
+
"not recognized",
|
|
325
|
+
"Exit code 1",
|
|
326
|
+
]
|
|
327
|
+
stdout_check = stdout[:1000] # Check first 1000 chars
|
|
328
|
+
for pattern in error_patterns:
|
|
329
|
+
if pattern in stdout_check:
|
|
330
|
+
is_error = True
|
|
331
|
+
error_message = f"Error pattern detected: {pattern}"
|
|
332
|
+
break
|
|
333
|
+
|
|
334
|
+
# For Read: check for file errors
|
|
335
|
+
elif tool_name == "Read":
|
|
336
|
+
if "error" in str(tool_response).lower():
|
|
337
|
+
is_error = True
|
|
338
|
+
error_message = "File read error"
|
|
339
|
+
|
|
340
|
+
# Legacy fallback: also check tool_output for backwards compatibility
|
|
341
|
+
tool_output = tool_response if tool_response else input_data.get("tool_output", {})
|
|
342
|
+
|
|
343
|
+
# Skip logging our own tools to prevent recursion
|
|
344
|
+
# MCP tools are named like "mcp__omni-cortex__cortex_remember"
|
|
345
|
+
if tool_name and ("cortex_" in tool_name or "omni-cortex" in tool_name):
|
|
346
|
+
print(json.dumps({}))
|
|
347
|
+
return
|
|
348
|
+
|
|
349
|
+
project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
|
350
|
+
|
|
351
|
+
# Auto-initialize database (creates if not exists)
|
|
352
|
+
db_path = get_db_path()
|
|
353
|
+
conn = ensure_database(db_path)
|
|
354
|
+
|
|
355
|
+
# Ensure analytics columns exist
|
|
356
|
+
ensure_analytics_columns(conn)
|
|
357
|
+
|
|
358
|
+
# Get or create session (auto-manages session lifecycle)
|
|
359
|
+
session_id = get_or_create_session(conn, project_path)
|
|
360
|
+
|
|
361
|
+
# Redact sensitive fields before logging
|
|
362
|
+
safe_input = redact_sensitive_fields(tool_input) if isinstance(tool_input, dict) else tool_input
|
|
363
|
+
safe_output = redact_sensitive_fields(tool_response) if isinstance(tool_response, dict) else tool_response
|
|
364
|
+
|
|
365
|
+
# Extract command analytics
|
|
366
|
+
skill_name = None
|
|
367
|
+
command_scope = None
|
|
368
|
+
mcp_server = None
|
|
369
|
+
|
|
370
|
+
# Extract skill info from Skill tool calls
|
|
371
|
+
if tool_name == "Skill" and isinstance(tool_input, dict):
|
|
372
|
+
skill_name, command_scope = extract_skill_info(tool_input, project_path)
|
|
373
|
+
|
|
374
|
+
# Extract MCP server from tool name (mcp__servername__toolname pattern)
|
|
375
|
+
if tool_name and tool_name.startswith("mcp__"):
|
|
376
|
+
mcp_server = extract_mcp_server(tool_name)
|
|
377
|
+
|
|
378
|
+
# Generate summary for activity
|
|
379
|
+
summary = None
|
|
380
|
+
summary_detail = None
|
|
381
|
+
try:
|
|
382
|
+
summary, summary_detail = generate_summary(tool_name, safe_input, not is_error)
|
|
383
|
+
except Exception:
|
|
384
|
+
pass
|
|
385
|
+
|
|
386
|
+
# Insert activity record with analytics columns
|
|
387
|
+
cursor = conn.cursor()
|
|
388
|
+
cursor.execute(
|
|
389
|
+
"""
|
|
390
|
+
INSERT INTO activities (
|
|
391
|
+
id, session_id, agent_id, timestamp, event_type,
|
|
392
|
+
tool_name, tool_input, tool_output, success, error_message, project_path,
|
|
393
|
+
skill_name, command_scope, mcp_server, summary, summary_detail
|
|
394
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
395
|
+
""",
|
|
396
|
+
(
|
|
397
|
+
generate_id(),
|
|
398
|
+
session_id,
|
|
399
|
+
agent_id,
|
|
400
|
+
datetime.now(timezone.utc).isoformat(),
|
|
401
|
+
"post_tool_use",
|
|
402
|
+
tool_name,
|
|
403
|
+
truncate(json.dumps(safe_input, default=str)),
|
|
404
|
+
truncate(json.dumps(safe_output, default=str)),
|
|
405
|
+
0 if is_error else 1,
|
|
406
|
+
error_message,
|
|
407
|
+
project_path,
|
|
408
|
+
skill_name,
|
|
409
|
+
command_scope,
|
|
410
|
+
mcp_server,
|
|
411
|
+
summary,
|
|
412
|
+
summary_detail,
|
|
413
|
+
),
|
|
414
|
+
)
|
|
415
|
+
conn.commit()
|
|
416
|
+
conn.close()
|
|
417
|
+
|
|
418
|
+
# Return empty response (no modification)
|
|
419
|
+
print(json.dumps({}))
|
|
420
|
+
|
|
421
|
+
except Exception as e:
|
|
422
|
+
# Hooks should never block - log error but continue
|
|
423
|
+
print(json.dumps({"systemMessage": f"Cortex post_tool_use: {e}"}))
|
|
424
|
+
|
|
425
|
+
sys.exit(0)
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
if __name__ == "__main__":
|
|
429
|
+
main()
|