omni-cortex 1.11.2__py3-none-any.whl → 1.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/main.py +2 -2
  2. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +2 -2
  3. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +127 -9
  4. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +69 -1
  5. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/hooks/stop.py +41 -6
  6. omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/user_prompt.py +220 -0
  7. {omni_cortex-1.11.2.dist-info → omni_cortex-1.12.0.dist-info}/METADATA +1 -1
  8. omni_cortex-1.12.0.dist-info/RECORD +26 -0
  9. omni_cortex-1.11.2.dist-info/RECORD +0 -25
  10. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/.env.example +0 -0
  11. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +0 -0
  12. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -0
  13. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/database.py +0 -0
  14. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/image_service.py +0 -0
  15. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
  16. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/models.py +0 -0
  17. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  18. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  19. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/prompt_security.py +0 -0
  20. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  21. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/security.py +0 -0
  22. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
  23. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/hooks/session_utils.py +0 -0
  24. {omni_cortex-1.11.2.data → omni_cortex-1.12.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  25. {omni_cortex-1.11.2.dist-info → omni_cortex-1.12.0.dist-info}/WHEEL +0 -0
  26. {omni_cortex-1.11.2.dist-info → omni_cortex-1.12.0.dist-info}/entry_points.txt +0 -0
  27. {omni_cortex-1.11.2.dist-info → omni_cortex-1.12.0.dist-info}/licenses/LICENSE +0 -0
@@ -381,9 +381,9 @@ async def get_aggregate_memories(request: AggregateMemoryRequest):
381
381
  log_error(f"/api/aggregate/memories (project: {project_path})", e)
382
382
  continue
383
383
 
384
- # Sort by last_accessed or created_at
384
+ # Sort by last_accessed or created_at (convert to str to handle mixed tz-aware/naive)
385
385
  all_memories.sort(
386
- key=lambda x: x.get('last_accessed') or x.get('created_at') or '',
386
+ key=lambda x: str(x.get('last_accessed') or x.get('created_at') or ''),
387
387
  reverse=True
388
388
  )
389
389
 
@@ -41,7 +41,7 @@ class WebSocketManager:
41
41
  "event_type": event_type,
42
42
  "data": data,
43
43
  "timestamp": datetime.now().isoformat(),
44
- })
44
+ }, default=str)
45
45
 
46
46
  disconnected = []
47
47
  async with self._lock:
@@ -62,7 +62,7 @@ class WebSocketManager:
62
62
  "event_type": event_type,
63
63
  "data": data,
64
64
  "timestamp": datetime.now().isoformat(),
65
- })
65
+ }, default=str)
66
66
 
67
67
  async with self._lock:
68
68
  if client_id in self.connections:
@@ -24,11 +24,75 @@ import os
24
24
  import sqlite3
25
25
  from datetime import datetime, timezone
26
26
  from pathlib import Path
27
+ from typing import Optional, Tuple
27
28
 
28
29
  # Import shared session management
29
30
  from session_utils import get_or_create_session
30
31
 
31
32
 
33
+ # === Tool Timing Management ===
34
+ # Read tool start timestamps and calculate duration
35
+
36
+ def get_timing_file_path() -> Path:
37
+ """Get the path to the tool timing file."""
38
+ project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
39
+ return Path(project_path) / ".omni-cortex" / "tool_timing.json"
40
+
41
+
42
+ def load_timing_data() -> dict:
43
+ """Load current timing data from file."""
44
+ timing_file = get_timing_file_path()
45
+ if not timing_file.exists():
46
+ return {}
47
+ try:
48
+ with open(timing_file, "r") as f:
49
+ return json.load(f)
50
+ except (json.JSONDecodeError, IOError):
51
+ return {}
52
+
53
+
54
+ def save_timing_data(data: dict) -> None:
55
+ """Save timing data to file."""
56
+ timing_file = get_timing_file_path()
57
+ timing_file.parent.mkdir(parents=True, exist_ok=True)
58
+ with open(timing_file, "w") as f:
59
+ json.dump(data, f)
60
+
61
+
62
+ def get_tool_duration(tool_name: str, agent_id: str = None) -> Tuple[Optional[int], Optional[str]]:
63
+ """Get the duration for a tool execution and clean up.
64
+
65
+ Args:
66
+ tool_name: Name of the tool that finished
67
+ agent_id: Optional agent ID
68
+
69
+ Returns:
70
+ Tuple of (duration_ms, activity_id) or (None, None) if not found
71
+ """
72
+ timing_data = load_timing_data()
73
+ key = f"{tool_name}_{agent_id or 'main'}"
74
+
75
+ if key not in timing_data:
76
+ return None, None
77
+
78
+ entry = timing_data[key]
79
+ start_time_ms = entry.get("start_time_ms")
80
+ activity_id = entry.get("activity_id")
81
+
82
+ if not start_time_ms:
83
+ return None, activity_id
84
+
85
+ # Calculate duration
86
+ end_time_ms = int(datetime.now(timezone.utc).timestamp() * 1000)
87
+ duration_ms = end_time_ms - start_time_ms
88
+
89
+ # Remove the entry (tool call complete)
90
+ del timing_data[key]
91
+ save_timing_data(timing_data)
92
+
93
+ return duration_ms, activity_id
94
+
95
+
32
96
  # Patterns for sensitive field names that should be redacted
33
97
  SENSITIVE_FIELD_PATTERNS = [
34
98
  r'(?i)(api[_-]?key|apikey)',
@@ -279,16 +343,66 @@ def main():
279
343
  input_data = json.loads(raw_input)
280
344
 
281
345
  # Extract data from hook input
346
+ # Note: Claude Code uses 'tool_response' not 'tool_output'
282
347
  tool_name = input_data.get("tool_name")
283
348
  tool_input = input_data.get("tool_input", {})
284
- tool_output = input_data.get("tool_output", {})
349
+ tool_response = input_data.get("tool_response", {}) # Correct field name
285
350
  agent_id = input_data.get("agent_id")
286
351
 
287
- # Determine success/error
288
- is_error = input_data.get("is_error", False)
352
+ # Determine success/error from response content
353
+ # Claude Code doesn't send 'is_error' - we must detect from response
354
+ is_error = False
289
355
  error_message = None
290
- if is_error and isinstance(tool_output, dict):
291
- error_message = tool_output.get("error") or tool_output.get("message")
356
+
357
+ if isinstance(tool_response, dict):
358
+ # Check for explicit error field
359
+ if "error" in tool_response:
360
+ is_error = True
361
+ error_message = str(tool_response.get("error", ""))[:500]
362
+
363
+ # For Bash: check stderr or error patterns in stdout
364
+ elif tool_name == "Bash":
365
+ stderr = tool_response.get("stderr", "")
366
+ stdout = tool_response.get("stdout", "")
367
+
368
+ # Check stderr for content (excluding common non-errors)
369
+ if stderr and stderr.strip():
370
+ # Filter out common non-error stderr output
371
+ stderr_lower = stderr.lower()
372
+ non_error_patterns = ["warning:", "note:", "info:"]
373
+ if not any(p in stderr_lower for p in non_error_patterns):
374
+ is_error = True
375
+ error_message = stderr[:500]
376
+
377
+ # Check stdout for common error patterns
378
+ if not is_error and stdout:
379
+ error_patterns = [
380
+ "command not found",
381
+ "No such file or directory",
382
+ "Permission denied",
383
+ "fatal:",
384
+ "error:",
385
+ "Error:",
386
+ "FAILED",
387
+ "Cannot find",
388
+ "not recognized",
389
+ "Exit code 1",
390
+ ]
391
+ stdout_check = stdout[:1000] # Check first 1000 chars
392
+ for pattern in error_patterns:
393
+ if pattern in stdout_check:
394
+ is_error = True
395
+ error_message = f"Error pattern detected: {pattern}"
396
+ break
397
+
398
+ # For Read: check for file errors
399
+ elif tool_name == "Read":
400
+ if "error" in str(tool_response).lower():
401
+ is_error = True
402
+ error_message = "File read error"
403
+
404
+ # Legacy fallback: also check tool_output for backwards compatibility
405
+ tool_output = tool_response if tool_response else input_data.get("tool_output", {})
292
406
 
293
407
  # Skip logging our own tools to prevent recursion
294
408
  # MCP tools are named like "mcp__omni-cortex__cortex_remember"
@@ -310,7 +424,7 @@ def main():
310
424
 
311
425
  # Redact sensitive fields before logging
312
426
  safe_input = redact_sensitive_fields(tool_input) if isinstance(tool_input, dict) else tool_input
313
- safe_output = redact_sensitive_fields(tool_output) if isinstance(tool_output, dict) else tool_output
427
+ safe_output = redact_sensitive_fields(tool_response) if isinstance(tool_response, dict) else tool_response
314
428
 
315
429
  # Extract command analytics
316
430
  skill_name = None
@@ -333,15 +447,18 @@ def main():
333
447
  except Exception:
334
448
  pass
335
449
 
336
- # Insert activity record with analytics columns
450
+ # Get tool duration from pre_tool_use timing data
451
+ duration_ms, _ = get_tool_duration(tool_name, agent_id)
452
+
453
+ # Insert activity record with analytics columns and duration
337
454
  cursor = conn.cursor()
338
455
  cursor.execute(
339
456
  """
340
457
  INSERT INTO activities (
341
458
  id, session_id, agent_id, timestamp, event_type,
342
- tool_name, tool_input, tool_output, success, error_message, project_path,
459
+ tool_name, tool_input, tool_output, duration_ms, success, error_message, project_path,
343
460
  skill_name, command_scope, mcp_server, summary, summary_detail
344
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
461
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
345
462
  """,
346
463
  (
347
464
  generate_id(),
@@ -352,6 +469,7 @@ def main():
352
469
  tool_name,
353
470
  truncate(json.dumps(safe_input, default=str)),
354
471
  truncate(json.dumps(safe_output, default=str)),
472
+ duration_ms,
355
473
  0 if is_error else 1,
356
474
  error_message,
357
475
  project_path,
@@ -29,6 +29,68 @@ from pathlib import Path
29
29
  from session_utils import get_or_create_session
30
30
 
31
31
 
32
+ # === Tool Timing Management ===
33
+ # Store tool start timestamps for duration calculation in post_tool_use
34
+
35
+ def get_timing_file_path() -> Path:
36
+ """Get the path to the tool timing file."""
37
+ project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
38
+ return Path(project_path) / ".omni-cortex" / "tool_timing.json"
39
+
40
+
41
+ def load_timing_data() -> dict:
42
+ """Load current timing data from file."""
43
+ timing_file = get_timing_file_path()
44
+ if not timing_file.exists():
45
+ return {}
46
+ try:
47
+ with open(timing_file, "r") as f:
48
+ return json.load(f)
49
+ except (json.JSONDecodeError, IOError):
50
+ return {}
51
+
52
+
53
+ def save_timing_data(data: dict) -> None:
54
+ """Save timing data to file."""
55
+ timing_file = get_timing_file_path()
56
+ timing_file.parent.mkdir(parents=True, exist_ok=True)
57
+ with open(timing_file, "w") as f:
58
+ json.dump(data, f)
59
+
60
+
61
+ def record_tool_start(tool_name: str, activity_id: str, agent_id: str = None) -> None:
62
+ """Record the start time for a tool execution.
63
+
64
+ Args:
65
+ tool_name: Name of the tool being executed
66
+ activity_id: Unique activity ID for this tool call
67
+ agent_id: Optional agent ID
68
+ """
69
+ timing_data = load_timing_data()
70
+
71
+ # Use activity_id as key (unique per tool call)
72
+ # Also store by tool_name for simpler matching in post_tool_use
73
+ key = f"{tool_name}_{agent_id or 'main'}"
74
+
75
+ timing_data[key] = {
76
+ "activity_id": activity_id,
77
+ "tool_name": tool_name,
78
+ "agent_id": agent_id,
79
+ "start_time_ms": int(datetime.now(timezone.utc).timestamp() * 1000),
80
+ "start_time_iso": datetime.now(timezone.utc).isoformat(),
81
+ }
82
+
83
+ # Clean up old entries (older than 1 hour) to prevent file bloat
84
+ now_ms = int(datetime.now(timezone.utc).timestamp() * 1000)
85
+ one_hour_ms = 60 * 60 * 1000
86
+ timing_data = {
87
+ k: v for k, v in timing_data.items()
88
+ if now_ms - v.get("start_time_ms", 0) < one_hour_ms
89
+ }
90
+
91
+ save_timing_data(timing_data)
92
+
93
+
32
94
  # Patterns for sensitive field names that should be redacted
33
95
  SENSITIVE_FIELD_PATTERNS = [
34
96
  r'(?i)(api[_-]?key|apikey)',
@@ -172,6 +234,12 @@ def main():
172
234
  # Redact sensitive fields before logging
173
235
  safe_input = redact_sensitive_fields(tool_input) if isinstance(tool_input, dict) else tool_input
174
236
 
237
+ # Generate activity ID
238
+ activity_id = generate_id()
239
+
240
+ # Record tool start time for duration calculation
241
+ record_tool_start(tool_name, activity_id, agent_id)
242
+
175
243
  # Insert activity record
176
244
  cursor = conn.cursor()
177
245
  cursor.execute(
@@ -182,7 +250,7 @@ def main():
182
250
  ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
183
251
  """,
184
252
  (
185
- generate_id(),
253
+ activity_id,
186
254
  session_id,
187
255
  agent_id,
188
256
  datetime.now(timezone.utc).isoformat(),
@@ -70,10 +70,24 @@ def main():
70
70
  conn.close()
71
71
  return
72
72
 
73
- # End the session
73
+ # Get session start time for duration calculation
74
+ cursor.execute("SELECT started_at FROM sessions WHERE id = ?", (session_id,))
75
+ session_row = cursor.fetchone()
76
+ session_duration_ms = None
77
+
78
+ if session_row and session_row["started_at"]:
79
+ try:
80
+ started_at = session_row["started_at"]
81
+ started_dt = datetime.fromisoformat(started_at.replace("Z", "+00:00"))
82
+ ended_dt = datetime.now(timezone.utc)
83
+ session_duration_ms = int((ended_dt - started_dt).total_seconds() * 1000)
84
+ except (ValueError, TypeError):
85
+ pass
86
+
87
+ # End the session with duration
74
88
  cursor.execute(
75
- "UPDATE sessions SET ended_at = ? WHERE id = ? AND ended_at IS NULL",
76
- (now, session_id),
89
+ "UPDATE sessions SET ended_at = ?, duration_ms = ? WHERE id = ? AND ended_at IS NULL",
90
+ (now, session_duration_ms, session_id),
77
91
  )
78
92
 
79
93
  # Gather session statistics
@@ -131,12 +145,28 @@ def main():
131
145
  )
132
146
  existing = cursor.fetchone()
133
147
 
148
+ # Calculate tool duration breakdown from activities
149
+ cursor.execute(
150
+ """
151
+ SELECT tool_name, SUM(duration_ms) as total_ms, COUNT(*) as cnt
152
+ FROM activities
153
+ WHERE session_id = ? AND tool_name IS NOT NULL AND duration_ms IS NOT NULL
154
+ GROUP BY tool_name
155
+ """,
156
+ (session_id,),
157
+ )
158
+ tool_duration_breakdown = {
159
+ row["tool_name"]: {"total_ms": row["total_ms"], "count": row["cnt"]}
160
+ for row in cursor.fetchall()
161
+ }
162
+
134
163
  if existing:
135
164
  cursor.execute(
136
165
  """
137
166
  UPDATE session_summaries
138
167
  SET key_errors = ?, files_modified = ?, tools_used = ?,
139
- total_activities = ?, total_memories_created = ?
168
+ total_activities = ?, total_memories_created = ?,
169
+ duration_ms = ?, tool_duration_breakdown = ?
140
170
  WHERE session_id = ?
141
171
  """,
142
172
  (
@@ -145,6 +175,8 @@ def main():
145
175
  json.dumps(tools_used) if tools_used else None,
146
176
  total_activities,
147
177
  total_memories,
178
+ session_duration_ms,
179
+ json.dumps(tool_duration_breakdown) if tool_duration_breakdown else None,
148
180
  session_id,
149
181
  ),
150
182
  )
@@ -153,8 +185,9 @@ def main():
153
185
  """
154
186
  INSERT INTO session_summaries (
155
187
  id, session_id, key_errors, files_modified, tools_used,
156
- total_activities, total_memories_created, created_at
157
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
188
+ total_activities, total_memories_created, created_at,
189
+ duration_ms, tool_duration_breakdown
190
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
158
191
  """,
159
192
  (
160
193
  generate_id("sum"),
@@ -165,6 +198,8 @@ def main():
165
198
  total_activities,
166
199
  total_memories,
167
200
  now,
201
+ session_duration_ms,
202
+ json.dumps(tool_duration_breakdown) if tool_duration_breakdown else None,
168
203
  ),
169
204
  )
170
205
 
@@ -0,0 +1,220 @@
1
+ #!/usr/bin/env python3
2
+ """UserPromptSubmit hook - captures user messages for style analysis.
3
+
4
+ This hook is called by Claude Code when the user submits a prompt.
5
+ It logs the user message to the Cortex database for later style analysis.
6
+
7
+ Hook configuration for settings.json:
8
+ {
9
+ "hooks": {
10
+ "UserPromptSubmit": [
11
+ {
12
+ "hooks": [
13
+ {
14
+ "type": "command",
15
+ "command": "python hooks/user_prompt.py"
16
+ }
17
+ ]
18
+ }
19
+ ]
20
+ }
21
+ }
22
+ """
23
+
24
+ import json
25
+ import re
26
+ import sys
27
+ import os
28
+ import sqlite3
29
+ from datetime import datetime, timezone
30
+ from pathlib import Path
31
+
32
+ # Import shared session management
33
+ from session_utils import get_or_create_session
34
+
35
+
36
+ def get_db_path() -> Path:
37
+ """Get the database path for the current project."""
38
+ project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
39
+ return Path(project_path) / ".omni-cortex" / "cortex.db"
40
+
41
+
42
+ def ensure_database(db_path: Path) -> sqlite3.Connection:
43
+ """Ensure database exists and has user_messages table."""
44
+ db_path.parent.mkdir(parents=True, exist_ok=True)
45
+ conn = sqlite3.connect(str(db_path))
46
+
47
+ # Check if user_messages table exists
48
+ cursor = conn.cursor()
49
+ cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='user_messages'")
50
+ if cursor.fetchone() is None:
51
+ # Apply minimal schema for user_messages
52
+ conn.executescript("""
53
+ CREATE TABLE IF NOT EXISTS user_messages (
54
+ id TEXT PRIMARY KEY,
55
+ session_id TEXT,
56
+ timestamp TEXT NOT NULL,
57
+ content TEXT NOT NULL,
58
+ word_count INTEGER,
59
+ char_count INTEGER,
60
+ line_count INTEGER,
61
+ has_code_blocks INTEGER DEFAULT 0,
62
+ has_questions INTEGER DEFAULT 0,
63
+ has_commands INTEGER DEFAULT 0,
64
+ tone_indicators TEXT,
65
+ project_path TEXT,
66
+ metadata TEXT
67
+ );
68
+ CREATE INDEX IF NOT EXISTS idx_user_messages_timestamp ON user_messages(timestamp DESC);
69
+ """)
70
+ conn.commit()
71
+
72
+ return conn
73
+
74
+
75
+ def generate_id() -> str:
76
+ """Generate a unique message ID."""
77
+ timestamp_ms = int(datetime.now().timestamp() * 1000)
78
+ random_hex = os.urandom(4).hex()
79
+ return f"msg_{timestamp_ms}_{random_hex}"
80
+
81
+
82
+ def analyze_message(content: str) -> dict:
83
+ """Analyze message characteristics for style profiling."""
84
+ # Basic counts
85
+ word_count = len(content.split())
86
+ char_count = len(content)
87
+ line_count = len(content.splitlines()) or 1
88
+
89
+ # Detect code blocks
90
+ has_code_blocks = 1 if re.search(r'```[\s\S]*?```|`[^`]+`', content) else 0
91
+
92
+ # Detect questions
93
+ has_questions = 1 if re.search(r'\?|^(what|how|why|when|where|who|which|can|could|would|should|is|are|do|does|did)\b', content, re.IGNORECASE | re.MULTILINE) else 0
94
+
95
+ # Detect slash commands
96
+ has_commands = 1 if content.strip().startswith('/') else 0
97
+
98
+ # Tone indicators
99
+ tone_indicators = []
100
+
101
+ # Urgency markers
102
+ if re.search(r'\b(urgent|asap|immediately|quick|fast|hurry)\b', content, re.IGNORECASE):
103
+ tone_indicators.append("urgent")
104
+
105
+ # Polite markers
106
+ if re.search(r'\b(please|thanks|thank you|appreciate|kindly)\b', content, re.IGNORECASE):
107
+ tone_indicators.append("polite")
108
+
109
+ # Direct/imperative
110
+ if re.match(r'^(fix|add|remove|update|change|create|delete|run|test|check|show|list|find)\b', content.strip(), re.IGNORECASE):
111
+ tone_indicators.append("direct")
112
+
113
+ # Questioning/exploratory
114
+ if has_questions:
115
+ tone_indicators.append("inquisitive")
116
+
117
+ # Technical
118
+ if re.search(r'\b(function|class|method|variable|api|database|server|error|bug|issue)\b', content, re.IGNORECASE):
119
+ tone_indicators.append("technical")
120
+
121
+ # Casual
122
+ if re.search(r'\b(hey|hi|yo|cool|awesome|great|nice)\b', content, re.IGNORECASE):
123
+ tone_indicators.append("casual")
124
+
125
+ return {
126
+ "word_count": word_count,
127
+ "char_count": char_count,
128
+ "line_count": line_count,
129
+ "has_code_blocks": has_code_blocks,
130
+ "has_questions": has_questions,
131
+ "has_commands": has_commands,
132
+ "tone_indicators": json.dumps(tone_indicators),
133
+ }
134
+
135
+
136
+ def main():
137
+ """Process UserPromptSubmit hook."""
138
+ try:
139
+ # Read input from stdin
140
+ import select
141
+ if sys.platform != "win32":
142
+ ready, _, _ = select.select([sys.stdin], [], [], 5.0)
143
+ if not ready:
144
+ print(json.dumps({}))
145
+ return
146
+
147
+ raw_input = sys.stdin.read()
148
+ if not raw_input or not raw_input.strip():
149
+ print(json.dumps({}))
150
+ return
151
+
152
+ input_data = json.loads(raw_input)
153
+
154
+ # Extract user prompt
155
+ prompt = input_data.get("prompt", "")
156
+ if not prompt or not prompt.strip():
157
+ print(json.dumps({}))
158
+ return
159
+
160
+ # Skip very short messages (likely just commands)
161
+ if len(prompt.strip()) < 3:
162
+ print(json.dumps({}))
163
+ return
164
+
165
+ project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
166
+
167
+ # Initialize database
168
+ db_path = get_db_path()
169
+ conn = ensure_database(db_path)
170
+
171
+ # Get or create session
172
+ session_id = get_or_create_session(conn, project_path)
173
+
174
+ # Analyze message
175
+ analysis = analyze_message(prompt)
176
+
177
+ # Generate message ID
178
+ message_id = generate_id()
179
+
180
+ # Insert message record
181
+ cursor = conn.cursor()
182
+ cursor.execute(
183
+ """
184
+ INSERT INTO user_messages (
185
+ id, session_id, timestamp, content, word_count, char_count,
186
+ line_count, has_code_blocks, has_questions, has_commands,
187
+ tone_indicators, project_path
188
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
189
+ """,
190
+ (
191
+ message_id,
192
+ session_id,
193
+ datetime.now(timezone.utc).isoformat(),
194
+ prompt,
195
+ analysis["word_count"],
196
+ analysis["char_count"],
197
+ analysis["line_count"],
198
+ analysis["has_code_blocks"],
199
+ analysis["has_questions"],
200
+ analysis["has_commands"],
201
+ analysis["tone_indicators"],
202
+ project_path,
203
+ ),
204
+ )
205
+ conn.commit()
206
+ conn.close()
207
+
208
+ # Return empty response (don't modify prompt)
209
+ print(json.dumps({}))
210
+
211
+ except Exception as e:
212
+ # Hooks should never block - log error but continue
213
+ # Don't print system message to avoid polluting user experience
214
+ print(json.dumps({}))
215
+
216
+ sys.exit(0)
217
+
218
+
219
+ if __name__ == "__main__":
220
+ main()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omni-cortex
3
- Version: 1.11.2
3
+ Version: 1.12.0
4
4
  Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
5
5
  Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
6
6
  Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
@@ -0,0 +1,26 @@
1
+ omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=zdaKChi8zOghRlHswisCBSQE3kW1MtmM6AFfI_ivvpI,16581
2
+ omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=3_V6Qw5m40eGrMmm5i94vINzeVxmcJvivdPa69H3AOI,8585
3
+ omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/session_utils.py,sha256=3SKPCytqWuRPOupWdzmwBoKBDJqtLcT1Nle_pueDQUY,5746
4
+ omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/stop.py,sha256=UroliJsyIS9_lj29-1d_r-80V4AfTMUFCaOjJZv3lwM,6976
5
+ omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
6
+ omni_cortex-1.12.0.data/data/share/omni-cortex/hooks/user_prompt.py,sha256=WNHJvhnkb9rXQ_HDpr6eLpM5vwy1Y1xl1EUoqyNC-x8,6859
7
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/.env.example,sha256=9xS7-UiWlMddRwzlyyyKNHAMlNTsgH-2sPV266guJpQ,372
8
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py,sha256=ElchfcBv4pmVr2PsePCgFlCyuvf4_jDJj_C3AmMhu7U,8973
9
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=5UCvLayZGeSdGsYAzOeupumclAhoFLusGYLdyl33ANc,9304
10
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=LAN7GSM2tvMcJaL0RrGJurH9-tw3cs2QtPduqCbLvj0,34974
11
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/image_service.py,sha256=NP6ojFpHb6iNTYRkXqYu1CL6WvooZpZ54mjLiWSWG_g,19205
12
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py,sha256=WnunFGET9zlsn9WBpVsio2zI7BiUQanE0xzAQQxIhII,3944
13
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=rJrmYJvkGhRsXOdYKOTRPMVnwA00W5QoGJ_Aa3v-TRE,46219
14
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=LkmcYq1imsyDlMYnX3Z_FOTmPsu37MQEfJSI-w5EjvM,7330
15
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/project_config.py,sha256=ZxGoeRpHvN5qQyf2hRxrAZiHrPSwdQp59f0di6O1LKM,4352
16
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=lwFXS8iJbOoxf7FAyo2TjH25neaMHiJ8B3jS57XxtDI,5713
17
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/prompt_security.py,sha256=LcdZhYy1CfpSq_4BPO6lMJ15phc2ZXLUSBAnAvODVCI,3423
18
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
19
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/security.py,sha256=nQsoPE0n5dtY9ive00d33W1gL48GgK7C5Ae0BK2oW2k,3479
20
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=miB9zGGSirBkjDE-OZTPCnv43Yc98xuAz_Ne8vTNFHg,186004
21
+ omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=gNQLd94AcC-InumGQmUolREhiogCzilYWpLN8SRZjHI,3645
22
+ omni_cortex-1.12.0.dist-info/METADATA,sha256=_SKxV6UBJR4dQr44TyZA8c9yY1OumxSOV8Q5JkvxRHA,15712
23
+ omni_cortex-1.12.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
24
+ omni_cortex-1.12.0.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
25
+ omni_cortex-1.12.0.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
26
+ omni_cortex-1.12.0.dist-info/RECORD,,
@@ -1,25 +0,0 @@
1
- omni_cortex-1.11.2.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=yBLoYvEdUunbG8fclzIEWszCptworkUUcOUSKzNsvms,12271
2
- omni_cortex-1.11.2.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=mkZ7eeBnjWkIgNnrqfYSXIhhLNYYk4hQx_6F0pNrGoc,6395
3
- omni_cortex-1.11.2.data/data/share/omni-cortex/hooks/session_utils.py,sha256=3SKPCytqWuRPOupWdzmwBoKBDJqtLcT1Nle_pueDQUY,5746
4
- omni_cortex-1.11.2.data/data/share/omni-cortex/hooks/stop.py,sha256=T1bwcmbTLj0gzjrVvFBT1zB6wff4J2YkYBAY-ZxZI5g,5336
5
- omni_cortex-1.11.2.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
6
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/.env.example,sha256=9xS7-UiWlMddRwzlyyyKNHAMlNTsgH-2sPV266guJpQ,372
7
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py,sha256=ElchfcBv4pmVr2PsePCgFlCyuvf4_jDJj_C3AmMhu7U,8973
8
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=5UCvLayZGeSdGsYAzOeupumclAhoFLusGYLdyl33ANc,9304
9
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=LAN7GSM2tvMcJaL0RrGJurH9-tw3cs2QtPduqCbLvj0,34974
10
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/image_service.py,sha256=NP6ojFpHb6iNTYRkXqYu1CL6WvooZpZ54mjLiWSWG_g,19205
11
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/logging_config.py,sha256=WnunFGET9zlsn9WBpVsio2zI7BiUQanE0xzAQQxIhII,3944
12
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=rexhrCwQa19a4RFTv9zdyq8amv2J_73GDfFY9MvB43A,46166
13
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=LkmcYq1imsyDlMYnX3Z_FOTmPsu37MQEfJSI-w5EjvM,7330
14
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/project_config.py,sha256=ZxGoeRpHvN5qQyf2hRxrAZiHrPSwdQp59f0di6O1LKM,4352
15
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=lwFXS8iJbOoxf7FAyo2TjH25neaMHiJ8B3jS57XxtDI,5713
16
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/prompt_security.py,sha256=LcdZhYy1CfpSq_4BPO6lMJ15phc2ZXLUSBAnAvODVCI,3423
17
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
18
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/security.py,sha256=nQsoPE0n5dtY9ive00d33W1gL48GgK7C5Ae0BK2oW2k,3479
19
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=miB9zGGSirBkjDE-OZTPCnv43Yc98xuAz_Ne8vTNFHg,186004
20
- omni_cortex-1.11.2.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=ABXAtlhBI5vnTcwdQUS-UQcDyTn-rWZL5OKEP9YY-kU,3619
21
- omni_cortex-1.11.2.dist-info/METADATA,sha256=4XyJy3q4ZO1OfMaHO-mTYNw-kqWhchOXOGxqaBPnfGY,15712
22
- omni_cortex-1.11.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
23
- omni_cortex-1.11.2.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
24
- omni_cortex-1.11.2.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
25
- omni_cortex-1.11.2.dist-info/RECORD,,