omni-cortex 1.2.0__py3-none-any.whl → 1.11.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/.env.example +12 -0
  2. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +280 -0
  3. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +59 -32
  4. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/database.py +305 -18
  5. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/image_service.py +35 -16
  6. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +34 -4
  7. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/main.py +451 -13
  8. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/models.py +64 -12
  9. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/prompt_security.py +111 -0
  10. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/security.py +104 -0
  11. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/uv.lock +414 -1
  12. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +24 -2
  13. omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/post_tool_use.py +429 -0
  14. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/pre_tool_use.py +52 -2
  15. omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/session_utils.py +186 -0
  16. {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/METADATA +237 -8
  17. omni_cortex-1.11.3.dist-info/RECORD +25 -0
  18. omni_cortex-1.2.0.data/data/share/omni-cortex/hooks/post_tool_use.py +0 -160
  19. omni_cortex-1.2.0.dist-info/RECORD +0 -20
  20. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  21. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  22. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  23. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  24. {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  25. {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/WHEEL +0 -0
  26. {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/entry_points.txt +0 -0
  27. {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,429 @@
1
+ #!/usr/bin/env python3
2
+ """PostToolUse hook - logs tool result after execution.
3
+
4
+ This hook is called by Claude Code after each tool completes.
5
+ It logs the tool output, duration, and success/error status.
6
+
7
+ Hook configuration for settings.json:
8
+ {
9
+ "hooks": {
10
+ "PostToolUse": [
11
+ {
12
+ "type": "command",
13
+ "command": "python hooks/post_tool_use.py"
14
+ }
15
+ ]
16
+ }
17
+ }
18
+ """
19
+
20
+ import json
21
+ import re
22
+ import sys
23
+ import os
24
+ import sqlite3
25
+ from datetime import datetime, timezone
26
+ from pathlib import Path
27
+
28
+ # Import shared session management
29
+ from session_utils import get_or_create_session
30
+
31
+
32
# Field-name patterns whose values must never reach the activity log.
SENSITIVE_FIELD_PATTERNS = [
    r'(?i)(api[_-]?key|apikey)',
    r'(?i)(password|passwd|pwd)',
    r'(?i)(secret|token|credential)',
    r'(?i)(auth[_-]?token|access[_-]?token)',
    r'(?i)(private[_-]?key|ssh[_-]?key)',
]


def redact_sensitive_fields(data: dict) -> dict:
    """Return a copy of *data* with sensitive-looking values masked.

    Any key matching an entry in SENSITIVE_FIELD_PATTERNS has its value
    replaced with '[REDACTED]'. Nested dicts, and dicts inside lists, are
    handled recursively; non-dict inputs are returned unchanged.
    """
    if not isinstance(data, dict):
        return data

    redacted = {}
    for field, value in data.items():
        # Match the key name (not the value) against the sensitive patterns.
        if any(re.search(p, str(field)) for p in SENSITIVE_FIELD_PATTERNS):
            redacted[field] = '[REDACTED]'
        elif isinstance(value, dict):
            redacted[field] = redact_sensitive_fields(value)
        elif isinstance(value, list):
            redacted[field] = [
                redact_sensitive_fields(v) if isinstance(v, dict) else v
                for v in value
            ]
        else:
            redacted[field] = value

    return redacted
71
+
72
+
73
def get_db_path() -> Path:
    """Return the per-project SQLite path: <project>/.omni-cortex/cortex.db.

    The project root comes from the CLAUDE_PROJECT_DIR environment
    variable, falling back to the current working directory.
    """
    root = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
    return Path(root) / ".omni-cortex" / "cortex.db"
77
+
78
+
79
def ensure_database(db_path: Path) -> sqlite3.Connection:
    """Open the activity database at *db_path*, bootstrapping it if needed.

    Creates the parent directory and a minimal 'activities' table with
    indexes when the schema is absent, so the hook works out of the box
    before the MCP server has applied the full schema.

    Args:
        db_path: Location of the SQLite file.

    Returns:
        An open sqlite3.Connection.
    """
    db_path.parent.mkdir(parents=True, exist_ok=True)
    connection = sqlite3.connect(str(db_path))

    # Only apply the bootstrap schema when 'activities' is missing.
    row = connection.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='activities'"
    ).fetchone()
    if row is None:
        connection.executescript("""
            CREATE TABLE IF NOT EXISTS activities (
                id TEXT PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                timestamp TEXT NOT NULL,
                event_type TEXT NOT NULL,
                tool_name TEXT,
                tool_input TEXT,
                tool_output TEXT,
                duration_ms INTEGER,
                success INTEGER DEFAULT 1,
                error_message TEXT,
                project_path TEXT,
                file_path TEXT,
                metadata TEXT
            );
            CREATE INDEX IF NOT EXISTS idx_activities_timestamp ON activities(timestamp DESC);
            CREATE INDEX IF NOT EXISTS idx_activities_tool ON activities(tool_name);
        """)
        connection.commit()

    return connection
116
+
117
+
118
def generate_id() -> str:
    """Return a unique activity ID of the form act_<epoch-ms>_<8 hex chars>."""
    millis = int(datetime.now().timestamp() * 1000)
    nonce = os.urandom(4).hex()
    return f"act_{millis}_{nonce}"
123
+
124
+
125
def truncate(text: str, max_length: int = 10000) -> str:
    """Clip *text* to at most *max_length* characters, marking any cut.

    Text within the limit is returned untouched; longer text is cut 20
    characters short of the limit and suffixed with a truncation marker.
    """
    if len(text) > max_length:
        return text[:max_length - 20] + "\n... [truncated]"
    return text
130
+
131
+
132
def extract_skill_info(tool_input: dict, project_path: str) -> tuple:
    """Resolve the skill name and the scope its command file lives in.

    Looks for <skill>.md under the project's .claude/commands first, then
    under the user's home .claude/commands.

    Returns:
        (skill_name, scope) where scope is 'project', 'universal', or
        'unknown'; (None, None) when no skill name is present or any
        error occurs during resolution.
    """
    try:
        name = tool_input.get("skill", "")
        if not name:
            return None, None

        # Probe candidate command-file locations in precedence order.
        candidates = (
            (Path(project_path) / ".claude" / "commands" / f"{name}.md", "project"),
            (Path.home() / ".claude" / "commands" / f"{name}.md", "universal"),
        )
        for command_file, scope in candidates:
            if command_file.exists():
                return name, scope

        return name, "unknown"
    except Exception:
        return None, None
155
+
156
+
157
def extract_mcp_server(tool_name: str) -> str:
    """Return the server segment of an 'mcp__<server>__<tool>' name.

    Returns None for empty names, names without the 'mcp__' prefix, or
    names lacking a tool segment after the server.
    """
    if not tool_name or not tool_name.startswith("mcp__"):
        return None

    segments = tool_name.split("__")
    return segments[1] if len(segments) >= 3 else None
166
+
167
+
168
def ensure_analytics_columns(conn: sqlite3.Connection) -> None:
    """Add command-analytics columns to 'activities' if missing (idempotent).

    Older databases predate these columns; ALTER TABLE is applied only for
    columns not already present, so repeated calls are safe.
    """
    cursor = conn.cursor()
    existing = {row[1] for row in cursor.execute("PRAGMA table_info(activities)")}

    for col_name, col_type in (
        ("command_name", "TEXT"),
        ("command_scope", "TEXT"),
        ("mcp_server", "TEXT"),
        ("skill_name", "TEXT"),
        ("summary", "TEXT"),
        ("summary_detail", "TEXT"),
    ):
        if col_name not in existing:
            # Identifiers come from the fixed tuple above, so the f-string
            # cannot inject user input.
            cursor.execute(f"ALTER TABLE activities ADD COLUMN {col_name} {col_type}")

    conn.commit()
188
+
189
+
190
def generate_summary(tool_name: str, tool_input: dict, success: bool) -> tuple:
    """Generate short and detailed summaries for an activity.

    Args:
        tool_name: The tool that ran; falsy values yield (None, None).
        tool_input: The tool's input payload; non-dicts are treated as {}.
        success: Whether the call succeeded. Failures prefix both
            summaries with "[FAILED]".

    Returns:
        Tuple of (summary, summary_detail)
    """
    if not tool_name:
        return None, None

    input_data = tool_input if isinstance(tool_input, dict) else {}
    short = ""
    detail = ""

    if tool_name == "Read":
        path = input_data.get("file_path", "unknown")
        filename = Path(path).name if path else "file"
        # Fix: interpolate the computed filename (previously a hard-coded
        # "(unknown)" literal, leaving `filename` unused).
        short = f"Read file: {filename}"
        detail = f"Reading contents of {path}"

    elif tool_name == "Write":
        path = input_data.get("file_path", "unknown")
        filename = Path(path).name if path else "file"
        short = f"Write file: {filename}"
        detail = f"Writing/creating file at {path}"

    elif tool_name == "Edit":
        path = input_data.get("file_path", "unknown")
        filename = Path(path).name if path else "file"
        short = f"Edit file: {filename}"
        detail = f"Editing {path}"

    elif tool_name == "Bash":
        cmd = str(input_data.get("command", ""))[:50]
        short = f"Run: {cmd}..."
        detail = f"Executing: {input_data.get('command', 'unknown')}"

    elif tool_name == "Grep":
        pattern = input_data.get("pattern", "")
        short = f"Search: {pattern[:30]}"
        detail = f"Searching for pattern: {pattern}"

    elif tool_name == "Glob":
        pattern = input_data.get("pattern", "")
        short = f"Find files: {pattern[:30]}"
        detail = f"Finding files matching: {pattern}"

    elif tool_name == "Skill":
        skill = input_data.get("skill", "unknown")
        short = f"Run skill: /{skill}"
        detail = f"Executing slash command /{skill}"

    elif tool_name == "Task":
        desc = input_data.get("description", "task")
        short = f"Spawn agent: {desc[:30]}"
        detail = f"Launching sub-agent: {desc}"

    elif tool_name == "TodoWrite":
        todos = input_data.get("todos", [])
        count = len(todos) if isinstance(todos, list) else 0
        short = f"Update todo: {count} items"
        detail = f"Managing task list with {count} items"

    elif tool_name.startswith("mcp__"):
        parts = tool_name.split("__")
        server = parts[1] if len(parts) > 1 else "unknown"
        tool = parts[2] if len(parts) > 2 else tool_name
        short = f"MCP: {server}/{tool}"
        detail = f"Calling {tool} from MCP server {server}"

    else:
        short = f"Tool: {tool_name}"
        detail = f"Using tool {tool_name}"

    if not success:
        short = f"[FAILED] {short}"
        detail = f"[FAILED] {detail}"

    return short, detail
268
+
269
+
270
def main():
    """Process PostToolUse hook.

    Reads the hook payload from stdin, detects success/failure from the
    tool response, redacts sensitive fields, and appends an activity row
    to the project's SQLite log. Always prints a JSON response and exits
    with status 0 so the hook can never block Claude Code.
    """
    try:
        # Read all input at once (more reliable than json.load on stdin)
        raw_input = sys.stdin.read()
        if not raw_input or not raw_input.strip():
            print(json.dumps({}))
            return

        input_data = json.loads(raw_input)

        # Extract data from hook input
        # Note: Claude Code uses 'tool_response' not 'tool_output'
        tool_name = input_data.get("tool_name")
        tool_input = input_data.get("tool_input", {})
        tool_response = input_data.get("tool_response", {})  # Correct field name
        agent_id = input_data.get("agent_id")

        # Determine success/error from response content
        # Claude Code doesn't send 'is_error' - we must detect from response
        is_error = False
        error_message = None

        if isinstance(tool_response, dict):
            # Check for explicit error field
            if "error" in tool_response:
                is_error = True
                error_message = str(tool_response.get("error", ""))[:500]

            # For Bash: check stderr or error patterns in stdout
            elif tool_name == "Bash":
                stderr = tool_response.get("stderr", "")
                stdout = tool_response.get("stdout", "")

                # Check stderr for content (excluding common non-errors)
                if stderr and stderr.strip():
                    # Filter out common non-error stderr output
                    stderr_lower = stderr.lower()
                    non_error_patterns = ["warning:", "note:", "info:"]
                    if not any(p in stderr_lower for p in non_error_patterns):
                        is_error = True
                        error_message = stderr[:500]

                # Check stdout for common error patterns
                if not is_error and stdout:
                    error_patterns = [
                        "command not found",
                        "No such file or directory",
                        "Permission denied",
                        "fatal:",
                        "error:",
                        "Error:",
                        "FAILED",
                        "Cannot find",
                        "not recognized",
                        "Exit code 1",
                    ]
                    stdout_check = stdout[:1000]  # Check first 1000 chars
                    for pattern in error_patterns:
                        if pattern in stdout_check:
                            is_error = True
                            error_message = f"Error pattern detected: {pattern}"
                            break

            # For Read: check for file errors
            elif tool_name == "Read":
                if "error" in str(tool_response).lower():
                    is_error = True
                    error_message = "File read error"

        # Legacy fallback: also check tool_output for backwards compatibility
        # NOTE(review): tool_output is assigned but never used below —
        # safe_output is built from tool_response directly. Confirm whether
        # the legacy fallback is still required or can be removed.
        tool_output = tool_response if tool_response else input_data.get("tool_output", {})

        # Skip logging our own tools to prevent recursion
        # MCP tools are named like "mcp__omni-cortex__cortex_remember"
        if tool_name and ("cortex_" in tool_name or "omni-cortex" in tool_name):
            print(json.dumps({}))
            return

        project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())

        # Auto-initialize database (creates if not exists)
        db_path = get_db_path()
        conn = ensure_database(db_path)

        # Ensure analytics columns exist
        ensure_analytics_columns(conn)

        # Get or create session (auto-manages session lifecycle)
        session_id = get_or_create_session(conn, project_path)

        # Redact sensitive fields before logging
        safe_input = redact_sensitive_fields(tool_input) if isinstance(tool_input, dict) else tool_input
        safe_output = redact_sensitive_fields(tool_response) if isinstance(tool_response, dict) else tool_response

        # Extract command analytics
        skill_name = None
        command_scope = None
        mcp_server = None

        # Extract skill info from Skill tool calls
        if tool_name == "Skill" and isinstance(tool_input, dict):
            skill_name, command_scope = extract_skill_info(tool_input, project_path)

        # Extract MCP server from tool name (mcp__servername__toolname pattern)
        if tool_name and tool_name.startswith("mcp__"):
            mcp_server = extract_mcp_server(tool_name)

        # Generate summary for activity
        # Summaries are best-effort; a failure here must not block logging.
        summary = None
        summary_detail = None
        try:
            summary, summary_detail = generate_summary(tool_name, safe_input, not is_error)
        except Exception:
            pass

        # Insert activity record with analytics columns
        cursor = conn.cursor()
        cursor.execute(
            """
            INSERT INTO activities (
                id, session_id, agent_id, timestamp, event_type,
                tool_name, tool_input, tool_output, success, error_message, project_path,
                skill_name, command_scope, mcp_server, summary, summary_detail
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                generate_id(),
                session_id,
                agent_id,
                datetime.now(timezone.utc).isoformat(),
                "post_tool_use",
                tool_name,
                truncate(json.dumps(safe_input, default=str)),
                truncate(json.dumps(safe_output, default=str)),
                0 if is_error else 1,
                error_message,
                project_path,
                skill_name,
                command_scope,
                mcp_server,
                summary,
                summary_detail,
            ),
        )
        conn.commit()
        conn.close()

        # Return empty response (no modification)
        print(json.dumps({}))

    except Exception as e:
        # Hooks should never block - log error but continue
        print(json.dumps({"systemMessage": f"Cortex post_tool_use: {e}"}))

    sys.exit(0)
426
+
427
+
428
# Script entry point: Claude Code invokes this file as a command hook.
if __name__ == "__main__":
    main()
@@ -18,12 +18,57 @@ Hook configuration for settings.json:
18
18
  """
19
19
 
20
20
  import json
21
+ import re
21
22
  import sys
22
23
  import os
23
24
  import sqlite3
24
25
  from datetime import datetime, timezone
25
26
  from pathlib import Path
26
27
 
28
+ # Import shared session management
29
+ from session_utils import get_or_create_session
30
+
31
+
32
# Patterns for sensitive field names that should be redacted.
# Duplicated from post_tool_use.py — keep the two lists in sync.
SENSITIVE_FIELD_PATTERNS = [
    r'(?i)(api[_-]?key|apikey)',
    r'(?i)(password|passwd|pwd)',
    r'(?i)(secret|token|credential)',
    r'(?i)(auth[_-]?token|access[_-]?token)',
    r'(?i)(private[_-]?key|ssh[_-]?key)',
]


def redact_sensitive_fields(data: dict) -> dict:
    """Redact sensitive fields from a dictionary for safe logging.

    Recursively processes nested dicts and lists: keys matching any
    SENSITIVE_FIELD_PATTERNS entry have their values replaced with
    '[REDACTED]'. Non-dict inputs are returned unchanged.
    """
    if not isinstance(data, dict):
        return data

    result = {}
    for key, value in data.items():
        # Check if key matches sensitive patterns (key name only, not value)
        is_sensitive = any(
            re.search(pattern, str(key))
            for pattern in SENSITIVE_FIELD_PATTERNS
        )

        if is_sensitive:
            result[key] = '[REDACTED]'
        elif isinstance(value, dict):
            result[key] = redact_sensitive_fields(value)
        elif isinstance(value, list):
            # Only dict elements are recursed into; scalars pass through.
            result[key] = [
                redact_sensitive_fields(item) if isinstance(item, dict) else item
                for item in value
            ]
        else:
            result[key] = value

    return result
71
+
27
72
 
28
73
  def get_db_path() -> Path:
29
74
  """Get the database path for the current project."""
@@ -115,13 +160,18 @@ def main():
115
160
  print(json.dumps({}))
116
161
  return
117
162
 
118
- session_id = os.environ.get("CLAUDE_SESSION_ID")
119
163
  project_path = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
120
164
 
121
165
  # Auto-initialize database (creates if not exists)
122
166
  db_path = get_db_path()
123
167
  conn = ensure_database(db_path)
124
168
 
169
+ # Get or create session (auto-manages session lifecycle)
170
+ session_id = get_or_create_session(conn, project_path)
171
+
172
+ # Redact sensitive fields before logging
173
+ safe_input = redact_sensitive_fields(tool_input) if isinstance(tool_input, dict) else tool_input
174
+
125
175
  # Insert activity record
126
176
  cursor = conn.cursor()
127
177
  cursor.execute(
@@ -138,7 +188,7 @@ def main():
138
188
  datetime.now(timezone.utc).isoformat(),
139
189
  "pre_tool_use",
140
190
  tool_name,
141
- truncate(json.dumps(tool_input, default=str)),
191
+ truncate(json.dumps(safe_input, default=str)),
142
192
  project_path,
143
193
  ),
144
194
  )
@@ -0,0 +1,186 @@
1
+ #!/usr/bin/env python3
2
+ """Shared session management utilities for Claude Code hooks.
3
+
4
+ This module provides session management functionality that can be shared
5
+ across pre_tool_use.py and post_tool_use.py hooks to ensure consistent
6
+ session tracking.
7
+
8
+ Session Management Logic:
9
+ 1. Check for existing session file at `.omni-cortex/current_session.json`
10
+ 2. If session exists and is valid (not timed out), use it
11
+ 3. If no valid session, create a new one in both file and database
12
+ 4. Update last_activity_at on each use to track session activity
13
+ """
14
+
15
+ import json
16
+ import os
17
+ import sqlite3
18
+ import time
19
+ from datetime import datetime, timezone
20
+ from pathlib import Path
21
+ from typing import Optional
22
+
23
+
24
+ # Session timeout in seconds (4 hours of inactivity = new session)
25
+ SESSION_TIMEOUT_SECONDS = 4 * 60 * 60
26
+
27
+
28
def generate_session_id() -> str:
    """Return a new session ID matching the MCP format.

    Returns:
        Session ID in format: sess_{timestamp_ms}_{random_hex}
    """
    millis = int(time.time() * 1000)
    nonce = os.urandom(4).hex()
    return f"sess_{millis}_{nonce}"
37
+
38
+
39
def get_session_file_path() -> Path:
    """Return the path of the current-session file.

    Resolves to <project>/.omni-cortex/current_session.json, where the
    project root comes from CLAUDE_PROJECT_DIR (default: cwd).
    """
    root = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
    return Path(root) / ".omni-cortex" / "current_session.json"
47
+
48
+
49
def load_session_file() -> Optional[dict]:
    """Read the persisted session record from disk.

    Returns:
        The session dict, or None when the file is missing, unreadable,
        or not valid JSON.
    """
    session_file = get_session_file_path()
    if not session_file.exists():
        return None

    try:
        with open(session_file, "r") as handle:
            return json.load(handle)
    except (json.JSONDecodeError, IOError):
        return None
64
+
65
+
66
def save_session_file(session_data: dict) -> None:
    """Persist *session_data* to the session file, creating parent dirs.

    Args:
        session_data: Dict containing session_id, project_path,
            started_at, last_activity_at
    """
    target = get_session_file_path()
    target.parent.mkdir(parents=True, exist_ok=True)

    with open(target, "w") as handle:
        json.dump(session_data, handle, indent=2)
77
+
78
+
79
def is_session_valid(session_data: dict) -> bool:
    """Report whether a session is still within its inactivity window.

    A session is valid when last_activity_at exists, parses as an
    ISO-8601 timestamp (a trailing 'Z' is accepted), and is less than
    SESSION_TIMEOUT_SECONDS before now. Missing or malformed timestamps
    count as invalid.

    Args:
        session_data: Session dict with last_activity_at field

    Returns:
        True if session is valid, False otherwise
    """
    stamp = session_data.get("last_activity_at")
    if not stamp:
        return False

    try:
        previous = datetime.fromisoformat(stamp.replace("Z", "+00:00"))
        # Naive timestamps raise TypeError on subtraction → treated invalid.
        age = (datetime.now(timezone.utc) - previous).total_seconds()
    except (ValueError, TypeError):
        return False
    return age < SESSION_TIMEOUT_SECONDS
103
+
104
+
105
def create_session_in_db(conn: sqlite3.Connection, session_id: str, project_path: str) -> None:
    """Record a new session row, bootstrapping the sessions table if absent.

    Uses INSERT OR IGNORE so re-creating an existing session ID is a no-op.

    Args:
        conn: SQLite database connection
        session_id: The session ID to create
        project_path: The project directory path
    """
    cursor = conn.cursor()
    started_at = datetime.now(timezone.utc).isoformat()

    # The MCP server normally owns the full schema; create a minimal
    # sessions table here so first-run hook calls don't fail.
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='sessions'")
    if cursor.fetchone() is None:
        conn.executescript("""
            CREATE TABLE IF NOT EXISTS sessions (
                id TEXT PRIMARY KEY,
                project_path TEXT NOT NULL,
                started_at TEXT NOT NULL,
                ended_at TEXT,
                summary TEXT,
                tags TEXT,
                metadata TEXT
            );
            CREATE INDEX IF NOT EXISTS idx_sessions_started ON sessions(started_at DESC);
            CREATE INDEX IF NOT EXISTS idx_sessions_project ON sessions(project_path);
        """)
        conn.commit()

    cursor.execute(
        """
        INSERT OR IGNORE INTO sessions (id, project_path, started_at)
        VALUES (?, ?, ?)
        """,
        (session_id, project_path, started_at),
    )
    conn.commit()
145
+
146
+
147
def get_or_create_session(conn: sqlite3.Connection, project_path: str) -> str:
    """Get the current session ID, creating a new one if needed.

    Session management logic:
    1. Check for existing session file
    2. If exists and not timed out, use it and update last_activity
    3. If doesn't exist or timed out, create new session

    Args:
        conn: SQLite database connection
        project_path: The project directory path

    Returns:
        The session ID to use for activity logging
    """
    session_data = load_session_file()
    now_iso = datetime.now(timezone.utc).isoformat()

    if session_data and is_session_valid(session_data):
        # Update last activity time (keeps the inactivity window sliding)
        session_data["last_activity_at"] = now_iso
        save_session_file(session_data)
        # NOTE(review): raises KeyError if the file lacks "session_id" —
        # presumably files are always written with one (see below); confirm.
        return session_data["session_id"]

    # Create new session
    session_id = generate_session_id()

    # Create in database
    create_session_in_db(conn, session_id, project_path)

    # Save to file
    session_data = {
        "session_id": session_id,
        "project_path": project_path,
        "started_at": now_iso,
        "last_activity_at": now_iso,
    }
    save_session_file(session_data)

    return session_id