omni-cortex 1.17.0__py3-none-any.whl → 1.17.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. omni_cortex/__init__.py +3 -0
  2. omni_cortex/categorization/__init__.py +9 -0
  3. omni_cortex/categorization/auto_tags.py +166 -0
  4. omni_cortex/categorization/auto_type.py +165 -0
  5. omni_cortex/config.py +141 -0
  6. omni_cortex/dashboard.py +232 -0
  7. omni_cortex/database/__init__.py +24 -0
  8. omni_cortex/database/connection.py +137 -0
  9. omni_cortex/database/migrations.py +210 -0
  10. omni_cortex/database/schema.py +212 -0
  11. omni_cortex/database/sync.py +421 -0
  12. omni_cortex/decay/__init__.py +7 -0
  13. omni_cortex/decay/importance.py +147 -0
  14. omni_cortex/embeddings/__init__.py +35 -0
  15. omni_cortex/embeddings/local.py +442 -0
  16. omni_cortex/models/__init__.py +20 -0
  17. omni_cortex/models/activity.py +265 -0
  18. omni_cortex/models/agent.py +144 -0
  19. omni_cortex/models/memory.py +395 -0
  20. omni_cortex/models/relationship.py +206 -0
  21. omni_cortex/models/session.py +290 -0
  22. omni_cortex/resources/__init__.py +1 -0
  23. omni_cortex/search/__init__.py +22 -0
  24. omni_cortex/search/hybrid.py +197 -0
  25. omni_cortex/search/keyword.py +204 -0
  26. omni_cortex/search/ranking.py +127 -0
  27. omni_cortex/search/semantic.py +232 -0
  28. omni_cortex/server.py +360 -0
  29. omni_cortex/setup.py +278 -0
  30. omni_cortex/tools/__init__.py +13 -0
  31. omni_cortex/tools/activities.py +453 -0
  32. omni_cortex/tools/memories.py +536 -0
  33. omni_cortex/tools/sessions.py +311 -0
  34. omni_cortex/tools/utilities.py +477 -0
  35. omni_cortex/utils/__init__.py +13 -0
  36. omni_cortex/utils/formatting.py +282 -0
  37. omni_cortex/utils/ids.py +72 -0
  38. omni_cortex/utils/timestamps.py +129 -0
  39. omni_cortex/utils/truncation.py +111 -0
  40. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/main.py +43 -13
  41. {omni_cortex-1.17.0.dist-info → omni_cortex-1.17.2.dist-info}/METADATA +1 -1
  42. omni_cortex-1.17.2.dist-info/RECORD +65 -0
  43. omni_cortex-1.17.0.dist-info/RECORD +0 -26
  44. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/.env.example +0 -0
  45. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +0 -0
  46. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -0
  47. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/database.py +0 -0
  48. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/image_service.py +0 -0
  49. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
  50. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/models.py +0 -0
  51. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  52. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  53. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/prompt_security.py +0 -0
  54. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  55. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/security.py +0 -0
  56. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
  57. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
  58. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
  59. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
  60. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/hooks/session_utils.py +0 -0
  61. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  62. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  63. {omni_cortex-1.17.0.data → omni_cortex-1.17.2.data}/data/share/omni-cortex/hooks/user_prompt.py +0 -0
  64. {omni_cortex-1.17.0.dist-info → omni_cortex-1.17.2.dist-info}/WHEEL +0 -0
  65. {omni_cortex-1.17.0.dist-info → omni_cortex-1.17.2.dist-info}/entry_points.txt +0 -0
  66. {omni_cortex-1.17.0.dist-info → omni_cortex-1.17.2.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,282 @@
1
+ """Output formatting utilities for Omni Cortex."""
2
+
3
+ import json
4
+ import re
5
+ from html import escape as html_escape
6
+ from typing import Any, Optional
7
+ from datetime import datetime
8
+
9
+ from .timestamps import format_relative_time
10
+
11
+
12
def xml_escape(text: str) -> str:
    """Escape XML/HTML special characters in *text*.

    Escapes ``&``, ``<``, ``>`` and both quote characters so that stored
    content cannot smuggle XML-like instruction delimiters into the
    structured output returned to the model.
    """
    return html_escape(text, quote=True)
19
+
20
+
21
# Known prompt injection patterns: (regex, human-readable description) pairs
# scanned by detect_injection_patterns(). Each regex is case-insensitive via
# the inline (?i) flag; descriptions are surfaced in the security note.
_INJECTION_PATTERNS = [
    # Attempts to override earlier instructions, e.g. "ignore all previous instructions"
    (r'(?i)(ignore|disregard|forget)\s+(all\s+)?(previous|prior|above)\s+instructions?',
     'instruction override'),
    # Attempts to introduce a replacement system prompt/message
    (r'(?i)(new\s+)?system\s+(prompt|instruction|message)',
     'system prompt manipulation'),
    # Fake chat-template delimiters such as [INST], [system], <system>, <instruction>
    (r'(?i)\[/?system\]|\[/?inst\]|<\/?system>|<\/?instruction>',
     'fake delimiter'),
    # Common jailbreak keywords/signatures
    (r'(?i)bypass|jailbreak|DAN|GODMODE',
     'jailbreak signature'),
]
32
+
33
+
34
def detect_injection_patterns(content: str) -> list[str]:
    """Return descriptions of known injection patterns found in *content*.

    Scans against the module-level ``_INJECTION_PATTERNS`` table; an empty
    list means nothing suspicious was matched.
    """
    return [
        description
        for pattern, description in _INJECTION_PATTERNS
        if re.search(pattern, content)
    ]
41
+
42
+
43
def format_memory_markdown(
    memory: dict[str, Any],
    related_memories: Optional[list[dict[str, Any]]] = None,
) -> str:
    """Format a memory as markdown.

    Args:
        memory: Memory dictionary
        related_memories: Optional list of related memory dicts with relationship info

    Returns:
        Markdown formatted string. Content and context are XML-escaped to
        blunt prompt injection when the text is returned to the model.
    """
    lines = []

    # Header with ID and type
    mem_type = memory.get("type", "general")
    lines.append(f"## [{mem_type}] {memory.get('id', 'unknown')}")
    lines.append("")

    # Content - XML escape to prevent prompt injection when returned to Claude
    content = memory.get("content", "")
    # Detect and flag potential injection patterns
    injections = detect_injection_patterns(content)
    if injections:
        lines.append(f"[Security Note: Content contains patterns that may be injection attempts: {', '.join(injections)}]")
    lines.append(xml_escape(content))
    lines.append("")

    # Metadata
    lines.append("---")

    # Tags (may be stored as a JSON-encoded string)
    tags = memory.get("tags")
    if tags:
        if isinstance(tags, str):
            # Bug fix: malformed JSON in a stored tags string previously raised
            # json.JSONDecodeError and aborted formatting entirely; skip the
            # tags line instead and keep rendering the rest of the memory.
            try:
                tags = json.loads(tags)
            except json.JSONDecodeError:
                tags = None
        if tags:
            lines.append(f"**Tags:** {', '.join(tags)}")

    # Context - also escape
    context = memory.get("context")
    if context:
        lines.append(f"**Context:** {xml_escape(context)}")

    # Timestamps
    created = memory.get("created_at")
    if created:
        lines.append(f"**Created:** {format_relative_time(created)}")

    accessed = memory.get("last_accessed")
    if accessed:
        lines.append(f"**Last accessed:** {format_relative_time(accessed)}")

    # Importance score (0-100)
    importance = memory.get("importance_score", 50)
    lines.append(f"**Importance:** {importance:.0f}/100")

    # Lifecycle status
    status = memory.get("status", "fresh")
    lines.append(f"**Status:** {status}")

    # Related memories (capped at three to keep output compact)
    if related_memories:
        lines.append("")
        lines.append("**Related:**")
        for related in related_memories[:3]:  # Limit to 3
            rel_type = related.get("relationship_type", "related_to")
            rel_id = related.get("id", "unknown")
            rel_content = xml_escape(related.get("content", "")[:50])
            lines.append(f" - [{rel_type}] {rel_id}: {rel_content}...")

    return "\n".join(lines)
116
+
117
+
118
def format_memories_list_markdown(
    memories: list[dict[str, Any]],
    total: int = 0,
    related_map: Optional[dict[str, list[dict[str, Any]]]] = None,
) -> str:
    """Render a list of memories as a single markdown document.

    Args:
        memories: List of memory dictionaries
        total: Total count (for pagination info)
        related_map: Optional dict mapping memory IDs to their related memories

    Returns:
        Markdown formatted string
    """
    if not memories:
        return "No memories found."

    shown = len(memories)
    # Only mention the grand total when this page is a strict subset.
    suffix = f" of {total})" if total > shown else ")"
    lines = [f"# Memories ({shown}{suffix}", ""]

    for entry in memories:
        entry_id = entry.get("id")
        linked = related_map.get(entry_id) if related_map and entry_id else None
        lines.append(format_memory_markdown(entry, related_memories=linked))
        lines.append("")

    return "\n".join(lines)
148
+
149
+
150
def format_activity_markdown(activity: dict[str, Any]) -> str:
    """Render a single activity record as a short markdown snippet.

    Args:
        activity: Activity dictionary

    Returns:
        Markdown formatted string
    """
    event_type = activity.get("event_type", "")
    tool_name = activity.get("tool_name", "")
    timestamp = activity.get("timestamp", "")

    # Header line: bold event type, optionally followed by the tool name.
    title = f"**{event_type}**"
    if tool_name:
        title = f"{title}: `{tool_name}`"

    out = [title, f" - Time: {format_relative_time(timestamp)}"]

    # Failures show the error; successes show duration when recorded.
    if not activity.get("success", 1):
        error = activity.get("error_message", "Unknown error")
        out.append(f" - Status: Failed - {error}")
    elif activity.get("duration_ms"):
        out.append(f" - Duration: {activity['duration_ms']}ms")

    return "\n".join(out)
182
+
183
+
184
def format_timeline_markdown(
    activities: list[dict[str, Any]],
    memories: list[dict[str, Any]],
    group_by: str = "hour"
) -> str:
    """Format a merged activity/memory timeline as markdown, newest first.

    Args:
        activities: List of activity dictionaries
        memories: List of memory dictionaries
        group_by: How to group items (hour, day, session)

    Returns:
        Markdown formatted string
    """
    # Merge both streams into one list tagged by kind.
    items = [
        {"type": "activity", "timestamp": act.get("timestamp"), "data": act}
        for act in activities
    ]
    items.extend(
        {"type": "memory", "timestamp": mem.get("created_at"), "data": mem}
        for mem in memories
    )

    if not items:
        return "# Timeline\n\nNo items found."

    # Bug fix: a record without a timestamp stores None here, and the old key
    # (x.get("timestamp", "") — the key exists, so the default never applies)
    # made sort() raise TypeError comparing None with str. Coerce None to "".
    items.sort(key=lambda entry: entry["timestamp"] or "", reverse=True)

    lines = ["# Timeline", ""]
    for item in items:
        if item["type"] == "activity":
            lines.append(format_activity_markdown(item["data"]))
        else:
            mem = item["data"]
            lines.append(f"**Memory created**: [{mem.get('type')}] {mem.get('id')}")
            content = xml_escape(mem.get("content", "")[:100])
            lines.append(f" > {content}...")
        lines.append("")

    return "\n".join(lines)
234
+
235
+
236
def format_session_context_markdown(
    sessions: list[dict[str, Any]],
    learnings: list[str],
    decisions: list[str],
    errors: list[str]
) -> str:
    """Build a markdown continuity briefing from recent session data.

    Args:
        sessions: Recent sessions (most recent first)
        learnings: Key learnings
        decisions: Key decisions
        errors: Key errors encountered

    Returns:
        Markdown formatted context string
    """
    parts = ["# Session Context", ""]

    # Mention when the previous session ended, if that is known.
    if sessions:
        ended = sessions[0].get("ended_at")
        if ended:
            parts.append(f"Last session ended {format_relative_time(ended)}")
            parts.append("")

    # Each section renders at most five bullet points.
    sections = (
        ("## Key Learnings", learnings),
        ("## Key Decisions", decisions),
        ("## Errors Encountered", errors),
    )
    for heading, entries in sections:
        if entries:
            parts.append(heading)
            parts.extend(f"- {entry}" for entry in entries[:5])
            parts.append("")

    return "\n".join(parts)
@@ -0,0 +1,72 @@
1
+ """ID generation utilities for Omni Cortex."""
2
+
3
+ import os
4
+ import time
5
+ from typing import Literal
6
+
7
+
8
IdPrefix = Literal["mem", "act", "sess", "rel", "emb", "sum"]


def generate_id(prefix: IdPrefix) -> str:
    """Generate a unique, roughly time-ordered ID.

    Format: {prefix}_{timestamp_ms}_{random_hex}, where the random suffix
    is 4 bytes (8 hex characters) from os.urandom.

    Args:
        prefix: One of mem, act, sess, rel, emb, sum

    Returns:
        Unique ID string
    """
    millis = int(time.time() * 1000)
    entropy = os.urandom(4).hex()
    return f"{prefix}_{millis}_{entropy}"
25
+
26
+
27
def generate_memory_id() -> str:
    """Return a new unique memory ID (prefix ``mem``)."""
    return generate_id("mem")


def generate_activity_id() -> str:
    """Return a new unique activity ID (prefix ``act``)."""
    return generate_id("act")


def generate_session_id() -> str:
    """Return a new unique session ID (prefix ``sess``)."""
    return generate_id("sess")


def generate_relationship_id() -> str:
    """Return a new unique relationship ID (prefix ``rel``)."""
    return generate_id("rel")


def generate_embedding_id() -> str:
    """Return a new unique embedding ID (prefix ``emb``)."""
    return generate_id("emb")


def generate_summary_id() -> str:
    """Return a new unique session-summary ID (prefix ``sum``)."""
    return generate_id("sum")
55
+
56
+
57
def parse_id_timestamp(id_str: str) -> int:
    """Extract the millisecond timestamp embedded in an ID.

    Args:
        id_str: ID string in format prefix_timestamp_random

    Returns:
        Timestamp in milliseconds, or 0 when the ID does not carry one.
    """
    fields = id_str.split("_")
    if len(fields) < 2:
        return 0
    try:
        return int(fields[1])
    except ValueError:
        # Second field is not numeric — treat as an unparseable ID.
        return 0
@@ -0,0 +1,129 @@
1
+ """Timestamp utilities for Omni Cortex."""
2
+
3
+ from datetime import datetime, timezone, timedelta
4
+ from typing import Optional
5
+
6
+
7
def now_iso() -> str:
    """Return the current UTC time as an ISO 8601 string.

    Returns:
        ISO 8601 formatted timestamp carrying an explicit UTC offset.
    """
    current = datetime.now(timezone.utc)
    return current.isoformat()
14
+
15
+
16
def parse_iso(iso_string: str) -> datetime:
    """Parse an ISO 8601 string into an aware datetime.

    Args:
        iso_string: ISO 8601 formatted string

    Returns:
        datetime object with timezone info (naive inputs are treated as UTC)
    """
    # fromisoformat() on older Pythons rejects the trailing "Z"; normalize it.
    normalized = iso_string.replace("Z", "+00:00")
    parsed = datetime.fromisoformat(normalized)
    if parsed.tzinfo is not None:
        return parsed
    return parsed.replace(tzinfo=timezone.utc)
30
+
31
+
32
+ def format_relative_time(dt: datetime | str) -> str:
33
+ """Format a datetime as relative time (e.g., '2 hours ago').
34
+
35
+ Args:
36
+ dt: datetime object or ISO string
37
+
38
+ Returns:
39
+ Human-readable relative time string
40
+ """
41
+ if isinstance(dt, str):
42
+ dt = parse_iso(dt)
43
+
44
+ now = datetime.now(timezone.utc)
45
+ if dt.tzinfo is None:
46
+ dt = dt.replace(tzinfo=timezone.utc)
47
+
48
+ diff = now - dt
49
+
50
+ if diff < timedelta(minutes=1):
51
+ return "just now"
52
+ elif diff < timedelta(hours=1):
53
+ minutes = int(diff.total_seconds() / 60)
54
+ return f"{minutes} minute{'s' if minutes != 1 else ''} ago"
55
+ elif diff < timedelta(days=1):
56
+ hours = int(diff.total_seconds() / 3600)
57
+ return f"{hours} hour{'s' if hours != 1 else ''} ago"
58
+ elif diff < timedelta(days=7):
59
+ days = diff.days
60
+ return f"{days} day{'s' if days != 1 else ''} ago"
61
+ elif diff < timedelta(days=30):
62
+ weeks = diff.days // 7
63
+ return f"{weeks} week{'s' if weeks != 1 else ''} ago"
64
+ elif diff < timedelta(days=365):
65
+ months = diff.days // 30
66
+ return f"{months} month{'s' if months != 1 else ''} ago"
67
+ else:
68
+ years = diff.days // 365
69
+ return f"{years} year{'s' if years != 1 else ''} ago"
70
+
71
+
72
def format_duration(ms: int) -> str:
    """Format a millisecond count as a compact human-readable duration.

    Args:
        ms: Duration in milliseconds

    Returns:
        Human-readable duration string (ms, s, m or h)
    """
    # Check from the largest unit downward; each threshold in milliseconds.
    if ms >= 3600000:
        return f"{ms / 3600000:.1f}h"
    if ms >= 60000:
        return f"{ms / 60000:.1f}m"
    if ms >= 1000:
        return f"{ms / 1000:.1f}s"
    return f"{ms}ms"
92
+
93
+
94
+ def days_since(dt: datetime | str) -> int:
95
+ """Calculate days since a given datetime.
96
+
97
+ Args:
98
+ dt: datetime object or ISO string
99
+
100
+ Returns:
101
+ Number of days since the datetime
102
+ """
103
+ if isinstance(dt, str):
104
+ dt = parse_iso(dt)
105
+
106
+ now = datetime.now(timezone.utc)
107
+ if dt.tzinfo is None:
108
+ dt = dt.replace(tzinfo=timezone.utc)
109
+
110
+ return (now - dt).days
111
+
112
+
113
+ def hours_since(dt: datetime | str) -> float:
114
+ """Calculate hours since a given datetime.
115
+
116
+ Args:
117
+ dt: datetime object or ISO string
118
+
119
+ Returns:
120
+ Number of hours since the datetime
121
+ """
122
+ if isinstance(dt, str):
123
+ dt = parse_iso(dt)
124
+
125
+ now = datetime.now(timezone.utc)
126
+ if dt.tzinfo is None:
127
+ dt = dt.replace(tzinfo=timezone.utc)
128
+
129
+ return (now - dt).total_seconds() / 3600
@@ -0,0 +1,111 @@
1
+ """Output truncation utilities for Omni Cortex."""
2
+
3
+ import json
4
+ from typing import Any
5
+
6
+
7
+ DEFAULT_MAX_LENGTH = 10000
8
+ TRUNCATION_SUFFIX = "\n... [truncated]"
9
+
10
+
11
def truncate_output(text: str, max_length: int = DEFAULT_MAX_LENGTH) -> str:
    """Hard-truncate *text* to at most *max_length* characters.

    Args:
        text: Text to truncate
        max_length: Maximum allowed length

    Returns:
        The text unchanged if it fits, otherwise a prefix ending in the
        truncation suffix.
    """
    if len(text) <= max_length:
        return text

    keep = max_length - len(TRUNCATION_SUFFIX)
    # Limit smaller than the suffix itself: cut without the marker.
    if keep <= 0:
        return text[:max_length]
    return text[:keep] + TRUNCATION_SUFFIX
30
+
31
+
32
def truncate_json(data: Any, max_length: int = DEFAULT_MAX_LENGTH) -> str:
    """Serialize *data* to JSON, then truncate the result if needed.

    Args:
        data: Data to serialize (non-JSON types are stringified via default=str)
        max_length: Maximum allowed length

    Returns:
        JSON string, truncated if necessary
    """
    serialized = json.dumps(data, default=str)
    return truncate_output(serialized, max_length)
44
+
45
+
46
def truncate_dict_values(
    data: dict[str, Any],
    max_value_length: int = 1000
) -> dict[str, Any]:
    """Return a copy of *data* with long string values shortened.

    Nested dicts are processed recursively; within lists, dict items recurse
    and string items are clipped, while other items (including nested lists)
    pass through untouched.

    Args:
        data: Dictionary with values to truncate
        max_value_length: Maximum length for each string value

    Returns:
        Dictionary with truncated string values
    """
    def _clip(text: str) -> str:
        # Clipped strings get a literal "..." marker appended.
        if len(text) > max_value_length:
            return text[:max_value_length] + "..."
        return text

    def _convert(value: Any) -> Any:
        if isinstance(value, str):
            return _clip(value)
        if isinstance(value, dict):
            return truncate_dict_values(value, max_value_length)
        if isinstance(value, list):
            return [
                truncate_dict_values(item, max_value_length) if isinstance(item, dict)
                else (_clip(item) if isinstance(item, str) else item)
                for item in value
            ]
        return value

    return {key: _convert(value) for key, value in data.items()}
74
+
75
+
76
def smart_truncate(text: str, max_length: int = DEFAULT_MAX_LENGTH) -> str:
    """Truncate *text* at a natural boundary (newline, sentence, word).

    Args:
        text: Text to truncate
        max_length: Maximum allowed length

    Returns:
        Truncated text ending at the best available boundary, with the
        truncation suffix appended.
    """
    if len(text) <= max_length:
        return text

    keep = max_length - len(TRUNCATION_SUFFIX)
    # Limit smaller than the suffix itself: hard cut without the marker.
    if keep <= 0:
        return text[:max_length]

    # Best: break on a newline reasonably close to the cut point (>70%).
    newline_at = text.rfind("\n", 0, keep)
    if newline_at > keep * 0.7:
        return text[:newline_at] + TRUNCATION_SUFFIX

    # Next: end of a sentence (keep the punctuation, drop the space).
    for separator in (". ", "! ", "? "):
        sentence_at = text.rfind(separator, 0, keep)
        if sentence_at > keep * 0.7:
            return text[:sentence_at + 1] + TRUNCATION_SUFFIX

    # Then: a word boundary, if one falls in the last 20%.
    space_at = text.rfind(" ", 0, keep)
    if space_at > keep * 0.8:
        return text[:space_at] + TRUNCATION_SUFFIX

    # Fall back to a hard cut.
    return text[:keep] + TRUNCATION_SUFFIX
@@ -791,7 +791,8 @@ async def list_activities(
791
791
  # Ensure migrations are applied (adds summary columns if missing)
792
792
  ensure_migrations(project)
793
793
 
794
- return get_activities(project, event_type, tool_name, limit, offset)
794
+ activities = get_activities(project, event_type, tool_name, limit, offset)
795
+ return {"activities": activities, "count": len(activities)}
795
796
 
796
797
 
797
798
  @app.get("/api/timeline")
@@ -1693,9 +1694,12 @@ async def get_agent_stats_endpoint(
1693
1694
  # --- ADW Endpoints ---
1694
1695
 
1695
1696
 
1696
- def scan_adw_folder() -> list[dict]:
1697
- """Scan agents/ folder for ADW runs."""
1698
- agents_dir = Path("agents")
1697
+ def scan_adw_folder(project_path: str) -> list[dict]:
1698
+ """Scan agents/ folder for ADW runs relative to project directory."""
1699
+ # Get project directory from db path (e.g., /project/.cortex/cortex.db -> /project)
1700
+ project_dir = Path(project_path).parent.parent if project_path.endswith(".db") else Path(project_path)
1701
+ agents_dir = project_dir / "agents"
1702
+
1699
1703
  if not agents_dir.exists():
1700
1704
  return []
1701
1705
 
@@ -1712,7 +1716,8 @@ def scan_adw_folder() -> list[dict]:
1712
1716
  "status": state.get("status", "unknown"),
1713
1717
  "current_phase": state.get("current_phase", "unknown"),
1714
1718
  "phases_completed": len(state.get("completed_phases", [])),
1715
- "phases_total": 4 # plan, build, validate, release
1719
+ "phases_total": 4, # plan, build, validate, release
1720
+ "project_path": str(project_dir)
1716
1721
  })
1717
1722
  except json.JSONDecodeError:
1718
1723
  pass
@@ -1724,7 +1729,9 @@ def scan_adw_folder() -> list[dict]:
1724
1729
 
1725
1730
  def get_adw_state_with_agents(adw_id: str, db_path: str) -> Optional[dict]:
1726
1731
  """Get ADW state with correlated agent activity."""
1727
- adw_dir = Path(f"agents/{adw_id}")
1732
+ # Get project directory from db path
1733
+ project_dir = Path(db_path).parent.parent if db_path.endswith(".db") else Path(db_path)
1734
+ adw_dir = project_dir / "agents" / adw_id
1728
1735
  state_file = adw_dir / "adw_state.json"
1729
1736
 
1730
1737
  if not state_file.exists():
@@ -1749,17 +1756,37 @@ def get_adw_state_with_agents(adw_id: str, db_path: str) -> Optional[dict]:
1749
1756
  else:
1750
1757
  status = "pending"
1751
1758
 
1752
- # Find agents that ran in this phase (from output files)
1753
- agent_ids = []
1759
+ # Find agents that ran in this phase (from output files) and count calls
1760
+ phase_agents = []
1761
+ total_phase_calls = 0
1754
1762
  if phase_dir.exists():
1755
1763
  for output_file in phase_dir.glob("*_output.jsonl"):
1756
1764
  agent_name = output_file.stem.replace("_output", "")
1757
- agent_ids.append(agent_name)
1765
+ # Count tool_use entries in the JSONL file
1766
+ call_count = 0
1767
+ try:
1768
+ with open(output_file, "r", encoding="utf-8") as f:
1769
+ for line in f:
1770
+ try:
1771
+ entry = json.loads(line)
1772
+ if entry.get("type") == "tool_use":
1773
+ call_count += 1
1774
+ except json.JSONDecodeError:
1775
+ pass
1776
+ except Exception:
1777
+ pass
1778
+ phase_agents.append({
1779
+ "id": agent_name,
1780
+ "call_count": call_count
1781
+ })
1782
+ total_phase_calls += call_count
1758
1783
 
1759
1784
  phases.append({
1760
1785
  "name": phase_name,
1761
1786
  "status": status,
1762
- "agent_ids": agent_ids,
1787
+ "agents": phase_agents, # Now includes id and call_count
1788
+ "agent_ids": [a["id"] for a in phase_agents], # Keep for backwards compat
1789
+ "call_count": total_phase_calls,
1763
1790
  "duration_seconds": None # Could be computed from timestamps if needed
1764
1791
  })
1765
1792
 
@@ -1776,9 +1803,12 @@ def get_adw_state_with_agents(adw_id: str, db_path: str) -> Optional[dict]:
1776
1803
 
1777
1804
 
1778
1805
  @app.get("/api/adw/list")
1779
- async def list_adw_runs(limit: int = Query(20, ge=1, le=100)):
1780
- """List all ADW runs from agents/ folder."""
1781
- adw_runs = scan_adw_folder()[:limit]
1806
+ async def list_adw_runs(
1807
+ project: str = Query(..., description="Path to the database file"),
1808
+ limit: int = Query(20, ge=1, le=100)
1809
+ ):
1810
+ """List all ADW runs from agents/ folder for the selected project."""
1811
+ adw_runs = scan_adw_folder(project)[:limit]
1782
1812
  return {"adw_runs": adw_runs, "count": len(adw_runs)}
1783
1813
 
1784
1814