omni-cortex 1.4.0.tar.gz → 1.5.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/PKG-INFO +1 -1
- omni_cortex-1.5.0/dashboard/backend/backfill_summaries.py +280 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/database.py +94 -16
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/main.py +27 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/models.py +8 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/__init__.py +1 -1
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/database/migrations.py +6 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/database/schema.py +8 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/tools/activities.py +132 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/pyproject.toml +1 -1
- omni_cortex-1.5.0/scripts/check-venv.py +106 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/.gitignore +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/LICENSE +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/README.md +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/.env.example +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/chat_service.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/image_service.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/logging_config.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/project_config.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/project_scanner.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/prompt_security.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/security.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/uv.lock +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/dashboard/backend/websocket_manager.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/hooks/post_tool_use.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/hooks/pre_tool_use.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/hooks/stop.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/categorization/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/categorization/auto_tags.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/categorization/auto_type.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/config.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/dashboard.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/database/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/database/connection.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/database/sync.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/decay/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/decay/importance.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/embeddings/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/embeddings/local.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/models/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/models/activity.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/models/agent.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/models/memory.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/models/relationship.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/models/session.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/resources/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/search/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/search/hybrid.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/search/keyword.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/search/ranking.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/search/semantic.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/server.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/setup.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/tools/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/tools/memories.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/tools/sessions.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/tools/utilities.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/utils/__init__.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/utils/formatting.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/utils/ids.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/utils/timestamps.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/omni_cortex/utils/truncation.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/scripts/import_ken_memories.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/scripts/populate_session_data.py +0 -0
- {omni_cortex-1.4.0 → omni_cortex-1.5.0}/scripts/setup.py +0 -0
--- omni_cortex-1.4.0/PKG-INFO
+++ omni_cortex-1.5.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: omni-cortex
-Version: 1.4.0
+Version: 1.5.0
 Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
 Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
 Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
--- /dev/null
+++ omni_cortex-1.5.0/dashboard/backend/backfill_summaries.py
@@ -0,0 +1,280 @@
+"""Backfill utility for generating activity summaries.
+
+This module provides functions to retroactively generate natural language
+summaries for existing activity records that don't have them.
+"""
+
+import json
+import sqlite3
+import sys
+from pathlib import Path
+from typing import Optional
+
+# Add parent paths for imports
+sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
+
+from database import get_write_connection, ensure_migrations
+
+
+def generate_activity_summary(
+    tool_name: Optional[str],
+    tool_input: Optional[str],
+    success: bool,
+    file_path: Optional[str],
+    event_type: str,
+) -> tuple[str, str]:
+    """Generate natural language summary for an activity.
+
+    Returns:
+        tuple of (short_summary, detailed_summary)
+    """
+    short = ""
+    detail = ""
+
+    # Parse tool input if available
+    input_data = {}
+    if tool_input:
+        try:
+            input_data = json.loads(tool_input)
+        except (json.JSONDecodeError, TypeError):
+            pass
+
+    # Generate summaries based on tool type
+    if tool_name == "Read":
+        path = input_data.get("file_path", file_path or "unknown file")
+        filename = Path(path).name if path else "file"
+        short = f"Read file: {filename}"
+        detail = f"Reading contents of {path}"
+
+    elif tool_name == "Write":
+        path = input_data.get("file_path", file_path or "unknown file")
+        filename = Path(path).name if path else "file"
+        short = f"Write file: {filename}"
+        detail = f"Writing/creating file at {path}"
+
+    elif tool_name == "Edit":
+        path = input_data.get("file_path", file_path or "unknown file")
+        filename = Path(path).name if path else "file"
+        short = f"Edit file: {filename}"
+        detail = f"Editing {path} - replacing text content"
+
+    elif tool_name == "Bash":
+        cmd = input_data.get("command", "")[:50]
+        short = f"Run command: {cmd}..."
+        detail = f"Executing bash command: {input_data.get('command', 'unknown')}"
+
+    elif tool_name == "Grep":
+        pattern = input_data.get("pattern", "")
+        short = f"Search for: {pattern[:30]}"
+        detail = f"Searching codebase for pattern: {pattern}"
+
+    elif tool_name == "Glob":
+        pattern = input_data.get("pattern", "")
+        short = f"Find files: {pattern[:30]}"
+        detail = f"Finding files matching pattern: {pattern}"
+
+    elif tool_name == "Skill":
+        skill = input_data.get("skill", "unknown")
+        short = f"Run skill: /{skill}"
+        detail = f"Executing slash command /{skill}"
+
+    elif tool_name == "Task":
+        desc = input_data.get("description", "task")
+        short = f"Spawn agent: {desc[:30]}"
+        detail = f"Launching sub-agent for: {input_data.get('prompt', desc)[:100]}"
+
+    elif tool_name == "WebSearch":
+        query = input_data.get("query", "")
+        short = f"Web search: {query[:30]}"
+        detail = f"Searching the web for: {query}"
+
+    elif tool_name == "WebFetch":
+        url = input_data.get("url", "")
+        short = f"Fetch URL: {url[:40]}"
+        detail = f"Fetching content from: {url}"
+
+    elif tool_name == "TodoWrite":
+        todos = input_data.get("todos", [])
+        count = len(todos) if isinstance(todos, list) else 0
+        short = f"Update todo list: {count} items"
+        detail = f"Managing task list with {count} items"
+
+    elif tool_name == "AskUserQuestion":
+        questions = input_data.get("questions", [])
+        count = len(questions) if isinstance(questions, list) else 1
+        short = f"Ask user: {count} question(s)"
+        detail = f"Prompting user for input with {count} question(s)"
+
+    elif tool_name and tool_name.startswith("mcp__"):
+        parts = tool_name.split("__")
+        server = parts[1] if len(parts) > 1 else "unknown"
+        tool = parts[2] if len(parts) > 2 else tool_name
+        short = f"MCP call: {server}/{tool}"
+        detail = f"Calling {tool} tool from MCP server {server}"
+
+    elif tool_name == "cortex_remember" or (tool_name and "remember" in tool_name.lower()):
+        params = input_data.get("params", {})
+        content = params.get("content", "") if isinstance(params, dict) else ""
+        short = f"Store memory: {content[:30]}..." if content else "Store memory"
+        detail = f"Saving to memory system: {content[:100]}" if content else "Saving to memory system"
+
+    elif tool_name == "cortex_recall" or (tool_name and "recall" in tool_name.lower()):
+        params = input_data.get("params", {})
+        query = params.get("query", "") if isinstance(params, dict) else ""
+        short = f"Recall: {query[:30]}" if query else "Recall memories"
+        detail = f"Searching memories for: {query}" if query else "Retrieving memories"
+
+    elif tool_name == "NotebookEdit":
+        path = input_data.get("notebook_path", "")
+        filename = Path(path).name if path else "notebook"
+        short = f"Edit notebook: {filename}"
+        detail = f"Editing Jupyter notebook {path}"
+
+    else:
+        short = f"{event_type}: {tool_name or 'unknown'}"
+        detail = f"Activity type {event_type} with tool {tool_name}"
+
+    # Add status suffix for failures
+    if not success:
+        short = f"[FAILED] {short}"
+        detail = f"[FAILED] {detail}"
+
+    return short, detail
+
+
+def backfill_activity_summaries(db_path: str) -> int:
+    """Generate summaries for activities that don't have them.
+
+    Args:
+        db_path: Path to the SQLite database
+
+    Returns:
+        Number of activities updated
+    """
+    # First ensure migrations are applied
+    ensure_migrations(db_path)
+
+    conn = get_write_connection(db_path)
+
+    # Check if summary column exists
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = {col[1] for col in columns}
+
+    if "summary" not in column_names:
+        print(f"[Backfill] Summary column not found in {db_path}, skipping")
+        conn.close()
+        return 0
+
+    cursor = conn.execute("""
+        SELECT id, tool_name, tool_input, success, file_path, event_type
+        FROM activities
+        WHERE summary IS NULL OR summary = ''
+    """)
+
+    count = 0
+    for row in cursor.fetchall():
+        short, detail = generate_activity_summary(
+            row["tool_name"],
+            row["tool_input"],
+            bool(row["success"]),
+            row["file_path"],
+            row["event_type"],
+        )
+
+        conn.execute(
+            """
+            UPDATE activities
+            SET summary = ?, summary_detail = ?
+            WHERE id = ?
+            """,
+            (short, detail, row["id"]),
+        )
+        count += 1
+
+        if count % 100 == 0:
+            conn.commit()
+            print(f"[Backfill] Processed {count} activities...")
+
+    conn.commit()
+    conn.close()
+    return count
+
+
+def backfill_mcp_servers(db_path: str) -> int:
+    """Extract and populate mcp_server for existing activities.
+
+    Args:
+        db_path: Path to the SQLite database
+
+    Returns:
+        Number of activities updated
+    """
+    # First ensure migrations are applied
+    ensure_migrations(db_path)
+
+    conn = get_write_connection(db_path)
+
+    # Check if mcp_server column exists
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = {col[1] for col in columns}
+
+    if "mcp_server" not in column_names:
+        print(f"[Backfill] mcp_server column not found in {db_path}, skipping")
+        conn.close()
+        return 0
+
+    cursor = conn.execute("""
+        SELECT id, tool_name FROM activities
+        WHERE tool_name LIKE 'mcp__%'
+        AND (mcp_server IS NULL OR mcp_server = '')
+    """)
+
+    count = 0
+    for row in cursor.fetchall():
+        parts = row["tool_name"].split("__")
+        if len(parts) >= 2:
+            server = parts[1]
+            conn.execute(
+                "UPDATE activities SET mcp_server = ? WHERE id = ?",
+                (server, row["id"]),
+            )
+            count += 1
+
+    conn.commit()
+    conn.close()
+    return count
+
+
+def backfill_all(db_path: str) -> dict:
+    """Run all backfill operations on a database.
+
+    Args:
+        db_path: Path to the SQLite database
+
+    Returns:
+        Dictionary with counts of updated records
+    """
+    print(f"[Backfill] Starting backfill for {db_path}")
+
+    results = {
+        "summaries": backfill_activity_summaries(db_path),
+        "mcp_servers": backfill_mcp_servers(db_path),
+    }
+
+    print(f"[Backfill] Complete: {results['summaries']} summaries, {results['mcp_servers']} MCP servers")
+    return results
+
+
+if __name__ == "__main__":
+    # Allow running from command line with database path as argument
+    if len(sys.argv) < 2:
+        print("Usage: python backfill_summaries.py <path-to-database>")
+        sys.exit(1)
+
+    db_path = sys.argv[1]
+    if not Path(db_path).exists():
+        print(f"Error: Database not found at {db_path}")
+        sys.exit(1)
+
+    results = backfill_all(db_path)
+    print(f"Backfill complete: {results}")
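
The new module doubles as a CLI (`python backfill_summaries.py <path-to-database>`, per its `__main__` block), and `generate_activity_summary` is a pure function that can be previewed without a database. A minimal sketch; the database path below is an illustrative assumption, not a path the package ships:

```python
from pathlib import Path

from backfill_summaries import backfill_all, generate_activity_summary

# Preview a summary with no database involved.
short, detail = generate_activity_summary(
    tool_name="Read",
    tool_input='{"file_path": "/tmp/example.py"}',
    success=True,
    file_path=None,
    event_type="tool_use",
)
print(short)   # Read file: example.py
print(detail)  # Reading contents of /tmp/example.py

# Backfill a real database; this path is hypothetical.
db = Path.home() / ".omni-cortex" / "cortex.db"
if db.exists():
    print(backfill_all(str(db)))  # e.g. {'summaries': 412, 'mcp_servers': 37}
```
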
--- omni_cortex-1.4.0/dashboard/backend/database.py
+++ omni_cortex-1.5.0/dashboard/backend/database.py
@@ -24,6 +24,58 @@ def get_write_connection(db_path: str) -> sqlite3.Connection:
     return conn
 
 
+def ensure_migrations(db_path: str) -> None:
+    """Ensure database has latest migrations applied.
+
+    This function checks for and applies any missing schema updates,
+    including command analytics columns and natural language summary columns.
+    """
+    conn = get_write_connection(db_path)
+
+    # Check if activities table exists
+    table_check = conn.execute(
+        "SELECT name FROM sqlite_master WHERE type='table' AND name='activities'"
+    ).fetchone()
+
+    if not table_check:
+        conn.close()
+        return
+
+    # Check available columns
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = {col[1] for col in columns}
+
+    migrations_applied = []
+
+    # Migration v1.1: Command analytics columns
+    if "command_name" not in column_names:
+        conn.executescript("""
+            ALTER TABLE activities ADD COLUMN command_name TEXT;
+            ALTER TABLE activities ADD COLUMN command_scope TEXT;
+            ALTER TABLE activities ADD COLUMN mcp_server TEXT;
+            ALTER TABLE activities ADD COLUMN skill_name TEXT;
+
+            CREATE INDEX IF NOT EXISTS idx_activities_command ON activities(command_name);
+            CREATE INDEX IF NOT EXISTS idx_activities_mcp ON activities(mcp_server);
+            CREATE INDEX IF NOT EXISTS idx_activities_skill ON activities(skill_name);
+        """)
+        migrations_applied.append("v1.1: command analytics columns")
+
+    # Migration v1.2: Natural language summary columns
+    if "summary" not in column_names:
+        conn.executescript("""
+            ALTER TABLE activities ADD COLUMN summary TEXT;
+            ALTER TABLE activities ADD COLUMN summary_detail TEXT;
+        """)
+        migrations_applied.append("v1.2: summary columns")
+
+    if migrations_applied:
+        conn.commit()
+        print(f"[Database] Applied migrations: {', '.join(migrations_applied)}")
+
+    conn.close()
+
+
 def parse_tags(tags_str: Optional[str]) -> list[str]:
     """Parse tags from JSON string."""
     if not tags_str:
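
`ensure_migrations` leans on a pattern that is idempotent by construction: read column names via `PRAGMA table_info`, then `ALTER TABLE ... ADD COLUMN` only when a column is absent. A self-contained sketch of that pattern (table and column names here are illustrative):

```python
import sqlite3

def add_column_if_missing(conn: sqlite3.Connection, table: str,
                          column: str, decl: str = "TEXT") -> None:
    # PRAGMA table_info yields one row per column; index 1 is the name.
    names = {row[1] for row in conn.execute(f"PRAGMA table_info({table})")}
    if column not in names:
        conn.execute(f"ALTER TABLE {table} ADD COLUMN {column} {decl}")

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE activities (id TEXT PRIMARY KEY)")
add_column_if_missing(conn, "activities", "summary")
add_column_if_missing(conn, "activities", "summary")  # second call is a no-op
print([row[1] for row in conn.execute("PRAGMA table_info(activities)")])
# ['id', 'summary']
```
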
@@ -183,9 +235,13 @@ def get_activities(
     limit: int = 100,
     offset: int = 0,
 ) -> list[Activity]:
-    """Get activity log entries."""
+    """Get activity log entries with all available fields."""
     conn = get_connection(db_path)
 
+    # Check available columns for backward compatibility
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = {col[1] for col in columns}
+
     query = "SELECT * FROM activities WHERE 1=1"
     params: list = []
 
@@ -212,21 +268,37 @@ def get_activities(
         # Fallback for edge cases
         ts = datetime.now()
 
[the 15 removed lines (old 215-229) were collapsed in the source diff viewer; their content is not shown]
+        activity_data = {
+            "id": row["id"],
+            "session_id": row["session_id"],
+            "event_type": row["event_type"],
+            "tool_name": row["tool_name"],
+            "tool_input": row["tool_input"],
+            "tool_output": row["tool_output"],
+            "success": bool(row["success"]),
+            "error_message": row["error_message"],
+            "duration_ms": row["duration_ms"],
+            "file_path": row["file_path"],
+            "timestamp": ts,
+        }
+
+        # Add command analytics fields if available
+        if "command_name" in column_names:
+            activity_data["command_name"] = row["command_name"]
+        if "command_scope" in column_names:
+            activity_data["command_scope"] = row["command_scope"]
+        if "mcp_server" in column_names:
+            activity_data["mcp_server"] = row["mcp_server"]
+        if "skill_name" in column_names:
+            activity_data["skill_name"] = row["skill_name"]
+
+        # Add summary fields if available
+        if "summary" in column_names:
+            activity_data["summary"] = row["summary"]
+        if "summary_detail" in column_names:
+            activity_data["summary_detail"] = row["summary_detail"]
+
+        activities.append(Activity(**activity_data))
 
     conn.close()
     return activities
@@ -933,6 +1005,12 @@ def get_activity_detail(db_path: str, activity_id: str) -> Optional[dict]:
     if "skill_name" in column_names:
         result["skill_name"] = row["skill_name"]
 
+    # Add summary fields if they exist
+    if "summary" in column_names:
+        result["summary"] = row["summary"]
+    if "summary_detail" in column_names:
+        result["summary_detail"] = row["summary_detail"]
+
     conn.close()
     return result
 
--- omni_cortex-1.4.0/dashboard/backend/main.py
+++ omni_cortex-1.5.0/dashboard/backend/main.py
@@ -32,6 +32,7 @@ except ImportError:
 from database import (
     bulk_update_memory_status,
     delete_memory,
+    ensure_migrations,
     get_activities,
     get_activity_detail,
     get_activity_heatmap,
@@ -487,6 +488,9 @@ async def list_activities(
     if not Path(project).exists():
         raise HTTPException(status_code=404, detail="Database not found")
 
+    # Ensure migrations are applied (adds summary columns if missing)
+    ensure_migrations(project)
+
     return get_activities(project, event_type, tool_name, limit, offset)
 
 
@@ -627,6 +631,9 @@ async def get_activity_detail_endpoint(
     if not Path(project).exists():
         raise HTTPException(status_code=404, detail="Database not found")
 
+    # Ensure migrations are applied
+    ensure_migrations(project)
+
     activity = get_activity_detail(project, activity_id)
     if not activity:
         raise HTTPException(status_code=404, detail="Activity not found")
@@ -634,6 +641,26 @@
     return activity
 
 
+@app.post("/api/activities/backfill-summaries")
+async def backfill_activity_summaries_endpoint(
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Generate summaries for existing activities that don't have them."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    try:
+        from backfill_summaries import backfill_all
+        results = backfill_all(project)
+        return {
+            "success": True,
+            "summaries_updated": results["summaries"],
+            "mcp_servers_updated": results["mcp_servers"],
+        }
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Backfill failed: {str(e)}")
+
+
 # --- Session Context Endpoints ---
 
 
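
With the backend running, the new endpoint can be driven from any HTTP client. A sketch using httpx (already a backend dependency); the host, port, and database path are assumptions:

```python
import httpx

resp = httpx.post(
    "http://127.0.0.1:8000/api/activities/backfill-summaries",  # assumed dev address
    params={"project": "/path/to/cortex.db"},  # placeholder database path
    timeout=60.0,  # backfills over large activity tables can take a while
)
resp.raise_for_status()
print(resp.json())
# e.g. {'success': True, 'summaries_updated': 412, 'mcp_servers_updated': 37}
```
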
--- omni_cortex-1.4.0/dashboard/backend/models.py
+++ omni_cortex-1.5.0/dashboard/backend/models.py
@@ -84,6 +84,14 @@ class Activity(BaseModel):
     duration_ms: Optional[int] = None
     file_path: Optional[str] = None
     timestamp: datetime
+    # Command analytics fields
+    command_name: Optional[str] = None
+    command_scope: Optional[str] = None
+    mcp_server: Optional[str] = None
+    skill_name: Optional[str] = None
+    # Natural language summary fields
+    summary: Optional[str] = None
+    summary_detail: Optional[str] = None
 
 
 class Session(BaseModel):
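
Because every new field defaults to `None`, rows coming from a pre-1.5 database still validate. A trimmed stand-in model illustrates the behavior (the real `Activity` has more required fields):

```python
from datetime import datetime
from typing import Optional

from pydantic import BaseModel

class ActivitySketch(BaseModel):  # trimmed stand-in for the real Activity
    id: str
    timestamp: datetime
    summary: Optional[str] = None
    summary_detail: Optional[str] = None

# Pre-migration row: no summary keys, the fields simply stay None.
print(ActivitySketch(id="a1", timestamp=datetime.now()).summary)  # None

# Post-migration row: extra keys populate the new fields.
row = {"id": "a2", "timestamp": datetime.now(), "summary": "Read file: main.py"}
print(ActivitySketch(**row).summary)  # Read file: main.py
```
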
--- omni_cortex-1.4.0/omni_cortex/database/migrations.py
+++ omni_cortex-1.5.0/omni_cortex/database/migrations.py
@@ -24,6 +24,12 @@ MIGRATIONS: dict[str, str] = {
     CREATE INDEX IF NOT EXISTS idx_activities_mcp ON activities(mcp_server);
     CREATE INDEX IF NOT EXISTS idx_activities_skill ON activities(skill_name);
     """,
+    # Natural language summary columns for activity display
+    "1.2": """
+    -- Add natural language summary columns to activities table
+    ALTER TABLE activities ADD COLUMN summary TEXT;
+    ALTER TABLE activities ADD COLUMN summary_detail TEXT;
+    """,
 }
 
 
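
The runner that consumes this dict is not shown in the diff; a generic sketch of how a version-keyed migration dict like `MIGRATIONS` is typically applied (the tracking mechanism here is an assumption, not the package's actual code):

```python
import sqlite3

def apply_pending(conn: sqlite3.Connection, migrations: dict[str, str],
                  applied: set[str]) -> list[str]:
    """Run migration scripts whose version key has not been applied yet."""
    ran = []
    # String sort is fine while versions stay single-digit ("1.1" < "1.2").
    for version in sorted(migrations):
        if version not in applied:
            conn.executescript(migrations[version])
            ran.append(version)
    return ran
```
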
--- omni_cortex-1.4.0/omni_cortex/database/schema.py
+++ omni_cortex-1.5.0/omni_cortex/database/schema.py
@@ -46,6 +46,14 @@ CREATE TABLE IF NOT EXISTS activities (
     project_path TEXT,
     file_path TEXT,
     metadata TEXT,
+    -- Command analytics columns (v1.1)
+    command_name TEXT,
+    command_scope TEXT,
+    mcp_server TEXT,
+    skill_name TEXT,
+    -- Natural language summaries (v1.2)
+    summary TEXT,
+    summary_detail TEXT,
     FOREIGN KEY (session_id) REFERENCES sessions(id),
     FOREIGN KEY (agent_id) REFERENCES agents(id)
 );
--- omni_cortex-1.4.0/omni_cortex/tools/activities.py
+++ omni_cortex-1.5.0/omni_cortex/tools/activities.py
@@ -13,6 +13,7 @@ from ..models.activity import Activity, ActivityCreate, create_activity, get_act
 from ..models.memory import list_memories
 from ..utils.formatting import format_activity_markdown, format_timeline_markdown
 from ..utils.timestamps import now_iso, parse_iso
+from pathlib import Path
 
 
 # === Input Models ===
@@ -319,3 +320,134 @@ def _extract_mcp_server(tool_name: str) -> Optional[str]:
         return parts[1]  # Server name is the second part
 
     return None
+
+
+# === Natural Language Summary Generation ===
+
+
+def generate_activity_summary(
+    tool_name: Optional[str],
+    tool_input: Optional[str],
+    success: bool,
+    file_path: Optional[str],
+    event_type: str,
+) -> tuple[str, str]:
+    """Generate natural language summary for an activity.
+
+    Returns:
+        tuple of (short_summary, detailed_summary)
+        - short_summary: 12-20 words, shown in collapsed view
+        - detailed_summary: Expanded description with more context
+    """
+    short = ""
+    detail = ""
+
+    # Parse tool input if available
+    input_data = {}
+    if tool_input:
+        try:
+            input_data = json.loads(tool_input)
+        except (json.JSONDecodeError, TypeError):
+            pass
+
+    # Generate summaries based on tool type
+    if tool_name == "Read":
+        path = input_data.get("file_path", file_path or "unknown file")
+        filename = Path(path).name if path else "file"
+        short = f"Read file: {filename}"
+        detail = f"Reading contents of {path}"
+
+    elif tool_name == "Write":
+        path = input_data.get("file_path", file_path or "unknown file")
+        filename = Path(path).name if path else "file"
+        short = f"Write file: {filename}"
+        detail = f"Writing/creating file at {path}"
+
+    elif tool_name == "Edit":
+        path = input_data.get("file_path", file_path or "unknown file")
+        filename = Path(path).name if path else "file"
+        short = f"Edit file: {filename}"
+        detail = f"Editing {path} - replacing text content"
+
+    elif tool_name == "Bash":
+        cmd = input_data.get("command", "")[:50]
+        short = f"Run command: {cmd}..."
+        detail = f"Executing bash command: {input_data.get('command', 'unknown')}"
+
+    elif tool_name == "Grep":
+        pattern = input_data.get("pattern", "")
+        short = f"Search for: {pattern[:30]}"
+        detail = f"Searching codebase for pattern: {pattern}"
+
+    elif tool_name == "Glob":
+        pattern = input_data.get("pattern", "")
+        short = f"Find files: {pattern[:30]}"
+        detail = f"Finding files matching pattern: {pattern}"
+
+    elif tool_name == "Skill":
+        skill = input_data.get("skill", "unknown")
+        short = f"Run skill: /{skill}"
+        detail = f"Executing slash command /{skill}"
+
+    elif tool_name == "Task":
+        desc = input_data.get("description", "task")
+        short = f"Spawn agent: {desc[:30]}"
+        detail = f"Launching sub-agent for: {input_data.get('prompt', desc)[:100]}"
+
+    elif tool_name == "WebSearch":
+        query = input_data.get("query", "")
+        short = f"Web search: {query[:30]}"
+        detail = f"Searching the web for: {query}"
+
+    elif tool_name == "WebFetch":
+        url = input_data.get("url", "")
+        short = f"Fetch URL: {url[:40]}"
+        detail = f"Fetching content from: {url}"
+
+    elif tool_name == "TodoWrite":
+        todos = input_data.get("todos", [])
+        count = len(todos) if isinstance(todos, list) else 0
+        short = f"Update todo list: {count} items"
+        detail = f"Managing task list with {count} items"
+
+    elif tool_name == "AskUserQuestion":
+        questions = input_data.get("questions", [])
+        count = len(questions) if isinstance(questions, list) else 1
+        short = f"Ask user: {count} question(s)"
+        detail = f"Prompting user for input with {count} question(s)"
+
+    elif tool_name and tool_name.startswith("mcp__"):
+        parts = tool_name.split("__")
+        server = parts[1] if len(parts) > 1 else "unknown"
+        tool = parts[2] if len(parts) > 2 else tool_name
+        short = f"MCP call: {server}/{tool}"
+        detail = f"Calling {tool} tool from MCP server {server}"
+
+    elif tool_name == "cortex_remember" or (tool_name and "remember" in tool_name.lower()):
+        params = input_data.get("params", {})
+        content = params.get("content", "") if isinstance(params, dict) else ""
+        short = f"Store memory: {content[:30]}..." if content else "Store memory"
+        detail = f"Saving to memory system: {content[:100]}" if content else "Saving to memory system"
+
+    elif tool_name == "cortex_recall" or (tool_name and "recall" in tool_name.lower()):
+        params = input_data.get("params", {})
+        query = params.get("query", "") if isinstance(params, dict) else ""
+        short = f"Recall: {query[:30]}" if query else "Recall memories"
+        detail = f"Searching memories for: {query}" if query else "Retrieving memories"
+
+    elif tool_name == "NotebookEdit":
+        path = input_data.get("notebook_path", "")
+        filename = Path(path).name if path else "notebook"
+        short = f"Edit notebook: {filename}"
+        detail = f"Editing Jupyter notebook {path}"
+
+    else:
+        short = f"{event_type}: {tool_name or 'unknown'}"
+        detail = f"Activity type {event_type} with tool {tool_name}"
+
+    # Add status suffix for failures
+    if not success:
+        short = f"[FAILED] {short}"
+        detail = f"[FAILED] {detail}"
+
+    return short, detail
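
The `mcp__` branch recovers both server and tool from the tool name itself by splitting on double underscores; a quick check of that branch together with the failure prefix (the import path mirrors the file list above):

```python
from omni_cortex.tools.activities import generate_activity_summary

short, detail = generate_activity_summary(
    tool_name="mcp__cortex__recall",  # server "cortex", tool "recall"
    tool_input=None,
    success=False,
    file_path=None,
    event_type="tool_use",
)
print(short)   # [FAILED] MCP call: cortex/recall
print(detail)  # [FAILED] Calling recall tool from MCP server cortex
```
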
--- omni_cortex-1.4.0/pyproject.toml
+++ omni_cortex-1.5.0/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "omni-cortex"
-version = "1.4.0"
+version = "1.5.0"
 description = "Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time"
 readme = "README.md"
 license = "MIT"
--- /dev/null
+++ omni_cortex-1.5.0/scripts/check-venv.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+"""
+Venv Health Check
+=================
+
+Diagnoses common venv issues like corrupted metadata, missing packages,
+and version mismatches.
+
+Usage:
+    python scripts/check-venv.py
+    python scripts/check-venv.py --fix  # Attempt auto-fix
+"""
+
+import importlib
+import importlib.metadata
+import sys
+from pathlib import Path
+
+
+def check_package(name: str, import_name: str = None) -> tuple[bool, str]:
+    """Check if a package is properly installed and importable."""
+    import_name = import_name or name.replace("-", "_")
+
+    try:
+        # Check if metadata is accessible
+        version = importlib.metadata.version(name)
+    except importlib.metadata.PackageNotFoundError:
+        return False, f"Not installed (metadata missing)"
+    except Exception as e:
+        return False, f"Metadata error: {e}"
+
+    try:
+        # Check if package is importable
+        module = importlib.import_module(import_name)
+        module_version = getattr(module, "__version__", "unknown")
+        if module_version != "unknown" and module_version != version:
+            return False, f"Version mismatch: metadata={version}, module={module_version}"
+        return True, f"OK (v{version})"
+    except ImportError as e:
+        return False, f"Import failed: {e}"
+    except Exception as e:
+        return False, f"Error: {e}"
+
+
+def main():
+    print("Venv Health Check")
+    print("=" * 50)
+    print(f"Python: {sys.executable}")
+    print(f"Version: {sys.version.split()[0]}")
+    print()
+
+    # Core packages to check
+    packages = [
+        ("pydantic", "pydantic"),
+        ("pydantic-core", "pydantic_core"),
+        ("fastapi", "fastapi"),
+        ("uvicorn", "uvicorn"),
+        ("pyyaml", "yaml"),
+        ("httpx", "httpx"),
+        ("aiosqlite", "aiosqlite"),
+    ]
+
+    # Check if we're in the right project
+    project_root = Path(__file__).parent.parent
+    pyproject = project_root / "pyproject.toml"
+
+    if pyproject.exists():
+        print(f"Project: {project_root}")
+        # Try to check omni-cortex
+        packages.insert(0, ("omni-cortex", "omni_cortex"))
+    print()
+
+    all_ok = True
+    issues = []
+
+    print("Package Status:")
+    print("-" * 50)
+    for pkg_name, import_name in packages:
+        ok, status = check_package(pkg_name, import_name)
+        symbol = "✓" if ok else "✗"
+        print(f"  {symbol} {pkg_name}: {status}")
+        if not ok:
+            all_ok = False
+            issues.append((pkg_name, status))
+
+    print()
+
+    if all_ok:
+        print("✓ All packages healthy!")
+    else:
+        print("✗ Issues detected:")
+        print()
+        for pkg, issue in issues:
+            print(f"  - {pkg}: {issue}")
+        print()
+        print("Suggested fixes:")
+        print("  1. Try force-reinstall: uv pip install --force-reinstall <package>")
+        print("  2. Rebuild venv: rm -rf .venv && uv venv && uv pip install -e .")
+        print("  3. Use system pip: pip install -e . --force-reinstall")
+        return 1
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
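
Because the file name `check-venv.py` contains a hyphen, it cannot be imported with a plain `import` statement; to reuse `check_package` programmatically it has to be loaded by path. A sketch, assuming it runs from the repository root:

```python
import importlib.util

spec = importlib.util.spec_from_file_location("check_venv", "scripts/check-venv.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)  # top level only defines functions; main() stays unrun

ok, status = mod.check_package("pyyaml", "yaml")  # dist name and import name differ
print(ok, status)  # e.g. True OK (v6.0.2)
```
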
The remaining files listed above are unchanged between 1.4.0 and 1.5.0.