omni-cortex 1.7.0__tar.gz → 1.8.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/PKG-INFO +204 -4
  2. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/README.md +201 -3
  3. omni_cortex-1.8.0/dashboard/backend/.env.example +12 -0
  4. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/chat_service.py +4 -2
  5. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/database.py +3 -2
  6. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/image_service.py +4 -1
  7. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/main.py +61 -0
  8. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/models.py +10 -0
  9. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/__init__.py +1 -1
  10. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/pyproject.toml +3 -1
  11. omni_cortex-1.7.0/dashboard/backend/.env.example +0 -22
  12. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/.gitignore +0 -0
  13. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/LICENSE +0 -0
  14. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/backfill_summaries.py +0 -0
  15. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/logging_config.py +0 -0
  16. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/project_config.py +0 -0
  17. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/project_scanner.py +0 -0
  18. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/prompt_security.py +0 -0
  19. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/pyproject.toml +0 -0
  20. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/security.py +0 -0
  21. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/uv.lock +0 -0
  22. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/dashboard/backend/websocket_manager.py +0 -0
  23. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/hooks/post_tool_use.py +0 -0
  24. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/hooks/pre_tool_use.py +0 -0
  25. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/hooks/session_utils.py +0 -0
  26. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/hooks/stop.py +0 -0
  27. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/hooks/subagent_stop.py +0 -0
  28. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/categorization/__init__.py +0 -0
  29. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/categorization/auto_tags.py +0 -0
  30. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/categorization/auto_type.py +0 -0
  31. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/config.py +0 -0
  32. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/dashboard.py +0 -0
  33. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/database/__init__.py +0 -0
  34. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/database/connection.py +0 -0
  35. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/database/migrations.py +0 -0
  36. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/database/schema.py +0 -0
  37. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/database/sync.py +0 -0
  38. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/decay/__init__.py +0 -0
  39. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/decay/importance.py +0 -0
  40. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/embeddings/__init__.py +0 -0
  41. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/embeddings/local.py +0 -0
  42. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/models/__init__.py +0 -0
  43. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/models/activity.py +0 -0
  44. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/models/agent.py +0 -0
  45. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/models/memory.py +0 -0
  46. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/models/relationship.py +0 -0
  47. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/models/session.py +0 -0
  48. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/resources/__init__.py +0 -0
  49. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/search/__init__.py +0 -0
  50. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/search/hybrid.py +0 -0
  51. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/search/keyword.py +0 -0
  52. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/search/ranking.py +0 -0
  53. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/search/semantic.py +0 -0
  54. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/server.py +0 -0
  55. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/setup.py +0 -0
  56. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/tools/__init__.py +0 -0
  57. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/tools/activities.py +0 -0
  58. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/tools/memories.py +0 -0
  59. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/tools/sessions.py +0 -0
  60. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/tools/utilities.py +0 -0
  61. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/utils/__init__.py +0 -0
  62. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/utils/formatting.py +0 -0
  63. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/utils/ids.py +0 -0
  64. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/utils/timestamps.py +0 -0
  65. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/omni_cortex/utils/truncation.py +0 -0
  66. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/scripts/check-venv.py +0 -0
  67. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/scripts/import_ken_memories.py +0 -0
  68. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/scripts/populate_session_data.py +0 -0
  69. {omni_cortex-1.7.0 → omni_cortex-1.8.0}/scripts/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omni-cortex
3
- Version: 1.7.0
3
+ Version: 1.8.0
4
4
  Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
5
5
  Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
6
6
  Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
@@ -21,9 +21,11 @@ Classifier: Programming Language :: Python :: 3.13
21
21
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
22
22
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
23
23
  Requires-Python: >=3.10
24
+ Requires-Dist: claude-agent-sdk>=0.1.0
24
25
  Requires-Dist: httpx>=0.25.0
25
26
  Requires-Dist: mcp>=1.0.0
26
27
  Requires-Dist: pydantic>=2.0.0
28
+ Requires-Dist: python-dotenv>=1.0.0
27
29
  Requires-Dist: pyyaml>=6.0.0
28
30
  Provides-Extra: dev
29
31
  Requires-Dist: black>=23.0.0; extra == 'dev'
@@ -76,7 +78,180 @@ A universal memory system for Claude Code that combines activity logging with in
76
78
  - **Importance Decay**: Frequently accessed memories naturally surface
77
79
  - **Auto Activity Logging**: Automatically logs all tool calls via hooks
78
80
 
79
- ## Installation
81
+ ## Getting Started (5 Minutes)
82
+
83
+ A step-by-step guide to get Omni Cortex running on your machine.
84
+
85
+ ### Prerequisites
86
+
87
+ - **Python 3.10+** - Check with `python --version`
88
+ - **Claude Code CLI** - The Anthropic CLI tool
89
+ - **pip** - Python package manager (comes with Python)
90
+
91
+ ### Step 1: Install the Package
92
+
93
+ **Option A: From PyPI (Recommended for most users)**
94
+ ```bash
95
+ pip install omni-cortex
96
+ ```
97
+
98
+ **Option B: From Source (For development/contributions)**
99
+ ```bash
100
+ git clone https://github.com/AllCytes/Omni-Cortex.git
101
+ cd Omni-Cortex
102
+ pip install -e ".[semantic]"
103
+ ```
104
+
105
+ **Expected output:**
106
+ ```
107
+ Successfully installed omni-cortex-1.8.0
108
+ ```
109
+
110
+ ### Step 2: Run the Setup
111
+
112
+ ```bash
113
+ omni-cortex-setup
114
+ ```
115
+
116
+ This automatically:
117
+ - Adds Omni Cortex as an MCP server in `~/.claude.json`
118
+ - Configures hooks in `~/.claude/settings.json` for activity logging
119
+
120
+ **Expected output:**
121
+ ```
122
+ ✓ MCP server configured
123
+ ✓ Hooks configured
124
+ Setup complete! Restart Claude Code to activate.
125
+ ```
126
+
127
+ ### Step 3: Restart Claude Code
128
+
129
+ Close and reopen your Claude Code terminal. This loads the new MCP configuration.
130
+
131
+ ### Step 4: Verify It's Working
132
+
133
+ In Claude Code, try storing a memory:
134
+
135
+ ```
136
+ Ask Claude: "Remember that the database uses SQLite for storage"
137
+ ```
138
+
139
+ Claude should use the `cortex_remember` tool. Then verify:
140
+
141
+ ```
142
+ Ask Claude: "What do you remember about the database?"
143
+ ```
144
+
145
+ Claude should use `cortex_recall` and find your memory.
146
+
147
+ ### Step 5: Start the Dashboard (Optional)
148
+
149
+ The web dashboard lets you browse and search memories visually.
150
+
151
+ ```bash
152
+ # Start the dashboard (opens http://localhost:5173)
153
+ omni-cortex-dashboard
154
+ ```
155
+
156
+ Or manually:
157
+ ```bash
158
+ # Terminal 1: Backend (uses dashboard's own venv)
159
+ cd dashboard/backend
160
+ .venv/Scripts/python -m uvicorn main:app --host 127.0.0.1 --port 8765 --reload
161
+
162
+ # Terminal 2: Frontend
163
+ cd dashboard/frontend
164
+ npm install
165
+ npm run dev
166
+ ```
167
+
168
+ Open http://localhost:5173 in your browser.
169
+
170
+ **Note:** The dashboard has its own virtual environment at `dashboard/backend/.venv` with FastAPI and other web dependencies. This is separate from the project root `.venv` which contains the MCP server package.
171
+
172
+ ### Troubleshooting
173
+
174
+ <details>
175
+ <summary><b>❌ "omni-cortex-setup" command not found</b></summary>
176
+
177
+ **Cause:** pip installed to a location not in your PATH.
178
+
179
+ **Solution:**
180
+ ```bash
181
+ # Find where pip installed it
182
+ python -m omni_cortex.setup
183
+
184
+ # Or add Python scripts to PATH (Windows)
185
+ # Add %APPDATA%\Python\Python3x\Scripts to your PATH
186
+
187
+ # On macOS/Linux, ensure ~/.local/bin is in PATH
188
+ export PATH="$HOME/.local/bin:$PATH"
189
+ ```
190
+ </details>
191
+
192
+ <details>
193
+ <summary><b>❌ Claude doesn't see cortex_* tools</b></summary>
194
+
195
+ **Cause:** MCP server not configured or Claude Code not restarted.
196
+
197
+ **Solution:**
198
+ 1. Check `~/.claude.json` contains the `omni-cortex` MCP server entry
199
+ 2. Fully close and reopen Claude Code (not just the terminal)
200
+ 3. Run `omni-cortex-setup` again if needed
201
+ </details>
202
+
203
+ <details>
204
+ <summary><b>❌ "ModuleNotFoundError: No module named 'omni_cortex'"</b></summary>
205
+
206
+ **Cause:** Python environment mismatch.
207
+
208
+ **Solution:**
209
+ ```bash
210
+ # Ensure you're using the same Python that pip used
211
+ which python # or `where python` on Windows
212
+ pip show omni-cortex # Check if installed
213
+
214
+ # Reinstall if needed
215
+ pip install --force-reinstall omni-cortex
216
+ ```
217
+ </details>
218
+
219
+ <details>
220
+ <summary><b>❌ Dashboard won't start</b></summary>
221
+
222
+ **Cause:** Missing dependencies or port conflict.
223
+
224
+ **Solution:**
225
+ ```bash
226
+ # Install backend dependencies
227
+ cd dashboard/backend
228
+ pip install -e .
229
+
230
+ # Check if port 8765 is in use
231
+ # Windows: netstat -ano | findstr :8765
232
+ # macOS/Linux: lsof -i :8765
233
+
234
+ # Use a different port if needed
235
+ uvicorn main:app --port 8766
236
+ ```
237
+ </details>
238
+
239
+ <details>
240
+ <summary><b>❌ Semantic search not working</b></summary>
241
+
242
+ **Cause:** Semantic extras not installed.
243
+
244
+ **Solution:**
245
+ ```bash
246
+ pip install omni-cortex[semantic]
247
+ ```
248
+
249
+ First search will download the embedding model (~100MB).
250
+ </details>
251
+
252
+ ---
253
+
254
+ ## Installation (Detailed)
80
255
 
81
256
  ### Quick Install (Recommended)
82
257
 
@@ -312,8 +487,12 @@ git clone https://github.com/AllCytes/Omni-Cortex.git
312
487
  cd Omni-Cortex
313
488
  pip install -e .
314
489
 
315
- # Install dashboard dependencies
316
- cd dashboard/backend && pip install -r requirements.txt
490
+ # Dashboard backend has its own venv (already included in repo)
491
+ # If missing, set it up:
492
+ cd dashboard/backend
493
+ python -m venv .venv
494
+ .venv/Scripts/pip install -r requirements.txt # Windows
495
+ # .venv/bin/pip install -r requirements.txt # macOS/Linux
317
496
  cd ../frontend && npm install
318
497
  cd ../..
319
498
 
@@ -324,6 +503,27 @@ omni-cortex-dashboard --help
324
503
 
325
504
  **Important**: Always use `pip install -e .` (editable mode) so changes are immediately reflected without reinstalling.
326
505
 
506
+ ### Project Structure
507
+
508
+ ```
509
+ omni-cortex/
510
+ ├── .venv/ # Project root venv (omni-cortex MCP package)
511
+ ├── src/omni_cortex/ # MCP server source code
512
+ ├── dashboard/
513
+ │ ├── backend/
514
+ │ │ ├── .venv/ # Dashboard backend venv (FastAPI, uvicorn)
515
+ │ │ ├── main.py # FastAPI application
516
+ │ │ └── database.py # Database queries
517
+ │ └── frontend/ # Vue 3 + Vite frontend
518
+ ├── adws/ # Agentic Development Workflows
519
+ ├── specs/ # Implementation plans
520
+ │ ├── todo/ # Plans waiting to be built
521
+ │ └── done/ # Completed plans
522
+ └── tests/ # Unit tests
523
+ ```
524
+
525
+ **Why two venvs?** The dashboard is a standalone web application that can be packaged/deployed separately from the MCP server. They have different dependencies (MCP server needs `mcp`, dashboard needs `fastapi`).
526
+
327
527
  ### Running Tests
328
528
 
329
529
  ```bash
@@ -39,7 +39,180 @@ A universal memory system for Claude Code that combines activity logging with in
39
39
  - **Importance Decay**: Frequently accessed memories naturally surface
40
40
  - **Auto Activity Logging**: Automatically logs all tool calls via hooks
41
41
 
42
- ## Installation
42
+ ## Getting Started (5 Minutes)
43
+
44
+ A step-by-step guide to get Omni Cortex running on your machine.
45
+
46
+ ### Prerequisites
47
+
48
+ - **Python 3.10+** - Check with `python --version`
49
+ - **Claude Code CLI** - The Anthropic CLI tool
50
+ - **pip** - Python package manager (comes with Python)
51
+
52
+ ### Step 1: Install the Package
53
+
54
+ **Option A: From PyPI (Recommended for most users)**
55
+ ```bash
56
+ pip install omni-cortex
57
+ ```
58
+
59
+ **Option B: From Source (For development/contributions)**
60
+ ```bash
61
+ git clone https://github.com/AllCytes/Omni-Cortex.git
62
+ cd Omni-Cortex
63
+ pip install -e ".[semantic]"
64
+ ```
65
+
66
+ **Expected output:**
67
+ ```
68
+ Successfully installed omni-cortex-1.8.0
69
+ ```
70
+
71
+ ### Step 2: Run the Setup
72
+
73
+ ```bash
74
+ omni-cortex-setup
75
+ ```
76
+
77
+ This automatically:
78
+ - Adds Omni Cortex as an MCP server in `~/.claude.json`
79
+ - Configures hooks in `~/.claude/settings.json` for activity logging
80
+
81
+ **Expected output:**
82
+ ```
83
+ ✓ MCP server configured
84
+ ✓ Hooks configured
85
+ Setup complete! Restart Claude Code to activate.
86
+ ```
87
+
88
+ ### Step 3: Restart Claude Code
89
+
90
+ Close and reopen your Claude Code terminal. This loads the new MCP configuration.
91
+
92
+ ### Step 4: Verify It's Working
93
+
94
+ In Claude Code, try storing a memory:
95
+
96
+ ```
97
+ Ask Claude: "Remember that the database uses SQLite for storage"
98
+ ```
99
+
100
+ Claude should use the `cortex_remember` tool. Then verify:
101
+
102
+ ```
103
+ Ask Claude: "What do you remember about the database?"
104
+ ```
105
+
106
+ Claude should use `cortex_recall` and find your memory.
107
+
108
+ ### Step 5: Start the Dashboard (Optional)
109
+
110
+ The web dashboard lets you browse and search memories visually.
111
+
112
+ ```bash
113
+ # Start the dashboard (opens http://localhost:5173)
114
+ omni-cortex-dashboard
115
+ ```
116
+
117
+ Or manually:
118
+ ```bash
119
+ # Terminal 1: Backend (uses dashboard's own venv)
120
+ cd dashboard/backend
121
+ .venv/Scripts/python -m uvicorn main:app --host 127.0.0.1 --port 8765 --reload
122
+
123
+ # Terminal 2: Frontend
124
+ cd dashboard/frontend
125
+ npm install
126
+ npm run dev
127
+ ```
128
+
129
+ Open http://localhost:5173 in your browser.
130
+
131
+ **Note:** The dashboard has its own virtual environment at `dashboard/backend/.venv` with FastAPI and other web dependencies. This is separate from the project root `.venv` which contains the MCP server package.
132
+
133
+ ### Troubleshooting
134
+
135
+ <details>
136
+ <summary><b>❌ "omni-cortex-setup" command not found</b></summary>
137
+
138
+ **Cause:** pip installed to a location not in your PATH.
139
+
140
+ **Solution:**
141
+ ```bash
142
+ # Find where pip installed it
143
+ python -m omni_cortex.setup
144
+
145
+ # Or add Python scripts to PATH (Windows)
146
+ # Add %APPDATA%\Python\Python3x\Scripts to your PATH
147
+
148
+ # On macOS/Linux, ensure ~/.local/bin is in PATH
149
+ export PATH="$HOME/.local/bin:$PATH"
150
+ ```
151
+ </details>
152
+
153
+ <details>
154
+ <summary><b>❌ Claude doesn't see cortex_* tools</b></summary>
155
+
156
+ **Cause:** MCP server not configured or Claude Code not restarted.
157
+
158
+ **Solution:**
159
+ 1. Check `~/.claude.json` contains the `omni-cortex` MCP server entry
160
+ 2. Fully close and reopen Claude Code (not just the terminal)
161
+ 3. Run `omni-cortex-setup` again if needed
162
+ </details>
163
+
164
+ <details>
165
+ <summary><b>❌ "ModuleNotFoundError: No module named 'omni_cortex'"</b></summary>
166
+
167
+ **Cause:** Python environment mismatch.
168
+
169
+ **Solution:**
170
+ ```bash
171
+ # Ensure you're using the same Python that pip used
172
+ which python # or `where python` on Windows
173
+ pip show omni-cortex # Check if installed
174
+
175
+ # Reinstall if needed
176
+ pip install --force-reinstall omni-cortex
177
+ ```
178
+ </details>
179
+
180
+ <details>
181
+ <summary><b>❌ Dashboard won't start</b></summary>
182
+
183
+ **Cause:** Missing dependencies or port conflict.
184
+
185
+ **Solution:**
186
+ ```bash
187
+ # Install backend dependencies
188
+ cd dashboard/backend
189
+ pip install -e .
190
+
191
+ # Check if port 8765 is in use
192
+ # Windows: netstat -ano | findstr :8765
193
+ # macOS/Linux: lsof -i :8765
194
+
195
+ # Use a different port if needed
196
+ uvicorn main:app --port 8766
197
+ ```
198
+ </details>
199
+
200
+ <details>
201
+ <summary><b>❌ Semantic search not working</b></summary>
202
+
203
+ **Cause:** Semantic extras not installed.
204
+
205
+ **Solution:**
206
+ ```bash
207
+ pip install omni-cortex[semantic]
208
+ ```
209
+
210
+ First search will download the embedding model (~100MB).
211
+ </details>
212
+
213
+ ---
214
+
215
+ ## Installation (Detailed)
43
216
 
44
217
  ### Quick Install (Recommended)
45
218
 
@@ -275,8 +448,12 @@ git clone https://github.com/AllCytes/Omni-Cortex.git
275
448
  cd Omni-Cortex
276
449
  pip install -e .
277
450
 
278
- # Install dashboard dependencies
279
- cd dashboard/backend && pip install -r requirements.txt
451
+ # Dashboard backend has its own venv (already included in repo)
452
+ # If missing, set it up:
453
+ cd dashboard/backend
454
+ python -m venv .venv
455
+ .venv/Scripts/pip install -r requirements.txt # Windows
456
+ # .venv/bin/pip install -r requirements.txt # macOS/Linux
280
457
  cd ../frontend && npm install
281
458
  cd ../..
282
459
 
@@ -287,6 +464,27 @@ omni-cortex-dashboard --help
287
464
 
288
465
  **Important**: Always use `pip install -e .` (editable mode) so changes are immediately reflected without reinstalling.
289
466
 
467
+ ### Project Structure
468
+
469
+ ```
470
+ omni-cortex/
471
+ ├── .venv/ # Project root venv (omni-cortex MCP package)
472
+ ├── src/omni_cortex/ # MCP server source code
473
+ ├── dashboard/
474
+ │ ├── backend/
475
+ │ │ ├── .venv/ # Dashboard backend venv (FastAPI, uvicorn)
476
+ │ │ ├── main.py # FastAPI application
477
+ │ │ └── database.py # Database queries
478
+ │ └── frontend/ # Vue 3 + Vite frontend
479
+ ├── adws/ # Agentic Development Workflows
480
+ ├── specs/ # Implementation plans
481
+ │ ├── todo/ # Plans waiting to be built
482
+ │ └── done/ # Completed plans
483
+ └── tests/ # Unit tests
484
+ ```
485
+
486
+ **Why two venvs?** The dashboard is a standalone web application that can be packaged/deployed separately from the MCP server. They have different dependencies (MCP server needs `mcp`, dashboard needs `fastapi`).
487
+
290
488
  ### Running Tests
291
489
 
292
490
  ```bash
@@ -0,0 +1,12 @@
1
+ # Dashboard Backend Environment Configuration
2
+ #
3
+ # NOTE: This file is for reference only.
4
+ # The dashboard now loads from the PROJECT ROOT .env file.
5
+ #
6
+ # Copy the root .env.example to .env and configure there:
7
+ # cp ../../.env.example ../../.env
8
+ #
9
+ # Required settings in root .env:
10
+ # GEMINI_API_KEY=your-api-key-here
11
+ #
12
+ # See ../../.env.example for all available options.
@@ -1,6 +1,7 @@
1
1
  """Chat service for natural language queries about memories using Gemini Flash."""
2
2
 
3
3
  import os
4
+ from pathlib import Path
4
5
  from typing import Optional, AsyncGenerator, Any
5
6
 
6
7
  from dotenv import load_dotenv
@@ -9,8 +10,9 @@ from database import search_memories, get_memories, create_memory
9
10
  from models import FilterParams
10
11
  from prompt_security import build_safe_prompt, xml_escape
11
12
 
12
- # Load environment variables
13
- load_dotenv()
13
+ # Load environment variables from project root
14
+ _project_root = Path(__file__).parent.parent.parent
15
+ load_dotenv(_project_root / ".env")
14
16
 
15
17
  # Configure Gemini
16
18
  _api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
@@ -1049,8 +1049,8 @@ def create_memory(
1049
1049
  # Insert memory
1050
1050
  conn.execute(
1051
1051
  """
1052
- INSERT INTO memories (id, content, context, type, status, importance_score, access_count, created_at, last_accessed, tags)
1053
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
1052
+ INSERT INTO memories (id, content, context, type, status, importance_score, access_count, created_at, last_accessed, updated_at, tags)
1053
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
1054
1054
  """,
1055
1055
  (
1056
1056
  memory_id,
@@ -1062,6 +1062,7 @@ def create_memory(
1062
1062
  0,
1063
1063
  now,
1064
1064
  now,
1065
+ now,
1065
1066
  json.dumps(tags) if tags else None,
1066
1067
  ),
1067
1068
  )
@@ -5,6 +5,7 @@ import os
5
5
  import uuid
6
6
  from dataclasses import dataclass, field
7
7
  from enum import Enum
8
+ from pathlib import Path
8
9
  from typing import Optional
9
10
 
10
11
  from dotenv import load_dotenv
@@ -12,7 +13,9 @@ from dotenv import load_dotenv
12
13
  from database import get_memory_by_id
13
14
  from prompt_security import xml_escape
14
15
 
15
- load_dotenv()
16
+ # Load environment variables from project root
17
+ _project_root = Path(__file__).parent.parent.parent
18
+ load_dotenv(_project_root / ".env")
16
19
 
17
20
 
18
21
  class ImagePreset(str, Enum):
@@ -31,6 +31,7 @@ except ImportError:
31
31
 
32
32
  from database import (
33
33
  bulk_update_memory_status,
34
+ create_memory,
34
35
  delete_memory,
35
36
  ensure_migrations,
36
37
  get_activities,
@@ -62,6 +63,7 @@ from models import (
62
63
  ConversationSaveRequest,
63
64
  ConversationSaveResponse,
64
65
  FilterParams,
66
+ MemoryCreateRequest,
65
67
  MemoryUpdate,
66
68
  ProjectInfo,
67
69
  ProjectRegistration,
@@ -230,6 +232,20 @@ if RATE_LIMITING_AVAILABLE:
230
232
  else:
231
233
  limiter = None
232
234
 
235
+
236
+ def rate_limit(limit_string: str):
237
+ """Decorator for conditional rate limiting.
238
+
239
+ Returns the actual limiter decorator if available, otherwise a no-op.
240
+ Usage: @rate_limit("10/minute")
241
+ """
242
+ if limiter is not None:
243
+ return limiter.limit(limit_string)
244
+ # No-op decorator when rate limiting is not available
245
+ def noop_decorator(func):
246
+ return func
247
+ return noop_decorator
248
+
233
249
  # CORS configuration (environment-aware)
234
250
  cors_config = get_cors_config()
235
251
  app.add_middleware(
@@ -333,6 +349,7 @@ async def refresh_projects():
333
349
 
334
350
 
335
351
  @app.get("/api/memories")
352
+ @rate_limit("100/minute")
336
353
  async def list_memories(
337
354
  project: str = Query(..., description="Path to the database file"),
338
355
  memory_type: Optional[str] = Query(None, alias="type"),
@@ -373,6 +390,46 @@ async def list_memories(
373
390
  raise
374
391
 
375
392
 
393
+ @app.post("/api/memories")
394
+ @rate_limit("30/minute")
395
+ async def create_memory_endpoint(
396
+ request: MemoryCreateRequest,
397
+ project: str = Query(..., description="Path to the database file"),
398
+ ):
399
+ """Create a new memory."""
400
+ try:
401
+ if not Path(project).exists():
402
+ log_error("/api/memories POST", FileNotFoundError("Database not found"), project=project)
403
+ raise HTTPException(status_code=404, detail="Database not found")
404
+
405
+ # Create the memory
406
+ memory_id = create_memory(
407
+ db_path=project,
408
+ content=request.content,
409
+ memory_type=request.memory_type,
410
+ context=request.context,
411
+ tags=request.tags if request.tags else None,
412
+ importance_score=request.importance_score,
413
+ )
414
+
415
+ # Fetch the created memory to return it
416
+ created_memory = get_memory_by_id(project, memory_id)
417
+
418
+ # Broadcast to WebSocket clients
419
+ await manager.broadcast("memory_created", created_memory.model_dump(by_alias=True))
420
+
421
+ log_success("/api/memories POST", memory_id=memory_id, type=request.memory_type)
422
+ return created_memory
423
+ except HTTPException:
424
+ raise
425
+ except Exception as e:
426
+ import traceback
427
+ print(f"[DEBUG] create_memory_endpoint error: {type(e).__name__}: {e}")
428
+ traceback.print_exc()
429
+ log_error("/api/memories POST", e, project=project)
430
+ raise
431
+
432
+
376
433
  # NOTE: These routes MUST be defined before /api/memories/{memory_id} to avoid path conflicts
377
434
  @app.get("/api/memories/needs-review")
378
435
  async def get_memories_needing_review_endpoint(
@@ -744,6 +801,7 @@ async def chat_status():
744
801
 
745
802
 
746
803
  @app.post("/api/chat", response_model=ChatResponse)
804
+ @rate_limit("10/minute")
747
805
  async def chat_with_memories(
748
806
  request: ChatRequest,
749
807
  project: str = Query(..., description="Path to the database file"),
@@ -770,6 +828,7 @@ async def chat_with_memories(
770
828
 
771
829
 
772
830
  @app.get("/api/chat/stream")
831
+ @rate_limit("10/minute")
773
832
  async def stream_chat(
774
833
  project: str = Query(..., description="Path to the database file"),
775
834
  question: str = Query(..., description="The question to ask"),
@@ -846,6 +905,7 @@ async def get_image_presets():
846
905
 
847
906
 
848
907
  @app.post("/api/image/generate-batch", response_model=BatchImageGenerationResponse)
908
+ @rate_limit("5/minute")
849
909
  async def generate_images_batch(
850
910
  request: BatchImageGenerationRequest,
851
911
  db_path: str = Query(..., alias="project", description="Path to the database file"),
@@ -903,6 +963,7 @@ async def generate_images_batch(
903
963
 
904
964
 
905
965
  @app.post("/api/image/refine", response_model=SingleImageResponseModel)
966
+ @rate_limit("5/minute")
906
967
  async def refine_image(request: ImageRefineRequest):
907
968
  """Refine an existing generated image with a new prompt."""
908
969
  result = await image_service.refine_image(
@@ -128,6 +128,16 @@ class FilterParams(BaseModel):
128
128
  offset: int = 0
129
129
 
130
130
 
131
+ class MemoryCreateRequest(BaseModel):
132
+ """Create request for a new memory."""
133
+
134
+ content: str = Field(..., min_length=1, max_length=50000)
135
+ memory_type: str = Field(default="general")
136
+ context: Optional[str] = None
137
+ importance_score: int = Field(default=50, ge=1, le=100)
138
+ tags: list[str] = Field(default_factory=list)
139
+
140
+
131
141
  class MemoryUpdate(BaseModel):
132
142
  """Update request for a memory."""
133
143
 
@@ -1,3 +1,3 @@
1
1
  """Omni Cortex MCP - Universal Memory System for Claude Code."""
2
2
 
3
- __version__ = "1.7.0"
3
+ __version__ = "1.8.0"
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "omni-cortex"
7
- version = "1.7.0"
7
+ version = "1.8.0"
8
8
  description = "Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time"
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -31,6 +31,8 @@ dependencies = [
31
31
  "pydantic>=2.0.0",
32
32
  "httpx>=0.25.0",
33
33
  "pyyaml>=6.0.0",
34
+ "python-dotenv>=1.0.0",
35
+ "claude-agent-sdk>=0.1.0",
34
36
  ]
35
37
 
36
38
  [project.urls]
@@ -1,22 +0,0 @@
1
- # Omni-Cortex Dashboard Environment Configuration
2
- # Copy this file to .env and fill in your values
3
-
4
- # Gemini API Key for AI chat and image generation
5
- # Get your key from: https://aistudio.google.com/apikey
6
- GEMINI_API_KEY=your-api-key-here
7
-
8
- # Alternative (also works)
9
- # GOOGLE_API_KEY=your-api-key-here
10
-
11
- # API Key for dashboard access (auto-generated if not set)
12
- # DASHBOARD_API_KEY=your-secret-key-here
13
-
14
- # Environment: development or production
15
- # ENVIRONMENT=development
16
-
17
- # CORS Origins (comma-separated, for production)
18
- # CORS_ORIGINS=https://your-domain.com
19
-
20
- # SSL Configuration (optional, for HTTPS)
21
- # SSL_KEYFILE=/path/to/key.pem
22
- # SSL_CERTFILE=/path/to/cert.pem
File without changes
File without changes
File without changes