omni-cortex 1.0.3__tar.gz → 1.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/PKG-INFO +40 -1
  2. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/README.md +39 -0
  3. omni_cortex-1.0.5/dashboard/backend/chat_service.py +140 -0
  4. omni_cortex-1.0.5/dashboard/backend/database.py +729 -0
  5. omni_cortex-1.0.5/dashboard/backend/main.py +661 -0
  6. omni_cortex-1.0.5/dashboard/backend/models.py +140 -0
  7. omni_cortex-1.0.5/dashboard/backend/project_scanner.py +141 -0
  8. omni_cortex-1.0.5/dashboard/backend/pyproject.toml +23 -0
  9. omni_cortex-1.0.5/dashboard/backend/uv.lock +697 -0
  10. omni_cortex-1.0.5/dashboard/backend/websocket_manager.py +82 -0
  11. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/__init__.py +1 -1
  12. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/config.py +2 -2
  13. omni_cortex-1.0.5/omni_cortex/dashboard.py +184 -0
  14. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/search/semantic.py +7 -0
  15. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/pyproject.toml +6 -1
  16. omni_cortex-1.0.5/scripts/populate_session_data.py +255 -0
  17. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/.gitignore +0 -0
  18. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/LICENSE +0 -0
  19. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/hooks/post_tool_use.py +0 -0
  20. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/hooks/pre_tool_use.py +0 -0
  21. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/hooks/stop.py +0 -0
  22. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/hooks/subagent_stop.py +0 -0
  23. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/categorization/__init__.py +0 -0
  24. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/categorization/auto_tags.py +0 -0
  25. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/categorization/auto_type.py +0 -0
  26. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/database/__init__.py +0 -0
  27. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/database/connection.py +0 -0
  28. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/database/migrations.py +0 -0
  29. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/database/schema.py +0 -0
  30. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/database/sync.py +0 -0
  31. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/decay/__init__.py +0 -0
  32. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/decay/importance.py +0 -0
  33. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/embeddings/__init__.py +0 -0
  34. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/embeddings/local.py +0 -0
  35. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/models/__init__.py +0 -0
  36. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/models/activity.py +0 -0
  37. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/models/agent.py +0 -0
  38. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/models/memory.py +0 -0
  39. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/models/relationship.py +0 -0
  40. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/models/session.py +0 -0
  41. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/resources/__init__.py +0 -0
  42. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/search/__init__.py +0 -0
  43. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/search/hybrid.py +0 -0
  44. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/search/keyword.py +0 -0
  45. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/search/ranking.py +0 -0
  46. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/server.py +0 -0
  47. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/setup.py +0 -0
  48. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/tools/__init__.py +0 -0
  49. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/tools/activities.py +0 -0
  50. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/tools/memories.py +0 -0
  51. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/tools/sessions.py +0 -0
  52. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/tools/utilities.py +0 -0
  53. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/utils/__init__.py +0 -0
  54. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/utils/formatting.py +0 -0
  55. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/utils/ids.py +0 -0
  56. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/utils/timestamps.py +0 -0
  57. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/omni_cortex/utils/truncation.py +0 -0
  58. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/scripts/import_ken_memories.py +0 -0
  59. {omni_cortex-1.0.3 → omni_cortex-1.0.5}/scripts/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omni-cortex
3
- Version: 1.0.3
3
+ Version: 1.0.5
4
4
  Summary: Universal Memory MCP for Claude Code - dual-layer activity logging and knowledge storage
5
5
  Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
6
6
  Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
@@ -205,6 +205,45 @@ auto_provide_context: true
205
205
  context_depth: 3
206
206
  ```
207
207
 
208
+ ## Web Dashboard
209
+
210
+ A visual interface for browsing, searching, and managing your memories.
211
+
212
+ ![Dashboard Preview](docs/images/dashboard-preview.png)
213
+
214
+ ### Features
215
+ - **Memory Browser**: View, search, filter, and edit memories
216
+ - **Ask AI**: Chat with your memories using Gemini
217
+ - **Real-time Updates**: WebSocket-based live sync
218
+ - **Statistics**: Memory counts, types, tags distribution
219
+ - **Project Switcher**: Switch between project databases
220
+
221
+ ### Quick Start
222
+
223
+ ```bash
224
+ # Backend (requires Python 3.10+)
225
+ cd dashboard/backend
226
+ pip install -e .
227
+ uvicorn main:app --host 0.0.0.0 --port 8765 --reload
228
+
229
+ # Frontend (requires Node.js 18+)
230
+ cd dashboard/frontend
231
+ npm install
232
+ npm run dev
233
+ ```
234
+
235
+ Open http://localhost:5173 in your browser.
236
+
237
+ ### Ask AI Setup (Optional)
238
+
239
+ To enable the "Ask AI" chat feature, set your Gemini API key:
240
+
241
+ ```bash
242
+ export GEMINI_API_KEY=your_api_key_here
243
+ ```
244
+
245
+ See [dashboard/README.md](dashboard/README.md) for full documentation.
246
+
208
247
  ## Documentation
209
248
 
210
249
  - [Tool Reference](docs/TOOLS.md) - Complete documentation for all 18 tools with examples
@@ -168,6 +168,45 @@ auto_provide_context: true
168
168
  context_depth: 3
169
169
  ```
170
170
 
171
+ ## Web Dashboard
172
+
173
+ A visual interface for browsing, searching, and managing your memories.
174
+
175
+ ![Dashboard Preview](docs/images/dashboard-preview.png)
176
+
177
+ ### Features
178
+ - **Memory Browser**: View, search, filter, and edit memories
179
+ - **Ask AI**: Chat with your memories using Gemini
180
+ - **Real-time Updates**: WebSocket-based live sync
181
+ - **Statistics**: Memory counts, types, tags distribution
182
+ - **Project Switcher**: Switch between project databases
183
+
184
+ ### Quick Start
185
+
186
+ ```bash
187
+ # Backend (requires Python 3.10+)
188
+ cd dashboard/backend
189
+ pip install -e .
190
+ uvicorn main:app --host 0.0.0.0 --port 8765 --reload
191
+
192
+ # Frontend (requires Node.js 18+)
193
+ cd dashboard/frontend
194
+ npm install
195
+ npm run dev
196
+ ```
197
+
198
+ Open http://localhost:5173 in your browser.
199
+
200
+ ### Ask AI Setup (Optional)
201
+
202
+ To enable the "Ask AI" chat feature, set your Gemini API key:
203
+
204
+ ```bash
205
+ export GEMINI_API_KEY=your_api_key_here
206
+ ```
207
+
208
+ See [dashboard/README.md](dashboard/README.md) for full documentation.
209
+
171
210
  ## Documentation
172
211
 
173
212
  - [Tool Reference](docs/TOOLS.md) - Complete documentation for all 18 tools with examples
@@ -0,0 +1,140 @@
"""Chat service for natural language queries about memories using Gemini Flash."""

import os
from typing import Optional

import google.generativeai as genai
from dotenv import load_dotenv

from database import search_memories, get_memories
from models import FilterParams

# Load environment variables from a local .env file, if present.
load_dotenv()

# Configure Gemini: prefer GEMINI_API_KEY, fall back to GOOGLE_API_KEY.
# NOTE(review): the key is read once at import time; changing the env var
# afterwards has no effect without a process restart.
_api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
# Lazily-created singleton model instance; populated by get_model().
_model: Optional[genai.GenerativeModel] = None
19
+
20
def get_model() -> Optional[genai.GenerativeModel]:
    """Return the shared Gemini model, creating it lazily on first use.

    Returns:
        The cached ``GenerativeModel`` instance, or ``None`` when no API
        key is configured (the model is never constructed in that case).
    """
    global _model
    # Fast path: model already initialized on a previous call.
    if _model is not None:
        return _model
    # Only attempt initialization when an API key was found at import time.
    if _api_key:
        genai.configure(api_key=_api_key)
        _model = genai.GenerativeModel("gemini-2.0-flash-exp")
    return _model
27
+
28
+
29
def is_available() -> bool:
    """Check if the chat service is available.

    Returns:
        True only when a non-empty API key was found in the environment
        at import time (GEMINI_API_KEY or GOOGLE_API_KEY).
    """
    # bool() rather than `is not None`: an empty-string env var previously
    # reported the service as available even though get_model() could never
    # initialize a model with it, yielding a confusing "model_init_failed"
    # instead of the "configure your API key" message.
    return bool(_api_key)
32
+
33
+
34
async def ask_about_memories(
    db_path: str,
    question: str,
    max_memories: int = 10,
) -> dict:
    """Ask a natural language question about memories.

    Relevant memories are looked up via keyword search (falling back to the
    most recently accessed ones when search yields nothing), formatted into a
    prompt, and sent to the Gemini model for an answer.

    Args:
        db_path: Path to the database file
        question: The user's question
        max_memories: Maximum memories to include in context

    Returns:
        Dict with keys "answer" (str), "sources" (list of summaries of the
        memories used as context), and "error" (None on success, otherwise a
        short machine-readable code: "api_key_missing", "model_init_failed",
        or "generation_failed").
    """
    # Fail fast with a user-facing message when no API key is configured.
    if not is_available():
        return {
            "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
            "sources": [],
            "error": "api_key_missing",
        }

    model = get_model()
    if not model:
        return {
            "answer": "Failed to initialize Gemini model.",
            "sources": [],
            "error": "model_init_failed",
        }

    # Search for relevant memories
    memories = search_memories(db_path, question, limit=max_memories)

    # If no memories found via search, fall back to the most recently
    # accessed ones so the model still has some context to work with.
    if not memories:
        filters = FilterParams(
            sort_by="last_accessed",
            sort_order="desc",
            limit=max_memories,
            offset=0,
        )
        memories = get_memories(db_path, filters)

    if not memories:
        # Empty database: not an error, just nothing to answer from.
        return {
            "answer": "No memories found in the database to answer your question.",
            "sources": [],
            "error": None,
        }

    # Build the prompt context and, in parallel, the "sources" payload
    # returned to the caller so the UI can show which memories were used.
    memory_context = []
    sources = []
    for i, mem in enumerate(memories, 1):
        memory_context.append(f"""
Memory {i}:
- Type: {mem.memory_type}
- Content: {mem.content}
- Context: {mem.context or 'N/A'}
- Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
- Status: {mem.status}
- Importance: {mem.importance_score}/100
""")
        sources.append({
            "id": mem.id,
            "type": mem.memory_type,
            "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
            "tags": mem.tags,
        })

    context_str = "\n---\n".join(memory_context)

    # Create prompt
    prompt = f"""You are a helpful assistant that answers questions about stored memories and knowledge.

The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.

Here are the relevant memories:

{context_str}

User question: {question}

Instructions:
1. Answer the question based on the memories provided
2. If the memories don't contain relevant information, say so
3. Reference specific memories when appropriate (e.g., "According to memory 1...")
4. Be concise but thorough
5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible

Answer:"""

    try:
        # NOTE(review): generate_content is a blocking network call inside an
        # async function — presumably acceptable at current load, but consider
        # offloading to a thread executor. TODO confirm.
        response = model.generate_content(prompt)
        answer = response.text
    except Exception as e:
        # Generation failures still return the sources we gathered, so the
        # caller can show what context was attempted.
        return {
            "answer": f"Failed to generate response: {str(e)}",
            "sources": sources,
            "error": "generation_failed",
        }

    return {
        "answer": answer,
        "sources": sources,
        "error": None,
    }