omni-cortex 1.0.10-py3-none-any.whl → 1.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +290 -0
  2. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/database.py +78 -0
  3. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/main.py +58 -1
  4. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/models.py +23 -0
  5. {omni_cortex-1.0.10.dist-info → omni_cortex-1.1.0.dist-info}/METADATA +1 -1
  6. omni_cortex-1.1.0.dist-info/RECORD +19 -0
  7. omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -140
  8. omni_cortex-1.0.10.dist-info/RECORD +0 -19
  9. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
  10. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  11. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  12. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  13. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
  14. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
  15. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
  16. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
  17. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  18. {omni_cortex-1.0.10.data → omni_cortex-1.1.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  19. {omni_cortex-1.0.10.dist-info → omni_cortex-1.1.0.dist-info}/WHEEL +0 -0
  20. {omni_cortex-1.0.10.dist-info → omni_cortex-1.1.0.dist-info}/entry_points.txt +0 -0
  21. {omni_cortex-1.0.10.dist-info → omni_cortex-1.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,290 @@
+ """Chat service for natural language queries about memories using Gemini Flash."""
+
+ import os
+ from typing import Optional, AsyncGenerator, Any
+
+ import google.generativeai as genai
+ from dotenv import load_dotenv
+
+ from database import search_memories, get_memories, create_memory
+ from models import FilterParams
+
+ # Load environment variables
+ load_dotenv()
+
+ # Configure Gemini
+ _api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+ _model: Optional[genai.GenerativeModel] = None
+
+
+ def get_model() -> Optional[genai.GenerativeModel]:
+     """Get or initialize the Gemini model."""
+     global _model
+     if _model is None and _api_key:
+         genai.configure(api_key=_api_key)
+         _model = genai.GenerativeModel("gemini-3-flash-preview")
+     return _model
+
+
+ def is_available() -> bool:
+     """Check if the chat service is available."""
+     return _api_key is not None
+
+
+ def _build_prompt(question: str, context_str: str) -> str:
+     """Build the prompt for the AI model."""
+     return f"""You are a helpful assistant that answers questions about stored memories and knowledge.
+
+ The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.
+
+ Here are the relevant memories:
+
+ {context_str}
+
+ User question: {question}
+
+ Instructions:
+ 1. Answer the question based on the memories provided
+ 2. If the memories don't contain relevant information, say so
+ 3. Reference specific memories when appropriate using [[Memory N]] format (e.g., "According to [[Memory 1]]...")
+ 4. Be concise but thorough
+ 5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible
+
+ Answer:"""
+
+
+ def _get_memories_and_sources(db_path: str, question: str, max_memories: int) -> tuple[str, list[dict]]:
+     """Get relevant memories and build context string and sources list."""
+     # Search for relevant memories
+     memories = search_memories(db_path, question, limit=max_memories)
+
+     # If no memories found via search, get recent ones
+     if not memories:
+         filters = FilterParams(
+             sort_by="last_accessed",
+             sort_order="desc",
+             limit=max_memories,
+             offset=0,
+         )
+         memories = get_memories(db_path, filters)
+
+     if not memories:
+         return "", []
+
+     # Build context from memories
+     memory_context = []
+     sources = []
+     for i, mem in enumerate(memories, 1):
+         memory_context.append(f"""
+ Memory {i}:
+ - Type: {mem.memory_type}
+ - Content: {mem.content}
+ - Context: {mem.context or 'N/A'}
+ - Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
+ - Status: {mem.status}
+ - Importance: {mem.importance_score}/100
+ """)
+         sources.append({
+             "id": mem.id,
+             "type": mem.memory_type,
+             "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
+             "tags": mem.tags,
+         })
+
+     context_str = "\n---\n".join(memory_context)
+     return context_str, sources
+
+
+ async def stream_ask_about_memories(
+     db_path: str,
+     question: str,
+     max_memories: int = 10,
+ ) -> AsyncGenerator[dict[str, Any], None]:
+     """Stream a response to a question about memories.
+
+     Yields events with type 'sources', 'chunk', 'done', or 'error'.
+     """
+     if not is_available():
+         yield {
+             "type": "error",
+             "data": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+         }
+         return
+
+     model = get_model()
+     if not model:
+         yield {
+             "type": "error",
+             "data": "Failed to initialize Gemini model.",
+         }
+         return
+
+     context_str, sources = _get_memories_and_sources(db_path, question, max_memories)
+
+     if not sources:
+         yield {
+             "type": "sources",
+             "data": [],
+         }
+         yield {
+             "type": "chunk",
+             "data": "No memories found in the database to answer your question.",
+         }
+         yield {
+             "type": "done",
+             "data": None,
+         }
+         return
+
+     # Yield sources first
+     yield {
+         "type": "sources",
+         "data": sources,
+     }
+
+     # Build and stream the response
+     prompt = _build_prompt(question, context_str)
+
+     try:
+         response = model.generate_content(prompt, stream=True)
+
+         for chunk in response:
+             if chunk.text:
+                 yield {
+                     "type": "chunk",
+                     "data": chunk.text,
+                 }
+
+         yield {
+             "type": "done",
+             "data": None,
+         }
+     except Exception as e:
+         yield {
+             "type": "error",
+             "data": f"Failed to generate response: {str(e)}",
+         }
+
+
+ async def save_conversation(
+     db_path: str,
+     messages: list[dict],
+     referenced_memory_ids: list[str] | None = None,
+     importance: int = 60,
+ ) -> dict:
+     """Save a chat conversation as a memory.
+
+     Args:
+         db_path: Path to the database file
+         messages: List of message dicts with 'role', 'content', 'timestamp'
+         referenced_memory_ids: IDs of memories referenced in the conversation
+         importance: Importance score for the memory
+
+     Returns:
+         Dict with memory_id and summary
+     """
+     if not messages:
+         raise ValueError("No messages to save")
+
+     # Format conversation into markdown
+     content_lines = ["## Chat Conversation\n"]
+     for msg in messages:
+         role = "**You**" if msg["role"] == "user" else "**Assistant**"
+         content_lines.append(f"### {role}\n{msg['content']}\n")
+
+     content = "\n".join(content_lines)
+
+     # Generate summary using Gemini if available
+     summary = "Chat conversation"
+     model = get_model()
+     if model:
+         try:
+             summary_prompt = f"""Summarize this conversation in one concise sentence (max 100 chars):
+
+ {content[:2000]}
+
+ Summary:"""
+             response = model.generate_content(summary_prompt)
+             summary = response.text.strip()[:100]
+         except Exception:
+             # Use fallback summary
+             first_user_msg = next((m for m in messages if m["role"] == "user"), None)
+             if first_user_msg:
+                 summary = f"Q: {first_user_msg['content'][:80]}..."
+
+     # Extract topics from conversation for tags
+     tags = ["chat", "conversation"]
+
+     # Create memory
+     memory_id = create_memory(
+         db_path=db_path,
+         content=content,
+         memory_type="conversation",
+         context=f"Chat conversation: {summary}",
+         tags=tags,
+         importance_score=importance,
+         related_memory_ids=referenced_memory_ids,
+     )
+
+     return {
+         "memory_id": memory_id,
+         "summary": summary,
+     }
+
+
+ async def ask_about_memories(
+     db_path: str,
+     question: str,
+     max_memories: int = 10,
+ ) -> dict:
+     """Ask a natural language question about memories (non-streaming).
+
+     Args:
+         db_path: Path to the database file
+         question: The user's question
+         max_memories: Maximum memories to include in context
+
+     Returns:
+         Dict with answer and sources
+     """
+     if not is_available():
+         return {
+             "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+             "sources": [],
+             "error": "api_key_missing",
+         }
+
+     model = get_model()
+     if not model:
+         return {
+             "answer": "Failed to initialize Gemini model.",
+             "sources": [],
+             "error": "model_init_failed",
+         }
+
+     context_str, sources = _get_memories_and_sources(db_path, question, max_memories)
+
+     if not sources:
+         return {
+             "answer": "No memories found in the database to answer your question.",
+             "sources": [],
+             "error": None,
+         }
+
+     prompt = _build_prompt(question, context_str)
+
+     try:
+         response = model.generate_content(prompt)
+         answer = response.text
+     except Exception as e:
+         return {
+             "answer": f"Failed to generate response: {str(e)}",
+             "sources": sources,
+             "error": "generation_failed",
+         }
+
+     return {
+         "answer": answer,
+         "sources": sources,
+         "error": None,
+     }
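
Note: the new stream_ask_about_memories generator yields typed events ('sources' first, then 'chunk' items, then 'done', or 'error'). A minimal consumption sketch, assuming a local database path and that the module is importable as chat_service; none of the values below come from the diff itself:

# Illustrative sketch only; the db path and printing behavior are assumptions.
import asyncio
import chat_service

async def demo() -> None:
    async for event in chat_service.stream_ask_about_memories(
        db_path="cortex.db",  # hypothetical database path
        question="What did we decide about caching?",
        max_memories=5,
    ):
        if event["type"] == "sources":
            print(f"{len(event['data'])} source memories")
        elif event["type"] == "chunk":
            print(event["data"], end="", flush=True)
        elif event["type"] == "error":
            print("error:", event["data"])
        # 'done' carries no payload; the generator ends after it

asyncio.run(demo())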
@@ -727,3 +727,81 @@ def get_relationship_graph(db_path: str, center_id: Optional[str] = None, depth:
  })
 
  return {"nodes": list(nodes.values()), "edges": edges}
+
+
+ def create_memory(
+     db_path: str,
+     content: str,
+     memory_type: str = "other",
+     context: Optional[str] = None,
+     tags: Optional[list[str]] = None,
+     importance_score: int = 50,
+     related_memory_ids: Optional[list[str]] = None,
+ ) -> str:
+     """Create a new memory and return its ID.
+
+     Args:
+         db_path: Path to the database file
+         content: Memory content
+         memory_type: Type of memory (e.g., 'decision', 'solution', 'conversation')
+         context: Additional context
+         tags: List of tags
+         importance_score: Importance score (1-100)
+         related_memory_ids: IDs of related memories to create relationships with
+
+     Returns:
+         The ID of the created memory
+     """
+     import uuid
+
+     conn = get_write_connection(db_path)
+
+     # Generate ID
+     memory_id = f"mem_{int(datetime.now().timestamp() * 1000)}_{uuid.uuid4().hex[:8]}"
+     now = datetime.now().isoformat()
+
+     # Insert memory
+     conn.execute(
+         """
+         INSERT INTO memories (id, content, context, type, status, importance_score, access_count, created_at, last_accessed, tags)
+         VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+         """,
+         (
+             memory_id,
+             content,
+             context,
+             memory_type,
+             "fresh",
+             importance_score,
+             0,
+             now,
+             now,
+             json.dumps(tags) if tags else None,
+         ),
+     )
+
+     # Create relationships if related_memory_ids provided
+     if related_memory_ids:
+         # Check if memory_relationships table exists
+         table_check = conn.execute(
+             "SELECT name FROM sqlite_master WHERE type='table' AND name='memory_relationships'"
+         ).fetchone()
+
+         if table_check:
+             for related_id in related_memory_ids:
+                 try:
+                     conn.execute(
+                         """
+                         INSERT INTO memory_relationships (source_memory_id, target_memory_id, relationship_type, strength)
+                         VALUES (?, ?, ?, ?)
+                         """,
+                         (memory_id, related_id, "derived_from", 0.8),
+                     )
+                 except Exception:
+                     # Ignore if related memory doesn't exist
+                     pass
+
+     conn.commit()
+     conn.close()
+
+     return memory_id
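
Note: create_memory generates an ID of the form mem_<epoch-ms>_<8-hex>, inserts the row with status "fresh", and optionally records "derived_from" relationships. A hedged usage sketch; the database path and field values are placeholders, not taken from the diff:

# Sketch only: "cortex.db" and the content below are invented for illustration.
from database import create_memory

memory_id = create_memory(
    db_path="cortex.db",
    content="Switched the dashboard chat API to SSE streaming.",
    memory_type="decision",
    context="Dashboard backend chat feature",
    tags=["chat", "sse"],
    importance_score=70,
    related_memory_ids=None,  # pass existing memory IDs to record 'derived_from' links
)
print(memory_id)  # e.g. mem_1717690000000_a1b2c3d4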
@@ -39,7 +39,7 @@ from database import (
      update_memory,
  )
  from logging_config import log_success, log_error
- from models import ChatRequest, ChatResponse, FilterParams, MemoryUpdate, ProjectInfo, ProjectRegistration
+ from models import ChatRequest, ChatResponse, ConversationSaveRequest, ConversationSaveResponse, FilterParams, MemoryUpdate, ProjectInfo, ProjectRegistration
  from project_config import (
      load_config,
      add_registered_project,
@@ -573,6 +573,63 @@ async def chat_with_memories(
          raise
 
 
+ @app.get("/api/chat/stream")
+ async def stream_chat(
+     project: str = Query(..., description="Path to the database file"),
+     question: str = Query(..., description="The question to ask"),
+     max_memories: int = Query(10, ge=1, le=50),
+ ):
+     """SSE endpoint for streaming chat responses."""
+     from fastapi.responses import StreamingResponse
+
+     if not Path(project).exists():
+         raise HTTPException(status_code=404, detail="Database not found")
+
+     async def event_generator():
+         try:
+             async for event in chat_service.stream_ask_about_memories(project, question, max_memories):
+                 yield f"data: {json.dumps(event)}\n\n"
+         except Exception as e:
+             yield f"data: {json.dumps({'type': 'error', 'data': str(e)})}\n\n"
+
+     return StreamingResponse(
+         event_generator(),
+         media_type="text/event-stream",
+         headers={
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "X-Accel-Buffering": "no",
+         }
+     )
+
+
+ @app.post("/api/chat/save", response_model=ConversationSaveResponse)
+ async def save_chat_conversation(
+     request: ConversationSaveRequest,
+     project: str = Query(..., description="Path to the database file"),
+ ):
+     """Save a chat conversation as a memory."""
+     try:
+         if not Path(project).exists():
+             log_error("/api/chat/save", FileNotFoundError("Database not found"))
+             raise HTTPException(status_code=404, detail="Database not found")
+
+         result = await chat_service.save_conversation(
+             project,
+             [msg.model_dump() for msg in request.messages],
+             request.referenced_memory_ids,
+             request.importance or 60,
+         )
+
+         log_success("/api/chat/save", memory_id=result["memory_id"], messages=len(request.messages))
+         return ConversationSaveResponse(**result)
+     except HTTPException:
+         raise
+     except Exception as e:
+         log_error("/api/chat/save", e)
+         raise
+
+
  # --- WebSocket Endpoint ---
 
 
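
Note: GET /api/chat/stream emits Server-Sent Events, one "data: <json>" frame per event from stream_ask_about_memories. A rough client sketch, assuming the dashboard backend is reachable at http://localhost:8000 (the host/port and project path are not specified in the diff) and using the httpx library:

# Assumptions: backend URL and the example project path are placeholders.
import json
import httpx

params = {
    "project": "/path/to/cortex.db",
    "question": "What errors came up last week?",
    "max_memories": 10,
}
with httpx.stream("GET", "http://localhost:8000/api/chat/stream", params=params, timeout=None) as resp:
    for line in resp.iter_lines():
        if not line.startswith("data: "):
            continue  # skip blank separator lines between SSE frames
        event = json.loads(line[len("data: "):])
        if event["type"] == "chunk":
            print(event["data"], end="")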
@@ -163,3 +163,26 @@ class ChatResponse(BaseModel):
      answer: str
      sources: list[ChatSource]
      error: Optional[str] = None
+
+
+ class ConversationMessage(BaseModel):
+     """A message in a conversation."""
+
+     role: str  # 'user' or 'assistant'
+     content: str
+     timestamp: str
+
+
+ class ConversationSaveRequest(BaseModel):
+     """Request to save a conversation as memory."""
+
+     messages: list[ConversationMessage]
+     referenced_memory_ids: Optional[list[str]] = None
+     importance: Optional[int] = Field(default=60, ge=1, le=100)
+
+
+ class ConversationSaveResponse(BaseModel):
+     """Response after saving a conversation."""
+
+     memory_id: str
+     summary: str
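
Note: these models give POST /api/chat/save a typed request/response shape. A hedged sketch of building a request body with them; the message contents, timestamps, and memory ID below are invented for illustration:

# Sketch: all literal values here are made up, not taken from the diff.
from models import ConversationMessage, ConversationSaveRequest

request = ConversationSaveRequest(
    messages=[
        ConversationMessage(role="user", content="Why did we pick SQLite?", timestamp="2024-06-01T12:00:00Z"),
        ConversationMessage(role="assistant", content="According to [[Memory 1]], it keeps the dashboard dependency-free.", timestamp="2024-06-01T12:00:05Z"),
    ],
    referenced_memory_ids=["mem_1717200000000_ab12cd34"],
    importance=60,
)
print(request.model_dump_json(indent=2))  # JSON body for POST /api/chat/save?project=<db path>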
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: omni-cortex
- Version: 1.0.10
+ Version: 1.1.0
  Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
  Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
  Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
@@ -0,0 +1,19 @@
+ omni_cortex-1.1.0.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=zXy30KNDW6UoWP0nwq5n320r1wFa-tE6V4QuSdDzx8w,5106
+ omni_cortex-1.1.0.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=SlvvEKsIkolDG5Y_35VezY2e7kRpbj1GiDlBW-naj2g,4900
+ omni_cortex-1.1.0.data/data/share/omni-cortex/hooks/stop.py,sha256=T1bwcmbTLj0gzjrVvFBT1zB6wff4J2YkYBAY-ZxZI5g,5336
+ omni_cortex-1.1.0.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=hmTvlwK5w29nOLUGCwaaIslEuLgA1-JezXbLwxWDSdM,8265
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=HNwfyfebHq0Gdooc4bZdyNp_FD7WFx9z6KJkWLWtHp8,25400
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py,sha256=dFcNqfw2jTfUjFERV_Pr5r5PjY9wSQGXEYPf0AyR5Yk,2869
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=kV6XsaheZd-SlU9yS3ehT9rnuCJ_W1X-iJ9t2AkV7aU,26817
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=7pldkJfUFgO9xD3xE7o-Z31w8yJMC0G4LFGU5r21IAc,4372
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/project_config.py,sha256=ZxGoeRpHvN5qQyf2hRxrAZiHrPSwdQp59f0di6O1LKM,4352
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=lwFXS8iJbOoxf7FAyo2TjH25neaMHiJ8B3jS57XxtDI,5713
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=e7IMinX0BR2EcnpPwHYCdDJQDzuDzQ3D-FmPOiPKfGA,131248
+ omni_cortex-1.1.0.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=fv16XkRkgN4SDNwTiP_p9qFnWta9lIpAXgKbFETZ7uM,2770
+ omni_cortex-1.1.0.dist-info/METADATA,sha256=hJN71VA7mokkLtxg2iAyezSybwnuQlZkO-jwKnvBj64,9855
+ omni_cortex-1.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ omni_cortex-1.1.0.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
+ omni_cortex-1.1.0.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
+ omni_cortex-1.1.0.dist-info/RECORD,,
@@ -1,140 +0,0 @@
- """Chat service for natural language queries about memories using Gemini Flash."""
-
- import os
- from typing import Optional
-
- import google.generativeai as genai
- from dotenv import load_dotenv
-
- from database import search_memories, get_memories
- from models import FilterParams
-
- # Load environment variables
- load_dotenv()
-
- # Configure Gemini
- _api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
- _model: Optional[genai.GenerativeModel] = None
-
-
- def get_model() -> Optional[genai.GenerativeModel]:
-     """Get or initialize the Gemini model."""
-     global _model
-     if _model is None and _api_key:
-         genai.configure(api_key=_api_key)
-         _model = genai.GenerativeModel("gemini-3-flash-preview")
-     return _model
-
-
- def is_available() -> bool:
-     """Check if the chat service is available."""
-     return _api_key is not None
-
-
- async def ask_about_memories(
-     db_path: str,
-     question: str,
-     max_memories: int = 10,
- ) -> dict:
-     """Ask a natural language question about memories.
-
-     Args:
-         db_path: Path to the database file
-         question: The user's question
-         max_memories: Maximum memories to include in context
-
-     Returns:
-         Dict with answer and sources
-     """
-     if not is_available():
-         return {
-             "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
-             "sources": [],
-             "error": "api_key_missing",
-         }
-
-     model = get_model()
-     if not model:
-         return {
-             "answer": "Failed to initialize Gemini model.",
-             "sources": [],
-             "error": "model_init_failed",
-         }
-
-     # Search for relevant memories
-     memories = search_memories(db_path, question, limit=max_memories)
-
-     # If no memories found via search, get recent ones
-     if not memories:
-         filters = FilterParams(
-             sort_by="last_accessed",
-             sort_order="desc",
-             limit=max_memories,
-             offset=0,
-         )
-         memories = get_memories(db_path, filters)
-
-     if not memories:
-         return {
-             "answer": "No memories found in the database to answer your question.",
-             "sources": [],
-             "error": None,
-         }
-
-     # Build context from memories
-     memory_context = []
-     sources = []
-     for i, mem in enumerate(memories, 1):
-         memory_context.append(f"""
- Memory {i}:
- - Type: {mem.memory_type}
- - Content: {mem.content}
- - Context: {mem.context or 'N/A'}
- - Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
- - Status: {mem.status}
- - Importance: {mem.importance_score}/100
- """)
-         sources.append({
-             "id": mem.id,
-             "type": mem.memory_type,
-             "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
-             "tags": mem.tags,
-         })
-
-     context_str = "\n---\n".join(memory_context)
-
-     # Create prompt
-     prompt = f"""You are a helpful assistant that answers questions about stored memories and knowledge.
-
- The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.
-
- Here are the relevant memories:
-
- {context_str}
-
- User question: {question}
-
- Instructions:
- 1. Answer the question based on the memories provided
- 2. If the memories don't contain relevant information, say so
- 3. Reference specific memories when appropriate (e.g., "According to memory 1...")
- 4. Be concise but thorough
- 5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible
-
- Answer:"""
-
-     try:
-         response = model.generate_content(prompt)
-         answer = response.text
-     except Exception as e:
-         return {
-             "answer": f"Failed to generate response: {str(e)}",
-             "sources": sources,
-             "error": "generation_failed",
-         }
-
-     return {
-         "answer": answer,
-         "sources": sources,
-         "error": None,
-     }
@@ -1,19 +0,0 @@
- omni_cortex-1.0.10.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=zXy30KNDW6UoWP0nwq5n320r1wFa-tE6V4QuSdDzx8w,5106
- omni_cortex-1.0.10.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=SlvvEKsIkolDG5Y_35VezY2e7kRpbj1GiDlBW-naj2g,4900
- omni_cortex-1.0.10.data/data/share/omni-cortex/hooks/stop.py,sha256=T1bwcmbTLj0gzjrVvFBT1zB6wff4J2YkYBAY-ZxZI5g,5336
- omni_cortex-1.0.10.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=Le8sX4PoguyL7MtHXQ1eDM2EKUCqBV_IPJ9WF1p069o,3982
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=VRy-Eh4XsXNp-LnAG3w7Lsm5BaJzlH-OtG9tDXpV8_o,23052
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/logging_config.py,sha256=dFcNqfw2jTfUjFERV_Pr5r5PjY9wSQGXEYPf0AyR5Yk,2869
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=Z6r6wx6w3DEGHa4uU-KAGE1NDJHN9NFeBXr8Ig9erDc,24766
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=NPWLHGG7aXbnQvOcjAPjG74PnZoDp75IiaNJLDjqZvI,3832
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/project_config.py,sha256=ZxGoeRpHvN5qQyf2hRxrAZiHrPSwdQp59f0di6O1LKM,4352
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=lwFXS8iJbOoxf7FAyo2TjH25neaMHiJ8B3jS57XxtDI,5713
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=e7IMinX0BR2EcnpPwHYCdDJQDzuDzQ3D-FmPOiPKfGA,131248
- omni_cortex-1.0.10.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=fv16XkRkgN4SDNwTiP_p9qFnWta9lIpAXgKbFETZ7uM,2770
- omni_cortex-1.0.10.dist-info/METADATA,sha256=VR9iYE2IlsE1B7aX9kvJEM7sOacvHhhs-nItv12cT6E,9856
- omni_cortex-1.0.10.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- omni_cortex-1.0.10.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
- omni_cortex-1.0.10.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
- omni_cortex-1.0.10.dist-info/RECORD,,