omni-cortex 1.0.4-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +290 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/database.py +78 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/image_service.py +533 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py +92 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/main.py +324 -42
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/models.py +93 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/project_config.py +170 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +45 -22
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/METADATA +26 -2
- omni_cortex-1.2.0.dist-info/RECORD +20 -0
- omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -140
- omni_cortex-1.0.4.dist-info/RECORD +0 -17
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/stop.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/WHEEL +0 -0
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/entry_points.txt +0 -0
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/licenses/LICENSE +0 -0
omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py
@@ -0,0 +1,290 @@
+"""Chat service for natural language queries about memories using Gemini Flash."""
+
+import os
+from typing import Optional, AsyncGenerator, Any
+
+import google.generativeai as genai
+from dotenv import load_dotenv
+
+from database import search_memories, get_memories, create_memory
+from models import FilterParams
+
+# Load environment variables
+load_dotenv()
+
+# Configure Gemini
+_api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+_model: Optional[genai.GenerativeModel] = None
+
+
+def get_model() -> Optional[genai.GenerativeModel]:
+    """Get or initialize the Gemini model."""
+    global _model
+    if _model is None and _api_key:
+        genai.configure(api_key=_api_key)
+        _model = genai.GenerativeModel("gemini-3-flash-preview")
+    return _model
+
+
+def is_available() -> bool:
+    """Check if the chat service is available."""
+    return _api_key is not None
+
+
+def _build_prompt(question: str, context_str: str) -> str:
+    """Build the prompt for the AI model."""
+    return f"""You are a helpful assistant that answers questions about stored memories and knowledge.
+
+The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.
+
+Here are the relevant memories:
+
+{context_str}
+
+User question: {question}
+
+Instructions:
+1. Answer the question based on the memories provided
+2. If the memories don't contain relevant information, say so
+3. Reference specific memories when appropriate using [[Memory N]] format (e.g., "According to [[Memory 1]]...")
+4. Be concise but thorough
+5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible
+
+Answer:"""
+
+
+def _get_memories_and_sources(db_path: str, question: str, max_memories: int) -> tuple[str, list[dict]]:
+    """Get relevant memories and build context string and sources list."""
+    # Search for relevant memories
+    memories = search_memories(db_path, question, limit=max_memories)
+
+    # If no memories found via search, get recent ones
+    if not memories:
+        filters = FilterParams(
+            sort_by="last_accessed",
+            sort_order="desc",
+            limit=max_memories,
+            offset=0,
+        )
+        memories = get_memories(db_path, filters)
+
+    if not memories:
+        return "", []
+
+    # Build context from memories
+    memory_context = []
+    sources = []
+    for i, mem in enumerate(memories, 1):
+        memory_context.append(f"""
+Memory {i}:
+- Type: {mem.memory_type}
+- Content: {mem.content}
+- Context: {mem.context or 'N/A'}
+- Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
+- Status: {mem.status}
+- Importance: {mem.importance_score}/100
+""")
+        sources.append({
+            "id": mem.id,
+            "type": mem.memory_type,
+            "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
+            "tags": mem.tags,
+        })
+
+    context_str = "\n---\n".join(memory_context)
+    return context_str, sources
+
+
+async def stream_ask_about_memories(
+    db_path: str,
+    question: str,
+    max_memories: int = 10,
+) -> AsyncGenerator[dict[str, Any], None]:
+    """Stream a response to a question about memories.
+
+    Yields events with type 'sources', 'chunk', 'done', or 'error'.
+    """
+    if not is_available():
+        yield {
+            "type": "error",
+            "data": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+        }
+        return
+
+    model = get_model()
+    if not model:
+        yield {
+            "type": "error",
+            "data": "Failed to initialize Gemini model.",
+        }
+        return
+
+    context_str, sources = _get_memories_and_sources(db_path, question, max_memories)
+
+    if not sources:
+        yield {
+            "type": "sources",
+            "data": [],
+        }
+        yield {
+            "type": "chunk",
+            "data": "No memories found in the database to answer your question.",
+        }
+        yield {
+            "type": "done",
+            "data": None,
+        }
+        return
+
+    # Yield sources first
+    yield {
+        "type": "sources",
+        "data": sources,
+    }
+
+    # Build and stream the response
+    prompt = _build_prompt(question, context_str)
+
+    try:
+        response = model.generate_content(prompt, stream=True)
+
+        for chunk in response:
+            if chunk.text:
+                yield {
+                    "type": "chunk",
+                    "data": chunk.text,
+                }
+
+        yield {
+            "type": "done",
+            "data": None,
+        }
+    except Exception as e:
+        yield {
+            "type": "error",
+            "data": f"Failed to generate response: {str(e)}",
+        }
+
+
+async def save_conversation(
+    db_path: str,
+    messages: list[dict],
+    referenced_memory_ids: list[str] | None = None,
+    importance: int = 60,
+) -> dict:
+    """Save a chat conversation as a memory.
+
+    Args:
+        db_path: Path to the database file
+        messages: List of message dicts with 'role', 'content', 'timestamp'
+        referenced_memory_ids: IDs of memories referenced in the conversation
+        importance: Importance score for the memory
+
+    Returns:
+        Dict with memory_id and summary
+    """
+    if not messages:
+        raise ValueError("No messages to save")
+
+    # Format conversation into markdown
+    content_lines = ["## Chat Conversation\n"]
+    for msg in messages:
+        role = "**You**" if msg["role"] == "user" else "**Assistant**"
+        content_lines.append(f"### {role}\n{msg['content']}\n")
+
+    content = "\n".join(content_lines)
+
+    # Generate summary using Gemini if available
+    summary = "Chat conversation"
+    model = get_model()
+    if model:
+        try:
+            summary_prompt = f"""Summarize this conversation in one concise sentence (max 100 chars):
+
+{content[:2000]}
+
+Summary:"""
+            response = model.generate_content(summary_prompt)
+            summary = response.text.strip()[:100]
+        except Exception:
+            # Use fallback summary
+            first_user_msg = next((m for m in messages if m["role"] == "user"), None)
+            if first_user_msg:
+                summary = f"Q: {first_user_msg['content'][:80]}..."
+
+    # Extract topics from conversation for tags
+    tags = ["chat", "conversation"]
+
+    # Create memory
+    memory_id = create_memory(
+        db_path=db_path,
+        content=content,
+        memory_type="conversation",
+        context=f"Chat conversation: {summary}",
+        tags=tags,
+        importance_score=importance,
+        related_memory_ids=referenced_memory_ids,
+    )
+
+    return {
+        "memory_id": memory_id,
+        "summary": summary,
+    }
+
+
+async def ask_about_memories(
+    db_path: str,
+    question: str,
+    max_memories: int = 10,
+) -> dict:
+    """Ask a natural language question about memories (non-streaming).
+
+    Args:
+        db_path: Path to the database file
+        question: The user's question
+        max_memories: Maximum memories to include in context
+
+    Returns:
+        Dict with answer and sources
+    """
+    if not is_available():
+        return {
+            "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+            "sources": [],
+            "error": "api_key_missing",
+        }
+
+    model = get_model()
+    if not model:
+        return {
+            "answer": "Failed to initialize Gemini model.",
+            "sources": [],
+            "error": "model_init_failed",
+        }
+
+    context_str, sources = _get_memories_and_sources(db_path, question, max_memories)
+
+    if not sources:
+        return {
+            "answer": "No memories found in the database to answer your question.",
+            "sources": [],
+            "error": None,
+        }
+
+    prompt = _build_prompt(question, context_str)
+
+    try:
+        response = model.generate_content(prompt)
+        answer = response.text
+    except Exception as e:
+        return {
+            "answer": f"Failed to generate response: {str(e)}",
+            "sources": sources,
+            "error": "generation_failed",
+        }
+
+    return {
+        "answer": answer,
+        "sources": sources,
+        "error": None,
+    }
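The new `chat_service.py` exposes `stream_ask_about_memories` as an async generator of typed events: a `sources` event first, then zero or more `chunk` events, then `done` or `error`. Below is a minimal sketch of how a caller might relay that stream over Server-Sent Events, assuming a FastAPI app; the route path, request model, and `db_path` are illustrative and not taken from this package's `main.py`.

```python
# Hypothetical consumer of stream_ask_about_memories; the FastAPI wiring,
# route path, and database path are assumptions for illustration only.
import json

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from chat_service import stream_ask_about_memories

app = FastAPI()


class ChatRequest(BaseModel):
    question: str
    max_memories: int = 10


@app.post("/api/chat/stream")
async def chat_stream(req: ChatRequest):
    async def event_stream():
        # Each event is a dict with "type" ('sources' | 'chunk' | 'done' |
        # 'error') and "data"; emit one SSE message per event.
        async for event in stream_ask_about_memories(
            db_path="memories.db",  # illustrative path
            question=req.question,
            max_memories=req.max_memories,
        ):
            yield f"data: {json.dumps(event)}\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")
```

A frontend can render the `sources` event immediately and append `chunk` text as it arrives, which is what the event ordering above is designed for.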
{omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/database.py
@@ -727,3 +727,81 @@ def get_relationship_graph(db_path: str, center_id: Optional[str] = None, depth:
         })
 
     return {"nodes": list(nodes.values()), "edges": edges}
+
+
+def create_memory(
+    db_path: str,
+    content: str,
+    memory_type: str = "other",
+    context: Optional[str] = None,
+    tags: Optional[list[str]] = None,
+    importance_score: int = 50,
+    related_memory_ids: Optional[list[str]] = None,
+) -> str:
+    """Create a new memory and return its ID.
+
+    Args:
+        db_path: Path to the database file
+        content: Memory content
+        memory_type: Type of memory (e.g., 'decision', 'solution', 'conversation')
+        context: Additional context
+        tags: List of tags
+        importance_score: Importance score (1-100)
+        related_memory_ids: IDs of related memories to create relationships with
+
+    Returns:
+        The ID of the created memory
+    """
+    import uuid
+
+    conn = get_write_connection(db_path)
+
+    # Generate ID
+    memory_id = f"mem_{int(datetime.now().timestamp() * 1000)}_{uuid.uuid4().hex[:8]}"
+    now = datetime.now().isoformat()
+
+    # Insert memory
+    conn.execute(
+        """
+        INSERT INTO memories (id, content, context, type, status, importance_score, access_count, created_at, last_accessed, tags)
+        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            memory_id,
+            content,
+            context,
+            memory_type,
+            "fresh",
+            importance_score,
+            0,
+            now,
+            now,
+            json.dumps(tags) if tags else None,
+        ),
+    )
+
+    # Create relationships if related_memory_ids provided
+    if related_memory_ids:
+        # Check if memory_relationships table exists
+        table_check = conn.execute(
+            "SELECT name FROM sqlite_master WHERE type='table' AND name='memory_relationships'"
+        ).fetchone()
+
+        if table_check:
+            for related_id in related_memory_ids:
+                try:
+                    conn.execute(
+                        """
+                        INSERT INTO memory_relationships (source_memory_id, target_memory_id, relationship_type, strength)
+                        VALUES (?, ?, ?, ?)
+                        """,
+                        (memory_id, related_id, "derived_from", 0.8),
+                    )
+                except Exception:
+                    # Ignore if related memory doesn't exist
+                    pass
+
+    conn.commit()
+    conn.close()
+
+    return memory_id
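This `database.py` hunk adds the `create_memory` writer that `save_conversation` in the new `chat_service.py` calls. A hypothetical direct call is sketched below; the database path and field values are illustrative, while the keyword arguments match the signature added in this diff.

```python
# Illustrative use of the new database.create_memory helper; the db_path,
# content, and tag values here are made up for the example.
from database import create_memory

memory_id = create_memory(
    db_path="memories.db",            # assumed SQLite file location
    content="Chose SQLite FTS for memory search.",
    memory_type="decision",
    context="Dashboard backend storage design",
    tags=["architecture", "sqlite"],
    importance_score=70,
    related_memory_ids=None,          # pass existing IDs to link memories
)
print(memory_id)  # IDs look like "mem_<millis>_<8 hex chars>" per the code above
```

Note that relationship inserts are best-effort: missing targets are silently skipped, and the `memory_relationships` table is only used if it exists.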