omni-cortex 1.0.3-py3-none-any.whl → 1.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/chat_service.py +140 -0
  2. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/database.py +729 -0
  3. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/main.py +661 -0
  4. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/models.py +140 -0
  5. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/project_scanner.py +141 -0
  6. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/pyproject.toml +23 -0
  7. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/uv.lock +697 -0
  8. omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py +82 -0
  9. {omni_cortex-1.0.3.dist-info → omni_cortex-1.0.5.dist-info}/METADATA +40 -1
  10. omni_cortex-1.0.5.dist-info/RECORD +17 -0
  11. {omni_cortex-1.0.3.dist-info → omni_cortex-1.0.5.dist-info}/entry_points.txt +1 -0
  12. omni_cortex-1.0.3.dist-info/RECORD +0 -9
  13. {omni_cortex-1.0.3.data → omni_cortex-1.0.5.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
  14. {omni_cortex-1.0.3.data → omni_cortex-1.0.5.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
  15. {omni_cortex-1.0.3.data → omni_cortex-1.0.5.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  16. {omni_cortex-1.0.3.data → omni_cortex-1.0.5.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  17. {omni_cortex-1.0.3.dist-info → omni_cortex-1.0.5.dist-info}/WHEEL +0 -0
  18. {omni_cortex-1.0.3.dist-info → omni_cortex-1.0.5.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ omni_cortex-1.0.5.data/data/share/omni-cortex/dashboard/backend/chat_service.py
@@ -0,0 +1,140 @@
+"""Chat service for natural language queries about memories using Gemini Flash."""
+
+import os
+from typing import Optional
+
+import google.generativeai as genai
+from dotenv import load_dotenv
+
+from database import search_memories, get_memories
+from models import FilterParams
+
+# Load environment variables
+load_dotenv()
+
+# Configure Gemini
+_api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+_model: Optional[genai.GenerativeModel] = None
+
+
+def get_model() -> Optional[genai.GenerativeModel]:
+    """Get or initialize the Gemini model."""
+    global _model
+    if _model is None and _api_key:
+        genai.configure(api_key=_api_key)
+        _model = genai.GenerativeModel("gemini-2.0-flash-exp")
+    return _model
+
+
+def is_available() -> bool:
+    """Check if the chat service is available."""
+    return _api_key is not None
+
+
+async def ask_about_memories(
+    db_path: str,
+    question: str,
+    max_memories: int = 10,
+) -> dict:
+    """Ask a natural language question about memories.
+
+    Args:
+        db_path: Path to the database file
+        question: The user's question
+        max_memories: Maximum memories to include in context
+
+    Returns:
+        Dict with answer and sources
+    """
+    if not is_available():
+        return {
+            "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+            "sources": [],
+            "error": "api_key_missing",
+        }
+
+    model = get_model()
+    if not model:
+        return {
+            "answer": "Failed to initialize Gemini model.",
+            "sources": [],
+            "error": "model_init_failed",
+        }
+
+    # Search for relevant memories
+    memories = search_memories(db_path, question, limit=max_memories)
+
+    # If no memories found via search, get recent ones
+    if not memories:
+        filters = FilterParams(
+            sort_by="last_accessed",
+            sort_order="desc",
+            limit=max_memories,
+            offset=0,
+        )
+        memories = get_memories(db_path, filters)
+
+    if not memories:
+        return {
+            "answer": "No memories found in the database to answer your question.",
+            "sources": [],
+            "error": None,
+        }
+
+    # Build context from memories
+    memory_context = []
+    sources = []
+    for i, mem in enumerate(memories, 1):
+        memory_context.append(f"""
+Memory {i}:
+- Type: {mem.memory_type}
+- Content: {mem.content}
+- Context: {mem.context or 'N/A'}
+- Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
+- Status: {mem.status}
+- Importance: {mem.importance_score}/100
+""")
+        sources.append({
+            "id": mem.id,
+            "type": mem.memory_type,
+            "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
+            "tags": mem.tags,
+        })
+
+    context_str = "\n---\n".join(memory_context)
+
+    # Create prompt
+    prompt = f"""You are a helpful assistant that answers questions about stored memories and knowledge.
+
+The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.
+
+Here are the relevant memories:
+
+{context_str}
+
+User question: {question}
+
+Instructions:
+1. Answer the question based on the memories provided
+2. If the memories don't contain relevant information, say so
+3. Reference specific memories when appropriate (e.g., "According to memory 1...")
+4. Be concise but thorough
+5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible
+
+Answer:"""
+
+    try:
+        response = model.generate_content(prompt)
+        answer = response.text
+    except Exception as e:
+        return {
+            "answer": f"Failed to generate response: {str(e)}",
+            "sources": sources,
+            "error": "generation_failed",
+        }
+
+    return {
+        "answer": answer,
+        "sources": sources,
+        "error": None,
+    }
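
Below is a minimal usage sketch, not part of the package, showing how the new ask_about_memories coroutine might be driven from a script. It assumes chat_service.py is importable, that GEMINI_API_KEY or GOOGLE_API_KEY is set (a .env file next to the backend also works, since the module calls load_dotenv()), and that the db_path value "memories.db" is a placeholder; the real dashboard supplies its own database path.

import asyncio

from chat_service import ask_about_memories, is_available


async def main() -> None:
    # Fail fast if no API key is configured; mirrors the service's own check.
    if not is_available():
        print("Set GEMINI_API_KEY or GOOGLE_API_KEY first.")
        return

    result = await ask_about_memories(
        db_path="memories.db",  # placeholder; the dashboard supplies the real path
        question="What decisions did we make about authentication?",
        max_memories=5,
    )
    print(result["answer"])
    for src in result["sources"]:
        print(f"- [{src['type']}] {src['content_preview']}")


if __name__ == "__main__":
    asyncio.run(main())

Note that the returned dict always carries answer, sources, and error keys, so callers can branch on error rather than catching exceptions; the service itself swallows generation failures and reports them as error "generation_failed".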