omni-cortex 1.0.4-py3-none-any.whl → 1.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. omni_cortex-1.3.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +308 -0
  2. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/database.py +286 -0
  3. omni_cortex-1.3.0.data/data/share/omni-cortex/dashboard/backend/image_service.py +543 -0
  4. omni_cortex-1.3.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py +92 -0
  5. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/main.py +385 -42
  6. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/models.py +93 -0
  7. omni_cortex-1.3.0.data/data/share/omni-cortex/dashboard/backend/project_config.py +170 -0
  8. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +45 -22
  9. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +414 -1
  10. {omni_cortex-1.0.4.dist-info → omni_cortex-1.3.0.dist-info}/METADATA +26 -2
  11. omni_cortex-1.3.0.dist-info/RECORD +20 -0
  12. omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -140
  13. omni_cortex-1.0.4.dist-info/RECORD +0 -17
  14. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  15. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
  16. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
  17. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
  18. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  19. {omni_cortex-1.0.4.data → omni_cortex-1.3.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  20. {omni_cortex-1.0.4.dist-info → omni_cortex-1.3.0.dist-info}/WHEEL +0 -0
  21. {omni_cortex-1.0.4.dist-info → omni_cortex-1.3.0.dist-info}/entry_points.txt +0 -0
  22. {omni_cortex-1.0.4.dist-info → omni_cortex-1.3.0.dist-info}/licenses/LICENSE +0 -0
omni_cortex-1.3.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py (new file)
@@ -0,0 +1,308 @@
+"""Chat service for natural language queries about memories using Gemini Flash."""
+
+import os
+from typing import Optional, AsyncGenerator, Any
+
+from dotenv import load_dotenv
+
+from database import search_memories, get_memories, create_memory
+from models import FilterParams
+
+# Load environment variables
+load_dotenv()
+
+# Configure Gemini
+_api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+_client = None
+
+
+def get_client():
+    """Get or initialize the Gemini client."""
+    global _client
+    if _client is None and _api_key:
+        try:
+            from google import genai
+            _client = genai.Client(api_key=_api_key)
+        except ImportError:
+            return None
+    return _client
+
+
+def is_available() -> bool:
+    """Check if the chat service is available."""
+    if not _api_key:
+        return False
+    try:
+        from google import genai
+        return True
+    except ImportError:
+        return False
+
+
+def _build_prompt(question: str, context_str: str) -> str:
+    """Build the prompt for the AI model."""
+    return f"""You are a helpful assistant that answers questions about stored memories and knowledge.
+
+The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.
+
+Here are the relevant memories:
+
+{context_str}
+
+User question: {question}
+
+Instructions:
+1. Answer the question based on the memories provided
+2. If the memories don't contain relevant information, say so
+3. Reference specific memories when appropriate using [[Memory N]] format (e.g., "According to [[Memory 1]]...")
+4. Be concise but thorough
+5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible
+
+Answer:"""
+
+
+def _get_memories_and_sources(db_path: str, question: str, max_memories: int) -> tuple[str, list[dict]]:
+    """Get relevant memories and build context string and sources list."""
+    # Search for relevant memories
+    memories = search_memories(db_path, question, limit=max_memories)
+
+    # If no memories found via search, get recent ones
+    if not memories:
+        filters = FilterParams(
+            sort_by="last_accessed",
+            sort_order="desc",
+            limit=max_memories,
+            offset=0,
+        )
+        memories = get_memories(db_path, filters)
+
+    if not memories:
+        return "", []
+
+    # Build context from memories
+    memory_context = []
+    sources = []
+    for i, mem in enumerate(memories, 1):
+        memory_context.append(f"""
+Memory {i}:
+- Type: {mem.memory_type}
+- Content: {mem.content}
+- Context: {mem.context or 'N/A'}
+- Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
+- Status: {mem.status}
+- Importance: {mem.importance_score}/100
+""")
+        sources.append({
+            "id": mem.id,
+            "type": mem.memory_type,
+            "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
+            "tags": mem.tags,
+        })
+
+    context_str = "\n---\n".join(memory_context)
+    return context_str, sources
+
+
+async def stream_ask_about_memories(
+    db_path: str,
+    question: str,
+    max_memories: int = 10,
+) -> AsyncGenerator[dict[str, Any], None]:
+    """Stream a response to a question about memories.
+
+    Yields events with type 'sources', 'chunk', 'done', or 'error'.
+    """
+    if not is_available():
+        yield {
+            "type": "error",
+            "data": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+        }
+        return
+
+    client = get_client()
+    if not client:
+        yield {
+            "type": "error",
+            "data": "Failed to initialize Gemini client.",
+        }
+        return
+
+    context_str, sources = _get_memories_and_sources(db_path, question, max_memories)
+
+    if not sources:
+        yield {
+            "type": "sources",
+            "data": [],
+        }
+        yield {
+            "type": "chunk",
+            "data": "No memories found in the database to answer your question.",
+        }
+        yield {
+            "type": "done",
+            "data": None,
+        }
+        return
+
+    # Yield sources first
+    yield {
+        "type": "sources",
+        "data": sources,
+    }
+
+    # Build and stream the response
+    prompt = _build_prompt(question, context_str)
+
+    try:
+        # Use streaming with the new google.genai client
+        response = client.models.generate_content_stream(
+            model="gemini-2.0-flash",
+            contents=prompt,
+        )
+
+        for chunk in response:
+            if chunk.text:
+                yield {
+                    "type": "chunk",
+                    "data": chunk.text,
+                }
+
+        yield {
+            "type": "done",
+            "data": None,
+        }
+    except Exception as e:
+        yield {
+            "type": "error",
+            "data": f"Failed to generate response: {str(e)}",
+        }
+
+
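
For orientation (not part of the diff): a minimal sketch of how a caller might consume this async generator, assuming `chat_service` is importable and a `memories.db` file exists. The event types follow the 'sources'/'chunk'/'done'/'error' contract documented in the docstring above; the db path and question are placeholder values.

```python
import asyncio

from chat_service import stream_ask_about_memories


async def main() -> None:
    # Hypothetical consumer; path and question are invented.
    async for event in stream_ask_about_memories("memories.db", "What did we decide about caching?"):
        if event["type"] == "sources":
            print(f"{len(event['data'])} source memories")
        elif event["type"] == "chunk":
            print(event["data"], end="", flush=True)  # incremental model output
        elif event["type"] == "error":
            print(f"error: {event['data']}")
        # 'done' carries data=None and just signals completion


asyncio.run(main())
```
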
+async def save_conversation(
+    db_path: str,
+    messages: list[dict],
+    referenced_memory_ids: list[str] | None = None,
+    importance: int = 60,
+) -> dict:
+    """Save a chat conversation as a memory.
+
+    Args:
+        db_path: Path to the database file
+        messages: List of message dicts with 'role', 'content', 'timestamp'
+        referenced_memory_ids: IDs of memories referenced in the conversation
+        importance: Importance score for the memory
+
+    Returns:
+        Dict with memory_id and summary
+    """
+    if not messages:
+        raise ValueError("No messages to save")
+
+    # Format conversation into markdown
+    content_lines = ["## Chat Conversation\n"]
+    for msg in messages:
+        role = "**You**" if msg["role"] == "user" else "**Assistant**"
+        content_lines.append(f"### {role}\n{msg['content']}\n")
+
+    content = "\n".join(content_lines)
+
+    # Generate summary using Gemini if available
+    summary = "Chat conversation"
+    client = get_client()
+    if client:
+        try:
+            summary_prompt = f"""Summarize this conversation in one concise sentence (max 100 chars):
+
+{content[:2000]}
+
+Summary:"""
+            response = client.models.generate_content(
+                model="gemini-2.0-flash",
+                contents=summary_prompt,
+            )
+            summary = response.text.strip()[:100]
+        except Exception:
+            # Use fallback summary
+            first_user_msg = next((m for m in messages if m["role"] == "user"), None)
+            if first_user_msg:
+                summary = f"Q: {first_user_msg['content'][:80]}..."
+
+    # Extract topics from conversation for tags
+    tags = ["chat", "conversation"]
+
+    # Create memory
+    memory_id = create_memory(
+        db_path=db_path,
+        content=content,
+        memory_type="conversation",
+        context=f"Chat conversation: {summary}",
+        tags=tags,
+        importance_score=importance,
+        related_memory_ids=referenced_memory_ids,
+    )
+
+    return {
+        "memory_id": memory_id,
+        "summary": summary,
+    }
+
+
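
A sketch of the `messages` payload `save_conversation` expects, per its docstring (role/content/timestamp dicts). All values are invented for illustration:

```python
import asyncio

from chat_service import save_conversation

# Illustrative payload; db path, text, and timestamps are made up.
result = asyncio.run(save_conversation(
    db_path="memories.db",
    messages=[
        {"role": "user", "content": "How do we rotate API keys?", "timestamp": "2025-01-01T12:00:00"},
        {"role": "assistant", "content": "According to [[Memory 1]]...", "timestamp": "2025-01-01T12:00:05"},
    ],
    referenced_memory_ids=None,
))
print(result["memory_id"], "-", result["summary"])
```
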
+async def ask_about_memories(
+    db_path: str,
+    question: str,
+    max_memories: int = 10,
+) -> dict:
+    """Ask a natural language question about memories (non-streaming).
+
+    Args:
+        db_path: Path to the database file
+        question: The user's question
+        max_memories: Maximum memories to include in context
+
+    Returns:
+        Dict with answer and sources
+    """
+    if not is_available():
+        return {
+            "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
+            "sources": [],
+            "error": "api_key_missing",
+        }
+
+    client = get_client()
+    if not client:
+        return {
+            "answer": "Failed to initialize Gemini client.",
+            "sources": [],
+            "error": "client_init_failed",
+        }
+
+    context_str, sources = _get_memories_and_sources(db_path, question, max_memories)
+
+    if not sources:
+        return {
+            "answer": "No memories found in the database to answer your question.",
+            "sources": [],
+            "error": None,
+        }
+
+    prompt = _build_prompt(question, context_str)
+
+    try:
+        response = client.models.generate_content(
+            model="gemini-2.0-flash",
+            contents=prompt,
+        )
+        answer = response.text
+    except Exception as e:
+        return {
+            "answer": f"Failed to generate response: {str(e)}",
+            "sources": sources,
+            "error": "generation_failed",
+        }
+
+    return {
+        "answer": answer,
+        "sources": sources,
+        "error": None,
+    }
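
That closes the new chat_service.py. As a usage sketch (assumed entry point, not shipped in the wheel), the non-streaming path returns a dict whose `error` field distinguishes the missing-key, client-init, and generation failure cases handled above:

```python
import asyncio

from chat_service import ask_about_memories

# Invented path and question, for illustration only.
result = asyncio.run(ask_about_memories("memories.db", "Which database did we pick, and why?"))
if result["error"] is None:
    print(result["answer"])
    for src in result["sources"]:
        print(f"- [{src['type']}] {src['content_preview']}")
else:
    print(f"chat failed: {result['error']}")
```
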
omni_cortex-1.3.0.data/data/share/omni-cortex/dashboard/backend/database.py
@@ -727,3 +727,289 @@ def get_relationship_graph(db_path: str, center_id: Optional[str] = None, depth:
         })
 
     return {"nodes": list(nodes.values()), "edges": edges}
+
+
+# --- Command Analytics Functions ---
+
+
+def get_command_usage(db_path: str, scope: Optional[str] = None, days: int = 30) -> list[dict]:
+    """Get slash command usage statistics aggregated by command_name.
+
+    Args:
+        db_path: Path to database
+        scope: Filter by scope ('universal', 'project', or None for all)
+        days: Number of days to look back
+
+    Returns:
+        List of command usage entries with counts and success rates
+    """
+    conn = get_connection(db_path)
+
+    # Check if command_name column exists
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = [col[1] for col in columns]
+    if "command_name" not in column_names:
+        conn.close()
+        return []
+
+    query = """
+        SELECT
+            command_name,
+            command_scope,
+            COUNT(*) as count,
+            SUM(CASE WHEN success = 1 THEN 1 ELSE 0 END) * 1.0 / COUNT(*) as success_rate,
+            AVG(duration_ms) as avg_duration_ms
+        FROM activities
+        WHERE command_name IS NOT NULL
+          AND command_name != ''
+          AND timestamp >= date('now', ?)
+    """
+    params = [f'-{days} days']
+
+    if scope:
+        query += " AND command_scope = ?"
+        params.append(scope)
+
+    query += " GROUP BY command_name, command_scope ORDER BY count DESC"
+
+    cursor = conn.execute(query, params)
+    result = [
+        {
+            "command_name": row["command_name"],
+            "command_scope": row["command_scope"] or "unknown",
+            "count": row["count"],
+            "success_rate": round(row["success_rate"], 2) if row["success_rate"] else 1.0,
+            "avg_duration_ms": round(row["avg_duration_ms"]) if row["avg_duration_ms"] else None,
+        }
+        for row in cursor.fetchall()
+    ]
+    conn.close()
+    return result
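
A hedged sketch of calling this helper (values invented; the real shape follows the list comprehension above). Note the `?` inside `date('now', ?)` binds a relative-modifier string such as `'-7 days'`, which SQLite applies to the current date:

```python
# Hypothetical call against a hooks-populated activities table.
rows = get_command_usage("memories.db", scope="project", days=7)
for r in rows:
    # e.g. {"command_name": "/review", "command_scope": "project",
    #       "count": 12, "success_rate": 0.92, "avg_duration_ms": 3450}
    print(r["command_name"], r["count"], r["success_rate"])
```
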
+
+
+def get_skill_usage(db_path: str, scope: Optional[str] = None, days: int = 30) -> list[dict]:
+    """Get skill usage statistics aggregated by skill_name.
+
+    Args:
+        db_path: Path to database
+        scope: Filter by scope ('universal', 'project', or None for all)
+        days: Number of days to look back
+
+    Returns:
+        List of skill usage entries with counts and success rates
+    """
+    conn = get_connection(db_path)
+
+    # Check if skill_name column exists
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = [col[1] for col in columns]
+    if "skill_name" not in column_names:
+        conn.close()
+        return []
+
+    query = """
+        SELECT
+            skill_name,
+            command_scope,
+            COUNT(*) as count,
+            SUM(CASE WHEN success = 1 THEN 1 ELSE 0 END) * 1.0 / COUNT(*) as success_rate,
+            AVG(duration_ms) as avg_duration_ms
+        FROM activities
+        WHERE skill_name IS NOT NULL
+          AND skill_name != ''
+          AND timestamp >= date('now', ?)
+    """
+    params = [f'-{days} days']
+
+    if scope:
+        query += " AND command_scope = ?"
+        params.append(scope)
+
+    query += " GROUP BY skill_name, command_scope ORDER BY count DESC"
+
+    cursor = conn.execute(query, params)
+    result = [
+        {
+            "skill_name": row["skill_name"],
+            "skill_scope": row["command_scope"] or "unknown",
+            "count": row["count"],
+            "success_rate": round(row["success_rate"], 2) if row["success_rate"] else 1.0,
+            "avg_duration_ms": round(row["avg_duration_ms"]) if row["avg_duration_ms"] else None,
+        }
+        for row in cursor.fetchall()
+    ]
+    conn.close()
+    return result
+
+
+def get_mcp_usage(db_path: str, days: int = 30) -> list[dict]:
+    """Get MCP server usage statistics.
+
+    Args:
+        db_path: Path to database
+        days: Number of days to look back
+
+    Returns:
+        List of MCP server usage entries with tool counts and call totals
+    """
+    conn = get_connection(db_path)
+
+    # Check if mcp_server column exists
+    columns = conn.execute("PRAGMA table_info(activities)").fetchall()
+    column_names = [col[1] for col in columns]
+    if "mcp_server" not in column_names:
+        conn.close()
+        return []
+
+    query = """
+        SELECT
+            mcp_server,
+            COUNT(DISTINCT tool_name) as tool_count,
+            COUNT(*) as total_calls,
+            SUM(CASE WHEN success = 1 THEN 1 ELSE 0 END) * 1.0 / COUNT(*) as success_rate
+        FROM activities
+        WHERE mcp_server IS NOT NULL
+          AND mcp_server != ''
+          AND timestamp >= date('now', ?)
+        GROUP BY mcp_server
+        ORDER BY total_calls DESC
+    """
+    cursor = conn.execute(query, (f'-{days} days',))
+    result = [
+        {
+            "mcp_server": row["mcp_server"],
+            "tool_count": row["tool_count"],
+            "total_calls": row["total_calls"],
+            "success_rate": round(row["success_rate"], 2) if row["success_rate"] else 1.0,
+        }
+        for row in cursor.fetchall()
+    ]
+    conn.close()
+    return result
+
+
+def get_activity_detail(db_path: str, activity_id: str) -> Optional[dict]:
+    """Get full activity details including complete input/output.
+
+    Args:
+        db_path: Path to database
+        activity_id: Activity ID
+
+    Returns:
+        Full activity details or None if not found
+    """
+    conn = get_connection(db_path)
+    cursor = conn.execute("SELECT * FROM activities WHERE id = ?", (activity_id,))
+    row = cursor.fetchone()
+
+    if not row:
+        conn.close()
+        return None
+
+    # Get column names for safe access
+    column_names = [description[0] for description in cursor.description]
+
+    result = {
+        "id": row["id"],
+        "session_id": row["session_id"],
+        "event_type": row["event_type"],
+        "tool_name": row["tool_name"],
+        "tool_input_full": row["tool_input"],
+        "tool_output_full": row["tool_output"],
+        "success": bool(row["success"]),
+        "error_message": row["error_message"],
+        "duration_ms": row["duration_ms"],
+        "file_path": row["file_path"],
+        "timestamp": row["timestamp"],
+    }
+
+    # Add command analytics fields if they exist
+    if "command_name" in column_names:
+        result["command_name"] = row["command_name"]
+    if "command_scope" in column_names:
+        result["command_scope"] = row["command_scope"]
+    if "mcp_server" in column_names:
+        result["mcp_server"] = row["mcp_server"]
+    if "skill_name" in column_names:
+        result["skill_name"] = row["skill_name"]
+
+    conn.close()
+    return result
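
Worth noting for readers of this hunk: the `row["id"]`-style access only works if `get_connection` (defined earlier in database.py, outside this diff) sets `row_factory = sqlite3.Row`; a plain tuple row would raise `TypeError`. A minimal lookup sketch with an invented ID:

```python
detail = get_activity_detail("memories.db", "act_0123456789")
if detail is not None and not detail["success"]:
    # Optional analytics columns are only present if the schema has them.
    print(detail["error_message"], detail.get("mcp_server"))
```
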
+
+
+def create_memory(
+    db_path: str,
+    content: str,
+    memory_type: str = "other",
+    context: Optional[str] = None,
+    tags: Optional[list[str]] = None,
+    importance_score: int = 50,
+    related_memory_ids: Optional[list[str]] = None,
+) -> str:
+    """Create a new memory and return its ID.
+
+    Args:
+        db_path: Path to the database file
+        content: Memory content
+        memory_type: Type of memory (e.g., 'decision', 'solution', 'conversation')
+        context: Additional context
+        tags: List of tags
+        importance_score: Importance score (1-100)
+        related_memory_ids: IDs of related memories to create relationships with
+
+    Returns:
+        The ID of the created memory
+    """
+    import uuid
+
+    conn = get_write_connection(db_path)
+
+    # Generate ID
+    memory_id = f"mem_{int(datetime.now().timestamp() * 1000)}_{uuid.uuid4().hex[:8]}"
+    now = datetime.now().isoformat()
+
+    # Insert memory
+    conn.execute(
+        """
+        INSERT INTO memories (id, content, context, type, status, importance_score, access_count, created_at, last_accessed, tags)
+        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            memory_id,
+            content,
+            context,
+            memory_type,
+            "fresh",
+            importance_score,
+            0,
+            now,
+            now,
+            json.dumps(tags) if tags else None,
+        ),
+    )
+
+    # Create relationships if related_memory_ids provided
+    if related_memory_ids:
+        # Check if memory_relationships table exists
+        table_check = conn.execute(
+            "SELECT name FROM sqlite_master WHERE type='table' AND name='memory_relationships'"
+        ).fetchone()
+
+        if table_check:
+            for related_id in related_memory_ids:
+                try:
+                    conn.execute(
+                        """
+                        INSERT INTO memory_relationships (source_memory_id, target_memory_id, relationship_type, strength)
+                        VALUES (?, ?, ?, ?)
+                        """,
+                        (memory_id, related_id, "derived_from", 0.8),
+                    )
+                except Exception:
+                    # Ignore if related memory doesn't exist
+                    pass
+
+    conn.commit()
+    conn.close()
+
+    return memory_id
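
That ends the database.py additions. A usage sketch under the same assumptions (invented content and path); the returned ID embeds a millisecond timestamp plus an 8-character UUID fragment, per the f-string above:

```python
memory_id = create_memory(
    db_path="memories.db",
    content="Chose SQLite over Postgres for the dashboard backend.",
    memory_type="decision",
    tags=["architecture", "storage"],
    importance_score=70,
)
print(memory_id)  # e.g. "mem_1735732800123_9f3a1c2b"
```
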