omni-cortex 1.0.4-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +290 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/database.py +78 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/image_service.py +533 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py +92 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/main.py +324 -42
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/models.py +93 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/project_config.py +170 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +45 -22
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/METADATA +26 -2
- omni_cortex-1.2.0.dist-info/RECORD +20 -0
- omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -140
- omni_cortex-1.0.4.dist-info/RECORD +0 -17
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/stop.py +0 -0
- {omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/WHEEL +0 -0
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/entry_points.txt +0 -0
- {omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/licenses/LICENSE +0 -0
{omni_cortex-1.0.4.data → omni_cortex-1.2.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py

@@ -1,11 +1,11 @@
 """Scanner to discover all omni-cortex databases on the system."""

-import os
 import sqlite3
 from datetime import datetime
 from pathlib import Path

 from models import ProjectInfo
+from project_config import load_config


 def get_global_db_path() -> Path:
@@ -65,54 +65,70 @@ def scan_projects() -> list[ProjectInfo]:
     projects: list[ProjectInfo] = []
     seen_paths: set[str] = set()

+    # Load user config
+    config = load_config()
+
     # 1. Add global index if exists
     global_path = get_global_db_path()
     if global_path.exists():
         stat = global_path.stat()
+        global_project_path = str(global_path.parent)
         projects.append(
             ProjectInfo(
                 name="Global Index",
-                path=
+                path=global_project_path,
                 db_path=str(global_path),
                 last_modified=datetime.fromtimestamp(stat.st_mtime),
                 memory_count=get_memory_count(global_path),
                 is_global=True,
+                is_favorite=global_project_path in config.favorites,
             )
         )
         seen_paths.add(str(global_path))

-    # 2.
-
-    Path(
-
-
-        Path.home() / "code",
-        Path.home() / "Code",
-        Path.home() / "dev",
-        Path.home() / "Dev",
-        Path.home() / "src",
-        Path.home() / "workspace",
-    ]
-
-    for scan_dir in scan_dirs:
-        if scan_dir.exists():
-            for db_path in scan_directory_for_cortex(scan_dir):
+    # 2. Use CONFIGURABLE scan directories
+    for scan_dir in config.scan_directories:
+        scan_path = Path(scan_dir).expanduser()
+        if scan_path.exists():
+            for db_path in scan_directory_for_cortex(scan_path):
                 if str(db_path) not in seen_paths:
                     project_dir = db_path.parent.parent
                     stat = db_path.stat()
+                    project_path = str(project_dir)
                     projects.append(
                         ProjectInfo(
                             name=project_dir.name,
-                            path=
+                            path=project_path,
                             db_path=str(db_path),
                             last_modified=datetime.fromtimestamp(stat.st_mtime),
                             memory_count=get_memory_count(db_path),
                             is_global=False,
+                            is_favorite=project_path in config.favorites,
                         )
                     )
                     seen_paths.add(str(db_path))

-    # 3. Add
+    # 3. Add REGISTERED projects (manual additions)
+    for reg in config.registered_projects:
+        db_path = Path(reg.path) / ".omni-cortex" / "cortex.db"
+        if db_path.exists() and str(db_path) not in seen_paths:
+            stat = db_path.stat()
+            projects.append(
+                ProjectInfo(
+                    name=Path(reg.path).name,
+                    path=reg.path,
+                    db_path=str(db_path),
+                    last_modified=datetime.fromtimestamp(stat.st_mtime),
+                    memory_count=get_memory_count(db_path),
+                    is_global=False,
+                    is_favorite=reg.path in config.favorites,
+                    is_registered=True,
+                    display_name=reg.display_name,
+                )
+            )
+            seen_paths.add(str(db_path))
+
+    # 4. Add paths from global db that we haven't seen
     for project_path in get_projects_from_global_db():
         db_path = Path(project_path) / ".omni-cortex" / "cortex.db"
         if db_path.exists() and str(db_path) not in seen_paths:
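Note on the configuration consumed above: scan_projects() now reads config.scan_directories, config.favorites, and config.registered_projects (objects exposing .path and .display_name) from project_config.load_config(). project_config.py itself is not included in this diff, so the following is only a minimal sketch of a compatible shape; the dataclass names, defaults, and config-file location are hypothetical and only mirror the attributes the scanner dereferences.

```python
# Hypothetical sketch of the config consumed by scan_projects(); the real
# project_config.py (+170 lines in this release) is not shown in this diff.
import json
from dataclasses import dataclass, field
from pathlib import Path


@dataclass
class RegisteredProject:
    path: str                       # project root containing .omni-cortex/cortex.db
    display_name: str | None = None


@dataclass
class CortexConfig:
    scan_directories: list[str] = field(default_factory=lambda: ["~/code", "~/dev"])
    favorites: set[str] = field(default_factory=set)  # project paths pinned by the user
    registered_projects: list[RegisteredProject] = field(default_factory=list)


def load_config(path: Path = Path.home() / ".omni-cortex" / "config.json") -> CortexConfig:
    """Load the dashboard config, falling back to defaults when absent (location is assumed)."""
    if not path.exists():
        return CortexConfig()
    raw = json.loads(path.read_text())
    return CortexConfig(
        scan_directories=raw.get("scan_directories", []),
        favorites=set(raw.get("favorites", [])),
        registered_projects=[RegisteredProject(**p) for p in raw.get("registered_projects", [])],
    )
```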
@@ -125,12 +141,19 @@ def scan_projects() -> list[ProjectInfo]:
                     last_modified=datetime.fromtimestamp(stat.st_mtime),
                     memory_count=get_memory_count(db_path),
                     is_global=False,
+                    is_favorite=project_path in config.favorites,
                 )
             )
             seen_paths.add(str(db_path))

-    # Sort by last_modified (most recent first), with global always first
-    projects.sort(
+    # Sort: favorites first, then by last_modified (most recent first), with global always first
+    projects.sort(
+        key=lambda p: (
+            not p.is_global,
+            not p.is_favorite,
+            -(p.last_modified.timestamp() if p.last_modified else 0),
+        )
+    )

     return projects

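The replacement sort key relies on Python's lexicographic tuple ordering and on False sorting before True: `not p.is_global` keeps the global index first, `not p.is_favorite` lifts favorites next, and the negated timestamp orders each group most-recent-first. A small self-contained check of that ordering, using a stand-in namedtuple rather than the real ProjectInfo model:

```python
# Stand-in for ProjectInfo, just to exercise the sort key from the diff above.
from collections import namedtuple
from datetime import datetime

P = namedtuple("P", "name is_global is_favorite last_modified")

projects = [
    P("old-project",  False, False, datetime(2024, 1, 1)),
    P("fav-project",  False, True,  datetime(2023, 6, 1)),
    P("Global Index", True,  False, datetime(2023, 1, 1)),
    P("new-project",  False, False, datetime(2025, 1, 1)),
]

projects.sort(
    key=lambda p: (
        not p.is_global,    # global index first (False sorts before True)
        not p.is_favorite,  # then favorites
        -(p.last_modified.timestamp() if p.last_modified else 0),  # then most recent
    )
)

print([p.name for p in projects])
# ['Global Index', 'fav-project', 'new-project', 'old-project']
```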
{omni_cortex-1.0.4.dist-info → omni_cortex-1.2.0.dist-info}/METADATA

@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: omni-cortex
-Version: 1.0
-Summary:
+Version: 1.2.0
+Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
 Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
 Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
 Project-URL: Issues, https://github.com/AllCytes/Omni-Cortex/issues

@@ -39,6 +39,30 @@ Description-Content-Type: text/markdown

 A universal memory system for Claude Code that combines activity logging with intelligent knowledge storage.

+## What Is This?
+
+**For AI/ML experts:** A dual-layer context system with activity provenance, hybrid semantic search (FTS5 + embeddings), and temporal importance decay. Think of it as **Git + Elasticsearch + a knowledge graph for AI context**.
+
+**For developers:** It gives Claude Code a persistent, searchable memory that auto-logs everything and gets smarter over time. Like a **senior developer's institutional knowledge**—searchable, organized, and always available.
+
+**For everyone:** It makes your AI assistant actually remember things. No more re-explaining your project every session.
+
+### Why Not Just Use CLAUDE.md or Basic Memory?
+
+| Feature | Claude Code | CLAUDE.md | Basic MCP | Omni-Cortex |
+|---------|:-----------:|:---------:|:---------:|:-----------:|
+| Persists between sessions | ❌ | ✅ | ✅ | ✅ |
+| Auto-logs all activity | ❌ | ❌ | ❌ | ✅ |
+| Hybrid search (keyword + semantic) | ❌ | ❌ | ❌ | ✅ |
+| Auto-categorizes memories | ❌ | ❌ | ❌ | ✅ |
+| Importance decay + access boosting | ❌ | ❌ | ❌ | ✅ |
+| Session history & context | ❌ | ❌ | ❌ | ✅ |
+| Memory relationships | ❌ | ❌ | ❌ | ✅ |
+| Cross-project search | ❌ | ❌ | ❌ | ✅ |
+| Visual dashboard | ❌ | ❌ | ❌ | ✅ |
+
+**The difference:** Basic solutions are like sticky notes. Omni-Cortex is like having a trusted long-term employee who remembers everything, files it automatically, and hands you exactly what you need.
+
 ## Features

 - **Zero Configuration**: Works out of the box - just install and run setup
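The metadata above advertises "hybrid search (FTS5 + embeddings)". None of that implementation appears in this diff, but the general pattern the phrase refers to, keyword recall through SQLite FTS5 followed by an embedding-based re-rank, can be illustrated roughly as below. The schema, table name, and the toy embedding are invented for the illustration and are not Omni-Cortex's code; it also assumes an SQLite build with FTS5 enabled.

```python
# Purely illustrative "hybrid search": FTS5 keyword recall + embedding re-rank.
import math
import sqlite3


def toy_embed(text: str) -> list[float]:
    """Stand-in embedding: normalized character histogram (a real system would use a model)."""
    vec = [0.0] * 26
    for ch in text.lower():
        if "a" <= ch <= "z":
            vec[ord(ch) - 97] += 1.0
    norm = math.sqrt(sum(v * v for v in vec)) or 1.0
    return [v / norm for v in vec]


def cosine(a: list[float], b: list[float]) -> float:
    # Inputs are already normalized, so the dot product is the cosine similarity.
    return sum(x * y for x, y in zip(a, b))


conn = sqlite3.connect(":memory:")
conn.execute("CREATE VIRTUAL TABLE memories USING fts5(content)")
conn.executemany(
    "INSERT INTO memories(content) VALUES (?)",
    [("switched auth to JWT tokens",), ("fixed websocket reconnect bug",), ("JWT refresh decision",)],
)

question = "jwt auth"
# 1) keyword recall via FTS5 (OR-query casts a wide net)
rows = conn.execute(
    "SELECT rowid, content FROM memories WHERE memories MATCH ? LIMIT 20",
    (" OR ".join(question.split()),),
).fetchall()
# 2) semantic re-rank of the recalled rows
qv = toy_embed(question)
ranked = sorted(rows, key=lambda r: cosine(qv, toy_embed(r[1])), reverse=True)
print([content for _, content in ranked])
```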
omni_cortex-1.2.0.dist-info/RECORD

@@ -0,0 +1,20 @@
+omni_cortex-1.2.0.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=zXy30KNDW6UoWP0nwq5n320r1wFa-tE6V4QuSdDzx8w,5106
+omni_cortex-1.2.0.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=SlvvEKsIkolDG5Y_35VezY2e7kRpbj1GiDlBW-naj2g,4900
+omni_cortex-1.2.0.data/data/share/omni-cortex/hooks/stop.py,sha256=T1bwcmbTLj0gzjrVvFBT1zB6wff4J2YkYBAY-ZxZI5g,5336
+omni_cortex-1.2.0.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=hmTvlwK5w29nOLUGCwaaIslEuLgA1-JezXbLwxWDSdM,8265
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=HNwfyfebHq0Gdooc4bZdyNp_FD7WFx9z6KJkWLWtHp8,25400
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/image_service.py,sha256=fHP4AA9rM9r05PMglO1eY9Fd93v0JaNuaUX2HiRA-PI,18013
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py,sha256=dFcNqfw2jTfUjFERV_Pr5r5PjY9wSQGXEYPf0AyR5Yk,2869
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=RCKdsvOp1oJC64dZjh08b2NqCozSJdvla2wCmRexrk4,30478
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=lWb4Rvy6E-x21CGAeahSdVRzxGCVrEgYdc5vKbfo6_A,5671
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/project_config.py,sha256=ZxGoeRpHvN5qQyf2hRxrAZiHrPSwdQp59f0di6O1LKM,4352
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=lwFXS8iJbOoxf7FAyo2TjH25neaMHiJ8B3jS57XxtDI,5713
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=e7IMinX0BR2EcnpPwHYCdDJQDzuDzQ3D-FmPOiPKfGA,131248
+omni_cortex-1.2.0.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=fv16XkRkgN4SDNwTiP_p9qFnWta9lIpAXgKbFETZ7uM,2770
+omni_cortex-1.2.0.dist-info/METADATA,sha256=FYI3wuEZJ6oV0Mvla_GvwumdOzhvl938i-g2ZV4SM18,9855
+omni_cortex-1.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+omni_cortex-1.2.0.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
+omni_cortex-1.2.0.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
+omni_cortex-1.2.0.dist-info/RECORD,,
omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/chat_service.py

@@ -1,140 +0,0 @@
-"""Chat service for natural language queries about memories using Gemini Flash."""
-
-import os
-from typing import Optional
-
-import google.generativeai as genai
-from dotenv import load_dotenv
-
-from database import search_memories, get_memories
-from models import FilterParams
-
-# Load environment variables
-load_dotenv()
-
-# Configure Gemini
-_api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
-_model: Optional[genai.GenerativeModel] = None
-
-
-def get_model() -> Optional[genai.GenerativeModel]:
-    """Get or initialize the Gemini model."""
-    global _model
-    if _model is None and _api_key:
-        genai.configure(api_key=_api_key)
-        _model = genai.GenerativeModel("gemini-2.0-flash-exp")
-    return _model
-
-
-def is_available() -> bool:
-    """Check if the chat service is available."""
-    return _api_key is not None
-
-
-async def ask_about_memories(
-    db_path: str,
-    question: str,
-    max_memories: int = 10,
-) -> dict:
-    """Ask a natural language question about memories.
-
-    Args:
-        db_path: Path to the database file
-        question: The user's question
-        max_memories: Maximum memories to include in context
-
-    Returns:
-        Dict with answer and sources
-    """
-    if not is_available():
-        return {
-            "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
-            "sources": [],
-            "error": "api_key_missing",
-        }
-
-    model = get_model()
-    if not model:
-        return {
-            "answer": "Failed to initialize Gemini model.",
-            "sources": [],
-            "error": "model_init_failed",
-        }
-
-    # Search for relevant memories
-    memories = search_memories(db_path, question, limit=max_memories)
-
-    # If no memories found via search, get recent ones
-    if not memories:
-        filters = FilterParams(
-            sort_by="last_accessed",
-            sort_order="desc",
-            limit=max_memories,
-            offset=0,
-        )
-        memories = get_memories(db_path, filters)
-
-    if not memories:
-        return {
-            "answer": "No memories found in the database to answer your question.",
-            "sources": [],
-            "error": None,
-        }
-
-    # Build context from memories
-    memory_context = []
-    sources = []
-    for i, mem in enumerate(memories, 1):
-        memory_context.append(f"""
-Memory {i}:
-- Type: {mem.memory_type}
-- Content: {mem.content}
-- Context: {mem.context or 'N/A'}
-- Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
-- Status: {mem.status}
-- Importance: {mem.importance_score}/100
-""")
-        sources.append({
-            "id": mem.id,
-            "type": mem.memory_type,
-            "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
-            "tags": mem.tags,
-        })
-
-    context_str = "\n---\n".join(memory_context)
-
-    # Create prompt
-    prompt = f"""You are a helpful assistant that answers questions about stored memories and knowledge.
-
-The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.
-
-Here are the relevant memories:
-
-{context_str}
-
-User question: {question}
-
-Instructions:
-1. Answer the question based on the memories provided
-2. If the memories don't contain relevant information, say so
-3. Reference specific memories when appropriate (e.g., "According to memory 1...")
-4. Be concise but thorough
-5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible
-
-Answer:"""
-
-    try:
-        response = model.generate_content(prompt)
-        answer = response.text
-    except Exception as e:
-        return {
-            "answer": f"Failed to generate response: {str(e)}",
-            "sources": sources,
-            "error": "generation_failed",
-        }
-
-    return {
-        "answer": answer,
-        "sources": sources,
-        "error": None,
-    }
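For reference, the deleted 1.0.4 chat service exposed a single async entry point built on google-generativeai. A minimal driver for that old API would look roughly like the sketch below; it assumes the dashboard backend modules are importable, that GEMINI_API_KEY (or GOOGLE_API_KEY) is set, and uses a hypothetical database path. The 1.2.0 replacement (+290 lines) is not shown in this diff.

```python
# Minimal driver for the 1.0.4 chat_service API shown (and removed) above.
# Assumes the dashboard backend modules are on sys.path and an API key is configured.
import asyncio

from chat_service import ask_about_memories, is_available


async def main() -> None:
    if not is_available():
        print("Set GEMINI_API_KEY or GOOGLE_API_KEY first.")
        return
    result = await ask_about_memories(
        db_path="/path/to/project/.omni-cortex/cortex.db",  # hypothetical path
        question="What did we decide about the auth flow?",
        max_memories=10,
    )
    print(result["answer"])
    for src in result["sources"]:
        print(f'- [{src["type"]}] {src["content_preview"]}')


if __name__ == "__main__":
    asyncio.run(main())
```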
omni_cortex-1.0.4.dist-info/RECORD

@@ -1,17 +0,0 @@
-omni_cortex-1.0.4.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=zXy30KNDW6UoWP0nwq5n320r1wFa-tE6V4QuSdDzx8w,5106
-omni_cortex-1.0.4.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=SlvvEKsIkolDG5Y_35VezY2e7kRpbj1GiDlBW-naj2g,4900
-omni_cortex-1.0.4.data/data/share/omni-cortex/hooks/stop.py,sha256=T1bwcmbTLj0gzjrVvFBT1zB6wff4J2YkYBAY-ZxZI5g,5336
-omni_cortex-1.0.4.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=xJk63Y7NW6mRCr9RekS_Xlqxbpl9WgC_D5-QIKsWgFE,3980
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=VRy-Eh4XsXNp-LnAG3w7Lsm5BaJzlH-OtG9tDXpV8_o,23052
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=KANfe-JXPJVdJvLj7vJrQIpY0YYAWoL-c1Jjpw5WuWw,20540
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=cVksSzga6WEsnZdASlVTqOGRnwRKATKKakueGaPz7SI,3297
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=6xrrgixQVihoCYvabpwd30sCO-14RrvWUjvOgWi5Tsw,4626
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=e7IMinX0BR2EcnpPwHYCdDJQDzuDzQ3D-FmPOiPKfGA,131248
-omni_cortex-1.0.4.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=fv16XkRkgN4SDNwTiP_p9qFnWta9lIpAXgKbFETZ7uM,2770
-omni_cortex-1.0.4.dist-info/METADATA,sha256=iDOTnP5x69VuU2wY1TyF5fayS4MuA8a4_bddOQ5HdlU,8381
-omni_cortex-1.0.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-omni_cortex-1.0.4.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
-omni_cortex-1.0.4.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
-omni_cortex-1.0.4.dist-info/RECORD,,