omni-cortex 1.0.1__tar.gz → 1.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/PKG-INFO +81 -2
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/README.md +80 -1
- omni_cortex-1.0.4/dashboard/backend/chat_service.py +140 -0
- omni_cortex-1.0.4/dashboard/backend/database.py +729 -0
- omni_cortex-1.0.4/dashboard/backend/main.py +661 -0
- omni_cortex-1.0.4/dashboard/backend/models.py +140 -0
- omni_cortex-1.0.4/dashboard/backend/project_scanner.py +141 -0
- omni_cortex-1.0.4/dashboard/backend/pyproject.toml +23 -0
- omni_cortex-1.0.4/dashboard/backend/uv.lock +697 -0
- omni_cortex-1.0.4/dashboard/backend/websocket_manager.py +82 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/hooks/post_tool_use.py +9 -3
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/hooks/pre_tool_use.py +18 -3
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/__init__.py +1 -1
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/config.py +2 -2
- omni_cortex-1.0.4/omni_cortex/dashboard.py +184 -0
- omni_cortex-1.0.4/omni_cortex/database/__init__.py +24 -0
- omni_cortex-1.0.4/omni_cortex/database/sync.py +421 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/embeddings/local.py +160 -80
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/models/activity.py +14 -13
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/server.py +77 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/tools/memories.py +67 -8
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/tools/sessions.py +1 -1
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/tools/utilities.py +191 -1
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/utils/formatting.py +25 -3
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/pyproject.toml +6 -1
- omni_cortex-1.0.4/scripts/import_ken_memories.py +261 -0
- omni_cortex-1.0.4/scripts/populate_session_data.py +255 -0
- omni_cortex-1.0.1/omni_cortex/database/__init__.py +0 -12
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/.gitignore +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/LICENSE +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/hooks/stop.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/categorization/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/categorization/auto_tags.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/categorization/auto_type.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/database/connection.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/database/migrations.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/database/schema.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/decay/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/decay/importance.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/embeddings/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/models/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/models/agent.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/models/memory.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/models/relationship.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/models/session.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/resources/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/search/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/search/hybrid.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/search/keyword.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/search/ranking.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/search/semantic.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/setup.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/tools/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/tools/activities.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/utils/__init__.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/utils/ids.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/utils/timestamps.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/omni_cortex/utils/truncation.py +0 -0
- {omni_cortex-1.0.1 → omni_cortex-1.0.4}/scripts/setup.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: omni-cortex
|
|
3
|
-
Version: 1.0.1
|
|
3
|
+
Version: 1.0.4
|
|
4
4
|
Summary: Universal Memory MCP for Claude Code - dual-layer activity logging and knowledge storage
|
|
5
5
|
Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
|
|
6
6
|
Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
|
|
@@ -43,7 +43,7 @@ A universal memory system for Claude Code that combines activity logging with in
|
|
|
43
43
|
|
|
44
44
|
- **Zero Configuration**: Works out of the box - just install and run setup
|
|
45
45
|
- **Dual-Layer Storage**: Activity logging (audit trail) + Knowledge store (memories)
|
|
46
|
-
- **
|
|
46
|
+
- **18 MCP Tools**: Full-featured API for memory management, activity tracking, session continuity, and cross-project search
|
|
47
47
|
- **Semantic Search**: AI-powered search using sentence-transformers (optional)
|
|
48
48
|
- **Hybrid Search**: Combines keyword (FTS5) + semantic search for best results
|
|
49
49
|
- **Full-Text Search**: SQLite FTS5-powered keyword search with smart ranking
|
|
@@ -163,6 +163,14 @@ pip uninstall omni-cortex
|
|
|
163
163
|
| `cortex_review_memories` | Review and update memory freshness |
|
|
164
164
|
| `cortex_export` | Export data to markdown or JSON |
|
|
165
165
|
|
|
166
|
+
### Global Tools (3)
|
|
167
|
+
|
|
168
|
+
| Tool | Description |
|
|
169
|
+
|------|-------------|
|
|
170
|
+
| `cortex_global_search` | Search memories across all projects |
|
|
171
|
+
| `cortex_global_stats` | Get global index statistics |
|
|
172
|
+
| `cortex_sync_to_global` | Manually sync to global index |
|
|
173
|
+
|
|
166
174
|
## Memory Types
|
|
167
175
|
|
|
168
176
|
Memories are automatically categorized into:
|
|
@@ -197,6 +205,69 @@ auto_provide_context: true
|
|
|
197
205
|
context_depth: 3
|
|
198
206
|
```
|
|
199
207
|
|
|
208
|
+
## Web Dashboard
|
|
209
|
+
|
|
210
|
+
A visual interface for browsing, searching, and managing your memories.
|
|
211
|
+
|
|
212
|
+

|
|
213
|
+
|
|
214
|
+
### Features
|
|
215
|
+
- **Memory Browser**: View, search, filter, and edit memories
|
|
216
|
+
- **Ask AI**: Chat with your memories using Gemini
|
|
217
|
+
- **Real-time Updates**: WebSocket-based live sync
|
|
218
|
+
- **Statistics**: Memory counts, types, tags distribution
|
|
219
|
+
- **Project Switcher**: Switch between project databases
|
|
220
|
+
|
|
221
|
+
### Quick Start
|
|
222
|
+
|
|
223
|
+
```bash
|
|
224
|
+
# Backend (requires Python 3.10+)
|
|
225
|
+
cd dashboard/backend
|
|
226
|
+
pip install -e .
|
|
227
|
+
uvicorn main:app --host 0.0.0.0 --port 8765 --reload
|
|
228
|
+
|
|
229
|
+
# Frontend (requires Node.js 18+)
|
|
230
|
+
cd dashboard/frontend
|
|
231
|
+
npm install
|
|
232
|
+
npm run dev
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
Open http://localhost:5173 in your browser.
|
|
236
|
+
|
|
237
|
+
### Ask AI Setup (Optional)
|
|
238
|
+
|
|
239
|
+
To enable the "Ask AI" chat feature, set your Gemini API key:
|
|
240
|
+
|
|
241
|
+
```bash
|
|
242
|
+
export GEMINI_API_KEY=your_api_key_here
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
See [dashboard/README.md](dashboard/README.md) for full documentation.
|
|
246
|
+
|
|
247
|
+
## Documentation
|
|
248
|
+
|
|
249
|
+
- [Tool Reference](docs/TOOLS.md) - Complete documentation for all 18 tools with examples
|
|
250
|
+
- [Configuration Guide](docs/CONFIGURATION.md) - Configuration options and troubleshooting
|
|
251
|
+
- **Teaching Materials** (PDF):
|
|
252
|
+
- `docs/OmniCortex_QuickStart.pdf` - 3-page quick start guide
|
|
253
|
+
- `docs/OmniCortex_FeatureComparison.pdf` - Comparison with basic memory MCPs
|
|
254
|
+
- `docs/OmniCortex_Philosophy.pdf` - Design principles and inspiration
|
|
255
|
+
- `docs/OmniCortex_CommandReference.pdf` - All tools with parameters
|
|
256
|
+
|
|
257
|
+
### Regenerating PDFs
|
|
258
|
+
|
|
259
|
+
To regenerate the teaching material PDFs:
|
|
260
|
+
|
|
261
|
+
```bash
|
|
262
|
+
# Requires reportlab
|
|
263
|
+
pip install reportlab
|
|
264
|
+
|
|
265
|
+
# Generate all 4 PDFs
|
|
266
|
+
python docs/create_pdfs.py
|
|
267
|
+
```
|
|
268
|
+
|
|
269
|
+
The PDFs use a light theme with blue/purple/green accents. Edit `docs/create_pdfs.py` to customize colors or content.
|
|
270
|
+
|
|
200
271
|
## Development
|
|
201
272
|
|
|
202
273
|
```bash
|
|
@@ -211,6 +282,14 @@ black src tests
|
|
|
211
282
|
ruff check src tests
|
|
212
283
|
```
|
|
213
284
|
|
|
285
|
+
## Security
|
|
286
|
+
|
|
287
|
+
Omni Cortex v1.0.3 has been security reviewed:
|
|
288
|
+
- All SQL queries use parameterized statements
|
|
289
|
+
- Input validation via Pydantic models
|
|
290
|
+
- Model name validation prevents code injection
|
|
291
|
+
- YAML loading uses `safe_load()`
|
|
292
|
+
|
|
214
293
|
## License
|
|
215
294
|
|
|
216
295
|
MIT
|
|
@@ -6,7 +6,7 @@ A universal memory system for Claude Code that combines activity logging with in
|
|
|
6
6
|
|
|
7
7
|
- **Zero Configuration**: Works out of the box - just install and run setup
|
|
8
8
|
- **Dual-Layer Storage**: Activity logging (audit trail) + Knowledge store (memories)
|
|
9
|
-
- **
|
|
9
|
+
- **18 MCP Tools**: Full-featured API for memory management, activity tracking, session continuity, and cross-project search
|
|
10
10
|
- **Semantic Search**: AI-powered search using sentence-transformers (optional)
|
|
11
11
|
- **Hybrid Search**: Combines keyword (FTS5) + semantic search for best results
|
|
12
12
|
- **Full-Text Search**: SQLite FTS5-powered keyword search with smart ranking
|
|
@@ -126,6 +126,14 @@ pip uninstall omni-cortex
|
|
|
126
126
|
| `cortex_review_memories` | Review and update memory freshness |
|
|
127
127
|
| `cortex_export` | Export data to markdown or JSON |
|
|
128
128
|
|
|
129
|
+
### Global Tools (3)
|
|
130
|
+
|
|
131
|
+
| Tool | Description |
|
|
132
|
+
|------|-------------|
|
|
133
|
+
| `cortex_global_search` | Search memories across all projects |
|
|
134
|
+
| `cortex_global_stats` | Get global index statistics |
|
|
135
|
+
| `cortex_sync_to_global` | Manually sync to global index |
|
|
136
|
+
|
|
129
137
|
## Memory Types
|
|
130
138
|
|
|
131
139
|
Memories are automatically categorized into:
|
|
@@ -160,6 +168,69 @@ auto_provide_context: true
|
|
|
160
168
|
context_depth: 3
|
|
161
169
|
```
|
|
162
170
|
|
|
171
|
+
## Web Dashboard
|
|
172
|
+
|
|
173
|
+
A visual interface for browsing, searching, and managing your memories.
|
|
174
|
+
|
|
175
|
+

|
|
176
|
+
|
|
177
|
+
### Features
|
|
178
|
+
- **Memory Browser**: View, search, filter, and edit memories
|
|
179
|
+
- **Ask AI**: Chat with your memories using Gemini
|
|
180
|
+
- **Real-time Updates**: WebSocket-based live sync
|
|
181
|
+
- **Statistics**: Memory counts, types, tags distribution
|
|
182
|
+
- **Project Switcher**: Switch between project databases
|
|
183
|
+
|
|
184
|
+
### Quick Start
|
|
185
|
+
|
|
186
|
+
```bash
|
|
187
|
+
# Backend (requires Python 3.10+)
|
|
188
|
+
cd dashboard/backend
|
|
189
|
+
pip install -e .
|
|
190
|
+
uvicorn main:app --host 0.0.0.0 --port 8765 --reload
|
|
191
|
+
|
|
192
|
+
# Frontend (requires Node.js 18+)
|
|
193
|
+
cd dashboard/frontend
|
|
194
|
+
npm install
|
|
195
|
+
npm run dev
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
Open http://localhost:5173 in your browser.
|
|
199
|
+
|
|
200
|
+
### Ask AI Setup (Optional)
|
|
201
|
+
|
|
202
|
+
To enable the "Ask AI" chat feature, set your Gemini API key:
|
|
203
|
+
|
|
204
|
+
```bash
|
|
205
|
+
export GEMINI_API_KEY=your_api_key_here
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
See [dashboard/README.md](dashboard/README.md) for full documentation.
|
|
209
|
+
|
|
210
|
+
## Documentation
|
|
211
|
+
|
|
212
|
+
- [Tool Reference](docs/TOOLS.md) - Complete documentation for all 18 tools with examples
|
|
213
|
+
- [Configuration Guide](docs/CONFIGURATION.md) - Configuration options and troubleshooting
|
|
214
|
+
- **Teaching Materials** (PDF):
|
|
215
|
+
- `docs/OmniCortex_QuickStart.pdf` - 3-page quick start guide
|
|
216
|
+
- `docs/OmniCortex_FeatureComparison.pdf` - Comparison with basic memory MCPs
|
|
217
|
+
- `docs/OmniCortex_Philosophy.pdf` - Design principles and inspiration
|
|
218
|
+
- `docs/OmniCortex_CommandReference.pdf` - All tools with parameters
|
|
219
|
+
|
|
220
|
+
### Regenerating PDFs
|
|
221
|
+
|
|
222
|
+
To regenerate the teaching material PDFs:
|
|
223
|
+
|
|
224
|
+
```bash
|
|
225
|
+
# Requires reportlab
|
|
226
|
+
pip install reportlab
|
|
227
|
+
|
|
228
|
+
# Generate all 4 PDFs
|
|
229
|
+
python docs/create_pdfs.py
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
The PDFs use a light theme with blue/purple/green accents. Edit `docs/create_pdfs.py` to customize colors or content.
|
|
233
|
+
|
|
163
234
|
## Development
|
|
164
235
|
|
|
165
236
|
```bash
|
|
@@ -174,6 +245,14 @@ black src tests
|
|
|
174
245
|
ruff check src tests
|
|
175
246
|
```
|
|
176
247
|
|
|
248
|
+
## Security
|
|
249
|
+
|
|
250
|
+
Omni Cortex v1.0.3 has been security reviewed:
|
|
251
|
+
- All SQL queries use parameterized statements
|
|
252
|
+
- Input validation via Pydantic models
|
|
253
|
+
- Model name validation prevents code injection
|
|
254
|
+
- YAML loading uses `safe_load()`
|
|
255
|
+
|
|
177
256
|
## License
|
|
178
257
|
|
|
179
258
|
MIT
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
"""Chat service for natural language queries about memories using Gemini Flash."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
import google.generativeai as genai
|
|
7
|
+
from dotenv import load_dotenv
|
|
8
|
+
|
|
9
|
+
from database import search_memories, get_memories
|
|
10
|
+
from models import FilterParams
|
|
11
|
+
|
|
12
|
+
# Load environment variables
|
|
13
|
+
load_dotenv()
|
|
14
|
+
|
|
15
|
+
# Configure Gemini
|
|
16
|
+
_api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
|
|
17
|
+
_model: Optional[genai.GenerativeModel] = None
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_model() -> Optional[genai.GenerativeModel]:
    """Return the shared Gemini model, creating it on first use.

    Returns None when no API key is configured.
    """
    global _model
    if _model is not None:
        return _model
    if not _api_key:
        return None
    genai.configure(api_key=_api_key)
    _model = genai.GenerativeModel("gemini-2.0-flash-exp")
    return _model
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def is_available() -> bool:
    """Report whether the chat service can be used (an API key was found)."""
    key_present = _api_key is not None
    return key_present
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
async def ask_about_memories(
    db_path: str,
    question: str,
    max_memories: int = 10,
) -> dict:
    """Ask a natural language question about memories.

    Relevant memories are located via keyword search over the database
    (falling back to the most recently accessed ones when the search is
    empty) and passed to Gemini as context for the answer.

    Args:
        db_path: Path to the database file
        question: The user's question
        max_memories: Maximum memories to include in context

    Returns:
        Dict with keys ``answer`` (str), ``sources`` (list of memory
        summary dicts), and ``error`` (None on success, otherwise a short
        error code string).
    """
    if not is_available():
        return {
            "answer": "Chat is not available. Please configure GEMINI_API_KEY or GOOGLE_API_KEY environment variable.",
            "sources": [],
            "error": "api_key_missing",
        }

    model = get_model()
    if not model:
        return {
            "answer": "Failed to initialize Gemini model.",
            "sources": [],
            "error": "model_init_failed",
        }

    memories = _find_relevant_memories(db_path, question, max_memories)
    if not memories:
        return {
            "answer": "No memories found in the database to answer your question.",
            "sources": [],
            "error": None,
        }

    context_str, sources = _build_memory_context(memories)
    prompt = _build_prompt(context_str, question)

    # NOTE(review): generate_content is a blocking call inside an async
    # function, which stalls the event loop for the duration of the request.
    # If the installed SDK provides generate_content_async, awaiting that
    # instead would fix this -- confirm the SDK version before changing.
    try:
        response = model.generate_content(prompt)
        answer = response.text
    except Exception as e:
        return {
            "answer": f"Failed to generate response: {str(e)}",
            "sources": sources,
            "error": "generation_failed",
        }

    return {
        "answer": answer,
        "sources": sources,
        "error": None,
    }


def _find_relevant_memories(db_path: str, question: str, max_memories: int) -> list:
    """Search memories matching the question; fall back to most recently accessed."""
    memories = search_memories(db_path, question, limit=max_memories)
    if not memories:
        filters = FilterParams(
            sort_by="last_accessed",
            sort_order="desc",
            limit=max_memories,
            offset=0,
        )
        memories = get_memories(db_path, filters)
    return memories


def _build_memory_context(memories) -> tuple:
    """Format memories into prompt context and collect source summaries.

    Returns (context_str, sources): the memories joined with ``---``
    separators, and a list of per-memory summary dicts for the API response.
    """
    memory_context = []
    sources = []
    for i, mem in enumerate(memories, 1):
        memory_context.append(f"""
Memory {i}:
- Type: {mem.memory_type}
- Content: {mem.content}
- Context: {mem.context or 'N/A'}
- Tags: {', '.join(mem.tags) if mem.tags else 'N/A'}
- Status: {mem.status}
- Importance: {mem.importance_score}/100
""")
        sources.append({
            "id": mem.id,
            "type": mem.memory_type,
            "content_preview": mem.content[:100] + "..." if len(mem.content) > 100 else mem.content,
            "tags": mem.tags,
        })
    return "\n---\n".join(memory_context), sources


def _build_prompt(context_str: str, question: str) -> str:
    """Assemble the full instruction prompt sent to Gemini."""
    return f"""You are a helpful assistant that answers questions about stored memories and knowledge.

The user has a collection of memories that capture decisions, solutions, insights, errors, preferences, and other learnings from their work.

Here are the relevant memories:

{context_str}

User question: {question}

Instructions:
1. Answer the question based on the memories provided
2. If the memories don't contain relevant information, say so
3. Reference specific memories when appropriate (e.g., "According to memory 1...")
4. Be concise but thorough
5. If the question is asking for a recommendation or decision, synthesize from multiple memories if possible

Answer:"""
|