comemo 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comemo-1.0.0/.gitignore +77 -0
- comemo-1.0.0/PKG-INFO +187 -0
- comemo-1.0.0/README.md +164 -0
- comemo-1.0.0/comemo/__init__.py +39 -0
- comemo-1.0.0/comemo/client.py +383 -0
- comemo-1.0.0/comemo/exceptions.py +27 -0
- comemo-1.0.0/comemo/models.py +83 -0
- comemo-1.0.0/pyproject.toml +37 -0
comemo-1.0.0/.gitignore
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*$py.class
|
|
5
|
+
*.so
|
|
6
|
+
.Python
|
|
7
|
+
build/
|
|
8
|
+
develop-eggs/
|
|
9
|
+
dist/
|
|
10
|
+
downloads/
|
|
11
|
+
eggs/
|
|
12
|
+
.eggs/
|
|
13
|
+
lib/
|
|
14
|
+
lib64/
|
|
15
|
+
parts/
|
|
16
|
+
sdist/
|
|
17
|
+
var/
|
|
18
|
+
wheels/
|
|
19
|
+
pip-wheel-metadata/
|
|
20
|
+
share/python-wheels/
|
|
21
|
+
*.egg-info/
|
|
22
|
+
.installed.cfg
|
|
23
|
+
*.egg
|
|
24
|
+
MANIFEST
|
|
25
|
+
|
|
26
|
+
# Virtual Environments
|
|
27
|
+
.venv/
|
|
28
|
+
venv/
|
|
29
|
+
env/
|
|
30
|
+
ENV/
|
|
31
|
+
env.bak/
|
|
32
|
+
venv.bak/
|
|
33
|
+
|
|
34
|
+
# IDEs
|
|
35
|
+
.vscode/
|
|
36
|
+
.idea/
|
|
37
|
+
*.swp
|
|
38
|
+
*.swo
|
|
39
|
+
*~
|
|
40
|
+
.project
|
|
41
|
+
.pydevproject
|
|
42
|
+
.settings/
|
|
43
|
+
|
|
44
|
+
# Jupyter Notebook
|
|
45
|
+
.ipynb_checkpoints
|
|
46
|
+
|
|
47
|
+
# macOS
|
|
48
|
+
.DS_Store
|
|
49
|
+
.AppleDouble
|
|
50
|
+
.LSOverride
|
|
51
|
+
|
|
52
|
+
# Database
|
|
53
|
+
chroma_db/
|
|
54
|
+
*.db
|
|
55
|
+
*.sqlite
|
|
56
|
+
*.sqlite3
|
|
57
|
+
|
|
58
|
+
# Graph files
|
|
59
|
+
*.gpickle
|
|
60
|
+
|
|
61
|
+
# Data
|
|
62
|
+
data/
|
|
63
|
+
|
|
64
|
+
# Environment variables
|
|
65
|
+
.env
|
|
66
|
+
.env.local
|
|
67
|
+
|
|
68
|
+
# Logs
|
|
69
|
+
*.log
|
|
70
|
+
logs/
|
|
71
|
+
|
|
72
|
+
# Testing
|
|
73
|
+
.pytest_cache/
|
|
74
|
+
.coverage
|
|
75
|
+
htmlcov/
|
|
76
|
+
.tox/
|
|
77
|
+
|
comemo-1.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: comemo
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Python SDK for CoMemo
|
|
5
|
+
Project-URL: Homepage, https://github.com/hasala/cognitive-memory-sdk
|
|
6
|
+
Project-URL: Documentation, https://github.com/hasala/cognitive-memory-sdk#readme
|
|
7
|
+
Project-URL: Issues, https://github.com/hasala/cognitive-memory-sdk/issues
|
|
8
|
+
Author: Hasala
|
|
9
|
+
License-Expression: MIT
|
|
10
|
+
Keywords: ai,cognitive,memory,sdk
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
20
|
+
Requires-Python: >=3.10
|
|
21
|
+
Requires-Dist: httpx>=0.24.0
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
|
|
24
|
+
# CoMemo SDK
|
|
25
|
+
|
|
26
|
+
Python SDK for [CoMemo](https://github.com/hasala/cognitive-memory) — a multi-module hybrid memory system powered by Pinecone and Neo4j.
|
|
27
|
+
|
|
28
|
+
## Installation
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
pip install comemo
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Quick Start
|
|
35
|
+
|
|
36
|
+
```python
|
|
37
|
+
from comemo import MemoryClient
|
|
38
|
+
|
|
39
|
+
# Default (uses server-side LLM config)
|
|
40
|
+
client = MemoryClient()
|
|
41
|
+
|
|
42
|
+
# With your own LLM API key and model
|
|
43
|
+
client = MemoryClient(
|
|
44
|
+
api_key="sk-your-openai-key",
|
|
45
|
+
model="gpt-4o-mini"
|
|
46
|
+
)
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Core Memory
|
|
50
|
+
|
|
51
|
+
```python
|
|
52
|
+
# Add a memory
|
|
53
|
+
result = client.add_memory("john", "chat_01", "I work at Google as a software engineer")
|
|
54
|
+
print(result.status) # "success"
|
|
55
|
+
print(result.action) # "NEW"
|
|
56
|
+
print(result.memory_id) # "mem_abc123"
|
|
57
|
+
|
|
58
|
+
# Clear all user memories
|
|
59
|
+
client.delete_user_memories("john")
|
|
60
|
+
|
|
61
|
+
# Clear session memories
|
|
62
|
+
client.delete_session_memories("john", "chat_01")
|
|
63
|
+
|
|
64
|
+
# Delete a single memory
|
|
65
|
+
client.delete_memory("mem_abc123")
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Retrieval
|
|
69
|
+
|
|
70
|
+
```python
|
|
71
|
+
# Retrieve memories with graph expansion
|
|
72
|
+
result = client.retrieve_advanced(
|
|
73
|
+
user_id="john",
|
|
74
|
+
session_id="chat_01",
|
|
75
|
+
query="career and hobbies",
|
|
76
|
+
top_k=10,
|
|
77
|
+
expand_context=True,
|
|
78
|
+
expand_graph=True,
|
|
79
|
+
min_score=0.3,
|
|
80
|
+
reinforce=True
|
|
81
|
+
)
|
|
82
|
+
for m in result.memories:
|
|
83
|
+
print(f"{m.fact} | semantic={m.semantic_similarity:.2f} graph={m.graph_relevance:.2f}")
|
|
84
|
+
|
|
85
|
+
# List user memories
|
|
86
|
+
result = client.list_memories("john", query="work", top_k=10)
|
|
87
|
+
for m in result.memories:
|
|
88
|
+
print(f"{m.fact} (score: {m.score:.2f})")
|
|
89
|
+
|
|
90
|
+
# Simple retrieve
|
|
91
|
+
result = client.retrieve("john", "chat_01", "Where does John work?", top_k=5)
|
|
92
|
+
for m in result.memories:
|
|
93
|
+
print(f"{m.fact} (score: {m.score:.2f})")
|
|
94
|
+
|
|
95
|
+
# Retrieve with summary
|
|
96
|
+
summary = client.retrieve_summary("john", "chat_01", "Tell me about John's career")
|
|
97
|
+
print(summary.summary)
|
|
98
|
+
for m in summary.supporting_memories:
|
|
99
|
+
print(f" - {m.fact}")
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
### Maintenance & Governance
|
|
103
|
+
|
|
104
|
+
```python
|
|
105
|
+
# Run all maintenance tasks
|
|
106
|
+
result = client.run_maintenance(user_id="john")
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### System
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
# Clear all memory
|
|
113
|
+
client.delete_all_memories(confirm="DELETE_ALL_DATA")
|
|
114
|
+
|
|
115
|
+
# Get scheduler status
|
|
116
|
+
status = client.scheduler_status()
|
|
117
|
+
|
|
118
|
+
# Health check
|
|
119
|
+
health = client.health()
|
|
120
|
+
print(health.status) # "healthy"
|
|
121
|
+
print(health.version) # "5.1.0"
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
## Configuration
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
# With LLM API key and model
|
|
128
|
+
client = MemoryClient(api_key="sk-your-openai-key", model="gpt-4o-mini")
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
## Context Manager
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
with MemoryClient() as client:
|
|
135
|
+
client.add_memory("john", "chat_01", "My favorite color is blue")
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
## Error Handling
|
|
139
|
+
|
|
140
|
+
```python
|
|
141
|
+
from comemo import MemoryClient, ValidationError, NotFoundError
|
|
142
|
+
|
|
143
|
+
try:
|
|
144
|
+
client.retrieve("john", "chat_01", "")
|
|
145
|
+
except ValidationError as e:
|
|
146
|
+
print(f"Bad request: {e.message}")
|
|
147
|
+
except NotFoundError as e:
|
|
148
|
+
print(f"Not found: {e.message}")
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
## All Methods
|
|
152
|
+
|
|
153
|
+
### Core Memory
|
|
154
|
+
|
|
155
|
+
| Method | Description |
|
|
156
|
+
|---|---|
|
|
157
|
+
| `add_memory(user_id, session_id, text)` | Add Memory |
|
|
158
|
+
| `delete_user_memories(user_id)` | Clear User Memory |
|
|
159
|
+
| `delete_session_memories(user_id, session_id)` | Clear Session Memory |
|
|
160
|
+
| `delete_memory(memory_id)` | Delete Single Memory |
|
|
161
|
+
|
|
162
|
+
### Retrieval
|
|
163
|
+
|
|
164
|
+
| Method | Description |
|
|
165
|
+
|---|---|
|
|
166
|
+
| `retrieve_advanced(user_id, session_id, query, ...)` | Retrieve Memories |
|
|
167
|
+
| `list_memories(user_id, query, top_k=10)` | List User Memories |
|
|
168
|
+
| `retrieve(user_id, session_id, query, top_k=5)` | Simple Retrieve Memories |
|
|
169
|
+
| `retrieve_summary(user_id, session_id, query, top_k=5)` | Retrieve With Summary |
|
|
170
|
+
|
|
171
|
+
### Maintenance & Governance
|
|
172
|
+
|
|
173
|
+
| Method | Description |
|
|
174
|
+
|---|---|
|
|
175
|
+
| `run_maintenance(user_id="")` | Run All Maintenance |
|
|
176
|
+
|
|
177
|
+
### System
|
|
178
|
+
|
|
179
|
+
| Method | Description |
|
|
180
|
+
|---|---|
|
|
181
|
+
| `delete_all_memories(confirm)` | Clear All Memory |
|
|
182
|
+
| `scheduler_status()` | Get Scheduler Status |
|
|
183
|
+
| `health()` | Health Check |
|
|
184
|
+
|
|
185
|
+
## License
|
|
186
|
+
|
|
187
|
+
MIT
|
comemo-1.0.0/README.md
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
# CoMemo SDK
|
|
2
|
+
|
|
3
|
+
Python SDK for [CoMemo](https://github.com/hasala/cognitive-memory) — a multi-module hybrid memory system powered by Pinecone and Neo4j.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install comemo
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
from comemo import MemoryClient
|
|
15
|
+
|
|
16
|
+
# Default (uses server-side LLM config)
|
|
17
|
+
client = MemoryClient()
|
|
18
|
+
|
|
19
|
+
# With your own LLM API key and model
|
|
20
|
+
client = MemoryClient(
|
|
21
|
+
api_key="sk-your-openai-key",
|
|
22
|
+
model="gpt-4o-mini"
|
|
23
|
+
)
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
### Core Memory
|
|
27
|
+
|
|
28
|
+
```python
|
|
29
|
+
# Add a memory
|
|
30
|
+
result = client.add_memory("john", "chat_01", "I work at Google as a software engineer")
|
|
31
|
+
print(result.status) # "success"
|
|
32
|
+
print(result.action) # "NEW"
|
|
33
|
+
print(result.memory_id) # "mem_abc123"
|
|
34
|
+
|
|
35
|
+
# Clear all user memories
|
|
36
|
+
client.delete_user_memories("john")
|
|
37
|
+
|
|
38
|
+
# Clear session memories
|
|
39
|
+
client.delete_session_memories("john", "chat_01")
|
|
40
|
+
|
|
41
|
+
# Delete a single memory
|
|
42
|
+
client.delete_memory("mem_abc123")
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
### Retrieval
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
# Retrieve memories with graph expansion
|
|
49
|
+
result = client.retrieve_advanced(
|
|
50
|
+
user_id="john",
|
|
51
|
+
session_id="chat_01",
|
|
52
|
+
query="career and hobbies",
|
|
53
|
+
top_k=10,
|
|
54
|
+
expand_context=True,
|
|
55
|
+
expand_graph=True,
|
|
56
|
+
min_score=0.3,
|
|
57
|
+
reinforce=True
|
|
58
|
+
)
|
|
59
|
+
for m in result.memories:
|
|
60
|
+
print(f"{m.fact} | semantic={m.semantic_similarity:.2f} graph={m.graph_relevance:.2f}")
|
|
61
|
+
|
|
62
|
+
# List user memories
|
|
63
|
+
result = client.list_memories("john", query="work", top_k=10)
|
|
64
|
+
for m in result.memories:
|
|
65
|
+
print(f"{m.fact} (score: {m.score:.2f})")
|
|
66
|
+
|
|
67
|
+
# Simple retrieve
|
|
68
|
+
result = client.retrieve("john", "chat_01", "Where does John work?", top_k=5)
|
|
69
|
+
for m in result.memories:
|
|
70
|
+
print(f"{m.fact} (score: {m.score:.2f})")
|
|
71
|
+
|
|
72
|
+
# Retrieve with summary
|
|
73
|
+
summary = client.retrieve_summary("john", "chat_01", "Tell me about John's career")
|
|
74
|
+
print(summary.summary)
|
|
75
|
+
for m in summary.supporting_memories:
|
|
76
|
+
print(f" - {m.fact}")
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
### Maintenance & Governance
|
|
80
|
+
|
|
81
|
+
```python
|
|
82
|
+
# Run all maintenance tasks
|
|
83
|
+
result = client.run_maintenance(user_id="john")
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
### System
|
|
87
|
+
|
|
88
|
+
```python
|
|
89
|
+
# Clear all memory
|
|
90
|
+
client.delete_all_memories(confirm="DELETE_ALL_DATA")
|
|
91
|
+
|
|
92
|
+
# Get scheduler status
|
|
93
|
+
status = client.scheduler_status()
|
|
94
|
+
|
|
95
|
+
# Health check
|
|
96
|
+
health = client.health()
|
|
97
|
+
print(health.status) # "healthy"
|
|
98
|
+
print(health.version) # "5.1.0"
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
## Configuration
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
# With LLM API key and model
|
|
105
|
+
client = MemoryClient(api_key="sk-your-openai-key", model="gpt-4o-mini")
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
## Context Manager
|
|
109
|
+
|
|
110
|
+
```python
|
|
111
|
+
with MemoryClient() as client:
|
|
112
|
+
client.add_memory("john", "chat_01", "My favorite color is blue")
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
## Error Handling
|
|
116
|
+
|
|
117
|
+
```python
|
|
118
|
+
from comemo import MemoryClient, ValidationError, NotFoundError
|
|
119
|
+
|
|
120
|
+
try:
|
|
121
|
+
client.retrieve("john", "chat_01", "")
|
|
122
|
+
except ValidationError as e:
|
|
123
|
+
print(f"Bad request: {e.message}")
|
|
124
|
+
except NotFoundError as e:
|
|
125
|
+
print(f"Not found: {e.message}")
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
## All Methods
|
|
129
|
+
|
|
130
|
+
### Core Memory
|
|
131
|
+
|
|
132
|
+
| Method | Description |
|
|
133
|
+
|---|---|
|
|
134
|
+
| `add_memory(user_id, session_id, text)` | Add Memory |
|
|
135
|
+
| `delete_user_memories(user_id)` | Clear User Memory |
|
|
136
|
+
| `delete_session_memories(user_id, session_id)` | Clear Session Memory |
|
|
137
|
+
| `delete_memory(memory_id)` | Delete Single Memory |
|
|
138
|
+
|
|
139
|
+
### Retrieval
|
|
140
|
+
|
|
141
|
+
| Method | Description |
|
|
142
|
+
|---|---|
|
|
143
|
+
| `retrieve_advanced(user_id, session_id, query, ...)` | Retrieve Memories |
|
|
144
|
+
| `list_memories(user_id, query, top_k=10)` | List User Memories |
|
|
145
|
+
| `retrieve(user_id, session_id, query, top_k=5)` | Simple Retrieve Memories |
|
|
146
|
+
| `retrieve_summary(user_id, session_id, query, top_k=5)` | Retrieve With Summary |
|
|
147
|
+
|
|
148
|
+
### Maintenance & Governance
|
|
149
|
+
|
|
150
|
+
| Method | Description |
|
|
151
|
+
|---|---|
|
|
152
|
+
| `run_maintenance(user_id="")` | Run All Maintenance |
|
|
153
|
+
|
|
154
|
+
### System
|
|
155
|
+
|
|
156
|
+
| Method | Description |
|
|
157
|
+
|---|---|
|
|
158
|
+
| `delete_all_memories(confirm)` | Clear All Memory |
|
|
159
|
+
| `scheduler_status()` | Get Scheduler Status |
|
|
160
|
+
| `health()` | Health Check |
|
|
161
|
+
|
|
162
|
+
## License
|
|
163
|
+
|
|
164
|
+
MIT
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"""CoMemo SDK - Python client for the CoMemo API."""
|
|
2
|
+
|
|
3
|
+
from .client import MemoryClient
|
|
4
|
+
from .exceptions import (
|
|
5
|
+
AuthenticationError,
|
|
6
|
+
ComemoError,
|
|
7
|
+
NotFoundError,
|
|
8
|
+
ServerError,
|
|
9
|
+
ValidationError,
|
|
10
|
+
)
|
|
11
|
+
from .models import (
|
|
12
|
+
AddMemoryResult,
|
|
13
|
+
DeleteResult,
|
|
14
|
+
DetailedMemory,
|
|
15
|
+
DetailedRetrieveResult,
|
|
16
|
+
HealthResult,
|
|
17
|
+
Memory,
|
|
18
|
+
RetrieveResult,
|
|
19
|
+
RetrieveSummaryResult,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
__version__ = "1.0.0"
|
|
23
|
+
|
|
24
|
+
__all__ = [
|
|
25
|
+
"MemoryClient",
|
|
26
|
+
"AddMemoryResult",
|
|
27
|
+
"DeleteResult",
|
|
28
|
+
"DetailedMemory",
|
|
29
|
+
"DetailedRetrieveResult",
|
|
30
|
+
"HealthResult",
|
|
31
|
+
"Memory",
|
|
32
|
+
"RetrieveResult",
|
|
33
|
+
"RetrieveSummaryResult",
|
|
34
|
+
"ComemoError",
|
|
35
|
+
"AuthenticationError",
|
|
36
|
+
"NotFoundError",
|
|
37
|
+
"ServerError",
|
|
38
|
+
"ValidationError",
|
|
39
|
+
]
|
|
@@ -0,0 +1,383 @@
|
|
|
1
|
+
"""CoMemo SDK client."""
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
|
|
5
|
+
from .exceptions import (
|
|
6
|
+
AuthenticationError,
|
|
7
|
+
ComemoError,
|
|
8
|
+
NotFoundError,
|
|
9
|
+
ServerError,
|
|
10
|
+
ValidationError,
|
|
11
|
+
)
|
|
12
|
+
from .models import (
|
|
13
|
+
AddMemoryResult,
|
|
14
|
+
DeleteResult,
|
|
15
|
+
DetailedMemory,
|
|
16
|
+
DetailedRetrieveResult,
|
|
17
|
+
HealthResult,
|
|
18
|
+
Memory,
|
|
19
|
+
RetrieveResult,
|
|
20
|
+
RetrieveSummaryResult,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class MemoryClient:
    """Client for the CoMemo API.

    Args:
        base_url: Base URL of the CoMemo API. Defaults to "https://xvert.io".
        api_key: Optional LLM API key (e.g. OpenAI key). Sent with requests that use LLM.
        model: Optional LLM model name (e.g. "gpt-4o-mini"). Sent with requests that use LLM.
        timeout: Request timeout in seconds. Defaults to 30.
    """

    def __init__(
        self,
        base_url: str = "https://xvert.io",
        api_key: str | None = None,
        model: str | None = None,
        timeout: float = 30,
    ):
        self.base_url = base_url.rstrip("/")
        self.api_key = api_key
        self.model = model
        self.timeout = timeout

        self._client = httpx.Client(
            base_url=self.base_url,
            headers={"Content-Type": "application/json"},
            timeout=timeout,
        )

    # ── Internal Helpers ────────────────────────────────────────────────

    def _llm_fields(self) -> dict:
        """Return api_key/model fields to include in request bodies."""
        fields = {}
        if self.api_key:
            fields["api_key"] = self.api_key
        if self.model:
            fields["model"] = self.model
        return fields

    def _handle_response(self, response: httpx.Response) -> dict:
        """Parse response and raise appropriate exceptions on errors.

        Raises:
            AuthenticationError: On HTTP 401/403.
            NotFoundError: On HTTP 404.
            ValidationError: On HTTP 400/422.
            ServerError: On HTTP 5xx.
            ComemoError: On any other HTTP error status.
        """
        if response.status_code >= 400:
            body = None
            try:
                body = response.json()
                detail = body.get("detail", response.text)
            except Exception:
                # Non-JSON (or non-object) error body: fall back to raw text.
                detail = response.text

            status = response.status_code
            # Attach the parsed body so callers can inspect it via exc.response.
            if status in (401, 403):
                raise AuthenticationError(detail, status, body)
            if status == 404:
                raise NotFoundError(detail, status, body)
            if status in (400, 422):
                raise ValidationError(detail, status, body)
            if status >= 500:
                raise ServerError(detail, status, body)
            raise ComemoError(detail, status, body)

        return response.json()

    @staticmethod
    def _simple_memories(items: list) -> list[Memory]:
        """Convert raw memory dicts from a response body into Memory models."""
        return [
            Memory(memory_id=m["memory_id"], fact=m["fact"], score=m["score"])
            for m in items
        ]

    @staticmethod
    def _delete_result(data: dict, user_id: str) -> DeleteResult:
        """Build a DeleteResult from a raw response body."""
        return DeleteResult(
            status=data.get("status", ""),
            user_id=data.get("user_id", user_id),
            memories_deleted=data.get("memories_deleted", 0),
        )

    # ── Core Memory Operations ──────────────────────────────────────────

    def add_memory(self, user_id: str, session_id: str, text: str) -> AddMemoryResult:
        """Extract facts from text and store them as memories.

        Args:
            user_id: User ID for memory isolation.
            session_id: Session ID for context grouping.
            text: Input text to extract facts from.

        Returns:
            AddMemoryResult with status, action taken, memory_id, links created, and score.
        """
        data = self._handle_response(
            self._client.post("/memory", json={
                "user_id": user_id,
                "session_id": session_id,
                "input": text,
                **self._llm_fields(),
            })
        )
        return AddMemoryResult(
            status=data.get("status", ""),
            action=data.get("action", ""),
            memory_id=data.get("memory_id"),
            links_created=data.get("links_created", 0),
            score=data.get("score", 0.0),
        )

    def delete_memory(self, memory_id: str) -> dict:
        """Delete a single memory by ID.

        Args:
            memory_id: The memory identifier to delete.

        Returns:
            Dict with status, memory_id, and deleted flag.
        """
        return self._handle_response(
            self._client.delete(f"/memory/{memory_id}")
        )

    def delete_user_memories(self, user_id: str) -> DeleteResult:
        """Delete all memories for a user.

        Args:
            user_id: The user whose memories to delete.

        Returns:
            DeleteResult with count of deleted memories.
        """
        data = self._handle_response(
            self._client.delete("/memory", params={"user_id": user_id})
        )
        return self._delete_result(data, user_id)

    def delete_session_memories(self, user_id: str, session_id: str) -> DeleteResult:
        """Delete all memories for a specific session.

        Args:
            user_id: The user ID.
            session_id: The session whose memories to delete.

        Returns:
            DeleteResult with count of deleted memories.
        """
        data = self._handle_response(
            self._client.delete(f"/memory/session/{session_id}", params={"user_id": user_id})
        )
        return self._delete_result(data, user_id)

    # ── Retrieval ───────────────────────────────────────────────────────

    def retrieve(self, user_id: str, session_id: str, query: str, top_k: int = 5) -> RetrieveResult:
        """Retrieve top memories matching a query (simple mode).

        Args:
            user_id: User ID for namespace isolation.
            session_id: Current session ID.
            query: Search query text.
            top_k: Maximum number of results (1-50). Defaults to 5.

        Returns:
            RetrieveResult with query, expanded query, and list of memories.
        """
        data = self._handle_response(
            self._client.post("/memories/retrieve", json={
                "user_id": user_id,
                "session_id": session_id,
                "query": query,
                "top_k": top_k,
                **self._llm_fields(),
            })
        )
        return RetrieveResult(
            query=data.get("query", query),
            expanded_query=data.get("expanded_query", ""),
            memories=self._simple_memories(data.get("memories", [])),
        )

    def retrieve_advanced(
        self,
        user_id: str,
        session_id: str,
        query: str,
        top_k: int = 10,
        expand_context: bool = True,
        expand_graph: bool = True,
        min_score: float = 0.1,
        reinforce: bool = False,
    ) -> DetailedRetrieveResult:
        """Retrieve memories with full scoring details and advanced options.

        Args:
            user_id: User ID for namespace isolation.
            session_id: Current session ID.
            query: Search query text.
            top_k: Maximum number of results (1-50). Defaults to 10.
            expand_context: Use session context for query expansion. Defaults to True.
            expand_graph: Expand via Neo4j graph relationships. Defaults to True.
            min_score: Minimum score threshold (0.0-1.0). Defaults to 0.1.
            reinforce: Reinforce retrieved memories. Defaults to False.

        Returns:
            DetailedRetrieveResult with full scoring breakdown per memory.
        """
        data = self._handle_response(
            self._client.post("/memory/retrieve", json={
                "user_id": user_id,
                "session_id": session_id,
                "query": query,
                "top_k": top_k,
                "expand_context": expand_context,
                "expand_graph": expand_graph,
                "min_score": min_score,
                "reinforce": reinforce,
                **self._llm_fields(),
            })
        )
        memories = [
            DetailedMemory(
                memory_id=m["memory_id"],
                fact=m["fact"],
                domain=m.get("domain", ""),
                final_score=m.get("final_score", 0.0),
                semantic_similarity=m.get("semantic_similarity", 0.0),
                graph_relevance=m.get("graph_relevance", 0.0),
                memory_strength=m.get("memory_strength", 0.0),
                recency_score=m.get("recency_score", 0.0),
                session_boost=m.get("session_boost", 0.0),
                source=m.get("source", ""),
                session_id=m.get("session_id", ""),
            )
            for m in data.get("memories", [])
        ]
        return DetailedRetrieveResult(
            query=data.get("query", query),
            expanded_query=data.get("expanded_query", ""),
            user_id=data.get("user_id", user_id),
            session_id=data.get("session_id", session_id),
            memories=memories,
            total_candidates=data.get("total_candidates", 0),
            filtered_count=data.get("filtered_count", 0),
            retrieval_time_ms=data.get("retrieval_time_ms", 0.0),
        )

    def retrieve_summary(
        self, user_id: str, session_id: str, query: str, top_k: int = 5
    ) -> RetrieveSummaryResult:
        """Retrieve memories and get an LLM-generated summary.

        Args:
            user_id: User ID for namespace isolation.
            session_id: Current session ID.
            query: Search query text.
            top_k: Number of memories to summarize (1-20). Defaults to 5.

        Returns:
            RetrieveSummaryResult with summary text and supporting memories.
        """
        data = self._handle_response(
            self._client.post("/memories/retrieve-summary", json={
                "user_id": user_id,
                "session_id": session_id,
                "query": query,
                "top_k": top_k,
                **self._llm_fields(),
            })
        )
        return RetrieveSummaryResult(
            query=data.get("query", query),
            summary=data.get("summary", ""),
            supporting_memories=self._simple_memories(data.get("supporting_memories", [])),
        )

    def list_memories(self, user_id: str, query: str, top_k: int = 10) -> RetrieveResult:
        """List user memories across all sessions.

        Args:
            user_id: User ID.
            query: Search query text.
            top_k: Maximum number of results (1-50). Defaults to 10.

        Returns:
            RetrieveResult with matching memories across all sessions.
        """
        data = self._handle_response(
            self._client.get("/memories", params={
                "user_id": user_id,
                "query": query,
                "top_k": top_k,
            })
        )
        return RetrieveResult(
            query=data.get("query", query),
            expanded_query=data.get("expanded_query", ""),
            memories=self._simple_memories(data.get("memories", [])),
        )

    # ── Maintenance & Governance ────────────────────────────────────────

    def run_maintenance(self, user_id: str = "") -> dict:
        """Manually trigger all maintenance tasks (decay, forgetting, summarization).

        Args:
            user_id: Optional user ID to scope maintenance. Empty string for all users.

        Returns:
            Dict with job results for each maintenance task.
        """
        return self._handle_response(
            self._client.post("/memory/governance/run-all", params={"user_id": user_id})
        )

    # ── System ──────────────────────────────────────────────────────────

    def delete_all_memories(self, confirm: str = "DELETE_ALL_DATA") -> dict:
        """Delete ALL memories from ALL users. Destructive and irreversible.

        Args:
            confirm: Must be exactly "DELETE_ALL_DATA" to confirm.

        Returns:
            Dict with status, message, and count of deleted memories.
        """
        return self._handle_response(
            self._client.delete("/memory/all", params={"confirm": confirm})
        )

    def scheduler_status(self) -> dict:
        """Get current status of the maintenance scheduler.

        Returns:
            Dict with registered jobs, next run times, and recent history.
        """
        return self._handle_response(self._client.get("/scheduler/status"))

    def health(self) -> HealthResult:
        """Check API health and status.

        Returns:
            HealthResult with status, version, architecture, and scheduler info.
        """
        data = self._handle_response(self._client.get("/health"))
        return HealthResult(
            status=data.get("status", ""),
            version=data.get("version", ""),
            architecture=data.get("architecture", ""),
            scheduler=data.get("scheduler", {}),
        )

    # ── Context Manager ─────────────────────────────────────────────────

    def close(self):
        """Close the underlying HTTP client."""
        self._client.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Exceptions for the CoMemo SDK."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class ComemoError(Exception):
|
|
5
|
+
"""Base exception for all SDK errors."""
|
|
6
|
+
|
|
7
|
+
def __init__(self, message: str, status_code: int | None = None, response: dict | None = None):
|
|
8
|
+
self.message = message
|
|
9
|
+
self.status_code = status_code
|
|
10
|
+
self.response = response
|
|
11
|
+
super().__init__(self.message)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class AuthenticationError(ComemoError):
    """Raised when authentication fails (HTTP 401 or 403)."""
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class NotFoundError(ComemoError):
    """Raised when the requested resource does not exist (HTTP 404)."""
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class ValidationError(ComemoError):
    """Raised when the request is invalid or fails validation (HTTP 400 or 422)."""
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class ServerError(ComemoError):
    """Raised when the server fails with an HTTP 5xx error."""
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"""Response models for the CoMemo SDK."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@dataclass
class AddMemoryResult:
    """Result of adding a memory."""
    status: str  # e.g. "success"
    action: str  # action taken by the server, e.g. "NEW"
    memory_id: str | None  # ID of the stored memory, if one was created
    links_created: int  # number of graph links created
    score: float  # score assigned by the server to the stored memory
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class Memory:
    """A retrieved memory."""
    memory_id: str  # unique memory identifier
    fact: str  # the stored fact text
    score: float  # relevance score for the query
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class DetailedMemory:
    """A retrieved memory with detailed scoring breakdown."""
    memory_id: str  # unique memory identifier
    fact: str  # the stored fact text
    domain: str  # domain/category label (server-defined)
    final_score: float  # combined score
    semantic_similarity: float  # semantic similarity score component
    graph_relevance: float  # graph relevance score component
    memory_strength: float  # memory strength score component
    recency_score: float  # recency score component
    session_boost: float  # session boost score component
    source: str  # source label (server-defined)
    session_id: str  # session the memory belongs to
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass
class RetrieveResult:
    """Result of a simple retrieval."""
    query: str  # original query text
    expanded_query: str  # server-expanded query ("" when absent)
    memories: list[Memory]  # matching memories
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class DetailedRetrieveResult:
    """Result of an advanced retrieval with full scoring details."""
    query: str  # original query text
    expanded_query: str  # server-expanded query
    user_id: str  # user the retrieval was scoped to
    session_id: str  # session the retrieval was scoped to
    memories: list[DetailedMemory]  # memories with per-component scores
    total_candidates: int  # candidates considered by the server
    filtered_count: int  # result count after filtering
    retrieval_time_ms: float  # server-side retrieval time in milliseconds
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass
class RetrieveSummaryResult:
    """Result of a retrieval with LLM summary."""
    query: str  # original query text
    summary: str  # LLM-generated summary text
    supporting_memories: list[Memory]  # memories the summary was based on
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@dataclass
class DeleteResult:
    """Result of a delete operation."""
    status: str  # e.g. "success"
    user_id: str  # user whose memories were deleted
    memories_deleted: int  # number of memories removed
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
@dataclass
class HealthResult:
    """Health check result."""
    status: str  # e.g. "healthy"
    version: str  # server version string, e.g. "5.1.0"
    architecture: str  # server architecture description
    scheduler: dict  # scheduler status details (server-defined shape)
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "comemo"
|
|
7
|
+
version = "1.0.0"
|
|
8
|
+
description = "Python SDK for CoMemo"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.10"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Hasala" },
|
|
14
|
+
]
|
|
15
|
+
keywords = ["memory", "cognitive", "ai", "sdk"]
|
|
16
|
+
classifiers = [
|
|
17
|
+
"Development Status :: 4 - Beta",
|
|
18
|
+
"Intended Audience :: Developers",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"Programming Language :: Python :: 3.10",
|
|
22
|
+
"Programming Language :: Python :: 3.11",
|
|
23
|
+
"Programming Language :: Python :: 3.12",
|
|
24
|
+
"Programming Language :: Python :: 3.13",
|
|
25
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
26
|
+
]
|
|
27
|
+
dependencies = [
|
|
28
|
+
"httpx>=0.24.0",
|
|
29
|
+
]
|
|
30
|
+
|
|
31
|
+
[tool.hatch.build.targets.wheel]
|
|
32
|
+
packages = ["comemo"]
|
|
33
|
+
|
|
34
|
+
[project.urls]
|
|
35
|
+
Homepage = "https://github.com/hasala/cognitive-memory-sdk"
|
|
36
|
+
Documentation = "https://github.com/hasala/cognitive-memory-sdk#readme"
|
|
37
|
+
Issues = "https://github.com/hasala/cognitive-memory-sdk/issues"
|