kore-memory 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kore_memory-0.2.0/.gitignore +42 -0
- kore_memory-0.2.0/LICENSE +21 -0
- kore_memory-0.2.0/PKG-INFO +196 -0
- kore_memory-0.2.0/README.md +166 -0
- kore_memory-0.2.0/pyproject.toml +51 -0
- kore_memory-0.2.0/requirements.txt +5 -0
- kore_memory-0.2.0/scripts/import_memory.py +135 -0
- kore_memory-0.2.0/src/__init__.py +1 -0
- kore_memory-0.2.0/src/auth.py +107 -0
- kore_memory-0.2.0/src/cli.py +37 -0
- kore_memory-0.2.0/src/compressor.py +146 -0
- kore_memory-0.2.0/src/database.py +84 -0
- kore_memory-0.2.0/src/decay.py +69 -0
- kore_memory-0.2.0/src/embedder.py +50 -0
- kore_memory-0.2.0/src/main.py +138 -0
- kore_memory-0.2.0/src/models.py +68 -0
- kore_memory-0.2.0/src/repository.py +269 -0
- kore_memory-0.2.0/src/scorer.py +70 -0
- kore_memory-0.2.0/start.sh +28 -0
- kore_memory-0.2.0/tests/__init__.py +0 -0
- kore_memory-0.2.0/tests/test_api.py +147 -0
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*.pyo
|
|
5
|
+
*.pyd
|
|
6
|
+
.Python
|
|
7
|
+
*.egg-info/
|
|
8
|
+
dist/
|
|
9
|
+
build/
|
|
10
|
+
.eggs/
|
|
11
|
+
|
|
12
|
+
# Virtual environment
|
|
13
|
+
.venv/
|
|
14
|
+
venv/
|
|
15
|
+
env/
|
|
16
|
+
|
|
17
|
+
# Data & secrets — NEVER commit
|
|
18
|
+
data/
|
|
19
|
+
logs/
|
|
20
|
+
*.db
|
|
21
|
+
*.db-shm
|
|
22
|
+
*.db-wal
|
|
23
|
+
.api_key
|
|
24
|
+
*.key
|
|
25
|
+
*.secret
|
|
26
|
+
.env
|
|
27
|
+
.env.*
|
|
28
|
+
|
|
29
|
+
# IDE
|
|
30
|
+
.vscode/
|
|
31
|
+
.idea/
|
|
32
|
+
*.swp
|
|
33
|
+
*.swo
|
|
34
|
+
|
|
35
|
+
# Testing
|
|
36
|
+
.pytest_cache/
|
|
37
|
+
.coverage
|
|
38
|
+
htmlcov/
|
|
39
|
+
|
|
40
|
+
# OS
|
|
41
|
+
.DS_Store
|
|
42
|
+
Thumbs.db
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Juan Auriti
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: kore-memory
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: The memory layer that thinks like a human: remembers what matters, forgets what doesn't, and never calls home.
|
|
5
|
+
Project-URL: Homepage, https://github.com/juanauriti/kore-memory
|
|
6
|
+
Project-URL: Repository, https://github.com/juanauriti/kore-memory
|
|
7
|
+
Project-URL: Issues, https://github.com/juanauriti/kore-memory/issues
|
|
8
|
+
License: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: agents,ai,embeddings,llm,memory,rag,semantic-search
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Requires-Python: >=3.11
|
|
19
|
+
Requires-Dist: fastapi>=0.115.0
|
|
20
|
+
Requires-Dist: httpx>=0.27.0
|
|
21
|
+
Requires-Dist: pydantic>=2.7.0
|
|
22
|
+
Requires-Dist: uvicorn[standard]>=0.30.0
|
|
23
|
+
Provides-Extra: dev
|
|
24
|
+
Requires-Dist: httpx>=0.27.0; extra == 'dev'
|
|
25
|
+
Requires-Dist: pytest-asyncio>=0.23.0; extra == 'dev'
|
|
26
|
+
Requires-Dist: pytest>=8.0.0; extra == 'dev'
|
|
27
|
+
Provides-Extra: semantic
|
|
28
|
+
Requires-Dist: sentence-transformers>=3.0.0; extra == 'semantic'
|
|
29
|
+
Description-Content-Type: text/markdown
|
|
30
|
+
|
|
31
|
+
# Kore Memory
|
|
32
|
+
|
|
33
|
+
> **The memory layer that thinks like a human: remembers what matters, forgets what doesn't, and never calls home.**
|
|
34
|
+
|
|
35
|
+
[](LICENSE)
|
|
36
|
+
[](https://python.org)
|
|
37
|
+
[]()
|
|
38
|
+
|
|
39
|
+
---
|
|
40
|
+
|
|
41
|
+
## Why Kore?
|
|
42
|
+
|
|
43
|
+
Every AI agent memory tool out there has the same problem: they remember everything forever, require cloud APIs, or need an LLM just to decide what's worth storing.
|
|
44
|
+
|
|
45
|
+
**Kore is different.**
|
|
46
|
+
|
|
47
|
+
| Feature | Kore | Mem0 | Letta | Memori |
|
|
48
|
+
|---|---|---|---|---|
|
|
49
|
+
| Runs fully offline | ✅ | ❌ | ❌ | ❌ |
|
|
50
|
+
| No LLM required | ✅ | ❌ | ❌ | ✅ |
|
|
51
|
+
| Memory Decay (Ebbinghaus) | ✅ | ❌ | ❌ | ❌ |
|
|
52
|
+
| Auto-importance scoring | ✅ local | ✅ via LLM | ❌ | ❌ |
|
|
53
|
+
| Memory Compression | ✅ | ❌ | ❌ | ❌ |
|
|
54
|
+
| Semantic search (50+ langs) | ✅ local | ✅ via API | ✅ | ✅ |
|
|
55
|
+
| Timeline API | ✅ | ❌ | ❌ | ❌ |
|
|
56
|
+
| Access reinforcement | ✅ | ❌ | ❌ | ❌ |
|
|
57
|
+
| Install in 2 minutes | ✅ | ❌ | ❌ | ❌ |
|
|
58
|
+
|
|
59
|
+
---
|
|
60
|
+
|
|
61
|
+
## How It Works
|
|
62
|
+
|
|
63
|
+
Kore models memory the way the human brain does:
|
|
64
|
+
|
|
65
|
+
1. **Save** — store a memory with optional category and importance
|
|
66
|
+
2. **Auto-score** — Kore calculates importance locally using content analysis (no API calls)
|
|
67
|
+
3. **Decay** — memories fade over time using the [Ebbinghaus forgetting curve](https://en.wikipedia.org/wiki/Forgetting_curve)
|
|
68
|
+
4. **Reinforce** — retrieving a memory resets its clock and boosts its score
|
|
69
|
+
5. **Compress** — similar memories are automatically merged to keep the DB lean
|
|
70
|
+
6. **Search** — semantic search in any language, filtered by relevance and freshness
|
|
71
|
+
|
|
72
|
+
---
|
|
73
|
+
|
|
74
|
+
## Quickstart
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
pip install kore-memory
|
|
78
|
+
pip install kore-memory[semantic] # + multilingual embeddings (50+ languages)
|
|
79
|
+
|
|
80
|
+
kore # starts server on http://localhost:8765
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
### Save a memory
|
|
84
|
+
|
|
85
|
+
```bash
|
|
86
|
+
curl -X POST http://localhost:8765/save \
|
|
87
|
+
-H "Content-Type: application/json" \
|
|
88
|
+
-d '{"content": "User prefers concise responses", "category": "preference"}'
|
|
89
|
+
# → {"id": 1, "importance": 4, "message": "Memory saved"}
|
|
90
|
+
# importance was auto-scored: "preference" category + keyword "prefers" → 4
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
### Search (any language)
|
|
94
|
+
|
|
95
|
+
```bash
|
|
96
|
+
# English query finds Italian content, French content, etc.
|
|
97
|
+
curl "http://localhost:8765/search?q=user+preferences&limit=5"
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
### Run decay pass (call daily via cron)
|
|
101
|
+
|
|
102
|
+
```bash
|
|
103
|
+
curl -X POST http://localhost:8765/decay/run
|
|
104
|
+
# → {"updated": 42, "message": "Decay pass complete"}
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
### Compress similar memories
|
|
108
|
+
|
|
109
|
+
```bash
|
|
110
|
+
curl -X POST http://localhost:8765/compress
|
|
111
|
+
# → {"clusters_found": 3, "memories_merged": 7, "new_records_created": 3}
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
### Timeline: what did I know about X over time?
|
|
115
|
+
|
|
116
|
+
```bash
|
|
117
|
+
curl "http://localhost:8765/timeline?subject=project+alpha"
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
---
|
|
121
|
+
|
|
122
|
+
## Memory Decay
|
|
123
|
+
|
|
124
|
+
Kore uses the **Ebbinghaus forgetting curve** to assign each memory a `decay_score` between 0.0 and 1.0:
|
|
125
|
+
|
|
126
|
+
```
|
|
127
|
+
decay = e^(-t * ln(2) / half_life)
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
Where:
|
|
131
|
+
- `t` = days since last access
|
|
132
|
+
- `half_life` = base days before 50% decay, adjusted by importance level
|
|
133
|
+
|
|
134
|
+
| Importance | Half-life | Meaning |
|
|
135
|
+
|---|---|---|
|
|
136
|
+
| 1 (low) | 7 days | Casual notes |
|
|
137
|
+
| 2 (normal) | 14 days | General context |
|
|
138
|
+
| 3 (important) | 30 days | Project info |
|
|
139
|
+
| 4 (high) | 90 days | Critical decisions |
|
|
140
|
+
| 5 (critical) | 365 days | Passwords, rules, never forget |
|
|
141
|
+
|
|
142
|
+
Every time a memory is retrieved, its `access_count` increases and its half-life is extended by 15% — just like spaced repetition in human learning.
|
|
143
|
+
|
|
144
|
+
---
|
|
145
|
+
|
|
146
|
+
## Auto-Importance Scoring
|
|
147
|
+
|
|
148
|
+
When you save a memory without an explicit importance level, Kore scores it automatically:
|
|
149
|
+
|
|
150
|
+
- **Category baseline** — `preference` starts at 4, `finance` at 3, `general` at 1
|
|
151
|
+
- **Keyword signals** — words like `password`, `token`, `urgente` → importance 5
|
|
152
|
+
- **Content length** — detailed content gets a small boost
|
|
153
|
+
|
|
154
|
+
Zero LLM calls. Zero API costs.
|
|
155
|
+
|
|
156
|
+
---
|
|
157
|
+
|
|
158
|
+
## API Reference
|
|
159
|
+
|
|
160
|
+
| Method | Endpoint | Description |
|
|
161
|
+
|---|---|---|
|
|
162
|
+
| `POST` | `/save` | Save a memory |
|
|
163
|
+
| `GET` | `/search` | Semantic search (any language) |
|
|
164
|
+
| `GET` | `/timeline` | Chronological history for a subject |
|
|
165
|
+
| `DELETE` | `/memories/{id}` | Delete a memory |
|
|
166
|
+
| `POST` | `/decay/run` | Update all decay scores |
|
|
167
|
+
| `POST` | `/compress` | Merge similar memories |
|
|
168
|
+
| `GET` | `/health` | Health check + capabilities |
|
|
169
|
+
|
|
170
|
+
Full interactive docs: `http://localhost:8765/docs`
|
|
171
|
+
|
|
172
|
+
---
|
|
173
|
+
|
|
174
|
+
## Categories
|
|
175
|
+
|
|
176
|
+
`general` · `project` · `trading` · `finance` · `person` · `preference` · `task` · `decision`
|
|
177
|
+
|
|
178
|
+
---
|
|
179
|
+
|
|
180
|
+
## Requirements
|
|
181
|
+
|
|
182
|
+
- Python 3.11+
|
|
183
|
+
- SQLite (built into Python)
|
|
184
|
+
- Optional: `sentence-transformers` for semantic search
|
|
185
|
+
|
|
186
|
+
No PostgreSQL. No Redis. No Docker. No API keys.
|
|
187
|
+
|
|
188
|
+
---
|
|
189
|
+
|
|
190
|
+
## License
|
|
191
|
+
|
|
192
|
+
MIT — use it, fork it, build on it.
|
|
193
|
+
|
|
194
|
+
---
|
|
195
|
+
|
|
196
|
+
*Built with ❤️ for AI agents that deserve better memory.*
|
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
# Kore Memory
|
|
2
|
+
|
|
3
|
+
> **The memory layer that thinks like a human: remembers what matters, forgets what doesn't, and never calls home.**
|
|
4
|
+
|
|
5
|
+
[](LICENSE)
|
|
6
|
+
[](https://python.org)
|
|
7
|
+
[]()
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## Why Kore?
|
|
12
|
+
|
|
13
|
+
Every AI agent memory tool out there has the same problem: they remember everything forever, require cloud APIs, or need an LLM just to decide what's worth storing.
|
|
14
|
+
|
|
15
|
+
**Kore is different.**
|
|
16
|
+
|
|
17
|
+
| Feature | Kore | Mem0 | Letta | Memori |
|
|
18
|
+
|---|---|---|---|---|
|
|
19
|
+
| Runs fully offline | ✅ | ❌ | ❌ | ❌ |
|
|
20
|
+
| No LLM required | ✅ | ❌ | ❌ | ✅ |
|
|
21
|
+
| Memory Decay (Ebbinghaus) | ✅ | ❌ | ❌ | ❌ |
|
|
22
|
+
| Auto-importance scoring | ✅ local | ✅ via LLM | ❌ | ❌ |
|
|
23
|
+
| Memory Compression | ✅ | ❌ | ❌ | ❌ |
|
|
24
|
+
| Semantic search (50+ langs) | ✅ local | ✅ via API | ✅ | ✅ |
|
|
25
|
+
| Timeline API | ✅ | ❌ | ❌ | ❌ |
|
|
26
|
+
| Access reinforcement | ✅ | ❌ | ❌ | ❌ |
|
|
27
|
+
| Install in 2 minutes | ✅ | ❌ | ❌ | ❌ |
|
|
28
|
+
|
|
29
|
+
---
|
|
30
|
+
|
|
31
|
+
## How It Works
|
|
32
|
+
|
|
33
|
+
Kore models memory the way the human brain does:
|
|
34
|
+
|
|
35
|
+
1. **Save** — store a memory with optional category and importance
|
|
36
|
+
2. **Auto-score** — Kore calculates importance locally using content analysis (no API calls)
|
|
37
|
+
3. **Decay** — memories fade over time using the [Ebbinghaus forgetting curve](https://en.wikipedia.org/wiki/Forgetting_curve)
|
|
38
|
+
4. **Reinforce** — retrieving a memory resets its clock and boosts its score
|
|
39
|
+
5. **Compress** — similar memories are automatically merged to keep the DB lean
|
|
40
|
+
6. **Search** — semantic search in any language, filtered by relevance and freshness
|
|
41
|
+
|
|
42
|
+
---
|
|
43
|
+
|
|
44
|
+
## Quickstart
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install kore-memory
|
|
48
|
+
pip install kore-memory[semantic] # + multilingual embeddings (50+ languages)
|
|
49
|
+
|
|
50
|
+
kore # starts server on http://localhost:8765
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
### Save a memory
|
|
54
|
+
|
|
55
|
+
```bash
|
|
56
|
+
curl -X POST http://localhost:8765/save \
|
|
57
|
+
-H "Content-Type: application/json" \
|
|
58
|
+
-d '{"content": "User prefers concise responses", "category": "preference"}'
|
|
59
|
+
# → {"id": 1, "importance": 4, "message": "Memory saved"}
|
|
60
|
+
# importance was auto-scored: "preference" category + keyword "prefers" → 4
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
### Search (any language)
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
# English query finds Italian content, French content, etc.
|
|
67
|
+
curl "http://localhost:8765/search?q=user+preferences&limit=5"
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
### Run decay pass (call daily via cron)
|
|
71
|
+
|
|
72
|
+
```bash
|
|
73
|
+
curl -X POST http://localhost:8765/decay/run
|
|
74
|
+
# → {"updated": 42, "message": "Decay pass complete"}
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
### Compress similar memories
|
|
78
|
+
|
|
79
|
+
```bash
|
|
80
|
+
curl -X POST http://localhost:8765/compress
|
|
81
|
+
# → {"clusters_found": 3, "memories_merged": 7, "new_records_created": 3}
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
### Timeline: what did I know about X over time?
|
|
85
|
+
|
|
86
|
+
```bash
|
|
87
|
+
curl "http://localhost:8765/timeline?subject=project+alpha"
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
---
|
|
91
|
+
|
|
92
|
+
## Memory Decay
|
|
93
|
+
|
|
94
|
+
Kore uses the **Ebbinghaus forgetting curve** to assign each memory a `decay_score` between 0.0 and 1.0:
|
|
95
|
+
|
|
96
|
+
```
|
|
97
|
+
decay = e^(-t * ln(2) / half_life)
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
Where:
|
|
101
|
+
- `t` = days since last access
|
|
102
|
+
- `half_life` = base days before 50% decay, adjusted by importance level
|
|
103
|
+
|
|
104
|
+
| Importance | Half-life | Meaning |
|
|
105
|
+
|---|---|---|
|
|
106
|
+
| 1 (low) | 7 days | Casual notes |
|
|
107
|
+
| 2 (normal) | 14 days | General context |
|
|
108
|
+
| 3 (important) | 30 days | Project info |
|
|
109
|
+
| 4 (high) | 90 days | Critical decisions |
|
|
110
|
+
| 5 (critical) | 365 days | Passwords, rules, never forget |
|
|
111
|
+
|
|
112
|
+
Every time a memory is retrieved, its `access_count` increases and its half-life is extended by 15% — just like spaced repetition in human learning.
|
|
113
|
+
|
|
114
|
+
---
|
|
115
|
+
|
|
116
|
+
## Auto-Importance Scoring
|
|
117
|
+
|
|
118
|
+
When you save a memory without an explicit importance level, Kore scores it automatically:
|
|
119
|
+
|
|
120
|
+
- **Category baseline** — `preference` starts at 4, `finance` at 3, `general` at 1
|
|
121
|
+
- **Keyword signals** — words like `password`, `token`, `urgente` → importance 5
|
|
122
|
+
- **Content length** — detailed content gets a small boost
|
|
123
|
+
|
|
124
|
+
Zero LLM calls. Zero API costs.
|
|
125
|
+
|
|
126
|
+
---
|
|
127
|
+
|
|
128
|
+
## API Reference
|
|
129
|
+
|
|
130
|
+
| Method | Endpoint | Description |
|
|
131
|
+
|---|---|---|
|
|
132
|
+
| `POST` | `/save` | Save a memory |
|
|
133
|
+
| `GET` | `/search` | Semantic search (any language) |
|
|
134
|
+
| `GET` | `/timeline` | Chronological history for a subject |
|
|
135
|
+
| `DELETE` | `/memories/{id}` | Delete a memory |
|
|
136
|
+
| `POST` | `/decay/run` | Update all decay scores |
|
|
137
|
+
| `POST` | `/compress` | Merge similar memories |
|
|
138
|
+
| `GET` | `/health` | Health check + capabilities |
|
|
139
|
+
|
|
140
|
+
Full interactive docs: `http://localhost:8765/docs`
|
|
141
|
+
|
|
142
|
+
---
|
|
143
|
+
|
|
144
|
+
## Categories
|
|
145
|
+
|
|
146
|
+
`general` · `project` · `trading` · `finance` · `person` · `preference` · `task` · `decision`
|
|
147
|
+
|
|
148
|
+
---
|
|
149
|
+
|
|
150
|
+
## Requirements
|
|
151
|
+
|
|
152
|
+
- Python 3.11+
|
|
153
|
+
- SQLite (built into Python)
|
|
154
|
+
- Optional: `sentence-transformers` for semantic search
|
|
155
|
+
|
|
156
|
+
No PostgreSQL. No Redis. No Docker. No API keys.
|
|
157
|
+
|
|
158
|
+
---
|
|
159
|
+
|
|
160
|
+
## License
|
|
161
|
+
|
|
162
|
+
MIT — use it, fork it, build on it.
|
|
163
|
+
|
|
164
|
+
---
|
|
165
|
+
|
|
166
|
+
*Built with ❤️ for AI agents that deserve better memory.*
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "kore-memory"
|
|
7
|
+
version = "0.2.0"
|
|
8
|
+
description = "The memory layer that thinks like a human: remembers what matters, forgets what doesn't, and never calls home."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = { text = "MIT" }
|
|
11
|
+
requires-python = ">=3.11"
|
|
12
|
+
keywords = ["ai", "memory", "agents", "llm", "embeddings", "semantic-search", "rag"]
|
|
13
|
+
classifiers = [
|
|
14
|
+
"Development Status :: 4 - Beta",
|
|
15
|
+
"Intended Audience :: Developers",
|
|
16
|
+
"License :: OSI Approved :: MIT License",
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Python :: 3.11",
|
|
19
|
+
"Programming Language :: Python :: 3.12",
|
|
20
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"fastapi>=0.115.0",
|
|
24
|
+
"uvicorn[standard]>=0.30.0",
|
|
25
|
+
"pydantic>=2.7.0",
|
|
26
|
+
"httpx>=0.27.0",
|
|
27
|
+
]
|
|
28
|
+
|
|
29
|
+
[project.optional-dependencies]
|
|
30
|
+
semantic = [
|
|
31
|
+
"sentence-transformers>=3.0.0",
|
|
32
|
+
]
|
|
33
|
+
dev = [
|
|
34
|
+
"pytest>=8.0.0",
|
|
35
|
+
"httpx>=0.27.0",
|
|
36
|
+
"pytest-asyncio>=0.23.0",
|
|
37
|
+
]
|
|
38
|
+
|
|
39
|
+
[project.urls]
|
|
40
|
+
Homepage = "https://github.com/juanauriti/kore-memory"
|
|
41
|
+
Repository = "https://github.com/juanauriti/kore-memory"
|
|
42
|
+
Issues = "https://github.com/juanauriti/kore-memory/issues"
|
|
43
|
+
|
|
44
|
+
[project.scripts]
|
|
45
|
+
# Entry point must reference the shipped package ("src" per
# [tool.hatch.build.targets.wheel] packages), not a non-existent "kore" module.
kore = "src.cli:main"
|
|
46
|
+
|
|
47
|
+
[tool.hatch.build.targets.wheel]
|
|
48
|
+
packages = ["src"]
|
|
49
|
+
|
|
50
|
+
[tool.pytest.ini_options]
|
|
51
|
+
asyncio_mode = "auto"
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Kore — import_memory.py
|
|
3
|
+
Popola il DB Kore leggendo MEMORY.md e suddividendo per sezioni.
|
|
4
|
+
Esegui una volta sola dopo l'installazione.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import re
|
|
8
|
+
import sys
|
|
9
|
+
import json
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
import httpx
|
|
13
|
+
|
|
14
|
+
# Location of the seed file: three levels up from this script (repo root).
MEMORY_PATH = Path(__file__).parent.parent.parent / "MEMORY.md"
# Base URL of the locally running Kore server.
KORE_URL = "http://localhost:8765"

# Keyword (matched case-insensitively inside a section title) -> category.
# NOTE: insertion order matters — detect_category returns the FIRST match.
SECTION_CATEGORY_MAP = {
    "finanz": "finance",
    "kore": "project",
    "progetti": "project",
    "clawdwork": "task",
    "freelance": "person",
    "crypto": "trading",
    "regole": "preference",
    "ottimizzaz": "preference",
    "calcfast": "project",
    "amazon": "project",
    "agencypilot": "project",
    "priorità": "task",
}

# Default importance level (1–5) assigned to an imported record, per category.
IMPORTANCE_MAP = {
    "finance": 4,
    "trading": 4,
    "project": 3,
    "task": 3,
    "preference": 5,
    "person": 2,
    "decision": 4,
    "general": 2,
}
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def detect_category(section_title: str) -> str:
|
|
45
|
+
title_lower = section_title.lower()
|
|
46
|
+
for keyword, category in SECTION_CATEGORY_MAP.items():
|
|
47
|
+
if keyword in title_lower:
|
|
48
|
+
return category
|
|
49
|
+
return "general"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def parse_memory_md(path: Path) -> list[dict]:
    """Split MEMORY.md into per-section chunks and return save-ready records.

    Each H2 ("## ") section becomes zero or more records: the heading drives
    the category (via detect_category) and the importance (via
    IMPORTANCE_MAP), and the body is cut into small chunks by chunk_section.
    """
    raw = path.read_text(encoding="utf-8")

    records: list[dict] = []
    for section in re.split(r"\n(?=## )", raw):
        section_lines = section.strip().splitlines()
        if not section_lines:
            continue

        title = section_lines[0].lstrip("#").strip()
        # Drop blank lines and horizontal rules from the section body.
        body = [ln for ln in section_lines[1:] if ln.strip() and not ln.startswith("---")]
        if not body:
            continue

        category = detect_category(title)
        importance = IMPORTANCE_MAP.get(category, 2)

        # Long sections are stored as several small chunks, not one blob.
        records.extend(
            {"content": chunk, "category": category, "importance": importance}
            for chunk in chunk_section(title, body)
        )

    return records
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def chunk_section(title: str, lines: list[str]) -> list[str]:
    """Group bullet lines into "[title] a | b | c" chunks of at most 3 items.

    Bullet markers ("-", "*") are stripped, entries of 10 characters or
    fewer are discarded, and each chunk is capped at 4000 characters so no
    single record becomes a massive blob.
    """
    items: list[str] = []
    for raw in lines:
        text = raw.strip()
        if not text:
            continue
        text = text.lstrip("-").lstrip("*").strip()
        # Ignore trivially short fragments (e.g. leftover separators).
        if len(text) > 10:
            items.append(text)

    if not items:
        return []

    prefix = f"[{title}] "
    result: list[str] = []
    for start in range(0, len(items), 3):
        piece = prefix + " | ".join(items[start:start + 3])
        if len(piece) > 4000:
            piece = piece[:3997] + "..."
        result.append(piece)
    return result
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def save_record(record: dict) -> int | None:
    """POST one record to the Kore /save endpoint.

    Returns the new memory's id on success (HTTP 201), or None on any
    failure; errors are printed rather than raised so a bad record does
    not abort the whole import run.
    """
    try:
        resp = httpx.post(f"{KORE_URL}/save", json=record, timeout=5)
    except Exception as e:
        # Network/timeout problems: report and keep going.
        print(f"  ❌ Errore: {e}")
        return None

    if resp.status_code == 201:
        return resp.json()["id"]

    print(f"  ⚠️  {resp.status_code}: {resp.text[:80]}")
    return None
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def main():
    """Read MEMORY.md, chunk it, and push every record to the Kore server."""
    # Fail fast with a clear message instead of an unhandled traceback when
    # the seed file is missing.
    if not MEMORY_PATH.exists():
        print(f"❌ File non trovato: {MEMORY_PATH}")
        sys.exit(1)

    print(f"📂 Lettura {MEMORY_PATH}...")
    records = parse_memory_md(MEMORY_PATH)
    print(f"📝 Trovati {len(records)} chunk da importare\n")

    saved = 0
    for rec in records:
        record_id = save_record(rec)
        # Compare against None explicitly: an id of 0 is falsy but would
        # still be a successful save.
        if record_id is not None:
            print(f"  ✅ #{record_id} [{rec['category']}] ★{rec['importance']} — {rec['content'][:60]}...")
            saved += 1

    print(f"\n🎉 Importati {saved}/{len(records)} record in Kore")


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Kore package
|