cogmem-agent 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cogmem_agent-0.1.1/.gitignore +9 -0
- cogmem_agent-0.1.1/LICENSE +21 -0
- cogmem_agent-0.1.1/PKG-INFO +134 -0
- cogmem_agent-0.1.1/README.md +106 -0
- cogmem_agent-0.1.1/docs/embedding-providers.md +90 -0
- cogmem_agent-0.1.1/docs/log-format.md +88 -0
- cogmem_agent-0.1.1/docs/quickstart.md +109 -0
- cogmem_agent-0.1.1/pyproject.toml +51 -0
- cogmem_agent-0.1.1/src/cognitive_memory/__init__.py +27 -0
- cogmem_agent-0.1.1/src/cognitive_memory/_version.py +1 -0
- cogmem_agent-0.1.1/src/cognitive_memory/cli/__init__.py +0 -0
- cogmem_agent-0.1.1/src/cognitive_memory/cli/index_cmd.py +42 -0
- cogmem_agent-0.1.1/src/cognitive_memory/cli/init_cmd.py +55 -0
- cogmem_agent-0.1.1/src/cognitive_memory/cli/main.py +51 -0
- cogmem_agent-0.1.1/src/cognitive_memory/cli/search_cmd.py +36 -0
- cogmem_agent-0.1.1/src/cognitive_memory/config.py +125 -0
- cogmem_agent-0.1.1/src/cognitive_memory/embeddings/__init__.py +6 -0
- cogmem_agent-0.1.1/src/cognitive_memory/embeddings/_protocol.py +18 -0
- cogmem_agent-0.1.1/src/cognitive_memory/embeddings/ollama.py +41 -0
- cogmem_agent-0.1.1/src/cognitive_memory/gate.py +37 -0
- cogmem_agent-0.1.1/src/cognitive_memory/parser.py +47 -0
- cogmem_agent-0.1.1/src/cognitive_memory/scaffold/cogmem.toml +16 -0
- cogmem_agent-0.1.1/src/cognitive_memory/scoring.py +43 -0
- cogmem_agent-0.1.1/src/cognitive_memory/search.py +154 -0
- cogmem_agent-0.1.1/src/cognitive_memory/store.py +221 -0
- cogmem_agent-0.1.1/src/cognitive_memory/types.py +36 -0
- cogmem_agent-0.1.1/tests/conftest.py +111 -0
- cogmem_agent-0.1.1/tests/test_cli.py +95 -0
- cogmem_agent-0.1.1/tests/test_config.py +119 -0
- cogmem_agent-0.1.1/tests/test_gate.py +57 -0
- cogmem_agent-0.1.1/tests/test_parser.py +110 -0
- cogmem_agent-0.1.1/tests/test_scoring.py +109 -0
- cogmem_agent-0.1.1/tests/test_search.py +121 -0
- cogmem_agent-0.1.1/tests/test_store.py +123 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Akira Honjo
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cogmem-agent
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: Human-like cognitive memory for AI agents — emotion-gated recall with adaptive forgetting
|
|
5
|
+
Project-URL: Homepage, https://github.com/akira/cognitive-memory
|
|
6
|
+
Project-URL: Repository, https://github.com/akira/cognitive-memory
|
|
7
|
+
Author: Akira Honjo
|
|
8
|
+
License-Expression: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: agent,ai,cognitive,llm,memory,semantic-search
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
21
|
+
Requires-Python: >=3.9
|
|
22
|
+
Requires-Dist: tomli>=1.0; python_version < '3.11'
|
|
23
|
+
Provides-Extra: dev
|
|
24
|
+
Requires-Dist: pytest>=7.0; extra == 'dev'
|
|
25
|
+
Provides-Extra: openai
|
|
26
|
+
Requires-Dist: openai>=1.0; extra == 'openai'
|
|
27
|
+
Description-Content-Type: text/markdown
|
|
28
|
+
|
|
29
|
+
# Cognitive Memory
|
|
30
|
+
|
|
31
|
+
Human-like cognitive memory for AI agents — emotion-gated recall with adaptive forgetting.
|
|
32
|
+
|
|
33
|
+
Unlike traditional vector databases that treat all memories equally, Cognitive Memory models how humans actually remember: emotionally significant experiences persist longer, while routine information naturally fades. This makes AI agents feel more natural and context-aware.
|
|
34
|
+
|
|
35
|
+
## Key Features
|
|
36
|
+
|
|
37
|
+
- **Emotion-gated recall**: Arousal scores modulate memory persistence
|
|
38
|
+
- **Adaptive forgetting**: High-arousal memories decay slower (configurable half-life)
|
|
39
|
+
- **Adaptive search gate**: Skips trivial queries (greetings, acknowledgments)
|
|
40
|
+
- **FailOpen design**: Falls back to keyword search when embeddings are unavailable
|
|
41
|
+
- **Zero required dependencies**: Core uses only Python stdlib (sqlite3, urllib)
|
|
42
|
+
- **Pluggable embeddings**: Ollama (built-in), OpenAI, or any custom provider
|
|
43
|
+
|
|
44
|
+
## Install
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install cogmem-agent
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Quick Start
|
|
51
|
+
|
|
52
|
+
### CLI
|
|
53
|
+
|
|
54
|
+
```bash
|
|
55
|
+
cogmem init # Initialize project
|
|
56
|
+
cogmem index # Build/update index
|
|
57
|
+
cogmem search "past decisions" # Search memories
|
|
58
|
+
cogmem status # Show statistics
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### Python API
|
|
62
|
+
|
|
63
|
+
```python
|
|
64
|
+
from cognitive_memory import MemoryStore, CogMemConfig
|
|
65
|
+
|
|
66
|
+
config = CogMemConfig.from_toml("cogmem.toml")
|
|
67
|
+
with MemoryStore(config) as store:
|
|
68
|
+
store.index_dir()
|
|
69
|
+
result = store.search("past competition analysis")
|
|
70
|
+
for r in result.results:
|
|
71
|
+
print(f"{r.date} [{r.score:.2f}] {r.content[:80]}")
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
### Convenience API
|
|
75
|
+
|
|
76
|
+
```python
|
|
77
|
+
from cognitive_memory import search
|
|
78
|
+
result = search("past decisions") # Auto-finds cogmem.toml
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
## Scoring Formula
|
|
82
|
+
|
|
83
|
+
```
|
|
84
|
+
score = (0.7 * cosine_sim + 0.3 * arousal) * time_decay
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
Where `time_decay` uses an adaptive half-life:
|
|
88
|
+
```
|
|
89
|
+
half_life = base_half_life * (1 + arousal)
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
High-arousal memories (insights, conflicts, surprises) decay slower — just like human memory.
|
|
93
|
+
|
|
94
|
+
## Configuration
|
|
95
|
+
|
|
96
|
+
`cogmem.toml`:
|
|
97
|
+
|
|
98
|
+
```toml
|
|
99
|
+
[cogmem]
|
|
100
|
+
logs_dir = "memory/logs"
|
|
101
|
+
db_path = "memory/vectors.db"
|
|
102
|
+
|
|
103
|
+
[cogmem.scoring]
|
|
104
|
+
sim_weight = 0.7
|
|
105
|
+
arousal_weight = 0.3
|
|
106
|
+
base_half_life = 60.0
|
|
107
|
+
decay_floor = 0.3
|
|
108
|
+
|
|
109
|
+
[cogmem.embedding]
|
|
110
|
+
provider = "ollama"
|
|
111
|
+
model = "zylonai/multilingual-e5-large"
|
|
112
|
+
url = "http://localhost:11434/api/embed"
|
|
113
|
+
timeout = 10
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
## Custom Embedding Provider
|
|
117
|
+
|
|
118
|
+
```python
|
|
119
|
+
class MyEmbedder:
|
|
120
|
+
def embed(self, text: str) -> list[float] | None: ...
|
|
121
|
+
def embed_batch(self, texts: list[str]) -> list[list[float]] | None: ...
|
|
122
|
+
|
|
123
|
+
store = MemoryStore(config, embedder=MyEmbedder())
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## Documentation
|
|
127
|
+
|
|
128
|
+
- [Quick Start](docs/quickstart.md)
|
|
129
|
+
- [Log Format](docs/log-format.md)
|
|
130
|
+
- [Embedding Providers](docs/embedding-providers.md)
|
|
131
|
+
|
|
132
|
+
## License
|
|
133
|
+
|
|
134
|
+
MIT
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# Cognitive Memory
|
|
2
|
+
|
|
3
|
+
Human-like cognitive memory for AI agents — emotion-gated recall with adaptive forgetting.
|
|
4
|
+
|
|
5
|
+
Unlike traditional vector databases that treat all memories equally, Cognitive Memory models how humans actually remember: emotionally significant experiences persist longer, while routine information naturally fades. This makes AI agents feel more natural and context-aware.
|
|
6
|
+
|
|
7
|
+
## Key Features
|
|
8
|
+
|
|
9
|
+
- **Emotion-gated recall**: Arousal scores modulate memory persistence
|
|
10
|
+
- **Adaptive forgetting**: High-arousal memories decay slower (configurable half-life)
|
|
11
|
+
- **Adaptive search gate**: Skips trivial queries (greetings, acknowledgments)
|
|
12
|
+
- **FailOpen design**: Falls back to keyword search when embeddings are unavailable
|
|
13
|
+
- **Zero required dependencies**: Core uses only Python stdlib (sqlite3, urllib)
|
|
14
|
+
- **Pluggable embeddings**: Ollama (built-in), OpenAI, or any custom provider
|
|
15
|
+
|
|
16
|
+
## Install
|
|
17
|
+
|
|
18
|
+
```bash
|
|
19
|
+
pip install cogmem-agent
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Quick Start
|
|
23
|
+
|
|
24
|
+
### CLI
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
cogmem init # Initialize project
|
|
28
|
+
cogmem index # Build/update index
|
|
29
|
+
cogmem search "past decisions" # Search memories
|
|
30
|
+
cogmem status # Show statistics
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
### Python API
|
|
34
|
+
|
|
35
|
+
```python
|
|
36
|
+
from cognitive_memory import MemoryStore, CogMemConfig
|
|
37
|
+
|
|
38
|
+
config = CogMemConfig.from_toml("cogmem.toml")
|
|
39
|
+
with MemoryStore(config) as store:
|
|
40
|
+
store.index_dir()
|
|
41
|
+
result = store.search("past competition analysis")
|
|
42
|
+
for r in result.results:
|
|
43
|
+
print(f"{r.date} [{r.score:.2f}] {r.content[:80]}")
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
### Convenience API
|
|
47
|
+
|
|
48
|
+
```python
|
|
49
|
+
from cognitive_memory import search
|
|
50
|
+
result = search("past decisions") # Auto-finds cogmem.toml
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## Scoring Formula
|
|
54
|
+
|
|
55
|
+
```
|
|
56
|
+
score = (0.7 * cosine_sim + 0.3 * arousal) * time_decay
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
Where `time_decay` uses an adaptive half-life:
|
|
60
|
+
```
|
|
61
|
+
half_life = base_half_life * (1 + arousal)
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
High-arousal memories (insights, conflicts, surprises) decay slower — just like human memory.
|
|
65
|
+
|
|
66
|
+
## Configuration
|
|
67
|
+
|
|
68
|
+
`cogmem.toml`:
|
|
69
|
+
|
|
70
|
+
```toml
|
|
71
|
+
[cogmem]
|
|
72
|
+
logs_dir = "memory/logs"
|
|
73
|
+
db_path = "memory/vectors.db"
|
|
74
|
+
|
|
75
|
+
[cogmem.scoring]
|
|
76
|
+
sim_weight = 0.7
|
|
77
|
+
arousal_weight = 0.3
|
|
78
|
+
base_half_life = 60.0
|
|
79
|
+
decay_floor = 0.3
|
|
80
|
+
|
|
81
|
+
[cogmem.embedding]
|
|
82
|
+
provider = "ollama"
|
|
83
|
+
model = "zylonai/multilingual-e5-large"
|
|
84
|
+
url = "http://localhost:11434/api/embed"
|
|
85
|
+
timeout = 10
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
## Custom Embedding Provider
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
class MyEmbedder:
|
|
92
|
+
def embed(self, text: str) -> list[float] | None: ...
|
|
93
|
+
def embed_batch(self, texts: list[str]) -> list[list[float]] | None: ...
|
|
94
|
+
|
|
95
|
+
store = MemoryStore(config, embedder=MyEmbedder())
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## Documentation
|
|
99
|
+
|
|
100
|
+
- [Quick Start](docs/quickstart.md)
|
|
101
|
+
- [Log Format](docs/log-format.md)
|
|
102
|
+
- [Embedding Providers](docs/embedding-providers.md)
|
|
103
|
+
|
|
104
|
+
## License
|
|
105
|
+
|
|
106
|
+
MIT
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
# Embedding Providers
|
|
2
|
+
|
|
3
|
+
Cognitive Memory uses a Protocol-based embedding system. Any object with `embed()` and `embed_batch()` methods works.
|
|
4
|
+
|
|
5
|
+
## Built-in: Ollama (Default)
|
|
6
|
+
|
|
7
|
+
Zero external dependencies. Uses `urllib.request` to call Ollama's API.
|
|
8
|
+
|
|
9
|
+
```toml
|
|
10
|
+
[cogmem.embedding]
|
|
11
|
+
provider = "ollama"
|
|
12
|
+
model = "zylonai/multilingual-e5-large"
|
|
13
|
+
url = "http://localhost:11434/api/embed"
|
|
14
|
+
timeout = 10
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
### Setup
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
# Install Ollama (https://ollama.ai)
|
|
21
|
+
ollama pull zylonai/multilingual-e5-large
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
### Recommended Models
|
|
25
|
+
|
|
26
|
+
| Model | Size | Languages | Notes |
|
|
27
|
+
|-------|------|-----------|-------|
|
|
28
|
+
| `zylonai/multilingual-e5-large` | 2.2 GB | 100+ | Best for multilingual (recommended) |
|
|
29
|
+
| `nomic-embed-text` | 274 MB | English | Lightweight, English-focused |
|
|
30
|
+
| `mxbai-embed-large` | 670 MB | English | Good quality, moderate size |
|
|
31
|
+
|
|
32
|
+
## Custom Provider
|
|
33
|
+
|
|
34
|
+
Implement two methods:
|
|
35
|
+
|
|
36
|
+
```python
|
|
37
|
+
from typing import List, Optional
|
|
38
|
+
|
|
39
|
+
class MyEmbedder:
|
|
40
|
+
def embed(self, text: str) -> Optional[List[float]]:
|
|
41
|
+
"""Embed a single text. Return None on failure."""
|
|
42
|
+
...
|
|
43
|
+
|
|
44
|
+
def embed_batch(self, texts: List[str]) -> Optional[List[List[float]]]:
|
|
45
|
+
"""Embed multiple texts. Return None on failure."""
|
|
46
|
+
...
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Pass to MemoryStore:
|
|
50
|
+
|
|
51
|
+
```python
|
|
52
|
+
from cognitive_memory import MemoryStore, CogMemConfig
|
|
53
|
+
|
|
54
|
+
store = MemoryStore(config, embedder=MyEmbedder())
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## OpenAI Provider (Example)
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
pip install cogmem-agent[openai]
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
```python
|
|
64
|
+
import openai
|
|
65
|
+
|
|
66
|
+
class OpenAIEmbedding:
|
|
67
|
+
def __init__(self, model="text-embedding-3-small"):
|
|
68
|
+
self.client = openai.OpenAI()
|
|
69
|
+
self.model = model
|
|
70
|
+
|
|
71
|
+
def embed(self, text):
|
|
72
|
+
r = self.client.embeddings.create(input=[text], model=self.model)
|
|
73
|
+
return r.data[0].embedding
|
|
74
|
+
|
|
75
|
+
def embed_batch(self, texts):
|
|
76
|
+
r = self.client.embeddings.create(input=texts, model=self.model)
|
|
77
|
+
return [d.embedding for d in r.data]
|
|
78
|
+
|
|
79
|
+
store = MemoryStore(config, embedder=OpenAIEmbedding())
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## FailOpen Behavior
|
|
83
|
+
|
|
84
|
+
If embedding fails (Ollama down, network error, etc.), Cognitive Memory falls back to keyword-based grep search. This ensures the system always returns results when possible.
|
|
85
|
+
|
|
86
|
+
Search response status indicates the mode:
|
|
87
|
+
- `"ok"` — Full semantic + grep search
|
|
88
|
+
- `"degraded (ollama_unavailable)"` — Grep only (embedding failed)
|
|
89
|
+
- `"degraded (no_index)"` — Grep only (no SQLite database)
|
|
90
|
+
- `"skipped_by_gate"` — Query too short/trivial, no search performed
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# Log Format Specification
|
|
2
|
+
|
|
3
|
+
Cognitive Memory parses structured markdown log files to build a searchable memory index.
|
|
4
|
+
|
|
5
|
+
## File Naming
|
|
6
|
+
|
|
7
|
+
```
|
|
8
|
+
memory/logs/YYYY-MM-DD.md
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
Files must start with a date pattern (`YYYY-MM-DD`). Files without this pattern are ignored.
|
|
12
|
+
Files ending in `.compact.md` are excluded from indexing (they are compressed summaries).
|
|
13
|
+
|
|
14
|
+
## File Structure
|
|
15
|
+
|
|
16
|
+
```markdown
|
|
17
|
+
# YYYY-MM-DD Session Log
|
|
18
|
+
|
|
19
|
+
## Session Summary
|
|
20
|
+
[Optional summary, not indexed]
|
|
21
|
+
|
|
22
|
+
## Log Entries
|
|
23
|
+
|
|
24
|
+
### [CATEGORY][DOMAIN] Entry Title
|
|
25
|
+
*Arousal: 0.7 | Emotion: Insight*
|
|
26
|
+
Entry content here. Must be at least 20 characters to pass noise filter.
|
|
27
|
+
|
|
28
|
+
---
|
|
29
|
+
|
|
30
|
+
### [CATEGORY][DOMAIN] Another Entry
|
|
31
|
+
*Arousal: 0.5 | Emotion: Determination*
|
|
32
|
+
More content here.
|
|
33
|
+
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## 引き継ぎ
|
|
37
|
+
[Everything below this delimiter is excluded from indexing]
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Category Tags
|
|
41
|
+
|
|
42
|
+
| Tag | Usage |
|
|
43
|
+
|-----|-------|
|
|
44
|
+
| `[INSIGHT]` | New understanding, perspective shift |
|
|
45
|
+
| `[DECISION]` | Decision and its rationale |
|
|
46
|
+
| `[ERROR]` | Mistakes, failed assumptions, course corrections |
|
|
47
|
+
| `[PATTERN]` | Recurring themes, behaviors, thought patterns |
|
|
48
|
+
| `[QUESTION]` | Open questions, areas needing investigation |
|
|
49
|
+
| `[MILESTONE]` | Important achievements, phase transitions |
|
|
50
|
+
|
|
51
|
+
## Domain Tags
|
|
52
|
+
|
|
53
|
+
Combine with category tags: `[INSIGHT][TECH]`, `[DECISION][MARKET]`
|
|
54
|
+
|
|
55
|
+
Available: `[PROBLEM]` `[USER-RESEARCH]` `[MARKET]` `[CONCEPT]` `[BUSINESS-MODEL]` `[MVP]` `[TECH]` `[STRATEGY]` `[RISK]`
|
|
56
|
+
|
|
57
|
+
## Arousal
|
|
58
|
+
|
|
59
|
+
The `*Arousal: X.X | Emotion: ...*` line is optional but recommended.
|
|
60
|
+
|
|
61
|
+
- **Range**: 0.0 to 1.0
|
|
62
|
+
- **Default**: 0.5 (when not specified)
|
|
63
|
+
- **Effect**: Higher arousal → slower forgetting (adaptive half-life), higher search score
|
|
64
|
+
|
|
65
|
+
| Arousal | Meaning | Half-life (base=60d) |
|
|
66
|
+
|---------|---------|---------------------|
|
|
67
|
+
| 0.0 | Routine | 60 days |
|
|
68
|
+
| 0.5 | Notable | 90 days |
|
|
69
|
+
| 0.8 | Important | 108 days |
|
|
70
|
+
| 1.0 | Critical | 120 days |
|
|
71
|
+
|
|
72
|
+
## Noise Filter
|
|
73
|
+
|
|
74
|
+
Entries are excluded if:
|
|
75
|
+
- Content is shorter than 20 characters
|
|
76
|
+
- Content matches noise patterns (greetings, acknowledgments)
|
|
77
|
+
|
|
78
|
+
## Handover Delimiter
|
|
79
|
+
|
|
80
|
+
Default: `## 引き継ぎ`
|
|
81
|
+
|
|
82
|
+
Configurable in `cogmem.toml`:
|
|
83
|
+
```toml
|
|
84
|
+
[cogmem]
|
|
85
|
+
handover_delimiter = "## Handover"
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
Everything after this delimiter is excluded from indexing.
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
# Quick Start
|
|
2
|
+
|
|
3
|
+
## Installation
|
|
4
|
+
|
|
5
|
+
```bash
|
|
6
|
+
pip install cogmem-agent
|
|
7
|
+
```
|
|
8
|
+
|
|
9
|
+
For OpenAI embedding support:
|
|
10
|
+
```bash
|
|
11
|
+
pip install cogmem-agent[openai]
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
## Project Setup
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
cogmem init
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
This creates:
|
|
21
|
+
- `cogmem.toml` — configuration file
|
|
22
|
+
- `memory/logs/` — directory for session logs
|
|
23
|
+
- `.gitignore` update (excludes `*.db`)
|
|
24
|
+
|
|
25
|
+
## Writing Logs
|
|
26
|
+
|
|
27
|
+
Create markdown files in `memory/logs/` with the naming pattern `YYYY-MM-DD.md`:
|
|
28
|
+
|
|
29
|
+
```markdown
|
|
30
|
+
# 2026-03-21 Session Log
|
|
31
|
+
|
|
32
|
+
## Log Entries
|
|
33
|
+
|
|
34
|
+
### [INSIGHT][TECH] Discovered adaptive half-life
|
|
35
|
+
*Arousal: 0.8 | Emotion: Insight*
|
|
36
|
+
High-arousal memories should decay slower. Using formula:
|
|
37
|
+
half_life = base * (1 + arousal)
|
|
38
|
+
|
|
39
|
+
---
|
|
40
|
+
|
|
41
|
+
### [DECISION][TECH] Chose SQLite over LanceDB
|
|
42
|
+
*Arousal: 0.6 | Emotion: Determination*
|
|
43
|
+
SQLite is sufficient for our scale. Zero external dependencies.
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
## 引き継ぎ
|
|
48
|
+
- Content below this delimiter is excluded from search
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Building the Index
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
cogmem index # Incremental (new/changed files only)
|
|
55
|
+
cogmem index --all # Force re-index everything
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
## Searching
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
cogmem search "adaptive half-life"
|
|
62
|
+
cogmem search "competition analysis" --top-k 3
|
|
63
|
+
cogmem search "previous decisions" --json
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## Python API
|
|
67
|
+
|
|
68
|
+
```python
|
|
69
|
+
from cognitive_memory import MemoryStore, CogMemConfig
|
|
70
|
+
|
|
71
|
+
config = CogMemConfig.from_toml("cogmem.toml")
|
|
72
|
+
with MemoryStore(config) as store:
|
|
73
|
+
store.index_dir()
|
|
74
|
+
response = store.search("past competition analysis")
|
|
75
|
+
for r in response.results:
|
|
76
|
+
print(f"{r.date} [{r.score:.2f}] {r.content[:80]}")
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
### Convenience API
|
|
80
|
+
|
|
81
|
+
```python
|
|
82
|
+
from cognitive_memory import search
|
|
83
|
+
|
|
84
|
+
response = search("past decisions") # Auto-finds cogmem.toml
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## AI Coding Tool Integration
|
|
88
|
+
|
|
89
|
+
### Claude Code
|
|
90
|
+
|
|
91
|
+
Add to `.claude/commands/search.md`:
|
|
92
|
+
```
|
|
93
|
+
Run `cogmem search "$ARGUMENTS"` and return the results.
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
### Cursor
|
|
97
|
+
|
|
98
|
+
Add to `.cursorrules`:
|
|
99
|
+
```
|
|
100
|
+
Before answering questions about past decisions, run:
|
|
101
|
+
cogmem search "<relevant query>"
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### Cline
|
|
105
|
+
|
|
106
|
+
Add to `.clinerules`:
|
|
107
|
+
```
|
|
108
|
+
Use `cogmem search` to recall past context before making decisions.
|
|
109
|
+
```
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "cogmem-agent"
|
|
7
|
+
dynamic = ["version"]
|
|
8
|
+
description = "Human-like cognitive memory for AI agents — emotion-gated recall with adaptive forgetting"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.9"
|
|
12
|
+
authors = [{ name = "Akira Honjo" }]
|
|
13
|
+
keywords = ["ai", "agent", "memory", "cognitive", "semantic-search", "llm"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"License :: OSI Approved :: MIT License",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Programming Language :: Python :: 3.9",
|
|
20
|
+
"Programming Language :: Python :: 3.10",
|
|
21
|
+
"Programming Language :: Python :: 3.11",
|
|
22
|
+
"Programming Language :: Python :: 3.12",
|
|
23
|
+
"Programming Language :: Python :: 3.13",
|
|
24
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
25
|
+
]
|
|
26
|
+
dependencies = [
|
|
27
|
+
"tomli>=1.0; python_version < '3.11'",
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[project.optional-dependencies]
|
|
31
|
+
openai = ["openai>=1.0"]
|
|
32
|
+
dev = ["pytest>=7.0"]
|
|
33
|
+
|
|
34
|
+
[project.scripts]
|
|
35
|
+
cogmem = "cognitive_memory.cli.main:main"
|
|
36
|
+
|
|
37
|
+
[project.urls]
|
|
38
|
+
Homepage = "https://github.com/akira/cognitive-memory"
|
|
39
|
+
Repository = "https://github.com/akira/cognitive-memory"
|
|
40
|
+
|
|
41
|
+
[tool.hatch.version]
|
|
42
|
+
path = "src/cognitive_memory/_version.py"
|
|
43
|
+
|
|
44
|
+
[tool.hatch.build.targets.wheel]
|
|
45
|
+
packages = ["src/cognitive_memory"]
|
|
46
|
+
|
|
47
|
+
[tool.pytest.ini_options]
|
|
48
|
+
testpaths = ["tests"]
|
|
49
|
+
markers = [
|
|
50
|
+
"integration: requires Ollama running",
|
|
51
|
+
]
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Cognitive Memory — human-like cognitive memory for AI agents."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ._version import __version__
|
|
6
|
+
from .config import CogMemConfig
|
|
7
|
+
from .store import MemoryStore
|
|
8
|
+
from .types import MemoryEntry, SearchResponse, SearchResult
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def search(query: str, top_k: int = 5, config: CogMemConfig | None = None) -> SearchResponse:
    """Run a one-shot memory search.

    When *config* is omitted, the nearest ``cogmem.toml`` is located and
    loaded automatically. A temporary :class:`MemoryStore` is opened for
    the duration of the call and closed before returning.

    Args:
        query: Free-text search query.
        top_k: Maximum number of results to return.
        config: Optional pre-loaded configuration; auto-discovered if None.

    Returns:
        The store's :class:`SearchResponse` for *query*.
    """
    cfg = CogMemConfig.find_and_load() if config is None else config
    with MemoryStore(cfg) as store:
        return store.search(query, top_k)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
__all__ = [
|
|
20
|
+
"__version__",
|
|
21
|
+
"CogMemConfig",
|
|
22
|
+
"MemoryEntry",
|
|
23
|
+
"MemoryStore",
|
|
24
|
+
"SearchResponse",
|
|
25
|
+
"SearchResult",
|
|
26
|
+
"search",
|
|
27
|
+
]
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.1.1"
|
|
File without changes
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"""cogmem index / cogmem status — index management commands."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import sys
|
|
7
|
+
import time
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from ..config import CogMemConfig
|
|
11
|
+
from ..store import MemoryStore
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def run_index(all_files: bool = False, single_file: str | None = None):
    """Build or update the memory index (``cogmem index``).

    Args:
        all_files: When True, force re-indexing of every log file rather
            than only new/changed ones.
        single_file: Optional log filename. When given, only that file
            (resolved inside the configured logs directory) is re-indexed.

    Exits with status 1 (after printing to stderr) when *single_file*
    does not exist.
    """
    config = CogMemConfig.find_and_load()

    with MemoryStore(config) as store:
        # Monotonic clock for elapsed-time measurement: unlike time.time(),
        # it cannot jump backwards/forwards with NTP or manual clock changes.
        t0 = time.monotonic()

        if single_file:
            # Path(...).name drops any directory components, so a caller
            # cannot point this at a file outside the logs directory.
            fp = config.logs_path / Path(single_file).name
            if not fp.exists():
                print(f"File not found: {fp}", file=sys.stderr)
                sys.exit(1)
            n = store.index_file(fp, force=True)
            print(f"Indexed {n} entries from {fp.name}")
        else:
            n = store.index_dir(force=all_files)
        elapsed = time.monotonic() - t0
        print(f"Done: {n} entries indexed in {elapsed:.1f}s")
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def run_status():
    """Print index statistics (``cogmem status``): file count, entry count, DB size."""
    config = CogMemConfig.find_and_load()

    with MemoryStore(config) as store:
        stats = store.status()

        print(f"Indexed files: {stats['indexed_files']}")
        print(f"Total entries: {stats['total_entries']}")
        # Report the on-disk database size in kilobytes.
        print(f"Database size: {stats['db_size_bytes'] / 1024:.1f} KB")
|