memvee 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. memvee-0.1.0/PKG-INFO +251 -0
  2. memvee-0.1.0/README.md +223 -0
  3. memvee-0.1.0/pyproject.toml +87 -0
  4. memvee-0.1.0/src/memv/__init__.py +52 -0
  5. memvee-0.1.0/src/memv/cache.py +94 -0
  6. memvee-0.1.0/src/memv/config.py +55 -0
  7. memvee-0.1.0/src/memv/dashboard/__init__.py +5 -0
  8. memvee-0.1.0/src/memv/dashboard/__main__.py +25 -0
  9. memvee-0.1.0/src/memv/dashboard/app.py +487 -0
  10. memvee-0.1.0/src/memv/embeddings/__init__.py +3 -0
  11. memvee-0.1.0/src/memv/embeddings/openai.py +15 -0
  12. memvee-0.1.0/src/memv/llm/__init__.py +3 -0
  13. memvee-0.1.0/src/memv/llm/pydantic_ai.py +41 -0
  14. memvee-0.1.0/src/memv/memory/__init__.py +5 -0
  15. memvee-0.1.0/src/memv/memory/_api.py +151 -0
  16. memvee-0.1.0/src/memv/memory/_lifecycle.py +213 -0
  17. memvee-0.1.0/src/memv/memory/_pipeline.py +236 -0
  18. memvee-0.1.0/src/memv/memory/_task_manager.py +143 -0
  19. memvee-0.1.0/src/memv/memory/memory.py +310 -0
  20. memvee-0.1.0/src/memv/models.py +213 -0
  21. memvee-0.1.0/src/memv/processing/__init__.py +9 -0
  22. memvee-0.1.0/src/memv/processing/batch_segmenter.py +194 -0
  23. memvee-0.1.0/src/memv/processing/boundary.py +131 -0
  24. memvee-0.1.0/src/memv/processing/episode_merger.py +238 -0
  25. memvee-0.1.0/src/memv/processing/episodes.py +114 -0
  26. memvee-0.1.0/src/memv/processing/extraction.py +144 -0
  27. memvee-0.1.0/src/memv/processing/prompts.py +433 -0
  28. memvee-0.1.0/src/memv/protocols.py +39 -0
  29. memvee-0.1.0/src/memv/py.typed +0 -0
  30. memvee-0.1.0/src/memv/retrieval/__init__.py +5 -0
  31. memvee-0.1.0/src/memv/retrieval/retriever.py +238 -0
  32. memvee-0.1.0/src/memv/storage/__init__.py +19 -0
  33. memvee-0.1.0/src/memv/storage/sqlite/__init__.py +17 -0
  34. memvee-0.1.0/src/memv/storage/sqlite/_base.py +61 -0
  35. memvee-0.1.0/src/memv/storage/sqlite/_episodes.py +125 -0
  36. memvee-0.1.0/src/memv/storage/sqlite/_knowledge.py +171 -0
  37. memvee-0.1.0/src/memv/storage/sqlite/_messages.py +84 -0
  38. memvee-0.1.0/src/memv/storage/sqlite/_text_index.py +132 -0
  39. memvee-0.1.0/src/memv/storage/sqlite/_vector_index.py +167 -0
memvee-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,251 @@
1
+ Metadata-Version: 2.4
2
+ Name: memvee
3
+ Version: 0.1.0
4
+ Summary: Structured, temporal memory for AI agents.
5
+ Keywords: ai,agents,memory,llm,rag,temporal
6
+ Author: bartosz roguski
7
+ Author-email: bartosz roguski <bartosz.k.roguski@gmail.com>
8
+ License-Expression: MIT
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3.13
13
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
14
+ Classifier: Typing :: Typed
15
+ Requires-Dist: aiosqlite>=0.22.1
16
+ Requires-Dist: openai>=2.15.0
17
+ Requires-Dist: pydantic>=2.12.5
18
+ Requires-Dist: pydantic-ai>=1.43.0
19
+ Requires-Dist: sqlite-vec>=0.1.6
20
+ Requires-Dist: textual>=3.5.0
21
+ Requires-Python: >=3.13
22
+ Project-URL: Homepage, https://github.com/vstorm-co/memv
23
+ Project-URL: Documentation, https://vstorm-co.github.io/memv/
24
+ Project-URL: Repository, https://github.com/vstorm-co/memv
25
+ Project-URL: Issues, https://github.com/vstorm-co/memv/issues
26
+ Project-URL: Changelog, https://github.com/vstorm-co/memv/blob/main/CHANGELOG.md
27
+ Description-Content-Type: text/markdown
28
+
29
+ <p align="center">
30
+ <img src="assets/banner.png" alt="memv" width="600">
31
+ </p>
32
+
33
+ <h1 align="center">memv</h1>
34
+
35
+ <p align="center">
36
+ <b>Structured, Temporal Memory for AI Agents</b>
37
+ </p>
38
+
39
+ <p align="center">
40
+ <a href="https://vstorm-co.github.io/memv/">Docs</a> •
41
+ <a href="https://vstorm-co.github.io/memv/getting-started/">Getting Started</a> •
42
+ <a href="https://pypi.org/project/memvee/">PyPI</a>
43
+ </p>
44
+
45
+ <p align="center">
46
+ <a href="https://pypi.org/project/memvee/"><img src="https://img.shields.io/pypi/v/memvee.svg" alt="PyPI version"></a>
47
+ <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.13+-blue.svg" alt="Python 3.13+"></a>
48
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
49
+ <a href="https://github.com/pydantic/pydantic-ai"><img src="https://img.shields.io/badge/Powered%20by-Pydantic%20AI-E92063?logo=pydantic&logoColor=white" alt="Pydantic AI"></a>
50
+ </p>
51
+
52
+ <p align="center">
53
+ <b>🧠 Predict-Calibrate</b> extraction
54
+ &nbsp;•&nbsp;
55
+ <b>⏱️ Bi-Temporal</b> validity
56
+ &nbsp;•&nbsp;
57
+ <b>🔍 Hybrid</b> retrieval
58
+ &nbsp;•&nbsp;
59
+ <b>📦 SQLite</b> default
60
+ </p>
61
+
62
+ ---
63
+
64
+ ## Why memv?
65
+
66
+ Most memory systems extract everything and hope retrieval sorts it out. memv is different:
67
+
68
+ | Typical Approach | memv |
69
+ |------------------|--------|
70
+ | Extract all facts upfront | Extract only what we **failed to predict** |
71
+ | Overwrite old facts | **Invalidate** with temporal bounds |
72
+ | Retrieve by similarity | **Hybrid** vector + BM25 + RRF |
73
+ | Timestamps only | **Bi-temporal**: event time + transaction time |
74
+
75
+ **Result:** Less noise, better retrieval, accurate history.
76
+
77
+ ---
78
+
79
+ ## Get Started in 60 Seconds
80
+
81
+ ```bash
82
+ pip install memvee
83
+ ```
84
+
85
+ ```python
86
+ from memv import Memory
87
+ from memv.embeddings import OpenAIEmbedAdapter
88
+ from memv.llm import PydanticAIAdapter
89
+
90
+ memory = Memory(
91
+ db_path="memory.db",
92
+ embedding_client=OpenAIEmbedAdapter(),
93
+ llm_client=PydanticAIAdapter("openai:gpt-4o-mini"),
94
+ )
95
+
96
+ async with memory:
97
+ # Store conversation
98
+ await memory.add_exchange(
99
+ user_id="user-123",
100
+ user_message="I just started at Anthropic as a researcher.",
101
+ assistant_message="Congrats! What's your focus area?",
102
+ )
103
+
104
+ # Extract knowledge
105
+ await memory.process("user-123")
106
+
107
+ # Retrieve context
108
+ result = await memory.retrieve("What does the user do?", user_id="user-123")
109
+ print(result.to_prompt())
110
+ ```
111
+
112
+ **That's it.** Your agent now has:
113
+
114
+ - ✅ **Episodic memory** — conversations grouped into coherent episodes
115
+ - ✅ **Semantic knowledge** — facts extracted via predict-calibrate
116
+ - ✅ **Temporal awareness** — knows when facts were true
117
+ - ✅ **Hybrid retrieval** — vector + text search with RRF fusion
118
+
119
+ ---
120
+
121
+ ## Features
122
+
123
+ 🧠 **Predict-Calibrate Extraction**
124
+ > Only extracts what the model failed to predict. Importance emerges from prediction error, not upfront scoring. Based on [Nemori](https://arxiv.org/abs/2508.03341).
125
+
126
+ ⏱️ **Bi-Temporal Validity**
127
+ > Track when facts were true (event time) vs when you learned them (transaction time). Query history at any point in time. Based on [Graphiti](https://github.com/getzep/graphiti).
128
+
129
+ 🔍 **Hybrid Retrieval**
130
+ > Combines vector similarity and BM25 text search with Reciprocal Rank Fusion. Configurable weighting.
131
+
132
+ 📝 **Episode Segmentation**
133
+ > Automatically groups messages into coherent conversation episodes. Handles interleaved topics.
134
+
135
+ 🔄 **Contradiction Handling**
136
+ > New facts automatically invalidate conflicting old facts. Full history preserved.
137
+
138
+ 📅 **Temporal Parsing**
139
+ > Relative dates ("last week", "yesterday") resolved to absolute timestamps at extraction time.
140
+
141
+ ⚡ **Async Processing**
142
+ > Non-blocking `process_async()` with auto-processing when message threshold is reached.
143
+
144
+ 🗄️ **SQLite Default**
145
+ > Zero-config local storage with sqlite-vec for vectors and FTS5 for text search.
146
+
147
+ ---
148
+
149
+ ## Point-in-Time Queries
150
+
151
+ memv's bi-temporal model lets you query knowledge as it was at any moment:
152
+
153
+ ```python
154
+ from datetime import datetime
155
+
156
+ # What did we know about user's job in January 2024?
157
+ result = await memory.retrieve(
158
+ "Where does user work?",
159
+ user_id="user-123",
160
+ at_time=datetime(2024, 1, 1),
161
+ )
162
+
163
+ # Show full history including superseded facts
164
+ result = await memory.retrieve(
165
+ "Where does user work?",
166
+ user_id="user-123",
167
+ include_expired=True,
168
+ )
169
+ ```
170
+
171
+ ---
172
+
173
+ ## Architecture
174
+
175
+ ```
176
+ Messages (append-only)
177
+
178
+
179
+ Episodes (segmented conversations)
180
+
181
+
182
+ Knowledge (extracted facts with bi-temporal validity)
183
+
184
+ ├── Vector Index (sqlite-vec)
185
+ └── Text Index (FTS5)
186
+ ```
187
+
188
+ **Extraction Flow:**
189
+ 1. Messages buffered until threshold
190
+ 2. Boundary detection segments into episodes
191
+ 3. Episode narrative generated
192
+ 4. Predict what episode should contain (given existing KB)
193
+ 5. Compare prediction vs actual → extract gaps
194
+ 6. Store with embeddings + temporal bounds
195
+
196
+ ---
197
+
198
+ ## Framework Integration
199
+
200
+ memv works with any agent framework:
201
+
202
+ ```python
203
+ class MyAgent:
204
+ def __init__(self, memory: Memory):
205
+ self.memory = memory
206
+
207
+ async def run(self, user_input: str, user_id: str) -> str:
208
+ # 1. Retrieve relevant context
209
+ context = await self.memory.retrieve(user_input, user_id=user_id)
210
+
211
+ # 2. Generate response with context
212
+ response = await self.llm.generate(
213
+ f"{context.to_prompt()}\n\nUser: {user_input}"
214
+ )
215
+
216
+ # 3. Store the exchange
217
+ await self.memory.add_exchange(user_id, user_input, response)
218
+
219
+ return response
220
+ ```
221
+
222
+ ---
223
+
224
+ ## Documentation
225
+
226
+ - [Getting Started](https://vstorm-co.github.io/memv/getting-started/) — Installation, setup, first example
227
+ - [Core Concepts](https://vstorm-co.github.io/memv/concepts/) — Predict-calibrate, episodes, bi-temporal, retrieval
228
+ - [API Reference](https://vstorm-co.github.io/memv/api/) — All public classes and methods
229
+
230
+ ---
231
+
232
+ ## Contributing
233
+
234
+ ```bash
235
+ git clone https://github.com/vstorm-co/memv.git
236
+ cd memv
237
+ make install
238
+ make all
239
+ ```
240
+
241
+ See [CONTRIBUTING.md](CONTRIBUTING.md) for details.
242
+
243
+ ---
244
+
245
+ ## License
246
+
247
+ MIT — see [LICENSE](LICENSE)
248
+
249
+ <p align="center">
250
+ <sub>Built with ❤️ by <a href="https://github.com/vstorm-co">vstorm</a></sub>
251
+ </p>
memvee-0.1.0/README.md ADDED
@@ -0,0 +1,223 @@
1
+ <p align="center">
2
+ <img src="assets/banner.png" alt="memv" width="600">
3
+ </p>
4
+
5
+ <h1 align="center">memv</h1>
6
+
7
+ <p align="center">
8
+ <b>Structured, Temporal Memory for AI Agents</b>
9
+ </p>
10
+
11
+ <p align="center">
12
+ <a href="https://vstorm-co.github.io/memv/">Docs</a> •
13
+ <a href="https://vstorm-co.github.io/memv/getting-started/">Getting Started</a> •
14
+ <a href="https://pypi.org/project/memvee/">PyPI</a>
15
+ </p>
16
+
17
+ <p align="center">
18
+ <a href="https://pypi.org/project/memvee/"><img src="https://img.shields.io/pypi/v/memvee.svg" alt="PyPI version"></a>
19
+ <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.13+-blue.svg" alt="Python 3.13+"></a>
20
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
21
+ <a href="https://github.com/pydantic/pydantic-ai"><img src="https://img.shields.io/badge/Powered%20by-Pydantic%20AI-E92063?logo=pydantic&logoColor=white" alt="Pydantic AI"></a>
22
+ </p>
23
+
24
+ <p align="center">
25
+ <b>🧠 Predict-Calibrate</b> extraction
26
+ &nbsp;•&nbsp;
27
+ <b>⏱️ Bi-Temporal</b> validity
28
+ &nbsp;•&nbsp;
29
+ <b>🔍 Hybrid</b> retrieval
30
+ &nbsp;•&nbsp;
31
+ <b>📦 SQLite</b> default
32
+ </p>
33
+
34
+ ---
35
+
36
+ ## Why memv?
37
+
38
+ Most memory systems extract everything and hope retrieval sorts it out. memv is different:
39
+
40
+ | Typical Approach | memv |
41
+ |------------------|--------|
42
+ | Extract all facts upfront | Extract only what we **failed to predict** |
43
+ | Overwrite old facts | **Invalidate** with temporal bounds |
44
+ | Retrieve by similarity | **Hybrid** vector + BM25 + RRF |
45
+ | Timestamps only | **Bi-temporal**: event time + transaction time |
46
+
47
+ **Result:** Less noise, better retrieval, accurate history.
48
+
49
+ ---
50
+
51
+ ## Get Started in 60 Seconds
52
+
53
+ ```bash
54
+ pip install memvee
55
+ ```
56
+
57
+ ```python
58
+ from memv import Memory
59
+ from memv.embeddings import OpenAIEmbedAdapter
60
+ from memv.llm import PydanticAIAdapter
61
+
62
+ memory = Memory(
63
+ db_path="memory.db",
64
+ embedding_client=OpenAIEmbedAdapter(),
65
+ llm_client=PydanticAIAdapter("openai:gpt-4o-mini"),
66
+ )
67
+
68
+ async with memory:
69
+ # Store conversation
70
+ await memory.add_exchange(
71
+ user_id="user-123",
72
+ user_message="I just started at Anthropic as a researcher.",
73
+ assistant_message="Congrats! What's your focus area?",
74
+ )
75
+
76
+ # Extract knowledge
77
+ await memory.process("user-123")
78
+
79
+ # Retrieve context
80
+ result = await memory.retrieve("What does the user do?", user_id="user-123")
81
+ print(result.to_prompt())
82
+ ```
83
+
84
+ **That's it.** Your agent now has:
85
+
86
+ - ✅ **Episodic memory** — conversations grouped into coherent episodes
87
+ - ✅ **Semantic knowledge** — facts extracted via predict-calibrate
88
+ - ✅ **Temporal awareness** — knows when facts were true
89
+ - ✅ **Hybrid retrieval** — vector + text search with RRF fusion
90
+
91
+ ---
92
+
93
+ ## Features
94
+
95
+ 🧠 **Predict-Calibrate Extraction**
96
+ > Only extracts what the model failed to predict. Importance emerges from prediction error, not upfront scoring. Based on [Nemori](https://arxiv.org/abs/2508.03341).
97
+
98
+ ⏱️ **Bi-Temporal Validity**
99
+ > Track when facts were true (event time) vs when you learned them (transaction time). Query history at any point in time. Based on [Graphiti](https://github.com/getzep/graphiti).
100
+
101
+ 🔍 **Hybrid Retrieval**
102
+ > Combines vector similarity and BM25 text search with Reciprocal Rank Fusion. Configurable weighting.
103
+
104
+ 📝 **Episode Segmentation**
105
+ > Automatically groups messages into coherent conversation episodes. Handles interleaved topics.
106
+
107
+ 🔄 **Contradiction Handling**
108
+ > New facts automatically invalidate conflicting old facts. Full history preserved.
109
+
110
+ 📅 **Temporal Parsing**
111
+ > Relative dates ("last week", "yesterday") resolved to absolute timestamps at extraction time.
112
+
113
+ ⚡ **Async Processing**
114
+ > Non-blocking `process_async()` with auto-processing when message threshold is reached.
115
+
116
+ 🗄️ **SQLite Default**
117
+ > Zero-config local storage with sqlite-vec for vectors and FTS5 for text search.
118
+
119
+ ---
120
+
121
+ ## Point-in-Time Queries
122
+
123
+ memv's bi-temporal model lets you query knowledge as it was at any moment:
124
+
125
+ ```python
126
+ from datetime import datetime
127
+
128
+ # What did we know about user's job in January 2024?
129
+ result = await memory.retrieve(
130
+ "Where does user work?",
131
+ user_id="user-123",
132
+ at_time=datetime(2024, 1, 1),
133
+ )
134
+
135
+ # Show full history including superseded facts
136
+ result = await memory.retrieve(
137
+ "Where does user work?",
138
+ user_id="user-123",
139
+ include_expired=True,
140
+ )
141
+ ```
142
+
143
+ ---
144
+
145
+ ## Architecture
146
+
147
+ ```
148
+ Messages (append-only)
149
+
150
+
151
+ Episodes (segmented conversations)
152
+
153
+
154
+ Knowledge (extracted facts with bi-temporal validity)
155
+
156
+ ├── Vector Index (sqlite-vec)
157
+ └── Text Index (FTS5)
158
+ ```
159
+
160
+ **Extraction Flow:**
161
+ 1. Messages buffered until threshold
162
+ 2. Boundary detection segments into episodes
163
+ 3. Episode narrative generated
164
+ 4. Predict what episode should contain (given existing KB)
165
+ 5. Compare prediction vs actual → extract gaps
166
+ 6. Store with embeddings + temporal bounds
167
+
168
+ ---
169
+
170
+ ## Framework Integration
171
+
172
+ memv works with any agent framework:
173
+
174
+ ```python
175
+ class MyAgent:
176
+ def __init__(self, memory: Memory):
177
+ self.memory = memory
178
+
179
+ async def run(self, user_input: str, user_id: str) -> str:
180
+ # 1. Retrieve relevant context
181
+ context = await self.memory.retrieve(user_input, user_id=user_id)
182
+
183
+ # 2. Generate response with context
184
+ response = await self.llm.generate(
185
+ f"{context.to_prompt()}\n\nUser: {user_input}"
186
+ )
187
+
188
+ # 3. Store the exchange
189
+ await self.memory.add_exchange(user_id, user_input, response)
190
+
191
+ return response
192
+ ```
193
+
194
+ ---
195
+
196
+ ## Documentation
197
+
198
+ - [Getting Started](https://vstorm-co.github.io/memv/getting-started/) — Installation, setup, first example
199
+ - [Core Concepts](https://vstorm-co.github.io/memv/concepts/) — Predict-calibrate, episodes, bi-temporal, retrieval
200
+ - [API Reference](https://vstorm-co.github.io/memv/api/) — All public classes and methods
201
+
202
+ ---
203
+
204
+ ## Contributing
205
+
206
+ ```bash
207
+ git clone https://github.com/vstorm-co/memv.git
208
+ cd memv
209
+ make install
210
+ make all
211
+ ```
212
+
213
+ See [CONTRIBUTING.md](CONTRIBUTING.md) for details.
214
+
215
+ ---
216
+
217
+ ## License
218
+
219
+ MIT — see [LICENSE](LICENSE)
220
+
221
+ <p align="center">
222
+ <sub>Built with ❤️ by <a href="https://github.com/vstorm-co">vstorm</a></sub>
223
+ </p>
memvee-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,87 @@
1
+ [project]
2
+ name = "memvee"
3
+ version = "0.1.0"
4
+ description = "Structured, temporal memory for AI agents."
5
+ readme = "README.md"
6
+ license = "MIT"
7
+ authors = [
8
+ { name = "bartosz roguski", email = "bartosz.k.roguski@gmail.com" }
9
+ ]
10
+ requires-python = ">=3.13"
11
+ keywords = ["ai", "agents", "memory", "llm", "rag", "temporal"]
12
+ classifiers = [
13
+ "Development Status :: 3 - Alpha",
14
+ "Intended Audience :: Developers",
15
+ "License :: OSI Approved :: MIT License",
16
+ "Programming Language :: Python :: 3.13",
17
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
18
+ "Typing :: Typed",
19
+ ]
20
+ dependencies = [
21
+ "aiosqlite>=0.22.1",
22
+ "openai>=2.15.0",
23
+ "pydantic>=2.12.5",
24
+ "pydantic-ai>=1.43.0",
25
+ "sqlite-vec>=0.1.6",
26
+ "textual>=3.5.0",
27
+ ]
28
+
29
+ [project.scripts]
30
+ memv = "memv:main"
31
+
32
+ [project.urls]
33
+ Homepage = "https://github.com/vstorm-co/memv"
34
+ Documentation = "https://vstorm-co.github.io/memv/"
35
+ Repository = "https://github.com/vstorm-co/memv"
36
+ Issues = "https://github.com/vstorm-co/memv/issues"
37
+ Changelog = "https://github.com/vstorm-co/memv/blob/main/CHANGELOG.md"
38
+
39
+ [build-system]
40
+ requires = ["uv_build>=0.9.21,<0.10.0"]
41
+ build-backend = "uv_build"
42
+
43
+ [tool.uv.build-backend]
44
+ module-name = "memv"
45
+
46
+ [dependency-groups]
47
+ dev = [
48
+ "ipython>=9.9.0",
49
+ "pre-commit>=4.5.1",
50
+ "pytest>=9.0.2",
51
+ "pytest-asyncio>=1.3.0",
52
+ "ruff>=0.14.10",
53
+ "ty>=0.0.1a7",
54
+ ]
55
+ docs = [
56
+ "mkdocs>=1.6",
57
+ "mkdocs-material>=9.6",
58
+ "mkdocstrings[python]>=0.28",
59
+ ]
60
+ examples = [
61
+ "autogen-agentchat>=0.7",
62
+ "autogen-ext[openai]>=0.7",
63
+ "crewai>=0.11",
64
+ "langchain-openai>=1.0",
65
+ "langgraph>=1.0",
66
+ "llama-index>=0.14",
67
+ "llama-index-llms-openai>=0.6",
68
+ "rich>=13.0",
69
+ "typer>=0.15",
70
+ ]
71
+
72
+ [tool.ruff]
73
+ line-length = 135
74
+
75
+ [tool.ruff.lint]
76
+ select = [
77
+ "I", # isort
78
+ "ERA", # dead code
79
+ "F401", # unused imports
80
+ "E", # pycodestyle error
81
+ "W", # pycodestyle warning
82
+ "B", # flake8-bugbear
83
+ ]
84
+
85
+ [tool.pytest.ini_options]
86
+ testpaths = ["tests"]
87
+ pythonpath = ["src"]
memvee-0.1.0/src/memv/__init__.py ADDED
@@ -0,0 +1,52 @@
1
+ """memv - Structured, temporal memory for AI agents.
2
+
3
+ Example:
4
+ ```python
5
+ from memv import Memory, Message, MessageRole
6
+ from memv.embeddings import OpenAIEmbedAdapter
7
+ from memv.llm import PydanticAIAdapter
8
+
9
+ memory = Memory(
10
+ db_path="memory.db",
11
+ embedding_client=OpenAIEmbedAdapter(),
12
+ llm_client=PydanticAIAdapter("openai:gpt-4o-mini"),
13
+ )
14
+
15
+ async with memory:
16
+ await memory.add_exchange(user_id, user_msg, assistant_msg)
17
+ await memory.process(user_id)
18
+ result = await memory.retrieve("query", user_id=user_id)
19
+ print(result.to_prompt())
20
+ ```
21
+ """
22
+
23
+ from memv.config import MemoryConfig
24
+ from memv.memory import Memory
25
+ from memv.models import (
26
+ Episode,
27
+ ExtractedKnowledge,
28
+ Message,
29
+ MessageRole,
30
+ ProcessStatus,
31
+ ProcessTask,
32
+ RetrievalResult,
33
+ SemanticKnowledge,
34
+ )
35
+
36
+ __all__ = [
37
+ "Memory",
38
+ "MemoryConfig",
39
+ "Message",
40
+ "MessageRole",
41
+ "Episode",
42
+ "SemanticKnowledge",
43
+ "ExtractedKnowledge",
44
+ "RetrievalResult",
45
+ "ProcessTask",
46
+ "ProcessStatus",
47
+ ]
48
+
49
+
50
def main() -> None:
    """Console entry point (`memv` script): print a short package blurb.

    Produces two informational lines on stdout and returns nothing.
    """
    for line in (
        "memv - Structured, temporal memory for AI agents.",
        "See examples/ for usage demos.",
    ):
        print(line)
memvee-0.1.0/src/memv/cache.py ADDED
@@ -0,0 +1,94 @@
1
+ """Caching utilities for AgentMemory."""
2
+
3
+ import hashlib
4
+ from collections import OrderedDict
5
+ from dataclasses import dataclass
6
+ from datetime import datetime, timedelta, timezone
7
+
8
+
9
+ @dataclass
10
+ class CacheEntry:
11
+ """Entry in the embedding cache with expiration."""
12
+
13
+ value: list[float]
14
+ expires_at: datetime
15
+
16
+
17
+ class EmbeddingCache:
18
+ """LRU cache for query embeddings with TTL.
19
+
20
+ Caches embedding vectors by text content hash to avoid redundant API calls.
21
+ Thread-safe for single-threaded async usage (no locks needed).
22
+
23
+ Example:
24
+ ```python
25
+ cache = EmbeddingCache(max_size=1000, ttl_seconds=600)
26
+
27
+ # Check cache first
28
+ embedding = cache.get("query text")
29
+ if embedding is None:
30
+ embedding = await embedder.embed("query text")
31
+ cache.set("query text", embedding)
32
+ ```
33
+ """
34
+
35
+ def __init__(self, max_size: int = 1000, ttl_seconds: int = 600):
36
+ """
37
+ Initialize the cache.
38
+
39
+ Args:
40
+ max_size: Maximum number of entries to cache
41
+ ttl_seconds: Time-to-live for entries in seconds
42
+ """
43
+ self.max_size = max_size
44
+ self.ttl = timedelta(seconds=ttl_seconds)
45
+ self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
46
+
47
+ def _key(self, text: str) -> str:
48
+ """Generate cache key from text content."""
49
+ return hashlib.sha256(text.encode()).hexdigest()
50
+
51
+ def get(self, text: str) -> list[float] | None:
52
+ """
53
+ Get cached embedding for text.
54
+
55
+ Returns None if not found or expired.
56
+ """
57
+ key = self._key(text)
58
+ entry = self._cache.get(key)
59
+
60
+ if entry is None:
61
+ return None
62
+
63
+ # Check expiration
64
+ if datetime.now(timezone.utc) > entry.expires_at:
65
+ del self._cache[key]
66
+ return None
67
+
68
+ # Move to end (most recently used)
69
+ self._cache.move_to_end(key)
70
+ return entry.value
71
+
72
+ def set(self, text: str, embedding: list[float]) -> None:
73
+ """
74
+ Cache embedding for text.
75
+
76
+ Evicts oldest entry if cache is full.
77
+ """
78
+ # Evict if at capacity
79
+ while len(self._cache) >= self.max_size:
80
+ self._cache.popitem(last=False) # Remove oldest
81
+
82
+ key = self._key(text)
83
+ self._cache[key] = CacheEntry(
84
+ value=embedding,
85
+ expires_at=datetime.now(timezone.utc) + self.ttl,
86
+ )
87
+
88
+ def clear(self) -> None:
89
+ """Clear all cached entries."""
90
+ self._cache.clear()
91
+
92
+ def __len__(self) -> int:
93
+ """Return number of cached entries."""
94
+ return len(self._cache)