agent-memory-store 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.MD +290 -0
- package/package.json +52 -0
- package/src/bm25.js +115 -0
- package/src/index.js +316 -0
- package/src/store.js +292 -0
package/README.MD
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
# agent-memory-store
|
|
2
|
+
|
|
3
|
+
> Local-first MCP memory server for multi-agent systems.
|
|
4
|
+
|
|
5
|
+
[](https://www.npmjs.com/package/agent-memory-store)
|
|
6
|
+
[](https://opensource.org/licenses/MIT)
|
|
7
|
+
[](https://nodejs.org)
|
|
8
|
+
|
|
9
|
+
`agent-memory-store` gives your AI agents a shared, searchable, persistent memory — running entirely on your local filesystem. No vector database, no embedding APIs, no cloud services required.
|
|
10
|
+
|
|
11
|
+
Agents read and write **chunks** (markdown files with YAML frontmatter) through a set of MCP tools. Search is powered by **BM25**, the same ranking algorithm used by Elasticsearch, implemented in pure JavaScript with zero runtime dependencies.
|
|
12
|
+
|
|
13
|
+
```
|
|
14
|
+
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
|
|
15
|
+
│ Agent A │ │ Agent B │ │ Agent C │
|
|
16
|
+
└──────┬──────┘ └──────┬──────┘ └──────┬──────┘
|
|
17
|
+
│ │ │
|
|
18
|
+
└────────────────┬─────────────────-┘
|
|
19
|
+
│ MCP tools
|
|
20
|
+
┌──────────▼──────────┐
|
|
21
|
+
│ agent-memory-store │
|
|
22
|
+
│ search · write │
|
|
23
|
+
│ read · state · list │
|
|
24
|
+
└──────────┬──────────┘
|
|
25
|
+
│
|
|
26
|
+
┌──────────▼──────────┐
|
|
27
|
+
│ .agent-memory-store/ │
|
|
28
|
+
│ ├── chunks/ │
|
|
29
|
+
│ └── state/ │
|
|
30
|
+
└─────────────────────-┘
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Features
|
|
34
|
+
|
|
35
|
+
- **Zero-install usage** via `npx`
|
|
36
|
+
- **BM25 full-text search** — relevance ranking without embeddings or APIs
|
|
37
|
+
- **Tag and agent filtering** — find chunks by who wrote them or what they cover
|
|
38
|
+
- **TTL-based expiry** — chunks auto-delete after a configurable number of days
|
|
39
|
+
- **Session state** — key/value store for pipeline progress, flags, and counters
|
|
40
|
+
- **Plain files** — chunks are `.md` files, readable and editable by humans and git
|
|
41
|
+
- **MCP-native** — works with Claude Code, opencode, and any MCP-compatible client
|
|
42
|
+
|
|
43
|
+
## Requirements
|
|
44
|
+
|
|
45
|
+
- Node.js ≥ 18
|
|
46
|
+
|
|
47
|
+
## Quick start
|
|
48
|
+
|
|
49
|
+
No installation needed:
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
npx agent-memory-store
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
With a custom storage path:
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
AGENT_STORE_PATH=/your/project/.agent-memory-store npx agent-memory-store
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
## Configuration
|
|
62
|
+
|
|
63
|
+
### Claude Code
|
|
64
|
+
|
|
65
|
+
Add to your project's `claude.mcp.json` (or `~/.claude/claude.mcp.json` for global):
|
|
66
|
+
|
|
67
|
+
```json
|
|
68
|
+
{
|
|
69
|
+
"mcpServers": {
|
|
70
|
+
"agent-memory-store": {
|
|
71
|
+
"command": "npx",
|
|
72
|
+
"args": ["-y", "agent-memory-store"],
|
|
73
|
+
"env": {
|
|
74
|
+
"AGENT_STORE_PATH": "/your/project/.agent-memory-store"
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
Or using the Claude Code CLI:
|
|
82
|
+
|
|
83
|
+
```bash
|
|
84
|
+
claude mcp add agent-memory-store \
  --env AGENT_STORE_PATH=/your/project/.agent-memory-store \
  -- npx -y agent-memory-store
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
### opencode
|
|
91
|
+
|
|
92
|
+
Add to your `opencode.json`:
|
|
93
|
+
|
|
94
|
+
```json
|
|
95
|
+
{
|
|
96
|
+
"mcp": {
|
|
97
|
+
"agent-memory-store": {
|
|
98
|
+
"command": "npx",
|
|
99
|
+
"args": ["-y", "agent-memory-store"],
|
|
100
|
+
"env": {
|
|
101
|
+
"AGENT_STORE_PATH": "/your/project/.agent-memory-store"
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
### Cursor / VS Code (MCP extension)
|
|
109
|
+
|
|
110
|
+
Add to your MCP settings file:
|
|
111
|
+
|
|
112
|
+
```json
|
|
113
|
+
{
|
|
114
|
+
"servers": {
|
|
115
|
+
"agent-memory-store": {
|
|
116
|
+
"command": "npx",
|
|
117
|
+
"args": ["-y", "agent-memory-store"],
|
|
118
|
+
"env": {
|
|
119
|
+
"AGENT_STORE_PATH": "/your/project/.agent-memory-store"
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
### Environment variables
|
|
127
|
+
|
|
128
|
+
| Variable | Default | Description |
|
|
129
|
+
| ------------------ | ----------------------- | -------------------------------------- |
|
|
130
|
+
| `AGENT_STORE_PATH` | `./.agent-memory-store` | Absolute path to the storage directory |
|
|
131
|
+
|
|
132
|
+
## Tools
|
|
133
|
+
|
|
134
|
+
| Tool | When to use |
|
|
135
|
+
| ---------------- | ------------------------------------------------------------------------- |
|
|
136
|
+
| `search_context` | **Start of every task** — retrieve relevant prior knowledge before acting |
|
|
137
|
+
| `write_context` | After decisions, discoveries, or outputs that other agents will need |
|
|
138
|
+
| `read_context` | Read a specific chunk by ID |
|
|
139
|
+
| `list_context` | Inventory the memory store (metadata only, no body) |
|
|
140
|
+
| `delete_context` | Remove outdated or incorrect chunks |
|
|
141
|
+
| `get_state` | Read a pipeline variable (progress, flags, counters) |
|
|
142
|
+
| `set_state` | Write a pipeline variable |
|
|
143
|
+
|
|
144
|
+
### `search_context`
|
|
145
|
+
|
|
146
|
+
```
|
|
147
|
+
query string Search query. Use specific, canonical terms.
|
|
148
|
+
tags string[] (optional) Narrow to chunks matching any of these tags.
|
|
149
|
+
agent string (optional) Narrow to chunks written by a specific agent.
|
|
150
|
+
top_k number (optional) Max results to return. Default: 6.
|
|
151
|
+
min_score number (optional) Minimum BM25 score. Default: 0.1.
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
### `write_context`
|
|
155
|
+
|
|
156
|
+
```
|
|
157
|
+
topic string Short, specific title. ("Auth — JWT decision", not "decision")
|
|
158
|
+
content string Chunk body in markdown. Include rationale, not just conclusions.
|
|
159
|
+
agent string (optional) Agent ID writing this chunk.
|
|
160
|
+
tags string[] (optional) Canonical tags for future retrieval.
|
|
161
|
+
importance string (optional) low | medium | high | critical. Default: medium.
|
|
162
|
+
ttl_days number (optional) Auto-expiry in days. Omit for permanent storage.
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
### `get_state` / `set_state`
|
|
166
|
+
|
|
167
|
+
```
|
|
168
|
+
key string State variable name.
|
|
169
|
+
value any (set_state only) Any JSON-serializable value.
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
## Storage format
|
|
173
|
+
|
|
174
|
+
Each chunk is a plain `.md` file under `.agent-memory-store/chunks/`:
|
|
175
|
+
|
|
176
|
+
```markdown
|
|
177
|
+
---
|
|
178
|
+
id: a3f9c12b40
|
|
179
|
+
topic: "Auth service — chose JWT over sessions"
|
|
180
|
+
agent: architect-agent
|
|
181
|
+
tags: [auth, architecture, decision]
|
|
182
|
+
importance: high
|
|
183
|
+
updated: 2025-06-01T14:32:00.000Z
|
|
184
|
+
---
|
|
185
|
+
|
|
186
|
+
Chose stateless JWT over server-side sessions.
|
|
187
|
+
|
|
188
|
+
**Rationale:** No shared session store needed across services.
|
|
189
|
+
Refresh tokens stored in Redis with 7-day TTL.
|
|
190
|
+
Access tokens expire in 15 minutes.
|
|
191
|
+
|
|
192
|
+
**Trade-offs:** Cannot invalidate individual tokens before expiry.
|
|
193
|
+
Acceptable for our threat model.
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
Session state lives in `.agent-memory-store/state/<key>.json`.
|
|
197
|
+
|
|
198
|
+
Both directories are human-readable, diffable with git, and can be committed to version control if you want shared team memory.
|
|
199
|
+
|
|
200
|
+
## Agent system prompt
|
|
201
|
+
|
|
202
|
+
Paste this into the system prompt of every agent that should use the memory store:
|
|
203
|
+
|
|
204
|
+
```markdown
|
|
205
|
+
## Memory usage
|
|
206
|
+
|
|
207
|
+
You have access to a persistent local memory store via agent-memory-store MCP tools.
|
|
208
|
+
|
|
209
|
+
**At the start of each task:**
|
|
210
|
+
|
|
211
|
+
1. Call `search_context` with 2–3 specific queries related to what you are about to do.
|
|
212
|
+
2. Incorporate retrieved chunks (score > 1.0) into your reasoning.
|
|
213
|
+
3. Call `get_state` to check pipeline status if relevant.
|
|
214
|
+
|
|
215
|
+
**After completing a subtask:**
|
|
216
|
+
|
|
217
|
+
1. Call `write_context` to persist:
|
|
218
|
+
- Decisions made and their rationale
|
|
219
|
+
- Key discoveries or findings
|
|
220
|
+
- Structured outputs intended for downstream agents
|
|
221
|
+
2. Use canonical tags consistent with the rest of the team.
|
|
222
|
+
3. Set `importance: high` or `critical` for information other agents will need.
|
|
223
|
+
|
|
224
|
+
**Best practices:**
|
|
225
|
+
|
|
226
|
+
- Specific topics: "ZAP scraper — stack decision" > "decision"
|
|
227
|
+
- Consistent tags: always use the same term (`auth`, not `authentication` or `autenticação`)
|
|
228
|
+
- Check before writing: search first to avoid duplicate chunks
|
|
229
|
+
- Temporary context: use `ttl_days: 7` for session-scoped information
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
## How BM25 search works
|
|
233
|
+
|
|
234
|
+
BM25 ranks documents by term frequency and inverse document frequency, normalized by document length. It is the ranking algorithm behind Elasticsearch and Apache Lucene.
|
|
235
|
+
|
|
236
|
+
**Strengths:**
|
|
237
|
+
|
|
238
|
+
- Works well for short, labeled text chunks
|
|
239
|
+
- Instant — no network calls, no GPU, no warm-up
|
|
240
|
+
- Deterministic and explainable
|
|
241
|
+
|
|
242
|
+
**Limitations:**
|
|
243
|
+
|
|
244
|
+
- No semantic understanding (`car` ≠ `automobile`)
|
|
245
|
+
- Mitigated by using canonical tags and consistent terminology across agents
|
|
246
|
+
|
|
247
|
+
**Score interpretation:**
|
|
248
|
+
|
|
249
|
+
- `> 3.0` — strong match, highly relevant
|
|
250
|
+
- `1.0 – 3.0` — good match, likely relevant
|
|
251
|
+
- `0.1 – 1.0` — weak match, may be tangentially related
|
|
252
|
+
- `< 0.1` — filtered out by default
|
|
253
|
+
|
|
254
|
+
## Development
|
|
255
|
+
|
|
256
|
+
```bash
|
|
257
|
+
git clone https://github.com/YOUR_USERNAME/agent-memory-store
|
|
258
|
+
cd agent-memory-store
|
|
259
|
+
npm install
|
|
260
|
+
npm start
|
|
261
|
+
```
|
|
262
|
+
|
|
263
|
+
Run tests:
|
|
264
|
+
|
|
265
|
+
```bash
|
|
266
|
+
npm test
|
|
267
|
+
```
|
|
268
|
+
|
|
269
|
+
See [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines.
|
|
270
|
+
|
|
271
|
+
## Project structure
|
|
272
|
+
|
|
273
|
+
```
|
|
274
|
+
src/
|
|
275
|
+
bm25.js BM25 ranking engine — pure JS, zero dependencies
|
|
276
|
+
store.js File-based persistence (chunks + session state)
|
|
277
|
+
index.js MCP server and tool definitions
|
|
278
|
+
```
|
|
279
|
+
|
|
280
|
+
## Roadmap
|
|
281
|
+
|
|
282
|
+
- [ ] `summarize_context` tool — LLM-powered chunk consolidation
|
|
283
|
+
- [ ] `prune_context` tool — remove chunks by age, agent, or importance
|
|
284
|
+
- [ ] Hybrid scoring: BM25 + optional local embedding reranking (ollama)
|
|
285
|
+
- [ ] Web UI for browsing the memory store
|
|
286
|
+
- [ ] Multi-project workspace support
|
|
287
|
+
|
|
288
|
+
## License
|
|
289
|
+
|
|
290
|
+
MIT — see [LICENSE](./LICENSE).
|
package/package.json
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "agent-memory-store",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "Local-first MCP memory server for multi-agent systems. BM25 search, zero external dependencies, file-based persistence.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "src/index.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"agent-memory-store": "./src/index.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"start": "node src/index.js",
|
|
12
|
+
"test": "node --test src/__tests__/store.test.js",
|
|
13
|
+
"lint": "node --check src/bm25.js src/store.js src/index.js"
|
|
14
|
+
},
|
|
15
|
+
"keywords": [
|
|
16
|
+
"mcp",
|
|
17
|
+
"model-context-protocol",
|
|
18
|
+
"ai-agents",
|
|
19
|
+
"multi-agent",
|
|
20
|
+
"memory",
|
|
21
|
+
"rag",
|
|
22
|
+
"bm25",
|
|
23
|
+
"context",
|
|
24
|
+
"opencode",
|
|
25
|
+
"claude",
|
|
26
|
+
"llm"
|
|
27
|
+
],
|
|
28
|
+
"author": "Vinícius Barreto",
|
|
29
|
+
"license": "MIT",
|
|
30
|
+
"repository": {
|
|
31
|
+
"type": "git",
|
|
32
|
+
"url": "https://github.com/vbfs/context-store.git"
|
|
33
|
+
},
|
|
34
|
+
"bugs": {
|
|
35
|
+
"url": "https://github.com/vbfs/context-store/issues"
|
|
36
|
+
},
|
|
37
|
+
"homepage": "https://github.com/vbfs/context-store#readme",
|
|
38
|
+
"engines": {
|
|
39
|
+
"node": ">=18.0.0"
|
|
40
|
+
},
|
|
41
|
+
"files": [
|
|
42
|
+
"src/",
|
|
43
|
+
"README.md",
|
|
44
|
+
"LICENSE",
|
|
45
|
+
"CHANGELOG.md"
|
|
46
|
+
],
|
|
47
|
+
"dependencies": {
|
|
48
|
+
"@modelcontextprotocol/sdk": "^1.28.0",
|
|
49
|
+
"gray-matter": "^4.0.3",
|
|
50
|
+
"zod": "^4.3.6"
|
|
51
|
+
}
|
|
52
|
+
}
|
package/src/bm25.js
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* BM25 (Best Match 25) ranking algorithm — pure JavaScript implementation.
|
|
3
|
+
*
|
|
4
|
+
* BM25 is a bag-of-words retrieval function that ranks documents by relevance
|
|
5
|
+
* to a given query. It is the default ranking algorithm in Elasticsearch and
|
|
6
|
+
* Apache Lucene, and works exceptionally well for short, well-labeled text
|
|
7
|
+
* chunks without requiring any external APIs or embedding models.
|
|
8
|
+
*
|
|
9
|
+
* Tuning parameters:
|
|
10
|
+
* k1 (1.5) — term frequency saturation. Higher = more weight on rare terms.
|
|
11
|
+
* b (0.75) — length normalization. 1.0 = full normalization, 0 = none.
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
const k1 = 1.5; // term-frequency saturation: higher → rare terms weigh more
const b = 0.75; // length normalization: 1.0 = full normalization, 0 = none

/**
 * Tokenizes a string into lowercase terms, stripping punctuation.
 * Text is NFD-normalized and combining diacritics removed, so
 * Latin-extended input (Portuguese, Spanish, French, …) matches its
 * unaccented form. Single-character tokens are discarded.
 *
 * @param {string} text
 * @returns {string[]}
 */
function tokenize(text) {
  return text
    .toLowerCase()
    .normalize("NFD")
    .replace(/[\u0300-\u036f]/g, "")
    .replace(/[^\w\s]/g, " ")
    .split(/\s+/)
    .filter((t) => t.length > 1);
}

/**
 * In-memory BM25 index: add documents once, then search repeatedly.
 */
export class BM25 {
  constructor() {
    /** @type {Array<{ id: string, len: number, metadata: object }>} */
    this.documents = [];
    /** @type {Array<Map<string, number>>} per-document term frequencies */
    this.termFreqs = [];
    /** @type {Map<string, number>} term → document frequency */
    this.docFreqs = new Map();
    // Running total of token counts so avgLen updates in O(1) per insert.
    // (Previously avgLen was recomputed with a reduce over ALL documents on
    // every addDocument call — O(n) per add, O(n²) to build the index.)
    this.totalLen = 0;
    this.avgLen = 0;
  }

  /**
   * Adds a document to the index.
   *
   * @param {string} id - Unique document identifier
   * @param {string} text - Searchable text content
   * @param {object} metadata - Arbitrary metadata attached to results
   */
  addDocument(id, text, metadata = {}) {
    const tokens = tokenize(text);
    const tf = new Map();
    for (const t of tokens) tf.set(t, (tf.get(t) || 0) + 1);

    this.documents.push({ id, len: tokens.length, metadata });
    this.termFreqs.push(tf);

    // Each distinct term in this document bumps that term's document frequency.
    for (const term of tf.keys()) {
      this.docFreqs.set(term, (this.docFreqs.get(term) || 0) + 1);
    }

    this.totalLen += tokens.length;
    this.avgLen = this.totalLen / this.documents.length;
  }

  /**
   * Computes the BM25 score for a single document against query terms.
   *
   * @param {number} docIdx
   * @param {string[]} queryTerms
   * @returns {number}
   */
  _score(docIdx, queryTerms) {
    const doc = this.documents[docIdx];
    const tf = this.termFreqs[docIdx];
    const N = this.documents.length;
    let score = 0;

    for (const term of queryTerms) {
      const freq = tf.get(term) || 0;
      if (freq === 0) continue;
      const df = this.docFreqs.get(term) || 0;
      // "+1" inside the log keeps idf positive even for very common terms.
      const idf = Math.log((N - df + 0.5) / (df + 0.5) + 1);
      const tfNorm =
        (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * (doc.len / this.avgLen)));
      score += idf * tfNorm;
    }
    return score;
  }

  /**
   * Searches the index and returns the top-K most relevant documents.
   *
   * @param {string} query - Natural language search query
   * @param {number} [topK=5] - Maximum results to return
   * @param {Function} [filter=null] - Optional predicate fn(metadata) => boolean
   * @returns {Array<{ id: string, score: number, metadata: object }>}
   */
  search(query, topK = 5, filter = null) {
    const queryTerms = tokenize(query);
    const results = [];

    for (let i = 0; i < this.documents.length; i++) {
      const doc = this.documents[i];
      if (filter && !filter(doc.metadata)) continue;
      const score = this._score(i, queryTerms);
      if (score > 0)
        results.push({ id: doc.id, score, metadata: doc.metadata });
    }

    // Highest score first. Comparator params renamed so they do not shadow
    // the module-level length-normalization constant `b`.
    return results.sort((x, y) => y.score - x.score).slice(0, topK);
  }
}
|
package/src/index.js
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
 * agent-memory-store MCP server entry point.
|
|
4
|
+
*
|
|
5
|
+
* Exposes 7 tools to any MCP-compatible client (Claude Code, opencode, etc.):
|
|
6
|
+
* search_context — BM25 full-text search over stored chunks
|
|
7
|
+
* write_context — persist a new memory chunk
|
|
8
|
+
* read_context — retrieve a chunk by ID
|
|
9
|
+
* list_context — list chunk metadata (no body)
|
|
10
|
+
* delete_context — remove a chunk by ID
|
|
11
|
+
* get_state — read a session state variable
|
|
12
|
+
* set_state — write a session state variable
|
|
13
|
+
*
|
|
14
|
+
 * Usage:
 *   npx agent-memory-store
 *   AGENT_STORE_PATH=/your/project/.agent-memory-store npx agent-memory-store
|
|
17
|
+
*/
|
|
18
|
+
|
|
19
|
+
import { readFile } from "node:fs/promises";

import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { z } from "zod";

import {
  searchChunks,
  writeChunk,
  readChunk,
  deleteChunk,
  listChunks,
  getState,
  setState,
} from "./store.js";
|
|
31
|
+
|
|
32
|
+
// Report the real published version in the MCP handshake instead of a
// hard-coded string. (Replaces the previous awkward mix of `await` and
// `.then()` on a dynamic import of "fs".)
const { version } = JSON.parse(
  await readFile(new URL("../package.json", import.meta.url), "utf8"),
);

const server = new McpServer({
  // Advertised server name kept in sync with the published package name.
  name: "agent-memory-store",
  version,
});
|
|
42
|
+
|
|
43
|
+
// ─── search_context ───────────────────────────────────────────────────────────
|
|
44
|
+
|
|
45
|
+
server.tool(
  "search_context",
  "Search stored memory chunks by relevance using BM25 full-text ranking. Call this at the start of any task to retrieve relevant prior knowledge, decisions, and outputs before generating a response.",
  {
    query: z
      .string()
      .describe(
        "Search query. Be specific — use canonical terms your team agreed on.",
      ),
    tags: z
      .array(z.string())
      .optional()
      .describe("Narrow results to chunks matching any of these tags."),
    agent: z
      .string()
      .optional()
      .describe("Narrow results to chunks written by a specific agent ID."),
    top_k: z
      .number()
      .int()
      .min(1)
      .max(20)
      .optional()
      .describe("Maximum number of results to return (default: 6)."),
    min_score: z
      .number()
      .min(0)
      .optional()
      .describe(
        "Minimum BM25 relevance score. Lower = more permissive (default: 0.1).",
      ),
  },
  async (args) => {
    // Apply the documented defaults before delegating to the store layer.
    const hits = await searchChunks({
      query: args.query,
      tags: args.tags ?? [],
      agent: args.agent,
      topK: args.top_k ?? 6,
      minScore: args.min_score ?? 0.1,
    });

    if (hits.length === 0) {
      return { content: [{ type: "text", text: "No matching chunks found." }] };
    }

    // One markdown section per hit: score + topic headline, a metadata
    // line, a blank line, then the chunk body.
    const renderHit = (hit) => {
      const metaLine = `**id:** \`${hit.id}\` | **agent:** ${hit.agent} | **tags:** ${hit.tags.join(", ")} | **importance:** ${hit.importance} | **updated:** ${hit.updated}`;
      return `### [score: ${hit.score}] ${hit.topic}\n${metaLine}\n\n${hit.content}`;
    };

    return {
      content: [
        { type: "text", text: hits.map(renderHit).join("\n\n---\n\n") },
      ],
    };
  },
);
|
|
108
|
+
|
|
109
|
+
// ─── write_context ────────────────────────────────────────────────────────────
|
|
110
|
+
|
|
111
|
+
server.tool(
  "write_context",
  "Persist a memory chunk to local storage. Call this after completing a subtask, making a key decision, or producing output that downstream agents will need.",
  {
    topic: z
      .string()
      .describe(
        'Short, specific title. e.g. "Auth service — JWT decision" not "decision".',
      ),
    content: z
      .string()
      .describe(
        "Chunk body in markdown. Include rationale, not just conclusions.",
      ),
    agent: z
      .string()
      .optional()
      .describe(
        'Agent ID writing this chunk (e.g. "pm-agent", "scraper-agent").',
      ),
    tags: z
      .array(z.string())
      .optional()
      .describe(
        "Canonical tags for future retrieval. Use consistent terms across agents.",
      ),
    importance: z
      .enum(["low", "medium", "high", "critical"])
      .optional()
      .describe(
        "Importance level — affects future curation decisions (default: medium).",
      ),
    ttl_days: z
      .number()
      .positive()
      .optional()
      .describe("Auto-expiry in days. Omit for permanent storage."),
  },
  async ({ topic, content, agent, tags, importance, ttl_days }) => {
    // Fill in the documented defaults before handing off to the store layer.
    const saved = await writeChunk({
      topic,
      content,
      agent: agent ?? "global",
      tags: tags ?? [],
      importance: importance ?? "medium",
      ttlDays: ttl_days,
    });

    const summary = `Chunk saved: id=\`${saved.id}\` | topic="${saved.topic}" | tags=[${saved.tags.join(", ")}] | importance=${saved.importance}`;
    return { content: [{ type: "text", text: summary }] };
  },
);
|
|
173
|
+
|
|
174
|
+
// ─── read_context ─────────────────────────────────────────────────────────────
|
|
175
|
+
|
|
176
|
+
server.tool(
  "read_context",
  "Retrieve the full content of a specific chunk by its ID.",
  {
    id: z
      .string()
      .describe(
        "Chunk ID (10-char hex string from write_context or list_context).",
      ),
  },
  async ({ id }) => {
    const chunk = await readChunk(id);

    if (!chunk) {
      return {
        content: [{ type: "text", text: `No chunk found with id \`${id}\`.` }],
      };
    }

    // Headline + metadata line, a blank line, then the raw markdown body.
    const { meta, content } = chunk;
    const metaLine = `**id:** \`${meta.id}\` | **agent:** ${meta.agent} | **tags:** ${(meta.tags || []).join(", ")} | **importance:** ${meta.importance} | **updated:** ${meta.updated}`;
    const text = `## ${meta.topic}\n${metaLine}\n\n${content}`;
    return { content: [{ type: "text", text }] };
  },
);
|
|
204
|
+
|
|
205
|
+
// ─── list_context ─────────────────────────────────────────────────────────────
|
|
206
|
+
|
|
207
|
+
server.tool(
  "list_context",
  "List all stored chunks (metadata only, no body). Useful for inventory and curation.",
  {
    agent: z.string().optional().describe("Filter by agent ID."),
    tags: z.array(z.string()).optional().describe("Filter by tags."),
  },
  async ({ agent, tags }) => {
    const chunks = await listChunks({ agent, tags: tags ?? [] });

    if (chunks.length === 0) {
      return { content: [{ type: "text", text: "Memory store is empty." }] };
    }

    // One bullet per chunk — metadata only, never the body.
    const rows = [];
    for (const c of chunks) {
      rows.push(
        `- \`${c.id}\` **${c.topic}** | agent:${c.agent} | tags:[${c.tags.join(", ")}] | ${c.importance} | ${c.updated}`,
      );
    }

    return {
      content: [
        {
          type: "text",
          text: `${chunks.length} chunk(s) found:\n\n${rows.join("\n")}`,
        },
      ],
    };
  },
);
|
|
236
|
+
|
|
237
|
+
// ─── delete_context ───────────────────────────────────────────────────────────
|
|
238
|
+
|
|
239
|
+
server.tool(
  "delete_context",
  "Permanently delete a chunk by ID. Use to remove outdated or incorrect memory.",
  {
    id: z.string().describe("Chunk ID to delete."),
  },
  async ({ id }) => {
    const removed = await deleteChunk(id);
    // deleteChunk reports whether anything existed under that ID.
    const text = removed
      ? `Chunk \`${id}\` deleted.`
      : `No chunk found with id \`${id}\`.`;
    return { content: [{ type: "text", text }] };
  },
);
|
|
259
|
+
|
|
260
|
+
// ─── get_state ────────────────────────────────────────────────────────────────
|
|
261
|
+
|
|
262
|
+
server.tool(
  "get_state",
  "Read a pipeline state variable by key. Use to check progress, flags, and counters across agent turns.",
  {
    key: z.string().describe("State key to read."),
  },
  async ({ key }) => {
    const value = await getState(key);

    if (value === null) {
      return {
        content: [{ type: "text", text: `State key "${key}" not found.` }],
      };
    }

    // Strings pass through verbatim; everything else is pretty-printed JSON.
    const rendered =
      typeof value === "string" ? value : JSON.stringify(value, null, 2);
    return { content: [{ type: "text", text: rendered }] };
  },
);
|
|
288
|
+
|
|
289
|
+
// ─── set_state ────────────────────────────────────────────────────────────────
|
|
290
|
+
|
|
291
|
+
server.tool(
  "set_state",
  "Write a pipeline state variable (any JSON-serializable value). Use to track progress, store flags, or pass structured data between agent turns.",
  {
    key: z
      .string()
      .describe('State key (e.g. "pipeline_status", "current_phase").'),
    value: z.any().describe("Any JSON-serializable value."),
  },
  async ({ key, value }) => {
    const { updated } = await setState(key, value);
    return {
      content: [
        { type: "text", text: `State "${key}" written at ${updated}.` },
      ],
    };
  },
);
|
|
312
|
+
|
|
313
|
+
// ─── Start server ─────────────────────────────────────────────────────────────
|
|
314
|
+
|
|
315
|
+
// Speak MCP over stdio so any client can spawn this file as a subprocess.
await server.connect(new StdioServerTransport());
|
package/src/store.js
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* context-store: file-based persistent memory for multi-agent systems.
|
|
3
|
+
*
|
|
4
|
+
* Storage layout:
|
|
5
|
+
* <CONTEXT_STORE_PATH>/
|
|
6
|
+
* chunks/ → one .md file per chunk, YAML frontmatter + markdown body
|
|
7
|
+
* state/ → one .json file per key (session state / pipeline variables)
|
|
8
|
+
*
|
|
9
|
+
* Chunk file format:
|
|
10
|
+
* ---
|
|
11
|
+
* id: <sha1-10>
|
|
12
|
+
* topic: "Descriptive title of the chunk"
|
|
13
|
+
* agent: agent-id
|
|
14
|
+
* tags: [tag1, tag2]
|
|
15
|
+
* importance: low | medium | high | critical
|
|
16
|
+
* updated: ISO-8601
|
|
17
|
+
* expires: ISO-8601 # optional — omit for permanent chunks
|
|
18
|
+
* ---
|
|
19
|
+
* Markdown content here.
|
|
20
|
+
*/
|
|
21
|
+
|
|
22
|
+
import fs from "fs/promises";
|
|
23
|
+
import path from "path";
|
|
24
|
+
import matter from "gray-matter";
|
|
25
|
+
import { createHash } from "crypto";
|
|
26
|
+
import { BM25 } from "./bm25.js";
|
|
27
|
+
|
|
28
|
+
// Storage root. Prefer the env var the README documents (AGENT_STORE_PATH);
// keep the legacy CONTEXT_STORE_PATH name as a fallback for old configs.
// Default matches the README: ./.agent-memory-store in the working directory.
// (Previously only CONTEXT_STORE_PATH was honored, so the documented
// AGENT_STORE_PATH variable was silently ignored.)
const configuredPath =
  process.env.AGENT_STORE_PATH ?? process.env.CONTEXT_STORE_PATH;

const STORE_PATH = configuredPath
  ? path.resolve(configuredPath)
  : path.join(process.cwd(), ".agent-memory-store");

const CHUNKS_DIR = path.join(STORE_PATH, "chunks");
const STATE_DIR = path.join(STORE_PATH, "state");
|
|
34
|
+
|
|
35
|
+
/** Creates the chunks/ and state/ directories if they are missing. */
async function ensureDirs() {
  // The two directories are independent siblings, so create them in parallel.
  await Promise.all([
    fs.mkdir(CHUNKS_DIR, { recursive: true }),
    fs.mkdir(STATE_DIR, { recursive: true }),
  ]);
}
|
|
40
|
+
|
|
41
|
+
/**
 * Generates a short chunk ID by hashing agent + topic + timestamp + a
 * random salt, so repeated writes of the same topic still get fresh IDs.
 *
 * @param {string} agentId
 * @param {string} topic
 * @returns {string} 10-char hex string
 */
function generateId(agentId, topic) {
  const entropy = [agentId, topic, Date.now(), Math.random()].join(":");
  return createHash("sha1").update(entropy).digest("hex").substring(0, 10);
}
|
|
51
|
+
|
|
52
|
+
/**
 * Loads every chunk file from disk, skipping non-markdown entries.
 * Chunks whose `expires` frontmatter is in the past are deleted on the
 * spot and never returned.
 *
 * @returns {Promise<Array<{ file: string, meta: object, content: string }>>}
 */
async function loadAllChunks() {
  await ensureDirs();
  const entries = await fs.readdir(CHUNKS_DIR).catch(() => []);
  const markdownFiles = entries.filter((name) => name.endsWith(".md"));
  const chunks = [];

  await Promise.all(
    markdownFiles.map(async (file) => {
      const fullPath = path.join(CHUNKS_DIR, file);
      try {
        const raw = await fs.readFile(fullPath, "utf8");
        const parsed = matter(raw);

        const expiry = parsed.data.expires;
        if (expiry && new Date(expiry) < new Date()) {
          // TTL has passed — garbage-collect the file and drop the chunk.
          await fs.unlink(fullPath).catch(() => {});
          return;
        }

        chunks.push({
          file,
          meta: parsed.data,
          content: parsed.content.trim(),
        });
      } catch {
        // Unreadable or malformed file: skip silently (best-effort load).
      }
    }),
  );

  return chunks;
}
|
|
84
|
+
|
|
85
|
+
/**
 * Builds a BM25 index over the given chunks.
 * The indexed text for each chunk is the concatenation of its topic, tags,
 * agent id, and markdown body, so all four are searchable.
 *
 * @param {Array} chunks
 * @returns {BM25}
 */
function buildIndex(chunks) {
  const index = new BM25();
  chunks.forEach((chunk) => {
    const parts = [
      chunk.meta.topic || "",
      (chunk.meta.tags || []).join(" "),
      chunk.meta.agent || "",
      chunk.content,
    ];
    index.addDocument(chunk.file, parts.join(" "), chunk.meta);
  });
  return index;
}
|
|
105
|
+
|
|
106
|
+
// ─── Public API ───────────────────────────────────────────────────────────────
|
|
107
|
+
|
|
108
|
+
/**
 * Searches chunks by relevance using BM25, with optional tag and agent filters.
 *
 * @param {object} opts
 * @param {string} opts.query - Search query text
 * @param {string[]} [opts.tags] - Filter: only chunks matching any of these tags
 * @param {string} [opts.agent] - Filter: only chunks written by this agent
 * @param {number} [opts.topK] - Max results (default: 6)
 * @param {number} [opts.minScore] - Minimum BM25 score threshold (default: 0.1)
 * @returns {Promise<Array>}
 */
export async function searchChunks({
  query,
  tags = [],
  agent,
  topK = 6,
  minScore = 0.1,
}) {
  const chunks = await loadAllChunks();
  if (!chunks.length) return [];

  const engine = buildIndex(chunks);

  // Only hand the engine a filter callback when at least one restriction
  // was actually requested.
  let filter = null;
  if (agent || tags.length > 0) {
    filter = (meta) => {
      if (agent && meta.agent !== agent) return false;
      const chunkTags = meta.tags || [];
      if (tags.length > 0 && !tags.some((t) => chunkTags.includes(t))) {
        return false;
      }
      return true;
    };
  }

  const byFile = new Map(chunks.map((c) => [c.file, c]));

  const results = [];
  for (const hit of engine.search(query, topK, filter)) {
    if (hit.score < minScore) continue;
    const { meta, content } = byFile.get(hit.id);
    results.push({
      id: meta.id,
      topic: meta.topic,
      agent: meta.agent,
      tags: meta.tags || [],
      importance: meta.importance || "medium",
      score: Math.round(hit.score * 100) / 100,
      content,
      updated: meta.updated,
    });
  }
  return results;
}
|
|
160
|
+
|
|
161
|
+
/**
 * Writes a new chunk to disk as a markdown file with YAML frontmatter.
 *
 * @param {object} opts
 * @param {string} opts.topic - Short descriptive title
 * @param {string} opts.content - Markdown body content
 * @param {string} [opts.agent] - Agent identifier
 * @param {string[]} [opts.tags] - Search tags
 * @param {string} [opts.importance] - low | medium | high | critical
 * @param {number} [opts.ttlDays] - Days until auto-expiry (omit = permanent)
 * @returns {Promise<{ id, file, topic, tags, importance }>}
 */
export async function writeChunk({
  topic,
  content,
  agent = "global",
  tags = [],
  importance = "medium",
  ttlDays = null,
}) {
  await ensureDirs();

  const id = generateId(agent, topic);
  const now = new Date().toISOString();

  const meta = { id, topic, agent, tags, importance, updated: now };
  // Truthy check is intentional: ttlDays of 0/null/undefined means the
  // chunk never gets an `expires` field and is kept permanently.
  if (ttlDays) {
    meta.expires = new Date(Date.now() + ttlDays * 86_400_000).toISOString();
  }

  const filename = `${id}.md`;
  const body = matter.stringify(`\n${content}\n`, meta);
  await fs.writeFile(path.join(CHUNKS_DIR, filename), body, "utf8");

  return { id, file: filename, topic, tags, importance };
}
|
|
205
|
+
|
|
206
|
+
/**
 * Looks up a single chunk by its ID.
 *
 * @param {string} id
 * @returns {Promise<{ meta: object, content: string } | null>}
 */
export async function readChunk(id) {
  const all = await loadAllChunks();
  const match = all.find((chunk) => chunk.meta.id === id);
  return match ?? null;
}
|
|
216
|
+
|
|
217
|
+
/**
 * Deletes a chunk by ID.
 *
 * @param {string} id
 * @returns {Promise<boolean>} true if deleted, false if not found
 */
export async function deleteChunk(id) {
  const all = await loadAllChunks();
  const target = all.find((chunk) => chunk.meta.id === id);
  if (target === undefined) return false;

  await fs.unlink(path.join(CHUNKS_DIR, target.file));
  return true;
}
|
|
230
|
+
|
|
231
|
+
/**
 * Lists chunk metadata (no body content), most recently updated first.
 *
 * @param {object} opts
 * @param {string} [opts.agent] - Only chunks written by this agent
 * @param {string[]} [opts.tags] - Only chunks carrying any of these tags
 * @returns {Promise<Array>}
 */
export async function listChunks({ agent, tags = [] } = {}) {
  const chunks = await loadAllChunks();

  // A chunk passes when it satisfies every requested restriction.
  const matches = (c) => {
    if (agent && c.meta.agent !== agent) return false;
    if (tags.length === 0) return true;
    const own = c.meta.tags || [];
    return tags.some((t) => own.includes(t));
  };

  const summaries = chunks.filter(matches).map(({ meta }) => ({
    id: meta.id,
    topic: meta.topic,
    agent: meta.agent,
    tags: meta.tags || [],
    importance: meta.importance || "medium",
    updated: meta.updated,
  }));

  summaries.sort((a, b) => new Date(b.updated) - new Date(a.updated));
  return summaries;
}
|
|
259
|
+
|
|
260
|
+
/**
 * Reads a session state variable.
 * Keys containing path separators or other unsafe characters are treated as
 * absent (returns null) — keys become filenames, so an unchecked key such as
 * "../../secret" could otherwise read JSON files outside STATE_DIR.
 *
 * @param {string} key
 * @returns {Promise<any | null>} stored value, or null if absent/invalid/unreadable
 */
export async function getState(key) {
  // Restrict keys to a conservative charset to prevent path traversal.
  if (typeof key !== "string" || !/^[\w.-]+$/.test(key)) return null;

  await ensureDirs();
  try {
    const raw = await fs.readFile(path.join(STATE_DIR, `${key}.json`), "utf8");
    return JSON.parse(raw).value;
  } catch {
    // Missing or corrupt state files read as "no value".
    return null;
  }
}
|
|
275
|
+
|
|
276
|
+
/**
 * Writes a session state variable (any JSON-serializable value).
 * Keys become filenames, so unsafe keys (path separators, etc.) are rejected
 * with an error — an unchecked key such as "../../x" could otherwise create
 * or overwrite files outside STATE_DIR.
 *
 * @param {string} key
 * @param {any} value
 * @returns {Promise<{ key: string, updated: string }>}
 * @throws {Error} if key is not a safe identifier ([A-Za-z0-9_.-]+)
 */
export async function setState(key, value) {
  // Restrict keys to a conservative charset to prevent path traversal.
  if (typeof key !== "string" || !/^[\w.-]+$/.test(key)) {
    throw new Error(`Invalid state key: ${JSON.stringify(key)}`);
  }

  await ensureDirs();
  const updated = new Date().toISOString();
  const payload = JSON.stringify({ key, value, updated }, null, 2);
  await fs.writeFile(path.join(STATE_DIR, `${key}.json`), payload, "utf8");
  return { key, updated };
}
|