@lumenflow/memory 2.2.2 → 2.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +332 -0
- package/dist/decay/access-tracking.js +171 -0
- package/dist/decay/archival.js +164 -0
- package/dist/decay/scoring.js +143 -0
- package/dist/index.js +10 -0
- package/dist/mem-checkpoint-core.js +3 -3
- package/dist/mem-cleanup-core.js +38 -8
- package/dist/mem-context-core.js +347 -0
- package/dist/mem-create-core.js +4 -4
- package/dist/mem-delete-core.js +277 -0
- package/dist/mem-id.js +4 -4
- package/dist/mem-index-core.js +307 -0
- package/dist/mem-init-core.js +3 -3
- package/dist/mem-profile-core.js +184 -0
- package/dist/mem-promote-core.js +233 -0
- package/dist/mem-ready-core.js +2 -2
- package/dist/mem-signal-core.js +3 -3
- package/dist/mem-start-core.js +3 -3
- package/dist/mem-summarize-core.js +2 -2
- package/dist/mem-triage-core.js +5 -7
- package/dist/memory-schema.js +1 -1
- package/dist/memory-store.js +114 -53
- package/dist/signal-cleanup-core.js +355 -0
- package/package.json +4 -2
package/README.md
CHANGED
@@ -98,6 +98,264 @@ await sendSignal('/path/to/project', {
});
```

### Context Injection (WU-1234)

Generate deterministic context blocks for wu:spawn prompts:

```typescript
import { generateContext } from '@lumenflow/memory/context';

// Generate context for a WU
const result = await generateContext('/path/to/project', {
  wuId: 'WU-123',
  maxSize: 4096, // default: 4KB
});

console.log(result.contextBlock);
// <!-- mem:context for WU-123 -->
//
// ## Project Profile
// - [mem-abc1] (2025-01-15): Project architecture decision...
//
// ## WU Context
// - [mem-def2] (2025-01-20): Checkpoint: completed port definitions...
```

The context block includes:

- **Project Profile**: lifecycle=project memories (architectural knowledge)
- **Summaries**: summary-type nodes for the WU
- **WU Context**: checkpoints and notes linked to the WU
- **Discoveries**: discovered information for the WU

Selection is deterministic (filter by lifecycle, wu_id, recency). Max size is configurable (default 4KB). Returns an empty block if no memories match.

#### Decay-Based Ranking (WU-1238)

Enable decay-based ranking to prioritize memories by relevance rather than recency:

```typescript
const result = await generateContext('/path/to/project', {
  wuId: 'WU-123',
  sortByDecay: true, // Sort by decay score instead of recency
  trackAccess: true, // Track access for included nodes
});

console.log(result.stats.accessTracked); // Number of nodes with access tracked
```

Decay scoring considers:

- **Recency**: Exponential decay based on age (default half-life: 30 days)
- **Access frequency**: Boost for frequently accessed nodes
- **Priority**: P0=2x, P1=1.5x, P2=1x, P3=0.5x multiplier

### Access Tracking and Decay Scoring (WU-1238)

Track memory access patterns and compute decay scores for relevance management:

```typescript
import {
  recordAccess,
  recordAccessBatch,
  getAccessStats,
  computeDecayScore,
  DEFAULT_HALF_LIFE_MS,
  IMPORTANCE_BY_PRIORITY,
} from '@lumenflow/memory';

// Record access for a single node
const updated = await recordAccess('/path/to/project', 'mem-abc1');
console.log(updated.metadata.access.count); // Incremented
console.log(updated.metadata.access.last_accessed_at); // ISO timestamp

// Record access for multiple nodes (efficient batch operation)
const updatedNodes = await recordAccessBatch('/path/to/project', ['mem-abc1', 'mem-def2']);

// Get access statistics
const stats = await getAccessStats('/path/to/project', 'mem-abc1');
console.log(stats?.count, stats?.last_accessed_at);

// Compute decay score for a node
const score = computeDecayScore(node, {
  now: Date.now(),
  halfLifeMs: DEFAULT_HALF_LIFE_MS, // 30 days
});
```

Decay scoring formula:

```
decayScore = recencyScore * (1 + accessScore) * importanceScore

Where:
- recencyScore = exp(-age / halfLife)
- accessScore = log1p(access_count) / 10
- importanceScore = { P0: 2, P1: 1.5, P2: 1, P3: 0.5 }
```

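For reference, the formula above can be reproduced directly. The following is a minimal sketch, not the package's internal implementation (`dist/decay/scoring.js`); the node fields used here (`created_at`, `priority`, `metadata.access.count`) are assumptions for illustration:

```typescript
type Priority = 'P0' | 'P1' | 'P2' | 'P3';

// Hypothetical minimal node shape for this sketch (field names assumed)
interface SketchNode {
  created_at: string; // ISO timestamp
  priority: Priority;
  metadata?: { access?: { count?: number } };
}

const IMPORTANCE: Record<Priority, number> = { P0: 2, P1: 1.5, P2: 1, P3: 0.5 };

function sketchDecayScore(node: SketchNode, now: number, halfLifeMs: number): number {
  const age = now - new Date(node.created_at).getTime();
  const recencyScore = Math.exp(-age / halfLifeMs); // exponential decay by age
  const accessScore = Math.log1p(node.metadata?.access?.count ?? 0) / 10; // logarithmic boost
  const importanceScore = IMPORTANCE[node.priority]; // priority multiplier
  return recencyScore * (1 + accessScore) * importanceScore;
}
```

A fresh, never-accessed P2 node scores 1.0, and the score decreases exponentially with age; frequent access and higher priority raise it.
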
### Archival by Decay (WU-1238)

Archive stale nodes with low decay scores:

```typescript
import { archiveByDecay, isArchived, DEFAULT_DECAY_THRESHOLD } from '@lumenflow/memory';

// Archive nodes below threshold (default: 0.1)
const result = await archiveByDecay('/path/to/project', {
  threshold: 0.1,
  dryRun: true, // Preview without modifying
});

console.log(result.archivedIds); // Nodes that would be archived
console.log(result.retainedIds); // Nodes above threshold
console.log(result.skippedIds); // Already archived or protected nodes

// Execute archival
await archiveByDecay('/path/to/project', { threshold: 0.1 });

// Check if a node is archived
if (isArchived(node)) {
  console.log('Node has metadata.status = archived');
}
```

Archival rules (see the `isArchived` sketch after this list):

- Nodes below threshold get `metadata.status = 'archived'`
- Project lifecycle nodes are never archived (protected)
- Already archived nodes are skipped
- Nothing is deleted (append-only pattern)

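A minimal sketch of splitting archived from active nodes after loading with `includeArchived: true`, assuming the loaded result exposes a `nodes` array (as the decay modules in this release use via `loadMemory`):

```typescript
import { loadMemory, isArchived } from '@lumenflow/memory';

// Load everything, including archived nodes, then split manually
const memory = await loadMemory('/path/to/project', { includeArchived: true });
const archived = memory.nodes.filter((node) => isArchived(node));
const active = memory.nodes.filter((node) => !isArchived(node));
console.log(`${active.length} active, ${archived.length} archived`);
```
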
### Memory Cleanup with Decay (WU-1238)

The `cleanupMemory` function now supports decay-based archival:

```typescript
import { cleanupMemory } from '@lumenflow/memory';

// Run cleanup with decay archival
const result = await cleanupMemory('/path/to/project', {
  decay: true,
  decayThreshold: 0.1, // Archive nodes below this score
  halfLifeMs: 30 * 24 * 60 * 60 * 1000, // 30 days
  dryRun: true, // Preview first
});

console.log(result.breakdown.decayArchived); // Number of nodes archived by decay
console.log(result.decayResult); // Detailed archival result
```

### Including Archived Nodes (WU-1238)

By default, queries exclude archived nodes. Use `includeArchived` to include them:

```typescript
import { loadMemory, queryReady, queryByWu } from '@lumenflow/memory';

// Load including archived nodes
const allMemory = await loadMemory('/path/to/project', { includeArchived: true });

// Query including archived nodes
const allNodes = await queryReady('/path/to/project', 'WU-123', { includeArchived: true });
const allWuNodes = await queryByWu('/path/to/project', 'WU-123', { includeArchived: true });
```

### Knowledge Promotion (WU-1237)

Promote session/WU learnings to project-level knowledge:

```typescript
import { promoteNode, promoteFromWu, ALLOWED_PROMOTION_TAGS } from '@lumenflow/memory';

// Promote a single node to project-level
const result = await promoteNode('/path/to/project', {
  nodeId: 'mem-abc1',
  tag: 'pattern', // decision|convention|pattern|pitfall|interface|invariant|faq
});
console.log(`Promoted to ${result.promotedNode.id}`);

// Promote all summaries from a WU
const wuResult = await promoteFromWu('/path/to/project', {
  wuId: 'WU-123',
  tag: 'decision',
});
console.log(`Promoted ${wuResult.promotedNodes.length} summaries`);

// Dry-run mode (preview without writing)
const dryResult = await promoteNode('/path/to/project', {
  nodeId: 'mem-abc1',
  tag: 'pattern',
  dryRun: true,
});
```

Promotion creates:

- A new node with `lifecycle=project`
- A `discovered_from` relationship to the source node
- The specified taxonomy tag on the promoted node

Allowed tags: `decision`, `convention`, `pattern`, `pitfall`, `interface`, `invariant`, `faq`.

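Because `ALLOWED_PROMOTION_TAGS` is exported, a caller can validate a tag before promoting. A minimal sketch (the tag value here stands in for hypothetical user input, e.g. a CLI flag):

```typescript
import { promoteNode, ALLOWED_PROMOTION_TAGS } from '@lumenflow/memory';

// Hypothetical user-supplied tag
const requestedTag = 'pattern';

// Reject anything outside the taxonomy before writing
if (!ALLOWED_PROMOTION_TAGS.includes(requestedTag)) {
  throw new Error(`Unknown promotion tag: ${requestedTag}`);
}

const promoted = await promoteNode('/path/to/project', {
  nodeId: 'mem-abc1',
  tag: requestedTag,
});
console.log(`Promoted to ${promoted.promotedNode.id}`);
```
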
### Project Profile (WU-1237)

Generate aggregated project knowledge for context injection:

```typescript
import { generateProfile, DEFAULT_PROFILE_LIMIT } from '@lumenflow/memory';

// Get top 20 project memories (default)
const result = await generateProfile('/path/to/project');
console.log(result.profileBlock);
// ## Project Profile
// - [mem-abc1] (2025-01-15): Architecture decision...
// - [mem-def2] (2025-01-20): Naming convention...

// Filter by tag
const decisions = await generateProfile('/path/to/project', {
  tag: 'decision',
  limit: 10,
});

// Access statistics
console.log(result.stats.totalProjectNodes);
console.log(result.stats.byTag); // { decision: 5, pattern: 3, ... }
```

The profile output is formatted for integration with `mem:context`.

### Project Indexing (WU-1235)

Index project conventions for agent context awareness:

```typescript
import { indexProject, getDefaultSources } from '@lumenflow/memory/index';

// Index project conventions
const result = await indexProject('/path/to/project');
console.log(`Created: ${result.nodesCreated}, Updated: ${result.nodesUpdated}`);

// Dry-run mode (no writes)
const dryResult = await indexProject('/path/to/project', { dryRun: true });
console.log('Would index:', dryResult.sourcesScanned);

// Get default sources that will be scanned
const sources = getDefaultSources();
// ['README.md', 'LUMENFLOW.md', 'package.json', ...]
```

Default sources scanned:

- **README.md**: Project overview (tagged: `index:architecture`)
- **LUMENFLOW.md**: Workflow conventions (tagged: `index:conventions`)
- **package.json**: Monorepo structure (tagged: `index:architecture`)
- **.lumenflow.config.yaml**: Workflow config (tagged: `index:commands`, `index:conventions`)
- **.lumenflow/constraints.md**: Project invariants (tagged: `index:invariants`)

Each node includes provenance metadata: `source_path`, `source_hash`, `indexed_at`.
Indexing is idempotent: re-running it updates or skips existing nodes based on their content hash.

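A minimal sketch of the idempotency behaviour described above (the exact counts depend on whether the scanned sources changed between runs):

```typescript
import { indexProject } from '@lumenflow/memory/index';

// First run: creates nodes for the default sources
const first = await indexProject('/path/to/project');
console.log(`first run -> created: ${first.nodesCreated}, updated: ${first.nodesUpdated}`);

// Second run with unchanged sources: nothing new should be created;
// existing nodes are updated or skipped based on their content hash
const second = await indexProject('/path/to/project');
console.log(`second run -> created: ${second.nodesCreated}, updated: ${second.nodesUpdated}`);
```
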
## Subpath Exports

```typescript
@@ -114,6 +372,8 @@ import { cleanupExpired } from '@lumenflow/memory/cleanup';
import { createMemoryNode } from '@lumenflow/memory/create';
import { summarizeWu } from '@lumenflow/memory/summarize';
import { triageBugs } from '@lumenflow/memory/triage';
import { generateContext } from '@lumenflow/memory/context';
import { indexProject } from '@lumenflow/memory/index';
import { MemoryNodeSchema } from '@lumenflow/memory/schema';
import { loadMemory, appendNode } from '@lumenflow/memory/store';
```

@@ -129,6 +389,75 @@ import { loadMemory, appendNode } from '@lumenflow/memory/store';
| `queryReady(baseDir, wuId)` | Get nodes for WU in priority order |
| `queryByWu(baseDir, wuId)` | Get all nodes for WU in file order |

### Context Injection

| Function | Description |
| -------------------------------- | --------------------------------------------- |
| `generateContext(baseDir, opts)` | Generate formatted context block for wu:spawn |

### Project Indexing

| Function | Description |
| ----------------------------- | ------------------------------------- |
| `indexProject(baseDir, opts)` | Scan sources and create summary nodes |
| `getDefaultSources()` | Get list of default sources to scan |

### Knowledge Promotion

| Function | Description |
| ------------------------------ | ---------------------------------------- |
| `promoteNode(baseDir, opts)` | Promote single node to project lifecycle |
| `promoteFromWu(baseDir, opts)` | Promote all summaries from a WU |
| `ALLOWED_PROMOTION_TAGS` | Array of valid taxonomy tags |

### Project Profile

| Function | Description |
| -------------------------------- | --------------------------------------------- |
| `generateProfile(baseDir, opts)` | Generate aggregated project knowledge profile |
| `DEFAULT_PROFILE_LIMIT` | Default limit for profile generation (20) |

Options for `indexProject`:

- `dryRun` (optional): If true, show what would be indexed without writing (default: false)
- `additionalSources` (optional): Additional source definitions to scan

Options for `generateContext` (combined in the example after this list):

- `wuId` (required): WU ID to generate context for
- `maxSize` (optional): Maximum context size in bytes (default: 4096)
- `sortByDecay` (optional): Sort by decay score instead of recency (default: false)
- `trackAccess` (optional): Track access for included nodes (default: false)
- `halfLifeMs` (optional): Half-life for decay calculation (default: 30 days)
- `now` (optional): Current timestamp for decay calculation (default: Date.now())

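Taken together, the decay-related options can be combined in a single call. A sketch using only the options listed above (the values are arbitrary):

```typescript
import { generateContext } from '@lumenflow/memory/context';

const ctx = await generateContext('/path/to/project', {
  wuId: 'WU-123',
  maxSize: 8192, // larger budget than the 4096-byte default
  sortByDecay: true, // rank by decay score rather than recency
  trackAccess: true, // record access for every node included in the block
  halfLifeMs: 14 * 24 * 60 * 60 * 1000, // shorter half-life than the 30-day default
  now: Date.now(), // explicit timestamp, useful for reproducible runs
});
console.log(ctx.contextBlock);
```
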
### Access Tracking (WU-1238)

| Function | Description |
| ------------------------------------- | -------------------------------------------- |
| `recordAccess(baseDir, nodeId)` | Record access for a single node |
| `recordAccessBatch(baseDir, nodeIds)` | Record access for multiple nodes (efficient) |
| `getAccessStats(baseDir, nodeId)` | Get access statistics for a node |

### Decay Scoring (WU-1238)

| Function | Description |
| -------------------------------- | ------------------------------------------------ |
| `computeDecayScore(node, opts)` | Compute overall decay score for a node |
| `computeRecencyScore(node, ...)` | Compute recency component (exponential decay) |
| `computeAccessScore(node)` | Compute access component (logarithmic boost) |
| `computeImportanceScore(node)` | Compute importance component (priority-based) |
| `DEFAULT_HALF_LIFE_MS` | Default half-life: 30 days in milliseconds |
| `IMPORTANCE_BY_PRIORITY` | Priority multipliers: P0=2, P1=1.5, P2=1, P3=0.5 |

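The component scores can be inspected alongside the combined score. A brief sketch reusing the `node` placeholder from the earlier examples:

```typescript
import {
  computeDecayScore,
  computeAccessScore,
  computeImportanceScore,
  DEFAULT_HALF_LIFE_MS,
} from '@lumenflow/memory';

// Inspect individual decay components for a node
const accessComponent = computeAccessScore(node); // logarithmic boost from access count
const importanceComponent = computeImportanceScore(node); // P0=2 ... P3=0.5 multiplier
const combined = computeDecayScore(node, {
  now: Date.now(),
  halfLifeMs: DEFAULT_HALF_LIFE_MS,
});
console.log({ accessComponent, importanceComponent, combined });
```
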
### Archival (WU-1238)

| Function | Description |
| ------------------------------- | ---------------------------------------------- |
| `archiveByDecay(baseDir, opts)` | Archive nodes below decay threshold |
| `isArchived(node)` | Check if node has metadata.status = 'archived' |
| `DEFAULT_DECAY_THRESHOLD` | Default threshold: 0.1 |

### Memory Schema

| Export | Description |

@@ -171,6 +500,9 @@ interface IndexedMemory {
- **Priority ordering**: Deterministic query results (P0 > P1 > P2 > P3)
- **Schema validation**: Zod-based runtime validation
- **Modern**: Node 22+, ESM-only, TypeScript
- **Decay scoring** (WU-1238): Relevance management based on recency, access frequency, and priority
- **Access tracking** (WU-1238): Track node access patterns for decay scoring
- **Archival** (WU-1238): Archive stale nodes without deletion (append-only pattern)

## Documentation

package/dist/decay/access-tracking.js
ADDED

@@ -0,0 +1,171 @@
/**
 * Access Tracking (WU-1238)
 *
 * Track access patterns for memory nodes to inform decay scoring.
 * Access is recorded when nodes are returned by mem:search or mem:context.
 *
 * Tracks:
 * - metadata.access.count: Number of times node was accessed
 * - metadata.access.last_accessed_at: ISO timestamp of last access
 * - metadata.decay.score: Computed decay score (updated on access)
 *
 * @see {@link packages/@lumenflow/memory/__tests__/access-tracking.test.ts} - Tests
 */
import fs from 'node:fs/promises';
import path from 'node:path';
import { loadMemory, MEMORY_FILE_NAME } from '../memory-store.js';
import { computeDecayScore, DEFAULT_HALF_LIFE_MS } from './scoring.js';
/**
 * Write nodes back to memory file.
 * Rewrites the entire file to update existing nodes.
 *
 * @param baseDir - Base directory containing memory.jsonl
 * @param nodes - All nodes to write
 */
async function writeMemoryFile(baseDir, nodes) {
  const filePath = path.join(baseDir, MEMORY_FILE_NAME);
  const content = nodes.map((n) => JSON.stringify(n)).join('\n') + (nodes.length > 0 ? '\n' : '');
  await fs.writeFile(filePath, content, { encoding: 'utf-8' });
}
/**
 * Update a node's access metadata.
 *
 * @param node - Node to update
 * @param now - Current timestamp
 * @param halfLifeMs - Half-life for decay scoring
 * @returns Updated node with new access metadata
 */
function updateNodeAccess(node, now = Date.now(), halfLifeMs = DEFAULT_HALF_LIFE_MS) {
  const timestamp = new Date(now).toISOString();
  // Get existing access metadata
  const existingAccess = node.metadata?.access;
  const currentCount = existingAccess?.count ?? 0;
  // Create updated access metadata
  const newAccess = {
    count: currentCount + 1,
    last_accessed_at: timestamp,
  };
  // Compute decay score
  const updatedNodeForScoring = {
    ...node,
    metadata: {
      ...node.metadata,
      access: newAccess,
    },
  };
  const decayScore = computeDecayScore(updatedNodeForScoring, { now, halfLifeMs });
  const newDecay = {
    score: decayScore,
    computed_at: timestamp,
  };
  // Return updated node
  return {
    ...node,
    metadata: {
      ...node.metadata,
      access: newAccess,
      decay: newDecay,
    },
  };
}
/**
 * Record access for a single memory node.
 *
 * Increments access count and updates last_accessed_at timestamp.
 * Also recomputes the decay score.
 *
 * @param baseDir - Base directory containing memory.jsonl
 * @param nodeId - ID of the node to record access for
 * @param options - Optional configuration
 * @returns Updated node with new access metadata
 * @throws If node is not found
 *
 * @example
 * const updatedNode = await recordAccess(baseDir, 'mem-abc1');
 * console.log(updatedNode.metadata.access.count); // Incremented
 */
export async function recordAccess(baseDir, nodeId, options = {}) {
  const { now = Date.now(), halfLifeMs = DEFAULT_HALF_LIFE_MS } = options;
  // Load all nodes
  const memory = await loadMemory(baseDir);
  // Find the target node
  const targetNode = memory.byId.get(nodeId);
  if (!targetNode) {
    throw new Error(`Node not found: ${nodeId}`);
  }
  // Update the node
  const updatedNode = updateNodeAccess(targetNode, now, halfLifeMs);
  // Replace the node in the nodes array
  const updatedNodes = memory.nodes.map((n) => (n.id === nodeId ? updatedNode : n));
  // Write back to file
  await writeMemoryFile(baseDir, updatedNodes);
  return updatedNode;
}
/**
 * Record access for multiple memory nodes in a batch.
 *
 * More efficient than calling recordAccess individually because
 * it only reads and writes the file once.
 *
 * @param baseDir - Base directory containing memory.jsonl
 * @param nodeIds - IDs of nodes to record access for
 * @param options - Optional configuration
 * @returns Array of updated nodes (skips non-existent nodes)
 *
 * @example
 * const updated = await recordAccessBatch(baseDir, ['mem-abc1', 'mem-def2']);
 * console.log(`Updated ${updated.length} nodes`);
 */
export async function recordAccessBatch(baseDir, nodeIds, options = {}) {
  const { now = Date.now(), halfLifeMs = DEFAULT_HALF_LIFE_MS } = options;
  // Load all nodes
  const memory = await loadMemory(baseDir);
  // Track which nodes were updated
  const updatedNodes = [];
  const nodeIdSet = new Set(nodeIds);
  // Update matching nodes
  const allNodes = memory.nodes.map((node) => {
    if (nodeIdSet.has(node.id)) {
      const updated = updateNodeAccess(node, now, halfLifeMs);
      updatedNodes.push(updated);
      return updated;
    }
    return node;
  });
  // Write back to file if any nodes were updated
  if (updatedNodes.length > 0) {
    await writeMemoryFile(baseDir, allNodes);
  }
  return updatedNodes;
}
/**
 * Get access statistics for a memory node.
 *
 * @param baseDir - Base directory containing memory.jsonl
 * @param nodeId - ID of the node to get stats for
 * @returns Access stats or null if node has no access data or doesn't exist
 *
 * @example
 * const stats = await getAccessStats(baseDir, 'mem-abc1');
 * if (stats) {
 *   console.log(`Accessed ${stats.count} times`);
 * }
 */
export async function getAccessStats(baseDir, nodeId) {
  // Load all nodes
  const memory = await loadMemory(baseDir);
  // Find the target node
  const node = memory.byId.get(nodeId);
  if (!node) {
    return null;
  }
  // Get access metadata
  const access = node.metadata?.access;
  if (!access || typeof access.count !== 'number') {
    return null;
  }
  return {
    count: access.count,
    last_accessed_at: access.last_accessed_at,
  };
}

package/dist/decay/archival.js
ADDED

@@ -0,0 +1,164 @@
/**
 * Archival (WU-1238)
 *
 * Archive memory nodes below decay threshold.
 * Uses append-only pattern - nothing is deleted, nodes are marked with metadata.status = 'archived'.
 *
 * Archival rules:
 * - Nodes below threshold get metadata.status = 'archived'
 * - Project lifecycle nodes are never archived (protected)
 * - Already archived nodes are skipped
 * - Archived nodes excluded from default queries
 *
 * @see {@link packages/@lumenflow/memory/__tests__/archival.test.ts} - Tests
 */
import fs from 'node:fs/promises';
import path from 'node:path';
import { loadMemory, MEMORY_FILE_NAME } from '../memory-store.js';
import { computeDecayScore, DEFAULT_HALF_LIFE_MS } from './scoring.js';
/**
 * Default decay threshold below which nodes are archived
 */
export const DEFAULT_DECAY_THRESHOLD = 0.1;
/**
 * Status value for archived nodes
 */
const ARCHIVED_STATUS = 'archived';
/**
 * Protected lifecycle that is never archived
 */
const PROTECTED_LIFECYCLE = 'project';
/**
 * Check if a node is already archived.
 *
 * @param node - Memory node to check
 * @returns True if node has metadata.status = 'archived'
 *
 * @example
 * if (isArchived(node)) {
 *   console.log('Node is already archived');
 * }
 */
export function isArchived(node) {
  return node.metadata?.status === ARCHIVED_STATUS;
}
/**
 * Check if a node is protected from archival.
 *
 * Protected nodes:
 * - Project lifecycle nodes (architectural knowledge)
 *
 * @param node - Memory node to check
 * @returns True if node should never be archived
 */
function isProtected(node) {
  return node.lifecycle === PROTECTED_LIFECYCLE;
}
/**
 * Mark a node as archived.
 *
 * @param node - Node to archive
 * @param score - The decay score that triggered archival
 * @param threshold - The threshold used
 * @param now - Current timestamp
 * @returns Node with archived status
 */
function markAsArchived(node, score, threshold, now) {
  const timestamp = new Date(now).toISOString();
  return {
    ...node,
    metadata: {
      ...node.metadata,
      status: ARCHIVED_STATUS,
      archived_at: timestamp,
      decay: {
        ...node.metadata?.decay,
        score,
        reason: `Score ${score.toFixed(4)} below threshold ${threshold}`,
        computed_at: timestamp,
      },
    },
  };
}
/**
 * Write nodes back to memory file.
 *
 * @param baseDir - Base directory containing memory.jsonl
 * @param nodes - All nodes to write
 */
async function writeMemoryFile(baseDir, nodes) {
  const filePath = path.join(baseDir, MEMORY_FILE_NAME);
  const content = nodes.map((n) => JSON.stringify(n)).join('\n') + (nodes.length > 0 ? '\n' : '');
  await fs.writeFile(filePath, content, { encoding: 'utf-8' });
}
/**
 * Archive nodes with decay score below threshold.
 *
 * This function:
 * 1. Computes decay score for each node
 * 2. Archives nodes below the threshold
 * 3. Skips already archived and protected nodes
 * 4. Does NOT delete any nodes (append-only pattern)
 *
 * Archived nodes get:
 * - metadata.status = 'archived'
 * - metadata.archived_at = ISO timestamp
 * - metadata.decay.score = computed score
 * - metadata.decay.reason = explanation string
 *
 * @param baseDir - Base directory containing memory.jsonl
 * @param options - Archive options
 * @returns Result with lists of archived, retained, and skipped node IDs
 *
 * @example
 * // Archive nodes with decay score below 0.1
 * const result = await archiveByDecay(baseDir, { threshold: 0.1 });
 * console.log(`Archived ${result.archivedIds.length} nodes`);
 *
 * @example
 * // Dry-run to preview what would be archived
 * const preview = await archiveByDecay(baseDir, { threshold: 0.1, dryRun: true });
 * console.log(`Would archive: ${preview.archivedIds.join(', ')}`);
 */
export async function archiveByDecay(baseDir, options = {}) {
  const { threshold = DEFAULT_DECAY_THRESHOLD, now = Date.now(), halfLifeMs = DEFAULT_HALF_LIFE_MS, dryRun = false } = options;
  // Load all nodes including archived ones (need to see everything for processing)
  const memory = await loadMemory(baseDir, { includeArchived: true });
  // Track results
  const archivedIds = [];
  const retainedIds = [];
  const skippedIds = [];
  // Process nodes and build updated list
  const updatedNodes = memory.nodes.map((node) => {
    // Skip already archived nodes
    if (isArchived(node)) {
      skippedIds.push(node.id);
      return node;
    }
    // Skip protected nodes (project lifecycle)
    if (isProtected(node)) {
      skippedIds.push(node.id);
      return node;
    }
    // Compute decay score
    const score = computeDecayScore(node, { now, halfLifeMs });
    // Check threshold
    if (score < threshold) {
      archivedIds.push(node.id);
      return markAsArchived(node, score, threshold, now);
    }
    retainedIds.push(node.id);
    return node;
  });
  // Write back to file if not dry-run and nodes were archived
  if (!dryRun && archivedIds.length > 0) {
    await writeMemoryFile(baseDir, updatedNodes);
  }
  return {
    archivedIds,
    retainedIds,
    skippedIds,
    totalProcessed: memory.nodes.length,
    ...(dryRun ? { dryRun: true } : {}),
  };
}