@lumenflow/memory 2.2.2 → 2.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,277 @@
1
+ /**
2
+ * Memory Delete Core (WU-1284)
3
+ *
4
+ * Soft delete memory nodes via metadata.status=deleted.
5
+ * Respects append-only pattern by updating nodes in-place rather than removing.
6
+ *
7
+ * Features:
8
+ * - Delete by node ID(s)
9
+ * - Bulk delete via tag filter
10
+ * - Bulk delete via older-than filter
11
+ * - Dry-run preview mode
12
+ * - Preserves all original node data
13
+ *
14
+ * @see {@link packages/@lumenflow/memory/__tests__/mem-delete.test.ts} - Tests
15
+ */
16
+ import fs from 'node:fs/promises';
17
+ import path from 'node:path';
18
+ import { loadMemory, MEMORY_FILE_NAME } from './memory-store.js';
19
+ import { LUMENFLOW_MEMORY_PATHS } from './paths.js';
20
/**
 * Duration multipliers in milliseconds, keyed by unit suffix.
 */
const DURATION_MULTIPLIERS = {
  h: 60 * 60 * 1000, // hours
  d: 24 * 60 * 60 * 1000, // days
  w: 7 * 24 * 60 * 60 * 1000, // weeks
};
/**
 * Parse a duration string (e.g., '7d', '24h', '2w') to milliseconds
 *
 * @param duration - Duration string: non-negative integer followed by 'h', 'd', or 'w'
 * @returns Duration in milliseconds
 * @throws If duration format is invalid or the unit is not a known multiplier
 */
function parseDuration(duration) {
  const match = /^(\d+)([hdw])$/.exec(duration);
  // A successful match always yields both capture groups, so the previous
  // per-group null checks were unreachable and have been folded into this guard.
  if (!match) {
    throw new Error(`Invalid duration format: ${duration}. Use format like '7d', '24h', '2w'`);
  }
  const value = Number.parseInt(match[1], 10);
  const multiplier = DURATION_MULTIPLIERS[match[2]];
  if (!multiplier) {
    throw new Error(`Unknown duration unit: ${match[2]}`);
  }
  return value * multiplier;
}
53
/**
 * Check whether a node carries the given tag.
 * Nodes without a tags array (or with a non-array value) never match.
 */
function matchesTag(node, tag) {
  if (!Array.isArray(node.tags)) {
    return false;
  }
  return node.tags.includes(tag);
}
59
/**
 * Check if a node's created_at timestamp falls strictly before the cutoff
 * (referenceDate minus durationMs). An unparseable created_at yields NaN,
 * which never compares as older.
 */
function isOlderThan(node, durationMs, referenceDate) {
  const createdMs = new Date(node.created_at).getTime();
  const cutoffMs = referenceDate.getTime() - durationMs;
  return createdMs < cutoffMs;
}
67
/**
 * Check whether a node has already been soft-deleted
 * (metadata.status === 'deleted'). Missing metadata counts as not deleted.
 */
function isAlreadyDeleted(node) {
  const status = node.metadata?.status;
  return status === 'deleted';
}
73
/**
 * Produce a soft-deleted copy of a node: status becomes 'deleted' and a
 * deleted_at ISO timestamp is stamped. The input node is not mutated; all
 * other fields and existing metadata keys are preserved via shallow copy.
 */
function markAsDeleted(node) {
  const deletionMetadata = {
    ...node.metadata,
    status: 'deleted',
    deleted_at: new Date().toISOString(),
  };
  return { ...node, metadata: deletionMetadata };
}
86
/**
 * Resolve explicit node IDs against the lookup map: known IDs are added to
 * the delete set, unknown IDs are reported as 'Node not found' errors.
 */
function processNodeIdsFilter(nodeIds, nodeMap, toDeleteSet, errors) {
  for (const nodeId of nodeIds) {
    if (!nodeMap.has(nodeId)) {
      errors.push(`Node not found: ${nodeId}`);
      continue;
    }
    toDeleteSet.add(nodeId);
  }
}
100
/**
 * Add every node carrying the given tag to the delete set.
 * Nodes without the tag are left untouched.
 */
function processTagFilter(nodes, tag, toDeleteSet) {
  for (const candidate of nodes) {
    if (!matchesTag(candidate, tag)) {
      continue;
    }
    toDeleteSet.add(candidate.id);
  }
}
110
/**
 * Apply the olderThan filter, optionally intersected with the tag filter.
 *
 * With a tag present, a node stays in the delete set only if it is BOTH old
 * enough AND tagged; otherwise its id is evicted from the set.
 * NOTE(review): the eviction branch also removes ids that were added by an
 * explicit nodeIds filter — confirm that intersection-across-all-filters is
 * the intended semantics when nodeIds, tag, and olderThan are combined.
 *
 * Without a tag, old nodes are added only when no explicit nodeIds were
 * given (so an ID list is never silently widened by age).
 */
function processOlderThanFilter(nodes, olderThan, referenceDate, tag, hasNodeIds, toDeleteSet) {
  const durationMs = parseDuration(olderThan);
  for (const candidate of nodes) {
    const oldEnough = isOlderThan(candidate, durationMs, referenceDate);
    if (tag) {
      if (oldEnough && matchesTag(candidate, tag)) {
        toDeleteSet.add(candidate.id);
      } else {
        // Fails the age+tag intersection: drop it even if previously added.
        toDeleteSet.delete(candidate.id);
      }
    } else if (oldEnough && !hasNodeIds) {
      toDeleteSet.add(candidate.id);
    }
  }
}
134
/**
 * Resolve the delete options (nodeIds / tag / olderThan) against all nodes.
 *
 * Filters are applied in a fixed order — nodeIds, then tag, then olderThan —
 * so that olderThan can intersect with the tag filter.
 *
 * @returns {{ toDelete: Array, errors: string[] }} nodes selected for
 *   deletion plus 'Node not found' errors for unknown explicit IDs
 */
function findNodesToDelete(nodes, options) {
  const { nodeIds, tag, olderThan, referenceDate = new Date() } = options;
  const errors = [];
  const toDeleteSet = new Set();
  // Index nodes by id for O(1) lookups.
  const nodeMap = new Map(nodes.map((node) => [node.id, node]));
  const hasNodeIds = Boolean(nodeIds && nodeIds.length > 0);
  if (hasNodeIds) {
    processNodeIdsFilter(nodeIds, nodeMap, toDeleteSet, errors);
  }
  // Tag filter runs before olderThan so the latter can intersect with it.
  if (tag) {
    processTagFilter(nodes, tag, toDeleteSet);
  }
  if (olderThan) {
    processOlderThanFilter(nodes, olderThan, referenceDate, tag, hasNodeIds, toDeleteSet);
  }
  // Materialize the surviving ids back into node objects.
  const toDelete = [];
  for (const id of toDeleteSet) {
    const node = nodeMap.get(id);
    if (node) {
      toDelete.push(node);
    }
  }
  return { toDelete, errors };
}
169
/**
 * Rewrite the JSONL memory file with the given node list, one JSON object
 * per line with a trailing newline. Used to persist in-place soft-delete
 * updates while keeping the file append-friendly.
 */
async function writeUpdatedNodes(baseDir, nodes) {
  const targetPath = path.join(baseDir, MEMORY_FILE_NAME);
  const serialized = nodes.map((node) => JSON.stringify(node));
  // eslint-disable-next-line security/detect-non-literal-fs-filename -- path computed from known base and constant
  await fs.writeFile(targetPath, `${serialized.join('\n')}\n`, 'utf-8');
}
179
/**
 * Delete memory nodes using soft-delete pattern (metadata.status=deleted)
 *
 * Respects the append-only pattern by updating nodes in-place rather than
 * physically removing them from the file.
 *
 * @param baseDir - Project root directory containing .lumenflow/memory/
 * @param options - Delete options: nodeIds (string[]), tag (string),
 *   olderThan (duration string like '30d'), dryRun (boolean, default false).
 *   At least one of nodeIds/tag/olderThan is required.
 * @returns Result object: { success, deletedCount, deletedIds, skippedIds,
 *   dryRun, errors }. Never throws; failures are reported via success/errors.
 *
 * @example
 * // Delete by ID
 * await deleteMemoryNodes('/path', { nodeIds: ['mem-abc1'] });
 *
 * @example
 * // Delete by tag (dry-run)
 * await deleteMemoryNodes('/path', { tag: 'obsolete', dryRun: true });
 *
 * @example
 * // Delete old nodes
 * await deleteMemoryNodes('/path', { olderThan: '30d' });
 */
export async function deleteMemoryNodes(baseDir, options) {
  const { nodeIds, tag, olderThan, dryRun = false } = options;
  // Validate: at least one filter must be provided — an unfiltered call
  // would otherwise be a no-op that looks like success.
  if ((!nodeIds || nodeIds.length === 0) && !tag && !olderThan) {
    return {
      success: false,
      deletedCount: 0,
      deletedIds: [],
      skippedIds: [],
      dryRun,
      errors: ['At least one filter (nodeIds, tag, or olderThan) is required'],
    };
  }
  // WU-1285: Compute the memory directory path from baseDir
  // The memory file lives at .lumenflow/memory/memory.jsonl
  const memoryDir = path.join(baseDir, LUMENFLOW_MEMORY_PATHS.MEMORY_DIR);
  // Load all memory including archived/deleted nodes so that re-deleting an
  // already-deleted node can be detected and reported as skipped.
  let memory;
  try {
    memory = await loadMemory(memoryDir, { includeArchived: true });
  }
  catch (err) {
    // Load failures (missing file, parse errors) are surfaced in the result
    // rather than thrown, keeping the function's no-throw contract.
    const errMsg = err instanceof Error ? err.message : String(err);
    return {
      success: false,
      deletedCount: 0,
      deletedIds: [],
      skippedIds: [],
      dryRun,
      errors: [`Failed to load memory: ${errMsg}`],
    };
  }
  // Resolve filters to concrete nodes; collects 'Node not found' errors.
  const { toDelete, errors } = findNodesToDelete(memory.nodes, options);
  // Partition matches: already-deleted nodes are skipped (and reported),
  // the rest are queued for the soft-delete update.
  const deletedIds = [];
  const skippedIds = [];
  const nodesToUpdate = [];
  for (const node of toDelete) {
    if (isAlreadyDeleted(node)) {
      skippedIds.push(node.id);
      errors.push(`Node already deleted: ${node.id}`);
    }
    else {
      deletedIds.push(node.id);
      nodesToUpdate.push(node);
    }
  }
  // Apply deletion if not dry-run; dry-run reports what WOULD be deleted.
  if (!dryRun && nodesToUpdate.length > 0) {
    // Build updated node list: map every node, replacing targeted ones with
    // their soft-deleted copies, preserving file order.
    const deletedIdSet = new Set(deletedIds);
    const updatedNodes = memory.nodes.map((node) => {
      if (deletedIdSet.has(node.id)) {
        return markAsDeleted(node);
      }
      return node;
    });
    // Write back to file (using memoryDir, not baseDir)
    await writeUpdatedNodes(memoryDir, updatedNodes);
  }
  // Determine success:
  // - True if any nodes were deleted
  // - True if all requested nodes were already deleted (skipped but valid)
  // - False only if requested nodes don't exist (not found errors)
  // NOTE(review): when some IDs are not found but at least one other node was
  // deleted or skipped, success is still true (errors carry the details) —
  // confirm this partial-success behavior is intended by callers.
  const hasNotFoundErrors = errors.some((e) => e.startsWith('Node not found:'));
  const allNodesHandled = deletedIds.length > 0 || skippedIds.length > 0;
  const success = !hasNotFoundErrors || allNodesHandled;
  return {
    success,
    deletedCount: deletedIds.length,
    deletedIds,
    skippedIds,
    dryRun,
    errors,
  };
}
package/dist/mem-id.js CHANGED
@@ -5,8 +5,8 @@
5
5
  * Format: mem-[4 hex chars] derived from content hash.
6
6
  * Supports hierarchical IDs (mem-a1b2.1.2) for sub-task decomposition.
7
7
  *
8
- * @see {@link tools/lib/__tests__/mem-id.test.mjs} - Tests
9
- * @see {@link tools/lib/memory-schema.mjs} - Schema definitions
8
+ * @see {@link packages/@lumenflow/cli/src/lib/__tests__/mem-id.test.ts} - Tests
9
+ * @see {@link packages/@lumenflow/cli/src/lib/memory-schema.ts} - Schema definitions
10
10
  */
11
11
  import { createHash } from 'node:crypto';
12
12
  /**
@@ -51,7 +51,7 @@ const ERROR_MESSAGES = {
51
51
  * @returns Memory ID in format mem-[a-f0-9]{4}
52
52
  *
53
53
  * @example
54
- * const id = generateMemId('discovered file at src/utils.mjs');
54
+ * const id = generateMemId('discovered file at src/utils.ts');
55
55
  * // Returns something like 'mem-a3f2'
56
56
  */
57
57
  export function generateMemId(content) {
@@ -90,7 +90,7 @@ export function generateHierarchicalId(parentId, index) {
90
90
  * Validates a memory ID and extracts its components.
91
91
  *
92
92
  * Returns validation result with type classification and parsed components.
93
- * Compatible with MEMORY_PATTERNS.MEMORY_ID from memory-schema.mjs.
93
+ * Compatible with MEMORY_PATTERNS.MEMORY_ID from memory-schema.ts.
94
94
  *
95
95
  * @param id - Memory ID to validate
96
96
  * @returns Validation result with parsed components
@@ -0,0 +1,307 @@
1
+ /**
2
+ * Memory Index Core (WU-1235)
3
+ *
4
+ * Scans predictable project sources and produces project-lifecycle summary nodes.
5
+ * Creates memory nodes tagged with index:architecture, index:conventions,
6
+ * index:commands, index:invariants for agent context awareness.
7
+ *
8
+ * Features:
9
+ * - Scans README.md, LUMENFLOW.md, package.json, .lumenflow.config.yaml
10
+ * - Creates summary nodes with lifecycle=project
11
+ * - Includes provenance metadata (source_path, source_hash, indexed_at)
12
+ * - Idempotent: re-running updates/skips existing nodes
13
+ *
14
+ * @see {@link packages/@lumenflow/cli/src/mem-index.ts} - CLI wrapper
15
+ * @see {@link packages/@lumenflow/memory/__tests__/mem-index-core.test.ts} - Tests
16
+ */
17
+ import fs from 'node:fs/promises';
18
+ import path from 'node:path';
19
+ import crypto from 'node:crypto';
20
+ import { generateMemId } from './mem-id.js';
21
+ import { loadMemory, appendNode } from './memory-store.js';
22
+ import { LUMENFLOW_MEMORY_PATHS } from './paths.js';
23
/**
 * Default sources to scan for project conventions.
 *
 * Each entry names a file (relative to the project base directory), the
 * index tags attached to the resulting summary node, and a human-readable
 * description stored in the node's metadata. Missing files are skipped by
 * the indexer (reported in sourcesMissing), so entries here are optional.
 */
const DEFAULT_SOURCES = [
  {
    path: 'README.md',
    tags: ['index:architecture'],
    description: 'Project overview and structure',
  },
  {
    path: 'LUMENFLOW.md',
    tags: ['index:conventions'],
    description: 'Workflow conventions and guidelines',
  },
  {
    path: 'package.json',
    tags: ['index:architecture'],
    description: 'Monorepo structure and dependencies',
  },
  {
    path: '.lumenflow.config.yaml',
    tags: ['index:commands', 'index:conventions'],
    description: 'Workflow configuration and lane definitions',
  },
  {
    path: '.lumenflow/constraints.md',
    tags: ['index:invariants'],
    description: 'Non-negotiable project constraints',
  },
];
/** Maximum summary length in characters (applies to extracted node content) */
const MAX_SUMMARY_LENGTH = 2000;
55
/**
 * Computes a short content fingerprint: the first 16 hex characters of the
 * SHA-256 digest. Used to detect unchanged sources between index runs.
 *
 * @param content - Content to hash
 * @returns 16-character hex-encoded hash prefix
 */
function computeHash(content) {
  const fullDigest = crypto.createHash('sha256').update(content).digest('hex');
  return fullDigest.slice(0, 16);
}
64
/**
 * Extracts a short summary from package.json content: project name,
 * description, workspace globs, and up to 10 script names.
 *
 * @param content - Raw JSON content
 * @returns Newline-joined summary string, or null if the JSON cannot be
 *   parsed or its fields cannot be read
 */
function extractPackageJsonSummary(content) {
  try {
    const pkg = JSON.parse(content);
    const parts = [];
    if (pkg.name) {
      parts.push(`Project: ${pkg.name}`);
    }
    if (pkg.description) {
      parts.push(`Description: ${pkg.description}`);
    }
    if (pkg.workspaces) {
      // workspaces may be an array or an object with a `packages` array.
      const workspaceList = Array.isArray(pkg.workspaces)
        ? pkg.workspaces
        : pkg.workspaces.packages || [];
      parts.push(`Workspaces: ${workspaceList.join(', ')}`);
    }
    if (pkg.scripts) {
      const scriptNames = Object.keys(pkg.scripts).slice(0, 10);
      parts.push(`Key scripts: ${scriptNames.join(', ')}`);
    }
    return parts.join('\n');
  }
  catch {
    // Malformed JSON (or a non-object root) — signal the caller to fall back.
    return null;
  }
}
96
/**
 * Extracts a summary from Markdown: headings and non-empty lines are kept
 * until the running length would exceed MAX_SUMMARY_LENGTH; a single empty
 * line is kept after content to preserve paragraph breaks (at no length
 * cost). Leading blank lines before any content are dropped.
 *
 * @param content - Raw Markdown content
 * @returns Trimmed, newline-joined summary
 */
function extractMarkdownSummary(content) {
  const kept = [];
  let usedLength = 0;
  for (const line of content.split('\n')) {
    // Stop as soon as the next line would push us over the budget.
    if (usedLength + line.length > MAX_SUMMARY_LENGTH) {
      break;
    }
    const isHeading = line.startsWith('#');
    const hasText = line.trim() !== '';
    if (isHeading || hasText) {
      kept.push(line);
      usedLength += line.length + 1; // +1 accounts for the joining newline
    } else if (kept.length > 0) {
      // Blank line after content: keep the paragraph break.
      kept.push('');
    }
  }
  return kept.join('\n').trim();
}
128
/**
 * Extracts summary content from a source file, dispatching on the path:
 * package.json gets field extraction, YAML is truncated verbatim to keep
 * structure, Markdown gets heading/paragraph extraction, anything else is
 * truncated.
 *
 * @param sourcePath - Path of the source file
 * @param content - Raw file content
 * @returns Summarized content for the memory node
 */
function extractSummary(sourcePath, content) {
  if (sourcePath === 'package.json') {
    const pkgSummary = extractPackageJsonSummary(content);
    // Fall through to plain truncation when the JSON could not be parsed.
    if (pkgSummary) {
      return pkgSummary;
    }
  }
  const looksLikeYaml = sourcePath.endsWith('.yaml') || sourcePath.endsWith('.yml');
  if (looksLikeYaml) {
    return content.slice(0, MAX_SUMMARY_LENGTH);
  }
  if (sourcePath.endsWith('.md')) {
    return extractMarkdownSummary(content);
  }
  return content.slice(0, MAX_SUMMARY_LENGTH);
}
154
/**
 * Finds the first node whose metadata.source_path matches the given source.
 * Nodes without metadata never match.
 *
 * @param nodes - All memory nodes
 * @param sourcePath - Source path to find
 * @returns Matching node or undefined
 */
function findExistingNode(nodes, sourcePath) {
  return nodes.find((node) => node.metadata?.source_path === sourcePath);
}
167
/**
 * Builds a project-lifecycle summary node for a source file, including
 * provenance metadata (source_path, source_hash, indexed_at). When the node
 * replaces an existing one, updated_at and metadata.replaces are set.
 *
 * @param source - Source definition (path, tags, description)
 * @param content - File content
 * @param contentHash - Content hash (short SHA-256 prefix)
 * @param indexedAt - ISO timestamp of this index run
 * @param existingNodeId - ID of existing node being replaced, if updating
 * @returns Memory node
 */
function createSourceNode(source, content, contentHash, indexedAt, existingNodeId) {
  const provenance = {
    source_path: source.path,
    source_hash: contentHash,
    indexed_at: indexedAt,
    description: source.description,
  };
  const node = {
    // Include the timestamp so re-indexing yields a fresh id each run.
    id: generateMemId(`${source.path}-${indexedAt}`),
    type: 'summary',
    lifecycle: 'project',
    content: extractSummary(source.path, content),
    created_at: indexedAt,
    tags: source.tags,
    metadata: provenance,
  };
  if (existingNodeId) {
    node.updated_at = indexedAt;
    provenance.replaces = existingNodeId;
  }
  return node;
}
198
/**
 * Processes one source file: skips it when an existing node already carries
 * the same content hash, otherwise appends a new/replacement node (unless
 * dry-run, which only reports the action that would be taken).
 *
 * @param source - Source definition
 * @param content - File content
 * @param ctx - Processing context ({ memoryDir, indexedAt, dryRun, existingNodes })
 * @returns 'skipped' | 'updated' | 'created'
 */
async function processSource(source, content, ctx) {
  const contentHash = computeHash(content);
  const existingNode = findExistingNode(ctx.existingNodes, source.path);
  // Unchanged content: nothing to write.
  if (existingNode && existingNode.metadata?.source_hash === contentHash) {
    return 'skipped';
  }
  if (!ctx.dryRun) {
    const replacement = createSourceNode(source, content, contentHash, ctx.indexedAt, existingNode?.id);
    await appendNode(ctx.memoryDir, replacement);
  }
  return existingNode ? 'updated' : 'created';
}
223
/**
 * Loads existing memory nodes, treating any load failure (e.g. missing
 * memory file on first run) as an empty node list.
 *
 * @param memoryDir - Memory directory path
 * @returns Array of existing memory nodes
 */
async function loadExistingNodes(memoryDir) {
  try {
    const memory = await loadMemory(memoryDir);
    return memory.nodes;
  }
  catch {
    // Best-effort: no readable memory yet means nothing to compare against.
    return [];
  }
}
238
/**
 * Indexes project sources and creates/updates memory nodes
 *
 * Scans each default (plus any additional) source file under baseDir,
 * summarizes it, and appends/updates a project-lifecycle memory node.
 * Idempotent: unchanged sources (same content hash) are skipped.
 *
 * @param baseDir - Project base directory
 * @param options - Indexing options: dryRun (boolean, default false),
 *   additionalSources (array of source definitions, default [])
 * @returns Index result: { success, nodesCreated, nodesUpdated,
 *   nodesSkipped, sourcesScanned, sourcesMissing }
 *
 * @example
 * const result = await indexProject('/path/to/project');
 * console.log(`Created: ${result.nodesCreated}, Updated: ${result.nodesUpdated}`);
 *
 * @example
 * // Dry-run mode
 * const result = await indexProject('/path/to/project', { dryRun: true });
 * console.log('Would create:', result.nodesCreated);
 */
export async function indexProject(baseDir, options = {}) {
  const { dryRun = false, additionalSources = [] } = options;
  const result = {
    success: true,
    nodesCreated: 0,
    nodesUpdated: 0,
    nodesSkipped: 0,
    sourcesScanned: [],
    sourcesMissing: [],
  };
  const sources = [...DEFAULT_SOURCES, ...additionalSources];
  const memoryDir = path.join(baseDir, LUMENFLOW_MEMORY_PATHS.MEMORY_DIR);
  // One timestamp for the whole run, shared by every node written.
  const indexedAt = new Date().toISOString();
  // Ensure memory directory exists (unless dry-run, which must not touch disk)
  if (!dryRun) {
    await fs.mkdir(memoryDir, { recursive: true });
  }
  const existingNodes = await loadExistingNodes(memoryDir);
  const ctx = { memoryDir, indexedAt, dryRun, existingNodes };
  // Process each source sequentially (appendNode writes to one shared file).
  for (const source of sources) {
    const sourcePath = path.join(baseDir, source.path);
    // Try to read file content; unreadable/missing sources are recorded
    // and skipped rather than failing the whole run.
    let content;
    try {
      content = await fs.readFile(sourcePath, 'utf-8');
    }
    catch {
      result.sourcesMissing.push(source.path);
      continue;
    }
    result.sourcesScanned.push(source.path);
    // Process the source and tally the outcome.
    const action = await processSource(source, content, ctx);
    if (action === 'created') {
      result.nodesCreated++;
    }
    else if (action === 'updated') {
      result.nodesUpdated++;
    }
    else {
      result.nodesSkipped++;
    }
  }
  return result;
}
300
/**
 * Gets the default sources that will be scanned. Returns a shallow copy so
 * callers cannot mutate the module-level list.
 *
 * @returns Array of source definitions
 */
export function getDefaultSources() {
  return DEFAULT_SOURCES.slice();
}
@@ -4,9 +4,9 @@
4
4
  * Core logic for initializing memory layer in a repository.
5
5
  * Creates .lumenflow/memory/ directory with empty memory.jsonl and config.yaml.
6
6
  *
7
- * @see {@link tools/__tests__/mem-init.test.mjs} - Tests
8
- * @see {@link tools/lib/memory-store.mjs} - Memory store operations
9
- * @see {@link tools/lib/memory-schema.mjs} - Memory schema definitions
7
+ * @see {@link packages/@lumenflow/cli/src/__tests__/mem-init.test.ts} - Tests
8
+ * @see {@link packages/@lumenflow/cli/src/lib/memory-store.ts} - Memory store operations
9
+ * @see {@link packages/@lumenflow/cli/src/lib/memory-schema.ts} - Memory schema definitions
10
10
  */
11
11
  import fs from 'node:fs/promises';
12
12
  import path from 'node:path';