@syntesseraai/opencode-feature-factory 0.2.44 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/agents/building.md +13 -14
  2. package/agents/ff-acceptance.md +12 -15
  3. package/agents/ff-research.md +12 -16
  4. package/agents/ff-review.md +12 -15
  5. package/agents/ff-security.md +12 -15
  6. package/agents/ff-validate.md +12 -15
  7. package/agents/ff-well-architected.md +12 -15
  8. package/agents/planning.md +12 -24
  9. package/agents/reviewing.md +12 -24
  10. package/dist/index.js +7 -7
  11. package/dist/local-recall/daemon.d.ts +35 -0
  12. package/dist/local-recall/daemon.js +188 -0
  13. package/dist/local-recall/index.d.ts +14 -0
  14. package/dist/local-recall/index.js +20 -0
  15. package/dist/local-recall/mcp-server.d.ts +38 -0
  16. package/dist/local-recall/mcp-server.js +71 -0
  17. package/dist/local-recall/mcp-tools.d.ts +90 -0
  18. package/dist/local-recall/mcp-tools.js +162 -0
  19. package/dist/local-recall/memory-service.d.ts +31 -0
  20. package/dist/local-recall/memory-service.js +156 -0
  21. package/dist/local-recall/model-router.d.ts +23 -0
  22. package/dist/local-recall/model-router.js +41 -0
  23. package/dist/local-recall/processed-log.d.ts +41 -0
  24. package/dist/local-recall/processed-log.js +82 -0
  25. package/dist/local-recall/session-extractor.d.ts +19 -0
  26. package/dist/local-recall/session-extractor.js +172 -0
  27. package/dist/local-recall/storage-reader.d.ts +40 -0
  28. package/dist/local-recall/storage-reader.js +147 -0
  29. package/dist/local-recall/thinking-extractor.d.ts +16 -0
  30. package/dist/local-recall/thinking-extractor.js +132 -0
  31. package/dist/local-recall/types.d.ts +129 -0
  32. package/dist/local-recall/types.js +7 -0
  33. package/package.json +4 -4
  34. package/skills/ff-learning/SKILL.md +166 -689
  35. package/dist/learning/memory-get.d.ts +0 -24
  36. package/dist/learning/memory-get.js +0 -155
  37. package/dist/learning/memory-search.d.ts +0 -20
  38. package/dist/learning/memory-search.js +0 -193
  39. package/dist/learning/memory-store.d.ts +0 -20
  40. package/dist/learning/memory-store.js +0 -85
  41. package/dist/plugins/ff-learning-get-plugin.d.ts +0 -2
  42. package/dist/plugins/ff-learning-get-plugin.js +0 -55
  43. package/dist/plugins/ff-learning-search-plugin.d.ts +0 -2
  44. package/dist/plugins/ff-learning-search-plugin.js +0 -65
  45. package/dist/plugins/ff-learning-store-plugin.d.ts +0 -2
  46. package/dist/plugins/ff-learning-store-plugin.js +0 -70
  47. package/skills/ff-computer-use/SKILL.md +0 -473
@@ -463,22 +463,21 @@ Before completing your building task:
463
463
  - Error resolutions and debugging insights
464
464
  - Integration challenges and solutions
465
465
 
466
- 2. **Store important learnings** using ff-learning skill:
467
- - Create semantic memories for technical patterns and best practices
468
- - Create episodic memories for significant implementation experiences
469
- - Create procedural memories for reusable workflows discovered
466
+ 2. **Store important learnings** using the `ff-learning-store` MCP tool:
467
+ - Use category `pattern` for technical patterns and best practices
468
+ - Use category `context` for significant implementation experiences
469
+ - Use category `procedure` for reusable workflows discovered
470
470
  - Tag with relevant technology names and categories
471
471
 
472
472
  3. **Example learning capture:**
473
473
 
474
- ```markdown
475
- After implementing feature:
476
-
477
- 1. Generate UUID: `uuidgen`
478
- 2. Determine memory type based on content:
479
- - Technical pattern → semantic
480
- - Implementation experience → episodic
481
- - Reusable workflow → procedural
482
- 3. Create memory file in appropriate directory
483
- 4. Include frontmatter with relevant tags and importance
474
+ ```
475
+ ff-learning-store(
476
+ title: "Implementation Pattern: [Pattern Name]",
477
+ description: "Best practice for [scenario] in [technology]",
478
+ category: "pattern",
479
+ tags: ["implementation", "pattern", "{technology}"],
480
+ importance: 0.8,
481
+ content: "Detailed explanation of the pattern..."
482
+ )
484
483
  ```
@@ -290,21 +290,18 @@ Before completing your validation task:
290
290
  - Ambiguity resolutions and scope clarifications
291
291
  - Common gaps between requirements and implementation
292
292
 
293
- 2. **Store important learnings** using ff-learning skill:
294
- - Create semantic memories for requirement patterns
295
- - Create episodic memories for significant validation findings
293
+ 2. **Store important learnings** using `ff-learning-store` MCP tool:
294
+ - Use category `pattern` for requirement patterns and validation techniques
295
+ - Use category `decision` for significant scope clarifications and findings
296
296
  - Tag with requirement types, categories, and validation insights
297
297
 
298
- 3. **Example memory format:**
299
- ```yaml
300
- ---
301
- id: 'uuid-from-uuidgen'
302
- title: 'Validation Pattern: [Pattern Name]'
303
- description: 'Common pattern of [issue] in [type] requirements'
304
- date: '2026-02-02T12:00:00Z'
305
- memory_type: 'semantic'
306
- agent_id: 'ff-acceptance'
307
- importance: 0.7
308
- tags: ['validation', 'requirements', 'pattern', '{category}']
309
- ---
298
+ 3. **Example MCP tool call:**
299
+ ```
300
+ ff-learning-store(
301
+ title: "Validation Pattern: [Pattern Name]",
302
+ description: "Common pattern of [issue] in [type] requirements",
303
+ category: "pattern",
304
+ tags: ["validation", "requirements", "pattern", "{category}"],
305
+ importance: 0.7
306
+ )
310
307
  ```
@@ -434,23 +434,19 @@ Before completing your research task:
434
434
  - Best practices discovered
435
435
  - Source quality assessments
436
436
 
437
- 2. **Store important learnings** using ff-learning skill:
438
- - Create semantic memories for key findings and knowledge
439
- - Create episodic memories for significant research discoveries
437
+ 2. **Store important learnings** using `ff-learning-store` MCP tool:
438
+ - Use category `pattern` for key findings and reusable knowledge
439
+ - Use category `context` for significant research discoveries and context
440
440
  - Tag with research topics, technologies, and categories
441
441
  - Set high importance (0.8-1.0) for critical findings
442
442
 
443
- 3. **Example memory format:**
444
- ```yaml
445
- ---
446
- id: 'uuid-from-uuidgen'
447
- title: 'Research Finding: [Topic]'
448
- description: 'Key finding about [topic] from [source]'
449
- date: '2026-02-02T12:00:00Z'
450
- memory_type: 'semantic'
451
- agent_id: 'ff-research'
452
- importance: 0.9
453
- tags: ['research', '{topic}', '{technology}', 'best-practices']
454
- source: 'research'
455
- ---
443
+ 3. **Example MCP tool call:**
444
+ ```
445
+ ff-learning-store(
446
+ title: "Research Finding: [Topic]",
447
+ description: "Key finding about [topic] from [source]",
448
+ category: "pattern",
449
+ tags: ["research", "{topic}", "{technology}", "best-practices"],
450
+ importance: 0.9
451
+ )
456
452
  ```
@@ -293,21 +293,18 @@ Before completing your code review:
293
293
  - Best practices identified
294
294
  - Review technique improvements
295
295
 
296
- 2. **Store important learnings** using ff-learning skill:
297
- - Create semantic memories for code patterns and anti-patterns
298
- - Create episodic memories for significant review findings
296
+ 2. **Store important learnings** using `ff-learning-store` MCP tool:
297
+ - Use category `pattern` for code patterns and anti-patterns
298
+ - Use category `decision` for significant review findings and trade-offs
299
299
  - Tag with code quality categories, technologies, and patterns
300
300
 
301
- 3. **Example memory format:**
302
- ```yaml
303
- ---
304
- id: 'uuid-from-uuidgen'
305
- title: 'Code Pattern: [Pattern Name]'
306
- description: 'Effective pattern for [scenario] in [technology]'
307
- date: '2026-02-02T12:00:00Z'
308
- memory_type: 'semantic'
309
- agent_id: 'ff-review'
310
- importance: 0.7
311
- tags: ['code-quality', 'pattern', '{technology}', '{category}']
312
- ---
301
+ 3. **Example MCP tool call:**
302
+ ```
303
+ ff-learning-store(
304
+ title: "Code Pattern: [Pattern Name]",
305
+ description: "Effective pattern for [scenario] in [technology]",
306
+ category: "pattern",
307
+ tags: ["code-quality", "pattern", "{technology}", "{category}"],
308
+ importance: 0.7
309
+ )
313
310
  ```
@@ -327,21 +327,18 @@ Before completing your security audit:
327
327
  - Threat model insights
328
328
  - Common security mistakes observed
329
329
 
330
- 2. **Store important learnings** using ff-learning skill:
331
- - Create semantic memories for vulnerability patterns and fixes
332
- - Create episodic memories for significant security findings
330
+ 2. **Store important learnings** using `ff-learning-store` MCP tool:
331
+ - Use category `pattern` for vulnerability patterns and fixes
332
+ - Use category `debugging` for significant security findings and investigations
333
333
  - Tag with security categories, vulnerability types, and technologies
334
334
 
335
- 3. **Example memory format:**
336
- ```yaml
337
- ---
338
- id: 'uuid-from-uuidgen'
339
- title: 'Security Pattern: [Vulnerability Type]'
340
- description: 'Common [vulnerability] pattern in [context] and how to fix it'
341
- date: '2026-02-02T12:00:00Z'
342
- memory_type: 'semantic'
343
- agent_id: 'ff-security'
344
- importance: 0.9
345
- tags: ['security', 'vulnerability', '{type}', '{technology}']
346
- ---
335
+ 3. **Example MCP tool call:**
336
+ ```
337
+ ff-learning-store(
338
+ title: "Security Pattern: [Vulnerability Type]",
339
+ description: "Common [vulnerability] pattern in [context] and how to fix it",
340
+ category: "pattern",
341
+ tags: ["security", "vulnerability", "{type}", "{technology}"],
342
+ importance: 0.9
343
+ )
347
344
  ```
@@ -321,21 +321,18 @@ Before completing your validation:
321
321
  - Validation efficiency improvements
322
322
  - Cross-dimensional finding patterns
323
323
 
324
- 2. **Store important learnings** using ff-learning skill:
325
- - Create semantic memories for validation patterns
326
- - Create episodic memories for significant validation results
324
+ 2. **Store important learnings** using `ff-learning-store` MCP tool:
325
+ - Use category `pattern` for validation patterns and effective techniques
326
+ - Use category `decision` for significant validation findings
327
327
  - Tag with validation types and categories
328
328
 
329
- 3. **Example memory format:**
330
- ```yaml
331
- ---
332
- id: 'uuid-from-uuidgen'
333
- title: 'Validation Pattern: [Pattern Name]'
334
- description: 'Common pattern of [findings] when validating [type] changes'
335
- date: '2026-02-02T12:00:00Z'
336
- memory_type: 'semantic'
337
- agent_id: 'ff-validate'
338
- importance: 0.7
339
- tags: ['validation', 'pattern', '{category}']
340
- ---
329
+ 3. **Example MCP tool call:**
330
+ ```
331
+ ff-learning-store(
332
+ title: "Validation Pattern: [Pattern Name]",
333
+ description: "Common pattern of [findings] when validating [type] changes",
334
+ category: "pattern",
335
+ tags: ["validation", "pattern", "{category}"],
336
+ importance: 0.7
337
+ )
341
338
  ```
@@ -289,21 +289,18 @@ Before completing your architecture review:
289
289
  - Scalability and efficiency considerations
290
290
  - Cross-pillar interactions and trade-offs
291
291
 
292
- 2. **Store important learnings** using ff-learning skill:
293
- - Create semantic memories for architectural patterns
294
- - Create episodic memories for significant architectural findings
292
+ 2. **Store important learnings** using `ff-learning-store` MCP tool:
293
+ - Use category `pattern` for architectural patterns and best practices
294
+ - Use category `decision` for significant architectural trade-offs
295
295
  - Tag with pillar names, architecture categories, and patterns
296
296
 
297
- 3. **Example memory format:**
298
- ```yaml
299
- ---
300
- id: 'uuid-from-uuidgen'
301
- title: 'Architecture Pattern: [Pattern Name]'
302
- description: 'Pattern for achieving [pillar] excellence in [context]'
303
- date: '2026-02-02T12:00:00Z'
304
- memory_type: 'semantic'
305
- agent_id: 'ff-well-architected'
306
- importance: 0.8
307
- tags: ['architecture', '{pillar}', 'pattern', '{category}']
308
- ---
297
+ 3. **Example MCP tool call:**
298
+ ```
299
+ ff-learning-store(
300
+ title: "Architecture Pattern: [Pattern Name]",
301
+ description: "Pattern for achieving [pillar] excellence in [context]",
302
+ category: "pattern",
303
+ tags: ["architecture", "{pillar}", "pattern", "{category}"],
304
+ importance: 0.8
305
+ )
309
306
  ```
@@ -369,32 +369,20 @@ Before completing your planning task:
369
369
  - Pattern discoveries from codebase exploration
370
370
  - Estimation accuracy and lessons learned
371
371
 
372
- 2. **Store important learnings** using ff-learning skill:
373
- - Create semantic memories for architectural patterns discovered
374
- - Create episodic memories for significant planning decisions
372
+ 2. **Store important learnings** using the `ff-learning-store` MCP tool:
373
+ - Use category `pattern` for architectural patterns discovered
374
+ - Use category `decision` for significant planning decisions
375
375
  - Tag with relevant keywords (architecture, patterns, risks, decisions)
376
376
 
377
377
  3. **Example learning capture:**
378
378
 
379
- ```markdown
380
- After completing planning:
381
-
382
- 1. Generate UUID: `uuidgen` `550e8400-e29b-41d4-a716-446655440000`
383
- 2. Create memory file in `.feature-factory/memories/semantic/architecture/`:
384
-
385
- ---
386
-
387
- id: '550e8400-e29b-41d4-a716-446655440000'
388
- title: 'Architecture Decision: [Decision Name]'
389
- description: 'Rationale for choosing [approach] over [alternatives]'
390
- date: '2026-02-02T14:30:00Z'
391
- memory_type: 'semantic'
392
- agent_id: 'planning'
393
- importance: 0.8
394
- tags: ['architecture', 'decision', '{technology}']
395
- source: 'planning'
396
-
397
- ---
398
-
399
- Detailed explanation of the decision...
379
+ ```
380
+ ff-learning-store(
381
+ title: "Architecture Decision: [Decision Name]",
382
+ description: "Rationale for choosing [approach] over [alternatives]",
383
+ category: "decision",
384
+ tags: ["architecture", "decision", "{technology}"],
385
+ importance: 0.8,
386
+ content: "Detailed explanation of the decision..."
387
+ )
400
388
  ```
@@ -493,32 +493,20 @@ Before completing your review task:
493
493
  - Review effectiveness and process improvements
494
494
  - Tool and technique discoveries
495
495
 
496
- 2. **Store important learnings** using ff-learning skill:
497
- - Create semantic memories for common issue patterns and solutions
498
- - Create episodic memories for significant review findings
496
+ 2. **Store important learnings** using the `ff-learning-store` MCP tool:
497
+ - Use category `pattern` for common issue patterns and solutions
498
+ - Use category `decision` for significant review findings
499
499
  - Tag with review type, categories, and technologies reviewed
500
500
 
501
501
  3. **Example learning capture:**
502
502
 
503
- ```markdown
504
- After completing review:
505
-
506
- 1. Generate UUID: `uuidgen` `550e8400-e29b-41d4-a716-446655440000`
507
- 2. Create memory file in `.feature-factory/memories/semantic/review-patterns/`:
508
-
509
- ---
510
-
511
- id: '550e8400-e29b-41d4-a716-446655440000'
512
- title: 'Review Pattern: [Common Issue Type]'
513
- description: 'Common pattern of [issue] found in [context]'
514
- date: '2026-02-02T14:30:00Z'
515
- memory_type: 'semantic'
516
- agent_id: 'reviewing'
517
- importance: 0.7
518
- tags: ['review', 'pattern', '{issue-type}', '{technology}']
519
- source: 'review'
520
-
521
- ---
522
-
523
- Detailed explanation of the pattern and solution...
503
+ ```
504
+ ff-learning-store(
505
+ title: "Review Pattern: [Common Issue Type]",
506
+ description: "Common pattern of [issue] found in [context]",
507
+ category: "pattern",
508
+ tags: ["review", "pattern", "{issue-type}", "{technology}"],
509
+ importance: 0.7,
510
+ content: "Detailed explanation of the pattern and solution..."
511
+ )
524
512
  ```
package/dist/index.js CHANGED
@@ -3,9 +3,7 @@ import { StopQualityGateHooksPlugin } from './stop-quality-gate.js';
3
3
  import { createFFAgentsCurrentTool } from './plugins/ff-agents-current-plugin.js';
4
4
  import { createFFAgentsShowTool } from './plugins/ff-agents-show-plugin.js';
5
5
  import { createFFAgentsClearTool } from './plugins/ff-agents-clear-plugin.js';
6
- import { createFFLearningStoreTool } from './plugins/ff-learning-store-plugin.js';
7
- import { createFFLearningSearchTool } from './plugins/ff-learning-search-plugin.js';
8
- import { createFFLearningGetTool } from './plugins/ff-learning-get-plugin.js';
6
+ import { createLearningStoreTool, createLearningSearchTool, createLearningGetTool, initLocalRecall, } from './local-recall/index.js';
9
7
  import { createFFPlanCreateTool } from './plugins/ff-plan-create-plugin.js';
10
8
  import { createFFPlanUpdateTool } from './plugins/ff-plan-update-plugin.js';
11
9
  import { createFFAgentContextCreateTool } from './plugins/ff-agent-context-create-plugin.js';
@@ -36,6 +34,8 @@ export const FeatureFactoryPlugin = async (input) => {
36
34
  if (!directory || directory === '' || directory === '/') {
37
35
  return {};
38
36
  }
37
+ // Initialize local-recall memory system
38
+ initLocalRecall(directory);
39
39
  // Load hooks from the quality gate plugin
40
40
  const qualityGateHooks = await StopQualityGateHooksPlugin(input).catch(() => ({}));
41
41
  // Create all tools
@@ -44,10 +44,10 @@ export const FeatureFactoryPlugin = async (input) => {
44
44
  'ff-agents-current': createFFAgentsCurrentTool(),
45
45
  'ff-agents-show': createFFAgentsShowTool(),
46
46
  'ff-agents-clear': createFFAgentsClearTool(),
47
- // Learning/memory tools
48
- 'ff-learning-store': createFFLearningStoreTool(),
49
- 'ff-learning-search': createFFLearningSearchTool(),
50
- 'ff-learning-get': createFFLearningGetTool(),
47
+ // Learning/memory tools (local-recall)
48
+ 'ff-learning-store': createLearningStoreTool(),
49
+ 'ff-learning-search': createLearningSearchTool(),
50
+ 'ff-learning-get': createLearningGetTool(),
51
51
  // Plan tools
52
52
  'ff-plan-create': createFFPlanCreateTool(),
53
53
  'ff-plan-update': createFFPlanUpdateTool(),
@@ -0,0 +1,35 @@
1
+ /**
2
+ * daemon.ts — Background extraction daemon for local-recall.
3
+ *
4
+ * Scans OpenCode session storage for unprocessed assistant messages,
5
+ * runs the extraction pipeline (session + thinking extractors),
6
+ * and stores resulting memories with logical IDs and content-hash
7
+ * idempotency.
8
+ *
9
+ * Logical ID conventions:
10
+ * session memories → session-<sessionID>-<messageID>-<index>
11
+ * thinking memories → thinking-<sessionID>-<messageID>-<index>
12
+ *
13
+ * Idempotency is dual-layer:
14
+ * 1. Message-ID check (fast skip for already-processed messages)
15
+ * 2. Content-hash check (skips duplicate content across edits/replays)
16
+ */
17
+ export interface ExtractionStats {
18
+ sessionsScanned: number;
19
+ messagesScanned: number;
20
+ messagesSkipped: number;
21
+ newMemories: number;
22
+ errors: string[];
23
+ }
24
+ /**
25
+ * Run a full extraction pass for the project rooted at `directory`.
26
+ *
27
+ * 1. Find the OpenCode project matching `directory`
28
+ * 2. Load existing processed log for fast membership checks
29
+ * 3. Iterate sessions → messages (assistant only)
30
+ * 4. Skip by message-ID *and* content-hash (dual idempotency)
31
+ * 5. Run session + thinking extractors
32
+ * 6. Assign logical IDs (session-<sid>-<msgid>-N / thinking-<sid>-<msgid>-N)
33
+ * 7. Store new memories & update processed log
34
+ */
35
+ export declare function runExtraction(directory: string): Promise<ExtractionStats>;
@@ -0,0 +1,188 @@
1
+ /**
2
+ * daemon.ts — Background extraction daemon for local-recall.
3
+ *
4
+ * Scans OpenCode session storage for unprocessed assistant messages,
5
+ * runs the extraction pipeline (session + thinking extractors),
6
+ * and stores resulting memories with logical IDs and content-hash
7
+ * idempotency.
8
+ *
9
+ * Logical ID conventions:
10
+ * session memories → session-<sessionID>-<messageID>-<index>
11
+ * thinking memories → thinking-<sessionID>-<messageID>-<index>
12
+ *
13
+ * Idempotency is dual-layer:
14
+ * 1. Message-ID check (fast skip for already-processed messages)
15
+ * 2. Content-hash check (skips duplicate content across edits/replays)
16
+ */
17
+ import * as path from 'node:path';
18
+ import * as fs from 'node:fs/promises';
19
+ import { findProject, listSessions, listMessages, listParts } from './storage-reader.js';
20
+ import { extractFromMessage } from './session-extractor.js';
21
+ import { extractThinkingFromMessage } from './thinking-extractor.js';
22
+ import { readProcessedLog, getProcessedMessageIDs, getProcessedHashes, markProcessed, contentHash, } from './processed-log.js';
23
+ import { storeMemories } from './memory-service.js';
24
+ // ────────────────────────────────────────────────────────────
25
+ // Helpers
26
+ // ────────────────────────────────────────────────────────────
27
+ /**
28
+ * Build a deterministic content hash for a message based on its
29
+ * concatenated part text. This allows us to detect duplicate
30
+ * processing even when message IDs change.
31
+ */
32
+ async function buildMessageContentHash(messageID) {
33
+ try {
34
+ const parts = await listParts(messageID);
35
+ const textParts = parts
36
+ .filter((p) => p.type === 'text' || p.type === 'reasoning')
37
+ .map((p) => p.text ?? '')
38
+ .join('\n');
39
+ // Hash content only (no messageID) so dedupe works across ID changes
40
+ return contentHash(textParts);
41
+ }
42
+ catch {
43
+ // Fallback: hash the message ID itself (best effort)
44
+ return contentHash(messageID);
45
+ }
46
+ }
47
+ // ────────────────────────────────────────────────────────────
48
+ // Main extraction loop
49
+ // ────────────────────────────────────────────────────────────
50
+ /**
51
+ * Run a full extraction pass for the project rooted at `directory`.
52
+ *
53
+ * 1. Find the OpenCode project matching `directory`
54
+ * 2. Load existing processed log for fast membership checks
55
+ * 3. Iterate sessions → messages (assistant only)
56
+ * 4. Skip by message-ID *and* content-hash (dual idempotency)
57
+ * 5. Run session + thinking extractors
58
+ * 6. Assign logical IDs (session-<sid>-<msgid>-N / thinking-<sid>-<msgid>-N)
59
+ * 7. Store new memories & update processed log
60
+ */
61
+ export async function runExtraction(directory) {
62
+ const stats = {
63
+ sessionsScanned: 0,
64
+ messagesScanned: 0,
65
+ messagesSkipped: 0,
66
+ newMemories: 0,
67
+ errors: [],
68
+ };
69
+ try {
70
+ // Find project for this directory
71
+ const project = await findProject(directory);
72
+ if (!project) {
73
+ stats.errors.push(`No OpenCode project found for directory: ${directory}`);
74
+ return stats;
75
+ }
76
+ // Ensure local-recall directories exist
77
+ const recallDir = path.join(directory, '.feature-factory', 'local-recall');
78
+ await fs.mkdir(path.join(recallDir, 'memories'), { recursive: true });
79
+ // Pre-load processed log for fast lookups
80
+ const existingLog = await readProcessedLog(directory);
81
+ const processedMsgIDs = getProcessedMessageIDs(existingLog);
82
+ const processedHashes = getProcessedHashes(existingLog);
83
+ // Iterate sessions
84
+ const sessions = await listSessions(project.id);
85
+ stats.sessionsScanned = sessions.length;
86
+ const newProcessedEntries = [];
87
+ const allNewMemories = [];
88
+ for (const session of sessions) {
89
+ try {
90
+ const messages = await listMessages(session.id);
91
+ for (const message of messages) {
92
+ stats.messagesScanned++;
93
+ // Only process assistant messages
94
+ if (message.role !== 'assistant') {
95
+ stats.messagesSkipped++;
96
+ continue;
97
+ }
98
+ // Layer 1: skip by message ID
99
+ if (processedMsgIDs.has(message.id)) {
100
+ stats.messagesSkipped++;
101
+ continue;
102
+ }
103
+ // Layer 2: skip by content hash
104
+ const msgHash = await buildMessageContentHash(message.id);
105
+ if (processedHashes.has(msgHash)) {
106
+ stats.messagesSkipped++;
107
+ continue;
108
+ }
109
+ try {
110
+ const input = {
111
+ sessionID: session.id,
112
+ messageID: message.id,
113
+ };
114
+ // Run both extractors in parallel
115
+ const [sessionResults, thinkingResults] = await Promise.all([
116
+ extractFromMessage(input),
117
+ extractThinkingFromMessage(input),
118
+ ]);
119
+ // Convert ExtractionResult → Memory with logical IDs
120
+ const sessionMemories = sessionResults.map((r, idx) => ({
121
+ id: `session-${session.id}-${message.id}-${idx}`,
122
+ sessionID: r.sessionID,
123
+ messageID: r.messageID,
124
+ category: r.category,
125
+ title: r.title,
126
+ body: r.body,
127
+ tags: r.tags,
128
+ importance: r.importance,
129
+ createdAt: message.time?.created ?? Date.now(),
130
+ extractedBy: 'local-recall-daemon',
131
+ }));
132
+ const thinkingMemories = thinkingResults.map((r, idx) => ({
133
+ id: `thinking-${session.id}-${message.id}-${idx}`,
134
+ sessionID: r.sessionID,
135
+ messageID: r.messageID,
136
+ category: r.category,
137
+ title: r.title,
138
+ body: r.body,
139
+ tags: r.tags,
140
+ importance: r.importance,
141
+ createdAt: message.time?.created ?? Date.now(),
142
+ extractedBy: 'local-recall-daemon',
143
+ }));
144
+ const memories = [...sessionMemories, ...thinkingMemories];
145
+ allNewMemories.push(...memories);
146
+ // Add to fast-lookup sets so subsequent messages in this
147
+ // pass are also deduplicated
148
+ processedMsgIDs.add(message.id);
149
+ processedHashes.add(msgHash);
150
+ // Mark as processed with content hash
151
+ newProcessedEntries.push({
152
+ messageID: message.id,
153
+ contentHash: msgHash,
154
+ processedAt: Date.now(),
155
+ memoriesCreated: memories.length,
156
+ });
157
+ }
158
+ catch (err) {
159
+ stats.errors.push(`Error processing message ${message.id}: ${err instanceof Error ? err.message : String(err)}`);
160
+ // Still mark as processed to avoid re-trying broken messages
161
+ newProcessedEntries.push({
162
+ messageID: message.id,
163
+ contentHash: msgHash,
164
+ processedAt: Date.now(),
165
+ memoriesCreated: 0,
166
+ });
167
+ }
168
+ }
169
+ }
170
+ catch (err) {
171
+ stats.errors.push(`Error processing session ${session.id}: ${err instanceof Error ? err.message : String(err)}`);
172
+ }
173
+ }
174
+ // Batch store all new memories
175
+ if (allNewMemories.length > 0) {
176
+ await storeMemories(directory, allNewMemories);
177
+ stats.newMemories = allNewMemories.length;
178
+ }
179
+ // Batch update processed log
180
+ if (newProcessedEntries.length > 0) {
181
+ await markProcessed(directory, newProcessedEntries);
182
+ }
183
+ }
184
+ catch (err) {
185
+ stats.errors.push(`Extraction failed: ${err instanceof Error ? err.message : String(err)}`);
186
+ }
187
+ return stats;
188
+ }
@@ -0,0 +1,14 @@
1
+ /**
2
+ * local-recall/index.ts — Barrel export for the local-recall module.
3
+ *
4
+ * Re-exports all public APIs from the local-recall memory system.
5
+ */
6
+ export type { Memory, MemoryCategory, SearchCriteria, MemorySearchResult, ExtractionInput, ExtractionResult, ProcessedEntry, OCProject, OCSession, OCMessage, OCPart, } from './types.js';
7
+ export { findProject, listProjects, listSessions, getSession, listMessages, getMessage, listParts, getPart, } from './storage-reader.js';
8
+ export { storeMemory, storeMemories, getMemory, listAllMemories, searchMemories, } from './memory-service.js';
9
+ export { extractFromMessage, extractFromParts } from './session-extractor.js';
10
+ export { extractThinkingFromMessage, extractFromThinkingParts } from './thinking-extractor.js';
11
+ export { isProcessed, isContentProcessed, markProcessed, readProcessedLog, contentHash, getProcessedMessageIDs, getProcessedHashes, } from './processed-log.js';
12
+ export { runExtraction, type ExtractionStats } from './daemon.js';
13
+ export { initLocalRecall, isInitialized, getDirectory, triggerExtraction, getLastExtractionStats, shutdownLocalRecall, } from './mcp-server.js';
14
+ export { createLearningStoreTool, createLearningSearchTool, createLearningGetTool, } from './mcp-tools.js';
@@ -0,0 +1,20 @@
1
+ /**
2
+ * local-recall/index.ts — Barrel export for the local-recall module.
3
+ *
4
+ * Re-exports all public APIs from the local-recall memory system.
5
+ */
6
+ // Storage reader
7
+ export { findProject, listProjects, listSessions, getSession, listMessages, getMessage, listParts, getPart, } from './storage-reader.js';
8
+ // Memory service
9
+ export { storeMemory, storeMemories, getMemory, listAllMemories, searchMemories, } from './memory-service.js';
10
+ // Extractors
11
+ export { extractFromMessage, extractFromParts } from './session-extractor.js';
12
+ export { extractThinkingFromMessage, extractFromThinkingParts } from './thinking-extractor.js';
13
+ // Processed log
14
+ export { isProcessed, isContentProcessed, markProcessed, readProcessedLog, contentHash, getProcessedMessageIDs, getProcessedHashes, } from './processed-log.js';
15
+ // Daemon
16
+ export { runExtraction } from './daemon.js';
17
+ // MCP server lifecycle
18
+ export { initLocalRecall, isInitialized, getDirectory, triggerExtraction, getLastExtractionStats, shutdownLocalRecall, } from './mcp-server.js';
19
+ // MCP tools (plugin tool creators)
20
+ export { createLearningStoreTool, createLearningSearchTool, createLearningGetTool, } from './mcp-tools.js';