@syntesseraai/opencode-feature-factory 0.2.45 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/agents/building.md +13 -14
  2. package/agents/ff-acceptance.md +12 -15
  3. package/agents/ff-research.md +12 -16
  4. package/agents/ff-review.md +12 -15
  5. package/agents/ff-security.md +12 -15
  6. package/agents/ff-validate.md +12 -15
  7. package/agents/ff-well-architected.md +12 -15
  8. package/agents/planning.md +12 -24
  9. package/agents/reviewing.md +12 -24
  10. package/dist/index.js +7 -7
  11. package/dist/local-recall/daemon.d.ts +35 -0
  12. package/dist/local-recall/daemon.js +188 -0
  13. package/dist/local-recall/index.d.ts +14 -0
  14. package/dist/local-recall/index.js +20 -0
  15. package/dist/local-recall/mcp-server.d.ts +38 -0
  16. package/dist/local-recall/mcp-server.js +71 -0
  17. package/dist/local-recall/mcp-tools.d.ts +90 -0
  18. package/dist/local-recall/mcp-tools.js +162 -0
  19. package/dist/local-recall/memory-service.d.ts +31 -0
  20. package/dist/local-recall/memory-service.js +156 -0
  21. package/dist/local-recall/model-router.d.ts +23 -0
  22. package/dist/local-recall/model-router.js +41 -0
  23. package/dist/local-recall/processed-log.d.ts +41 -0
  24. package/dist/local-recall/processed-log.js +82 -0
  25. package/dist/local-recall/session-extractor.d.ts +19 -0
  26. package/dist/local-recall/session-extractor.js +172 -0
  27. package/dist/local-recall/storage-reader.d.ts +40 -0
  28. package/dist/local-recall/storage-reader.js +147 -0
  29. package/dist/local-recall/thinking-extractor.d.ts +16 -0
  30. package/dist/local-recall/thinking-extractor.js +132 -0
  31. package/dist/local-recall/types.d.ts +129 -0
  32. package/dist/local-recall/types.js +7 -0
  33. package/package.json +1 -1
  34. package/skills/ff-learning/SKILL.md +166 -689
  35. package/dist/learning/memory-get.d.ts +0 -24
  36. package/dist/learning/memory-get.js +0 -155
  37. package/dist/learning/memory-search.d.ts +0 -20
  38. package/dist/learning/memory-search.js +0 -193
  39. package/dist/learning/memory-store.d.ts +0 -20
  40. package/dist/learning/memory-store.js +0 -85
  41. package/dist/plugins/ff-learning-get-plugin.d.ts +0 -2
  42. package/dist/plugins/ff-learning-get-plugin.js +0 -55
  43. package/dist/plugins/ff-learning-search-plugin.d.ts +0 -2
  44. package/dist/plugins/ff-learning-search-plugin.js +0 -65
  45. package/dist/plugins/ff-learning-store-plugin.d.ts +0 -2
  46. package/dist/plugins/ff-learning-store-plugin.js +0 -70
@@ -0,0 +1,82 @@
1
+ /**
2
+ * Processed Log
3
+ *
4
+ * Tracks which messages have already been processed for memory extraction.
5
+ * Uses content-hash based idempotency: each processed entry stores a
6
+ * hash derived from the message content so re-extractions with identical
7
+ * content are skipped even if message IDs change.
8
+ *
9
+ * Stored as a JSON file at .feature-factory/local-recall/processed.json
10
+ */
11
+ import { createHash } from 'node:crypto';
12
+ import { readFile, writeFile, mkdir } from 'node:fs/promises';
13
+ import { join, dirname } from 'node:path';
14
/** Absolute path of the processed-message log file inside `directory`. */
function getLogPath(directory) {
    const relative = ['.feature-factory', 'local-recall', 'processed.json'];
    return join(directory, ...relative);
}
17
+ // ────────────────────────────────────────────────────────────
18
+ // Content hashing
19
+ // ────────────────────────────────────────────────────────────
20
/**
 * Create a SHA-256 content hash from one or more text fragments.
 * Used to de-duplicate extraction across re-processes / message edits.
 *
 * Fragments are streamed into a single hasher, so hashing ('a', 'bc')
 * yields the same digest as hashing ('abc').
 */
export function contentHash(...fragments) {
    return fragments
        .reduce((hasher, fragment) => hasher.update(fragment), createHash('sha256'))
        .digest('hex');
}
31
+ // ────────────────────────────────────────────────────────────
32
+ // Log CRUD
33
+ // ────────────────────────────────────────────────────────────
34
/**
 * Read all processed entries from the log.
 *
 * Returns [] when the log file is missing OR malformed — both mean
 * "nothing recorded yet" from the caller's point of view.
 */
export async function readProcessedLog(directory) {
    try {
        const contents = await readFile(getLogPath(directory), 'utf-8');
        return JSON.parse(contents);
    }
    catch {
        return [];
    }
}
46
/**
 * Check if a specific message has already been processed (by message ID).
 */
export async function isProcessed(directory, messageID) {
    for (const entry of await readProcessedLog(directory)) {
        if (entry.messageID === messageID) {
            return true;
        }
    }
    return false;
}
53
/**
 * Check if a content hash has already been processed.
 * This catches duplicate content even if message IDs differ.
 */
export async function isContentProcessed(directory, hash) {
    const entries = await readProcessedLog(directory);
    return entries.find((entry) => entry.contentHash === hash) !== undefined;
}
61
/**
 * Mark messages as processed by appending entries to the log.
 *
 * Entries whose (messageID, contentHash) pair is already recorded are
 * skipped: the original append-only behaviour let repeated marking grow
 * processed.json without bound, slowing every subsequent lookup. Skipping
 * exact duplicates changes no observable result of isProcessed /
 * isContentProcessed, which only test membership.
 */
export async function markProcessed(directory, entries) {
    const logPath = getLogPath(directory);
    await mkdir(dirname(logPath), { recursive: true });
    const existing = await readProcessedLog(directory);
    // NUL is a safe separator: it cannot appear in JSON string content here.
    const key = (e) => `${e.messageID}\u0000${e.contentHash}`;
    const seen = new Set(existing.map(key));
    const fresh = entries.filter((e) => !seen.has(key(e)));
    if (fresh.length === 0) {
        return; // nothing new — avoid rewriting an identical file
    }
    const merged = [...existing, ...fresh];
    await writeFile(logPath, JSON.stringify(merged, null, 2), 'utf-8');
}
71
/**
 * Get the set of already-processed message IDs for fast lookup.
 */
export function getProcessedMessageIDs(log) {
    const ids = new Set();
    for (const entry of log) {
        ids.add(entry.messageID);
    }
    return ids;
}
77
/**
 * Get the set of already-processed content hashes for fast lookup.
 */
export function getProcessedHashes(log) {
    const hashes = new Set();
    for (const entry of log) {
        hashes.add(entry.contentHash);
    }
    return hashes;
}
@@ -0,0 +1,19 @@
1
/**
 * session-extractor.ts — Extracts learnings from OpenCode session messages.
 *
 * Scans assistant text parts for reusable knowledge (patterns, decisions,
 * debugging insights, preferences, context, procedures) and returns
 * ExtractionResult[] ready for storage.
 */
import type { ExtractionInput, ExtractionResult, OCPart } from './types.js';
/**
 * Extract learnings from a set of text parts for a single message.
 *
 * Each qualifying text part that passes heuristic classification
 * becomes one ExtractionResult. Non-text or too-short parts are skipped.
 */
export declare function extractFromParts(input: ExtractionInput, parts: OCPart[]): ExtractionResult[];
/**
 * High-level: extract from a message by reading its parts from storage.
 */
export declare function extractFromMessage(input: ExtractionInput): Promise<ExtractionResult[]>;
@@ -0,0 +1,172 @@
1
+ /**
2
+ * session-extractor.ts — Extracts learnings from OpenCode session messages.
3
+ *
4
+ * Scans assistant text parts for reusable knowledge (patterns, decisions,
5
+ * debugging insights, preferences, context, procedures) and returns
6
+ * ExtractionResult[] ready for storage.
7
+ */
8
+ import { listParts } from './storage-reader.js';
9
// Heuristic classification table for plain assistant text.
// Each rule maps a memory category to trigger regexes. `weight` scales the
// hit ratio (matched patterns / total patterns) into the final confidence,
// so a category with a higher weight wins when hit ratios are equal.
const CATEGORY_RULES = [
    {
        category: 'pattern',
        weight: 0.9,
        patterns: [
            /\bpattern\b/i,
            /\bbest practice\b/i,
            /\bidiom(atic)?\b/i,
            /\bconvention\b/i,
            /\balways use\b/i,
            /\bprefer\s+\w+\s+over\b/i,
        ],
    },
    {
        category: 'decision',
        weight: 0.85,
        patterns: [
            /\bdecid(e|ed|ing)\b/i,
            /\bchose\b/i,
            /\btrade-?off\b/i,
            /\bwent with\b/i,
            /\binstead of\b/i,
            /\bwe (should|chose|picked)\b/i,
        ],
    },
    {
        category: 'debugging',
        weight: 0.8,
        patterns: [
            /\bbug\b/i,
            /\bfix(ed|ing)?\b/i,
            /\berror\b/i,
            /\broot cause\b/i,
            /\bwork-?around\b/i,
            /\bregression\b/i,
            /\bstack trace\b/i,
        ],
    },
    {
        category: 'preference',
        weight: 0.7,
        patterns: [
            /\bprefer(ence|red|s)?\b/i,
            /\bstyle\b/i,
            /\bdon't like\b/i,
            /\bfavor\b/i,
            /\brather\b/i,
        ],
    },
    {
        category: 'procedure',
        weight: 0.75,
        patterns: [
            /\bstep[- ]?by[- ]?step\b/i,
            /\bworkflow\b/i,
            /\bprocedure\b/i,
            /\bhow to\b/i,
            /\brecipe\b/i,
            /\brun(ning)?\s+(the|this)?\s*command\b/i,
        ],
    },
    {
        category: 'context',
        weight: 0.6,
        patterns: [
            /\barchitecture\b/i,
            /\bdesign\b/i,
            /\bconstraint\b/i,
            /\brequirement\b/i,
            /\binfrastructure\b/i,
            /\bstack\b/i,
        ],
    },
];
83
+ // ────────────────────────────────────────────────────────────
84
+ // Helpers
85
+ // ────────────────────────────────────────────────────────────
86
/**
 * Score `text` against every category rule and return the strongest match,
 * or null when no rule matches at all.
 *
 * Confidence = (matching patterns / total patterns) * rule weight, capped at 1.
 */
function classifyText(text) {
    let winner = null;
    for (const rule of CATEGORY_RULES) {
        const hits = rule.patterns.filter((pattern) => pattern.test(text)).length;
        if (hits === 0) {
            continue;
        }
        const confidence = Math.min(1, (hits / rule.patterns.length) * rule.weight);
        if (winner === null || confidence > winner.confidence) {
            winner = { category: rule.category, confidence };
        }
    }
    return winner;
}
104
/**
 * Derive a short title: first line longer than 10 chars (whole text as
 * fallback), leading markdown heading markers stripped, capped at 120 chars.
 */
function deriveTitle(text) {
    const lines = text.split(/\n/);
    const candidate = lines.find((line) => line.trim().length > 10) ?? text;
    const title = candidate.replace(/^#+\s*/, '').trim();
    if (title.length <= 120) {
        return title;
    }
    return `${title.slice(0, 117)}...`;
}
110
/**
 * Extract simple tags from text: lowercase mentions of well-known
 * languages, frameworks and tools, de-duplicated and capped at 10.
 */
function deriveTags(text) {
    // Keyword families are checked in order (languages, frameworks, tools)
    // so the original insertion order of tags is preserved.
    const matchers = [
        /\b(TypeScript|JavaScript|Python|Rust|Go|Java|C\+\+|Ruby|Swift|Kotlin)\b/gi,
        /\b(React|Next\.?js|Vue|Angular|Express|Django|Flask|Spring|Rails)\b/gi,
        /\b(Docker|Kubernetes|Terraform|AWS|GCP|Azure|GitHub|GitLab)\b/gi,
    ];
    const tags = new Set();
    for (const matcher of matchers) {
        for (const hit of text.match(matcher) ?? []) {
            tags.add(hit.toLowerCase());
        }
    }
    return [...tags].slice(0, 10);
}
127
// ────────────────────────────────────────────────────────────
// Minimum thresholds
// ────────────────────────────────────────────────────────────
/** Minimum text length to consider for extraction — shorter parts rarely carry a reusable insight. */
const MIN_TEXT_LENGTH = 80;
/** Minimum confidence to accept a classification; weaker matches are treated as noise. */
const MIN_CONFIDENCE = 0.15;
134
+ // ────────────────────────────────────────────────────────────
135
+ // Public API
136
+ // ────────────────────────────────────────────────────────────
137
/**
 * Extract learnings from a set of text parts for a single message.
 *
 * Each qualifying text part that passes heuristic classification
 * becomes one ExtractionResult.
 */
export function extractFromParts(input, parts) {
    const results = [];
    for (const part of parts) {
        // Only substantial plain-text parts qualify for extraction.
        if (part.type !== 'text' || !part.text || part.text.length < MIN_TEXT_LENGTH) {
            continue;
        }
        const match = classifyText(part.text);
        if (!match || match.confidence < MIN_CONFIDENCE) {
            continue;
        }
        results.push({
            sessionID: input.sessionID,
            messageID: input.messageID,
            category: match.category,
            title: deriveTitle(part.text),
            body: part.text,
            tags: deriveTags(part.text),
            importance: match.confidence,
            source: 'session',
        });
    }
    return results;
}
166
/**
 * High-level: extract from a message by reading its parts from storage.
 */
export async function extractFromMessage(input) {
    return extractFromParts(input, await listParts(input.messageID));
}
@@ -0,0 +1,40 @@
1
/**
 * OpenCode Storage Readers
 *
 * Reads OpenCode's native JSON storage from ~/.local/share/opencode/storage/
 * with adapters for project, session, message, and part entities.
 */
import type { OCProject, OCSession, OCMessage, OCPart } from './types.js';
/**
 * Find the project record whose worktree matches `directory`.
 * The project hash is a SHA-1 of the worktree path.
 * Returns null when no project's worktree matches.
 */
export declare function findProject(directory: string): Promise<OCProject | null>;
/**
 * List all known projects.
 */
export declare function listProjects(): Promise<OCProject[]>;
/**
 * List sessions for a given project hash.
 */
export declare function listSessions(projectID: string): Promise<OCSession[]>;
/**
 * Get a single session by its ID within a project; null when absent.
 */
export declare function getSession(projectID: string, sessionID: string): Promise<OCSession | null>;
/**
 * List all messages in a session, sorted by creation time (ascending).
 */
export declare function listMessages(sessionID: string): Promise<OCMessage[]>;
/**
 * Get a single message by ID; null when absent.
 */
export declare function getMessage(sessionID: string, messageID: string): Promise<OCMessage | null>;
/**
 * List all parts for a given message, sorted by start time (ascending).
 */
export declare function listParts(messageID: string): Promise<OCPart[]>;
/**
 * Get a single part by ID; null when absent.
 */
export declare function getPart(messageID: string, partID: string): Promise<OCPart | null>;
@@ -0,0 +1,147 @@
1
+ /**
2
+ * OpenCode Storage Readers
3
+ *
4
+ * Reads OpenCode's native JSON storage from ~/.local/share/opencode/storage/
5
+ * with adapters for project, session, message, and part entities.
6
+ */
7
+ import { readFile, readdir, stat, realpath } from 'fs/promises';
8
+ import { join } from 'path';
9
+ import { homedir } from 'os';
10
+ // ── Storage base path ───────────────────────────────────────────
11
/** Root of OpenCode's on-disk JSON storage (XDG-style data directory). */
function getStorageBase() {
    const segments = ['.local', 'share', 'opencode', 'storage'];
    return join(homedir(), ...segments);
}
14
+ // ── Helpers ─────────────────────────────────────────────────────
15
/** Read and parse one JSON file; rejects when the file is missing or malformed. */
async function readJsonFile(filePath) {
    const contents = await readFile(filePath, 'utf-8');
    return JSON.parse(contents);
}
19
/**
 * Read every `*.json` file in `dirPath`, skipping files that cannot be read
 * or parsed. Returns [] when the directory itself cannot be listed.
 *
 * Files are read concurrently: each read is independent, so awaiting them
 * one-by-one (as before) only added latency on large directories.
 * Promise.all preserves directory-listing order, matching the original
 * sequential behaviour.
 */
async function readAllJsonInDir(dirPath) {
    let entries;
    try {
        entries = await readdir(dirPath);
    }
    catch {
        return []; // directory missing or unreadable
    }
    // Sentinel distinguishes "file failed to parse" from a file whose JSON
    // content is literally `null` (which must still be included).
    const UNREADABLE = Symbol('unreadable');
    const reads = entries
        .filter((name) => name.endsWith('.json'))
        .map((name) => readJsonFile(join(dirPath, name)).catch(() => UNREADABLE));
    const parsed = await Promise.all(reads);
    return parsed.filter((value) => value !== UNREADABLE);
}
39
/** True when `dirPath` exists and is a directory; false on any stat failure. */
async function dirExists(dirPath) {
    try {
        return (await stat(dirPath)).isDirectory();
    }
    catch {
        return false;
    }
}
48
+ // ── Project Reader ──────────────────────────────────────────────
49
/**
 * Find the project record whose worktree matches `directory`.
 * The project hash is a SHA-1 of the worktree path.
 *
 * Both sides are resolved through realpath so symlinks / trailing slashes
 * don't cause mismatches; when a path cannot be resolved (e.g. it does not
 * exist yet) the raw string is compared instead.
 */
export async function findProject(directory) {
    const projects = await readAllJsonInDir(join(getStorageBase(), 'project'));
    const wanted = await realpath(directory).catch(() => directory);
    for (const project of projects) {
        const worktree = await realpath(project.worktree).catch(() => project.worktree);
        if (worktree === wanted || project.worktree === directory) {
            return project;
        }
    }
    return null;
}
78
/**
 * List all known projects.
 */
export async function listProjects() {
    const projectDir = join(getStorageBase(), 'project');
    return readAllJsonInDir(projectDir);
}
84
+ // ── Session Reader ──────────────────────────────────────────────
85
/**
 * List sessions for a given project hash.
 */
export async function listSessions(projectID) {
    return readAllJsonInDir(join(getStorageBase(), 'session', projectID));
}
92
/**
 * Get a single session by its ID within a project; null when absent.
 */
export async function getSession(projectID, sessionID) {
    const filePath = join(getStorageBase(), 'session', projectID, `${sessionID}.json`);
    try {
        return await readJsonFile(filePath);
    }
    catch {
        return null;
    }
}
104
+ // ── Message Reader ──────────────────────────────────────────────
105
/**
 * List all messages in a session, sorted by creation time (ascending).
 */
export async function listMessages(sessionID) {
    const messages = await readAllJsonInDir(join(getStorageBase(), 'message', sessionID));
    messages.sort((a, b) => a.time.created - b.time.created);
    return messages;
}
113
/**
 * Get a single message by ID; null when absent.
 */
export async function getMessage(sessionID, messageID) {
    const filePath = join(getStorageBase(), 'message', sessionID, `${messageID}.json`);
    try {
        return await readJsonFile(filePath);
    }
    catch {
        return null;
    }
}
125
+ // ── Part Reader ─────────────────────────────────────────────────
126
/**
 * List all parts for a given message, sorted by start time (ascending).
 */
export async function listParts(messageID) {
    const partDir = join(getStorageBase(), 'part', messageID);
    // Defensive guard: readAllJsonInDir would also yield [] for a missing
    // directory, but the explicit check keeps the intent obvious.
    if (!(await dirExists(partDir))) {
        return [];
    }
    const parts = await readAllJsonInDir(partDir);
    parts.sort((a, b) => a.time.start - b.time.start);
    return parts;
}
136
/**
 * Get a single part by ID; null when absent.
 */
export async function getPart(messageID, partID) {
    const filePath = join(getStorageBase(), 'part', messageID, `${partID}.json`);
    try {
        return await readJsonFile(filePath);
    }
    catch {
        return null;
    }
}
@@ -0,0 +1,16 @@
1
/**
 * thinking-extractor.ts — Extracts learnings from "thinking" / reasoning blocks.
 *
 * OpenCode stores extended-thinking or chain-of-thought content in parts
 * with type "reasoning". These often contain high-signal insights about
 * decision making and problem solving that are worth capturing.
 */
import type { ExtractionInput, ExtractionResult, OCPart } from './types.js';
/**
 * Extract learnings specifically from reasoning / thinking parts.
 * Non-reasoning or too-short parts are ignored.
 */
export declare function extractFromThinkingParts(input: ExtractionInput, parts: OCPart[]): ExtractionResult[];
/**
 * High-level: extract thinking-based learnings from a message.
 */
export declare function extractThinkingFromMessage(input: ExtractionInput): Promise<ExtractionResult[]>;
@@ -0,0 +1,132 @@
1
+ /**
2
+ * thinking-extractor.ts — Extracts learnings from "thinking" / reasoning blocks.
3
+ *
4
+ * OpenCode stores extended-thinking or chain-of-thought content in parts
5
+ * with type "reasoning". These often contain high-signal insights about
6
+ * decision making and problem solving that are worth capturing.
7
+ */
8
+ import { listParts } from './storage-reader.js';
9
// Heuristic classification table for reasoning / extended-thinking text.
// Same scoring scheme as the plain-text rules: `weight` scales the hit ratio
// (matched patterns / total patterns) into a confidence score.
const THINKING_SIGNALS = [
    {
        category: 'decision',
        weight: 0.9,
        patterns: [
            /\blet me think about\b/i,
            /\bthe (better|right|correct) approach\b/i,
            /\bI('ll| will) go with\b/i,
            /\bweighing (the )?(options|trade-?offs)\b/i,
            /\boption [A-D1-4] (is|seems|looks)\b/i,
            /\bpros and cons\b/i,
        ],
    },
    {
        category: 'debugging',
        weight: 0.85,
        patterns: [
            /\bthe (issue|problem|bug) (is|was|seems)\b/i,
            /\broot cause\b/i,
            /\bthis (fails|breaks|errors) because\b/i,
            /\bI (notice|see|found) (that |the )?(error|issue|bug)\b/i,
            /\bstack trace (shows|indicates|reveals)\b/i,
        ],
    },
    {
        category: 'pattern',
        weight: 0.8,
        patterns: [
            /\bthe (standard|common|typical) (pattern|approach|way)\b/i,
            /\bbest practice\b/i,
            /\bfollow(ing)? the (same |existing )?(pattern|convention)\b/i,
            /\bthis is (a |the )?(standard|idiomatic)\b/i,
        ],
    },
    {
        category: 'context',
        weight: 0.7,
        patterns: [
            /\blooking at the (code|codebase|architecture|structure)\b/i,
            /\bthe (project|repo|codebase) (uses|follows|has)\b/i,
            /\bbased on (the |my )?understanding\b/i,
            /\bthe (current|existing) (setup|config|architecture)\b/i,
        ],
    },
    {
        category: 'procedure',
        weight: 0.75,
        patterns: [
            /\bfirst[\s\S]+then[\s\S]+finally\b/i,
            /\bstep \d/i,
            /\bthe (process|procedure|workflow) (is|involves)\b/i,
            /\bI need to:?\s*\n/i,
        ],
    },
];
64
+ // ────────────────────────────────────────────────────────────
65
+ // Analysis
66
+ // ────────────────────────────────────────────────────────────
67
// Reasoning text shorter than this rarely contains a transferable insight.
const MIN_THINKING_LENGTH = 120;
// Classifications below this confidence are treated as noise and dropped.
const MIN_THINKING_CONFIDENCE = 0.2;
69
/**
 * Score reasoning text against every signal group and return the strongest
 * match, or null when nothing matches.
 *
 * Confidence = (matching patterns / total patterns) * group weight, capped at 1.
 */
function classifyThinking(text) {
    let top = null;
    for (const signal of THINKING_SIGNALS) {
        const hits = signal.patterns.reduce((count, pattern) => (pattern.test(text) ? count + 1 : count), 0);
        if (hits === 0) {
            continue;
        }
        const confidence = Math.min(1, (hits / signal.patterns.length) * signal.weight);
        if (top === null || confidence > top.confidence) {
            top = { category: signal.category, confidence };
        }
    }
    return top;
}
86
/**
 * Derive a title for a thinking-based learning: prefer a conclusion-like
 * sentence, fall back to the first substantial one, cap at 120 chars.
 */
function deriveThinkingTitle(text) {
    const sentences = text.split(/[.!?]\s+/).filter((s) => s.trim().length > 15);
    const conclusionLike = /\b(so|therefore|conclusion|result|decided|approach)\b/i;
    const summary = sentences.find((s) => conclusionLike.test(s)) ?? sentences[0];
    if (!summary) {
        return 'Thinking insight';
    }
    const clean = summary.replace(/^[\s*#-]+/, '').trim();
    return clean.length > 120 ? `${clean.slice(0, 117)}...` : clean;
}
96
+ // ────────────────────────────────────────────────────────────
97
+ // Public API
98
+ // ────────────────────────────────────────────────────────────
99
/**
 * Extract learnings specifically from reasoning / thinking parts.
 */
export function extractFromThinkingParts(input, parts) {
    const results = [];
    for (const part of parts) {
        // Only substantial extended-thinking ("reasoning") parts qualify.
        if (part.type !== 'reasoning' || !part.text || part.text.length < MIN_THINKING_LENGTH) {
            continue;
        }
        const match = classifyThinking(part.text);
        if (!match || match.confidence < MIN_THINKING_CONFIDENCE) {
            continue;
        }
        results.push({
            sessionID: input.sessionID,
            messageID: input.messageID,
            category: match.category,
            title: deriveThinkingTitle(part.text),
            body: part.text,
            tags: ['thinking', 'reasoning'],
            // Thinking content tends to be high-signal: boost slightly, capped at 1.
            importance: Math.min(1, match.confidence * 1.1),
            source: 'thinking',
        });
    }
    return results;
}
126
/**
 * High-level: extract thinking-based learnings from a message.
 */
export async function extractThinkingFromMessage(input) {
    return extractFromThinkingParts(input, await listParts(input.messageID));
}