@iflow-mcp/multimail-dev-thinking-mcp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +106 -0
- package/dist/bootstrap.d.ts +2 -0
- package/dist/bootstrap.js +89 -0
- package/dist/db.d.ts +9 -0
- package/dist/db.js +190 -0
- package/dist/embed.d.ts +13 -0
- package/dist/embed.js +127 -0
- package/dist/extract.d.ts +2 -0
- package/dist/extract.js +58 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +63 -0
- package/dist/tools.d.ts +16 -0
- package/dist/tools.js +432 -0
- package/dist/types.d.ts +21 -0
- package/dist/types.js +18 -0
- package/package.json +1 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 MultiMail
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# thinking-mcp
|
|
2
|
+
|
|
3
|
+
MCP server that models how you think. Not what you know.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```json
|
|
8
|
+
{
|
|
9
|
+
"mcpServers": {
|
|
10
|
+
"thinking": {
|
|
11
|
+
"command": "npx",
|
|
12
|
+
"args": ["-y", "@multimail/thinking-mcp"],
|
|
13
|
+
"env": {
|
|
14
|
+
"ANTHROPIC_API_KEY": "sk-ant-...",
|
|
15
|
+
"VOYAGE_API_KEY": "pa-..."
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
Works with Claude Code, Claude Desktop, Cursor, Windsurf, and any MCP client.
|
|
23
|
+
|
|
24
|
+
## What this is
|
|
25
|
+
|
|
26
|
+
Every conversation you have contains cognitive patterns you don't notice. Decision rules you apply without naming them. Tensions between things you believe. Assumptions you've never tested.
|
|
27
|
+
|
|
28
|
+
This server extracts those patterns, stores them in a typed graph, and lets any AI agent query them over MCP. The agent can find contradictions in your thinking, bridge connections between domains you never linked, or predict how you'd approach a decision you haven't faced yet.
|
|
29
|
+
|
|
30
|
+
It is not a memory system. Memory stores what you said. This models how you decide.
|
|
31
|
+
|
|
32
|
+
## First run
|
|
33
|
+
|
|
34
|
+
On first use, the server walks you through a bootstrap conversation. It starts with a real decision you made recently and works backward to understand your heuristics, mental models, and assumptions. Takes about 20 minutes. After that, the extraction pipeline runs against your conversations to build the graph continuously.
|
|
35
|
+
|
|
36
|
+
The bootstrap tool returns questions for your agent to ask you. You answer naturally. Each answer gets classified and stored as typed nodes in the graph.
|
|
37
|
+
|
|
38
|
+
## How it works
|
|
39
|
+
|
|
40
|
+
When you capture text (or an agent does on your behalf), an LLM classifies each statement against an ordered checklist. Is this a decision rule? A framework for thinking? A tension between beliefs? A preference? The checklist forces specific types before falling through to the generic "idea" bucket.
|
|
41
|
+
|
|
42
|
+
Each node gets:
|
|
43
|
+
- A type (heuristic, mental_model, tension, value, assumption, preference, question, project, idea)
|
|
44
|
+
- An epistemic tag (assertion, hypothesis, speculation)
|
|
45
|
+
- A confidence score (strong, tentative, uncertain)
|
|
46
|
+
- An activation level that decays over time
|
|
47
|
+
|
|
48
|
+
Typed edges connect nodes: supports, contradicts, evolved_into, depends_on. This makes it a real graph, not a flat embedding store. Agents can traverse relationships, not just retrieve by similarity.
|
|
49
|
+
|
|
50
|
+
Activation decay means the graph stays current. Values decay slowly. Ideas decay fast. If you haven't revisited or reinforced a pattern in weeks, it fades. Validated heuristics stay hot.
|
|
51
|
+
|
|
52
|
+
Scoring uses Reciprocal Rank Fusion across vector similarity, keyword matching, and activation. Hub dampening prevents well-connected nodes from dominating every query.
|
|
53
|
+
|
|
54
|
+
## The extraction problem
|
|
55
|
+
|
|
56
|
+
You can tell an agent to "capture that thought" mid-conversation. It won't. Not reliably. Agents follow the task at hand and forget side quests.
|
|
57
|
+
|
|
58
|
+
The real path is a backend pipeline that processes your conversation transcripts on a schedule. Your conversations already contain the patterns. You just need something reading them after the fact.
|
|
59
|
+
|
|
60
|
+
The capture tool supports both modes. Pass a nodeType and it stores directly. Omit nodeType and it runs inline extraction to classify the text into properly typed nodes.
|
|
61
|
+
|
|
62
|
+
## Tools
|
|
63
|
+
|
|
64
|
+
| Tool | What it does | Side effects |
|
|
65
|
+
|------|-------------|-------------|
|
|
66
|
+
| `what_do_i_think` | Query your thinking on a topic | READ-ONLY (bumps activation) |
|
|
67
|
+
| `what_connects` | Find bridges between two domains | READ-ONLY |
|
|
68
|
+
| `what_tensions_exist` | Surface contradictions and weak spots | READ-ONLY |
|
|
69
|
+
| `where_am_i_uncertain` | Find low-confidence or untested patterns | READ-ONLY |
|
|
70
|
+
| `suggest_exploration` | Forgotten patterns near your current context | READ-ONLY |
|
|
71
|
+
| `how_would_user_decide` | Reconstruct reasoning for a new decision | READ-ONLY |
|
|
72
|
+
| `what_has_changed` | Timeline of how your thinking evolved | READ-ONLY |
|
|
73
|
+
| `capture` | Add a thought (with or without inline extraction) | WRITES |
|
|
74
|
+
| `correct` | Fix a node. Strongest learning signal. | WRITES |
|
|
75
|
+
| `record_outcome` | Track what happened after a decision | WRITES |
|
|
76
|
+
| `get_node` | Inspect a node with all its data | READ-ONLY |
|
|
77
|
+
| `get_neighbors` | 1-hop graph traversal from a node | READ-ONLY |
|
|
78
|
+
| `search_nodes` | Keyword search with optional type filter | READ-ONLY |
|
|
79
|
+
| `merge_nodes` | Combine duplicate nodes | DESTRUCTIVE |
|
|
80
|
+
| `archive_node` | Drop a node from results without deleting | WRITES |
|
|
81
|
+
| `bootstrap` | Guided first-run Q&A (5 phases) | WRITES |
|
|
82
|
+
|
|
83
|
+
## Configuration
|
|
84
|
+
|
|
85
|
+
| Variable | Required | Default | Description |
|
|
86
|
+
|----------|----------|---------|-------------|
|
|
87
|
+
| `ANTHROPIC_API_KEY` | Yes | | Claude Haiku for inline extraction |
|
|
88
|
+
| `VOYAGE_API_KEY` | Yes | | Voyage AI for embeddings |
|
|
89
|
+
| `THINKING_MCP_DB_PATH` | No | `~/.thinking-mcp/mind.db` | SQLite database location |
|
|
90
|
+
| `THINKING_MCP_EMBEDDING_PROVIDER` | No | `voyage` | `voyage`, `openai`, or `ollama` |
|
|
91
|
+
|
|
92
|
+
## What leaves your machine
|
|
93
|
+
|
|
94
|
+
Embedding text goes to Voyage AI (or your configured provider). Extraction text goes to Anthropic when capture is called without a nodeType. Everything else stays local in SQLite. No telemetry. No analytics.
|
|
95
|
+
|
|
96
|
+
## Limitations
|
|
97
|
+
|
|
98
|
+
The vector search is brute-force cosine over all stored embeddings. Fine for personal use up to maybe 100K nodes. Past that you would need a real vector index.
|
|
99
|
+
|
|
100
|
+
Extraction can hallucinate patterns from weak evidence. The prompt caps at 8 patterns per input and requires explicit evidence for "strong" confidence, but it is still an LLM reading your words and guessing what you meant.
|
|
101
|
+
|
|
102
|
+
The graph is only as good as the conversations you have. If you only talk about code, it will only model how you think about code.
|
|
103
|
+
|
|
104
|
+
---
|
|
105
|
+
|
|
106
|
+
Built by [multimail.dev](https://multimail.dev)
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import { db, persistDb } from "./db.js";
|
|
2
|
+
// Five-phase interview script for the first-run bootstrap. Each phase groups
// open-ended questions around one cognitive-pattern type (decision rules,
// heuristics, mental models, tensions, assumptions). The agent asks these
// verbatim and feeds each answer through the capture tool, where extraction
// classifies it into typed graph nodes.
const PHASES = [
    {
        name: "decisions",
        questions: [
            "Tell me about a decision you made in the last two weeks.",
            "What options did you reject?",
            "What tradeoff mattered most?",
            "What rule did you apply, even if you didn't name it at the time?",
        ],
    },
    {
        name: "heuristics",
        questions: [
            "When you're stuck on a problem, what's your first move?",
            "What decision rule do you apply that others might not?",
            "How do you decide what NOT to work on?",
        ],
    },
    {
        name: "mental_models",
        questions: [
            "What framework do you use repeatedly across different domains?",
            "How do you evaluate whether an idea is worth pursuing?",
        ],
    },
    {
        name: "tensions",
        questions: [
            "Where do two things you believe pull in opposite directions?",
            "What tradeoff do you keep revisiting without a clear answer?",
        ],
    },
    {
        name: "assumptions",
        questions: [
            "What do you assume is true that you haven't tested?",
            "What would change your mind about something you hold strongly?",
        ],
    },
];
|
|
42
|
+
/**
 * MCP bootstrap tool. When action === "status" (or the bootstrap is already
 * complete) it reports progress; otherwise it returns the current phase's
 * questions plus instructions for the agent, or marks the bootstrap complete
 * once every phase has been consumed.
 * @param {string} [action] - "status" for a progress report; anything else runs the flow.
 * @returns {string} pretty-printed JSON payload for the MCP client.
 */
export function toolBootstrap(action) {
    const phaseRow = db.exec("SELECT value FROM meta WHERE key = 'bootstrap_phase'");
    // Radix-10 parse with a NaN guard: a corrupt meta row falls back to
    // phase 0 instead of poisoning every later phase computation with NaN.
    const parsed = phaseRow.length > 0 && phaseRow[0].values.length > 0 ? Number.parseInt(phaseRow[0].values[0][0], 10) : 0;
    const currentPhase = Number.isNaN(parsed) ? 0 : parsed;
    const completeRow = db.exec("SELECT value FROM meta WHERE key = 'bootstrap_complete'");
    const isComplete = completeRow.length > 0 && completeRow[0].values[0]?.[0] === "true";
    if (action === "status" || isComplete) {
        const nodeCount = db.exec("SELECT COUNT(*) FROM nodes");
        const count = nodeCount[0]?.values[0]?.[0] || 0;
        return JSON.stringify({
            complete: isComplete,
            phase: currentPhase,
            total_phases: PHASES.length,
            node_count: count,
            message: isComplete
                ? `Bootstrap complete. ${count} nodes in the graph. Use capture to keep adding patterns from conversations.`
                : `Bootstrap in progress. Phase ${currentPhase + 1}/${PHASES.length}: ${PHASES[currentPhase]?.name || "done"}.`,
        }, null, 2);
    }
    if (currentPhase >= PHASES.length) {
        // All phases exhausted: latch the completion flag so future calls
        // take the status branch above.
        db.run("INSERT OR REPLACE INTO meta (key, value) VALUES ('bootstrap_complete', 'true')");
        persistDb();
        return JSON.stringify({ complete: true, message: "All phases done. Bootstrap complete." });
    }
    const phase = PHASES[currentPhase];
    return JSON.stringify({
        complete: false,
        phase: currentPhase + 1,
        phase_name: phase.name,
        total_phases: PHASES.length,
        questions: phase.questions,
        instructions: "Ask the user these questions one at a time. Feed each answer back through the capture tool (without nodeType, so extraction runs automatically). When done with all questions, call bootstrap again to advance to the next phase.",
        advance: `After capturing answers, call bootstrap with no arguments to move to phase ${currentPhase + 2}.`,
    }, null, 2);
}
|
|
76
|
+
/**
 * Advances the bootstrap to the next phase and persists the new pointer.
 * Marks the bootstrap complete when the last phase has been passed; otherwise
 * delegates to toolBootstrap() so the caller immediately gets the next
 * phase's questions.
 * @returns {string} JSON payload for the MCP client.
 */
export function advanceBootstrap() {
    const phaseRow = db.exec("SELECT value FROM meta WHERE key = 'bootstrap_phase'");
    // Radix-10 parse with a NaN guard, mirroring toolBootstrap.
    const parsed = phaseRow.length > 0 && phaseRow[0].values.length > 0 ? Number.parseInt(phaseRow[0].values[0][0], 10) : 0;
    const currentPhase = Number.isNaN(parsed) ? 0 : parsed;
    const nextPhase = currentPhase + 1;
    // The phase pointer is written on every advance (previously duplicated
    // in both branches), so it is hoisted above the completion check.
    db.run("INSERT OR REPLACE INTO meta (key, value) VALUES ('bootstrap_phase', ?)", [String(nextPhase)]);
    if (nextPhase >= PHASES.length) {
        db.run("INSERT OR REPLACE INTO meta (key, value) VALUES ('bootstrap_complete', 'true')");
        persistDb();
        return JSON.stringify({ complete: true, message: "Bootstrap complete. All phases done." });
    }
    persistDb();
    return toolBootstrap();
}
|
package/dist/db.d.ts
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
// Type surface of the sql.js-backed persistence layer (implemented in db.js).
/** sql.js ships no TypeScript declarations, so the handle stays untyped. */
export type Database = any;
/** Shared module-level database handle; assigned by initDb(). */
export declare let db: Database;
/** Loads (or creates) the SQLite file, applies the schema, and returns the handle. */
export declare function initDb(): Promise<Database>;
/** Atomically writes the in-memory database back to disk (write temp + rename). */
export declare function persistDb(): void;
/** Random v4 UUID used for primary keys. */
export declare function uuid(): string;
/** Current UTC timestamp formatted as 'YYYY-MM-DD HH:MM:SS'. */
export declare function now(): string;
/** Fetches one node row as a column->value object, or null when missing. */
export declare function getNode(nodeId: string): Record<string, unknown> | null;
/** Runs an arbitrary SELECT and returns rows as plain objects. */
export declare function queryNodes(sql: string): Record<string, unknown>[];
/** Raises a node's activation (capped at 1.0) and refreshes last_accessed. */
export declare function bumpActivation(nodeId: string): void;
|
package/dist/db.js
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
// @ts-ignore sql.js has no type declarations
|
|
2
|
+
import initSqlJs from "sql.js";
|
|
3
|
+
import * as fs from "fs";
|
|
4
|
+
import * as path from "path";
|
|
5
|
+
import * as yaml from "js-yaml";
|
|
6
|
+
import { DB_PATH } from "./types.js";
|
|
7
|
+
import { fileURLToPath } from "url";
|
|
8
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
9
|
+
const __dirname = path.dirname(__filename);
|
|
10
|
+
const SCHEMA = `
|
|
11
|
+
CREATE TABLE IF NOT EXISTS chunks (
|
|
12
|
+
id TEXT PRIMARY KEY,
|
|
13
|
+
text TEXT NOT NULL,
|
|
14
|
+
source_type TEXT NOT NULL DEFAULT 'capture'
|
|
15
|
+
CHECK (source_type IN ('capture', 'transcript', 'bootstrap')),
|
|
16
|
+
epistemic_tag TEXT NOT NULL DEFAULT 'assertion'
|
|
17
|
+
CHECK (epistemic_tag IN ('assertion', 'hypothesis', 'speculation', 'quoting', 'rejected')),
|
|
18
|
+
confidence TEXT NOT NULL DEFAULT 'tentative'
|
|
19
|
+
CHECK (confidence IN ('strong', 'tentative', 'uncertain')),
|
|
20
|
+
superseded_by TEXT REFERENCES chunks(id),
|
|
21
|
+
created_at TEXT NOT NULL DEFAULT (datetime('now'))
|
|
22
|
+
);
|
|
23
|
+
|
|
24
|
+
CREATE TABLE IF NOT EXISTS nodes (
|
|
25
|
+
id TEXT PRIMARY KEY,
|
|
26
|
+
type TEXT NOT NULL CHECK (type IN (
|
|
27
|
+
'idea', 'question', 'project',
|
|
28
|
+
'heuristic', 'value', 'mental_model', 'assumption', 'tension', 'preference'
|
|
29
|
+
)),
|
|
30
|
+
summary TEXT NOT NULL,
|
|
31
|
+
confidence TEXT NOT NULL DEFAULT 'tentative'
|
|
32
|
+
CHECK (confidence IN ('strong', 'tentative', 'uncertain')),
|
|
33
|
+
activation REAL NOT NULL DEFAULT 1.0,
|
|
34
|
+
first_seen TEXT NOT NULL DEFAULT (datetime('now')),
|
|
35
|
+
last_accessed TEXT NOT NULL DEFAULT (datetime('now')),
|
|
36
|
+
outcome_score REAL,
|
|
37
|
+
created_at TEXT NOT NULL DEFAULT (datetime('now'))
|
|
38
|
+
);
|
|
39
|
+
|
|
40
|
+
CREATE TABLE IF NOT EXISTS node_chunks (
|
|
41
|
+
node_id TEXT NOT NULL REFERENCES nodes(id),
|
|
42
|
+
chunk_id TEXT NOT NULL REFERENCES chunks(id),
|
|
43
|
+
PRIMARY KEY (node_id, chunk_id)
|
|
44
|
+
);
|
|
45
|
+
|
|
46
|
+
CREATE TABLE IF NOT EXISTS edges (
|
|
47
|
+
id TEXT PRIMARY KEY,
|
|
48
|
+
type TEXT NOT NULL CHECK (type IN (
|
|
49
|
+
'supports', 'contradicts', 'evolved_into', 'inspired_by',
|
|
50
|
+
'depends_on', 'overrides', 'learned_from', 'scoped_by',
|
|
51
|
+
'rejected', 'belongs_to', 'derived_from'
|
|
52
|
+
)),
|
|
53
|
+
source_node_id TEXT NOT NULL REFERENCES nodes(id),
|
|
54
|
+
target_node_id TEXT NOT NULL REFERENCES nodes(id),
|
|
55
|
+
weight REAL NOT NULL DEFAULT 1.0,
|
|
56
|
+
created_at TEXT NOT NULL DEFAULT (datetime('now'))
|
|
57
|
+
);
|
|
58
|
+
|
|
59
|
+
CREATE TABLE IF NOT EXISTS outcomes (
|
|
60
|
+
id TEXT PRIMARY KEY,
|
|
61
|
+
node_id TEXT NOT NULL REFERENCES nodes(id),
|
|
62
|
+
outcome TEXT NOT NULL,
|
|
63
|
+
score REAL NOT NULL,
|
|
64
|
+
created_at TEXT NOT NULL DEFAULT (datetime('now'))
|
|
65
|
+
);
|
|
66
|
+
|
|
67
|
+
CREATE TABLE IF NOT EXISTS embeddings (
|
|
68
|
+
chunk_id TEXT PRIMARY KEY REFERENCES chunks(id),
|
|
69
|
+
vector TEXT NOT NULL,
|
|
70
|
+
model TEXT NOT NULL,
|
|
71
|
+
dims INTEGER NOT NULL
|
|
72
|
+
);
|
|
73
|
+
|
|
74
|
+
CREATE TABLE IF NOT EXISTS meta (
|
|
75
|
+
key TEXT PRIMARY KEY,
|
|
76
|
+
value TEXT NOT NULL
|
|
77
|
+
);
|
|
78
|
+
|
|
79
|
+
CREATE TABLE IF NOT EXISTS framing (
|
|
80
|
+
id TEXT PRIMARY KEY,
|
|
81
|
+
question TEXT NOT NULL,
|
|
82
|
+
rationale TEXT,
|
|
83
|
+
priority INTEGER NOT NULL DEFAULT 0,
|
|
84
|
+
version INTEGER NOT NULL DEFAULT 1,
|
|
85
|
+
created_at TEXT NOT NULL DEFAULT (datetime('now')),
|
|
86
|
+
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
|
|
87
|
+
);
|
|
88
|
+
|
|
89
|
+
CREATE TABLE IF NOT EXISTS traces (
|
|
90
|
+
id TEXT PRIMARY KEY,
|
|
91
|
+
tool_name TEXT NOT NULL,
|
|
92
|
+
query_text TEXT,
|
|
93
|
+
node_ids_touched TEXT,
|
|
94
|
+
agent_name TEXT,
|
|
95
|
+
created_at TEXT NOT NULL DEFAULT (datetime('now'))
|
|
96
|
+
);
|
|
97
|
+
|
|
98
|
+
CREATE INDEX IF NOT EXISTS idx_nodes_type ON nodes(type);
|
|
99
|
+
CREATE INDEX IF NOT EXISTS idx_nodes_activation ON nodes(activation DESC);
|
|
100
|
+
CREATE INDEX IF NOT EXISTS idx_edges_source ON edges(source_node_id);
|
|
101
|
+
CREATE INDEX IF NOT EXISTS idx_edges_target ON edges(target_node_id);
|
|
102
|
+
CREATE INDEX IF NOT EXISTS idx_edges_type ON edges(type);
|
|
103
|
+
CREATE INDEX IF NOT EXISTS idx_outcomes_node ON outcomes(node_id);
|
|
104
|
+
CREATE INDEX IF NOT EXISTS idx_framing_priority ON framing(priority);
|
|
105
|
+
`;
|
|
106
|
+
/** Creates the parent directory of filePath (and any missing ancestors). */
function ensureDir(filePath) {
    const parent = path.dirname(filePath);
    if (fs.existsSync(parent)) {
        return;
    }
    fs.mkdirSync(parent, { recursive: true });
}
|
|
111
|
+
// Module-level handle; assigned exactly once by initDb() and shared by every
// helper in this file and by the tools layer.
export let db;
/**
 * Loads the SQLite database from DB_PATH (or creates a fresh in-memory one),
 * applies the idempotent schema, seeds default framing directives, and
 * persists the result so the file exists after first run.
 * @returns {Promise<Database>} the shared sql.js handle.
 */
export async function initDb() {
    const SQL = await initSqlJs();
    ensureDir(DB_PATH);
    // Reload an existing database file into memory, or start empty.
    if (fs.existsSync(DB_PATH)) {
        const buf = fs.readFileSync(DB_PATH);
        db = new SQL.Database(buf);
    }
    else {
        db = new SQL.Database();
    }
    db.run("PRAGMA foreign_keys = ON;");
    // Safe to re-run: every statement in SCHEMA is CREATE ... IF NOT EXISTS.
    db.run(SCHEMA);
    seedDefaultFraming();
    persistDb();
    return db;
}
|
|
128
|
+
/**
 * Seeds framing directives from framing.yaml if the framing table is empty.
 * framing.yaml ships with starter directives — users replace them with their own.
 * No-op when directives already exist, when no framing.yaml can be found, or
 * when the file parses to an empty directive list.
 */
function seedDefaultFraming() {
    const existing = db.exec("SELECT COUNT(*) FROM framing");
    const count = existing.length > 0 ? existing[0].values[0][0] : 0;
    if (count > 0)
        return;
    // Packaged layout (dist/../framing.yaml) or repo layout (dist/../../framing.yaml).
    const candidates = [
        path.resolve(__dirname, "..", "framing.yaml"),
        path.resolve(__dirname, "..", "..", "framing.yaml"),
    ];
    const yamlPath = candidates.find((p) => fs.existsSync(p));
    if (!yamlPath) {
        console.warn("framing.yaml not found — use seed_framing to add directives.");
        return;
    }
    const raw = fs.readFileSync(yamlPath, "utf-8");
    const parsed = yaml.load(raw);
    if (!parsed?.directives?.length)
        return;
    // Directive ids come from the YAML file; rationale and version are optional.
    for (const d of parsed.directives) {
        db.run("INSERT INTO framing (id, question, rationale, priority, version) VALUES (?, ?, ?, ?, ?)", [d.id, d.question, d.rationale ?? null, d.priority, d.version ?? 1]);
    }
}
|
|
154
|
+
/**
 * Writes the in-memory database to DB_PATH atomically: serialize, write to a
 * sibling temp file, then rename over the destination so a crash mid-write
 * never leaves a truncated database on disk.
 */
export function persistDb() {
    ensureDir(DB_PATH);
    const snapshot = Buffer.from(db.export());
    const tmpPath = `${DB_PATH}.tmp`;
    fs.writeFileSync(tmpPath, snapshot);
    fs.renameSync(tmpPath, DB_PATH);
}
|
|
162
|
+
export function uuid() {
|
|
163
|
+
return crypto.randomUUID();
|
|
164
|
+
}
|
|
165
|
+
/** Current UTC timestamp as 'YYYY-MM-DD HH:MM:SS' (SQLite datetime format). */
export function now() {
    const iso = new Date().toISOString();
    return `${iso.slice(0, 10)} ${iso.slice(11, 19)}`;
}
|
|
168
|
+
/**
 * Fetches a single node row as a column->value object, or null when absent.
 * Uses a bound parameter instead of string interpolation, which both avoids
 * SQL injection through the node id and handles ids containing quotes.
 * @param {string} nodeId
 * @returns {Record<string, unknown> | null}
 */
export function getNode(nodeId) {
    const rows = db.exec("SELECT * FROM nodes WHERE id = ?", [nodeId]);
    if (rows.length === 0 || rows[0].values.length === 0)
        return null;
    const cols = rows[0].columns;
    const vals = rows[0].values[0];
    const obj = {};
    cols.forEach((c, i) => { obj[c] = vals[i]; });
    return obj;
}
|
|
178
|
+
/**
 * Runs an arbitrary SELECT and returns each row as a plain column->value
 * object. Returns an empty array when the query yields no rows.
 * @param {string} sql - trusted, caller-built SELECT statement.
 */
export function queryNodes(sql) {
    const result = db.exec(sql);
    if (result.length === 0)
        return [];
    const { columns, values } = result[0];
    return values.map((row) => Object.fromEntries(columns.map((col, i) => [col, row[i]])));
}
|
|
188
|
+
/**
 * Reinforces a node on access: raises activation by 0.1 (capped at 1.0) and
 * refreshes last_accessed. Bound parameters replace string interpolation,
 * closing the SQL-injection hole around nodeId.
 * @param {string} nodeId
 */
export function bumpActivation(nodeId) {
    db.run("UPDATE nodes SET activation = MIN(activation + 0.1, 1.0), last_accessed = ? WHERE id = ?", [now(), nodeId]);
}
|
package/dist/embed.d.ts
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
// Type surface of the embedding layer (implemented in embed.js).
/** Name of the model that produced the most recent embedding batch. */
export declare let embeddingModel: string;
/** Dimensionality recorded from the first successful embedding batch. */
export declare let embeddingDims: number;
/** Embeds a batch of texts via the configured provider; throws on API errors. */
export declare function embed(texts: string[]): Promise<number[][]>;
/** Embeds one text; resolves null instead of throwing on failure. */
export declare function embedOne(text: string): Promise<number[] | null>;
/** True when the vector matches the recorded dimensionality (or none is recorded yet). */
export declare function validateDims(vector: number[]): boolean;
/** Cosine similarity; 0 when either vector has zero magnitude. */
export declare function cosine(a: number[], b: number[]): number;
/** Brute-force cosine search over all stored embeddings; topK defaults to 20. */
export declare function vectorSearch(queryVec: number[], topK?: number): {
    chunkId: string;
    score: number;
}[];
/** Reciprocal Rank Fusion of several score maps (k = 60), optionally weighted. */
export declare function rrfFuse(rankings: Map<string, number>[], weights?: number[]): Map<string, number>;
/** Applies per-type exponential time decay to an activation value (floor 0.01). */
export declare function decayedActivation(activation: number, lastAccessed: string, nodeType: string): number;
/** Divides score by log2(edge count) for nodes with more than 10 edges. */
export declare function hubDampen(nodeId: string, score: number): number;
|
package/dist/embed.js
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import { EMBEDDING_PROVIDER, VOYAGE_API_KEY, OPENAI_API_KEY, OLLAMA_URL } from "./types.js";
|
|
2
|
+
import { db } from "./db.js";
|
|
3
|
+
import { DECAY_RATES } from "./types.js";
|
|
4
|
+
export let embeddingModel = "";
|
|
5
|
+
export let embeddingDims = 0;
|
|
6
|
+
export async function embed(texts) {
|
|
7
|
+
if (EMBEDDING_PROVIDER === "voyage") {
|
|
8
|
+
if (!VOYAGE_API_KEY)
|
|
9
|
+
throw new Error("VOYAGE_API_KEY required for voyage embeddings");
|
|
10
|
+
const resp = await fetch("https://api.voyageai.com/v1/embeddings", {
|
|
11
|
+
method: "POST",
|
|
12
|
+
headers: { "Authorization": `Bearer ${VOYAGE_API_KEY}`, "Content-Type": "application/json" },
|
|
13
|
+
body: JSON.stringify({ model: "voyage-3-lite", input: texts, input_type: "document" }),
|
|
14
|
+
});
|
|
15
|
+
if (!resp.ok)
|
|
16
|
+
throw new Error(`Voyage API error: ${resp.status} ${await resp.text()}`);
|
|
17
|
+
const json = await resp.json();
|
|
18
|
+
const vectors = json.data.map(d => d.embedding);
|
|
19
|
+
if (vectors.length > 0) {
|
|
20
|
+
embeddingModel = "voyage-3-lite";
|
|
21
|
+
embeddingDims = vectors[0].length;
|
|
22
|
+
}
|
|
23
|
+
return vectors;
|
|
24
|
+
}
|
|
25
|
+
if (EMBEDDING_PROVIDER === "openai") {
|
|
26
|
+
if (!OPENAI_API_KEY)
|
|
27
|
+
throw new Error("OPENAI_API_KEY required for openai embeddings");
|
|
28
|
+
const resp = await fetch("https://api.openai.com/v1/embeddings", {
|
|
29
|
+
method: "POST",
|
|
30
|
+
headers: { "Authorization": `Bearer ${OPENAI_API_KEY}`, "Content-Type": "application/json" },
|
|
31
|
+
body: JSON.stringify({ model: "text-embedding-3-small", input: texts }),
|
|
32
|
+
});
|
|
33
|
+
if (!resp.ok)
|
|
34
|
+
throw new Error(`OpenAI API error: ${resp.status} ${await resp.text()}`);
|
|
35
|
+
const json = await resp.json();
|
|
36
|
+
const vectors = json.data.map(d => d.embedding);
|
|
37
|
+
if (vectors.length > 0) {
|
|
38
|
+
embeddingModel = "text-embedding-3-small";
|
|
39
|
+
embeddingDims = vectors[0].length;
|
|
40
|
+
}
|
|
41
|
+
return vectors;
|
|
42
|
+
}
|
|
43
|
+
if (EMBEDDING_PROVIDER === "ollama") {
|
|
44
|
+
const vectors = [];
|
|
45
|
+
for (const text of texts) {
|
|
46
|
+
const resp = await fetch(`${OLLAMA_URL}/api/embeddings`, {
|
|
47
|
+
method: "POST",
|
|
48
|
+
headers: { "Content-Type": "application/json" },
|
|
49
|
+
body: JSON.stringify({ model: "nomic-embed-text", prompt: text }),
|
|
50
|
+
});
|
|
51
|
+
if (!resp.ok)
|
|
52
|
+
throw new Error(`Ollama error: ${resp.status}`);
|
|
53
|
+
const json = await resp.json();
|
|
54
|
+
vectors.push(json.embedding);
|
|
55
|
+
}
|
|
56
|
+
if (vectors.length > 0) {
|
|
57
|
+
embeddingModel = "nomic-embed-text";
|
|
58
|
+
embeddingDims = vectors[0].length;
|
|
59
|
+
}
|
|
60
|
+
return vectors;
|
|
61
|
+
}
|
|
62
|
+
throw new Error(`Unknown embedding provider: ${EMBEDDING_PROVIDER}`);
|
|
63
|
+
}
|
|
64
|
+
/**
 * Embeds a single text. Failures are logged and surfaced as null rather than
 * thrown, so callers can degrade gracefully when the embedding API is down.
 * @param {string} text
 * @returns {Promise<number[] | null>}
 */
export async function embedOne(text) {
    try {
        const [vector] = await embed([text]);
        return vector ?? null;
    }
    catch (e) {
        console.error("Embedding error:", e);
        return null;
    }
}
|
|
74
|
+
/**
 * True when the vector's length matches the recorded embedding dimensionality.
 * Before any embedding has been produced (embeddingDims === 0) every vector
 * is accepted.
 */
export function validateDims(vector) {
    return embeddingDims === 0 || vector.length === embeddingDims;
}
|
|
79
|
+
/**
 * Cosine similarity of two equal-length vectors. Returns 0 when either
 * vector has zero magnitude instead of dividing by zero.
 * @param {number[]} a
 * @param {number[]} b
 * @returns {number} similarity in [-1, 1]
 */
export function cosine(a, b) {
    let dot = 0;
    let normA = 0;
    let normB = 0;
    a.forEach((ai, i) => {
        const bi = b[i];
        dot += ai * bi;
        normA += ai * ai;
        normB += bi * bi;
    });
    const denom = Math.sqrt(normA) * Math.sqrt(normB);
    if (denom === 0)
        return 0;
    return dot / denom;
}
|
|
89
|
+
/**
 * Brute-force cosine search over every stored embedding (acceptable at
 * personal-use scale; see README "Limitations"). Vectors whose
 * dimensionality differs from the query are skipped.
 * @param {number[]} queryVec
 * @param {number} [topK=20]
 * @returns {{chunkId: string, score: number}[]} best matches, highest first.
 */
export function vectorSearch(queryVec, topK = 20) {
    const rows = db.exec("SELECT chunk_id, vector FROM embeddings");
    if (rows.length === 0 || rows[0].values.length === 0)
        return [];
    const scored = rows[0].values
        .map(([chunkId, rawVec]) => ({ chunkId, vec: JSON.parse(rawVec) }))
        .filter(({ vec }) => vec.length === queryVec.length)
        .map(({ chunkId, vec }) => ({ chunkId, score: cosine(queryVec, vec) }));
    scored.sort((x, y) => y.score - x.score);
    return scored.slice(0, topK);
}
|
|
104
|
+
/**
 * Reciprocal Rank Fusion: merges several score maps into one fused map.
 * Each input list is sorted by descending score and every id contributes
 * weight / (60 + rank + 1), so agreement across lists beats a single high score.
 * @param {Map<string, number>[]} rankings
 * @param {number[]} [weights] - per-list weights, defaulting to 1.
 * @returns {Map<string, number>} fused id -> score.
 */
export function rrfFuse(rankings, weights) {
    const K = 60;
    const fused = new Map();
    rankings.forEach((ranking, listIdx) => {
        const weight = weights?.[listIdx] ?? 1;
        const ordered = [...ranking.entries()].sort((x, y) => y[1] - x[1]);
        ordered.forEach(([id], rank) => {
            const previous = fused.get(id) ?? 0;
            fused.set(id, previous + weight / (K + rank + 1));
        });
    });
    return fused;
}
|
|
116
|
+
/**
 * Exponential time decay of an activation value. The per-day retention rate
 * comes from DECAY_RATES keyed by node type (defaulting to 0.95), and the
 * result is floored at 0.01 so nodes never fully vanish from scoring.
 * @param {number} activation - current stored activation.
 * @param {string} lastAccessed - timestamp parseable by Date.
 * @param {string} nodeType - key into DECAY_RATES.
 */
export function decayedActivation(activation, lastAccessed, nodeType) {
    const msPerDay = 86400000;
    const ageDays = (Date.now() - new Date(lastAccessed).getTime()) / msPerDay;
    const dailyRate = DECAY_RATES[nodeType] || 0.95;
    const decayed = activation * Math.pow(dailyRate, ageDays);
    return Math.max(decayed, 0.01);
}
|
|
121
|
+
/**
 * Penalizes highly connected hub nodes so they don't dominate every query:
 * a node with more than 10 edges has its score divided by log2(edge count).
 * Bound parameters replace string interpolation, closing the SQL-injection
 * hole around nodeId.
 * @param {string} nodeId
 * @param {number} score
 * @returns {number} possibly dampened score.
 */
export function hubDampen(nodeId, score) {
    const result = db.exec("SELECT COUNT(*) FROM edges WHERE source_node_id = ? OR target_node_id = ?", [nodeId, nodeId]);
    const edgeCount = result[0]?.values[0]?.[0] || 0;
    if (edgeCount > 10)
        return score / Math.log2(edgeCount);
    return score;
}
|
package/dist/extract.js
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import { ANTHROPIC_API_KEY, VALID_NODE_TYPES, VALID_CONFIDENCE, VALID_EPISTEMIC_TAGS } from "./types.js";
|
|
2
|
+
const EXTRACTION_PROMPT = `You are extracting cognitive patterns from text. Extract ONLY patterns that reveal how the user thinks, decides, or evaluates.
|
|
3
|
+
|
|
4
|
+
For each pattern, output a JSON object:
|
|
5
|
+
- "text": The pattern in the user's voice
|
|
6
|
+
- "type": One of: heuristic, mental_model, preference, value, assumption, tension, question, project, idea
|
|
7
|
+
- "confidence": strong | tentative | uncertain
|
|
8
|
+
- "epistemic": assertion | hypothesis | speculation
|
|
9
|
+
|
|
10
|
+
Type selection, work through IN ORDER (do NOT default to "idea"):
|
|
11
|
+
1. Decision rule or "when X, do Y"? -> heuristic
|
|
12
|
+
2. Framework for thinking about a domain? -> mental_model
|
|
13
|
+
3. Conflict or tradeoff between held beliefs? -> tension
|
|
14
|
+
4. What the user prefers, likes, or chooses? -> preference
|
|
15
|
+
5. Core principle or what they care about? -> value
|
|
16
|
+
6. Something taken as given that could be wrong? -> assumption
|
|
17
|
+
7. Active question being investigated? -> question
|
|
18
|
+
8. Ongoing effort with a goal? -> project
|
|
19
|
+
9. ONLY if none above -> idea
|
|
20
|
+
|
|
21
|
+
Rules: 0-8 patterns max. Quality over quantity. Corrections are strongest signal. "idea" should be RARE.
|
|
22
|
+
Output a JSON array. [] if none found.
|
|
23
|
+
|
|
24
|
+
Text to analyze:
|
|
25
|
+
`;
|
|
26
|
+
/**
 * Sends text to Claude Haiku and parses the returned JSON array of patterns.
 * Best-effort by design: returns [] when no API key is configured, on HTTP
 * errors, when no JSON array is found in the reply, or on any thrown error —
 * callers treat extraction as optional enrichment, never a hard failure.
 * @param {string} text - raw user text to analyze.
 * @returns {Promise<Array<{text: string, type: string, confidence?: string, epistemic?: string}>>}
 */
export async function extractPatterns(text) {
    if (!ANTHROPIC_API_KEY)
        return [];
    try {
        const resp = await fetch("https://api.anthropic.com/v1/messages", {
            method: "POST",
            headers: {
                "x-api-key": ANTHROPIC_API_KEY,
                "content-type": "application/json",
                "anthropic-version": "2023-06-01",
            },
            body: JSON.stringify({
                model: "claude-haiku-4-5-20251001",
                max_tokens: 1024,
                messages: [{ role: "user", content: EXTRACTION_PROMPT + text }],
            }),
        });
        if (!resp.ok)
            return [];
        const json = await resp.json();
        const content = json.content?.[0]?.text || "";
        // Grab the widest [...] span — the model may wrap the array in prose.
        const match = content.match(/\[[\s\S]*\]/);
        if (!match)
            return [];
        const parsed = JSON.parse(match[0]);
        // Drop entries missing required fields or using values outside the
        // closed vocabularies; absent confidence/epistemic fall back to the
        // schema defaults ('tentative' / 'assertion') downstream.
        return parsed.filter(p => p.text && p.type && VALID_NODE_TYPES.has(p.type) &&
            VALID_CONFIDENCE.has(p.confidence || "tentative") &&
            VALID_EPISTEMIC_TAGS.has(p.epistemic || "assertion"));
    }
    catch {
        return [];
    }
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
// thinking-mcp: MCP server that models how you think
|
|
3
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
4
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
5
|
+
import { z } from "zod";
|
|
6
|
+
import { initDb, db, uuid, persistDb } from "./db.js";
|
|
7
|
+
import { toolPing, toolCapture, toolWhatDoIThink, toolWhatConnects, toolWhatTensionsExist, toolWhereAmIUncertain, toolSuggestExploration, toolHowWouldUserDecide, toolWhatHasChanged, toolCorrect, toolRecordOutcome, toolGetNode, toolGetNeighbors, toolSearchNodes, toolMergeNodes, toolArchiveNode } from "./tools.js";
|
|
8
|
+
import { toolBootstrap, advanceBootstrap } from "./bootstrap.js";
|
|
9
|
+
// Entry point: open the DB, register every MCP tool, then serve over stdio.
// Tool handlers close over `db`/`persistDb` from db.js; registration order is
// the order clients see the tools in.
async function main() {
    await initDb();
    const server = new McpServer({ name: "thinking-mcp", version: "0.2.0" });
    // --- get_framing (call FIRST on every substantive query) ---
    server.tool("get_framing", "Get framing directives — lens-shaping questions that must be answered before any substantive response. CALL THIS FIRST on any substantive query, unconditionally. Framing is architecturally separated from knowledge retrieval: not activation-ranked, not similarity-matched — loaded verbatim, always. Answer each question visibly in your response, or state explicitly which you are skipping and why. READ-ONLY but traces every invocation. Hard-capped at 5.", {}, () => {
        const rows = db.exec("SELECT id, question, rationale, priority, version FROM framing ORDER BY priority ASC, id ASC");
        const directives = rows.length > 0 ? rows[0].values.map((v) => ({
            id: v[0], question: v[1], rationale: v[2], priority: v[3], version: v[4],
        })) : [];
        // Every invocation is traced, even though the read itself is read-only.
        const traceId = uuid();
        db.run(`INSERT INTO traces (id, tool_name, node_ids_touched) VALUES (?, 'get_framing', ?)`, [traceId, JSON.stringify(directives.map((d) => d.id))]);
        persistDb();
        const result = directives.length === 0
            ? { directives: [], warning: "No framing directives configured. Use seed_framing or add a framing.yaml file.", trace_id: traceId }
            : { directives, count: directives.length, protocol: "Answer each question visibly in your response, or state explicitly which you are skipping and why.", trace_id: traceId };
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // --- seed_framing ---
    // Destructive: wipes the framing table before inserting (max 5, enforced by zod).
    server.tool("seed_framing", "Seed or replace all framing directives. Hard-capped at 5. DESTRUCTIVE: replaces all existing directives. Each directive needs id, question, priority. Write your own — these are the questions that shape how you think before acting.", { directives: z.array(z.object({ id: z.string(), question: z.string(), rationale: z.string().optional(), priority: z.number().default(0) })).max(5).describe("Array of framing directives (max 5)") }, ({ directives }) => {
        db.run("DELETE FROM framing");
        for (const d of directives) {
            db.run("INSERT INTO framing (id, question, rationale, priority, version) VALUES (?, ?, ?, ?, 1)", [d.id, d.question, d.rationale ?? null, d.priority]);
        }
        persistDb();
        return { content: [{ type: "text", text: JSON.stringify({ seeded: directives.length, ids: directives.map(d => d.id) }) }] };
    });
    // --- thin wrappers: each delegates to a tool* function from tools.js ---
    server.tool("ping", "Health check. Returns node count, chunk count, and DB path. READ-ONLY.", {}, () => ({ content: [{ type: "text", text: toolPing() }] }));
    server.tool("capture", "Capture a thought into the cognitive graph. If nodeType is provided, stores as that type directly. If omitted, runs inline extraction to classify the text into properly typed nodes. WRITES to SQLite. Does NOT create edges between nodes.", {
        text: z.string().describe("The thought, observation, or raw text to capture"),
        nodeType: z.string().optional().describe("Optional. Skips extraction if provided. Valid: idea, question, heuristic, value, mental_model, assumption, tension, preference, project"),
        epistemicTag: z.string().default("assertion").describe("assertion, hypothesis, speculation, quoting, rejected"),
        confidence: z.string().default("tentative").describe("strong, tentative, uncertain"),
    }, async ({ text, nodeType, epistemicTag, confidence }) => ({ content: [{ type: "text", text: await toolCapture(text, nodeType, epistemicTag, confidence) }] }));
    server.tool("what_do_i_think", "Query your thinking on a topic. Returns nodes ranked by relevance and activation. READ-ONLY (but bumps activation on accessed nodes). Use this before capture to check if a pattern already exists.", { topic: z.string().describe("The topic to explore your thinking on") }, async ({ topic }) => ({ content: [{ type: "text", text: await toolWhatDoIThink(topic) }] }));
    server.tool("what_connects", "Find unexpected bridges between two domains. Scores every process node against both domains and returns those relevant to both. READ-ONLY. Hub dampening applied.", { domain_a: z.string().describe("First domain"), domain_b: z.string().describe("Second domain") }, async ({ domain_a, domain_b }) => ({ content: [{ type: "text", text: await toolWhatConnects(domain_a, domain_b) }] }));
    server.tool("what_tensions_exist", "Surface contradictions and weak spots. Finds tension nodes, contradicts edges, and low-confidence assumptions. READ-ONLY. Optional topic filter.", { topic: z.string().optional().describe("Optional topic to focus on") }, async ({ topic }) => ({ content: [{ type: "text", text: await toolWhatTensionsExist(topic) }] }));
    server.tool("where_am_i_uncertain", "Find areas of low confidence or untested patterns. READ-ONLY. Optional domain filter.", { domain: z.string().optional().describe("Optional domain to focus on") }, async ({ domain }) => ({ content: [{ type: "text", text: await toolWhereAmIUncertain(domain) }] }));
    server.tool("suggest_exploration", "Surface forgotten patterns relevant to your current context. Finds low-activation nodes semantically close to what you're working on. READ-ONLY. Use for creative cross-pollination.", { current_context: z.string().describe("What you're currently working on or thinking about") }, async ({ current_context }) => ({ content: [{ type: "text", text: await toolSuggestExploration(current_context) }] }));
    server.tool("how_would_user_decide", "Reason through a decision using captured heuristics, values, and mental models. Returns relevant patterns grouped by type. READ-ONLY. Does NOT make the decision.", { context: z.string().describe("The decision context"), options: z.string().optional().describe("Comma-separated options being considered") }, async ({ context, options }) => ({ content: [{ type: "text", text: await toolHowWouldUserDecide(context, options) }] }));
    server.tool("what_has_changed", "Show how your thinking on a topic has evolved over time. Returns a timeline of nodes, evolution chains, and outcome history. READ-ONLY.", { domain: z.string().describe("The domain to check evolution on") }, async ({ domain }) => ({ content: [{ type: "text", text: await toolWhatHasChanged(domain) }] }));
    server.tool("correct", "Correct a node in the graph. Updates summary, supersedes old chunks, boosts activation +2.0. Strongest learning signal. WRITES to SQLite.", { node_id: z.string().describe("The node ID to correct"), new_summary: z.string().describe("The corrected summary"), new_confidence: z.string().optional().describe("Updated confidence: strong, tentative, uncertain") }, async ({ node_id, new_summary, new_confidence }) => ({ content: [{ type: "text", text: await toolCorrect(node_id, new_summary, new_confidence) }] }));
    server.tool("record_outcome", "Record what happened after a decision influenced by a node. Writes to outcomes table and updates the node's track record. WRITES to SQLite.", { node_id: z.string().describe("The node that influenced the decision"), outcome: z.string().describe("What happened"), score: z.number().describe("Outcome quality from -1.0 (terrible) to 1.0 (excellent)") }, ({ node_id, outcome, score }) => ({ content: [{ type: "text", text: toolRecordOutcome(node_id, outcome, score) }] }));
    server.tool("get_node", "Get a node by ID with all chunks, edges, and outcome history. READ-ONLY. Use before correcting, merging, or archiving.", { node_id: z.string().describe("The node ID") }, ({ node_id }) => ({ content: [{ type: "text", text: toolGetNode(node_id) }] }));
    server.tool("get_neighbors", "Get all nodes connected to a node via edges (1-hop). READ-ONLY. Returns the node, its neighbors, and connecting edges.", { node_id: z.string().describe("The node ID") }, ({ node_id }) => ({ content: [{ type: "text", text: toolGetNeighbors(node_id) }] }));
    server.tool("search_nodes", "Keyword search across node summaries with optional type filter. Up to 20 results ranked by activation. READ-ONLY. For semantic search, use what_do_i_think instead.", { query: z.string().describe("Keyword to search for"), nodeType: z.string().optional().describe("Optional type filter") }, ({ query, nodeType }) => ({ content: [{ type: "text", text: toolSearchNodes(query, nodeType) }] }));
    server.tool("merge_nodes", "Merge two duplicate nodes. Keeps the first, transfers chunks/edges/outcomes from the second, deletes the second. DESTRUCTIVE. Use get_node on both first.", { keep_id: z.string().describe("The node ID to keep"), merge_id: z.string().describe("The node ID to merge and delete") }, ({ keep_id, merge_id }) => ({ content: [{ type: "text", text: toolMergeNodes(keep_id, merge_id) }] }));
    server.tool("archive_node", "Set a node's activation to 0. It stays in the graph but drops out of results. WRITES. Prefer correct if the node needs updating rather than removal.", { node_id: z.string().describe("The node ID to archive") }, ({ node_id }) => ({ content: [{ type: "text", text: toolArchiveNode(node_id) }] }));
    // --- bootstrap: 'start'/'status' dispatch to toolBootstrap, anything else advances phase ---
    server.tool("bootstrap", "Guided first-run experience. Returns questions for the agent to ask the user across 5 phases. Feed answers back through capture (without nodeType). Call again with no args to advance phases. Tracks phase state in meta table.", { action: z.string().optional().describe("'start' to begin or resume, 'status' to check progress, omit to advance to next phase") }, ({ action }) => {
        const text = action === "start" || action === "status" ? toolBootstrap(action) : advanceBootstrap();
        return { content: [{ type: "text", text }] };
    });
    // Serve over stdio (standard MCP transport); connect() blocks until shutdown.
    const transport = new StdioServerTransport();
    await server.connect(transport);
}
// Fail loudly: log and exit non-zero so the MCP host sees a dead server.
main().catch(e => { console.error("Fatal:", e); process.exit(1); });
|
package/dist/tools.d.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
// Public surface of tools.js, one function per MCP tool.
// Promise-returning functions embed text (async) before querying; plain
// string-returning functions are synchronous SQLite reads/writes.
export declare function toolPing(): string;
export declare function toolCapture(text: string, nodeType?: string, epistemicTag?: string, confidence?: string): Promise<string>;
// Query/analysis tools (read-mostly).
export declare function toolWhatDoIThink(topic: string): Promise<string>;
export declare function toolWhatConnects(domainA: string, domainB: string): Promise<string>;
export declare function toolWhatTensionsExist(topic?: string): Promise<string>;
export declare function toolWhereAmIUncertain(domain?: string): Promise<string>;
export declare function toolSuggestExploration(currentContext: string): Promise<string>;
export declare function toolHowWouldUserDecide(context: string, options?: string): Promise<string>;
export declare function toolWhatHasChanged(domain: string): Promise<string>;
// Learning-signal tools (write to nodes/chunks/outcomes).
export declare function toolCorrect(nodeId: string, newSummary: string, newConfidence?: string): Promise<string>;
export declare function toolRecordOutcome(nodeId: string, outcome: string, score: number): string;
// Graph maintenance tools.
export declare function toolGetNode(nodeId: string): string;
export declare function toolGetNeighbors(nodeId: string): string;
export declare function toolSearchNodes(query: string, nodeType?: string): string;
export declare function toolMergeNodes(keepId: string, mergeId: string): string;
export declare function toolArchiveNode(nodeId: string): string;
|
package/dist/tools.js
ADDED
|
@@ -0,0 +1,432 @@
|
|
|
1
|
+
import { VALID_NODE_TYPES } from "./types.js";
|
|
2
|
+
import { db, persistDb, uuid, now, getNode, queryNodes, bumpActivation } from "./db.js";
|
|
3
|
+
import { embedOne, validateDims, embeddingModel, embeddingDims, vectorSearch, cosine, rrfFuse, decayedActivation, hubDampen } from "./embed.js";
|
|
4
|
+
import { extractPatterns } from "./extract.js";
|
|
5
|
+
import { DB_PATH } from "./types.js";
|
|
6
|
+
// ---------------------------------------------------------------------------
|
|
7
|
+
// ping
|
|
8
|
+
// ---------------------------------------------------------------------------
|
|
9
|
+
// Health check: report how many nodes/chunks exist and where the DB lives.
export function toolPing() {
    // sql.js exec() returns [{ columns, values }]; an empty table still yields
    // one row for COUNT(*), but guard with optional chaining anyway.
    const countOf = (table) => {
        const rows = db.exec(`SELECT COUNT(*) FROM ${table}`);
        return rows[0]?.values[0]?.[0] || 0;
    };
    return `thinking-mcp online. ${countOf("nodes")} nodes, ${countOf("chunks")} chunks. DB: ${DB_PATH}`;
}
|
|
16
|
+
// ---------------------------------------------------------------------------
|
|
17
|
+
// capture
|
|
18
|
+
// ---------------------------------------------------------------------------
|
|
19
|
+
// Insert one captured pattern as a unit: chunk row, node row, node↔chunk link,
// and the chunk's embedding. Returns the new node id. Caller must have a
// non-null `vec` and is responsible for calling persistDb().
function _insertPattern(text, nodeType, epistemicTag, confidence, vec) {
    const chunkId = uuid(), nodeId = uuid();
    db.run("INSERT INTO chunks (id, text, source_type, epistemic_tag, confidence) VALUES (?, ?, 'capture', ?, ?)", [chunkId, text, epistemicTag, confidence]);
    db.run("INSERT INTO nodes (id, type, summary, confidence, activation) VALUES (?, ?, ?, ?, 1.0)", [nodeId, nodeType, text, confidence]);
    db.run("INSERT INTO node_chunks (node_id, chunk_id) VALUES (?, ?)", [nodeId, chunkId]);
    db.run("INSERT INTO embeddings (chunk_id, vector, model, dims) VALUES (?, ?, ?, ?)", [chunkId, JSON.stringify(vec), embeddingModel, embeddingDims]);
    return nodeId;
}
/**
 * Capture a thought into the graph.
 *
 * Three paths:
 *  1. Valid nodeType supplied — store the raw text directly as that type.
 *  2. No nodeType and LLM extraction finds nothing — store as an "idea".
 *  3. Extraction finds patterns — store each as its own typed node.
 *
 * IMPROVED: the chunk/node/link/embedding insert quadruple was duplicated
 * three times; it now lives in _insertPattern. Behavior is unchanged,
 * including the pre-existing asymmetry that validateDims is only checked on
 * the direct-capture path.
 *
 * @param {string} text - the thought or raw text to capture
 * @param {string} [nodeType] - skips extraction when a valid node type
 * @param {string} [epistemicTag="assertion"]
 * @param {string} [confidence="tentative"]
 * @returns {Promise<string>} human-readable status line
 */
export async function toolCapture(text, nodeType, epistemicTag = "assertion", confidence = "tentative") {
    // Path 1: caller chose the type — no LLM round-trip.
    if (nodeType && VALID_NODE_TYPES.has(nodeType)) {
        const vec = await embedOne(text);
        if (!vec)
            return "Error: failed to generate embedding";
        if (!validateDims(vec))
            return "Error: embedding dimension mismatch";
        const nodeId = _insertPattern(text, nodeType, epistemicTag, confidence, vec);
        persistDb();
        return `Captured as ${nodeType} (${epistemicTag}, ${confidence}). Node: ${nodeId}`;
    }
    const patterns = await extractPatterns(text);
    // Path 2: extraction found nothing — keep the raw text as a low-commitment idea.
    if (patterns.length === 0) {
        const vec = await embedOne(text);
        if (!vec)
            return "Error: failed to generate embedding";
        const nodeId = _insertPattern(text, "idea", epistemicTag, confidence, vec);
        persistDb();
        return `No patterns extracted. Stored as idea. Node: ${nodeId}`;
    }
    // Path 3: one node per extracted pattern; patterns whose embedding fails
    // are silently skipped (best-effort, as before).
    for (const p of patterns) {
        const vec = await embedOne(p.text);
        if (!vec)
            continue;
        _insertPattern(p.text, p.type, p.epistemic || "assertion", p.confidence || "tentative", vec);
    }
    persistDb();
    const summary = patterns.map(p => `${p.type}: "${p.text.slice(0, 50)}"`).join("; ");
    return `Extracted ${patterns.length} pattern(s): ${summary}`;
}
|
|
63
|
+
// ---------------------------------------------------------------------------
|
|
64
|
+
// what_do_i_think
|
|
65
|
+
// ---------------------------------------------------------------------------
|
|
66
|
+
/**
 * Query the graph for positions on a topic.
 *
 * Fuses four ranking signals with weighted RRF: vector similarity (3x),
 * keyword LIKE rank (3x), time-decayed activation (1x), and outcome score
 * (0.5x). Bumps activation on every returned node — retrieval is itself a
 * usage signal — and persists the DB.
 *
 * @param {string} topic - free-text topic (untrusted user input)
 * @returns {Promise<string>} JSON: { topic, positions, node_count }
 */
export async function toolWhatDoIThink(topic) {
    const queryVec = await embedOne(topic);
    if (!queryVec)
        return "Error: failed to embed query";
    // Signal 1: vector similarity. A node's score is the max over its chunks.
    const vectorHits = vectorSearch(queryVec, 40);
    const vectorScores = new Map();
    for (const h of vectorHits) {
        const nc = db.exec(`SELECT node_id FROM node_chunks WHERE chunk_id = '${h.chunkId}'`);
        if (nc.length > 0) {
            for (const row of nc[0].values) {
                const nid = row[0];
                vectorScores.set(nid, Math.max(vectorScores.get(nid) || 0, h.score));
            }
        }
    }
    // Signal 2: keyword LIKE over summaries.
    // FIX: `topic` is user input interpolated into SQL. Double any single
    // quotes (standard SQL escaping) so a quote in a word cannot terminate
    // the LIKE literal and break — or inject into — the query.
    const kw = topic.split(/\s+/)
        .filter(w => w.length > 3)
        .map(w => `summary LIKE '%${w.replace(/'/g, "''")}%'`)
        .join(" OR ");
    const keywordScores = new Map();
    if (kw) {
        const kwNodes = queryNodes(`SELECT id, activation FROM nodes WHERE ${kw} LIMIT 20`);
        // Reciprocal-rank scoring: earlier matches score higher.
        kwNodes.forEach((n, i) => { keywordScores.set(n.id, 1 / (i + 1)); });
    }
    // Signals 3 & 4: decayed activation and recorded outcome score per node.
    const allIds = new Set([...vectorScores.keys(), ...keywordScores.keys()]);
    const activationScores = new Map();
    const outcomeScores = new Map();
    for (const id of allIds) {
        const node = getNode(id);
        if (!node)
            continue;
        activationScores.set(id, decayedActivation(node.activation, node.last_accessed, node.type));
        outcomeScores.set(id, node.outcome_score ?? 0);
    }
    // Weights: vector 3x, keyword 3x, activation 1x, outcome 0.5x
    const fused = rrfFuse([vectorScores, keywordScores, activationScores, outcomeScores], [3, 3, 1, 0.5]);
    const ranked = [...fused.entries()].sort((a, b) => b[1] - a[1]).slice(0, 20);
    // Accessing a node strengthens it.
    for (const [id] of ranked)
        bumpActivation(id);
    const positions = ranked.map(([id]) => {
        const node = getNode(id);
        if (!node)
            return null;
        return {
            type: node.type, summary: node.summary, confidence: node.confidence,
            activation: decayedActivation(node.activation, node.last_accessed, node.type).toFixed(3),
            outcome_score: node.outcome_score,
            first_seen: node.first_seen,
        };
    }).filter(Boolean);
    persistDb();
    return JSON.stringify({ topic, positions, node_count: positions.length }, null, 2);
}
|
|
116
|
+
// ---------------------------------------------------------------------------
|
|
117
|
+
// what_connects
|
|
118
|
+
// ---------------------------------------------------------------------------
|
|
119
|
+
/**
 * Find "bridge" nodes between two domains: process-type nodes whose chunks
 * are semantically close to BOTH domain embeddings. Read-only; hub dampening
 * is applied to the combined score before ranking.
 *
 * @param {string} domainA
 * @param {string} domainB
 * @returns {Promise<string>} JSON: { domain_a, domain_b, bridges }
 */
export async function toolWhatConnects(domainA, domainB) {
    const embA = await embedOne(domainA);
    const embB = await embedOne(domainB);
    if (!embA || !embB)
        return "Error: failed to embed domains";
    // Only "process" nodes (how-you-think patterns) can act as bridges.
    const PROCESS_TYPES = new Set(["heuristic", "value", "mental_model", "tension", "assumption", "preference"]);
    const bridges = [];
    for (const node of queryNodes("SELECT * FROM nodes")) {
        if (!PROCESS_TYPES.has(node.type))
            continue;
        const chunkRows = db.exec(`SELECT chunk_id FROM node_chunks WHERE node_id = '${node.id}'`);
        if (chunkRows.length === 0)
            continue;
        // Best similarity of any of this node's chunks against each domain.
        let simA = 0;
        let simB = 0;
        for (const [chunkId] of chunkRows[0].values) {
            const emb = db.exec(`SELECT vector FROM embeddings WHERE chunk_id = '${chunkId}'`);
            if (emb.length === 0)
                continue;
            const v = JSON.parse(emb[0].values[0][0]);
            const a = cosine(embA, v);
            const b = cosine(embB, v);
            if (a > simA)
                simA = a;
            if (b > simB)
                simB = b;
        }
        // A bridge is only as strong as its weaker side.
        const weakest = Math.min(simA, simB);
        if (weakest > 0.18) {
            bridges.push({ id: node.id, type: node.type, summary: node.summary, scoreA: simA, scoreB: simB, min: hubDampen(node.id, weakest) });
        }
    }
    bridges.sort((x, y) => y.min - x.min);
    return JSON.stringify({ domain_a: domainA, domain_b: domainB, bridges: bridges.slice(0, 10) }, null, 2);
}
|
|
150
|
+
// ---------------------------------------------------------------------------
|
|
151
|
+
// what_tensions_exist
|
|
152
|
+
// ---------------------------------------------------------------------------
|
|
153
|
+
/**
 * Surface contradictions and weak spots: explicit tension nodes, contradicts
 * edges, and low-confidence assumptions. Read-only. When a topic is given,
 * tension nodes are narrowed to those semantically near it (score > 0.2);
 * contradictions and weak assumptions are always reported unfiltered.
 *
 * @param {string} [topic] - optional focus topic
 * @returns {Promise<string>} JSON report
 */
export async function toolWhatTensionsExist(topic) {
    let tensions = queryNodes("SELECT * FROM nodes WHERE type = 'tension'");
    const contradictions = queryNodes("SELECT * FROM edges WHERE type = 'contradicts'");
    const shakyAssumptions = queryNodes("SELECT * FROM nodes WHERE type = 'assumption' AND (confidence = 'uncertain' OR confidence = 'tentative')");
    if (topic) {
        const topicVec = await embedOne(topic);
        // Embedding failure leaves tensions unfiltered (best-effort narrowing).
        if (topicVec) {
            const nearby = new Set();
            for (const hit of vectorSearch(topicVec, 40)) {
                if (hit.score <= 0.2)
                    continue;
                const rows = db.exec(`SELECT node_id FROM node_chunks WHERE chunk_id = '${hit.chunkId}'`);
                if (rows.length > 0)
                    rows[0].values.forEach((r) => nearby.add(r[0]));
            }
            tensions = tensions.filter(n => nearby.has(n.id));
        }
    }
    return JSON.stringify({
        explicit_tensions: tensions.map(n => ({ type: n.type, summary: n.summary, confidence: n.confidence })),
        contradictions: contradictions.map(e => ({ source: e.source_node_id, target: e.target_node_id, weight: e.weight })),
        weak_assumptions: shakyAssumptions.map(n => ({ summary: n.summary, confidence: n.confidence })),
    }, null, 2);
}
|
|
177
|
+
// ---------------------------------------------------------------------------
|
|
178
|
+
// where_am_i_uncertain
|
|
179
|
+
// ---------------------------------------------------------------------------
|
|
180
|
+
/**
 * Find low-confidence or untested thinking: nodes marked uncertain/tentative,
 * plus heuristics/mental models/assumptions with no recorded outcome.
 * Read-only. An optional domain narrows results to semantically nearby nodes.
 *
 * @param {string} [domain] - optional focus domain
 * @returns {Promise<string>} JSON: { uncertain, tentative, untested }
 */
export async function toolWhereAmIUncertain(domain) {
    let candidates = queryNodes("SELECT * FROM nodes WHERE confidence IN ('uncertain', 'tentative') OR (type IN ('heuristic', 'mental_model', 'assumption') AND outcome_score IS NULL)");
    if (domain) {
        const domainVec = await embedOne(domain);
        // On embedding failure, skip narrowing rather than erroring out.
        if (domainVec) {
            const nearby = new Set();
            for (const hit of vectorSearch(domainVec, 40)) {
                const rows = db.exec(`SELECT node_id FROM node_chunks WHERE chunk_id = '${hit.chunkId}'`);
                if (rows.length > 0)
                    rows[0].values.forEach((r) => nearby.add(r[0]));
            }
            candidates = candidates.filter(n => nearby.has(n.id));
        }
    }
    const byConfidence = (level) => candidates
        .filter(n => n.confidence === level)
        .map(n => ({ type: n.type, summary: n.summary }));
    const TESTABLE_TYPES = ["heuristic", "mental_model", "assumption"];
    return JSON.stringify({
        uncertain: byConfidence("uncertain"),
        tentative: byConfidence("tentative"),
        untested: candidates
            .filter(n => n.outcome_score === null && TESTABLE_TYPES.includes(n.type))
            .map(n => ({ type: n.type, summary: n.summary })),
    }, null, 2);
}
|
|
201
|
+
// ---------------------------------------------------------------------------
|
|
202
|
+
// suggest_exploration
|
|
203
|
+
// ---------------------------------------------------------------------------
|
|
204
|
+
/**
 * Surface forgotten-but-relevant patterns: nodes semantically close to the
 * current context (similarity > 0.15) whose decayed activation has dropped
 * below 0.5. Read-only. Results favor type diversity, capped at 10.
 *
 * @param {string} currentContext - what the user is working on
 * @returns {Promise<string>} JSON: { current_context, forgotten_but_relevant }
 */
export async function toolSuggestExploration(currentContext) {
    const ctxVec = await embedOne(currentContext);
    if (!ctxVec)
        return "Error: failed to embed context";
    // Best similarity per node over all of its chunks.
    const bestByNode = new Map();
    for (const hit of vectorSearch(ctxVec, 40)) {
        const rows = db.exec(`SELECT node_id FROM node_chunks WHERE chunk_id = '${hit.chunkId}'`);
        if (rows.length === 0)
            continue;
        for (const [nid] of rows[0].values) {
            if (hit.score > (bestByNode.get(nid) || 0))
                bestByNode.set(nid, hit.score);
        }
    }
    // Keep relevant nodes that have faded from active use.
    const candidates = [];
    for (const [id, similarity] of bestByNode) {
        const node = getNode(id);
        if (!node)
            continue;
        const act = decayedActivation(node.activation, node.last_accessed, node.type);
        if (act < 0.5 && similarity > 0.15)
            candidates.push({ id, type: node.type, summary: node.summary, activation: act, similarity });
    }
    candidates.sort((x, y) => y.similarity - x.similarity);
    // Diversity pass: the first 5 picks admit anything; after that, only
    // types not yet represented. Hard cap at 10.
    const seenTypes = new Set();
    const picks = [];
    for (const c of candidates) {
        if (picks.length >= 10)
            break;
        if (!seenTypes.has(c.type) || picks.length < 5) {
            picks.push(c);
            seenTypes.add(c.type);
        }
    }
    return JSON.stringify({ current_context: currentContext, forgotten_but_relevant: picks }, null, 2);
}
|
|
240
|
+
// ---------------------------------------------------------------------------
|
|
241
|
+
// how_would_user_decide
|
|
242
|
+
// ---------------------------------------------------------------------------
|
|
243
|
+
/**
 * Gather decision-relevant patterns for a context: heuristics, values,
 * mental models, preferences, and assumptions near the (context + options)
 * embedding, grouped by type and sorted by decayed activation. Read-only;
 * does NOT make the decision.
 *
 * @param {string} context - the decision context
 * @param {string} [options] - comma-separated options under consideration
 * @returns {Promise<string>} JSON: { context, options, reasoning_inputs }
 */
export async function toolHowWouldUserDecide(context, options) {
    const vec = await embedOne(context + (options ? " " + options : ""));
    if (!vec)
        return "Error: failed to embed context";
    // Collect every node touched by the top vector hits.
    const touched = new Set();
    for (const hit of vectorSearch(vec, 40)) {
        const rows = db.exec(`SELECT node_id FROM node_chunks WHERE chunk_id = '${hit.chunkId}'`);
        if (rows.length > 0)
            rows[0].values.forEach((r) => touched.add(r[0]));
    }
    // Only these types count as decision inputs.
    const DECISION_TYPES = new Set(["heuristic", "value", "mental_model", "preference", "assumption"]);
    const grouped = {};
    for (const id of touched) {
        const node = getNode(id);
        if (!node || !DECISION_TYPES.has(node.type))
            continue;
        (grouped[node.type] ??= []).push({
            summary: node.summary,
            confidence: node.confidence,
            activation: decayedActivation(node.activation, node.last_accessed, node.type),
        });
    }
    for (const entries of Object.values(grouped))
        entries.sort((x, y) => y.activation - x.activation);
    return JSON.stringify({ context, options: options || null, reasoning_inputs: grouped }, null, 2);
}
|
|
270
|
+
// ---------------------------------------------------------------------------
|
|
271
|
+
// what_has_changed
|
|
272
|
+
// ---------------------------------------------------------------------------
|
|
273
|
+
/**
 * Show how thinking on a domain evolved: a first_seen-ordered timeline of
 * nearby nodes, their evolved_into edge chains, and up to 10 most recent
 * outcomes. Read-only.
 *
 * @param {string} domain - the domain to inspect
 * @returns {Promise<string>} JSON: { domain, timeline, evolution_chains, outcomes }
 */
export async function toolWhatHasChanged(domain) {
    const vec = await embedOne(domain);
    if (!vec)
        return "Error: failed to embed domain";
    const touched = new Set();
    for (const hit of vectorSearch(vec, 40)) {
        const rows = db.exec(`SELECT node_id FROM node_chunks WHERE chunk_id = '${hit.chunkId}'`);
        if (rows.length > 0)
            rows[0].values.forEach((r) => touched.add(r[0]));
    }
    const ids = [...touched];
    // Chronological timeline, oldest first.
    const timelineNodes = ids
        .map((id) => getNode(id))
        .filter(Boolean)
        .sort((a, b) => new Date(a.first_seen).getTime() - new Date(b.first_seen).getTime());
    // Node ids are internally generated uuids; "''" keeps the IN () clause
    // valid when no nodes matched.
    const idList = ids.map((id) => `'${id}'`).join(",") || "''";
    const evolutions = queryNodes(`SELECT * FROM edges WHERE type = 'evolved_into' AND (source_node_id IN (${idList}) OR target_node_id IN (${idList}))`);
    const outcomes = queryNodes(`SELECT * FROM outcomes WHERE node_id IN (${idList}) ORDER BY created_at DESC`);
    return JSON.stringify({
        domain,
        timeline: timelineNodes.map(n => ({ type: n.type, summary: n.summary, first_seen: n.first_seen, confidence: n.confidence })),
        evolution_chains: evolutions,
        outcomes: outcomes.slice(0, 10),
    }, null, 2);
}
|
|
295
|
+
// ---------------------------------------------------------------------------
|
|
296
|
+
// correct
|
|
297
|
+
// ---------------------------------------------------------------------------
|
|
298
|
+
/**
 * Correct a node: replace its summary, boost activation by +2.0 (capped at
 * 10.0), write a fresh chunk + embedding, and mark the old chunks superseded.
 * Strongest learning signal in the system.
 *
 * NOTE(review): if embedOne(newSummary) fails, the node's summary HAS already
 * been updated but no new chunk is written and old chunks are NOT superseded,
 * yet the function still reports success — presumably intentional best-effort;
 * confirm.
 *
 * @param {string} nodeId - id of the node to correct
 * @param {string} newSummary - the corrected summary text
 * @param {string} [newConfidence] - optional confidence override
 * @returns {Promise<string>} status line (or "Error: ..." if node missing)
 */
export async function toolCorrect(nodeId, newSummary, newConfidence) {
    const existing = getNode(nodeId);
    if (!existing)
        return `Error: node ${nodeId} not found`;
    // Summary + activation boost; confidence clause is appended only when given.
    db.run(`UPDATE nodes SET summary = ?, activation = MIN(activation + 2.0, 10.0), last_accessed = ? ${newConfidence ? ", confidence = ?" : ""} WHERE id = ?`, newConfidence ? [newSummary, now(), newConfidence, nodeId] : [newSummary, now(), nodeId]);
    // Snapshot the node's current chunks before adding the replacement.
    const oldChunks = db.exec(`SELECT chunk_id FROM node_chunks WHERE node_id = '${nodeId}'`);
    const newChunkId = uuid();
    // Insert new chunk FIRST so FK on superseded_by can reference it
    const vec = await embedOne(newSummary);
    if (vec) {
        db.run("INSERT INTO chunks (id, text, source_type, epistemic_tag, confidence) VALUES (?, ?, 'capture', 'assertion', ?)", [newChunkId, newSummary, newConfidence || "strong"]);
        db.run("INSERT INTO node_chunks (node_id, chunk_id) VALUES (?, ?)", [nodeId, newChunkId]);
        db.run("INSERT INTO embeddings (chunk_id, vector, model, dims) VALUES (?, ?, ?, ?)", [newChunkId, JSON.stringify(vec), embeddingModel, embeddingDims]);
        // Now supersede old chunks — new chunk exists so FK is satisfied
        if (oldChunks.length > 0) {
            for (const row of oldChunks[0].values) {
                // Only first-time supersession: already-superseded chunks keep
                // their original pointer (preserves the correction chain).
                db.run(`UPDATE chunks SET superseded_by = ? WHERE id = ? AND superseded_by IS NULL`, [newChunkId, row[0]]);
            }
        }
    }
    persistDb();
    return `Corrected node ${nodeId}. Old: "${existing.summary.slice(0, 60)}". New: "${newSummary.slice(0, 60)}".`;
}
|
|
321
|
+
// ---------------------------------------------------------------------------
|
|
322
|
+
// record_outcome
|
|
323
|
+
// ---------------------------------------------------------------------------
|
|
324
|
+
/**
 * Record what happened after a decision influenced by a node.
 *
 * Writes an outcomes row, folds the score into the node's running outcome
 * score (first outcome sets it directly; later ones blend 70/30), propagates
 * a dampened share to 1-hop neighbors by edge type, and auto-adjusts
 * confidence once 3+ outcomes exist.
 *
 * FIX: `score` is documented as -1.0..1.0 but was never enforced; an
 * out-of-range value would distort the EMA, the neighbor propagation, and
 * the auto-confidence thresholds. It is now clamped to [-1, 1].
 *
 * @param {string} nodeId - the node that influenced the decision
 * @param {string} outcome - free-text description of what happened
 * @param {number} score - outcome quality, clamped to [-1, 1]
 * @returns {string} status line (or "Error: ..." if node missing)
 */
export function toolRecordOutcome(nodeId, outcome, score) {
    const existing = getNode(nodeId);
    if (!existing)
        return `Error: node ${nodeId} not found`;
    const clamped = Math.max(-1, Math.min(1, score));
    db.run("INSERT INTO outcomes (id, node_id, outcome, score) VALUES (?, ?, ?, ?)", [uuid(), nodeId, outcome, clamped]);
    const currentScore = existing.outcome_score ?? 0;
    // First outcome (score still 0/unset) takes the value directly; otherwise EMA.
    const newScore = currentScore === 0 ? clamped : (currentScore * 0.7 + clamped * 0.3);
    db.run("UPDATE nodes SET outcome_score = ?, last_accessed = ? WHERE id = ?", [newScore, now(), nodeId]);
    // Propagate to neighbors via edge-type-aware dampening (1-hop only)
    const PROP_WEIGHTS = { supports: 0.3, contradicts: -0.2, depends_on: 0.15 };
    const edgeRows = queryNodes(`SELECT id, type, source_node_id, target_node_id FROM edges WHERE source_node_id = '${nodeId}' OR target_node_id = '${nodeId}'`);
    let propagated = 0;
    for (const edge of edgeRows) {
        const w = PROP_WEIGHTS[edge.type];
        if (w === undefined)
            continue; // edge types without a weight do not propagate
        const neighborId = edge.source_node_id === nodeId ? edge.target_node_id : edge.source_node_id;
        db.run(`UPDATE nodes SET outcome_score = COALESCE(outcome_score, 0) * 0.7 + ? * 0.3, last_accessed = ? WHERE id = ?`, [clamped * w, now(), neighborId]);
        propagated++;
    }
    // Auto-update confidence after 3+ outcomes
    const countRows = db.exec(`SELECT COUNT(*) FROM outcomes WHERE node_id = '${nodeId}'`);
    const outcomeCount = countRows.length > 0 ? countRows[0].values[0][0] : 0;
    if (outcomeCount >= 3) {
        if (newScore > 0.5)
            db.run(`UPDATE nodes SET confidence = 'strong' WHERE id = ? AND confidence != 'strong'`, [nodeId]);
        else if (newScore < -0.3)
            db.run(`UPDATE nodes SET confidence = 'uncertain' WHERE id = ? AND confidence != 'uncertain'`, [nodeId]);
    }
    persistDb();
    const propNote = propagated > 0 ? ` Propagated to ${propagated} neighbor(s).` : "";
    return `Recorded outcome for "${existing.summary.slice(0, 50)}". Score: ${currentScore.toFixed(2)} -> ${newScore.toFixed(2)}.${propNote}`;
}
|
|
357
|
+
// ---------------------------------------------------------------------------
|
|
358
|
+
// get_node
|
|
359
|
+
// ---------------------------------------------------------------------------
|
|
360
|
+
// ---------------------------------------------------------------------------
// get_node
// ---------------------------------------------------------------------------
/**
 * Returns the full detail of one node as pretty-printed JSON: the node row,
 * its attached chunks, every edge touching it, and its outcome history
 * (newest first).
 *
 * @param {string} nodeId - id of the node to fetch
 * @returns {string} JSON payload, or an "Error: ..." string if not found
 */
export function toolGetNode(nodeId) {
    const node = getNode(nodeId);
    if (!node)
        return `Error: node ${nodeId} not found`;
    // queryNodes takes raw SQL; quote-escape the id so a value containing
    // ' cannot break out of the string literal (SQL injection).
    const safeId = String(nodeId).replace(/'/g, "''");
    const chunks = queryNodes(`SELECT c.* FROM chunks c JOIN node_chunks nc ON c.id = nc.chunk_id WHERE nc.node_id = '${safeId}'`);
    const edges = queryNodes(`SELECT * FROM edges WHERE source_node_id = '${safeId}' OR target_node_id = '${safeId}'`);
    const outs = queryNodes(`SELECT * FROM outcomes WHERE node_id = '${safeId}' ORDER BY created_at DESC`);
    return JSON.stringify({ node, chunks, edges, outcomes: outs }, null, 2);
}
|
|
369
|
+
// ---------------------------------------------------------------------------
|
|
370
|
+
// get_neighbors
|
|
371
|
+
// ---------------------------------------------------------------------------
|
|
372
|
+
// ---------------------------------------------------------------------------
// get_neighbors
// ---------------------------------------------------------------------------
/**
 * Returns the 1-hop neighborhood of a node as pretty-printed JSON: a slim
 * view of the node itself, slim views of each neighbor (id/type/summary/
 * activation), and the raw connecting edges.
 *
 * @param {string} nodeId - id of the center node
 * @returns {string} JSON payload, or an "Error: ..." string if not found
 */
export function toolGetNeighbors(nodeId) {
    const node = getNode(nodeId);
    if (!node)
        return `Error: node ${nodeId} not found`;
    // queryNodes takes raw SQL; quote-escape the id so a value containing
    // ' cannot break out of the string literal (SQL injection).
    const safeId = String(nodeId).replace(/'/g, "''");
    const edges = queryNodes(`SELECT * FROM edges WHERE source_node_id = '${safeId}' OR target_node_id = '${safeId}'`);
    // Collect the far end of every edge; a Set dedupes nodes reached by
    // multiple edges.
    const neighborIds = new Set();
    for (const e of edges) {
        if (e.source_node_id !== nodeId)
            neighborIds.add(e.source_node_id);
        if (e.target_node_id !== nodeId)
            neighborIds.add(e.target_node_id);
    }
    // filter(Boolean) drops dangling edge endpoints whose node no longer exists.
    const neighbors = [...neighborIds].map(id => getNode(id)).filter(Boolean).map(n => ({ id: n.id, type: n.type, summary: n.summary, activation: n.activation }));
    return JSON.stringify({ node: { id: node.id, type: node.type, summary: node.summary }, neighbors, edges }, null, 2);
}
|
|
387
|
+
// ---------------------------------------------------------------------------
|
|
388
|
+
// search_nodes
|
|
389
|
+
// ---------------------------------------------------------------------------
|
|
390
|
+
// ---------------------------------------------------------------------------
// search_nodes
// ---------------------------------------------------------------------------
/**
 * Keyword search over node summaries. Each whitespace-separated word longer
 * than 2 characters becomes a LIKE clause; clauses are OR-ed, results are
 * ordered by activation and capped at 20.
 *
 * @param {string} query - free-text search string
 * @param {string} [nodeType] - optional node-type filter (whitelisted)
 * @returns {string} JSON payload with the matching nodes
 */
export function toolSearchNodes(query, nodeType) {
    // Escape each word before interpolating it into raw SQL:
    //   ''        neutralises quote-based SQL injection,
    //   \% \_ \\  make LIKE metacharacters match literally (ESCAPE '\' below).
    const words = query
        .split(/\s+/)
        .filter(w => w.length > 2)
        .map(w => w.replace(/'/g, "''").replace(/[\\%_]/g, m => `\\${m}`))
        .map(w => `summary LIKE '%${w}%' ESCAPE '\\'`);
    if (words.length === 0)
        return JSON.stringify({ query, nodes: [] });
    let sql = `SELECT * FROM nodes WHERE (${words.join(" OR ")})`;
    // nodeType is checked against the VALID_NODE_TYPES whitelist, so direct
    // interpolation is safe here.
    if (nodeType && VALID_NODE_TYPES.has(nodeType))
        sql += ` AND type = '${nodeType}'`;
    sql += " ORDER BY activation DESC LIMIT 20";
    const nodes = queryNodes(sql);
    return JSON.stringify({ query, type_filter: nodeType || null, count: nodes.length, nodes }, null, 2);
}
|
|
401
|
+
// ---------------------------------------------------------------------------
|
|
402
|
+
// merge_nodes
|
|
403
|
+
// ---------------------------------------------------------------------------
|
|
404
|
+
// ---------------------------------------------------------------------------
// merge_nodes
// ---------------------------------------------------------------------------
/**
 * Merges one node into another: chunks, edges, and outcomes are re-pointed
 * at the surviving node, self-loop edges created by the merge are removed,
 * the survivor keeps the higher activation of the two, and the merged node
 * row is deleted.
 *
 * @param {string} keepId - id of the node that survives
 * @param {string} mergeId - id of the node absorbed into keepId
 * @returns {string} status message (or "Error: ..." if either id is unknown)
 */
export function toolMergeNodes(keepId, mergeId) {
    const survivor = getNode(keepId);
    const absorbed = getNode(mergeId);
    if (!survivor)
        return `Error: node ${keepId} not found`;
    if (!absorbed)
        return `Error: node ${mergeId} not found`;
    // Each step of the merge as a [sql, params] pair, executed in order.
    const steps = [
        // Move chunk links across, skipping chunks the survivor already has.
        [`UPDATE node_chunks SET node_id = ? WHERE node_id = ? AND chunk_id NOT IN (SELECT chunk_id FROM node_chunks WHERE node_id = ?)`, [keepId, mergeId, keepId]],
        // Drop the remaining (duplicate) links on the absorbed node.
        [`DELETE FROM node_chunks WHERE node_id = ?`, [mergeId]],
        // Re-point both ends of every edge.
        [`UPDATE edges SET source_node_id = ? WHERE source_node_id = ?`, [keepId, mergeId]],
        [`UPDATE edges SET target_node_id = ? WHERE target_node_id = ?`, [keepId, mergeId]],
        // Edges between the two nodes are now self-loops; remove them.
        [`DELETE FROM edges WHERE source_node_id = target_node_id`, []],
        // Outcome history follows the survivor.
        [`UPDATE outcomes SET node_id = ? WHERE node_id = ?`, [keepId, mergeId]],
        // Survivor keeps the stronger activation of the pair.
        [`UPDATE nodes SET activation = ? WHERE id = ?`, [Math.max(survivor.activation, absorbed.activation), keepId]],
        [`DELETE FROM nodes WHERE id = ?`, [mergeId]],
    ];
    for (const [sql, params] of steps) {
        db.run(sql, params);
    }
    persistDb();
    return `Merged "${absorbed.summary.slice(0, 50)}" into "${survivor.summary.slice(0, 50)}".`;
}
|
|
422
|
+
// ---------------------------------------------------------------------------
|
|
423
|
+
// archive_node
|
|
424
|
+
// ---------------------------------------------------------------------------
|
|
425
|
+
// ---------------------------------------------------------------------------
// archive_node
// ---------------------------------------------------------------------------
/**
 * Archives a node by zeroing its activation (archived nodes no longer
 * surface in activation-ordered results) and stamping last_accessed.
 * The row itself is kept.
 *
 * @param {string} nodeId - id of the node to archive
 * @returns {string} status message (or "Error: ..." if the node is unknown)
 */
export function toolArchiveNode(nodeId) {
    const target = getNode(nodeId);
    if (!target) {
        return `Error: node ${nodeId} not found`;
    }
    db.run(`UPDATE nodes SET activation = 0.0, last_accessed = ? WHERE id = ?`, [now(), nodeId]);
    persistDb();
    return `Archived node ${nodeId} ("${target.summary.slice(0, 50)}"). Will not appear in results.`;
}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
/** Categories of thought a node in the graph can represent. */
export type NodeType = "idea" | "question" | "project" | "heuristic" | "value" | "mental_model" | "assumption" | "tension" | "preference";
/** How the speaker relates to a captured chunk's content. */
export type EpistemicTag = "assertion" | "hypothesis" | "speculation" | "quoting" | "rejected";
/** Confidence level attached to a node or chunk. */
export type Confidence = "strong" | "tentative" | "uncertain";
/** Relationship kinds allowed between two nodes. */
export type EdgeType = "supports" | "contradicts" | "evolved_into" | "inspired_by" | "depends_on" | "overrides" | "learned_from" | "scoped_by" | "rejected" | "belongs_to" | "derived_from";
/** Runtime whitelist mirroring NodeType, for validating untyped input. */
export declare const VALID_NODE_TYPES: Set<string>;
/** Runtime whitelist mirroring EpistemicTag. */
export declare const VALID_EPISTEMIC_TAGS: Set<string>;
/** Runtime whitelist mirroring Confidence. */
export declare const VALID_CONFIDENCE: Set<string>;
/** Runtime whitelist mirroring EdgeType. */
export declare const VALID_EDGE_TYPES: Set<string>;
/** Per-node-type activation decay multipliers (keyed by NodeType). */
export declare const DECAY_RATES: Record<string, number>;
/** A pattern pulled out of raw text by the extraction step. */
export interface ExtractedPattern {
    /** The extracted text itself. */
    text: string;
    /** Node type assigned to the pattern (see NodeType). */
    type: string;
    /** Confidence label (see Confidence). */
    confidence: string;
    /** Epistemic tag (see EpistemicTag). */
    epistemic: string;
}
/** Path of the sql.js database file (env-overridable). */
export declare const DB_PATH: string;
/** Which embedding backend to use (e.g. "voyage"; env-overridable). */
export declare const EMBEDDING_PROVIDER: string;
/** API key for Voyage embeddings, if configured. */
export declare const VOYAGE_API_KEY: string | undefined;
/** API key for Anthropic, if configured. */
export declare const ANTHROPIC_API_KEY: string | undefined;
/** API key for OpenAI, if configured. */
export declare const OPENAI_API_KEY: string | undefined;
/** Base URL of a local Ollama server for embeddings. */
export declare const OLLAMA_URL: string;
|
package/dist/types.js
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import * as path from "path";
import * as os from "os";
// Runtime whitelists used to validate incoming tool arguments. They must
// stay in sync with the corresponding union types in types.d.ts.
export const VALID_NODE_TYPES = new Set(["idea", "question", "project", "heuristic", "value", "mental_model", "assumption", "tension", "preference"]);
export const VALID_EPISTEMIC_TAGS = new Set(["assertion", "hypothesis", "speculation", "quoting", "rejected"]);
export const VALID_CONFIDENCE = new Set(["strong", "tentative", "uncertain"]);
export const VALID_EDGE_TYPES = new Set(["supports", "contradicts", "evolved_into", "inspired_by", "depends_on", "overrides", "learned_from", "scoped_by", "rejected", "belongs_to", "derived_from"]);
// Per-node-type activation decay multipliers (closer to 1.0 = slower decay).
// Durable beliefs (values, assumptions) fade slowest; transient items
// (ideas, questions, projects) fade fastest.
export const DECAY_RATES = {
    value: 0.98, assumption: 0.98,
    heuristic: 0.96, mental_model: 0.96, preference: 0.96, tension: 0.96,
    idea: 0.93, question: 0.93, project: 0.93,
};
// Database location: env override, or ~/.thinking-mcp/mind.db by default.
export const DB_PATH = process.env.THINKING_MCP_DB_PATH
    || path.join(os.homedir(), ".thinking-mcp", "mind.db");
// Embedding backend selection and per-provider configuration, all sourced
// from the environment; "voyage" is the default provider.
export const EMBEDDING_PROVIDER = process.env.THINKING_MCP_EMBEDDING_PROVIDER || "voyage";
export const VOYAGE_API_KEY = process.env.VOYAGE_API_KEY;
export const ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY;
export const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
// Local Ollama endpoint used when the provider is set to ollama.
export const OLLAMA_URL = process.env.OLLAMA_URL || "http://localhost:11434";
|
package/package.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"name":"@iflow-mcp/multimail-dev-thinking-mcp","version":"0.1.0","description":"MCP server that models how you think, not what you know","type":"module","bin":{"iflow-mcp_multimail-dev-thinking-mcp":"./dist/index.js"},"files":["dist"],"scripts":{"build":"tsc && chmod 755 dist/index.js","dev":"tsx src/index.ts","prepare":"npm run build"},"dependencies":{"@modelcontextprotocol/sdk":"^1.29.0","js-yaml":"^4.1.1","sql.js":"^1.14.0","zod":"^3.25.0"},"devDependencies":{"@types/js-yaml":"^4.0.9","@types/node":"^22.0.0","tsx":"^4.0.0","typescript":"^5.8.0"},"keywords":["mcp","cognitive","knowledge-graph","thinking","mental-model"],"license":"MIT","repository":{"type":"git","url":"https://github.com/multimail-dev/thinking-mcp"},"homepage":"https://multimail.dev","publishConfig":{"access":"public"},"engines":{"node":">=20"}}
|