0nmcp 2.8.0 → 2.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/catalog.js +29 -0
- package/engine/brain.js +738 -0
- package/engine/local-ai.js +439 -0
- package/index.js +4 -0
- package/lib/stats.json +1 -1
- package/package.json +2 -2
package/catalog.js
CHANGED
@@ -1833,6 +1833,35 @@ export const SERVICE_CATALOG = {
     }),
   },

+  // ── Ollama (Local AI) ──────────────────────────────────────
+  ollama: {
+    name: "Ollama",
+    type: "ai",
+    description: "Local AI inference — chat completions, embeddings, model management. Free, private, no API costs.",
+    baseUrl: "http://localhost:11434",
+    authType: "none",
+    credentialKeys: [],
+    capabilities: [
+      { name: "generate_text", actions: ["create"], description: "Generate text completions with local models (Llama, Mistral, etc.)" },
+      { name: "chat_completion", actions: ["create"], description: "Multi-turn chat with local models" },
+      { name: "create_embedding", actions: ["create"], description: "Create text embeddings locally" },
+      { name: "manage_models", actions: ["list", "pull", "delete", "show"], description: "List, pull, inspect, and delete local models" },
+    ],
+    endpoints: {
+      chat_completion: { method: "POST", path: "/api/chat", body: { model: "llama3.1", messages: [], stream: false } },
+      generate: { method: "POST", path: "/api/generate", body: { model: "llama3.1", prompt: "", stream: false } },
+      create_embedding: { method: "POST", path: "/api/embed", body: { model: "llama3.1", input: "" } },
+      list_models: { method: "GET", path: "/api/tags" },
+      show_model: { method: "POST", path: "/api/show", body: { name: "" } },
+      pull_model: { method: "POST", path: "/api/pull", body: { name: "", stream: false } },
+      delete_model: { method: "DELETE", path: "/api/delete", body: { name: "" } },
+      list_running: { method: "GET", path: "/api/ps" },
+    },
+    authHeader: () => ({
+      "Content-Type": "application/json",
+    }),
+  },
+
 };

 // ── Helpers ────────────────────────────────────────────────
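Note (illustrative, not part of the diff): the entry above maps one-to-one onto Ollama's HTTP API, so a raw call through the cataloged chat_completion endpoint looks roughly like this sketch, assuming Ollama is serving on the default port and a llama3.1 model is already pulled:

const res = await fetch("http://localhost:11434/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },   // same header authHeader() returns
  body: JSON.stringify({ model: "llama3.1", messages: [{ role: "user", content: "Hello" }], stream: false }),
});
const data = await res.json();   // data.message.content carries the reply (see engine/local-ai.js below)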
package/engine/brain.js
ADDED
|
@@ -0,0 +1,738 @@
+// ============================================================
+// 0nMCP — Brain Compiler & Behavior Engine
+// ============================================================
+// Merges BotCoaches concept into 0nAI. Portable AI behavior
+// packages (.brain files) that can be imported into any app
+// on any LLM.
+//
+// The Three-Layer Model:
+//   BRAIN = Trained Behavior (portable .brain file)
+//   SKULL = Application Container (0nCore, Marketplace, any app)
+//   BODY  = LLM Compute (Claude, GPT, Gemini, Grok, Llama)
+//
+// 6 MCP Tools:
+//   brain_create  — Create a new brain project
+//   brain_build   — Add knowledge/reasoning/behavior/skills
+//   brain_train   — Run scenario-based training with evaluation
+//   brain_compile — Compile to portable .brain file
+//   brain_import  — Import .brain and convert to system prompt
+//   brain_list    — List all brain projects
+//
+// Database: Supabase (pwujhhmlrtxjmjzyttwn)
+// ============================================================
+
+import { readFileSync, writeFileSync, existsSync, mkdirSync } from "fs";
+import { join } from "path";
+import { homedir } from "os";
+import { createHash } from "crypto";
+
+const BRAINS_DIR = join(homedir(), ".0n", "brains");
+
+// ── .brain file format version ────────────────────────────
+const BRAIN_VERSION = "1.0.0";
+const BRAIN_FORMAT = "0nai-brain";
|
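Note (illustrative, not part of the diff): the three-layer split in the header translates into a small runtime contract — the .brain file (BRAIN) is plain JSON, the host app (SKULL) loads it, and whatever LLM is available (BODY) receives its prompt field as a system prompt. A minimal sketch, reusing the example path from the brain_import docstring:

import { readFileSync } from "fs";
import { join } from "path";
import { homedir } from "os";

// SKULL: load a compiled brain produced by brain_compile
const brain = JSON.parse(readFileSync(join(homedir(), ".0n", "brains", "crm-expert-v1.0.0.brain"), "utf-8"));
const system = brain.prompt;          // BRAIN: the compiled behavior as a system prompt
const messages = [                    // BODY: hand it to any LLM (OpenAI-style shape shown)
  { role: "system", content: system },
  { role: "user", content: "How do I create a contact?" },
];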
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
* Register brain engine tools on an MCP server instance.
|
|
37
|
+
*
|
|
38
|
+
* @param {import("@modelcontextprotocol/sdk/server/mcp.js").McpServer} server
|
|
39
|
+
* @param {import("zod").ZodType} z
|
|
40
|
+
* @param {object} [supabase]
|
|
41
|
+
*/
|
|
42
|
+
export function registerBrainTools(server, z, supabase) {
|
|
43
|
+
|
|
44
|
+
async function getSupabase() {
|
|
45
|
+
if (supabase) return supabase;
|
|
46
|
+
try {
|
|
47
|
+
const { createClient } = await import("@supabase/supabase-js");
|
|
48
|
+
const url = process.env.SUPABASE_URL || "https://pwujhhmlrtxjmjzyttwn.supabase.co";
|
|
49
|
+
const key = process.env.SUPABASE_SERVICE_KEY || process.env.SUPABASE_SERVICE_ROLE_KEY;
|
|
50
|
+
if (!key) throw new Error("No Supabase service key");
|
|
51
|
+
supabase = createClient(url, key);
|
|
52
|
+
return supabase;
|
|
53
|
+
} catch (err) {
|
|
54
|
+
throw new Error(`Supabase not available: ${err.message}`);
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
// ─── brain_create ─────────────────────────────────────────
|
|
59
|
+
server.tool(
|
|
60
|
+
"brain_create",
|
|
61
|
+
`Create a new brain project. A brain is a portable AI behavior package
|
|
62
|
+
with knowledge, reasoning patterns, behavior rules, and skills.
|
|
63
|
+
|
|
64
|
+
Example: brain_create({ name: "CRM Expert", domain: "crm", description: "Knows all 245 CRM tools" })
|
|
65
|
+
Example: brain_create({ name: "SXO Writer", domain: "content", traits: ["professional", "data-driven"] })`,
|
|
66
|
+
{
|
|
67
|
+
name: z.string().describe("Brain name"),
|
|
68
|
+
domain: z.string().describe("Domain: crm, content, sales, support, development, automation, general"),
|
|
69
|
+
description: z.string().optional().describe("What this brain does"),
|
|
70
|
+
traits: z.array(z.string()).optional().describe("Personality traits: professional, thorough, concise, etc."),
|
|
71
|
+
base_brain: z.string().optional().describe("ID of brain to fork from"),
|
|
72
|
+
},
|
|
73
|
+
async ({ name, domain, description, traits, base_brain }) => {
|
|
74
|
+
try {
|
|
75
|
+
const sb = await getSupabase();
|
|
76
|
+
|
|
77
|
+
let brain_data = {
|
|
78
|
+
format: BRAIN_FORMAT,
|
|
79
|
+
version: BRAIN_VERSION,
|
|
80
|
+
identity: { name, domain, description: description || "", author: "RocketOpp", trained_by: "0nAI" },
|
|
81
|
+
knowledge: { concepts: [], facts: [], rules: [], glossary: [] },
|
|
82
|
+
reasoning: {
|
|
83
|
+
decomposition: [],
|
|
84
|
+
decision_making: [],
|
|
85
|
+
question_asking: [],
|
|
86
|
+
},
|
|
87
|
+
behavior: {
|
|
88
|
+
personality: { traits: traits || ["professional", "helpful"], boundaries: [] },
|
|
89
|
+
communication: { style: "adaptive", verbosity: "balanced" },
|
|
90
|
+
interaction: { greeting_patterns: [], error_handling: [] },
|
|
91
|
+
},
|
|
92
|
+
skills: { modules: [], tool_mapping: [] },
|
|
93
|
+
evaluation: { scenarios: [], rubric: null, benchmarks: [] },
|
|
94
|
+
training: { iterations: [], final_score: 0, method: "simulated" },
|
|
95
|
+
};
|
|
96
|
+
|
|
97
|
+
// Fork from existing brain
|
|
98
|
+
if (base_brain) {
|
|
99
|
+
const { data: parent } = await sb.from("bc_brains").select("brain_data").eq("id", base_brain).single();
|
|
100
|
+
if (parent?.brain_data) {
|
|
101
|
+
brain_data = { ...parent.brain_data, identity: { ...parent.brain_data.identity, name, domain, description: description || "" } };
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
const { data, error } = await sb.from("bc_brains").insert({
|
|
106
|
+
name,
|
|
107
|
+
domain,
|
|
108
|
+
description: description || "",
|
|
109
|
+
brain_data,
|
|
110
|
+
status: "draft",
|
|
111
|
+
version: "0.1.0",
|
|
112
|
+
training_score: 0,
|
|
113
|
+
}).select("id, name, domain, status, version").single();
|
|
114
|
+
|
|
115
|
+
if (error) throw error;
|
|
116
|
+
|
|
117
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
118
|
+
status: "created", brain: data,
|
|
119
|
+
message: `Brain "${name}" created. Use brain_build to add knowledge, reasoning, behavior, and skills.`,
|
|
120
|
+
next_steps: [
|
|
121
|
+
`brain_build({ brain_id: "${data.id}", layer: "knowledge", action: "add", items: [...] })`,
|
|
122
|
+
`brain_build({ brain_id: "${data.id}", layer: "reasoning", action: "add", items: [...] })`,
|
|
123
|
+
`brain_build({ brain_id: "${data.id}", layer: "behavior", action: "set", config: {...} })`,
|
|
124
|
+
],
|
|
125
|
+
}, null, 2) }] };
|
|
126
|
+
} catch (err) {
|
|
127
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
);
|
|
131
|
+
|
|
132
|
+
// ─── brain_build ──────────────────────────────────────────
|
|
133
|
+
server.tool(
|
|
134
|
+
"brain_build",
|
|
135
|
+
`Add knowledge, reasoning patterns, behavior rules, or skills to a brain.
|
|
136
|
+
|
|
137
|
+
Layers:
|
|
138
|
+
knowledge — concepts, facts, rules, glossary terms
|
|
139
|
+
reasoning — decomposition patterns, decision frameworks, question triggers
|
|
140
|
+
behavior — personality traits, communication style, boundaries
|
|
141
|
+
skills — capability modules, tool mappings
|
|
142
|
+
scenarios — test scenarios with expected behavior
|
|
143
|
+
|
|
144
|
+
Example: brain_build({ brain_id: "uuid", layer: "knowledge", action: "add", items: [
|
|
145
|
+
{ type: "fact", content: "0nMCP has 819 tools across 54 services" },
|
|
146
|
+
{ type: "rule", content: "Never say GHL — always say CRM" },
|
|
147
|
+
{ type: "concept", name: "Radial Burst", definition: "Fan-out content to all channels simultaneously" }
|
|
148
|
+
]})
|
|
149
|
+
|
|
150
|
+
Example: brain_build({ brain_id: "uuid", layer: "scenarios", action: "add", items: [
|
|
151
|
+
{ input: "How do I create a contact?", expected: "Should reference CRM contacts API, never say GHL", difficulty: "easy" }
|
|
152
|
+
]})`,
|
|
153
|
+
{
|
|
154
|
+
brain_id: z.string().describe("Brain ID"),
|
|
155
|
+
layer: z.enum(["knowledge", "reasoning", "behavior", "skills", "scenarios"]).describe("Which layer to build"),
|
|
156
|
+
action: z.enum(["add", "set", "remove", "clear"]).describe("add items, set config, remove by index, clear layer"),
|
|
157
|
+
items: z.array(z.record(z.any())).optional().describe("Items to add (for knowledge, reasoning, skills, scenarios)"),
|
|
158
|
+
config: z.record(z.any()).optional().describe("Config to set (for behavior)"),
|
|
159
|
+
indices: z.array(z.number()).optional().describe("Indices to remove"),
|
|
160
|
+
},
|
|
161
|
+
async ({ brain_id, layer, action, items, config, indices }) => {
|
|
162
|
+
try {
|
|
163
|
+
const sb = await getSupabase();
|
|
164
|
+
const { data: brain, error: fetchErr } = await sb.from("bc_brains").select("*").eq("id", brain_id).single();
|
|
165
|
+
if (fetchErr || !brain) throw new Error("Brain not found");
|
|
166
|
+
|
|
167
|
+
const bd = brain.brain_data;
|
|
168
|
+
|
|
169
|
+
if (layer === "knowledge") {
|
|
170
|
+
if (action === "add" && items) {
|
|
171
|
+
bd.knowledge.facts = bd.knowledge.facts || [];
|
|
172
|
+
bd.knowledge.concepts = bd.knowledge.concepts || [];
|
|
173
|
+
bd.knowledge.rules = bd.knowledge.rules || [];
|
|
174
|
+
bd.knowledge.glossary = bd.knowledge.glossary || [];
|
|
175
|
+
for (const item of items) {
|
|
176
|
+
const type = item.type || "fact";
|
|
177
|
+
if (type === "fact") bd.knowledge.facts.push({ content: item.content, source: item.source || "manual" });
|
|
178
|
+
else if (type === "concept") bd.knowledge.concepts.push({ name: item.name, definition: item.definition || item.content });
|
|
179
|
+
else if (type === "rule") bd.knowledge.rules.push({ rule: item.content || item.rule, priority: item.priority || "normal" });
|
|
180
|
+
else if (type === "term") bd.knowledge.glossary.push({ term: item.term || item.name, definition: item.definition || item.content });
|
|
181
|
+
}
|
|
182
|
+
} else if (action === "clear") {
|
|
183
|
+
bd.knowledge = { concepts: [], facts: [], rules: [], glossary: [] };
|
|
184
|
+
}
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
else if (layer === "reasoning") {
|
|
188
|
+
if (action === "add" && items) {
|
|
189
|
+
for (const item of items) {
|
|
190
|
+
const cat = item.category || "decomposition";
|
|
191
|
+
bd.reasoning[cat] = bd.reasoning[cat] || [];
|
|
192
|
+
bd.reasoning[cat].push({ pattern: item.pattern || item.content, when: item.when, example: item.example });
|
|
193
|
+
}
|
|
194
|
+
} else if (action === "clear") {
|
|
195
|
+
bd.reasoning = { decomposition: [], decision_making: [], question_asking: [] };
|
|
196
|
+
}
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
else if (layer === "behavior") {
|
|
200
|
+
if (action === "set" && config) {
|
|
201
|
+
if (config.traits) bd.behavior.personality.traits = config.traits;
|
|
202
|
+
if (config.boundaries) bd.behavior.personality.boundaries = config.boundaries;
|
|
203
|
+
if (config.style) bd.behavior.communication.style = config.style;
|
|
204
|
+
if (config.verbosity) bd.behavior.communication.verbosity = config.verbosity;
|
|
205
|
+
if (config.greeting_patterns) bd.behavior.interaction.greeting_patterns = config.greeting_patterns;
|
|
206
|
+
if (config.error_handling) bd.behavior.interaction.error_handling = config.error_handling;
|
|
207
|
+
} else if (action === "clear") {
|
|
208
|
+
bd.behavior = { personality: { traits: [], boundaries: [] }, communication: { style: "adaptive", verbosity: "balanced" }, interaction: { greeting_patterns: [], error_handling: [] } };
|
|
209
|
+
}
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
else if (layer === "skills") {
|
|
213
|
+
if (action === "add" && items) {
|
|
214
|
+
bd.skills.modules = bd.skills.modules || [];
|
|
215
|
+
for (const item of items) {
|
|
216
|
+
bd.skills.modules.push({ name: item.name, description: item.description || "", capabilities: item.capabilities || [] });
|
|
217
|
+
}
|
|
218
|
+
} else if (action === "clear") {
|
|
219
|
+
bd.skills = { modules: [], tool_mapping: [] };
|
|
220
|
+
}
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
else if (layer === "scenarios") {
|
|
224
|
+
if (action === "add" && items) {
|
|
225
|
+
bd.evaluation.scenarios = bd.evaluation.scenarios || [];
|
|
226
|
+
for (const item of items) {
|
|
227
|
+
bd.evaluation.scenarios.push({
|
|
228
|
+
input: item.input,
|
|
229
|
+
context: item.context || null,
|
|
230
|
+
expected: item.expected || null,
|
|
231
|
+
difficulty: item.difficulty || "medium",
|
|
232
|
+
category: item.category || "general",
|
|
233
|
+
});
|
|
234
|
+
}
|
|
235
|
+
} else if (action === "remove" && indices) {
|
|
236
|
+
bd.evaluation.scenarios = bd.evaluation.scenarios.filter((_, i) => !indices.includes(i));
|
|
237
|
+
} else if (action === "clear") {
|
|
238
|
+
bd.evaluation.scenarios = [];
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
// Save
|
|
243
|
+
const { error: updateErr } = await sb.from("bc_brains").update({
|
|
244
|
+
brain_data: bd,
|
|
245
|
+
updated_at: new Date().toISOString(),
|
|
246
|
+
}).eq("id", brain_id);
|
|
247
|
+
if (updateErr) throw updateErr;
|
|
248
|
+
|
|
249
|
+
// Count items per layer
|
|
250
|
+
const counts = {
|
|
251
|
+
knowledge: (bd.knowledge.concepts?.length || 0) + (bd.knowledge.facts?.length || 0) + (bd.knowledge.rules?.length || 0) + (bd.knowledge.glossary?.length || 0),
|
|
252
|
+
reasoning: Object.values(bd.reasoning).reduce((s, arr) => s + (arr?.length || 0), 0),
|
|
253
|
+
behavior: bd.behavior.personality.traits?.length || 0,
|
|
254
|
+
skills: bd.skills.modules?.length || 0,
|
|
255
|
+
scenarios: bd.evaluation.scenarios?.length || 0,
|
|
256
|
+
};
|
|
257
|
+
|
|
258
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
259
|
+
status: "updated", brain_id, layer, action, counts,
|
|
260
|
+
message: `Brain "${brain.name}" updated. ${layer} now has ${counts[layer]} items.`,
|
|
261
|
+
}, null, 2) }] };
|
|
262
|
+
} catch (err) {
|
|
263
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
);
|
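Note (illustrative, not part of the diff): to make the storage side of brain_build concrete, after the docstring's three example knowledge items are added, brain_data.knowledge would look roughly like this (sketch derived from the push calls above):

// brain_data.knowledge after the example brain_build call
{
  facts:    [{ content: "0nMCP has 819 tools across 54 services", source: "manual" }],
  rules:    [{ rule: "Never say GHL — always say CRM", priority: "normal" }],
  concepts: [{ name: "Radial Burst", definition: "Fan-out content to all channels simultaneously" }],
  glossary: [],
}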
|
267
|
+
|
|
268
|
+
// ─── brain_train ──────────────────────────────────────────
|
|
269
|
+
server.tool(
|
|
270
|
+
"brain_train",
|
|
271
|
+
`Run scenario-based training on a brain. Executes each scenario against
|
|
272
|
+
the brain's compiled prompt, evaluates responses against expected behavior,
|
|
273
|
+
and scores across multiple dimensions.
|
|
274
|
+
|
|
275
|
+
This generates training pairs that feed back into the 0nAI training pipeline.
|
|
276
|
+
|
|
277
|
+
Example: brain_train({ brain_id: "uuid" })
|
|
278
|
+
Example: brain_train({ brain_id: "uuid", scenarios: [0,1,2], provider: "anthropic" })`,
|
|
279
|
+
{
|
|
280
|
+
brain_id: z.string().describe("Brain to train"),
|
|
281
|
+
scenarios: z.array(z.number()).optional().describe("Specific scenario indices to run (default: all)"),
|
|
282
|
+
provider: z.enum(["local", "anthropic"]).optional().describe("LLM provider (default: local — no API cost)"),
|
|
283
|
+
},
|
|
284
|
+
async ({ brain_id, scenarios: scenarioIndices, provider }) => {
|
|
285
|
+
try {
|
|
286
|
+
const sb = await getSupabase();
|
|
287
|
+
const { data: brain } = await sb.from("bc_brains").select("*").eq("id", brain_id).single();
|
|
288
|
+
if (!brain) throw new Error("Brain not found");
|
|
289
|
+
|
|
290
|
+
const bd = brain.brain_data;
|
|
291
|
+
const allScenarios = bd.evaluation?.scenarios || [];
|
|
292
|
+
|
|
293
|
+
if (allScenarios.length === 0) {
|
|
294
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
295
|
+
status: "no_scenarios",
|
|
296
|
+
message: "No scenarios defined. Use brain_build to add scenarios first.",
|
|
297
|
+
}) }] };
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
const toRun = scenarioIndices
|
|
301
|
+
? allScenarios.filter((_, i) => scenarioIndices.includes(i))
|
|
302
|
+
: allScenarios;
|
|
303
|
+
|
|
304
|
+
// Compile the brain to a system prompt
|
|
305
|
+
const systemPrompt = compileBrainToPrompt(bd);
|
|
306
|
+
|
|
307
|
+
const results = [];
|
|
308
|
+
for (const scenario of toRun) {
|
|
309
|
+
let response;
|
|
310
|
+
let score;
|
|
311
|
+
|
|
312
|
+
if (provider === "anthropic" && process.env.ANTHROPIC_API_KEY) {
|
|
313
|
+
// Live API call — COSTS MONEY
|
|
314
|
+
try {
|
|
315
|
+
const { default: Anthropic } = await import("@anthropic-ai/sdk");
|
|
316
|
+
const client = new Anthropic();
|
|
317
|
+
const msg = await client.messages.create({
|
|
318
|
+
model: "claude-sonnet-4-20250514",
|
|
319
|
+
max_tokens: 1024,
|
|
320
|
+
system: systemPrompt,
|
|
321
|
+
messages: [{ role: "user", content: scenario.input }],
|
|
322
|
+
});
|
|
323
|
+
response = msg.content[0]?.text || "";
|
|
324
|
+
} catch (apiErr) {
|
|
325
|
+
response = `[API Error: ${apiErr.message}]`;
|
|
326
|
+
}
|
|
327
|
+
} else {
|
|
328
|
+
// Local evaluation — no API cost
|
|
329
|
+
// Score based on whether the prompt + scenario alignment makes sense
|
|
330
|
+
response = "[Local mode — no API call. Score based on prompt structure analysis.]";
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
// Score the response
|
|
334
|
+
score = scoreResponse(response, scenario, bd);
|
|
335
|
+
|
|
336
|
+
results.push({
|
|
337
|
+
input: scenario.input.substring(0, 100),
|
|
338
|
+
difficulty: scenario.difficulty,
|
|
339
|
+
score: score.overall,
|
|
340
|
+
dimensions: score.dimensions,
|
|
341
|
+
passed: score.overall >= 0.7,
|
|
342
|
+
});
|
|
343
|
+
|
|
344
|
+
// Feed back into training pipeline as a pair
|
|
345
|
+
if (response && response.length > 20 && !response.startsWith("[Local")) {
|
|
346
|
+
await sb.from("training_pairs").insert({
|
|
347
|
+
system_prompt: systemPrompt,
|
|
348
|
+
user_input: scenario.input,
|
|
349
|
+
assistant_output: response,
|
|
350
|
+
domain: brain.domain || "general",
|
|
351
|
+
difficulty: scenario.difficulty || "medium",
|
|
352
|
+
quality_score: score.overall,
|
|
353
|
+
tags: ["brain-training", brain.name],
|
|
354
|
+
metadata: { brain_id, brain_name: brain.name },
|
|
355
|
+
}).catch(() => {}); // Don't fail if training tables don't exist
|
|
356
|
+
}
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
const avgScore = results.length > 0
|
|
360
|
+
? Math.round(results.reduce((s, r) => s + r.score, 0) / results.length * 100) / 100
|
|
361
|
+
: 0;
|
|
362
|
+
|
|
363
|
+
const passed = results.filter(r => r.passed).length;
|
|
364
|
+
|
|
365
|
+
// Update brain score
|
|
366
|
+
bd.training.iterations = bd.training.iterations || [];
|
|
367
|
+
bd.training.iterations.push({
|
|
368
|
+
date: new Date().toISOString(),
|
|
369
|
+
scenarios_run: results.length,
|
|
370
|
+
avg_score: avgScore,
|
|
371
|
+
passed,
|
|
372
|
+
failed: results.length - passed,
|
|
373
|
+
});
|
|
374
|
+
bd.training.final_score = avgScore;
|
|
375
|
+
|
|
376
|
+
await sb.from("bc_brains").update({
|
|
377
|
+
brain_data: bd,
|
|
378
|
+
training_score: avgScore,
|
|
379
|
+
status: avgScore >= 0.85 ? "trained" : "training",
|
|
380
|
+
updated_at: new Date().toISOString(),
|
|
381
|
+
}).eq("id", brain_id);
|
|
382
|
+
|
|
383
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
384
|
+
status: "trained",
|
|
385
|
+
brain: brain.name,
|
|
386
|
+
scenarios_run: results.length,
|
|
387
|
+
passed,
|
|
388
|
+
failed: results.length - passed,
|
|
389
|
+
avg_score: avgScore,
|
|
390
|
+
threshold: 0.85,
|
|
391
|
+
verdict: avgScore >= 0.85 ? "READY TO COMPILE" : "NEEDS MORE TRAINING",
|
|
392
|
+
results,
|
|
393
|
+
message: avgScore >= 0.85
|
|
394
|
+
? `Brain "${brain.name}" passed! Use brain_compile to package it.`
|
|
395
|
+
: `Brain "${brain.name}" scored ${avgScore}. Add more knowledge/reasoning and retrain.`,
|
|
396
|
+
}, null, 2) }] };
|
|
397
|
+
} catch (err) {
|
|
398
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
399
|
+
}
|
|
400
|
+
}
|
|
401
|
+
);
|
|
402
|
+
|
|
403
|
+
// ─── brain_compile ────────────────────────────────────────
|
|
404
|
+
server.tool(
|
|
405
|
+
"brain_compile",
|
|
406
|
+
`Compile a trained brain into a portable .brain file.
|
|
407
|
+
The .brain file can be imported into any app on any LLM.
|
|
408
|
+
|
|
409
|
+
Also generates the system prompt version for direct use.
|
|
410
|
+
|
|
411
|
+
Example: brain_compile({ brain_id: "uuid" })
|
|
412
|
+
Example: brain_compile({ brain_id: "uuid", output: "prompt" }) — just get the prompt`,
|
|
413
|
+
{
|
|
414
|
+
brain_id: z.string().describe("Brain to compile"),
|
|
415
|
+
version: z.string().optional().describe("Version tag (default: auto-increment)"),
|
|
416
|
+
output: z.enum(["file", "prompt", "both"]).optional().describe("Output format (default: both)"),
|
|
417
|
+
},
|
|
418
|
+
async ({ brain_id, version, output }) => {
|
|
419
|
+
try {
|
|
420
|
+
const sb = await getSupabase();
|
|
421
|
+
const { data: brain } = await sb.from("bc_brains").select("*").eq("id", brain_id).single();
|
|
422
|
+
if (!brain) throw new Error("Brain not found");
|
|
423
|
+
|
|
424
|
+
const bd = brain.brain_data;
|
|
425
|
+
const outputMode = output || "both";
|
|
426
|
+
|
|
427
|
+
// Generate system prompt
|
|
428
|
+
const systemPrompt = compileBrainToPrompt(bd);
|
|
429
|
+
|
|
430
|
+
// Build .brain file
|
|
431
|
+
const brainFile = {
|
|
432
|
+
$brain: {
|
|
433
|
+
format: BRAIN_FORMAT,
|
|
434
|
+
version: BRAIN_VERSION,
|
|
435
|
+
compiled: new Date().toISOString(),
|
|
436
|
+
compiler: "0nAI Brain Engine v1.0.0",
|
|
437
|
+
},
|
|
438
|
+
identity: bd.identity,
|
|
439
|
+
knowledge: bd.knowledge,
|
|
440
|
+
reasoning: bd.reasoning,
|
|
441
|
+
behavior: bd.behavior,
|
|
442
|
+
skills: bd.skills,
|
|
443
|
+
evaluation: {
|
|
444
|
+
scenarios_count: bd.evaluation?.scenarios?.length || 0,
|
|
445
|
+
final_score: bd.training?.final_score || 0,
|
|
446
|
+
training_iterations: bd.training?.iterations?.length || 0,
|
|
447
|
+
},
|
|
448
|
+
prompt: systemPrompt,
|
|
449
|
+
checksum: null,
|
|
450
|
+
};
|
|
451
|
+
|
|
452
|
+
// Compute checksum
|
|
453
|
+
const content = JSON.stringify(brainFile);
|
|
454
|
+
brainFile.checksum = createHash("sha256").update(content).digest("hex");
|
|
455
|
+
|
|
456
|
+
const result = { status: "compiled", brain: brain.name, version: version || brain.version };
|
|
457
|
+
|
|
458
|
+
// Save .brain file
|
|
459
|
+
if (outputMode === "file" || outputMode === "both") {
|
|
460
|
+
if (!existsSync(BRAINS_DIR)) mkdirSync(BRAINS_DIR, { recursive: true });
|
|
461
|
+
const filename = `${brain.name.toLowerCase().replace(/\s+/g, "-")}-v${version || brain.version}.brain`;
|
|
462
|
+
const filePath = join(BRAINS_DIR, filename);
|
|
463
|
+
writeFileSync(filePath, JSON.stringify(brainFile, null, 2));
|
|
464
|
+
result.file = filePath;
|
|
465
|
+
result.size_bytes = Buffer.byteLength(JSON.stringify(brainFile));
|
|
466
|
+
}
|
|
467
|
+
|
|
468
|
+
// Return prompt
|
|
469
|
+
if (outputMode === "prompt" || outputMode === "both") {
|
|
470
|
+
result.prompt = systemPrompt;
|
|
471
|
+
result.prompt_tokens = Math.ceil(systemPrompt.length / 4);
|
|
472
|
+
}
|
|
473
|
+
|
|
474
|
+
result.checksum = brainFile.checksum;
|
|
475
|
+
result.message = `Brain "${brain.name}" compiled. ${outputMode === "prompt" ? "Prompt ready." : `Saved to ${result.file}`}`;
|
|
476
|
+
|
|
477
|
+
// Update DB
|
|
478
|
+
const newVer = version || incrementVersion(brain.version);
|
|
479
|
+
await sb.from("bc_brains").update({
|
|
480
|
+
version: newVer,
|
|
481
|
+
status: "compiled",
|
|
482
|
+
updated_at: new Date().toISOString(),
|
|
483
|
+
}).eq("id", brain_id);
|
|
484
|
+
|
|
485
|
+
return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] };
|
|
486
|
+
} catch (err) {
|
|
487
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
488
|
+
}
|
|
489
|
+
}
|
|
490
|
+
);
|
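Note (illustrative, not part of the diff): the object written to ~/.0n/brains/<name>-v<version>.brain has this shape, abridged from the brainFile construction above; the concrete values (date, score, checksum) are placeholders, not real output:

{
  "$brain": { "format": "0nai-brain", "version": "1.0.0", "compiled": "<ISO timestamp>", "compiler": "0nAI Brain Engine v1.0.0" },
  "identity": { "name": "CRM Expert", "domain": "crm", "description": "...", "author": "RocketOpp", "trained_by": "0nAI" },
  "knowledge": { "concepts": [], "facts": [], "rules": [], "glossary": [] },
  "reasoning": { "decomposition": [], "decision_making": [], "question_asking": [] },
  "behavior": { "personality": { "traits": [], "boundaries": [] }, "communication": {}, "interaction": {} },
  "skills": { "modules": [], "tool_mapping": [] },
  "evaluation": { "scenarios_count": 12, "final_score": 0.87, "training_iterations": 3 },
  "prompt": "# CRM Expert\n\nYou are CRM Expert. ...",
  "checksum": "<sha256 of the file serialized while checksum was still null>"
}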
|
491
|
+
|
|
492
|
+
// ─── brain_import ─────────────────────────────────────────
|
|
493
|
+
server.tool(
|
|
494
|
+
"brain_import",
|
|
495
|
+
`Import a .brain file and extract its system prompt for use in any LLM.
|
|
496
|
+
Can import from file path or raw JSON.
|
|
497
|
+
|
|
498
|
+
Example: brain_import({ path: "~/.0n/brains/crm-expert-v1.0.0.brain" })
|
|
499
|
+
Example: brain_import({ brain_data: {...} })`,
|
|
500
|
+
{
|
|
501
|
+
path: z.string().optional().describe("Path to .brain file"),
|
|
502
|
+
brain_data: z.record(z.any()).optional().describe("Raw brain data JSON"),
|
|
503
|
+
target_llm: z.enum(["claude", "openai", "gemini", "grok", "generic"]).optional().describe("Target LLM format (default: claude)"),
|
|
504
|
+
},
|
|
505
|
+
async ({ path, brain_data, target_llm }) => {
|
|
506
|
+
try {
|
|
507
|
+
let brain;
|
|
508
|
+
if (path) {
|
|
509
|
+
const resolvedPath = path.replace("~", homedir());
|
|
510
|
+
if (!existsSync(resolvedPath)) throw new Error(`File not found: ${resolvedPath}`);
|
|
511
|
+
brain = JSON.parse(readFileSync(resolvedPath, "utf-8"));
|
|
512
|
+
} else if (brain_data) {
|
|
513
|
+
brain = brain_data;
|
|
514
|
+
} else {
|
|
515
|
+
throw new Error("Provide path or brain_data");
|
|
516
|
+
}
|
|
517
|
+
|
|
518
|
+
// Validate format
|
|
519
|
+
if (!brain.$brain && !brain.identity) throw new Error("Invalid .brain file format");
|
|
520
|
+
|
|
521
|
+
// Extract or regenerate prompt
|
|
522
|
+
let prompt = brain.prompt;
|
|
523
|
+
if (!prompt) {
|
|
524
|
+
prompt = compileBrainToPrompt(brain);
|
|
525
|
+
}
|
|
526
|
+
|
|
527
|
+
// Format for target LLM
|
|
528
|
+
const llm = target_llm || "claude";
|
|
529
|
+
let formatted;
|
|
530
|
+
if (llm === "openai") {
|
|
531
|
+
formatted = { role: "system", content: prompt };
|
|
532
|
+
} else if (llm === "gemini") {
|
|
533
|
+
formatted = { system_instruction: { parts: [{ text: prompt }] } };
|
|
534
|
+
} else {
|
|
535
|
+
formatted = { system: prompt };
|
|
536
|
+
}
|
|
537
|
+
|
|
538
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
539
|
+
status: "imported",
|
|
540
|
+
name: brain.identity?.name || "Unknown",
|
|
541
|
+
domain: brain.identity?.domain || "general",
|
|
542
|
+
score: brain.evaluation?.final_score || brain.training?.final_score || 0,
|
|
543
|
+
prompt_length: prompt.length,
|
|
544
|
+
prompt_tokens: Math.ceil(prompt.length / 4),
|
|
545
|
+
target_llm: llm,
|
|
546
|
+
formatted,
|
|
547
|
+
prompt,
|
|
548
|
+
message: `Brain "${brain.identity?.name}" imported. Ready to use with ${llm}.`,
|
|
549
|
+
}, null, 2) }] };
|
|
550
|
+
} catch (err) {
|
|
551
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
552
|
+
}
|
|
553
|
+
}
|
|
554
|
+
);
|
|
555
|
+
|
|
556
|
+
// ─── brain_list ───────────────────────────────────────────
|
|
557
|
+
server.tool(
|
|
558
|
+
"brain_list",
|
|
559
|
+
`List all brain projects with their status and training scores.
|
|
560
|
+
|
|
561
|
+
Example: brain_list({})
|
|
562
|
+
Example: brain_list({ domain: "crm" })
|
|
563
|
+
Example: brain_list({ status: "trained" })`,
|
|
564
|
+
{
|
|
565
|
+
domain: z.string().optional().describe("Filter by domain"),
|
|
566
|
+
status: z.enum(["draft", "training", "trained", "compiled", "published"]).optional().describe("Filter by status"),
|
|
567
|
+
},
|
|
568
|
+
async ({ domain, status }) => {
|
|
569
|
+
try {
|
|
570
|
+
const sb = await getSupabase();
|
|
571
|
+
let query = sb.from("bc_brains").select("id, name, domain, description, status, version, training_score, created_at, updated_at")
|
|
572
|
+
.order("updated_at", { ascending: false });
|
|
573
|
+
if (domain) query = query.eq("domain", domain);
|
|
574
|
+
if (status) query = query.eq("status", status);
|
|
575
|
+
|
|
576
|
+
const { data, error } = await query;
|
|
577
|
+
if (error) throw error;
|
|
578
|
+
|
|
579
|
+
// Also check local .brain files
|
|
580
|
+
let localFiles = [];
|
|
581
|
+
if (existsSync(BRAINS_DIR)) {
|
|
582
|
+
const { readdirSync } = await import("fs");
|
|
583
|
+
localFiles = readdirSync(BRAINS_DIR).filter(f => f.endsWith(".brain")).map(f => ({
|
|
584
|
+
file: f,
|
|
585
|
+
path: join(BRAINS_DIR, f),
|
|
586
|
+
}));
|
|
587
|
+
}
|
|
588
|
+
|
|
589
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
590
|
+
status: "ok",
|
|
591
|
+
count: (data || []).length,
|
|
592
|
+
brains: data || [],
|
|
593
|
+
local_files: localFiles,
|
|
594
|
+
message: `${(data || []).length} brain(s) in database, ${localFiles.length} compiled .brain file(s) in ~/.0n/brains/`,
|
|
595
|
+
}, null, 2) }] };
|
|
596
|
+
} catch (err) {
|
|
597
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
598
|
+
}
|
|
599
|
+
}
|
|
600
|
+
);
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
// ── Brain-to-Prompt Compiler ──────────────────────────────
|
|
604
|
+
|
|
605
|
+
/**
|
|
606
|
+
* Compile brain data into a system prompt for any LLM.
|
|
607
|
+
*/
|
|
608
|
+
function compileBrainToPrompt(bd) {
|
|
609
|
+
const sections = [];
|
|
610
|
+
|
|
611
|
+
// Identity
|
|
612
|
+
if (bd.identity) {
|
|
613
|
+
sections.push(`# ${bd.identity.name}\n\nYou are ${bd.identity.name}. ${bd.identity.description || ""}\nDomain: ${bd.identity.domain || "general"}`);
|
|
614
|
+
}
|
|
615
|
+
|
|
616
|
+
// Knowledge — Rules (highest priority)
|
|
617
|
+
if (bd.knowledge?.rules?.length) {
|
|
618
|
+
sections.push("## Rules\n\n" + bd.knowledge.rules.map((r, i) => `${i + 1}. ${r.rule || r.content}`).join("\n"));
|
|
619
|
+
}
|
|
620
|
+
|
|
621
|
+
// Knowledge — Core concepts
|
|
622
|
+
if (bd.knowledge?.concepts?.length) {
|
|
623
|
+
sections.push("## Core Concepts\n\n" + bd.knowledge.concepts.map(c =>
|
|
624
|
+
`**${c.name}**: ${c.definition}`
|
|
625
|
+
).join("\n\n"));
|
|
626
|
+
}
|
|
627
|
+
|
|
628
|
+
// Knowledge — Facts
|
|
629
|
+
if (bd.knowledge?.facts?.length) {
|
|
630
|
+
sections.push("## Key Facts\n\n" + bd.knowledge.facts.map(f => `- ${f.content}`).join("\n"));
|
|
631
|
+
}
|
|
632
|
+
|
|
633
|
+
// Knowledge — Glossary
|
|
634
|
+
if (bd.knowledge?.glossary?.length) {
|
|
635
|
+
sections.push("## Glossary\n\n" + bd.knowledge.glossary.map(t => `- **${t.term}**: ${t.definition}`).join("\n"));
|
|
636
|
+
}
|
|
637
|
+
|
|
638
|
+
// Reasoning patterns
|
|
639
|
+
const allPatterns = [
|
|
640
|
+
...(bd.reasoning?.decomposition || []).map(p => ({ ...p, cat: "Decomposition" })),
|
|
641
|
+
...(bd.reasoning?.decision_making || []).map(p => ({ ...p, cat: "Decision Making" })),
|
|
642
|
+
...(bd.reasoning?.question_asking || []).map(p => ({ ...p, cat: "Question Asking" })),
|
|
643
|
+
];
|
|
644
|
+
if (allPatterns.length) {
|
|
645
|
+
sections.push("## Reasoning Patterns\n\n" + allPatterns.map(p =>
|
|
646
|
+
`### ${p.cat}\n${p.pattern || p.content}${p.when ? `\n**When:** ${p.when}` : ""}${p.example ? `\n**Example:** ${p.example}` : ""}`
|
|
647
|
+
).join("\n\n"));
|
|
648
|
+
}
|
|
649
|
+
|
|
650
|
+
// Behavior
|
|
651
|
+
if (bd.behavior?.personality?.traits?.length) {
|
|
652
|
+
sections.push("## Behavior\n\n" +
|
|
653
|
+
`**Traits:** ${bd.behavior.personality.traits.join(", ")}\n` +
|
|
654
|
+
`**Style:** ${bd.behavior.communication?.style || "adaptive"}\n` +
|
|
655
|
+
`**Verbosity:** ${bd.behavior.communication?.verbosity || "balanced"}`
|
|
656
|
+
);
|
|
657
|
+
if (bd.behavior.personality.boundaries?.length) {
|
|
658
|
+
sections.push("**Boundaries:**\n" + bd.behavior.personality.boundaries.map(b => `- ${b}`).join("\n"));
|
|
659
|
+
}
|
|
660
|
+
}
|
|
661
|
+
|
|
662
|
+
// Skills
|
|
663
|
+
if (bd.skills?.modules?.length) {
|
|
664
|
+
sections.push("## Skills\n\n" + bd.skills.modules.map(s =>
|
|
665
|
+
`### ${s.name}\n${s.description}${s.capabilities?.length ? "\n- " + s.capabilities.join("\n- ") : ""}`
|
|
666
|
+
).join("\n\n"));
|
|
667
|
+
}
|
|
668
|
+
|
|
669
|
+
return sections.join("\n\n---\n\n");
|
|
670
|
+
}
|
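Note (illustrative, not part of the diff): compileBrainToPrompt simply joins markdown sections with "---" separators, so a brain holding only the docstring examples (one rule, one fact) compiles to roughly:

# CRM Expert

You are CRM Expert. Knows all 245 CRM tools
Domain: crm

---

## Rules

1. Never say GHL — always say CRM

---

## Key Facts

- 0nMCP has 819 tools across 54 services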
|
671
|
+
|
|
672
|
+
// ── Response Scoring ──────────────────────────────────────
|
|
673
|
+
|
|
674
|
+
/**
|
|
675
|
+
* Score a response against a scenario and brain config.
|
|
676
|
+
*/
|
|
677
|
+
function scoreResponse(response, scenario, bd) {
|
|
678
|
+
const dimensions = {};
|
|
679
|
+
let total = 0;
|
|
680
|
+
let count = 0;
|
|
681
|
+
|
|
682
|
+
// Relevance — does the response address the input?
|
|
683
|
+
const inputWords = new Set(scenario.input.toLowerCase().split(/\s+/).filter(w => w.length > 3));
|
|
684
|
+
const responseWords = new Set(response.toLowerCase().split(/\s+/).filter(w => w.length > 3));
|
|
685
|
+
let overlap = 0;
|
|
686
|
+
for (const w of inputWords) if (responseWords.has(w)) overlap++;
|
|
687
|
+
dimensions.relevance = inputWords.size > 0 ? Math.min(1, overlap / inputWords.size + 0.3) : 0.5;
|
|
688
|
+
total += dimensions.relevance; count++;
|
|
689
|
+
|
|
690
|
+
// Rule compliance — check against brain rules
|
|
691
|
+
const rules = bd.knowledge?.rules || [];
|
|
692
|
+
let ruleScore = 1.0;
|
|
693
|
+
for (const rule of rules) {
|
|
694
|
+
const ruleText = (rule.rule || rule.content || "").toLowerCase();
|
|
695
|
+
// Check for "never" rules
|
|
696
|
+
if (ruleText.includes("never")) {
|
|
697
|
+
const forbidden = ruleText.replace(/never\s+(say|use|mention)\s+/i, "").replace(/['"]/g, "").trim().split(/\s*[,—-]\s*/);
|
|
698
|
+
for (const word of forbidden) {
|
|
699
|
+
if (word.length > 2 && response.toLowerCase().includes(word.toLowerCase())) {
|
|
700
|
+
ruleScore -= 0.3;
|
|
701
|
+
}
|
|
702
|
+
}
|
|
703
|
+
}
|
|
704
|
+
}
|
|
705
|
+
dimensions.rule_compliance = Math.max(0, ruleScore);
|
|
706
|
+
total += dimensions.rule_compliance; count++;
|
|
707
|
+
|
|
708
|
+
// Completeness — response length relative to expected
|
|
709
|
+
if (response.length > 50) dimensions.completeness = Math.min(1, response.length / 200);
|
|
710
|
+
else dimensions.completeness = 0.3;
|
|
711
|
+
total += dimensions.completeness; count++;
|
|
712
|
+
|
|
713
|
+
// Structure — has formatting (headers, lists, code blocks)
|
|
714
|
+
let structureScore = 0.4;
|
|
715
|
+
if (response.includes("- ") || response.includes("* ")) structureScore += 0.2;
|
|
716
|
+
if (response.includes("**") || response.includes("##")) structureScore += 0.2;
|
|
717
|
+
if (response.includes("```")) structureScore += 0.2;
|
|
718
|
+
dimensions.structure = Math.min(1, structureScore);
|
|
719
|
+
total += dimensions.structure; count++;
|
|
720
|
+
|
|
721
|
+
// Tone — matches brain personality
|
|
722
|
+
const traits = bd.behavior?.personality?.traits || [];
|
|
723
|
+
dimensions.tone = traits.length > 0 ? 0.7 : 0.5; // baseline
|
|
724
|
+
total += dimensions.tone; count++;
|
|
725
|
+
|
|
726
|
+
const overall = count > 0 ? Math.round(total / count * 100) / 100 : 0;
|
|
727
|
+
|
|
728
|
+
return { overall, dimensions };
|
|
729
|
+
}
|
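Note (illustrative, not part of the diff): a worked example of how scoreResponse aggregates, with assumed dimension values. The scenario passes (overall ≥ 0.7), but a brain whose scenarios average 0.74 stays in "training" status because brain_train only reports READY TO COMPILE at an average of 0.85 or above.

const dimensions = { relevance: 0.8, rule_compliance: 1.0, completeness: 0.6, structure: 0.6, tone: 0.7 };
const overall = Object.values(dimensions).reduce((s, v) => s + v, 0) / 5;   // 0.74 → scenario passes (>= 0.7)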
|
730
|
+
|
|
731
|
+
// ── Helpers ───────────────────────────────────────────────
|
|
732
|
+
|
|
733
|
+
function incrementVersion(ver) {
|
|
734
|
+
if (!ver) return "0.1.0";
|
|
735
|
+
const parts = ver.split(".").map(Number);
|
|
736
|
+
parts[2] = (parts[2] || 0) + 1;
|
|
737
|
+
return parts.join(".");
|
|
738
|
+
}
|
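Note (illustrative, not part of the diff): incrementVersion only bumps the patch component, which follows directly from the code above:

incrementVersion("0.1.0");   // → "0.1.1"
incrementVersion("1.2");     // → "1.2.1" (missing patch treated as 0)
incrementVersion(undefined); // → "0.1.0"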
package/engine/local-ai.js
ADDED
@@ -0,0 +1,439 @@
+// ============================================================
+// 0nMCP — Local AI Engine (0nAI ↔ Ollama/Llama)
+// ============================================================
+// Direct interface to local Llama models via Ollama.
+// Zero cost, fully private, no data leaves the machine.
+//
+// 5 MCP Tools:
+//   ai_chat     — Chat with local Llama (with optional brain)
+//   ai_generate — One-shot text generation
+//   ai_models   — List/pull/manage local models
+//   ai_embed    — Create embeddings locally
+//   ai_bench    — Benchmark a prompt across local + cloud models
+//
+// Requires: Ollama running at localhost:11434
+// ============================================================
+
+const OLLAMA_BASE = process.env.OLLAMA_URL || "http://localhost:11434";
+const DEFAULT_MODEL = process.env.OLLAMA_MODEL || "llama3.1";
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* Register local AI tools on an MCP server instance.
|
|
22
|
+
*
|
|
23
|
+
* @param {import("@modelcontextprotocol/sdk/server/mcp.js").McpServer} server
|
|
24
|
+
* @param {import("zod").ZodType} z
|
|
25
|
+
*/
|
|
26
|
+
export function registerLocalAITools(server, z) {
|
|
27
|
+
|
|
28
|
+
// ─── ai_chat ──────────────────────────────────────────────
|
|
29
|
+
server.tool(
|
|
30
|
+
"ai_chat",
|
|
31
|
+
`Chat with your local Llama model. Zero cost, fully private.
|
|
32
|
+
Supports multi-turn conversation, system prompts, and .brain file loading.
|
|
33
|
+
|
|
34
|
+
Example: ai_chat({ message: "Explain MCP servers in simple terms" })
|
|
35
|
+
Example: ai_chat({ message: "What is 0nMCP?", system: "You are a tech expert" })
|
|
36
|
+
Example: ai_chat({ message: "Score this content", brain: "~/.0n/brains/sxo-writer.brain" })
|
|
37
|
+
Example: ai_chat({ message: "Continue from before", history: [...previous messages...] })
|
|
38
|
+
Example: ai_chat({ message: "Hello", model: "mistral" })`,
|
|
39
|
+
{
|
|
40
|
+
message: z.string().describe("Your message to the AI"),
|
|
41
|
+
system: z.string().optional().describe("System prompt (or use brain parameter)"),
|
|
42
|
+
brain: z.string().optional().describe("Path to .brain file to load as system prompt"),
|
|
43
|
+
model: z.string().optional().describe(`Model to use (default: ${DEFAULT_MODEL})`),
|
|
44
|
+
history: z.array(z.object({
|
|
45
|
+
role: z.enum(["user", "assistant", "system"]),
|
|
46
|
+
content: z.string(),
|
|
47
|
+
})).optional().describe("Previous conversation messages for context"),
|
|
48
|
+
temperature: z.number().optional().describe("Creativity (0-2, default: 0.7)"),
|
|
49
|
+
max_tokens: z.number().optional().describe("Max response length (default: 2048)"),
|
|
50
|
+
},
|
|
51
|
+
async ({ message, system, brain, model, history, temperature, max_tokens }) => {
|
|
52
|
+
try {
|
|
53
|
+
// Load brain file as system prompt if provided
|
|
54
|
+
let systemPrompt = system || "";
|
|
55
|
+
if (brain) {
|
|
56
|
+
const { readFileSync, existsSync } = await import("fs");
|
|
57
|
+
const { homedir } = await import("os");
|
|
58
|
+
const resolvedPath = brain.replace("~", homedir());
|
|
59
|
+
if (existsSync(resolvedPath)) {
|
|
60
|
+
const brainData = JSON.parse(readFileSync(resolvedPath, "utf-8"));
|
|
61
|
+
systemPrompt = brainData.prompt || "";
|
|
62
|
+
if (!systemPrompt && brainData.identity) {
|
|
63
|
+
// Compile on the fly
|
|
64
|
+
const { compileBrainToPrompt } = await import("./brain.js").catch(() => ({}));
|
|
65
|
+
if (compileBrainToPrompt) systemPrompt = compileBrainToPrompt(brainData);
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// Build messages
|
|
71
|
+
const messages = [];
|
|
72
|
+
if (systemPrompt) messages.push({ role: "system", content: systemPrompt });
|
|
73
|
+
if (history) messages.push(...history);
|
|
74
|
+
messages.push({ role: "user", content: message });
|
|
75
|
+
|
|
76
|
+
const startTime = Date.now();
|
|
77
|
+
const res = await fetch(`${OLLAMA_BASE}/api/chat`, {
|
|
78
|
+
method: "POST",
|
|
79
|
+
headers: { "Content-Type": "application/json" },
|
|
80
|
+
body: JSON.stringify({
|
|
81
|
+
model: model || DEFAULT_MODEL,
|
|
82
|
+
messages,
|
|
83
|
+
stream: false,
|
|
84
|
+
options: {
|
|
85
|
+
temperature: temperature ?? 0.7,
|
|
86
|
+
num_predict: max_tokens || 2048,
|
|
87
|
+
},
|
|
88
|
+
}),
|
|
89
|
+
});
|
|
90
|
+
|
|
91
|
+
if (!res.ok) {
|
|
92
|
+
const errText = await res.text();
|
|
93
|
+
throw new Error(`Ollama error ${res.status}: ${errText}`);
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
const data = await res.json();
|
|
97
|
+
const duration = Date.now() - startTime;
|
|
98
|
+
const response = data.message?.content || "";
|
|
99
|
+
|
|
100
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
101
|
+
status: "ok",
|
|
102
|
+
model: data.model || model || DEFAULT_MODEL,
|
|
103
|
+
response,
|
|
104
|
+
stats: {
|
|
105
|
+
duration_ms: duration,
|
|
106
|
+
eval_count: data.eval_count,
|
|
107
|
+
eval_duration_ms: data.eval_duration ? Math.round(data.eval_duration / 1e6) : null,
|
|
108
|
+
tokens_per_second: data.eval_count && data.eval_duration
|
|
109
|
+
? Math.round(data.eval_count / (data.eval_duration / 1e9) * 10) / 10
|
|
110
|
+
: null,
|
|
111
|
+
prompt_tokens: data.prompt_eval_count,
|
|
112
|
+
total_duration_ms: data.total_duration ? Math.round(data.total_duration / 1e6) : duration,
|
|
113
|
+
},
|
|
114
|
+
provider: "ollama-local",
|
|
115
|
+
cost: "$0.00",
|
|
116
|
+
}, null, 2) }] };
|
|
117
|
+
} catch (err) {
|
|
118
|
+
if (err.message?.includes("ECONNREFUSED")) {
|
|
119
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
120
|
+
status: "offline",
|
|
121
|
+
error: "Ollama is not running. Start it with: ollama serve",
|
|
122
|
+
help: "Install: https://ollama.com | Pull a model: ollama pull llama3.1",
|
|
123
|
+
}) }] };
|
|
124
|
+
}
|
|
125
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
);
|
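Note (illustrative, not part of the diff): ai_chat assembles the request in a fixed order — the brain prompt (or system) first, then any history, then the new user message — so a call supplying both sends roughly this array to /api/chat (brainPrompt is a hypothetical variable name for the .brain file's prompt field):

const messages = [
  { role: "system", content: brainPrompt },
  ...history,                                 // prior turns, e.g. [{ role: "user", ... }, { role: "assistant", ... }]
  { role: "user", content: message },
];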
|
129
|
+
|
|
130
|
+
// ─── ai_generate ──────────────────────────────────────────
|
|
131
|
+
server.tool(
|
|
132
|
+
"ai_generate",
|
|
133
|
+
`One-shot text generation with local Llama. No conversation context.
|
|
134
|
+
Good for: summarization, code generation, analysis, content creation.
|
|
135
|
+
|
|
136
|
+
Example: ai_generate({ prompt: "Write a Python function to sort a list" })
|
|
137
|
+
Example: ai_generate({ prompt: "Summarize this: ...", model: "llama3.1" })`,
|
|
138
|
+
{
|
|
139
|
+
prompt: z.string().describe("The prompt to generate from"),
|
|
140
|
+
model: z.string().optional().describe(`Model (default: ${DEFAULT_MODEL})`),
|
|
141
|
+
system: z.string().optional().describe("System prompt"),
|
|
142
|
+
temperature: z.number().optional().describe("Creativity (0-2, default: 0.7)"),
|
|
143
|
+
max_tokens: z.number().optional().describe("Max tokens (default: 2048)"),
|
|
144
|
+
format: z.enum(["text", "json"]).optional().describe("Response format"),
|
|
145
|
+
},
|
|
146
|
+
async ({ prompt, model, system, temperature, max_tokens, format }) => {
|
|
147
|
+
try {
|
|
148
|
+
const startTime = Date.now();
|
|
149
|
+
const body = {
|
|
150
|
+
model: model || DEFAULT_MODEL,
|
|
151
|
+
prompt,
|
|
152
|
+
stream: false,
|
|
153
|
+
options: {
|
|
154
|
+
temperature: temperature ?? 0.7,
|
|
155
|
+
num_predict: max_tokens || 2048,
|
|
156
|
+
},
|
|
157
|
+
};
|
|
158
|
+
if (system) body.system = system;
|
|
159
|
+
if (format === "json") body.format = "json";
|
|
160
|
+
|
|
161
|
+
const res = await fetch(`${OLLAMA_BASE}/api/generate`, {
|
|
162
|
+
method: "POST",
|
|
163
|
+
headers: { "Content-Type": "application/json" },
|
|
164
|
+
body: JSON.stringify(body),
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
if (!res.ok) throw new Error(`Ollama error ${res.status}: ${await res.text()}`);
|
|
168
|
+
|
|
169
|
+
const data = await res.json();
|
|
170
|
+
const duration = Date.now() - startTime;
|
|
171
|
+
|
|
172
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
173
|
+
status: "ok",
|
|
174
|
+
model: data.model || model || DEFAULT_MODEL,
|
|
175
|
+
response: data.response || "",
|
|
176
|
+
stats: {
|
|
177
|
+
duration_ms: duration,
|
|
178
|
+
eval_count: data.eval_count,
|
|
179
|
+
tokens_per_second: data.eval_count && data.eval_duration
|
|
180
|
+
? Math.round(data.eval_count / (data.eval_duration / 1e9) * 10) / 10
|
|
181
|
+
: null,
|
|
182
|
+
},
|
|
183
|
+
cost: "$0.00",
|
|
184
|
+
}, null, 2) }] };
|
|
185
|
+
} catch (err) {
|
|
186
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
);
|
|
190
|
+
|
|
191
|
+
// ─── ai_models ────────────────────────────────────────────
|
|
192
|
+
server.tool(
|
|
193
|
+
"ai_models",
|
|
194
|
+
`Manage local AI models. List installed, pull new ones, get model info.
|
|
195
|
+
|
|
196
|
+
Example: ai_models({ action: "list" })
|
|
197
|
+
Example: ai_models({ action: "pull", name: "mistral" })
|
|
198
|
+
Example: ai_models({ action: "info", name: "llama3.1" })
|
|
199
|
+
Example: ai_models({ action: "running" })`,
|
|
200
|
+
{
|
|
201
|
+
action: z.enum(["list", "pull", "info", "delete", "running"]).describe("Action to perform"),
|
|
202
|
+
name: z.string().optional().describe("Model name (for pull/info/delete)"),
|
|
203
|
+
},
|
|
204
|
+
async ({ action, name }) => {
|
|
205
|
+
try {
|
|
206
|
+
switch (action) {
|
|
207
|
+
case "list": {
|
|
208
|
+
const res = await fetch(`${OLLAMA_BASE}/api/tags`);
|
|
209
|
+
const data = await res.json();
|
|
210
|
+
const models = (data.models || []).map(m => ({
|
|
211
|
+
name: m.name,
|
|
212
|
+
size_gb: Math.round(m.size / 1e9 * 10) / 10,
|
|
213
|
+
modified: m.modified_at,
|
|
214
|
+
family: m.details?.family,
|
|
215
|
+
parameters: m.details?.parameter_size,
|
|
216
|
+
quantization: m.details?.quantization_level,
|
|
217
|
+
}));
|
|
218
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
219
|
+
status: "ok", count: models.length, models,
|
|
220
|
+
message: models.length === 0
|
|
221
|
+
? "No models installed. Pull one with: ai_models({ action: 'pull', name: 'llama3.1' })"
|
|
222
|
+
: `${models.length} model(s) installed locally.`,
|
|
223
|
+
}, null, 2) }] };
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
case "running": {
|
|
227
|
+
const res = await fetch(`${OLLAMA_BASE}/api/ps`);
|
|
228
|
+
const data = await res.json();
|
|
229
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
230
|
+
status: "ok",
|
|
231
|
+
running: (data.models || []).map(m => ({
|
|
232
|
+
name: m.name, size_gb: Math.round(m.size / 1e9 * 10) / 10,
|
|
233
|
+
expires: m.expires_at,
|
|
234
|
+
})),
|
|
235
|
+
}, null, 2) }] };
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
case "info": {
|
|
239
|
+
if (!name) throw new Error("Model name required");
|
|
240
|
+
const res = await fetch(`${OLLAMA_BASE}/api/show`, {
|
|
241
|
+
method: "POST",
|
|
242
|
+
headers: { "Content-Type": "application/json" },
|
|
243
|
+
body: JSON.stringify({ name }),
|
|
244
|
+
});
|
|
245
|
+
const data = await res.json();
|
|
246
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
247
|
+
status: "ok", model: name,
|
|
248
|
+
details: data.details,
|
|
249
|
+
parameters: data.model_info,
|
|
250
|
+
template: data.template,
|
|
251
|
+
system: data.system?.substring(0, 200),
|
|
252
|
+
license: data.license?.substring(0, 200),
|
|
253
|
+
}, null, 2) }] };
|
|
254
|
+
}
|
|
255
|
+
|
|
256
|
+
case "pull": {
|
|
257
|
+
if (!name) throw new Error("Model name required");
|
|
258
|
+
const res = await fetch(`${OLLAMA_BASE}/api/pull`, {
|
|
259
|
+
method: "POST",
|
|
260
|
+
headers: { "Content-Type": "application/json" },
|
|
261
|
+
body: JSON.stringify({ name, stream: false }),
|
|
262
|
+
});
|
|
263
|
+
const data = await res.json();
|
|
264
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
265
|
+
status: data.status || "pulling",
|
|
266
|
+
model: name,
|
|
267
|
+
message: `Model "${name}" ${data.status || "pull initiated"}. This may take a while for large models.`,
|
|
268
|
+
}, null, 2) }] };
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
case "delete": {
|
|
272
|
+
if (!name) throw new Error("Model name required");
|
|
273
|
+
const res = await fetch(`${OLLAMA_BASE}/api/delete`, {
|
|
274
|
+
method: "DELETE",
|
|
275
|
+
headers: { "Content-Type": "application/json" },
|
|
276
|
+
body: JSON.stringify({ name }),
|
|
277
|
+
});
|
|
278
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
279
|
+
status: res.ok ? "deleted" : "failed",
|
|
280
|
+
model: name,
|
|
281
|
+
}) }] };
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
} catch (err) {
|
|
285
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
286
|
+
}
|
|
287
|
+
}
|
|
288
|
+
);
|
|
289
|
+
|
|
290
|
+
// ─── ai_embed ─────────────────────────────────────────────
|
|
291
|
+
server.tool(
|
|
292
|
+
"ai_embed",
|
|
293
|
+
`Create text embeddings locally using Ollama. Free, private.
|
|
294
|
+
Useful for: semantic search, similarity matching, RAG pipelines.
|
|
295
|
+
|
|
296
|
+
Example: ai_embed({ text: "What is MCP?" })
|
|
297
|
+
Example: ai_embed({ text: ["Hello world", "Hi there"], model: "llama3.1" })`,
|
|
298
|
+
{
|
|
299
|
+
text: z.union([z.string(), z.array(z.string())]).describe("Text(s) to embed"),
|
|
300
|
+
model: z.string().optional().describe(`Model (default: ${DEFAULT_MODEL})`),
|
|
301
|
+
},
|
|
302
|
+
async ({ text, model }) => {
|
|
303
|
+
try {
|
|
304
|
+
const input = Array.isArray(text) ? text : [text];
|
|
305
|
+
const res = await fetch(`${OLLAMA_BASE}/api/embed`, {
|
|
306
|
+
method: "POST",
|
|
307
|
+
headers: { "Content-Type": "application/json" },
|
|
308
|
+
body: JSON.stringify({ model: model || DEFAULT_MODEL, input }),
|
|
309
|
+
});
|
|
310
|
+
if (!res.ok) throw new Error(`Ollama error ${res.status}`);
|
|
311
|
+
const data = await res.json();
|
|
312
|
+
|
|
313
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
314
|
+
status: "ok",
|
|
315
|
+
model: data.model || model || DEFAULT_MODEL,
|
|
316
|
+
count: (data.embeddings || []).length,
|
|
317
|
+
dimensions: data.embeddings?.[0]?.length || 0,
|
|
318
|
+
embeddings: (data.embeddings || []).map((e, i) => ({
|
|
319
|
+
text: input[i]?.substring(0, 50) + (input[i]?.length > 50 ? "..." : ""),
|
|
320
|
+
vector_preview: e.slice(0, 5),
|
|
321
|
+
dimensions: e.length,
|
|
322
|
+
})),
|
|
323
|
+
cost: "$0.00",
|
|
324
|
+
}, null, 2) }] };
|
|
325
|
+
} catch (err) {
|
|
326
|
+
return { content: [{ type: "text", text: JSON.stringify({ status: "failed", error: err.message }) }] };
|
|
327
|
+
}
|
|
328
|
+
}
|
|
329
|
+
);
|
|
330
|
+
|
|
331
|
+
// ─── ai_bench ─────────────────────────────────────────────
|
|
332
|
+
server.tool(
|
|
333
|
+
"ai_bench",
|
|
334
|
+
`Benchmark a prompt across local Llama AND cloud models side by side.
|
|
335
|
+
Compare speed, quality, and cost. Cloud calls use real API keys (cost money).
|
|
336
|
+
|
|
337
|
+
Example: ai_bench({ prompt: "Explain quantum computing in one paragraph" })
|
|
338
|
+
Example: ai_bench({ prompt: "Write a haiku about AI", providers: ["local", "anthropic"] })`,
|
|
339
|
+
{
|
|
340
|
+
prompt: z.string().describe("Prompt to benchmark"),
|
|
341
|
+
system: z.string().optional().describe("System prompt"),
|
|
342
|
+
providers: z.array(z.enum(["local", "anthropic", "openai", "gemini"])).optional()
|
|
343
|
+
.describe("Providers to test (default: [local]). WARNING: cloud providers cost money."),
|
|
344
|
+
},
|
|
345
|
+
async ({ prompt, system, providers }) => {
|
|
346
|
+
const targets = providers || ["local"];
|
|
347
|
+
const results = [];
|
|
348
|
+
|
|
349
|
+
for (const provider of targets) {
|
|
350
|
+
const start = Date.now();
|
|
351
|
+
let response = "";
|
|
352
|
+
let tokens = 0;
|
|
353
|
+
let cost = "$0.00";
|
|
354
|
+
let error = null;
|
|
355
|
+
|
|
356
|
+
try {
|
|
357
|
+
if (provider === "local") {
|
|
358
|
+
const body = { model: DEFAULT_MODEL, prompt, stream: false, options: { temperature: 0.7, num_predict: 512 } };
|
|
359
|
+
if (system) body.system = system;
|
|
360
|
+
const res = await fetch(`${OLLAMA_BASE}/api/generate`, {
|
|
361
|
+
method: "POST",
|
|
362
|
+
headers: { "Content-Type": "application/json" },
|
|
363
|
+
body: JSON.stringify(body),
|
|
364
|
+
});
|
|
365
|
+
const data = await res.json();
|
|
366
|
+
response = data.response || "";
|
|
367
|
+
tokens = data.eval_count || 0;
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
else if (provider === "anthropic") {
|
|
371
|
+
if (!process.env.ANTHROPIC_API_KEY) throw new Error("No ANTHROPIC_API_KEY");
|
|
372
|
+
const { default: Anthropic } = await import("@anthropic-ai/sdk");
|
|
373
|
+
const client = new Anthropic();
|
|
374
|
+
const msg = await client.messages.create({
|
|
375
|
+
model: "claude-sonnet-4-20250514",
|
|
376
|
+
max_tokens: 512,
|
|
377
|
+
system: system || undefined,
|
|
378
|
+
messages: [{ role: "user", content: prompt }],
|
|
379
|
+
});
|
|
380
|
+
response = msg.content[0]?.text || "";
|
|
381
|
+
tokens = (msg.usage?.input_tokens || 0) + (msg.usage?.output_tokens || 0);
|
|
382
|
+
cost = `~$${(tokens * 0.000003).toFixed(4)}`;
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
else if (provider === "openai") {
|
|
386
|
+
if (!process.env.OPENAI_API_KEY) throw new Error("No OPENAI_API_KEY");
|
|
387
|
+
const messages = [];
|
|
388
|
+
if (system) messages.push({ role: "system", content: system });
|
|
389
|
+
messages.push({ role: "user", content: prompt });
|
|
390
|
+
const res = await fetch("https://api.openai.com/v1/chat/completions", {
|
|
391
|
+
method: "POST",
|
|
392
|
+
headers: { "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`, "Content-Type": "application/json" },
|
|
393
|
+
body: JSON.stringify({ model: "gpt-4o", messages, max_tokens: 512 }),
|
|
394
|
+
});
|
|
395
|
+
const data = await res.json();
|
|
396
|
+
response = data.choices?.[0]?.message?.content || "";
|
|
397
|
+
tokens = (data.usage?.total_tokens || 0);
|
|
398
|
+
cost = `~$${(tokens * 0.000005).toFixed(4)}`;
|
|
399
|
+
}
|
|
400
|
+
|
|
401
|
+
else if (provider === "gemini") {
|
|
402
|
+
if (!process.env.GEMINI_API_KEY) throw new Error("No GEMINI_API_KEY");
|
|
403
|
+
const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${process.env.GEMINI_API_KEY}`;
|
|
404
|
+
const body = { contents: [{ parts: [{ text: prompt }] }], generationConfig: { maxOutputTokens: 512 } };
|
|
405
|
+
if (system) body.system_instruction = { parts: [{ text: system }] };
|
|
406
|
+
const res = await fetch(url, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify(body) });
|
|
407
|
+
const data = await res.json();
|
|
408
|
+
response = data.candidates?.[0]?.content?.parts?.[0]?.text || "";
|
|
409
|
+
tokens = data.usageMetadata?.totalTokenCount || 0;
|
|
410
|
+
cost = `~$${(tokens * 0.0000001).toFixed(6)}`;
|
|
411
|
+
}
|
|
412
|
+
} catch (e) {
|
|
413
|
+
error = e.message;
|
|
414
|
+
}
|
|
415
|
+
|
|
416
|
+
const duration = Date.now() - start;
|
|
417
|
+
results.push({
|
|
418
|
+
provider,
|
|
419
|
+
model: provider === "local" ? DEFAULT_MODEL : provider === "anthropic" ? "claude-sonnet" : provider === "openai" ? "gpt-4o" : "gemini-2.0-flash",
|
|
420
|
+
response: response.substring(0, 300) + (response.length > 300 ? "..." : ""),
|
|
421
|
+
response_length: response.length,
|
|
422
|
+
duration_ms: duration,
|
|
423
|
+
tokens,
|
|
424
|
+
tokens_per_second: tokens > 0 && duration > 0 ? Math.round(tokens / (duration / 1000)) : null,
|
|
425
|
+
cost,
|
|
426
|
+
error,
|
|
427
|
+
});
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
return { content: [{ type: "text", text: JSON.stringify({
|
|
431
|
+
status: "ok",
|
|
432
|
+
prompt: prompt.substring(0, 100),
|
|
433
|
+
results,
|
|
434
|
+
fastest: results.filter(r => !r.error).sort((a, b) => a.duration_ms - b.duration_ms)[0]?.provider || "none",
|
|
435
|
+
cheapest: results.filter(r => !r.error).sort((a, b) => parseFloat(a.cost.replace(/[^0-9.]/g, "")) - parseFloat(b.cost.replace(/[^0-9.]/g, "")))[0]?.provider || "local",
|
|
436
|
+
}, null, 2) }] };
|
|
437
|
+
}
|
|
438
|
+
);
|
|
439
|
+
}
|
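Note (illustrative, not part of the diff): an example ai_bench result for providers ["local", "anthropic"], with assumed timings and token counts but the shape and selection logic taken from the code above — fastest and cheapest are picked by sorting the non-error rows:

{
  "results": [
    { "provider": "local",     "model": "llama3.1",      "duration_ms": 4200, "tokens": 310, "cost": "$0.00" },
    { "provider": "anthropic", "model": "claude-sonnet", "duration_ms": 2900, "tokens": 412, "cost": "~$0.0012" }
  ],
  "fastest": "anthropic",
  "cheapest": "local"
}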
package/index.js
CHANGED
@@ -32,6 +32,8 @@ import { registerContainerTools } from "./vault/tools-container.js";
 import { registerDeedTools } from "./vault/tools-deed.js";
 import { unsealedCache } from "./vault/cache.js";
 import { registerEngineTools, registerTrainingTools, registerFeedTools, registerCouncilTools, registerSxoWriterTools } from "./engine/index.js";
+import { registerBrainTools } from "./engine/brain.js";
+import { registerLocalAITools } from "./engine/local-ai.js";
 import { CapabilityProxy } from "./capability-proxy.js";
 import { SERVICE_CATALOG } from "./catalog.js";

@@ -86,6 +88,8 @@ registerTrainingTools(server, z);
 registerFeedTools(server, z);
 registerCouncilTools(server, z);
 registerSxoWriterTools(server, z);
+registerBrainTools(server, z);
+registerLocalAITools(server, z);

 // ============================================================
 // VAULT CONTAINER TOOLS (patent-pending 0nVault containers)
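Note (illustrative, not part of the diff): registerBrainTools is wired up with only (server, z), so its optional third supabase argument is omitted and the brain tools lazily create their own client from environment variables on first use (see getSupabase in engine/brain.js); registerLocalAITools needs no credentials at all, only a reachable Ollama instance.

// Environment the new tools read at runtime (names taken from the added files)
// SUPABASE_URL, SUPABASE_SERVICE_KEY (or SUPABASE_SERVICE_ROLE_KEY)            — brain_* tools
// OLLAMA_URL (default http://localhost:11434), OLLAMA_MODEL (default llama3.1) — ai_* tools
// ANTHROPIC_API_KEY (optional)                                                 — brain_train / ai_bench cloud calls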
package/lib/stats.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "0nmcp",
-  "version": "2.8.0",
+  "version": "2.9.0",
   "mcpName": "io.github.0nork/0nMCP",
   "description": "Universal AI API Orchestrator — 819 tools, 48 services, portable AI Brain bundles + machine-bound vault encryption + Application Engine. The most comprehensive MCP server available. Free and open source from 0nORK.",
   "type": "module",

@@ -282,6 +282,6 @@
     "triggers": 155,
     "totalCapabilities": 1078,
     "categories": 21,
-    "lastUpdated": "2026-03-
+    "lastUpdated": "2026-03-28T07:55:13.555Z"
   }
 }
|