clementine-agent 1.18.12 → 1.18.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/tool-router.js +23 -1
- package/dist/brain/adapters/markdown.js +6 -1
- package/dist/brain/connector-recipes.d.ts +5 -5
- package/dist/brain/connector-recipes.js +106 -6
- package/dist/cli/dashboard.js +86 -7
- package/dist/tools/brain-tools.d.ts +26 -2
- package/dist/tools/brain-tools.js +97 -72
- package/package.json +1 -1
|
@@ -126,6 +126,15 @@ export const TOOL_BUNDLES = [
|
|
|
126
126
|
function uniqueStrings(values) {
|
|
127
127
|
return [...new Set([...values].filter((v) => !!v && v.trim().length > 0))];
|
|
128
128
|
}
|
|
129
|
+
function explicitMcpServers(scopeText) {
|
|
130
|
+
const servers = new Set();
|
|
131
|
+
const re = /\bmcp__([A-Za-z0-9_-]+)__[A-Za-z0-9_.:-]+\b/g;
|
|
132
|
+
let match;
|
|
133
|
+
while ((match = re.exec(scopeText)) !== null) {
|
|
134
|
+
servers.add(match[1]);
|
|
135
|
+
}
|
|
136
|
+
return uniqueStrings(servers);
|
|
137
|
+
}
|
|
129
138
|
export function routeToolSurface(text) {
|
|
130
139
|
const scopeText = text?.trim() ?? '';
|
|
131
140
|
if (!scopeText) {
|
|
@@ -162,13 +171,26 @@ export function routeToolSurface(text) {
|
|
|
162
171
|
composio.add(slug);
|
|
163
172
|
inheritFullClaudeEnv = inheritFullClaudeEnv || bundle.inheritFullClaudeEnv === true;
|
|
164
173
|
}
|
|
174
|
+
for (const server of explicitMcpServers(scopeText)) {
|
|
175
|
+
if (server.startsWith('claude_ai_')) {
|
|
176
|
+
external.add(server.slice('claude_ai_'.length));
|
|
177
|
+
}
|
|
178
|
+
else {
|
|
179
|
+
// Exact `mcp__<server>__<tool>` mentions are authoritative. Add the
|
|
180
|
+
// name as both a direct MCP server and a Composio toolkit; whichever
|
|
181
|
+
// source is actually connected will mount, and the other path no-ops.
|
|
182
|
+
external.add(server);
|
|
183
|
+
composio.add(server);
|
|
184
|
+
}
|
|
185
|
+
inheritFullClaudeEnv = true;
|
|
186
|
+
}
|
|
165
187
|
return {
|
|
166
188
|
bundles: uniqueStrings(bundles),
|
|
167
189
|
externalMcpServers: uniqueStrings(external),
|
|
168
190
|
composioToolkits: uniqueStrings(composio),
|
|
169
191
|
inheritFullClaudeEnv,
|
|
170
192
|
fullSurface: false,
|
|
171
|
-
reason: bundles.size > 0 ? 'matched' : 'empty',
|
|
193
|
+
reason: bundles.size > 0 || external.size > 0 || composio.size > 0 ? 'matched' : 'empty',
|
|
172
194
|
};
|
|
173
195
|
}
|
|
174
196
|
//# sourceMappingURL=tool-router.js.map
|
|
@@ -33,8 +33,13 @@ export async function* parseMarkdown(filePath) {
|
|
|
33
33
|
mtime = statSync(filePath).mtime.toISOString();
|
|
34
34
|
}
|
|
35
35
|
catch { /* ignore */ }
|
|
36
|
+
const frontmatterExternalId = typeof parsed.data?.externalId === 'string' && parsed.data.externalId.trim()
|
|
37
|
+
? parsed.data.externalId.trim()
|
|
38
|
+
: typeof parsed.data?.external_id === 'string' && parsed.data.external_id.trim()
|
|
39
|
+
? parsed.data.external_id.trim()
|
|
40
|
+
: null;
|
|
36
41
|
yield {
|
|
37
|
-
externalId: `md-${hint}-${contentHash(body)}`,
|
|
42
|
+
externalId: frontmatterExternalId ?? `md-${hint}-${contentHash(body)}`,
|
|
38
43
|
content: body,
|
|
39
44
|
rawPayload: raw,
|
|
40
45
|
metadata: {
|
|
@@ -3,8 +3,8 @@
|
|
|
3
3
|
*
|
|
4
4
|
* Each recipe is a blueprint for a one-click "auto-seed feed" that turns an
|
|
5
5
|
* authenticated tool source (Claude Desktop connector, Composio toolkit, or
|
|
6
|
-
* local MCP server) into a scheduled data feed that writes
|
|
7
|
-
* ingest folder.
|
|
6
|
+
* local MCP server) into a scheduled data feed that writes distilled notes
|
|
7
|
+
* into the brain's ingest folder.
|
|
8
8
|
*
|
|
9
9
|
* A feed materializes as:
|
|
10
10
|
* 1. A CRON.md job entry with `managed: connector-feed` frontmatter
|
|
@@ -12,8 +12,8 @@
|
|
|
12
12
|
*
|
|
13
13
|
* The cron prompt tells the Claude Code agent to use the integration's MCP
|
|
14
14
|
* tools to pull records, compare them with current memory when appropriate,
|
|
15
|
-
* then call `brain_ingest_folder` to commit them — which writes
|
|
16
|
-
*
|
|
15
|
+
* then call `brain_ingest_folder` to commit them — which writes distilled
|
|
16
|
+
* markdown notes and indexes them in one step.
|
|
17
17
|
*
|
|
18
18
|
* Field syntax in prompt templates:
|
|
19
19
|
* {{fieldKey}} — user-supplied value
|
|
@@ -59,7 +59,7 @@ export interface ConnectorRecipe {
|
|
|
59
59
|
description: string;
|
|
60
60
|
/** Emoji shown next to the label. */
|
|
61
61
|
icon: string;
|
|
62
|
-
/** Matches the
|
|
62
|
+
/** Matches the tool source name; "*" recipes are offered for every source. */
|
|
63
63
|
integration: string;
|
|
64
64
|
/** Tools we rely on for this recipe. Used only to warn if the integration
|
|
65
65
|
* hasn't surfaced them yet in claude-integrations.json. */
|
|
@@ -3,8 +3,8 @@
|
|
|
3
3
|
*
|
|
4
4
|
* Each recipe is a blueprint for a one-click "auto-seed feed" that turns an
|
|
5
5
|
* authenticated tool source (Claude Desktop connector, Composio toolkit, or
|
|
6
|
-
* local MCP server) into a scheduled data feed that writes
|
|
7
|
-
* ingest folder.
|
|
6
|
+
* local MCP server) into a scheduled data feed that writes distilled notes
|
|
7
|
+
* into the brain's ingest folder.
|
|
8
8
|
*
|
|
9
9
|
* A feed materializes as:
|
|
10
10
|
* 1. A CRON.md job entry with `managed: connector-feed` frontmatter
|
|
@@ -12,8 +12,8 @@
|
|
|
12
12
|
*
|
|
13
13
|
* The cron prompt tells the Claude Code agent to use the integration's MCP
|
|
14
14
|
* tools to pull records, compare them with current memory when appropriate,
|
|
15
|
-
* then call `brain_ingest_folder` to commit them — which writes
|
|
16
|
-
*
|
|
15
|
+
* then call `brain_ingest_folder` to commit them — which writes distilled
|
|
16
|
+
* markdown notes and indexes them in one step.
|
|
17
17
|
*
|
|
18
18
|
* Field syntax in prompt templates:
|
|
19
19
|
* {{fieldKey}} — user-supplied value
|
|
@@ -25,16 +25,116 @@ function slugify(s) {
|
|
|
25
25
|
.replace(/^-+|-+$/g, '')
|
|
26
26
|
.slice(0, 40) || 'feed';
|
|
27
27
|
}
|
|
28
|
+
function inferToolServer(toolName) {
|
|
29
|
+
const match = String(toolName).match(/^mcp__([^_]+(?:_[^_]+)*)__/);
|
|
30
|
+
return match?.[1] ?? 'tool';
|
|
31
|
+
}
|
|
28
32
|
const COMMIT_INSTRUCTIONS = `When you have the records collected, call the \`brain_ingest_folder\` MCP tool with:
|
|
29
33
|
- \`slug\`: "{{slug}}"
|
|
30
34
|
- \`records\`: an array of \`{title, externalId, content, metadata}\` objects (one per item). \`externalId\` should be the source provider's stable id so re-runs dedup. \`metadata\` can include any fields you want preserved (url, modifiedAt, author).
|
|
31
35
|
|
|
32
|
-
That tool writes
|
|
36
|
+
That tool runs the brain's distillation pipeline and writes the final notes to \`{{targetFolder}}/\`. You do NOT need to use Write — brain_ingest_folder handles note creation and indexing. Finish by reporting a one-line summary like "Ingested N new records, M unchanged".
|
|
33
37
|
|
|
34
38
|
If the tool returns an error, include the error text in your summary.`;
|
|
35
|
-
const MEMORY_DELTA_INSTRUCTIONS = `Before committing, call \`memory_recall\` for the feed slug/topic and use the returned chunks as the current memory state for this source. Keep records that are new, materially changed, or contain a new finding. Drop exact duplicates and rows that add no useful information. The ingestion pipeline will write markdown and
|
|
39
|
+
const MEMORY_DELTA_INSTRUCTIONS = `Before committing, call \`memory_recall\` for the feed slug/topic and use the returned chunks as the current memory state for this source. Keep records that are new, materially changed, or contain a new finding. Drop exact duplicates and rows that add no useful information. The ingestion pipeline will write markdown, chunk it, and index it for recall; do not call \`memory_write\` for these feed records.`;
|
|
36
40
|
// ── Recipes ────────────────────────────────────────────────────────────
|
|
37
41
|
export const RECIPES = [
|
|
42
|
+
{
|
|
43
|
+
id: 'tool-backed-memory-seed',
|
|
44
|
+
label: 'Seed memory from this tool',
|
|
45
|
+
description: 'Pick one tool, fetch records from it, compare them with current memory, and save only new or changed findings.',
|
|
46
|
+
icon: '🔌',
|
|
47
|
+
integration: '*',
|
|
48
|
+
requiredTools: [],
|
|
49
|
+
fields: [
|
|
50
|
+
{
|
|
51
|
+
key: 'topic',
|
|
52
|
+
label: 'Memory topic',
|
|
53
|
+
placeholder: 'customers, calls, leads, deals, meetings...',
|
|
54
|
+
required: true,
|
|
55
|
+
help: 'Used to search current memory and name this feed.',
|
|
56
|
+
},
|
|
57
|
+
{
|
|
58
|
+
key: 'toolName',
|
|
59
|
+
label: 'Tool to call',
|
|
60
|
+
required: true,
|
|
61
|
+
help: 'Pick the exact tool this feed should call each time it runs.',
|
|
62
|
+
},
|
|
63
|
+
{
|
|
64
|
+
key: 'callGoal',
|
|
65
|
+
label: 'What should Clementine fetch?',
|
|
66
|
+
placeholder: 'Fetch updated HubSpot contacts modified since the last run...',
|
|
67
|
+
required: true,
|
|
68
|
+
help: 'Describe the records to fetch, filters to apply, and any pagination bounds.',
|
|
69
|
+
},
|
|
70
|
+
{
|
|
71
|
+
key: 'variablesJson',
|
|
72
|
+
label: 'Tool variables (JSON)',
|
|
73
|
+
placeholder: '{"listId":"123","limit":100,"updatedAfter":"last_run"}',
|
|
74
|
+
help: 'Optional. Use {} if the tool needs no arguments.',
|
|
75
|
+
},
|
|
76
|
+
{
|
|
77
|
+
key: 'recordStrategy',
|
|
78
|
+
label: 'How to save each result',
|
|
79
|
+
placeholder: 'One record per contact. Use email as stable id. Summarize lifecycle stage, owner, last activity, and new changes.',
|
|
80
|
+
help: 'Tell Clementine what counts as one memory record and which field is the stable id.',
|
|
81
|
+
},
|
|
82
|
+
{
|
|
83
|
+
key: 'slug',
|
|
84
|
+
label: 'Memory bucket name (optional)',
|
|
85
|
+
placeholder: 'hubspot-contacts',
|
|
86
|
+
help: 'Optional. Leave blank to derive one from the connector and topic.',
|
|
87
|
+
},
|
|
88
|
+
{
|
|
89
|
+
key: 'limit',
|
|
90
|
+
label: 'Max records per run',
|
|
91
|
+
placeholder: '100',
|
|
92
|
+
defaultValue: '100',
|
|
93
|
+
},
|
|
94
|
+
],
|
|
95
|
+
defaultSchedule: '0 8 * * *',
|
|
96
|
+
tier: 2,
|
|
97
|
+
slugFromValues: (v) => `tool-${slugify(v.slug || `${v.toolSourceName || inferToolServer(v.toolName || '')}-${v.topic || v.toolName || 'feed'}`)}`,
|
|
98
|
+
buildPrompt: (v, ctx) => {
|
|
99
|
+
const sourceName = v.toolSourceName || inferToolServer(v.toolName || '');
|
|
100
|
+
const sourceKind = v.toolSourceKind || 'mcp';
|
|
101
|
+
const sourceLabel = v.toolSourceLabel || sourceName;
|
|
102
|
+
const topic = v.topic || 'tool-backed memory';
|
|
103
|
+
const limit = v.limit || '100';
|
|
104
|
+
return `You are running a generic tool-backed memory seed feed.
|
|
105
|
+
|
|
106
|
+
Tool source:
|
|
107
|
+
- Label: "${sourceLabel}"
|
|
108
|
+
- Source name: "${sourceName}"
|
|
109
|
+
- Source kind: "${sourceKind}"
|
|
110
|
+
- Tool: \`${v.toolName}\`
|
|
111
|
+
|
|
112
|
+
Goal: ${v.callGoal || `Call ${v.toolName} and ingest useful returned data into memory.`}
|
|
113
|
+
|
|
114
|
+
Tool variables JSON:
|
|
115
|
+
\`\`\`json
|
|
116
|
+
${(v.variablesJson || '{}').trim() || '{}'}
|
|
117
|
+
\`\`\`
|
|
118
|
+
|
|
119
|
+
How to save each result:
|
|
120
|
+
${v.recordStrategy || 'Convert the tool response into one memory record per returned entity or event. Use the provider stable id when available; otherwise use a deterministic hash of the source, topic, and meaningful record key.'}
|
|
121
|
+
|
|
122
|
+
Steps:
|
|
123
|
+
1. Call exactly this selected tool: \`${v.toolName}\`. Use the Tool variables JSON and the Goal above as the tool-call inputs. If the tool schema needs differently named arguments, map the provided variables to that schema. Do not switch to a different external tool unless this tool returns a clear instruction that another tool is required to read the selected records.
|
|
124
|
+
2. If the tool supports pagination or modified-since filters, prefer new/updated records and stop after ${limit} records. If no modified-since filter is available, fetch the most relevant ${limit} records.
|
|
125
|
+
3. Normalize the tool result into candidate records. Preserve stable ids, URLs, timestamps, owners/authors, status fields, and provider metadata. Skip empty or purely administrative records.
|
|
126
|
+
4. ${MEMORY_DELTA_INSTRUCTIONS}
|
|
127
|
+
Use this recall query: \`source:${ctx.slug} ${topic} ${sourceLabel} ${v.toolName}\`.
|
|
128
|
+
5. Compare the normalized candidates with recalled memory. Keep only candidates that are new, materially changed, or produce a new useful finding. Drop exact duplicates and trivial timestamp-only changes unless the timestamp itself is the useful fact.
|
|
129
|
+
6. For each kept candidate, build one record:
|
|
130
|
+
- \`title\`: a compact human label including the topic and record name/id.
|
|
131
|
+
- \`externalId\`: \`${sourceName}:${topic}:<providerStableIdOrDeterministicHash>\`.
|
|
132
|
+
- \`content\`: markdown containing the current facts, the new/changed finding, and a "Source data" section with relevant returned fields.
|
|
133
|
+
- \`metadata\`: \`{provider:"${sourceName}", toolSource:"${sourceKind}", toolName:"${v.toolName}", topic:"${topic}", fetchedAt, sourceUrl, updatedAt}\` plus any provider-specific keys worth preserving.
|
|
134
|
+
7. ${COMMIT_INSTRUCTIONS.replace(/{{slug}}/g, ctx.slug).replace(/{{targetFolder}}/g, ctx.targetFolder)}
|
|
135
|
+
`;
|
|
136
|
+
},
|
|
137
|
+
},
|
|
38
138
|
{
|
|
39
139
|
id: 'gdrive-watch-folder',
|
|
40
140
|
label: 'Google Drive: watch a folder',
|
package/dist/cli/dashboard.js
CHANGED
|
@@ -3834,7 +3834,8 @@ export async function cmdDashboard(opts) {
|
|
|
3834
3834
|
// the Intelligence → Sources tab composes recipe + field values + schedule
|
|
3835
3835
|
// into a cron prompt that uses the user's authenticated tool source
|
|
3836
3836
|
// (Claude Desktop connector, Composio toolkit, or local MCP server) to pull
|
|
3837
|
-
// records and
|
|
3837
|
+
// records, compare them with memory, and call brain_ingest_folder to commit
|
|
3838
|
+
// distilled notes.
|
|
3838
3839
|
app.get('/api/brain/connectors', async (_req, res) => {
|
|
3839
3840
|
try {
|
|
3840
3841
|
const { getClaudeIntegrations, loadToolInventory } = await import('../agent/mcp-bridge.js');
|
|
@@ -3890,7 +3891,7 @@ export async function cmdDashboard(opts) {
|
|
|
3890
3891
|
const connected = await composio.listConnectedToolkits();
|
|
3891
3892
|
const activeSlugs = [...new Set(connected
|
|
3892
3893
|
.filter((c) => c.status === 'ACTIVE')
|
|
3893
|
-
.filter((c) => recipeIntegrations.has(c.slug))
|
|
3894
|
+
.filter((c) => recipeIntegrations.has('*') || recipeIntegrations.has(c.slug))
|
|
3894
3895
|
.map((c) => c.slug))];
|
|
3895
3896
|
if (activeSlugs.length) {
|
|
3896
3897
|
const { listComposioToolkitTools } = await import('../integrations/composio/mcp-bridge.js');
|
|
@@ -4083,6 +4084,27 @@ If the tool returns nothing or errors, return an empty array \`[]\`.`,
|
|
|
4083
4084
|
res.status(400).json({ error: `missing required field(s): ${missing.join(', ')}` });
|
|
4084
4085
|
return;
|
|
4085
4086
|
}
|
|
4087
|
+
if (recipe.id === 'tool-backed-memory-seed') {
|
|
4088
|
+
const toolName = String(values.toolName ?? '').trim();
|
|
4089
|
+
if (!/^mcp__.+__.+$/.test(toolName)) {
|
|
4090
|
+
res.status(400).json({ error: 'toolName must be an exact MCP tool name like mcp__server__tool' });
|
|
4091
|
+
return;
|
|
4092
|
+
}
|
|
4093
|
+
const rawVariables = String(values.variablesJson ?? '').trim();
|
|
4094
|
+
if (rawVariables) {
|
|
4095
|
+
try {
|
|
4096
|
+
const parsedVariables = JSON.parse(rawVariables);
|
|
4097
|
+
if (!parsedVariables || typeof parsedVariables !== 'object' || Array.isArray(parsedVariables)) {
|
|
4098
|
+
res.status(400).json({ error: 'Tool variables must be a JSON object, for example {}' });
|
|
4099
|
+
return;
|
|
4100
|
+
}
|
|
4101
|
+
}
|
|
4102
|
+
catch {
|
|
4103
|
+
res.status(400).json({ error: 'Tool variables must be valid JSON, for example {}' });
|
|
4104
|
+
return;
|
|
4105
|
+
}
|
|
4106
|
+
}
|
|
4107
|
+
}
|
|
4086
4108
|
const schedule = (body.schedule || recipe.defaultSchedule).trim();
|
|
4087
4109
|
if (!cron.validate(schedule)) {
|
|
4088
4110
|
res.status(400).json({ error: `invalid cron expression: ${schedule}` });
|
|
@@ -4119,7 +4141,7 @@ If the tool returns nothing or errors, return an empty array \`[]\`.`,
|
|
|
4119
4141
|
managed: 'connector-feed',
|
|
4120
4142
|
recipeId: recipe.id,
|
|
4121
4143
|
fields: values,
|
|
4122
|
-
|
|
4144
|
+
mode: 'direct-records',
|
|
4123
4145
|
}),
|
|
4124
4146
|
targetFolder: spec.targetFolder,
|
|
4125
4147
|
intelligence: 'auto',
|
|
@@ -13549,11 +13571,11 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
13549
13571
|
<!-- ═══ Auto-seed feeds (connected tools → cron → brain) ═══ -->
|
|
13550
13572
|
<div class="card" style="padding:16px;margin-bottom:16px">
|
|
13551
13573
|
<div style="display:flex;align-items:center;justify-content:space-between;margin-bottom:4px">
|
|
13552
|
-
<div style="font-weight:600">
|
|
13574
|
+
<div style="font-weight:600">Seed memory from connected apps</div>
|
|
13553
13575
|
<button class="btn-primary" onclick="brainOpenFeedWizard()">+ Add feed</button>
|
|
13554
13576
|
</div>
|
|
13555
13577
|
<div style="color:var(--muted);font-size:13px;margin-bottom:12px">
|
|
13556
|
-
|
|
13578
|
+
Scheduled feeds use authenticated tools (Composio, Claude Desktop connectors, or local MCP servers) to fetch records, compare them with current memory, and save distilled notes to the brain.
|
|
13557
13579
|
</div>
|
|
13558
13580
|
<div id="brain-feeds-connectors" style="display:flex;flex-wrap:wrap;gap:6px;margin-bottom:12px"></div>
|
|
13559
13581
|
<div id="brain-feeds-list"></div>
|
|
@@ -13561,7 +13583,7 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
13561
13583
|
|
|
13562
13584
|
<!-- ═══ Auto-seed feed wizard (hidden by default) ═══ -->
|
|
13563
13585
|
<div id="brain-feed-wizard" class="card" style="display:none;padding:16px;margin-bottom:16px">
|
|
13564
|
-
<div style="font-weight:600;margin-bottom:4px">Add
|
|
13586
|
+
<div style="font-weight:600;margin-bottom:4px">Add memory seed feed</div>
|
|
13565
13587
|
<div id="brain-feed-wizard-breadcrumbs" style="color:var(--muted);font-size:12px;margin-bottom:12px"></div>
|
|
13566
13588
|
<div id="brain-feed-wizard-step"></div>
|
|
13567
13589
|
<div style="display:flex;gap:8px;margin-top:14px">
|
|
@@ -14115,6 +14137,11 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
14115
14137
|
for (const f of (s.recipe.fields || [])) {
|
|
14116
14138
|
if (f.defaultValue) s.values[f.key] = f.defaultValue;
|
|
14117
14139
|
}
|
|
14140
|
+
if (s.recipe.integration === '*' && s.pick) {
|
|
14141
|
+
s.values.toolSourceName = s.pick.name;
|
|
14142
|
+
s.values.toolSourceKind = s.pick.kind;
|
|
14143
|
+
s.values.toolSourceLabel = s.pick.label;
|
|
14144
|
+
}
|
|
14118
14145
|
s.schedule = s.recipe.defaultSchedule;
|
|
14119
14146
|
s.step = 2;
|
|
14120
14147
|
} else if (s.step === 2) {
|
|
@@ -14123,6 +14150,27 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
14123
14150
|
inputs.forEach(function(inp) { s.values[inp.dataset.field] = inp.value; });
|
|
14124
14151
|
const missing = (s.recipe.fields || []).filter(function(f) { return f.required && !(s.values[f.key] || '').trim(); });
|
|
14125
14152
|
if (missing.length) { document.getElementById('brain-feed-wizard-status').innerHTML = '<span style="color:#e66">Required: ' + missing.map(function(f) { return f.label; }).join(', ') + '</span>'; return; }
|
|
14153
|
+
if (s.recipe && s.recipe.id === 'tool-backed-memory-seed') {
|
|
14154
|
+
const toolName = String(s.values.toolName || '').trim();
|
|
14155
|
+
if (!/^mcp__.+__.+$/.test(toolName)) {
|
|
14156
|
+
document.getElementById('brain-feed-wizard-status').innerHTML = '<span style="color:#e66">Pick an exact tool before continuing.</span>';
|
|
14157
|
+
return;
|
|
14158
|
+
}
|
|
14159
|
+
const rawVariables = String(s.values.variablesJson || '').trim();
|
|
14160
|
+
if (rawVariables) {
|
|
14161
|
+
try {
|
|
14162
|
+
const parsedVariables = JSON.parse(rawVariables);
|
|
14163
|
+
if (!parsedVariables || typeof parsedVariables !== 'object' || Array.isArray(parsedVariables)) {
|
|
14164
|
+
document.getElementById('brain-feed-wizard-status').innerHTML = '<span style="color:#e66">Tool variables must be a JSON object, for example {}.</span>';
|
|
14165
|
+
return;
|
|
14166
|
+
}
|
|
14167
|
+
} catch (err) {
|
|
14168
|
+
void err;
|
|
14169
|
+
document.getElementById('brain-feed-wizard-status').innerHTML = '<span style="color:#e66">Tool variables must be valid JSON, for example {}.</span>';
|
|
14170
|
+
return;
|
|
14171
|
+
}
|
|
14172
|
+
}
|
|
14173
|
+
}
|
|
14126
14174
|
s.step = 3;
|
|
14127
14175
|
} else if (s.step === 3) {
|
|
14128
14176
|
brainFeedWizardSubmit();
|
|
@@ -14290,6 +14338,13 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
14290
14338
|
if (field) await brainRenderFieldPicker(field, s.values);
|
|
14291
14339
|
}
|
|
14292
14340
|
|
|
14341
|
+
function brainFullToolNameForPick(pick, tool) {
|
|
14342
|
+
if (!pick || !tool) return tool || '';
|
|
14343
|
+
if (String(tool).startsWith('mcp__')) return tool;
|
|
14344
|
+
const server = pick.kind === 'claude-desktop' ? ('claude_ai_' + pick.name) : pick.name;
|
|
14345
|
+
return 'mcp__' + server + '__' + tool;
|
|
14346
|
+
}
|
|
14347
|
+
|
|
14293
14348
|
function brainFeedWizardRender() {
|
|
14294
14349
|
if (!brainFeedWizardState) return;
|
|
14295
14350
|
const s = brainFeedWizardState;
|
|
@@ -14317,7 +14372,7 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
14317
14372
|
}).join('') + '</div>';
|
|
14318
14373
|
}
|
|
14319
14374
|
} else if (s.step === 1) {
|
|
14320
|
-
const recipes = (s.catalog.recipes || []).filter(function(r) { return r.integration === s.pick.name; });
|
|
14375
|
+
const recipes = (s.catalog.recipes || []).filter(function(r) { return r.integration === s.pick.name || r.integration === '*'; });
|
|
14321
14376
|
if (!recipes.length) {
|
|
14322
14377
|
html = '<div style="color:var(--muted)">No recipes for this connector yet.</div>';
|
|
14323
14378
|
} else {
|
|
@@ -14346,6 +14401,25 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
14346
14401
|
'<div style="color:var(--muted);font-size:13px;padding:6px">Loading choices…</div>' +
|
|
14347
14402
|
'</div>' +
|
|
14348
14403
|
'<input type="hidden" data-field="' + f.key + '" value="' + escapeHtml(val) + '">';
|
|
14404
|
+
} else if (s.recipe.integration === '*' && f.key === 'toolName') {
|
|
14405
|
+
const tools = (s.pick && s.pick.tools) || [];
|
|
14406
|
+
if (!tools.length) {
|
|
14407
|
+
control = '<input type="text" data-field="' + f.key + '" value="' + escapeHtml(val) + '" placeholder="mcp__server__TOOL_NAME" style="width:100%">';
|
|
14408
|
+
} else {
|
|
14409
|
+
const options = tools.map(function(t) {
|
|
14410
|
+
const full = brainFullToolNameForPick(s.pick, t);
|
|
14411
|
+
const selected = full === val ? ' selected' : '';
|
|
14412
|
+
return '<option value="' + escapeHtml(full) + '"' + selected + '>' + escapeHtml(t) + '</option>';
|
|
14413
|
+
}).join('');
|
|
14414
|
+
control = '<select data-field="' + f.key + '" style="width:100%;padding:6px">' +
|
|
14415
|
+
'<option value="">— pick a tool —</option>' +
|
|
14416
|
+
options +
|
|
14417
|
+
'</select>' +
|
|
14418
|
+
'<div style="font-size:11px;color:var(--muted);margin-top:4px">The feed will call the selected tool exactly, then compare returned records with memory.</div>';
|
|
14419
|
+
}
|
|
14420
|
+
} else if (s.recipe.integration === '*' && ['callGoal', 'variablesJson', 'recordStrategy'].includes(f.key)) {
|
|
14421
|
+
const minHeight = f.key === 'variablesJson' ? '70px' : '92px';
|
|
14422
|
+
control = '<textarea data-field="' + f.key + '" placeholder="' + escapeHtml(f.placeholder || '') + '" style="width:100%;min-height:' + minHeight + ';resize:vertical">' + escapeHtml(val) + '</textarea>';
|
|
14349
14423
|
} else {
|
|
14350
14424
|
control = '<input type="text" data-field="' + f.key + '" value="' + escapeHtml(val) + '" placeholder="' + escapeHtml(f.placeholder || '') + '" style="width:100%">';
|
|
14351
14425
|
}
|
|
@@ -14387,6 +14461,11 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
|
|
|
14387
14461
|
function brainFeedWizardPickRecipe(id) {
|
|
14388
14462
|
const r = (brainFeedWizardState.catalog.recipes || []).find(function(x) { return x.id === id; });
|
|
14389
14463
|
brainFeedWizardState.recipe = r;
|
|
14464
|
+
if (r && r.integration === '*' && brainFeedWizardState.pick) {
|
|
14465
|
+
brainFeedWizardState.values.toolSourceName = brainFeedWizardState.pick.name;
|
|
14466
|
+
brainFeedWizardState.values.toolSourceKind = brainFeedWizardState.pick.kind;
|
|
14467
|
+
brainFeedWizardState.values.toolSourceLabel = brainFeedWizardState.pick.label;
|
|
14468
|
+
}
|
|
14390
14469
|
brainFeedWizardRender();
|
|
14391
14470
|
}
|
|
14392
14471
|
|
|
@@ -3,9 +3,33 @@
|
|
|
3
3
|
*
|
|
4
4
|
* Tools the agent uses to feed the brain's ingestion pipeline from cron jobs.
|
|
5
5
|
* Primarily used by Connector Feeds (src/brain/connector-recipes.ts) — each
|
|
6
|
-
* feed's cron prompt ends with a brain_ingest_folder call that
|
|
7
|
-
* records
|
|
6
|
+
* feed's cron prompt ends with a brain_ingest_folder call that sends fetched
|
|
7
|
+
* records into the distillation pipeline. The pipeline writes distilled notes
|
|
8
|
+
* to 04-Ingest/<slug>/ and indexes them for recall.
|
|
8
9
|
*/
|
|
9
10
|
import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
11
|
+
export interface IngestRecordInput {
|
|
12
|
+
title: string;
|
|
13
|
+
externalId: string;
|
|
14
|
+
content: string;
|
|
15
|
+
metadata?: Record<string, unknown>;
|
|
16
|
+
}
|
|
17
|
+
export interface BrainIngestFolderResult {
|
|
18
|
+
slug: string;
|
|
19
|
+
acceptedCount: number;
|
|
20
|
+
skippedEmpty: number;
|
|
21
|
+
pipeline: {
|
|
22
|
+
recordsIn: number;
|
|
23
|
+
recordsWritten: number;
|
|
24
|
+
recordsSkipped: number;
|
|
25
|
+
recordsFailed: number;
|
|
26
|
+
errors: Array<{
|
|
27
|
+
externalId?: string;
|
|
28
|
+
error: string;
|
|
29
|
+
}>;
|
|
30
|
+
};
|
|
31
|
+
message: string;
|
|
32
|
+
}
|
|
33
|
+
export declare function ingestBrainRecords(slug: string, records: IngestRecordInput[]): Promise<BrainIngestFolderResult>;
|
|
10
34
|
export declare function registerBrainTools(server: McpServer): void;
|
|
11
35
|
//# sourceMappingURL=brain-tools.d.ts.map
|
|
@@ -3,31 +3,19 @@
|
|
|
3
3
|
*
|
|
4
4
|
* Tools the agent uses to feed the brain's ingestion pipeline from cron jobs.
|
|
5
5
|
* Primarily used by Connector Feeds (src/brain/connector-recipes.ts) — each
|
|
6
|
-
* feed's cron prompt ends with a brain_ingest_folder call that
|
|
7
|
-
* records
|
|
6
|
+
* feed's cron prompt ends with a brain_ingest_folder call that sends fetched
|
|
7
|
+
* records into the distillation pipeline. The pipeline writes distilled notes
|
|
8
|
+
* to 04-Ingest/<slug>/ and indexes them for recall.
|
|
8
9
|
*/
|
|
9
|
-
import { mkdirSync, writeFileSync, existsSync } from 'node:fs';
|
|
10
|
-
import path from 'node:path';
|
|
11
10
|
import { z } from 'zod';
|
|
12
|
-
import {
|
|
13
|
-
|
|
14
|
-
function
|
|
15
|
-
const base = String(title || externalId || 'record')
|
|
16
|
-
.toLowerCase()
|
|
17
|
-
.replace(/[^a-z0-9]+/g, '-')
|
|
18
|
-
.replace(/^-+|-+$/g, '')
|
|
19
|
-
.slice(0, 60) || 'record';
|
|
20
|
-
const idPart = String(externalId || '')
|
|
21
|
-
.replace(/[^a-zA-Z0-9]+/g, '')
|
|
22
|
-
.slice(0, 16) || 'x';
|
|
23
|
-
return `${base}-${idPart}.md`;
|
|
24
|
-
}
|
|
25
|
-
function formatFrontmatter(record, slug) {
|
|
11
|
+
import { fallbackExternalId } from '../brain/adapters/common.js';
|
|
12
|
+
import { logger, textResult } from './shared.js';
|
|
13
|
+
function formatFrontmatter(record, slug, fetchedAt) {
|
|
26
14
|
const frontmatter = {
|
|
27
15
|
source: slug,
|
|
28
16
|
externalId: record.externalId,
|
|
29
17
|
title: record.title,
|
|
30
|
-
fetchedAt
|
|
18
|
+
fetchedAt,
|
|
31
19
|
};
|
|
32
20
|
if (record.metadata && typeof record.metadata === 'object') {
|
|
33
21
|
for (const [k, v] of Object.entries(record.metadata)) {
|
|
@@ -51,8 +39,94 @@ function formatFrontmatter(record, slug) {
|
|
|
51
39
|
lines.push('---');
|
|
52
40
|
return lines.join('\n') + '\n\n';
|
|
53
41
|
}
|
|
42
|
+
function sanitizeSlug(slug) {
|
|
43
|
+
return String(slug).toLowerCase().replace(/[^a-z0-9-]/g, '-').replace(/^-+|-+$/g, '');
|
|
44
|
+
}
|
|
45
|
+
function toRawRecords(records, slug) {
|
|
46
|
+
const fetchedAt = new Date().toISOString();
|
|
47
|
+
const rawRecords = [];
|
|
48
|
+
let skippedEmpty = 0;
|
|
49
|
+
for (const [index, record] of records.entries()) {
|
|
50
|
+
const content = String(record.content ?? '').trim();
|
|
51
|
+
if (!content) {
|
|
52
|
+
skippedEmpty += 1;
|
|
53
|
+
continue;
|
|
54
|
+
}
|
|
55
|
+
const title = String(record.title || record.externalId || `Record ${index + 1}`).trim();
|
|
56
|
+
const externalId = String(record.externalId || '').trim()
|
|
57
|
+
|| fallbackExternalId(`${slug}-record`, index + 1, content);
|
|
58
|
+
const normalized = {
|
|
59
|
+
title,
|
|
60
|
+
externalId,
|
|
61
|
+
content,
|
|
62
|
+
metadata: record.metadata,
|
|
63
|
+
};
|
|
64
|
+
rawRecords.push({
|
|
65
|
+
externalId,
|
|
66
|
+
content,
|
|
67
|
+
rawPayload: formatFrontmatter(normalized, slug, fetchedAt) + content,
|
|
68
|
+
metadata: {
|
|
69
|
+
...(record.metadata ?? {}),
|
|
70
|
+
adapter: 'connector-feed',
|
|
71
|
+
source: slug,
|
|
72
|
+
externalId,
|
|
73
|
+
title,
|
|
74
|
+
fetchedAt,
|
|
75
|
+
},
|
|
76
|
+
});
|
|
77
|
+
}
|
|
78
|
+
return { rawRecords, skippedEmpty };
|
|
79
|
+
}
|
|
80
|
+
async function* iterateRecords(records) {
|
|
81
|
+
for (const record of records)
|
|
82
|
+
yield record;
|
|
83
|
+
}
|
|
84
|
+
/**
 * Ingest a batch of connector-feed records into the brain under a named slug.
 *
 * Registers (or updates) a synthetic "seed" source for the slug, then streams
 * the normalized records directly into the distillation pipeline in memory —
 * no on-disk staging folder is written by this function.
 *
 * @param {string} slug - Feed slug; sanitized via `sanitizeSlug` before use.
 * @param {Array<object>} records - Raw feed records; normalized and filtered
 *   by `toRawRecords` (empty-content records are counted as skipped).
 * @returns {Promise<object>} Summary object: `slug`, `acceptedCount`,
 *   `skippedEmpty`, per-run `pipeline` counters/errors, and a human-readable
 *   `message`.
 * @throws {Error} If the slug is empty after sanitizing, if `records` is not
 *   a non-empty array, if every record has empty content, or if the source
 *   fails to register.
 */
export async function ingestBrainRecords(slug, records) {
    const safeSlug = sanitizeSlug(slug);
    if (!safeSlug)
        throw new Error('slug is required');
    if (!Array.isArray(records) || records.length === 0)
        throw new Error(`no records to ingest for slug "${safeSlug}"`);
    // Normalize input into raw pipeline records; empties are dropped here.
    const { rawRecords, skippedEmpty } = toRawRecords(records, safeSlug);
    if (rawRecords.length === 0)
        throw new Error(`no non-empty records to ingest for slug "${safeSlug}"`);
    // Lazy dynamic imports keep the brain modules off the module-load path.
    const { upsertSource, getSource } = await import('../brain/source-registry.js');
    const { runIngestion } = await import('../brain/ingestion-pipeline.js');
    // Register a managed "seed" source so the existing ingestion framework
    // handles classification, distillation, and dedup. `mode: 'direct-records'`
    // marks that records arrive in memory rather than from an inputPath.
    await upsertSource({
        slug: safeSlug,
        kind: 'seed',
        adapter: 'markdown',
        configJson: JSON.stringify({ managed: 'connector-feed', mode: 'direct-records' }),
        targetFolder: `04-Ingest/${safeSlug}`,
        intelligence: 'auto',
        enabled: true,
    });
    const source = await getSource(safeSlug);
    if (!source)
        throw new Error('failed to register source');
    // Feed records through an async iterator, the shape runIngestion expects.
    const result = await runIngestion({ source, records: iterateRecords(rawRecords) });
    let ingestionSummary = `Pipeline: ${result.recordsIn} in · ${result.recordsWritten} written · ${result.recordsSkipped} skipped · ${result.recordsFailed} failed`;
    if (result.errors?.length) {
        // NOTE(review): assumes `errors[0].error` is a string (sliced to 100
        // chars for the summary) — confirm against the pipeline's error shape.
        ingestionSummary += ` (first error: ${result.errors[0].error.slice(0, 100)})`;
    }
    const message = `Ingested into slug "${safeSlug}": ${rawRecords.length} accepted record(s), ${skippedEmpty} empty skipped. ${ingestionSummary}`;
    logger.info({ slug: safeSlug, acceptedCount: rawRecords.length, skippedEmpty, recordCount: records.length }, 'brain_ingest_folder complete');
    return {
        slug: safeSlug,
        acceptedCount: rawRecords.length,
        skippedEmpty,
        pipeline: {
            recordsIn: result.recordsIn,
            recordsWritten: result.recordsWritten,
            recordsSkipped: result.recordsSkipped,
            recordsFailed: result.recordsFailed,
            errors: result.errors,
        },
        message,
    };
}
|
|
54
128
|
export function registerBrainTools(server) {
|
|
55
|
-
server.tool('brain_ingest_folder', 'Ingest a batch of records into the brain under a named slug.
|
|
129
|
+
server.tool('brain_ingest_folder', 'Ingest a batch of records into the brain under a named slug. Sends records directly into the distillation pipeline (chunking, LLM summarization, vault note write, memory indexing, knowledge graph write). Use at the end of Connector Feed cron jobs. Safe to re-run — records with the same externalId update the same distilled note.', {
|
|
56
130
|
slug: z.string().describe('Feed slug (matches 04-Ingest/<slug> folder). Lowercase, hyphen-separated.'),
|
|
57
131
|
records: z.array(z.object({
|
|
58
132
|
title: z.string().describe('Human-readable title for this record.'),
|
|
@@ -67,64 +141,15 @@ export function registerBrainTools(server) {
|
|
|
67
141
|
if (!Array.isArray(records) || records.length === 0) {
|
|
68
142
|
return textResult(`brain_ingest_folder: no records to ingest for slug "${safeSlug}".`);
|
|
69
143
|
}
|
|
70
|
-
const targetFolder = path.join(VAULT_DIR, '04-Ingest', safeSlug);
|
|
71
|
-
mkdirSync(targetFolder, { recursive: true });
|
|
72
|
-
// Write each record to a markdown file
|
|
73
|
-
let writtenCount = 0;
|
|
74
|
-
let skippedExisting = 0;
|
|
75
|
-
for (const r of records) {
|
|
76
|
-
if (!r.content || !r.content.trim())
|
|
77
|
-
continue;
|
|
78
|
-
const fname = filenameFor(r.title, r.externalId);
|
|
79
|
-
const fullPath = path.join(targetFolder, fname);
|
|
80
|
-
const body = formatFrontmatter(r, safeSlug) + r.content;
|
|
81
|
-
// Idempotency: if a file with the same externalId already exists, overwrite
|
|
82
|
-
// (the distillation pipeline does its own content-hash dedup).
|
|
83
|
-
const preExisting = existsSync(fullPath);
|
|
84
|
-
try {
|
|
85
|
-
writeFileSync(fullPath, body, 'utf-8');
|
|
86
|
-
if (preExisting)
|
|
87
|
-
skippedExisting += 1;
|
|
88
|
-
else
|
|
89
|
-
writtenCount += 1;
|
|
90
|
-
}
|
|
91
|
-
catch (err) {
|
|
92
|
-
logger.warn({ err, fullPath }, 'brain_ingest_folder: write failed for one record');
|
|
93
|
-
}
|
|
94
|
-
}
|
|
95
|
-
// Run the distillation pipeline. Use a synthetic seed source so the
|
|
96
|
-
// ingestion framework can classify + distill + write back into the
|
|
97
|
-
// vault & graph with its existing dedup.
|
|
98
|
-
let ingestionSummary = '';
|
|
99
144
|
try {
|
|
100
|
-
const
|
|
101
|
-
|
|
102
|
-
await upsertSource({
|
|
103
|
-
slug: safeSlug,
|
|
104
|
-
kind: 'seed',
|
|
105
|
-
adapter: 'markdown',
|
|
106
|
-
configJson: JSON.stringify({ inputPath: targetFolder, managed: 'connector-feed' }),
|
|
107
|
-
targetFolder: `04-Ingest/${safeSlug}`,
|
|
108
|
-
intelligence: 'auto',
|
|
109
|
-
enabled: true,
|
|
110
|
-
});
|
|
111
|
-
const source = await getSource(safeSlug);
|
|
112
|
-
if (!source)
|
|
113
|
-
throw new Error('failed to register source');
|
|
114
|
-
const result = await runIngestion({ source, inputPath: targetFolder });
|
|
115
|
-
ingestionSummary =
|
|
116
|
-
`Pipeline: ${result.recordsIn} in · ${result.recordsWritten} written · ${result.recordsSkipped} skipped · ${result.recordsFailed} failed`;
|
|
117
|
-
if (result.errors?.length) {
|
|
118
|
-
ingestionSummary += ` (first error: ${result.errors[0].error.slice(0, 100)})`;
|
|
119
|
-
}
|
|
145
|
+
const result = await ingestBrainRecords(safeSlug, records);
|
|
146
|
+
return textResult(result.message);
|
|
120
147
|
}
|
|
121
148
|
catch (err) {
|
|
122
149
|
const msg = err instanceof Error ? err.message : String(err);
|
|
123
150
|
logger.error({ err, slug: safeSlug }, 'brain_ingest_folder: ingestion pipeline failed');
|
|
124
|
-
return textResult(`brain_ingest_folder:
|
|
151
|
+
return textResult(`brain_ingest_folder: ingestion failed for slug "${safeSlug}": ${msg}`);
|
|
125
152
|
}
|
|
126
|
-
logger.info({ slug: safeSlug, writtenCount, skippedExisting, recordCount: records.length }, 'brain_ingest_folder complete');
|
|
127
|
-
return textResult(`Ingested into slug "${safeSlug}": ${writtenCount} new file(s), ${skippedExisting} updated in place. ${ingestionSummary}`);
|
|
128
153
|
});
|
|
129
154
|
}
|
|
130
155
|
//# sourceMappingURL=brain-tools.js.map
|