wispy-cli 2.7.7 → 2.7.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core/browser.mjs +327 -0
- package/core/engine.mjs +239 -0
- package/core/subagent-worker.mjs +325 -0
- package/core/subagents.mjs +642 -88
- package/core/task-decomposer.mjs +375 -0
- package/core/task-router.mjs +395 -0
- package/core/tools.mjs +59 -0
- package/package.json +1 -1
|
@@ -0,0 +1,325 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* core/subagent-worker.mjs — Worker thread code for sub-agent isolation
|
|
3
|
+
*
|
|
4
|
+
* Runs inside a worker_threads Worker. Receives task/config via workerData,
|
|
5
|
+
* communicates back to the main thread via parentPort.postMessage().
|
|
6
|
+
*
|
|
7
|
+
* Message protocol (worker → main):
|
|
8
|
+
* { type: 'progress', agentId, round, content }
|
|
9
|
+
* { type: 'tool_call', agentId, round, call: { name, args, id } }
|
|
10
|
+
* { type: 'tool_result', agentId, round, toolName, result }
|
|
11
|
+
* { type: 'completed', agentId, result }
|
|
12
|
+
* { type: 'failed', agentId, error }
|
|
13
|
+
*
|
|
14
|
+
* Message protocol (main → worker):
|
|
15
|
+
* { type: 'tool_result', callId, result }
|
|
16
|
+
* { type: 'steer', message }
|
|
17
|
+
* { type: 'kill' }
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
import { workerData, parentPort } from "node:worker_threads";
|
|
21
|
+
|
|
22
|
+
// Worker entry guard: parentPort is null when this module is loaded on the
// main thread instead of inside a worker_threads Worker.
if (!parentPort) {
  throw new Error("subagent-worker.mjs must be run as a Worker thread");
}

// Per-agent configuration handed over by the main thread at Worker creation.
// agentId/task/systemPrompt/model/timeout drive the loop; providerConfig and
// toolDefs are forwarded verbatim to the provider HTTP call.
const { agentId, task, systemPrompt, model, timeout, providerConfig, toolDefs } = workerData;

// Hard cap on provider/tool rounds before reporting partial completion.
const MAX_ROUNDS = 30;
// Assumed context window in tokens — NOTE(review): presumably matches the
// default models used below; confirm per-model.
const TOKEN_LIMIT = 128_000;
const COMPACT_THRESHOLD = 0.8; // compact at 80% of TOKEN_LIMIT
|
|
31
|
+
|
|
32
|
+
/**
 * Estimate token count as chars/4.
 * @param {string|null|undefined} text
 * @returns {number} estimated tokens (0 for null/undefined/empty)
 */
function estimateTokens(text) {
  return Math.ceil((text?.length ?? 0) / 4);
}

/**
 * Estimate the total token footprint of a message list.
 *
 * When `content` is missing OR empty, the whole message is serialized
 * instead: assistant tool-call messages carry their payload in `toolCalls`
 * (with content "") and tool_result messages in `result`, so counting the
 * empty content string would score them as zero tokens and delay compaction.
 *
 * @param {Array<object>} msgs
 * @returns {number} estimated token count
 */
function estimateMessages(msgs) {
  return msgs.reduce((sum, m) => {
    const content =
      typeof m.content === "string" && m.content.length > 0
        ? m.content
        : JSON.stringify(m);
    return sum + estimateTokens(content);
  }, 0);
}
|
|
43
|
+
|
|
44
|
+
/** Pending tool call resolvers: callId → { resolve, reject } */
const pendingToolCalls = new Map();
/** Steer messages queued by the main thread; drained at the top of each round. */
const steerMessages = [];
// Set by a 'kill' message; checked between rounds and between tool calls.
let killed = false;
|
|
49
|
+
|
|
50
|
+
// Dispatch control messages arriving from the main thread.
parentPort.on("message", (msg) => {
  switch (msg.type) {
    case "tool_result": {
      // Settle the Promise created by requestToolCall; ignore unknown ids
      // (e.g. replies that arrive after the 60s tool timeout fired).
      const pending = pendingToolCalls.get(msg.callId);
      if (!pending) return;
      pendingToolCalls.delete(msg.callId);
      if (msg.error) {
        pending.reject(new Error(msg.error));
      } else {
        pending.resolve(msg.result);
      }
      return;
    }
    case "steer":
      // Queued here; injected into the conversation at the next round.
      steerMessages.push(msg.message);
      return;
    case "kill":
      killed = true;
      return;
  }
});
|
|
68
|
+
|
|
69
|
+
/** Monotonic sequence for fallback tool-call ids (Date.now() alone can collide
 *  when two same-named calls are issued within the same millisecond). */
let toolCallSeq = 0;

/**
 * Request a tool call from the main thread.
 *
 * Posts a 'tool_call' message and returns a Promise that settles when the
 * main thread replies with a matching 'tool_result' (keyed by callId), or
 * rejects after a 60s timeout.
 *
 * @param {{name:string, args:object, id?:string}} call
 * @param {number} round - current agent round, echoed for observability
 * @returns {Promise<*>} the tool's result payload
 */
function requestToolCall(call, round) {
  return new Promise((resolve, reject) => {
    const callId = call.id ?? `${call.name}-${Date.now()}-${toolCallSeq++}`;

    // Tool timeout: 60s. The has() check guards against a reply that raced
    // in just before the timer fired.
    const timer = setTimeout(() => {
      if (pendingToolCalls.has(callId)) {
        pendingToolCalls.delete(callId);
        reject(new Error(`Tool '${call.name}' timed out in worker`));
      }
    }, 60_000);

    // Wrap the resolvers so the timer is cleared on settle — otherwise every
    // completed tool call leaves a live timer keeping the worker's event
    // loop alive for up to a minute.
    pendingToolCalls.set(callId, {
      resolve: (value) => {
        clearTimeout(timer);
        resolve(value);
      },
      reject: (err) => {
        clearTimeout(timer);
        reject(err);
      },
    });

    parentPort.postMessage({
      type: "tool_call",
      agentId,
      round,
      call: { name: call.name, args: call.args, id: callId },
    });
  });
}
|
|
94
|
+
|
|
95
|
+
/**
 * Compact messages when approaching the token limit.
 *
 * Keeps the system prompt plus roughly the last 3 rounds (6 non-system
 * messages) verbatim and replaces everything older with a single truncated
 * summary message. The cut point is walked backwards past any leading
 * tool_result messages so the kept tail never starts with a tool_result
 * orphaned from its originating assistant tool call — both provider APIs
 * reject a tool result with no preceding tool use.
 *
 * @param {Array<object>} messages
 * @returns {Array<object>} compacted messages (the original array when there
 *   is nothing to summarize)
 */
function compactMessages(messages) {
  const system = messages.filter((m) => m.role === "system");
  const nonSystem = messages.filter((m) => m.role !== "system");

  // Keep last 6 messages (≈3 rounds), extended so the tail does not begin
  // with an orphaned tool_result.
  let cut = Math.max(0, nonSystem.length - 6);
  while (cut > 0 && nonSystem[cut].role === "tool_result") {
    cut--;
  }

  const keepTail = nonSystem.slice(cut);
  const toSummarize = nonSystem.slice(0, cut);

  if (toSummarize.length === 0) {
    return messages; // Nothing to compact
  }

  // Truncate each summarized message to 300 chars to bound summary size.
  const summaryContent = toSummarize
    .filter((m) => m.role === "user" || m.role === "assistant")
    .map((m) => `[${m.role}]: ${(m.content ?? "").slice(0, 300)}`)
    .join("\n");

  const summaryMsg = {
    role: "user",
    content: `[Context summary from earlier in this conversation]\n${summaryContent}\n[End of summary]`,
  };

  return [...system, summaryMsg, ...keepTail];
}
|
|
126
|
+
|
|
127
|
+
/** Parse a JSON string, returning `fallback` instead of throwing on bad input. */
function parseJsonSafe(text, fallback) {
  try {
    return JSON.parse(text);
  } catch {
    return fallback;
  }
}

/**
 * Read a fetch Response body as JSON, throwing an informative Error when the
 * request failed or the body is not JSON (e.g. an HTML error page from a
 * proxy) — the previous response.json()-first approach surfaced an opaque
 * SyntaxError in that case.
 *
 * @param {Response} response
 * @returns {Promise<object>}
 * @throws {Error} with the provider's error message or the HTTP status
 */
async function readJsonResponse(response) {
  const raw = await response.text();
  const data = parseJsonSafe(raw, null);
  if (!response.ok) {
    throw new Error(data?.error?.message ?? `API error ${response.status}`);
  }
  if (data === null) {
    throw new Error(`API returned non-JSON response (status ${response.status})`);
  }
  return data;
}

/**
 * Call the Anthropic Messages API.
 *
 * Maps the worker's internal roles onto Anthropic content blocks:
 * tool_result → user message with a tool_result block, assistant+toolCalls →
 * tool_use blocks, everything else → a single text block.
 */
async function callAnthropic(messages, useModel, apiKey) {
  const systemMsg = messages.find((m) => m.role === "system")?.content ?? "";
  const nonSystem = messages.filter((m) => m.role !== "system");

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": apiKey,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify({
      model: useModel ?? "claude-3-5-haiku-20241022",
      max_tokens: 4096,
      system: systemMsg,
      messages: nonSystem.map((m) => ({
        role: m.role === "tool_result" ? "user" : m.role,
        content: m.role === "tool_result"
          ? [{ type: "tool_result", tool_use_id: m.toolUseId, content: JSON.stringify(m.result) }]
          : m.role === "assistant" && m.toolCalls?.length
            ? m.toolCalls.map((tc) => ({ type: "tool_use", id: tc.id, name: tc.name, input: tc.args ?? tc.input ?? {} }))
            : [{ type: "text", text: m.content ?? "" }],
      })),
      tools: (toolDefs ?? []).map((t) => ({
        name: t.name,
        description: t.description,
        input_schema: t.parameters,
      })),
    }),
  });

  const data = await readJsonResponse(response);

  const textBlock = data.content?.find((b) => b.type === "text");
  const toolUseBlocks = data.content?.filter((b) => b.type === "tool_use") ?? [];

  if (toolUseBlocks.length > 0) {
    return {
      type: "tool_calls",
      calls: toolUseBlocks.map((b) => ({ id: b.id, name: b.name, args: b.input })),
    };
  }
  return { type: "text", text: textBlock?.text ?? "" };
}

/**
 * Call an OpenAI-compatible chat/completions endpoint (the default when the
 * provider is not "anthropic").
 */
async function callOpenAiCompat(messages, useModel, apiKey, endpoint) {
  const url = endpoint ?? "https://api.openai.com/v1/chat/completions";
  const response = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: useModel ?? "gpt-4o-mini",
      messages: messages.map((m) => {
        if (m.role === "tool_result") {
          return { role: "tool", tool_call_id: m.toolUseId, content: JSON.stringify(m.result) };
        }
        if (m.role === "assistant" && m.toolCalls?.length) {
          return {
            role: "assistant",
            content: m.content ?? null,
            tool_calls: m.toolCalls.map((tc) => ({
              id: tc.id,
              type: "function",
              function: { name: tc.name, arguments: JSON.stringify(tc.args ?? {}) },
            })),
          };
        }
        return { role: m.role, content: m.content ?? "" };
      }),
      tools: (toolDefs ?? []).map((t) => ({
        type: "function",
        function: { name: t.name, description: t.description, parameters: t.parameters },
      })),
    }),
  });

  const data = await readJsonResponse(response);

  const choice = data.choices?.[0];
  const msg = choice?.message;

  if (msg?.tool_calls?.length) {
    return {
      type: "tool_calls",
      calls: msg.tool_calls.map((tc) => ({
        id: tc.id,
        name: tc.function.name,
        // Malformed arguments from the model must not crash the worker;
        // fall back to an empty args object.
        args: parseJsonSafe(tc.function.arguments ?? "{}", {}),
      })),
    };
  }
  return { type: "text", text: msg?.content ?? "" };
}

/**
 * Make an HTTP call to the provider API.
 * Uses the providerConfig passed from the main thread; `model` from
 * workerData overrides providerConfig.model when set.
 *
 * @param {Array<object>} messages - internal message list
 * @returns {Promise<{type:'text',text:string}|{type:'tool_calls',calls:Array}>}
 * @throws {Error} on HTTP errors or unparseable responses
 */
async function callProvider(messages) {
  const { provider, apiKey, model: configModel, endpoint } = providerConfig;
  const useModel = model ?? configModel;

  if (provider === "anthropic") {
    return callAnthropic(messages, useModel, apiKey);
  }
  return callOpenAiCompat(messages, useModel, apiKey, endpoint);
}
|
|
234
|
+
|
|
235
|
+
/**
 * Main agent loop.
 *
 * Runs up to MAX_ROUNDS provider rounds. Each round: honors 'kill', injects
 * queued steer messages, compacts context past the threshold, calls the
 * provider, then either reports completion (text reply) or executes the
 * requested tool calls serially via the main thread and loops again.
 *
 * Posts exactly one terminal message:
 *   'completed' — text reply received, or MAX_ROUNDS exhausted (partial)
 *   'failed'    — killed, or the provider call threw
 */
async function runLoop() {
  const messages = [
    { role: "system", content: systemPrompt },
    { role: "user", content: task },
  ];

  for (let round = 0; round < MAX_ROUNDS; round++) {
    // A 'kill' message may have arrived since the previous round.
    if (killed) {
      parentPort.postMessage({ type: "failed", agentId, error: "killed" });
      return;
    }

    // Inject steer messages queued by the orchestrator since last round.
    while (steerMessages.length > 0) {
      const steer = steerMessages.shift();
      messages.push({ role: "user", content: `[Guidance from orchestrator]: ${steer}` });
    }

    // Context compaction once the estimate crosses COMPACT_THRESHOLD.
    const totalTokens = estimateMessages(messages);
    if (totalTokens > TOKEN_LIMIT * COMPACT_THRESHOLD) {
      const compacted = compactMessages(messages);
      // Replace contents in place so `messages` keeps its array identity.
      messages.length = 0;
      messages.push(...compacted);
    }

    // Emit progress
    parentPort.postMessage({ type: "progress", agentId, round, content: `Starting round ${round + 1}` });

    // Provider call with retry + fallback handled by main thread
    // Here we just call via the providerConfig directly
    let result;
    try {
      result = await callProvider(messages);
    } catch (err) {
      parentPort.postMessage({ type: "failed", agentId, error: err.message });
      return;
    }

    // A plain text reply means the agent considers the task done.
    if (result.type === "text") {
      parentPort.postMessage({ type: "completed", agentId, result: result.text });
      return;
    }

    // Handle tool calls: record the assistant turn, then run calls serially.
    messages.push({ role: "assistant", toolCalls: result.calls, content: "" });

    for (const call of result.calls) {
      // Honor 'kill' between individual tool calls, not just between rounds.
      if (killed) {
        parentPort.postMessage({ type: "failed", agentId, error: "killed" });
        return;
      }

      let toolResult;
      try {
        toolResult = await requestToolCall(call, round);
      } catch (err) {
        // Tool failure is not fatal: surface it to the model as a result.
        toolResult = { error: err.message, success: false };
      }

      // Mirror the result to the main thread for observability.
      parentPort.postMessage({
        type: "tool_result",
        agentId,
        round,
        toolName: call.name,
        result: toolResult,
      });

      // NOTE(review): when call.id is missing, requestToolCall generates its
      // own id for the main-thread round trip, so this toolUseId (call.name)
      // may not match it — confirm callers always set call.id before relying
      // on tool_use/tool_result pairing in the provider payloads.
      messages.push({
        role: "tool_result",
        toolName: call.name,
        toolUseId: call.id ?? call.name,
        result: toolResult,
      });
    }
  }

  // Max rounds exhausted without a final text reply — report partial work.
  parentPort.postMessage({ type: "completed", agentId, result: "(max rounds reached — partial work above)" });
}
|
|
316
|
+
|
|
317
|
+
// Start the agent loop, racing it against the overall sub-agent timeout.
const timeoutMs = timeout ?? 300_000;
let timeoutId;
const timeoutPromise = new Promise((_, reject) => {
  timeoutId = setTimeout(() => reject(new Error("Sub-agent timed out")), timeoutMs);
});

Promise.race([runLoop(), timeoutPromise])
  .catch((err) => {
    parentPort.postMessage({ type: "failed", agentId, error: err.message ?? String(err) });
  })
  .finally(() => {
    // Without this, the losing timer keeps the worker's event loop alive for
    // the full timeoutMs even after the loop has finished.
    clearTimeout(timeoutId);
  });
|