wispy-cli 2.7.7 → 2.7.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core/browser.mjs +327 -0
- package/core/engine.mjs +239 -0
- package/core/subagent-worker.mjs +325 -0
- package/core/subagents.mjs +642 -88
- package/core/task-decomposer.mjs +375 -0
- package/core/task-router.mjs +395 -0
- package/core/tools.mjs +59 -0
- package/package.json +1 -1
|
@@ -0,0 +1,375 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* core/task-decomposer.mjs — Task Decomposition Engine for Wispy
|
|
3
|
+
*
|
|
4
|
+
* Splits complex tasks into parallel subtasks, routes each to the best model,
|
|
5
|
+
* executes concurrently, and synthesizes results.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { routeTask, getCheapDecomposerModel, classifyTask } from "./task-router.mjs";
|
|
9
|
+
|
|
10
|
+
// ── Subtask counter ───────────────────────────────────────────────────────────

// Monotonic counter shared by all decompositions; decomposeTask() resets it.
let _subtaskCounter = 0;

/** Generate the next sequential subtask ID, e.g. "st-01", "st-02". */
function makeSubtaskId() {
  _subtaskCounter += 1;
  return `st-${String(_subtaskCounter).padStart(2, "0")}`;
}
|
|
16
|
+
|
|
17
|
+
// ── Decompose task using LLM ─────────────────────────────────────────────────

/**
 * Decompose a complex task into parallel subtasks using a cheap LLM.
 *
 * Simple or non-parallelizable tasks short-circuit to a single-subtask plan.
 * Complex tasks go through the LLM when an engine is supplied, falling back
 * to the text-splitting heuristic if the LLM call fails or there is no engine.
 *
 * @param {string} task - The task description
 * @param {object} [options]
 * @param {number} [options.maxSubtasks=5] - Max number of subtasks
 * @param {string} [options.costPreference="balanced"] - "minimize" | "balanced" | "maximize-quality"
 * @param {object} [options.engine] - WispyEngine instance (for LLM calls)
 * @returns {Promise<{
 *   subtasks: Array<{id,task,type,dependencies,priority}>,
 *   parallelGroups: string[][],
 *   estimatedCost: string,
 *   estimatedTime: string
 * }>}
 */
export async function decomposeTask(task, options = {}) {
  const maxSubtasks = options.maxSubtasks ?? 5;
  const costPreference = options.costPreference ?? "balanced";
  const engine = options.engine ?? null;

  _subtaskCounter = 0; // reset for this decomposition

  const classification = classifyTask(task);
  const worthDecomposing =
    classification.complexity === "complex" && classification.parallelizable;

  // Quick path: a simple or non-parallelizable task becomes one subtask.
  if (!worthDecomposing) {
    const id = makeSubtaskId();
    const plan = {
      subtasks: [{ id, task, type: classification.type, dependencies: [], priority: 1 }],
      parallelGroups: [[id]],
      estimatedCost: "low",
      estimatedTime: "1-2min",
    };
    if (costPreference === "minimize") plan.estimatedCost = "very-low";
    if (classification.complexity === "simple") plan.estimatedTime = "<1min";
    return plan;
  }

  if (engine) {
    try {
      return await _llmDecompose(task, maxSubtasks, costPreference, engine);
    } catch (err) {
      if (process.env.WISPY_DEBUG) {
        console.error(`[task-decomposer] LLM decompose failed: ${err.message}, falling back to heuristic`);
      }
    }
  }

  // Heuristic decomposition (no LLM)
  return _heuristicDecompose(task, maxSubtasks, costPreference, classification);
}
|
|
75
|
+
|
|
76
|
+
/**
 * Decompose using an LLM call (cheap model).
 *
 * Asks a cheap model for a JSON decomposition, validates and re-IDs the
 * returned subtasks, remaps dependency IDs, and groups subtasks by priority.
 *
 * @param {string} task - Task description to decompose.
 * @param {number} maxSubtasks - Upper bound on subtasks kept from the reply.
 * @param {string} costPreference - "minimize" | "balanced" | "maximize-quality".
 * @param {object} engine - WispyEngine instance providing `providers.chat`.
 * @returns {Promise<{subtasks: object[], parallelGroups: string[][], estimatedCost: string, estimatedTime: string}>}
 * @throws {Error} If the reply contains no parseable JSON or no valid subtasks
 *   (the caller falls back to heuristic decomposition on throw).
 */
async function _llmDecompose(task, maxSubtasks, costPreference, engine) {
  const { model } = getCheapDecomposerModel();

  const systemPrompt = `You are a task decomposition expert. Split complex tasks into independent subtasks that can be parallelized.
Reply with ONLY valid JSON. No markdown, no explanation.`;

  const userPrompt = `Decompose this task into at most ${maxSubtasks} subtasks.
Task: "${task}"

Requirements:
- Identify subtasks that can run in parallel (no dependencies)
- Identify subtasks that need results from others (add dependency IDs)
- Classify each subtask type: coding, research, analysis, design, review, summarize, format, or general
- Set priority: 1 = first group (parallel), 2 = waits for priority 1, etc.

Respond with ONLY this JSON (no markdown):
{
  "subtasks": [
    {"id": "st-01", "task": "...", "type": "coding", "dependencies": [], "priority": 1},
    {"id": "st-02", "task": "...", "type": "research", "dependencies": [], "priority": 1},
    {"id": "st-03", "task": "...", "type": "review", "dependencies": ["st-01","st-02"], "priority": 2}
  ],
  "estimatedTime": "2-5min",
  "estimatedCost": "low"
}`;

  const messages = [
    { role: "system", content: systemPrompt },
    { role: "user", content: userPrompt },
  ];

  const result = await engine.providers.chat(messages, [], { model });
  const text = result.type === "text" ? result.text : JSON.stringify(result);

  // Extract JSON from response (the model may wrap it in prose despite instructions)
  const jsonMatch = text.match(/\{[\s\S]*"subtasks"[\s\S]*\}/);
  if (!jsonMatch) throw new Error("LLM did not return valid decomposition JSON");

  const parsed = JSON.parse(jsonMatch[0]);

  // Keep only well-formed entries; malformed LLM output (non-object entries,
  // missing task string) must not crash the pipeline.
  const subtasks = (parsed.subtasks ?? [])
    .filter(st => st && typeof st === "object" && typeof st.task === "string" && st.task.trim().length > 0)
    .slice(0, maxSubtasks);

  // An empty decomposition is useless; throw so the caller falls back to the
  // heuristic instead of silently returning a plan with zero subtasks.
  if (subtasks.length === 0) throw new Error("LLM decomposition contained no valid subtasks");

  // Re-ID subtasks with our own counter so IDs are consistent file-wide.
  const idMap = {};
  for (const st of subtasks) {
    const newId = makeSubtaskId();
    idMap[st.id] = newId;
    st.id = newId;
  }

  // Remap dependency IDs to the new IDs; drop references to unknown subtasks.
  for (const st of subtasks) {
    st.dependencies = (st.dependencies ?? []).map(d => idMap[d] ?? d).filter(d => subtasks.some(s => s.id === d));
  }

  // Build parallel groups from priority (lower priority number runs first).
  const groups = {};
  for (const st of subtasks) {
    const p = st.priority ?? 1;
    if (!groups[p]) groups[p] = [];
    groups[p].push(st.id);
  }
  const parallelGroups = Object.keys(groups).sort((a, b) => Number(a) - Number(b)).map(k => groups[k]);

  return {
    subtasks,
    parallelGroups,
    estimatedCost: parsed.estimatedCost ?? _estimateCost(costPreference, subtasks.length),
    estimatedTime: parsed.estimatedTime ?? _estimateTime(subtasks.length),
  };
}
|
|
149
|
+
|
|
150
|
+
/**
 * Heuristic decomposition (no LLM required).
 *
 * Splits the task text on newlines, semicolons, and certain "and" boundaries;
 * each sufficiently long fragment becomes an independent priority-1 subtask.
 */
function _heuristicDecompose(task, maxSubtasks, costPreference, classification) {
  // Split by "and" / newlines / semicolons as a heuristic
  const fragments = task
    .split(/\n|;|\band\b(?=[^,]*,|\s+\w+\s+the\s)/)
    .map(part => part.trim())
    .filter(part => part.length > 10);

  const subtasks = [];
  for (const fragment of fragments.slice(0, maxSubtasks)) {
    subtasks.push({
      id: makeSubtaskId(),
      task: fragment,
      type: classifyTask(fragment).type,
      dependencies: [],
      priority: 1,
    });
  }

  // No useful split found — use the whole task as one subtask.
  if (subtasks.length === 0) {
    const id = makeSubtaskId();
    return {
      subtasks: [{ id, task, type: classification.type, dependencies: [], priority: 1 }],
      parallelGroups: [[id]],
      estimatedCost: _estimateCost(costPreference, 1),
      estimatedTime: "1-3min",
    };
  }

  return {
    subtasks,
    parallelGroups: [subtasks.map(st => st.id)],
    estimatedCost: _estimateCost(costPreference, subtasks.length),
    estimatedTime: _estimateTime(subtasks.length),
  };
}
|
|
186
|
+
|
|
187
|
+
/** Map a cost preference and subtask count to a coarse cost bucket string. */
function _estimateCost(costPreference, numSubtasks) {
  switch (costPreference) {
    case "minimize":
      return "very-low";
    case "maximize-quality":
      return numSubtasks > 3 ? "high" : "medium";
    default:
      return numSubtasks > 3 ? "medium" : "low";
  }
}
|
|
192
|
+
|
|
193
|
+
/** Map a subtask count to a coarse wall-clock estimate string. */
function _estimateTime(numSubtasks) {
  if (numSubtasks > 3) return "2-5min";
  return numSubtasks > 1 ? "1-3min" : "<1min";
}
|
|
198
|
+
|
|
199
|
+
// ── Execute decomposed plan ──────────────────────────────────────────────────

/**
 * Execute an execution plan, running parallel groups concurrently.
 *
 * Groups run strictly in sequence; subtasks within one group run in parallel.
 * Each subtask gets one retry after its first failure; a subtask that still
 * fails is recorded in `errors` and its slot in `completedResults` is filled
 * with a failure placeholder string so dependent subtasks can still run.
 *
 * @param {object} plan - Result from decomposeTask()
 * @param {object} engine - WispyEngine instance
 * @param {object} [opts]
 * @param {string} [opts.costPreference="balanced"]
 * @param {Function} [opts.onSubtaskStart] - (subtask) => void
 * @param {Function} [opts.onSubtaskComplete] - (subtask, result) => void
 * @param {Function} [opts.onSubtaskFail] - (subtask, error) => void
 * @returns {Promise<{ results: object[], synthesized: string, errors: object[] }>}
 */
export async function executeDecomposedPlan(plan, engine, opts = {}) {
  const costPreference = opts.costPreference ?? "balanced";
  const completedResults = {}; // id → result
  const errors = [];
  const MAX_RETRIES = 1; // one retry after the initial attempt

  for (const group of plan.parallelGroups) {
    // Filter to subtasks in this group (skip if all deps not satisfied)
    // NOTE(review): despite the comment above, no dependency check happens
    // here — unknown IDs are simply dropped, and deps are assumed satisfied
    // by group ordering. Confirm priorities always respect dependencies.
    const groupSubtasks = group
      .map(id => plan.subtasks.find(s => s.id === id))
      .filter(Boolean);

    // Run group in parallel
    const groupPromises = groupSubtasks.map(async (subtask) => {
      // Route to best model (routing is computed once, reused across retries)
      const routing = routeTask(
        { type: subtask.type, complexity: "medium", estimatedTokens: Math.ceil(subtask.task.length / 4) + 800, parallelizable: false },
        null,
        { costPreference }
      );

      opts.onSubtaskStart?.(subtask);

      let attempt = 0;
      while (attempt <= MAX_RETRIES) {
        try {
          // Build context from dependencies (missing/failed deps are skipped)
          const depContext = subtask.dependencies
            .map(depId => completedResults[depId])
            .filter(Boolean)
            .map((r, i) => `### Dependency ${i + 1} result:\n${r}`)
            .join("\n\n");

          const fullTask = depContext
            ? `${subtask.task}\n\n---\nContext from previous steps:\n${depContext}`
            : subtask.task;

          // Use sub-agent manager if available, else direct provider call
          let result;
          if (engine.subagents) {
            const agent = await engine.subagents.spawn({
              task: fullTask,
              label: `decomposed-${subtask.id}`,
              model: routing.model,
              timeout: 120, // seconds
              workstream: engine._activeWorkstream,
            });

            // Wait for completion
            result = await engine.subagents.waitFor(agent.id, 120_000);
            // NOTE(review): a sub-agent error string becomes the result here
            // and the subtask is still reported as success — confirm this
            // best-effort behavior is intended.
            result = result.result ?? result.error ?? "(no result)";
          } else {
            // Direct provider call
            const messages = [
              { role: "system", content: `You are a focused worker agent. Complete only this specific subtask. Be concise.` },
              { role: "user", content: fullTask },
            ];
            const response = await engine.providers.chat(messages, [], { model: routing.model });
            result = response.type === "text" ? response.text : JSON.stringify(response);
          }

          completedResults[subtask.id] = result;
          opts.onSubtaskComplete?.(subtask, result);
          return { id: subtask.id, result, routing, success: true };
        } catch (err) {
          attempt++;
          if (attempt > MAX_RETRIES) {
            const error = { id: subtask.id, error: err.message, subtask };
            errors.push(error);
            opts.onSubtaskFail?.(subtask, err);
            // Non-critical: continue with empty result
            completedResults[subtask.id] = `[subtask ${subtask.id} failed: ${err.message}]`;
            return { id: subtask.id, result: null, error: err.message, success: false };
          }
          // Retry with linear backoff (1s after first failure, 2s after second, ...)
          await new Promise(r => setTimeout(r, 1000 * attempt));
        }
      }
    });

    // Wait for all in group before proceeding to next group
    await Promise.all(groupPromises);
  }

  // Collect results in subtask order
  const orderedResults = plan.subtasks.map(st => ({
    id: st.id,
    task: st.task,
    type: st.type,
    result: completedResults[st.id] ?? null,
  }));

  // Synthesize
  let synthesized;
  try {
    synthesized = await synthesizeResults(orderedResults, engine);
  } catch (err) {
    // Fallback: concatenate results
    synthesized = orderedResults
      .filter(r => r.result)
      .map(r => `**${r.type.toUpperCase()}**: ${r.result}`)
      .join("\n\n---\n\n");
  }

  return {
    results: orderedResults,
    synthesized,
    errors,
  };
}
|
|
323
|
+
|
|
324
|
+
// ── Synthesize results ────────────────────────────────────────────────────────

/**
 * Synthesize multiple subtask results into a coherent response.
 *
 * Failed-subtask placeholders ("[subtask ...]") are excluded. With zero
 * usable results a fixed message is returned; with exactly one, that result
 * is returned as-is; otherwise the results are merged — via the engine's
 * cheap model when available, or by simple concatenation.
 *
 * @param {Array<{id, task, type, result}>} subtaskResults
 * @param {object} [engine] - WispyEngine instance (for LLM synthesis)
 * @returns {Promise<string>}
 */
export async function synthesizeResults(subtaskResults, engine) {
  const usable = subtaskResults.filter(
    entry => entry.result && !entry.result.startsWith("[subtask")
  );

  if (usable.length === 0) return "All subtasks failed to produce results.";
  if (usable.length === 1) return usable[0].result;

  // Build synthesis prompt (truncate each task/result to keep the prompt small)
  const sections = [];
  usable.forEach((entry, index) => {
    sections.push(`### Subtask ${index + 1} (${entry.type}): ${entry.task.slice(0, 100)}\n${entry.result.slice(0, 2000)}`);
  });
  const parts = sections.join("\n\n---\n\n");

  // If no engine, concatenate
  if (!engine) return `## Combined Results\n\n${parts}`;

  const { model } = getCheapDecomposerModel();

  const messages = [
    {
      role: "system",
      content: `You are a synthesis agent. Merge multiple subtask outputs into a single, coherent, well-structured response.
Remove redundancy. Resolve conflicts by noting them. Maintain all important information.`,
    },
    {
      role: "user",
      content: `Synthesize these ${usable.length} subtask results into one coherent response:\n\n${parts}`,
    },
  ];

  try {
    const reply = await engine.providers.chat(messages, [], { model });
    return reply.type === "text" ? reply.text : JSON.stringify(reply);
  } catch (err) {
    // Fallback
    return `## Synthesized Results\n\n${parts}`;
  }
}
|