@deeflectcom/smart-spawn 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.ts +655 -0
- package/openclaw.plugin.json +40 -0
- package/package.json +25 -0
- package/skills/SKILL.md +214 -0
- package/skills/smart-spawn/SKILL.md +114 -0
- package/src/api-client.ts +280 -0
package/index.ts
ADDED
@@ -0,0 +1,655 @@
import { ApiClient } from "./src/api-client.ts";
import { existsSync, readFileSync, writeFileSync } from "fs";
import { join } from "path";
import { randomUUID } from "crypto";

const CATEGORY_KEYWORDS: Record<string, string[]> = {
  coding: [
    "code", "coding", "program", "debug", "fix", "implement", "refactor",
    "typescript", "python", "javascript", "rust", "api", "function", "test", "bug",
  ],
  reasoning: [
    "reason", "reasoning", "analysis", "think", "logic",
    "math", "prove", "evaluate", "strategy", "plan", "deduce", "infer",
  ],
  creative: [
    "creative", "write", "story", "poem", "essay", "blog", "content",
    "marketing", "brainstorm", "idea", "narrative", "fiction",
  ],
  research: [
    "research", "search", "find", "investigate", "summarize",
    "report", "literature", "paper", "study",
    "cause", "explain", "history", "compare", "overview", "background", "origin",
  ],
  "fast-cheap": [
    "quick", "fast", "simple", "brief", "classify", "label", "tag",
    "extract", "parse", "format", "convert",
    "add", "sum", "subtract", "multiply", "divide", "calculate",
  ],
  vision: [
    "image", "picture", "photo", "screenshot", "diagram", "visual", "ocr",
    "analyze", "look", "see", "describe", "identify", "detect", "recognize",
  ],
};

function classifyTask(text: string): string {
  const lower = (text || "").toLowerCase();
  let best = "general";
  let bestScore = 0;
  for (const [category, keywords] of Object.entries(CATEGORY_KEYWORDS)) {
    const score = keywords.filter((kw) => lower.includes(kw)).length;
    if (score > bestScore) {
      bestScore = score;
      best = category;
    }
  }
  return best;
}

/**
 * Build set of providers the user has direct keys for.
 * Auth profile IDs follow the pattern: "{provider}:{identifier}"
 * e.g. "anthropic:default", "openai:oauth", "google-gemini:api_key"
 */
function detectDirectProviders(api: any): Set<string> {
  const profiles = api.config?.auth?.profiles ?? {};
  const direct = new Set<string>();
  for (const [profileId, profile] of Object.entries(profiles) as [string, any][]) {
    const provider = profile?.provider ?? profileId.split(":")[0];
    if (provider && provider !== "openrouter") {
      direct.add(provider);
    }
  }
  // Also check models.providers for custom provider configs
  const providers = api.config?.models?.providers ?? {};
  for (const key of Object.keys(providers)) {
    if (key !== "openrouter") direct.add(key);
  }
  return direct;
}

/**
 * Map provider names from auth profiles to OpenRouter model ID prefixes.
 * Auth uses "anthropic", "google-gemini", etc. Model IDs use "anthropic/", "google/", etc.
 */
const PROVIDER_TO_MODEL_PREFIX: Record<string, string> = {
  anthropic: "anthropic",
  openai: "openai",
  "google-gemini": "google",
  "google-gemini-cli": "google",
  "aws-bedrock": "amazon",
};

/**
 * Route model to cheapest available provider.
 * If user has a direct key for the model's provider, skip OpenRouter.
 */
function routeModel(modelId: string, directProviders: Set<string>): string {
  if (modelId.startsWith("openrouter/")) modelId = modelId.replace(/^openrouter\//, "");
  const provider = modelId.split("/")[0]; // e.g. "anthropic" from "anthropic/claude-opus-4-6"

  // Check if any direct provider matches this model's provider
  for (const [authProvider, modelPrefix] of Object.entries(PROVIDER_TO_MODEL_PREFIX)) {
    if (modelPrefix === provider && directProviders.has(authProvider)) {
      return modelId; // Use direct — no openrouter/ prefix
    }
  }

  // Also check if provider name directly matches (e.g. "anthropic" in both)
  if (directProviders.has(provider)) {
    return modelId;
  }

  // Fallback to OpenRouter
  return `openrouter/${modelId}`;
}

/** Get or create a persistent instance ID for community telemetry */
function getInstanceId(pluginDir: string): string {
  // Try multiple paths: explicit dir, home-based fallback
  const candidates = [
    join(pluginDir, ".instance-id"),
    join(process.env.HOME ?? "/tmp", ".smart-spawn-instance-id"),
  ];

  for (const idPath of candidates) {
    try {
      if (existsSync(idPath)) {
        return readFileSync(idPath, "utf-8").trim();
      }
    } catch { /* ignore */ }
  }

  const id = randomUUID();
  for (const idPath of candidates) {
    try {
      writeFileSync(idPath, id, "utf-8");
      return id;
    } catch { /* ignore, try next */ }
  }
  return id;
}

export default function (api: any) {
  const pluginConfig =
    api.config?.plugins?.entries?.["smart-spawn"]?.config ?? {};

  const apiUrl = pluginConfig.apiUrl ?? "https://ss.deeflect.com";
  const defaultBudget = pluginConfig.defaultBudget ?? "medium";
  const defaultMode = pluginConfig.defaultMode ?? "single";
  const collectiveCount = pluginConfig.collectiveCount ?? 3;
  const telemetryOptIn = pluginConfig.telemetryOptIn ?? false;
  const communityUrl = pluginConfig.communityUrl ?? apiUrl;

  // Detect which providers the user has direct access to
  const directProviders = detectDirectProviders(api);
  const hasOpenRouter = Object.keys(api.config?.auth?.profiles ?? {})
    .some((id: string) => id.startsWith("openrouter:"));

  if (directProviders.size > 0) {
    console.log(`[smart-spawn] Direct providers detected: ${[...directProviders].join(", ")}`);
  }
  if (hasOpenRouter) {
    console.log(`[smart-spawn] OpenRouter auth detected — will use for models without direct provider`);
  }
  if (!hasOpenRouter && directProviders.size === 0) {
    console.warn(`[smart-spawn] WARNING: No auth profiles detected! Models will fail to spawn.`);
    console.warn(`[smart-spawn] Run: openclaw auth add openrouter (or add a direct provider key)`);
  }

  const RAW_FALLBACKS: Record<string, string> = {
    coding: "anthropic/claude-opus-4-6",
    reasoning: "anthropic/claude-opus-4-6",
    creative: "anthropic/claude-opus-4-6",
    research: "google/gemini-2.5-flash",
    "fast-cheap": "moonshotai/kimi-k2.5",
    general: "anthropic/claude-sonnet-4",
    vision: "anthropic/claude-sonnet-4",
  };
  function getFallback(category: string): string {
    return routeModel(RAW_FALLBACKS[category] ?? RAW_FALLBACKS.general, directProviders);
  }

  const client = new ApiClient(apiUrl, communityUrl);

  // Instance ID for community telemetry (lazy-loaded)
  let instanceId: string | null = null;
  function getOrCreateInstanceId(): string {
    if (!instanceId) {
      instanceId = getInstanceId(typeof __dirname !== 'undefined' ? __dirname : new URL('.', import.meta.url).pathname);
    }
    return instanceId;
  }

  /** Build a fallback-to-single-mode response when the API is unavailable */
  function buildFallbackResponse(category: string, budget: string, enrichedTask: string, context?: string) {
    const modelId = getFallback(category);
    client.logSpawn({ model: modelId, category, budget, mode: "single", role: "primary", source: "fallback", context });
    return {
      content: [{
        type: "text",
        text: JSON.stringify({
          action: "spawn",
          model: modelId,
          task: enrichedTask,
          category,
          budget,
          reason: "API unavailable, falling back to single mode",
          source: "fallback",
        }),
      }],
    };
  }

  api.registerTool({
    name: "smart_spawn",
    description: `Intelligently spawn sub-agent(s) for a task. Automatically selects the best model(s) based on task type, budget, and strategy. Use this instead of sessions_spawn when you want optimal model selection. Do NOT use this when the user explicitly requests a specific agent or model.`,
    parameters: {
      type: "object",
      properties: {
        task: {
          type: "string",
          description: "The task to delegate. Be specific.",
        },
        category: {
          type: "string",
          enum: [
            "coding", "reasoning", "creative", "research",
            "general", "fast-cheap", "vision", "auto",
          ],
          description: "Task category. 'auto' (default) lets the system classify it.",
        },
        mode: {
          type: "string",
          enum: ["single", "collective", "cascade", "plan", "swarm"],
          description: "Spawning strategy. 'single' (default): one optimal model. 'collective': N diverse models + merge. 'cascade': cheap first, escalate if needed. 'plan': decompose multi-step task sequentially. 'swarm': decompose into a dependency DAG, maximize parallelism.",
        },
        budget: {
          type: "string",
          enum: ["low", "medium", "high", "any"],
          description: "Budget tier for model selection.",
        },
        collectiveCount: {
          type: "number",
          description: "Number of models for collective mode (default: 3).",
        },
        label: {
          type: "string",
          description: "Optional label for the spawned session.",
        },
        context: {
          type: "string",
          description: "Project context tags, comma-separated (e.g. 'typescript,nextjs,supabase'). Improves model selection for specific tech stacks.",
        },
        persona: {
          type: "string",
          description: "Role persona for the sub-agent (e.g. 'frontend-engineer', 'security-engineer', 'copywriter', 'data-analyst'). See SKILL.md for full list.",
        },
        stack: {
          type: "array",
          items: { type: "string" },
          description: "Tech stack blocks to include in role instructions (e.g. ['react', 'nextjs', 'supabase', 'stripe']). See SKILL.md for available blocks.",
        },
        domain: {
          type: "string",
          description: "Industry/domain block (e.g. 'saas', 'fintech', 'healthcare', 'crypto', 'ecommerce'). Adds domain-specific expertise.",
        },
        format: {
          type: "string",
          description: "Output format block (e.g. 'full-implementation', 'review', 'comparison', 'documentation', 'pitch-deck'). Shapes how the sub-agent structures its response.",
        },
        guardrails: {
          type: "array",
          items: { type: "string" },
          description: "Quality guardrails (e.g. ['code', 'security', 'production']). Adds constraints to prevent common mistakes.",
        },
      },
      required: ["task"],
    },
    async execute(_callId: string, input: any) {
      const task = input.task || "";
      const category =
        input.category === "auto" || !input.category
          ? classifyTask(task)
          : input.category;
      const budget = input.budget ?? defaultBudget;
      const mode = input.mode ?? defaultMode;
      const context = input.context || undefined;

      // Compose role-enriched task if agent specified blocks
      const enrichedTask = await client.composeTaskPrompt({
        task,
        persona: input.persona,
        stack: input.stack,
        domain: input.domain,
        format: input.format,
        guardrails: input.guardrails,
      }) ?? task;

      // --- SINGLE MODE ---
      if (mode === "single") {
        let modelId: string;
        let reason: string;
        let source = "api";

        try {
          const pick = await client.pick(category, budget, undefined, context);
          modelId = routeModel(pick.data.id, directProviders);
          reason = pick.data.reason;
        } catch {
          modelId = getFallback(category);
          reason = `API unavailable, using fallback for ${category}`;
          source = "fallback";
        }

        const label = input.label ?? `smart-spawn: ${category} (${modelId.split("/").pop()})`;

        client.logSpawn({ model: modelId, category, budget, mode, role: "primary", source, context });

        return {
          content: [{
            type: "text",
            text: JSON.stringify({
              action: "spawn",
              model: modelId,
              task: enrichedTask,
              category,
              budget,
              reason,
              source,
              label,
            }),
          }],
        };
      }

      // --- COLLECTIVE MODE ---
      if (mode === "collective") {
        const count = input.collectiveCount ?? collectiveCount;
        try {
          const rec = await client.recommend({ task: category, budget, count, context });
          const models = rec.data.map((r) => ({
            id: routeModel(r.model.id, directProviders),
            reason: r.reason,
          }));
          for (const m of models) {
            client.logSpawn({ model: m.id, category, budget, mode, role: "collective_worker", source: "api", context });
          }
          return {
            content: [{
              type: "text",
              text: JSON.stringify({
                action: "collective",
                models: models.map((m, i) => ({
                  ...m,
                  label: `smart-spawn-collective-${i + 1}: ${category} (${m.id.split("/").pop()})`,
                })),
                task: enrichedTask,
                category,
                budget,
                count,
                mergeLabel: `smart-spawn-merge: ${category}`,
              }),
            }],
          };
        } catch {
          return buildFallbackResponse(category, budget, enrichedTask, context);
        }
      }

      // --- CASCADE MODE ---
      if (mode === "cascade") {
        try {
          const cheapPick = await client.pick(category, "low", undefined, context);
          const cheapId = cheapPick.data.id;
          // Exclude the cheap model so premium is guaranteed different
          const premiumPick = await client.pick(category, "high", [cheapId], context);
          const routedCheap = routeModel(cheapId, directProviders);
          const routedPremium = routeModel(premiumPick.data.id, directProviders);
          client.logSpawn({ model: routedCheap, category, budget: "low", mode, role: "cascade_cheap", source: "api", context });
          client.logSpawn({ model: routedPremium, category, budget: "high", mode, role: "cascade_premium", source: "api", context });
          return {
            content: [{
              type: "text",
              text: JSON.stringify({
                action: "cascade",
                cheapModel: routedCheap,
                cheapScore: cheapPick.data.score,
                cheapPricing: cheapPick.data.pricing,
                premiumModel: routedPremium,
                premiumScore: premiumPick.data.score,
                premiumPricing: premiumPick.data.pricing,
                cheapLabel: `smart-spawn-cascade-cheap: ${category} (${cheapId.split("/").pop()})`,
                premiumLabel: `smart-spawn-cascade-premium: ${category} (${premiumPick.data.id.split("/").pop()})`,
                task: enrichedTask,
                category,
                escalationHint: "If the cheap model's response is incomplete, incorrect, or low quality, escalate to the premium model.",
              }),
            }],
          };
        } catch {
          return buildFallbackResponse(category, budget, enrichedTask, context);
        }
      }

      // --- PLAN MODE ---
      if (mode === "plan") {
        try {
          const decomposition = await client.decompose({ task, budget, context });

          // If task can't be decomposed, fall back to single mode
          if (!decomposition.decomposed || !decomposition.steps?.length) {
            let modelId: string;
            let reason: string;
            let source = "api";

            try {
              const pick = await client.pick(category, budget, undefined, context);
              modelId = routeModel(pick.data.id, directProviders);
              reason = pick.data.reason;
            } catch {
              modelId = getFallback(category);
              reason = `API unavailable, using fallback for ${category}`;
              source = "fallback";
            }

            const lbl = input.label ?? `smart-spawn: ${category} (${modelId.split("/").pop()})`;
            client.logSpawn({ model: modelId, category, budget, mode: "single", role: "primary", source, context });

            return {
              content: [{
                type: "text",
                text: JSON.stringify({
                  action: "spawn",
                  model: modelId,
                  task: enrichedTask,
                  category,
                  budget,
                  reason: `Task not decomposable — ${reason}`,
                  source,
                  label: lbl,
                }),
              }],
            };
          }

          // Build plan response with routed models
          const subtasks = decomposition.steps.map((step) => {
            const modelId = step.model
              ? routeModel(step.model.id, directProviders)
              : getFallback(step.category);

            client.logSpawn({
              model: modelId,
              category: step.category,
              budget: step.budget,
              mode: "plan",
              role: `plan_step_${step.step}`,
              source: step.model ? "api" : "fallback",
              context,
            });

            // Plan mode steps use task as-is — blocks are top-level only
            const stepTask = step.task;

            return {
              step: step.step,
              task: stepTask,
              category: step.category,
              model: modelId,
              budget: step.budget,
              reason: step.reason,
              label: `smart-spawn-plan-${step.step}: ${step.category} (${modelId.split("/").pop()})`,
            };
          });

          return {
            content: [{
              type: "text",
              text: JSON.stringify({
                action: "plan",
                subtasks,
                originalTask: task,
                totalSteps: subtasks.length,
                executionHint: "Execute steps sequentially. Pass each step's output as context to the next.",
              }),
            }],
          };
        } catch {
          return buildFallbackResponse(category, budget, enrichedTask, context);
        }
      }

      // --- SWARM MODE ---
      if (mode === "swarm") {
        try {
          const swarmResult = await client.swarm({ task, budget, context });

          // If task can't be decomposed, fall back to single mode
          if (!swarmResult.decomposed || !swarmResult.dag?.tasks?.length) {
            let modelId: string;
            let reason: string;
            let source = "api";

            try {
              const pick = await client.pick(category, budget, undefined, context);
              modelId = routeModel(pick.data.id, directProviders);
              reason = pick.data.reason;
            } catch {
              modelId = getFallback(category);
              reason = `API unavailable, using fallback for ${category}`;
              source = "fallback";
            }

            const lbl = input.label ?? `smart-spawn: ${category} (${modelId.split("/").pop()})`;
            client.logSpawn({ model: modelId, category, budget, mode: "single", role: "primary", source, context });

            return {
              content: [{
                type: "text",
                text: JSON.stringify({
                  action: "spawn",
                  model: modelId,
                  task: enrichedTask,
                  category,
                  budget,
                  reason: `Task not decomposable — ${reason}`,
                  source,
                  label: lbl,
                }),
              }],
            };
          }

          // Build swarm response with routed models
          const dag = swarmResult.dag;
          const dagTasks = dag.tasks.map((t) => {
            const modelId = t.model
              ? routeModel(t.model.id, directProviders)
              : getFallback(t.category);

            client.logSpawn({
              model: modelId,
              category: t.category,
              budget: t.budget,
              mode: "swarm",
              role: `swarm_${t.id}`,
              source: t.model ? "api" : "fallback",
              context,
            });

            return {
              id: t.id,
              task: t.description,
              category: t.category,
              model: modelId,
              budget: t.budget,
              persona: t.persona,
              dependsOn: t.dependsOn,
              wave: t.wave,
              reason: t.reason,
              label: `smart-spawn-${t.id}: ${t.category} (${modelId.split("/").pop()})`,
            };
          });

          return {
            content: [{
              type: "text",
              text: JSON.stringify({
                action: "swarm",
                dag: {
                  tasks: dagTasks,
                  waves: dag.waves,
                  edges: dag.edges,
                  totalTasks: dag.totalTasks,
                  totalWaves: dag.totalWaves,
                  estimatedCost: dag.estimatedCost,
                  ...(dag.warning ? { warning: dag.warning } : {}),
                },
                originalTask: task,
                executionHint: "Execute wave-by-wave. Spawn all tasks within a wave in parallel. Pass outputs from completed tasks as context to their dependents in the next wave.",
              }),
            }],
          };
        } catch {
          return buildFallbackResponse(category, budget, enrichedTask, context);
        }
      }

      return {
        content: [{
          type: "text",
          text: JSON.stringify({
            action: "error",
            error: `Unknown mode: ${mode}`,
            validModes: ["single", "collective", "cascade", "plan", "swarm"],
          }),
        }],
        isError: true,
      };
    },
  });

  // --- FEEDBACK TOOL (learning loop) ---
  api.registerTool({
    name: "smart_spawn_feedback",
    description: `Report quality feedback after a smart_spawn task completes. Rate the spawned model's output 1-5 (1=terrible, 5=excellent). This feedback improves future model recommendations for your specific use patterns.`,
    parameters: {
      type: "object",
      properties: {
        model: {
          type: "string",
          description: "The model ID that was spawned (from the smart_spawn response).",
        },
        category: {
          type: "string",
          enum: ["coding", "reasoning", "creative", "research", "general", "fast-cheap", "vision"],
          description: "The task category.",
        },
        rating: {
          type: "number",
          description: "Quality rating 1-5. 1=terrible, 2=poor, 3=acceptable, 4=good, 5=excellent.",
        },
        context: {
          type: "string",
          description: "Project context tags from the original smart_spawn call (e.g. 'typescript,nextjs').",
        },
      },
      required: ["model", "category", "rating"],
    },
    async execute(_callId: string, input: any) {
      const model = input.model || "";
      const category = input.category || "general";
      const rating = Math.max(1, Math.min(5, Math.round(input.rating ?? 3)));
      const context = input.context || undefined;

      client.logOutcome({ model, category, rating, context });

      // Community telemetry (opt-in)
      if (telemetryOptIn) {
        client.reportCommunity({
          model,
          category,
          rating,
          instanceId: getOrCreateInstanceId(),
        });
      }

      return {
        content: [{
          type: "text",
          text: JSON.stringify({
            recorded: true,
            model,
            category,
            rating,
            communityReported: telemetryOptIn,
            message: rating >= 3
              ? "Positive feedback recorded — this model will be favored for similar tasks."
              : "Negative feedback recorded — this model will be deprioritized for similar tasks.",
          }),
        }],
      };
    },
  });
}
package/openclaw.plugin.json
ADDED
@@ -0,0 +1,40 @@
{
  "id": "smart-spawn",
  "name": "Smart Spawn",
  "description": "Intelligent sub-agent spawning with automatic model selection, role composition, and multi-agent orchestration.",
  "version": "1.1.0",
  "author": "deeflect",
  "skills": ["skills/smart-spawn"],
  "configSchema": {
    "type": "object",
    "additionalProperties": false,
    "properties": {
      "apiUrl": {
        "type": "string",
        "description": "Model Intelligence API URL"
      },
      "defaultBudget": {
        "type": "string",
        "enum": ["low", "medium", "high", "any"],
        "description": "Default budget tier"
      },
      "defaultMode": {
        "type": "string",
        "enum": ["single", "collective", "cascade", "plan", "swarm"],
        "description": "Default spawn mode"
      },
      "collectiveCount": {
        "type": "number",
        "description": "Number of models for collective mode (2-5)"
      },
      "telemetryOptIn": {
        "type": "boolean",
        "description": "Opt-in to anonymous community telemetry"
      },
      "communityUrl": {
        "type": "string",
        "description": "Community API URL for shared telemetry"
      }
    }
  }
}
package/package.json
ADDED
@@ -0,0 +1,25 @@
{
  "name": "@deeflectcom/smart-spawn",
  "version": "1.0.0",
  "description": "Intelligent sub-agent spawning with automatic model selection for OpenClaw",
  "main": "index.ts",
  "type": "module",
  "files": [
    "index.ts",
    "src/",
    "skills/",
    "openclaw.plugin.json"
  ],
  "openclaw": {
    "extensions": ["./index.ts"]
  },
  "author": "deeflect",
  "license": "MIT",
  "keywords": ["openclaw", "plugin", "llm", "model-selection", "agent", "smart-spawn"],
  "repository": {
    "type": "git",
    "url": "https://github.com/deeflect/smart-spawn.git",
    "directory": "smart-spawn"
  },
  "homepage": "https://github.com/deeflect/smart-spawn#readme"
}
package/skills/SKILL.md
ADDED
@@ -0,0 +1,214 @@
# Smart Spawn

When delegating tasks to sub-agents, prefer `smart_spawn` over `sessions_spawn` unless:
- The user explicitly requests a specific agent or model
- You're doing a trivial task where you already know the right model

## How It Works

`smart_spawn` returns a **JSON response** with a recommended model and action. You MUST then call `sessions_spawn` yourself with the recommended model and task. The tool cannot spawn directly — it only recommends.

## Handling Responses

After calling `smart_spawn`, parse the JSON response and act on the `action` field:

### action: "spawn" (single mode)

The tool picked one model. Spawn it:

```json
{
  "action": "spawn",
  "model": "openrouter/anthropic/claude-opus-4-6",
  "task": "...",
  "category": "coding",
  "budget": "medium",
  "reason": "Best coding model at medium budget ($0-5/M) — score: 82",
  "source": "api",
  "label": "smart-spawn: coding (claude-opus-4-6)"
}
```

**You do:** Call `sessions_spawn` with the `model`, `task`, and `label` from the response.
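
A rough sketch of that hand-off, in the same pseudo-call style as the examples below (the task string is only an example; parse the tool's JSON text first):

```
result = parse(smart_spawn(task: "Fix the failing auth middleware test"))
sessions_spawn(task: result.task, model: result.model, label: result.label)
```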

### action: "collective"

The tool picked N diverse models. Spawn all of them on the same task, then merge:

```json
{
  "action": "collective",
  "models": [
    { "id": "openrouter/anthropic/claude-opus-4-6", "reason": "...", "label": "smart-spawn-collective-1: coding (claude-opus-4-6)" },
    { "id": "openrouter/google/gemini-2.5-pro", "reason": "...", "label": "smart-spawn-collective-2: coding (gemini-2.5-pro)" }
  ],
  "task": "...",
  "category": "coding",
  "budget": "medium",
  "count": 2,
  "mergeLabel": "smart-spawn-merge: coding"
}
```

**You do:**
1. Call `sessions_spawn` for each model using its `id`, the `task`, and its `label`, with `waitForCompletion: true`
2. Collect all outputs
3. Call `sessions_spawn` with a fast model, giving it all outputs to synthesize the best answer, using the `mergeLabel`
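
A rough sketch of that loop (pseudocode; the merge model and prompt wording are illustrative — any fast model works):

```
for m in result.models:
  sessions_spawn(task: result.task, model: m.id, label: m.label, waitForCompletion: true)

# after all workers finish, merge with a fast model
sessions_spawn(
  task: "Synthesize the best single answer from these drafts:\n" + allOutputs,
  model: "openrouter/google/gemini-2.5-flash",
  label: result.mergeLabel
)
```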

### action: "cascade"

The tool picked a cheap and premium model. Try cheap first, escalate if quality is insufficient:

```json
{
  "action": "cascade",
  "cheapModel": "openrouter/deepseek/deepseek-chat-v3-0324",
  "cheapScore": 71,
  "cheapPricing": { "prompt": 0.27, "completion": 1.1 },
  "premiumModel": "openrouter/anthropic/claude-opus-4-6",
  "premiumScore": 82,
  "premiumPricing": { "prompt": 5, "completion": 25 },
  "cheapLabel": "smart-spawn-cascade-cheap: coding (deepseek-chat-v3-0324)",
  "premiumLabel": "smart-spawn-cascade-premium: coding (claude-opus-4-6)",
  "task": "...",
  "category": "coding",
  "escalationHint": "If the cheap model's response is incomplete, incorrect, or low quality, escalate to the premium model."
}
```

**You do:**
1. Call `sessions_spawn` with `cheapModel`, the `task`, and `cheapLabel`, with `waitForCompletion: true`
2. Evaluate the output quality — escalate if you see:
   - Incomplete or partial answers
   - Syntax errors or incorrect code
   - Vague or generic responses that don't address the task
   - Missing key requirements
3. If good enough: use that result, done (saved money!)
4. If not good enough: call `sessions_spawn` with `premiumModel` and `premiumLabel`, include the cheap output as context for improvement
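
As pseudocode (the quality check and the escalation prompt wording are yours to choose):

```
cheap = sessions_spawn(task: result.task, model: result.cheapModel,
                       label: result.cheapLabel, waitForCompletion: true)
if cheap passes the checks above:
  return cheap                                  # saved money
else:
  sessions_spawn(
    task: result.task + "\n\nA cheaper model produced this draft — fix and complete it:\n" + cheap,
    model: result.premiumModel, label: result.premiumLabel
  )
```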

### action: "swarm"

The tool decomposed a complex task into a dependency DAG with parallel waves:

```json
{
  "action": "swarm",
  "dag": {
    "tasks": [
      { "id": "swarm-1", "task": "Design database schema", "category": "coding", "model": "openrouter/anthropic/claude-sonnet-4", "budget": "medium", "persona": "database-architect", "dependsOn": [], "wave": 0, "reason": "...", "label": "smart-spawn-swarm-1: coding (claude-sonnet-4)" },
      { "id": "swarm-2", "task": "Build REST API", "category": "coding", "model": "openrouter/anthropic/claude-sonnet-4", "budget": "medium", "persona": "backend-engineer", "dependsOn": ["swarm-1"], "wave": 1, "reason": "...", "label": "smart-spawn-swarm-2: coding (claude-sonnet-4)" },
      { "id": "swarm-3", "task": "Create React frontend", "category": "coding", "model": "openrouter/anthropic/claude-sonnet-4", "budget": "medium", "persona": "frontend-engineer", "dependsOn": ["swarm-1"], "wave": 1, "reason": "...", "label": "smart-spawn-swarm-3: coding (claude-sonnet-4)" },
      { "id": "swarm-4", "task": "Write integration tests", "category": "coding", "model": "openrouter/deepseek/deepseek-chat-v3-0324", "budget": "low", "persona": "software-engineer", "dependsOn": ["swarm-2", "swarm-3"], "wave": 2, "reason": "...", "label": "smart-spawn-swarm-4: coding (deepseek-chat-v3-0324)" }
    ],
    "waves": [
      { "wave": 0, "taskIds": ["swarm-1"], "description": "1 task" },
      { "wave": 1, "taskIds": ["swarm-2", "swarm-3"], "description": "2 parallel tasks" },
      { "wave": 2, "taskIds": ["swarm-4"], "description": "1 task" }
    ],
    "edges": [
      { "from": "swarm-1", "to": "swarm-2", "type": "phase" },
      { "from": "swarm-1", "to": "swarm-3", "type": "phase" },
      { "from": "swarm-2", "to": "swarm-4", "type": "phase" },
      { "from": "swarm-3", "to": "swarm-4", "type": "phase" }
    ],
    "totalTasks": 4,
    "totalWaves": 3,
    "estimatedCost": { "low": 0.02, "high": 0.18 }
  },
  "originalTask": "...",
  "executionHint": "Execute wave-by-wave. Spawn all tasks within a wave in parallel. Pass outputs from completed tasks as context to their dependents in the next wave."
}
```

**You do:**
1. Execute **wave by wave** — process all waves in order (wave 0, then wave 1, etc.)
2. Within each wave, spawn **all tasks in parallel** — call `sessions_spawn` for each task using its `model`, `task`, and `label`, with `waitForCompletion: true`
3. When a task has `dependsOn`, include the outputs from those dependency tasks as context in the task prompt
4. After each wave completes, collect outputs before moving to the next wave
5. After all waves complete, synthesize the results if needed
6. Call `smart_spawn_feedback` for each completed task to rate the model's output
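
A rough sketch of the wave loop (pseudocode; how you join dependency outputs into each prompt is up to you):

```
outputs = {}
for wave in result.dag.waves:                      # waves come pre-ordered
  for id in wave.taskIds:                          # spawn these in parallel
    t = result.dag.tasks entry with that id
    deps = outputs for every id in t.dependsOn
    spawn sessions_spawn(task: deps + "\n\n" + t.task, model: t.model,
                         label: t.label, waitForCompletion: true)
  wait for the wave, store each result in outputs[id]
```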

**Key differences from plan mode:**
- Plan is sequential (step 1, then step 2, then step 3...)
- Swarm maximizes parallelism — independent tasks run simultaneously
- Each task has a `persona` for role-specific prompting
- Tasks declare explicit `dependsOn` relationships

### action: "plan"

The tool decomposed a multi-step task into subtasks, each with its own optimal model:

```json
{
  "action": "plan",
  "subtasks": [
    { "step": 1, "task": "Set up database schema", "category": "coding", "model": "openrouter/anthropic/claude-sonnet-4", "budget": "medium", "reason": "Best coding model at medium budget ($0-5/M) — score: 78", "label": "smart-spawn-plan-1: coding (claude-sonnet-4)" },
    { "step": 2, "task": "Implement REST API", "category": "coding", "model": "openrouter/anthropic/claude-sonnet-4", "budget": "medium", "reason": "...", "label": "smart-spawn-plan-2: coding (claude-sonnet-4)" },
    { "step": 3, "task": "Write unit tests", "category": "coding", "model": "openrouter/deepseek/deepseek-chat-v3-0324", "budget": "low", "reason": "...", "label": "smart-spawn-plan-3: coding (deepseek-chat-v3-0324)" }
  ],
  "originalTask": "1. Set up database schema\n2. Implement REST API\n3. Write unit tests",
  "totalSteps": 3,
  "executionHint": "Execute steps sequentially. Pass each step's output as context to the next."
}
```

**You do:**
1. Execute steps **sequentially** — call `sessions_spawn` for step 1, wait for completion
2. Pass each step's output as context to the next step (include the previous result in the task prompt)
3. After each step completes, call `smart_spawn_feedback` to rate that step's model
4. If a step fails, you may retry with the same model or skip to the next step with a note
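
A rough sketch of the sequential loop (pseudocode; the context framing and the rating are placeholders):

```
previous = ""
for step in result.subtasks:
  out = sessions_spawn(task: previous + "\n\n" + step.task, model: step.model,
                       label: step.label, waitForCompletion: true)
  smart_spawn_feedback(model: step.model, category: step.category, rating: <1-5>)
  previous = out
```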

**When plan mode is useful:**
- The user provides a numbered list or multi-step instructions
- The task naturally decomposes into sequential phases (e.g. "design, implement, test")
- Different steps benefit from different model strengths (e.g. reasoning for architecture, coding for implementation)

## When to Use Each Mode

- **single** (default) — most tasks. Fast, one model.
- **collective** — important decisions, creative work, or when diverse perspectives help. Costs 3-4x more.
- **cascade** — cost-sensitive tasks where a cheap model might suffice. Saves money when cheap model is good enough.
- **plan** — structured multi-step tasks that must run sequentially. Decomposes into subtasks, picks the best model for each step.
- **swarm** — complex projects with parallelizable subtasks. Builds a dependency DAG, assigns personas, runs independent tasks simultaneously. Best for multi-component projects (e.g. "build API + frontend + tests + deploy").

## Examples

```
smart_spawn(task: "Write a Python function to merge two sorted arrays")
smart_spawn(task: "Analyze this architecture", category: "reasoning", budget: "high")
smart_spawn(task: "Write a product launch email", mode: "collective", category: "creative")
smart_spawn(task: "Convert this JSON to YAML", mode: "cascade")
smart_spawn(task: "1. Design the database schema\n2. Implement the API endpoints\n3. Write integration tests", mode: "plan")
smart_spawn(task: "1. Design database schema\n2. Build REST API\n3. Create React frontend\n4. Write integration tests\n5. Deploy to AWS", mode: "swarm", context: "typescript,nextjs,postgres")
```

## Feedback (Learning Loop)

After receiving a spawned agent's output, you SHOULD call `smart_spawn_feedback` to rate its quality. This trains the system to pick better models for your tasks over time.

```
smart_spawn_feedback(model: "anthropic/claude-opus-4-6", category: "coding", rating: 5)
```

**Rating scale:**
- **5** — Excellent. Fully solved the task, high quality.
- **4** — Good. Completed the task with minor issues.
- **3** — Acceptable. Got the job done but not great.
- **2** — Poor. Missed key requirements or had significant issues.
- **1** — Terrible. Failed the task or produced unusable output.

**When to rate:**
- Always rate after single mode spawns
- In cascade mode: rate the cheap model's output (this determines if escalation was needed)
- In collective mode: rate the final merged output
- Use the `model` and `category` from the original `smart_spawn` response

After enough feedback (3+ ratings per model+category), the system starts blending your personal scores into recommendations. Models you rate highly get boosted; models you rate poorly get deprioritized.

## Tips

- Let `category: "auto"` do its job — it's right most of the time
- Use `collective` sparingly — it costs 3-4x more
- If the API is down, the tool falls back to hardcoded defaults (still works)
- Model IDs may or may not have `openrouter/` prefix depending on user's auth — pass them as-is to `sessions_spawn`
- Always provide feedback — it makes future picks better for your specific workflow
package/skills/smart-spawn/SKILL.md
ADDED
@@ -0,0 +1,114 @@
---
name: smart-spawn
description: "Intelligent sub-agent spawning with automatic model selection and role composition. Use instead of sessions_spawn for optimal model routing."
---

# Smart Spawn

Use `smart_spawn` to delegate tasks to sub-agents. It picks the best model and can inject expert role instructions that make cheap models perform like specialists.

## Flow

1. Analyze the task → pick role blocks if relevant
2. Call `smart_spawn` → get JSON with model + enriched task
3. Call `sessions_spawn` with the returned values

## Modes

| Mode | When to use |
|------|------------|
| `single` | Default. One optimal model. |
| `collective` | Need diverse perspectives. Spawns N models, you merge results. |
| `cascade` | Cost-sensitive. Cheap model first, escalate to premium if quality is poor. |
| `plan` | Multi-step sequential tasks. Format task as numbered list. |
| `swarm` | Complex tasks with parallel subtasks. API builds a dependency DAG. |

## Role Blocks

Analyze the task and specify blocks that match. **Only include what's clearly relevant — omit if unsure.** Guardrails auto-apply based on persona when not specified.

### persona — who the sub-agent is

**Engineering:** `software-engineer` `frontend-engineer` `backend-engineer` `fullstack-engineer` `devops-engineer` `data-engineer` `mobile-engineer` `systems-engineer` `security-engineer` `ml-engineer` `performance-engineer`

**Architecture:** `architect` `api-designer` `database-architect`

**Analysis:** `analyst` `data-analyst` `market-analyst` `financial-analyst`

**Problem Solving:** `problem-solver` `debugger` `mathematician`

**Content:** `writer` `technical-writer` `copywriter` `editor` `social-media`

**Product/Business:** `product-manager` `strategist` `ux-researcher` `project-manager`

**Design:** `ui-designer` `brand-designer`

**Other:** `sysadmin` `teacher` `legal-analyst` `assistant`

### stack — tech expertise (array, max 4)

**Frontend:** `react` `nextjs` `vue` `svelte` `angular` `tailwind` `shadcn` `css` `animation` `threejs`

**Languages:** `typescript` `python` `rust` `go` `java` `csharp` `php` `ruby` `elixir` `swift` `kotlin`

**Backend:** `nodejs` `fastapi` `django` `flask` `react-native` `flutter`

**Data:** `sql` `postgres` `mysql` `supabase` `prisma` `drizzle` `mongodb` `redis` `elasticsearch` `kafka` `rabbitmq`

**APIs:** `graphql` `rest` `grpc` `websocket` `auth` `stripe` `payment-general`

**DevOps:** `docker` `kubernetes` `cicd` `terraform` `aws` `gcp` `nginx` `caddy` `monitoring`

**AI/ML:** `llm` `rag` `langchain` `fine-tuning` `pytorch` `pandas`

**Web3:** `solidity` `web3-frontend`

**Platforms:** `vercel` `railway` `cloudflare` `firebase` `convex`

**Other:** `bash` `powershell` `markdown` `astro` `json` `yaml` `regex` `email` `a11y` `seo` `performance` `i18n` `git` `testing` `playwright`

### domain — industry (one)

`fintech` `ecommerce` `saas` `marketplace` `gaming` `crypto` `healthcare` `education` `media` `iot` `logistics` `real-estate` `social-platform` `legal` `developer-tools`

### format — output shape (one)

`full-implementation` `fix-debug` `refactor` `explain` `review` `comparison` `planning` `documentation` `copywriting` `social-post` `data-report` `migration` `pitch-deck` `project-proposal` `user-story` `email` `legal-doc`

### guardrails — quality rules (array, auto-applied if omitted)

`code` `research` `concise` `security` `production` `accuracy`
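
Putting blocks together — an illustrative call for a checkout-page task (block names are taken from the lists above; the task text is just an example):

```
smart_spawn(
  task: "Build a checkout page with saved cards and Apple Pay",
  persona: "frontend-engineer",
  stack: ["react", "nextjs", "stripe"],
  domain: "ecommerce",
  format: "full-implementation"
)
```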

## Acting on Results

### `action: "spawn"` (single/fallback)
```
sessions_spawn(task: result.task, model: result.model, label: result.label)
```

### `action: "collective"`
Spawn each model, wait for all, merge the best parts:
```
for model in result.models:
  sessions_spawn(task: result.task, model: model.id, label: model.label)
```

### `action: "cascade"`
1. Spawn `cheapModel` first
2. Check quality via `sessions_history`
3. Escalate to `premiumModel` if: incomplete, wrong, vague, or too short for the task
4. Return whichever result is good

### `action: "plan"`
Execute steps sequentially, pass each output as context to the next.

### `action: "swarm"`
Execute wave-by-wave. Spawn all tasks in a wave in parallel. Pass outputs to dependents in the next wave.

## Rules

- **Always spawn after smart_spawn returns** — don't just report the recommendation
- Use the exact `model` and `task` strings from the result
- **Don't guess blocks** — if unsure, omit and the task goes raw
- For plan mode, format tasks as numbered lists
- After completion, consider calling `smart_spawn_feedback` with a 1-5 rating
package/src/api-client.ts
ADDED
@@ -0,0 +1,280 @@
export interface SwarmResponse {
  decomposed: boolean;
  reason?: string;
  dag?: {
    tasks: Array<{
      id: string;
      description: string;
      category: string;
      budget: string;
      persona: string;
      dependsOn: string[];
      model: {
        id: string;
        name: string;
        provider: string;
        score: number;
        pricing: { prompt: number; completion: number };
      } | null;
      reason: string;
      wave: number;
    }>;
    waves: Array<{
      wave: number;
      taskIds: string[];
      description: string;
    }>;
    edges: Array<{
      from: string;
      to: string;
      type: string;
    }>;
    totalTasks: number;
    totalWaves: number;
    originalTask: string;
    context: string | null;
    estimatedCost: { low: number; high: number };
    warning?: string;
  };
}

export interface DecomposeResponse {
  decomposed: boolean;
  reason?: string;
  totalSteps?: number;
  originalTask?: string;
  context?: string | null;
  steps?: Array<{
    step: number;
    task: string;
    category: string;
    budget: string;
    model: {
      id: string;
      name: string;
      provider: string;
      score: number;
      pricing: { prompt: number; completion: number };
    } | null;
    reason: string;
  }>;
}

export interface PickResponse {
  data: {
    id: string;
    name: string;
    provider: string;
    score: number;
    pricing: { prompt: number; completion: number };
    reason: string;
    contextBoost?: number;
    contextTags?: string[];
  };
}

export interface RecommendResponse {
  data: Array<{
    model: {
      id: string;
      name: string;
      provider: string;
      scores: Record<string, number>;
      tier: string;
    };
    reason: string;
    confidence: number;
  }>;
  meta: {
    task: string;
    budget: string;
    candidatesConsidered: number;
  };
}

export class ApiClient {
  private baseUrl: string;
  private communityUrl: string;

  constructor(baseUrl: string, communityUrl?: string) {
    this.baseUrl = baseUrl.replace(/\/$/, "");
    this.communityUrl = (communityUrl ?? baseUrl).replace(/\/$/, "");
  }

  async pick(
    task: string,
    budget?: string,
    exclude?: string[],
    context?: string
  ): Promise<PickResponse> {
    const params = new URLSearchParams({ task });
    if (budget) params.set("budget", budget);
    if (exclude?.length) params.set("exclude", exclude.join(","));
    if (context) params.set("context", context);

    const res = await fetch(`${this.baseUrl}/pick?${params}`);
    if (!res.ok) {
      throw new Error(`API /pick failed: ${res.status} ${res.statusText}`);
    }
    return res.json() as Promise<PickResponse>;
  }

  async recommend(opts: {
    task: string;
    budget?: string;
    count?: number;
    exclude?: string[];
    require?: string[];
    minContext?: number;
    context?: string;
  }): Promise<RecommendResponse> {
    const params = new URLSearchParams({ task: opts.task });
    if (opts.budget) params.set("budget", opts.budget);
    if (opts.count) params.set("count", String(opts.count));
    if (opts.exclude?.length) params.set("exclude", opts.exclude.join(","));
    if (opts.require?.length) params.set("require", opts.require.join(","));
    if (opts.minContext) params.set("minContext", String(opts.minContext));
    if (opts.context) params.set("context", opts.context);

    const res = await fetch(`${this.baseUrl}/recommend?${params}`);
    if (!res.ok) {
      throw new Error(
        `API /recommend failed: ${res.status} ${res.statusText}`
      );
    }
    return res.json() as Promise<RecommendResponse>;
  }

  async decompose(opts: {
    task: string;
    budget?: string;
    context?: string;
  }): Promise<DecomposeResponse> {
    const res = await fetch(`${this.baseUrl}/decompose`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(opts),
    });
    if (!res.ok) {
      throw new Error(`API /decompose failed: ${res.status} ${res.statusText}`);
    }
    return res.json() as Promise<DecomposeResponse>;
  }

  async swarm(opts: {
    task: string;
    budget?: string;
    context?: string;
    maxParallel?: number;
  }): Promise<SwarmResponse> {
    const res = await fetch(`${this.baseUrl}/swarm`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(opts),
    });
    if (!res.ok) {
      throw new Error(`API /swarm failed: ${res.status} ${res.statusText}`);
    }
    return res.json() as Promise<SwarmResponse>;
  }

  async health(): Promise<boolean> {
    try {
      const res = await fetch(`${this.baseUrl}/status`);
      return res.ok;
    } catch {
      return false;
    }
  }

  /**
   * Compose a role-enriched task prompt via explicit block selection.
   * Agent specifies what blocks it wants — API assembles them.
   * Returns null if no blocks specified (task sent raw).
   */
  async composeTaskPrompt(opts: {
    task: string;
    persona?: string;
    stack?: string[];
    domain?: string;
    format?: string;
    guardrails?: string[];
  }): Promise<string | null> {
    // If agent didn't specify any blocks, skip — no role prompt
    const hasBlocks = opts.persona || opts.stack?.length || opts.domain || opts.format || opts.guardrails?.length;
    if (!hasBlocks) return null;

    try {
      const res = await fetch(`${this.baseUrl}/roles/compose`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(opts),
      });
      if (!res.ok) {
        console.warn(`[smart-spawn] Role composition failed (${res.status}) — sending task without role blocks`);
        return null;
      }
      const data = await res.json() as { hasRole: boolean; fullPrompt: string };
      return data.hasRole ? data.fullPrompt : null;
    } catch {
      console.warn(`[smart-spawn] Role composition unavailable — sending task without role blocks`);
      return null;
    }
  }

  private logFailCount = 0;
  private logFailWarned = false;

  private handleLogError(endpoint: string, err: unknown): void {
    this.logFailCount++;
    if (!this.logFailWarned && this.logFailCount >= 3) {
      console.warn(`[smart-spawn] Spawn logging unavailable (${endpoint}) — tracking data may be lost. Is the API running?`);
      this.logFailWarned = true;
    }
  }

  /** Fire-and-forget spawn log for cost tracking */
  logSpawn(entry: {
    model: string;
    category: string;
    budget: string;
    mode: string;
    role: string;
    source: string;
    context?: string;
  }): void {
    fetch(`${this.baseUrl}/spawn-log`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(entry),
    }).catch((e) => this.handleLogError("/spawn-log", e));
  }

  /** Fire-and-forget outcome feedback for learning loop */
  logOutcome(entry: {
    model: string;
    category: string;
    rating: number;
    context?: string;
  }): void {
    fetch(`${this.baseUrl}/spawn-log/outcome`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(entry),
    }).catch((e) => this.handleLogError("/spawn-log/outcome", e));
  }

  /** Fire-and-forget community outcome report */
  reportCommunity(entry: {
    model: string;
    category: string;
    rating: number;
    instanceId: string;
  }): void {
    fetch(`${this.communityUrl}/community/report`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(entry),
    }).catch((e) => this.handleLogError("/community/report", e));
  }
}
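
For reference, a minimal sketch of how the plugin drives this client in single mode (condensed from index.ts; the URL, category, and context strings are only examples):

```typescript
import { ApiClient } from "./src/api-client.ts";

const client = new ApiClient("https://ss.deeflect.com");

// Ask the Model Intelligence API for the best coding model at a medium budget.
const pick = await client.pick("coding", "medium", undefined, "typescript,nextjs");
console.log(pick.data.id, pick.data.reason);

// Fire-and-forget cost tracking; failures are swallowed and, after a few, produce a single console warning.
client.logSpawn({
  model: pick.data.id,
  category: "coding",
  budget: "medium",
  mode: "single",
  role: "primary",
  source: "api",
});
```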