wolverine-ai 3.8.4 → 3.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/agent/agent-engine.js +4 -0
- package/src/core/ai-client.js +18 -4
- package/src/core/wolverine.js +49 -6
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "wolverine-ai",
|
|
3
|
-
"version": "3.8.4",
|
|
3
|
+
"version": "3.9.1",
|
|
4
4
|
"description": "Self-healing Node.js server framework powered by AI. Catches crashes, diagnoses errors, generates fixes, verifies, and restarts — automatically.",
|
|
5
5
|
"main": "src/index.js",
|
|
6
6
|
"bin": {
|
|
package/src/agent/agent-engine.js
CHANGED
|
@@ -446,6 +446,10 @@ class AgentEngine {
|
|
|
446
446
|
return { success: false, summary: "Token budget exhausted", filesModified: this.filesModified, turnCount: this.turnCount, totalTokens: this.totalTokens };
|
|
447
447
|
}
|
|
448
448
|
|
|
449
|
+
if (!response.choices || !response.choices[0]) {
|
|
450
|
+
console.log(chalk.red(` ⚠️ AI returned no choices: ${JSON.stringify(response).slice(0, 200)}`));
|
|
451
|
+
return { success: false, summary: "AI returned empty response", filesModified: this.filesModified, turnCount: this.turnCount, totalTokens: this.totalTokens };
|
|
452
|
+
}
|
|
449
453
|
const choice = response.choices[0];
|
|
450
454
|
const assistantMessage = choice.message || choice;
|
|
451
455
|
this.messages.push(assistantMessage);
|
package/src/core/ai-client.js
CHANGED
|
@@ -615,13 +615,27 @@ ${backupSourceCode ? `## Last Known Working Version\n\`\`\`javascript\n${backupS
|
|
|
615
615
|
Include both if needed, or just one.`;
|
|
616
616
|
|
|
617
617
|
const result = await aiCall({ model, systemPrompt, userPrompt, maxTokens: 2048, category: "coding" });
|
|
618
|
-
const content = result.content;
|
|
619
|
-
|
|
618
|
+
const content = (result.content || "").trim();
|
|
619
|
+
|
|
620
|
+
// Strip thinking tags (Gemma), markdown fences, and any prefix text
|
|
621
|
+
let cleaned = content
|
|
622
|
+
.replace(/<\|channel>.*?<channel\|>/gs, "")
|
|
623
|
+
.replace(/<\|think\|>[\s\S]*?<\|\/think\|>/g, "")
|
|
624
|
+
.replace(/^```(?:json)?\s*/gm, "")
|
|
625
|
+
.replace(/```\s*$/gm, "")
|
|
626
|
+
.trim();
|
|
627
|
+
|
|
628
|
+
// Extract JSON object from response
|
|
629
|
+
const jsonMatch = cleaned.match(/\{[\s\S]*"(?:explanation|changes|commands)"[\s\S]*\}/);
|
|
630
|
+
if (jsonMatch) cleaned = jsonMatch[0];
|
|
620
631
|
|
|
621
632
|
try {
|
|
622
633
|
return JSON.parse(cleaned);
|
|
623
|
-
} catch
|
|
624
|
-
|
|
634
|
+
} catch {
|
|
635
|
+
// Last resort: try to find any JSON object
|
|
636
|
+
const anyJson = cleaned.match(/\{[\s\S]*\}/);
|
|
637
|
+
if (anyJson) try { return JSON.parse(anyJson[0]); } catch {}
|
|
638
|
+
throw new Error(`AI response was not valid JSON`);
|
|
625
639
|
}
|
|
626
640
|
}
|
|
627
641
|
|
package/src/core/wolverine.js
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
const chalk = require("chalk");
|
|
2
2
|
const { parseError } = require("./error-parser");
|
|
3
|
-
const { requestRepair, getClient } = require("./ai-client");
|
|
3
|
+
const { requestRepair, getClient, aiCall } = require("./ai-client");
|
|
4
4
|
const { getModel } = require("./models");
|
|
5
5
|
const { applyPatch } = require("./patcher");
|
|
6
6
|
const { verifyFix } = require("./verifier");
|
|
@@ -232,23 +232,51 @@ async function _healImpl({ stderr, cwd, sandbox, notifier, rateLimiter, backupMa
|
|
|
232
232
|
} catch {}
|
|
233
233
|
}
|
|
234
234
|
|
|
235
|
-
// 7.
|
|
235
|
+
// 7. Classify error complexity — CLASSIFIER_MODEL determines strategy
|
|
236
|
+
let errorComplexity = "moderate";
|
|
237
|
+
try {
|
|
238
|
+
const classifyResult = await aiCall({
|
|
239
|
+
model: getModel("classifier"),
|
|
240
|
+
systemPrompt: "You classify Node.js errors. Respond with ONLY one word: SIMPLE, MODERATE, or COMPLEX.",
|
|
241
|
+
userPrompt: `Classify this error:\n${parsed.errorMessage}\n\nFile: ${parsed.filePath || "unknown"}\nType: ${parsed.errorType || "unknown"}`,
|
|
242
|
+
maxTokens: 10,
|
|
243
|
+
category: "classifier",
|
|
244
|
+
});
|
|
245
|
+
const word = (classifyResult.content || "").trim().toUpperCase();
|
|
246
|
+
if (word.includes("SIMPLE")) errorComplexity = "simple";
|
|
247
|
+
else if (word.includes("COMPLEX")) errorComplexity = "complex";
|
|
248
|
+
else errorComplexity = "moderate";
|
|
249
|
+
console.log(chalk.gray(` 🏷️ Classifier: ${errorComplexity}`));
|
|
250
|
+
} catch {
|
|
251
|
+
// Fallback to regex classification
|
|
252
|
+
if (/TypeError|ReferenceError|SyntaxError|Cannot find module/.test(parsed.errorMessage)) errorComplexity = "simple";
|
|
253
|
+
else if (/ECONNREFUSED|timeout|ENOENT|EACCES/.test(parsed.errorMessage)) errorComplexity = "moderate";
|
|
254
|
+
else errorComplexity = "complex";
|
|
255
|
+
console.log(chalk.gray(` 🏷️ Classifier (fallback): ${errorComplexity}`));
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
// 7b. Research — look up past fixes AND search for solutions
|
|
236
259
|
const researcher = new ResearchAgent({ brain, logger });
|
|
237
260
|
let researchContext = "";
|
|
238
261
|
try {
|
|
239
262
|
researchContext = await researcher.buildFixContext(parsed.errorMessage);
|
|
240
263
|
if (researchContext) console.log(chalk.gray(` 🔍 Research: found past context for this error`));
|
|
264
|
+
// For moderate/complex errors, also do a deep research call
|
|
265
|
+
if (errorComplexity !== "simple" && brain && brain._initialized) {
|
|
266
|
+
const deepCtx = await researcher.research(parsed.errorMessage, researchContext || brainContext);
|
|
267
|
+
if (deepCtx) researchContext = (researchContext || "") + "\n" + deepCtx;
|
|
268
|
+
}
|
|
241
269
|
} catch {}
|
|
242
270
|
|
|
243
|
-
//
|
|
244
|
-
const isSimpleError =
|
|
245
|
-
const isModerateError =
|
|
271
|
+
// 7c. Token budget by classified complexity
|
|
272
|
+
const isSimpleError = errorComplexity === "simple";
|
|
273
|
+
const isModerateError = errorComplexity === "moderate";
|
|
246
274
|
const tokenBudget = isSimpleError
|
|
247
275
|
? { fast: 5000, agent: 20000, subAgent: 15000 }
|
|
248
276
|
: isModerateError
|
|
249
277
|
? { fast: 10000, agent: 50000, subAgent: 30000 }
|
|
250
278
|
: { fast: 15000, agent: 100000, subAgent: 50000 };
|
|
251
|
-
console.log(chalk.gray(` 💰 Token budget: ${
|
|
279
|
+
console.log(chalk.gray(` 💰 Token budget: ${errorComplexity} (agent: ${tokenBudget.agent})`));
|
|
252
280
|
|
|
253
281
|
// 8. Goal Loop — set goal, iterate until fixed or exhausted
|
|
254
282
|
const loop = new GoalLoop({
|
|
@@ -431,6 +459,21 @@ async function _healImpl({ stderr, cwd, sandbox, notifier, rateLimiter, backupMa
|
|
|
431
459
|
});
|
|
432
460
|
}
|
|
433
461
|
|
|
462
|
+
// Generate a concise heal summary using CHAT_MODEL
|
|
463
|
+
if (goalResult.success) {
|
|
464
|
+
try {
|
|
465
|
+
const summaryResult = await aiCall({
|
|
466
|
+
model: getModel("chat"),
|
|
467
|
+
systemPrompt: "Summarize this server heal in 1-2 sentences for a developer dashboard. Be specific about what broke and how it was fixed.",
|
|
468
|
+
userPrompt: `Error: ${parsed.errorMessage}\nFile: ${parsed.filePath || "unknown"}\nFix mode: ${goalResult.mode}\nResolution: ${goalResult.explanation?.slice(0, 200) || "fixed"}`,
|
|
469
|
+
maxTokens: 80,
|
|
470
|
+
category: "chat",
|
|
471
|
+
});
|
|
472
|
+
goalResult.summary = (summaryResult.content || "").trim();
|
|
473
|
+
if (goalResult.summary) console.log(chalk.cyan(` 💬 ${goalResult.summary}`));
|
|
474
|
+
} catch {}
|
|
475
|
+
}
|
|
476
|
+
|
|
434
477
|
// Record outcome to brain — both successes AND failures with full context
|
|
435
478
|
if (brain && brain._initialized) {
|
|
436
479
|
try {
|