wolverine-ai 3.7.2 → 3.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wolverine-ai",
3
- "version": "3.7.2",
3
+ "version": "3.7.3",
4
4
  "description": "Self-healing Node.js server framework powered by AI. Catches crashes, diagnoses errors, generates fixes, verifies, and restarts — automatically.",
5
5
  "main": "src/index.js",
6
6
  "bin": {
@@ -98,13 +98,31 @@ Respond with ONLY valid JSON:
98
98
  category: "audit",
99
99
  });
100
100
 
101
- const content = result.content;
102
- const cleaned = content.replace(/^```(?:json)?\n?/, "").replace(/\n?```$/, "");
101
+ const content = (result.content || "").trim();
102
+ // Strip markdown code blocks, thinking tags, and any prefix text
103
+ let cleaned = content
104
+ .replace(/^```(?:json)?\s*/gm, "")
105
+ .replace(/```\s*$/gm, "")
106
+ .replace(/<\|channel>.*?<channel\|>/gs, "") // Gemma thinking tags
107
+ .replace(/<\|think\|>[\s\S]*?<\|\/think\|>/g, "") // thinking blocks
108
+ .trim();
109
+
110
+ // Extract JSON object from response (might have text before/after)
111
+ const jsonMatch = cleaned.match(/\{[\s\S]*"safe"\s*:\s*(true|false)[\s\S]*\}/);
112
+ if (jsonMatch) cleaned = jsonMatch[0];
103
113
 
104
114
  try {
105
- return JSON.parse(cleaned);
115
+ const parsed = JSON.parse(cleaned);
116
+ return parsed;
106
117
  } catch {
107
- return { safe: false, risk_level: "medium", explanation: "Could not parse safety scan response" };
118
+ // If the response contains "safe" as text, infer the result
119
+ if (/\bsafe\b.*\btrue\b/i.test(content) || /\bnone\b.*\brisk/i.test(content)) {
120
+ return { safe: true, risk_level: "none", explanation: "Inferred safe from unparseable response" };
121
+ }
122
+ // Default to SAFE for parse failures — blocking heals on bad JSON is worse than allowing a safe error through
123
+ // The regex layer already caught obvious injection patterns
124
+ console.log(chalk.yellow(` ⚠️ AI audit: could not parse response, defaulting to safe`));
125
+ return { safe: true, risk_level: "none", explanation: "Could not parse safety scan — defaulting safe (regex layer passed)" };
108
126
  }
109
127
  }
110
128