wolverine-ai 4.0.3 → 4.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/brain/embedder.js +2 -2
- package/src/core/ai-client.js +2 -2
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "wolverine-ai",
-  "version": "4.0.3",
+  "version": "4.0.4",
   "description": "Self-healing Node.js server framework powered by AI. Catches crashes, diagnoses errors, generates fixes, verifies, and restarts — automatically.",
   "main": "src/index.js",
   "bin": {
package/src/brain/embedder.js
CHANGED
@@ -51,7 +51,7 @@ async function embed(text) {
     response = await client.embeddings.create({ model, input: text });
   } catch (err) {
     // If wolverine proxy is down (startup, crash loop), fall back to OpenAI direct
-    if (provider === "wolverine" && /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed/i.test(err.message || "")) {
+    if (provider === "wolverine" && /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed|Connection error/i.test(err.message || "")) {
       const directClient = getClient("openai");
       response = await directClient.embeddings.create({ model: "text-embedding-3-small", input: text });
     } else {
@@ -98,7 +98,7 @@ async function embedBatch(texts) {
   try {
     response = await client.embeddings.create({ model, input: uncached });
   } catch (err) {
-    if (provider === "wolverine" && /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed/i.test(err.message || "")) {
+    if (provider === "wolverine" && /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed|Connection error/i.test(err.message || "")) {
      const directClient = getClient("openai");
      response = await directClient.embeddings.create({ model: "text-embedding-3-small", input: uncached });
    } else {
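
The only change in both hunks is the connection-error regex that guards the fallback to a direct OpenAI client. A minimal sketch (not part of the package) of what the extra alternative catches, assuming the proxy is reached through the official openai Node SDK, which wraps transport failures in an APIConnectionError whose default message is "Connection error." rather than a raw errno string:

// Hypothetical illustration, not code from wolverine-ai.
const OLD = /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed/i;
const NEW = /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed|Connection error/i;

const sdkMessage = "Connection error."; // typical openai-node APIConnectionError message (assumption)
console.log(OLD.test(sdkMessage));      // false: old regex misses it, so the error is rethrown
console.log(NEW.test(sdkMessage));      // true:  new regex triggers the OpenAI-direct fallback

In other words, 4.0.4 widens the set of errors treated as "proxy unreachable" so that SDK-level connection failures also route embedding calls to OpenAI directly instead of failing.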
package/src/core/ai-client.js
CHANGED
@@ -232,7 +232,7 @@ async function aiCall({ model, systemPrompt, userPrompt, maxTokens = 2048, tools
     result = await _chatCall(_getWolverineClient(), { model, systemPrompt, userPrompt, maxTokens, tools, toolChoice });
   } catch (proxyErr) {
     // If billing proxy is down (server crashing), fall back to direct GPU
-    const isConnErr = /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed/i.test(proxyErr.message || "");
+    const isConnErr = /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed|Connection error/i.test(proxyErr.message || "");
     const directClient = _getWolverineDirectClient();
     if (isConnErr && directClient) {
       console.log(chalk.yellow(" ⚠️ Billing proxy down — using direct GPU (unbilled)"));
@@ -269,7 +269,7 @@ async function aiCallWithHistory({ model, messages, tools, maxTokens = 4096, cat
   try {
     result = await _chatCallWithHistory(_getWolverineClient(), { model, messages, tools, maxTokens });
   } catch (proxyErr) {
-    const isConnErr = /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed/i.test(proxyErr.message || "");
+    const isConnErr = /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed|Connection error/i.test(proxyErr.message || "");
     const directClient = _getWolverineDirectClient();
     if (isConnErr && directClient) {
       console.log(chalk.yellow(" ⚠️ Billing proxy down — using direct GPU (unbilled)"));
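
For context, a condensed sketch of the fallback flow these two hunks sit inside. The names _chatCall, _getWolverineClient, and _getWolverineDirectClient come from the hunks above; the overall control flow is an assumption based on the visible lines, not the package's actual implementation:

// Hypothetical sketch of the proxy-then-direct fallback in aiCall().
async function aiCallSketch(opts) {
  try {
    // Normal path: route the chat call through the Wolverine billing proxy.
    return await _chatCall(_getWolverineClient(), opts);
  } catch (proxyErr) {
    // Treat transport-level failures, now including the SDK's generic
    // "Connection error" message, as "proxy is down" rather than a model error.
    const isConnErr =
      /ECONNREFUSED|ECONNRESET|ETIMEDOUT|fetch failed|Connection error/i
        .test(proxyErr.message || "");
    const directClient = _getWolverineDirectClient();
    if (isConnErr && directClient) {
      // Proxy unreachable: retry once against the direct GPU endpoint (unbilled).
      return await _chatCall(directClient, opts);
    }
    throw proxyErr; // Anything else is a genuine failure and is surfaced to the caller.
  }
}

The net effect of 4.0.4 is therefore consistent across all three call sites: connection failures reported by the SDK as "Connection error" now also activate the existing fallback paths instead of aborting the call.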