wolverine-ai 3.9.8 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/brain/brain.js +2 -2
- package/src/brain/embedder.js +10 -9
- package/src/core/config.js +37 -24
- package/src/core/models.js +42 -64
- package/src/templates/server/config/settings.json +7 -49
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "wolverine-ai",
-  "version": "3.9.8",
+  "version": "4.0.0",
   "description": "Self-healing Node.js server framework powered by AI. Catches crashes, diagnoses errors, generates fixes, verifies, and restarts — automatically.",
   "main": "src/index.js",
   "bin": {
package/src/brain/brain.js
CHANGED
@@ -130,7 +130,7 @@ const SEED_DOCS = [
     metadata: { topic: "system-detection" },
   },
   {
-    text: "Configuration:
+    text: "Configuration: hybrid-always architecture — no provider selection. Users pick the best model for each of 8 task roles directly in settings.json 'models' section. Mix and match: wolverine for audit, claude for reasoning, gpt for coding. Provider auto-detected from model name. Embedding is separate ('embedding' key) — always wolverine-embedding-1 billed through credits (proxies to text-embedding-3-small at 2x markup). Secrets in .env.local. Config priority: env vars > settings.json > defaults.",
     metadata: { topic: "configuration" },
   },
   {
@@ -162,7 +162,7 @@ const SEED_DOCS = [
     metadata: { topic: "smart-edit" },
   },
   {
-    text: "Token tracking: every AI call tracked with input/output tokens, USD cost, latencyMs,
+    text: "Token tracking: every AI call tracked with input/output tokens, USD cost, latencyMs, success/failure, and TPOT (time per output token). 8 task roles + embedding tracked separately. Categories by ACTIVITY: audit (injection scan), classifier (error classification), reasoning (AI analyzes error), coding (code generation without tools), tool (agent using read_file/write_file/bash_exec), research (deep investigation), chat (summaries), compacting (brain compression). Embedding billed through wolverine-embedding-1 (proxies text-embedding-3-small at 2x). Benchmark metrics: Speed (tok/s), TPOT (ms/output token), Cost/Call, Pass%. All tracked in byModelCategory for per-task model comparison.",
     metadata: { topic: "token-tracking" },
   },
   {
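
The token-tracking seed doc above names four benchmark metrics (Speed in tok/s, TPOT in ms per output token, Cost/Call, Pass%) but not how they fall out of the per-call fields it lists. A minimal sketch of plausible derivations, assuming a hypothetical array of tracked records whose field names (outputTokens, costUsd, latencyMs, success) are illustrative and not taken from the package:

// Sketch only: `calls` is a hypothetical list of tracked AI calls.
function benchmark(calls) {
  const total = calls.length;
  const outTokens = calls.reduce((sum, c) => sum + c.outputTokens, 0);
  const latencyMs = calls.reduce((sum, c) => sum + c.latencyMs, 0);
  const costUsd = calls.reduce((sum, c) => sum + c.costUsd, 0);
  const passed = calls.filter((c) => c.success).length;
  return {
    speedTokPerSec: outTokens / (latencyMs / 1000), // Speed (tok/s)
    tpotMs: latencyMs / outTokens,                  // TPOT (ms per output token)
    costPerCall: costUsd / total,                   // Cost/Call
    passPct: (passed / total) * 100,                // Pass%
  };
}
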
package/src/brain/embedder.js
CHANGED
@@ -1,5 +1,5 @@
 const { getClient, aiCall, detectProvider, _trackEmbedding } = require("../core/ai-client");
-const {
+const { getEmbeddingModel } = require("../core/models");
 
 /**
  * Embedder — converts text to vector embeddings using TEXT_EMBEDDING_MODEL.
@@ -41,12 +41,13 @@ async function embed(text) {
   const cached = _cacheGet(text);
   if (cached) return cached;
 
-
-  const
-
+  const model = getEmbeddingModel();
+  const provider = detectProvider(model);
+  // wolverine-embedding-1 routes through billing proxy, others go direct
+  const client = provider === "wolverine" ? getClient("wolverine") : getClient("openai");
 
   const startMs = Date.now();
-  const response = await
+  const response = await client.embeddings.create({
     model,
     input: text,
   });
@@ -81,12 +82,12 @@ async function embedBatch(texts) {
 
   if (uncached.length === 0) return results;
 
-
-  const
-  const
+  const model = getEmbeddingModel();
+  const provider = detectProvider(model);
+  const client = provider === "wolverine" ? getClient("wolverine") : getClient("openai");
 
   const startMs = Date.now();
-  const response = await
+  const response = await client.embeddings.create({
     model,
     input: uncached,
   });
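
With this change the embedding path depends only on the resolved model name: the default wolverine-embedding-1 maps to the credits-billed wolverine proxy client, while any other name (for example an env override of TEXT_EMBEDDING_MODEL) falls through to the direct OpenAI client. A minimal usage sketch, assuming embed() is exported from this module and that the require path below is correct (neither is shown in the diff):

// Illustrative only; assumes embed() is exported from src/brain/embedder.js.
const { embed } = require("./src/brain/embedder");

(async () => {
  // Default: getEmbeddingModel() returns "wolverine-embedding-1", so the
  // request goes through the wolverine billing-proxy client.
  await embed("first text, routed through the credits proxy");

  // Env var wins over settings.json; a non-wolverine model name falls
  // through to the direct OpenAI client on the next call.
  process.env.TEXT_EMBEDDING_MODEL = "text-embedding-3-small";
  await embed("second text, routed directly to OpenAI");
})();
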
package/src/core/config.js
CHANGED
@@ -2,11 +2,15 @@ const fs = require("fs");
 const path = require("path");
 
 /**
- * Config Loader —
+ * Config Loader — simplified hybrid-always architecture.
+ *
+ * No more provider selection. Users pick the best model for each task
+ * directly in settings.json. Provider is auto-detected from model name.
+ * Embedding is always wolverine-embedding-1 (billed through credits).
  *
  * Priority:
  * 1. Environment variables (highest — for CI/Docker overrides)
- * 2.
+ * 2. settings.json models section
  * 3. Hardcoded defaults (lowest)
  */
 
@@ -15,7 +19,6 @@ let _config = null;
 function loadConfig() {
   if (_config) return _config;
 
-  // Load from server/config/settings.json
   const configPath = path.join(process.cwd(), "server", "config", "settings.json");
   let fileConfig = {};
   if (fs.existsSync(configPath)) {
@@ -26,15 +29,17 @@ function loadConfig() {
     }
   }
 
-  //
-  //
-
-
-
+  // Models: read from settings.json "models" section directly.
+  // Legacy support: if old provider-based config exists, migrate it.
+  let modelSource = fileConfig.models || {};
+  if (!fileConfig.models && fileConfig.provider) {
+    // Legacy: read from {provider}_settings for backward compatibility
+    const settingsKey = `${fileConfig.provider}_settings`;
+    modelSource = fileConfig[settingsKey] || fileConfig.hybrid_settings || fileConfig.openai_settings || {};
+  }
 
   _config = {
-
-
+    // 8 task-specific model slots — user picks any model for each task
     models: {
       reasoning: process.env.REASONING_MODEL || modelSource.reasoning || "gpt-4o",
       coding: process.env.CODING_MODEL || modelSource.coding || "gpt-4o",
@@ -45,9 +50,11 @@ function loadConfig() {
       compacting: process.env.COMPACTING_MODEL || modelSource.compacting || "gpt-4o-mini",
       utility: process.env.COMPACTING_MODEL || modelSource.compacting || "gpt-4o-mini",
       research: process.env.RESEARCH_MODEL || modelSource.research || "gpt-4o",
-      embedding: process.env.TEXT_EMBEDDING_MODEL || modelSource.embedding || "text-embedding-3-small",
     },
 
+    // Embedding: separate from task models — always billed through wolverine credits
+    embedding: process.env.TEXT_EMBEDDING_MODEL || fileConfig.embedding || "wolverine-embedding-1",
+
     server: {
       port: parseInt(process.env.PORT, 10) || fileConfig.server?.port || 3000,
       maxRetries: parseInt(process.env.WOLVERINE_MAX_RETRIES, 10) || fileConfig.server?.maxRetries || 3,
@@ -90,30 +97,37 @@ function loadConfig() {
     },
   };
 
-  //
-
+  // Migrate old settings.json to new format + ensure defaults
+  _migrateAndEnsureDefaults(fileConfig, configPath);
 
   return _config;
 }
 
-/**
- * Get a config value by dot path: getConfig("models.reasoning")
- */
 function getConfig(dotPath) {
   const config = loadConfig();
   return dotPath.split(".").reduce((obj, key) => obj?.[key], config);
 }
 
-/**
- * Reset config cache (for testing).
- */
 function resetConfig() { _config = null; }
 
 /**
- *
- *
+ * Migrate old provider-based config to new flat models format.
+ * Also ensure default sections exist.
  */
-function
+function _migrateAndEnsureDefaults(fileConfig, configPath) {
+  let needsWrite = false;
+
+  // Migrate: if old provider system exists, convert to flat models
+  if (fileConfig.provider && !fileConfig.models) {
+    const settingsKey = `${fileConfig.provider}_settings`;
+    const source = fileConfig[settingsKey] || {};
+    fileConfig.models = { ...source };
+    delete fileConfig.models.embedding; // embedding is now separate
+    fileConfig.embedding = source.embedding || "wolverine-embedding-1";
+    needsWrite = true;
+  }
+
+  // Ensure default sections
   const DEFAULTS = {
     autoUpdate: { enabled: true, intervalMs: 3600000 },
     errorMonitor: { defaultThreshold: 1, windowMs: 30000, cooldownMs: 60000 },
@@ -123,7 +137,6 @@ function _ensureDefaults(fileConfig, configPath) {
     cluster: { enabled: false, workers: 0 },
   };
 
-  let needsWrite = false;
   for (const [key, defaults] of Object.entries(DEFAULTS)) {
     if (!fileConfig[key]) {
       fileConfig[key] = defaults;
@@ -136,7 +149,7 @@ function _ensureDefaults(fileConfig, configPath) {
       const tmpPath = configPath + ".tmp";
       fs.writeFileSync(tmpPath, JSON.stringify(fileConfig, null, 2), "utf-8");
       fs.renameSync(tmpPath, configPath);
-    } catch {
+    } catch {}
   }
 }
 
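
The new _migrateAndEnsureDefaults() rewrites a legacy provider-based settings.json in place: the active {provider}_settings block becomes the flat "models" object, its "embedding" entry is hoisted to a top-level key, and the file is written back atomically. A small sketch of that migration branch applied to an illustrative legacy object (values copied from the legacy template blocks removed further down in this diff):

// Mirrors the migration branch shown above; the legacy values are illustrative.
const legacy = {
  provider: "hybrid",
  hybrid_settings: {
    reasoning: "claude-haiku-4-5",
    coding: "claude-sonnet-4-6",
    embedding: "text-embedding-3-small",
  },
};

if (legacy.provider && !legacy.models) {
  const source = legacy[`${legacy.provider}_settings`] || {};
  legacy.models = { ...source };
  delete legacy.models.embedding;                      // embedding becomes top-level
  legacy.embedding = source.embedding || "wolverine-embedding-1";
}

console.log(legacy.models);    // { reasoning: "claude-haiku-4-5", coding: "claude-sonnet-4-6" }
console.log(legacy.embedding); // "text-embedding-3-small" (kept when the legacy value exists)
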
package/src/core/models.js
CHANGED
@@ -1,18 +1,13 @@
 /**
- * Model Configuration —
+ * Model Configuration — hybrid-always architecture.
  *
- *
- *
- *
+ * No provider selection. Users pick the best model for each of the 8 task
+ * roles directly in settings.json. Provider is auto-detected from model name.
+ * Embedding is separate — always billed through wolverine credits.
  *
- *
- * Mix and match providers per role (e.g., Anthropic for reasoning, OpenAI for coding).
+ * Mix and match: wolverine for audit, claude for reasoning, gpt for coding.
  */
 
-/**
- * Detect provider from model name.
- * @returns {"anthropic"|"openai"}
- */
 function detectProvider(model) {
   if (!model) return "openai";
   if (/^wolverine/i.test(model) || /^gemma/i.test(model)) return "wolverine";
@@ -22,97 +17,78 @@ function detectProvider(model) {
   if (/^llama/i.test(model) || /^meta/i.test(model)) return "meta";
   if (/^deepseek/i.test(model)) return "deepseek";
   if (/^command/i.test(model) || /^cohere/i.test(model)) return "cohere";
-  // Default: OpenAI (gpt-*, o1-*, o3-*, o4-*, codex-*, text-embedding-*, dall-e-*, etc.)
   return "openai";
 }
 
+// 8 task-specific model roles (embedding is separate)
 const MODEL_ROLES = {
-  // Deep reasoning — used for multi-step debugging when a simple fix fails
   reasoning: {
     envKey: "REASONING_MODEL",
-    default: "gpt-
+    default: "gpt-4o",
     description: "Deep analysis and complex multi-step debugging",
-    tier: "premium",
   },
-
-  // Code generation — the main repair model
   coding: {
     envKey: "CODING_MODEL",
-    default: "gpt-
-    description: "Code repair and fix generation",
-    tier: "premium",
+    default: "gpt-4o",
+    description: "Code repair and fix generation (no tools)",
   },
-
-  // Chat/explanation — used for generating human-readable explanations
   chat: {
     envKey: "CHAT_MODEL",
-    default: "gpt-
-    description: "
-    tier: "standard",
+    default: "gpt-4o-mini",
+    description: "Summaries, explanations, and user-facing messages",
   },
-
-  // Security audit — injection detection, runs on every single error
   audit: {
     envKey: "AUDIT_MODEL",
-    default: "gpt-
+    default: "gpt-4o-mini",
     description: "Security scanning and prompt injection detection",
-    tier: "economy",
   },
-
-  // Compacting — compresses text before embedding into brain
-  utility: {
+  compacting: {
     envKey: "COMPACTING_MODEL",
-    default: "gpt-
+    default: "gpt-4o-mini",
     description: "Text compaction before brain embedding",
-    tier: "economy",
   },
-
-  // Tool — chat responses that use function calling (call_endpoint, search_brain)
   tool: {
     envKey: "TOOL_MODEL",
     default: "gpt-4o-mini",
-    description: "
-    tier: "standard",
+    description: "Agent with tool calling (read_file, write_file, bash_exec)",
   },
-
-  // Classifier — routes commands to CHAT vs AGENT, picks tiers
   classifier: {
     envKey: "CLASSIFIER_MODEL",
     default: "gpt-4o-mini",
-    description: "
-    tier: "economy",
+    description: "Error classification and command routing",
   },
-
-  // Research — deep research for solutions when fixes fail
   research: {
     envKey: "RESEARCH_MODEL",
     default: "gpt-4o",
-    description: "Deep research for error solutions
-    tier: "premium",
-  },
-
-  // Embedding — vector representations for semantic search
-  embedding: {
-    envKey: "TEXT_EMBEDDING_MODEL",
-    default: "text-embedding-3-small",
-    description: "Text embeddings for brain vector store",
-    tier: "economy",
+    description: "Deep research for error solutions",
   },
 };
 
 /**
- * Get the configured model for a
+ * Get the configured model for a task role.
  */
 function getModel(role) {
+  // Legacy: "embedding" and "utility" still supported
+  if (role === "embedding") return getEmbeddingModel();
+  if (role === "utility") role = "compacting";
+
   const config = MODEL_ROLES[role];
   if (!config) {
     throw new Error(`Unknown model role: "${role}". Valid roles: ${Object.keys(MODEL_ROLES).join(", ")}`);
   }
-  // Priority: env var → wolverine.config.js → hardcoded default
   const { getConfig } = require("./config");
   return process.env[config.envKey] || getConfig(`models.${role}`) || config.default;
 }
 
+/**
+ * Get the embedding model — separate from task roles.
+ * Always billed through wolverine credits.
+ */
+function getEmbeddingModel() {
+  const { getConfig } = require("./config");
+  return process.env.TEXT_EMBEDDING_MODEL || getConfig("embedding") || "wolverine-embedding-1";
+}
+
 /**
  * Get all model assignments for logging.
  */
@@ -125,25 +101,27 @@ function getModelConfig() {
     config[role] = {
       model: resolved,
       source: fromEnv ? "env" : fromDefault ? "default" : "settings",
-      tier: def.tier,
     };
   }
+  // Add embedding separately
+  const embModel = getEmbeddingModel();
+  config.embedding = {
+    model: embModel,
+    source: process.env.TEXT_EMBEDDING_MODEL ? "env" : embModel === "wolverine-embedding-1" ? "default" : "settings",
+  };
   return config;
 }
 
-/**
- * Log the current model configuration.
- */
 function logModelConfig(chalk) {
   const config = getModelConfig();
-  const tierColors = { premium: "cyan", standard: "blue", economy: "gray" };
-
   for (const [role, info] of Object.entries(config)) {
-    const
+    const provider = detectProvider(info.model);
+    const provColors = { wolverine: "cyan", anthropic: "yellow", openai: "blue", google: "green" };
+    const color = provColors[provider] || "gray";
     const label = `${role.padEnd(10)} → ${info.model}`;
-    const source = info.source === "env" ? "(
+    const source = info.source === "env" ? "(env)" : info.source === "default" ? "" : "(settings)";
     console.log(chalk[color](` ${label} ${source}`));
   }
 }
 
-module.exports = { getModel, getModelConfig, logModelConfig, MODEL_ROLES, detectProvider };
+module.exports = { getModel, getEmbeddingModel, getModelConfig, logModelConfig, MODEL_ROLES, detectProvider };
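
With the tier metadata removed, resolution for each role is simply env var, then the settings.json "models" entry, then the hardcoded role default, and the two legacy role names are aliased inside getModel(). A short usage sketch, assuming only what the diff exports (the require path below is an assumption):

const { getModel, getEmbeddingModel } = require("./src/core/models"); // path is an assumption

// No env override: falls back to settings.json, then the role default ("gpt-4o").
console.log(getModel("reasoning"));

// Env var wins over settings.json and the default; any provider's model name works.
process.env.AUDIT_MODEL = "claude-haiku-4-5";
console.log(getModel("audit"));      // "claude-haiku-4-5"

// Legacy aliases: "utility" maps to "compacting", "embedding" delegates to getEmbeddingModel().
console.log(getModel("utility"));    // same result as getModel("compacting")
console.log(getModel("embedding"));  // "wolverine-embedding-1" unless overridden by env or settings
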
package/src/templates/server/config/settings.json
CHANGED
@@ -5,55 +5,18 @@
     "env": "development"
   },
 
-  "
-
-
-  "
-  "
-    "chat": "gpt-5-nano",
-    "tool": "gpt-5.4-mini",
+  "models": {
+    "reasoning": "gpt-4o",
+    "coding": "gpt-4o",
+    "chat": "gpt-4o-mini",
+    "tool": "gpt-4o-mini",
     "classifier": "gpt-4o-mini",
     "audit": "gpt-4o-mini",
     "compacting": "gpt-4o-mini",
-    "research": "
-    "embedding": "text-embedding-3-small"
+    "research": "gpt-4o"
   },
 
-  "
-    "reasoning": "claude-sonnet-4-6",
-    "coding": "claude-opus-4-6",
-    "chat": "claude-haiku-4-5",
-    "tool": "claude-opus-4-6",
-    "classifier": "claude-haiku-4-5",
-    "audit": "claude-haiku-4-5",
-    "compacting": "claude-haiku-4-5",
-    "research": "claude-sonnet-4-6",
-    "embedding": "text-embedding-3-small"
-  },
-
-  "hybrid_settings": {
-    "reasoning": "claude-haiku-4-5",
-    "coding": "claude-sonnet-4-6",
-    "chat": "gpt-5-nano",
-    "tool": "claude-sonnet-4-6",
-    "classifier": "gpt-4o-mini",
-    "audit": "gpt-4o-mini",
-    "compacting": "gpt-4o-mini",
-    "research": "o4-mini-deep-research",
-    "embedding": "text-embedding-3-small"
-  },
-
-  "wolverine_settings": {
-    "reasoning": "wolverine-test-1",
-    "coding": "wolverine-test-1",
-    "chat": "wolverine-test-1",
-    "tool": "wolverine-test-1",
-    "classifier": "wolverine-test-1",
-    "audit": "wolverine-test-1",
-    "compacting": "wolverine-test-1",
-    "research": "wolverine-test-1",
-    "embedding": "text-embedding-3-small"
-  },
+  "embedding": "wolverine-embedding-1",
 
   "server": {
     "port": 3000,
@@ -61,11 +24,6 @@
     "maxMemoryMB": 512
   },
 
-  "cluster": {
-    "enabled": false,
-    "workers": 0
-  },
-
   "telemetry": {
     "enabled": true,
     "heartbeatIntervalMs": 60000
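
The collapsed template keeps a single "models" block plus the separate "embedding" key. Because the provider is detected per model name, the same flat shape also supports the mixed setups the seed doc describes (wolverine for audit, claude for reasoning, gpt for coding). An illustrative layout, written here as a JS object for brevity; the model names are taken from elsewhere in this diff and are not recommendations:

// Shape of the "models" + "embedding" keys in server/config/settings.json (illustrative values).
const settings = {
  models: {
    reasoning: "claude-sonnet-4-6",  // detected as anthropic
    coding: "gpt-4o",                // openai
    chat: "gpt-4o-mini",
    tool: "gpt-4o-mini",
    classifier: "gpt-4o-mini",
    audit: "wolverine-test-1",       // wolverine
    compacting: "gpt-4o-mini",
    research: "gpt-4o",
  },
  embedding: "wolverine-embedding-1", // always billed through credits, separate from the 8 roles
};
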