@darksol/terminal 0.9.1 → 0.9.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +1 -1
- package/src/cli.js +30 -0
- package/src/llm/engine.js +8 -9
- package/src/llm/models.js +67 -0
- package/src/setup/wizard.js +61 -3
- package/src/web/commands.js +98 -4
package/README.md
CHANGED
|
@@ -15,7 +15,7 @@ A unified CLI for market intel, trading, AI-powered analysis, on-chain oracle, c
|
|
|
15
15
|
[](https://www.gnu.org/licenses/gpl-3.0)
|
|
16
16
|
[](https://nodejs.org/)
|
|
17
17
|
|
|
18
|
-
- Current release: **0.9.1**
|
|
18
|
+
- Current release: **0.9.2**
|
|
19
19
|
- Changelog: `CHANGELOG.md`
|
|
20
20
|
|
|
21
21
|
## Install
|
package/package.json
CHANGED
package/src/cli.js
CHANGED
|
@@ -32,6 +32,7 @@ import { clearMemories, exportMemories, getRecentMemories, searchMemories } from
|
|
|
32
32
|
import { getAgentStatus, planAgentGoal, runAgentTask } from './agent/index.js';
|
|
33
33
|
import { createRequire } from 'module';
|
|
34
34
|
import { resolve } from 'path';
|
|
35
|
+
import { getConfiguredModel, getProviderDefaultModel } from './llm/models.js';
|
|
35
36
|
const require = createRequire(import.meta.url);
|
|
36
37
|
const { version: PKG_VERSION } = require('../package.json');
|
|
37
38
|
|
|
@@ -1137,6 +1138,8 @@ export function cli(argv) {
|
|
|
1137
1138
|
['Output', cfg.output],
|
|
1138
1139
|
['Slippage', `${cfg.slippage}%`],
|
|
1139
1140
|
['Gas Multiplier', `${cfg.gasMultiplier}x`],
|
|
1141
|
+
['LLM Provider', cfg.llm?.provider || theme.dim('(not set)')],
|
|
1142
|
+
['LLM Model', getConfiguredModel(cfg.llm?.provider || 'openai') || theme.dim('(default)')],
|
|
1140
1143
|
['Soul User', cfg.soul?.userName || theme.dim('(not set)')],
|
|
1141
1144
|
['Agent Name', cfg.soul?.agentName || 'Darksol'],
|
|
1142
1145
|
['Tone', cfg.soul?.tone || theme.dim('(not set)')],
|
|
@@ -1153,6 +1156,33 @@ export function cli(argv) {
|
|
|
1153
1156
|
console.log('');
|
|
1154
1157
|
});
|
|
1155
1158
|
|
|
1159
|
+
config
|
|
1160
|
+
.command('model [model]')
|
|
1161
|
+
.description('Set the LLM model')
|
|
1162
|
+
.option('-p, --provider <provider>', 'LLM provider (defaults to current provider)')
|
|
1163
|
+
.action((model, opts) => {
|
|
1164
|
+
const provider = opts.provider || getConfig('llm.provider') || 'openai';
|
|
1165
|
+
if (!model) {
|
|
1166
|
+
const current = getConfiguredModel(provider);
|
|
1167
|
+
const fallback = getProviderDefaultModel(provider);
|
|
1168
|
+
info(`Current model for ${provider}: ${current || '(not set)'}`);
|
|
1169
|
+
if (fallback) {
|
|
1170
|
+
info(`Provider default: ${fallback}`);
|
|
1171
|
+
}
|
|
1172
|
+
return;
|
|
1173
|
+
}
|
|
1174
|
+
|
|
1175
|
+
if (opts.provider) {
|
|
1176
|
+
setConfig('llm.provider', provider);
|
|
1177
|
+
setConfig('llmProvider', provider);
|
|
1178
|
+
}
|
|
1179
|
+
setConfig('llm.model', model);
|
|
1180
|
+
if (provider === 'ollama') {
|
|
1181
|
+
setConfig('ollamaModel', model);
|
|
1182
|
+
}
|
|
1183
|
+
success(`LLM model for ${provider}: ${model}`);
|
|
1184
|
+
});
|
|
1185
|
+
|
|
1156
1186
|
config
|
|
1157
1187
|
.command('set <key> <value>')
|
|
1158
1188
|
.description('Set config value')
|
package/src/llm/engine.js
CHANGED
|
@@ -3,18 +3,19 @@ import { getKeyFromEnv, getKey } from '../config/keys.js';
|
|
|
3
3
|
import { getConfig } from '../config/store.js';
|
|
4
4
|
import { SessionMemory, extractMemories, searchMemories } from '../memory/index.js';
|
|
5
5
|
import { formatSystemPrompt as formatSoulSystemPrompt } from '../soul/index.js';
|
|
6
|
+
import { getProviderDefaultModel } from './models.js';
|
|
6
7
|
|
|
7
8
|
const PROVIDERS = {
|
|
8
9
|
openai: {
|
|
9
10
|
url: 'https://api.openai.com/v1/chat/completions',
|
|
10
|
-
defaultModel: '
|
|
11
|
+
defaultModel: getProviderDefaultModel('openai'),
|
|
11
12
|
authHeader: (key) => ({ Authorization: `Bearer ${key}` }),
|
|
12
13
|
parseResponse: (data) => data.choices?.[0]?.message?.content,
|
|
13
14
|
parseUsage: (data) => data.usage,
|
|
14
15
|
},
|
|
15
16
|
anthropic: {
|
|
16
17
|
url: 'https://api.anthropic.com/v1/messages',
|
|
17
|
-
defaultModel: '
|
|
18
|
+
defaultModel: getProviderDefaultModel('anthropic'),
|
|
18
19
|
authHeader: (key) => ({ 'x-api-key': key, 'anthropic-version': '2023-06-01' }),
|
|
19
20
|
buildBody: (model, messages, systemPrompt) => ({
|
|
20
21
|
model,
|
|
@@ -30,7 +31,7 @@ const PROVIDERS = {
|
|
|
30
31
|
},
|
|
31
32
|
openrouter: {
|
|
32
33
|
url: 'https://openrouter.ai/api/v1/chat/completions',
|
|
33
|
-
defaultModel: '
|
|
34
|
+
defaultModel: getProviderDefaultModel('openrouter'),
|
|
34
35
|
authHeader: (key) => ({
|
|
35
36
|
Authorization: `Bearer ${key}`,
|
|
36
37
|
'HTTP-Referer': 'https://darksol.net',
|
|
@@ -41,21 +42,21 @@ const PROVIDERS = {
|
|
|
41
42
|
},
|
|
42
43
|
minimax: {
|
|
43
44
|
url: 'https://api.minimax.io/v1/chat/completions',
|
|
44
|
-
defaultModel: '
|
|
45
|
+
defaultModel: getProviderDefaultModel('minimax'),
|
|
45
46
|
authHeader: (key) => ({ Authorization: `Bearer ${key}` }),
|
|
46
47
|
parseResponse: (data) => data.choices?.[0]?.message?.content,
|
|
47
48
|
parseUsage: (data) => data.usage,
|
|
48
49
|
},
|
|
49
50
|
ollama: {
|
|
50
51
|
url: null,
|
|
51
|
-
defaultModel: '
|
|
52
|
+
defaultModel: getProviderDefaultModel('ollama'),
|
|
52
53
|
authHeader: () => ({}),
|
|
53
54
|
parseResponse: (data) => data.choices?.[0]?.message?.content || data.message?.content,
|
|
54
55
|
parseUsage: () => ({ input: 0, output: 0 }),
|
|
55
56
|
},
|
|
56
57
|
bankr: {
|
|
57
58
|
url: 'https://llm.bankr.bot/v1/chat/completions',
|
|
58
|
-
defaultModel: '
|
|
59
|
+
defaultModel: getProviderDefaultModel('bankr'),
|
|
59
60
|
authHeader: (key) => ({ 'X-API-Key': key }),
|
|
60
61
|
parseResponse: (data) => data.choices?.[0]?.message?.content,
|
|
61
62
|
parseUsage: (data) => data.usage,
|
|
@@ -99,9 +100,7 @@ export class LLMEngine {
|
|
|
99
100
|
throw new Error(`Unknown LLM provider: ${this.provider}. Supported: ${Object.keys(PROVIDERS).join(', ')}`);
|
|
100
101
|
}
|
|
101
102
|
|
|
102
|
-
|
|
103
|
-
this.model = providerConfig.defaultModel;
|
|
104
|
-
}
|
|
103
|
+
this.model = this.model || providerConfig.defaultModel || getProviderDefaultModel(this.provider);
|
|
105
104
|
|
|
106
105
|
if (this.provider === 'ollama') {
|
|
107
106
|
const host = this.apiKey || getConfig('llm.ollamaHost') || 'http://localhost:11434';
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import { getConfig } from '../config/store.js';
|
|
2
|
+
|
|
3
|
+
export const MODEL_CATALOG = {
|
|
4
|
+
openai: {
|
|
5
|
+
defaultModel: 'gpt-5.4',
|
|
6
|
+
choices: [
|
|
7
|
+
{ value: 'gpt-5.4', label: 'gpt-5.4', desc: 'flagship, complex reasoning' },
|
|
8
|
+
{ value: 'gpt-5-mini', label: 'gpt-5-mini', desc: 'fast, lower cost' },
|
|
9
|
+
{ value: 'gpt-4o', label: 'gpt-4o', desc: 'previous gen, still good' },
|
|
10
|
+
{ value: 'o3', label: 'o3', desc: 'reasoning model' },
|
|
11
|
+
],
|
|
12
|
+
},
|
|
13
|
+
anthropic: {
|
|
14
|
+
defaultModel: 'claude-sonnet-4-6',
|
|
15
|
+
choices: [
|
|
16
|
+
{ value: 'claude-opus-4-6', label: 'claude-opus-4-6', desc: 'most intelligent, agents+coding' },
|
|
17
|
+
{ value: 'claude-sonnet-4-6', label: 'claude-sonnet-4-6', desc: 'best speed/intelligence balance' },
|
|
18
|
+
{ value: 'claude-haiku-4-5', label: 'claude-haiku-4-5', desc: 'fastest, near-frontier' },
|
|
19
|
+
],
|
|
20
|
+
},
|
|
21
|
+
openrouter: {
|
|
22
|
+
defaultModel: 'anthropic/claude-sonnet-4-6',
|
|
23
|
+
choices: [
|
|
24
|
+
{ value: 'anthropic/claude-sonnet-4-6', label: 'anthropic/claude-sonnet-4-6', desc: 'popular pick' },
|
|
25
|
+
{ value: 'openai/gpt-5.4', label: 'openai/gpt-5.4', desc: 'popular pick' },
|
|
26
|
+
{ value: 'google/gemini-2.5-pro', label: 'google/gemini-2.5-pro', desc: 'popular pick' },
|
|
27
|
+
{ value: 'meta-llama/llama-4-maverick', label: 'meta-llama/llama-4-maverick', desc: 'popular pick' },
|
|
28
|
+
{ value: 'deepseek/deepseek-r1', label: 'deepseek/deepseek-r1', desc: 'popular pick' },
|
|
29
|
+
],
|
|
30
|
+
allowCustom: true,
|
|
31
|
+
},
|
|
32
|
+
minimax: {
|
|
33
|
+
defaultModel: 'MiniMax-M2.5',
|
|
34
|
+
choices: [
|
|
35
|
+
{ value: 'MiniMax-M2.5', label: 'MiniMax-M2.5', desc: 'flagship, 204K context, ~60 tps' },
|
|
36
|
+
{ value: 'MiniMax-M2.5-highspeed', label: 'MiniMax-M2.5-highspeed', desc: 'same perf, ~100 tps' },
|
|
37
|
+
{ value: 'MiniMax-M2.1', label: 'MiniMax-M2.1', desc: 'code-focused' },
|
|
38
|
+
{ value: 'MiniMax-M2.1-highspeed', label: 'MiniMax-M2.1-highspeed', desc: 'code-focused, faster' },
|
|
39
|
+
{ value: 'MiniMax-M2', label: 'MiniMax-M2', desc: 'agentic, advanced reasoning' },
|
|
40
|
+
],
|
|
41
|
+
},
|
|
42
|
+
ollama: {
|
|
43
|
+
defaultModel: 'llama3.1',
|
|
44
|
+
textInput: true,
|
|
45
|
+
},
|
|
46
|
+
bankr: {
|
|
47
|
+
defaultModel: 'claude-sonnet-4.6',
|
|
48
|
+
managed: true,
|
|
49
|
+
},
|
|
50
|
+
};
|
|
51
|
+
|
|
52
|
+
export function getProviderDefaultModel(provider) {
|
|
53
|
+
return MODEL_CATALOG[provider]?.defaultModel || null;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
export function getConfiguredProvider(fallback = 'openai') {
|
|
57
|
+
return getConfig('llm.provider') || fallback;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
export function getConfiguredModel(provider = getConfiguredProvider()) {
|
|
61
|
+
const configured = getConfig('llm.model');
|
|
62
|
+
return configured || getProviderDefaultModel(provider);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
export function getModelSelectionMeta(provider = getConfiguredProvider()) {
|
|
66
|
+
return MODEL_CATALOG[provider] || { defaultModel: null };
|
|
67
|
+
}
|
package/src/setup/wizard.js
CHANGED
|
@@ -8,6 +8,7 @@ import { hasSoul, runSoulSetup } from '../soul/index.js';
|
|
|
8
8
|
import { createServer } from 'http';
|
|
9
9
|
import open from 'open';
|
|
10
10
|
import crypto from 'crypto';
|
|
11
|
+
import { getModelSelectionMeta } from '../llm/models.js';
|
|
11
12
|
|
|
12
13
|
// ══════════════════════════════════════════════════
|
|
13
14
|
// FIRST-RUN SETUP WIZARD
|
|
@@ -116,6 +117,8 @@ export async function runSetupWizard(opts = {}) {
|
|
|
116
117
|
* Setup a cloud provider (OpenAI, Anthropic, OpenRouter, MiniMax)
|
|
117
118
|
*/
|
|
118
119
|
async function setupCloudProvider(provider) {
|
|
120
|
+
await selectAndSaveModel(provider);
|
|
121
|
+
|
|
119
122
|
const supportsOAuth = ['openai', 'anthropic'].includes(provider);
|
|
120
123
|
const providerName = {
|
|
121
124
|
openai: 'OpenAI',
|
|
@@ -204,11 +207,11 @@ async function setupOllama() {
|
|
|
204
207
|
type: 'input',
|
|
205
208
|
name: 'model',
|
|
206
209
|
message: theme.gold('Default model:'),
|
|
207
|
-
default: '
|
|
210
|
+
default: getModelSelectionMeta('ollama').defaultModel,
|
|
211
|
+
validate: (v) => v.trim().length > 0 || 'Model is required',
|
|
208
212
|
}]);
|
|
209
213
|
|
|
210
|
-
|
|
211
|
-
setConfig('ollamaModel', model);
|
|
214
|
+
saveModelConfig(model.trim(), 'ollama');
|
|
212
215
|
setConfig('llm.provider', 'ollama');
|
|
213
216
|
setConfig('llmProvider', 'ollama');
|
|
214
217
|
|
|
@@ -216,6 +219,61 @@ async function setupOllama() {
|
|
|
216
219
|
info('Make sure Ollama is running: ollama serve');
|
|
217
220
|
}
|
|
218
221
|
|
|
222
|
+
async function selectAndSaveModel(provider) {
|
|
223
|
+
const meta = getModelSelectionMeta(provider);
|
|
224
|
+
if (!meta || meta.managed) return null;
|
|
225
|
+
|
|
226
|
+
if (meta.textInput) {
|
|
227
|
+
const { model } = await inquirer.prompt([{
|
|
228
|
+
type: 'input',
|
|
229
|
+
name: 'model',
|
|
230
|
+
message: theme.gold('Model:'),
|
|
231
|
+
default: meta.defaultModel,
|
|
232
|
+
validate: (v) => v.trim().length > 0 || 'Model is required',
|
|
233
|
+
}]);
|
|
234
|
+
saveModelConfig(model.trim(), provider);
|
|
235
|
+
return model.trim();
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
const choices = (meta.choices || []).map((choice) => ({
|
|
239
|
+
name: `${choice.value} - ${choice.desc}`,
|
|
240
|
+
value: choice.value,
|
|
241
|
+
}));
|
|
242
|
+
|
|
243
|
+
if (meta.allowCustom) {
|
|
244
|
+
choices.push({ name: 'Custom model string', value: '__custom__' });
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
const { selectedModel } = await inquirer.prompt([{
|
|
248
|
+
type: 'list',
|
|
249
|
+
name: 'selectedModel',
|
|
250
|
+
message: theme.gold('Choose model:'),
|
|
251
|
+
choices,
|
|
252
|
+
default: meta.defaultModel,
|
|
253
|
+
}]);
|
|
254
|
+
|
|
255
|
+
if (selectedModel === '__custom__') {
|
|
256
|
+
const { customModel } = await inquirer.prompt([{
|
|
257
|
+
type: 'input',
|
|
258
|
+
name: 'customModel',
|
|
259
|
+
message: theme.gold('Custom model string:'),
|
|
260
|
+
validate: (v) => v.trim().length > 0 || 'Model is required',
|
|
261
|
+
}]);
|
|
262
|
+
saveModelConfig(customModel.trim(), provider);
|
|
263
|
+
return customModel.trim();
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
saveModelConfig(selectedModel, provider);
|
|
267
|
+
return selectedModel;
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
function saveModelConfig(model, provider) {
|
|
271
|
+
setConfig('llm.model', model);
|
|
272
|
+
if (provider === 'ollama') {
|
|
273
|
+
setConfig('ollamaModel', model);
|
|
274
|
+
}
|
|
275
|
+
}
|
|
276
|
+
|
|
219
277
|
/**
|
|
220
278
|
* Show instructions for getting API keys
|
|
221
279
|
*/
|
package/src/web/commands.js
CHANGED
|
@@ -9,6 +9,7 @@ import { join, dirname } from 'path';
|
|
|
9
9
|
import { homedir } from 'os';
|
|
10
10
|
import { spawn } from 'child_process';
|
|
11
11
|
import { fileURLToPath } from 'url';
|
|
12
|
+
import { getConfiguredModel, getModelSelectionMeta, getProviderDefaultModel } from '../llm/models.js';
|
|
12
13
|
|
|
13
14
|
// ══════════════════════════════════════════════════
|
|
14
15
|
// CHAT LOG PERSISTENCE
|
|
@@ -416,12 +417,32 @@ export async function handleMenuSelect(id, value, item, ws) {
|
|
|
416
417
|
})));
|
|
417
418
|
return {};
|
|
418
419
|
}
|
|
420
|
+
if (value === 'model') {
|
|
421
|
+
return showModelSelectionMenu(ws);
|
|
422
|
+
}
|
|
419
423
|
if (value === 'keys') {
|
|
420
424
|
return await handleCommand('keys', ws);
|
|
421
425
|
}
|
|
422
426
|
ws.sendLine('');
|
|
423
427
|
return {};
|
|
424
428
|
|
|
429
|
+
case 'config_model':
|
|
430
|
+
if (value === 'back') {
|
|
431
|
+
ws.sendLine('');
|
|
432
|
+
return {};
|
|
433
|
+
}
|
|
434
|
+
if (value === '__custom__') {
|
|
435
|
+
ws.sendPrompt('config_model_input', 'Model:', { provider: getConfig('llm.provider') || 'openai' });
|
|
436
|
+
return {};
|
|
437
|
+
}
|
|
438
|
+
saveSelectedModel(value);
|
|
439
|
+
chatEngines.delete(ws);
|
|
440
|
+
ws.sendLine('');
|
|
441
|
+
ws.sendLine(` ${ANSI.green}✓ Model set to ${value}${ANSI.reset}`);
|
|
442
|
+
ws.sendLine(` ${ANSI.dim}AI session refreshed.${ANSI.reset}`);
|
|
443
|
+
ws.sendLine('');
|
|
444
|
+
return {};
|
|
445
|
+
|
|
425
446
|
case 'main_menu':
|
|
426
447
|
if (value === 'back') {
|
|
427
448
|
ws.sendLine('');
|
|
@@ -429,7 +450,6 @@ export async function handleMenuSelect(id, value, item, ws) {
|
|
|
429
450
|
}
|
|
430
451
|
return await handleCommand(value, ws);
|
|
431
452
|
}
|
|
432
|
-
|
|
433
453
|
return {};
|
|
434
454
|
}
|
|
435
455
|
|
|
@@ -473,6 +493,23 @@ export async function handlePromptResponse(id, value, meta, ws) {
|
|
|
473
493
|
return {};
|
|
474
494
|
}
|
|
475
495
|
|
|
496
|
+
if (id === 'config_model_input') {
|
|
497
|
+
const provider = meta?.provider || getConfig('llm.provider') || 'openai';
|
|
498
|
+
const model = String(value || '').trim();
|
|
499
|
+
if (!model) {
|
|
500
|
+
ws.sendLine(` ${ANSI.red}✗ Model is required${ANSI.reset}`);
|
|
501
|
+
ws.sendLine('');
|
|
502
|
+
return {};
|
|
503
|
+
}
|
|
504
|
+
|
|
505
|
+
saveSelectedModel(model, provider);
|
|
506
|
+
chatEngines.delete(ws);
|
|
507
|
+
ws.sendLine(` ${ANSI.green}✓ Model set to ${model}${ANSI.reset}`);
|
|
508
|
+
ws.sendLine(` ${ANSI.dim}AI session refreshed.${ANSI.reset}`);
|
|
509
|
+
ws.sendLine('');
|
|
510
|
+
return {};
|
|
511
|
+
}
|
|
512
|
+
|
|
476
513
|
if (id === 'cards_status_id') {
|
|
477
514
|
if (!value) { ws.sendLine(` ${ANSI.red}✗ Cancelled${ANSI.reset}`); ws.sendLine(''); return {}; }
|
|
478
515
|
return await showCardStatus(value.trim(), ws);
|
|
@@ -789,7 +826,9 @@ export function getAIStatus() {
|
|
|
789
826
|
|
|
790
827
|
if (connected.length > 0) {
|
|
791
828
|
const names = connected.map(p => SERVICES[p]?.name || p).join(', ');
|
|
792
|
-
|
|
829
|
+
const provider = getConfig('llm.provider') || connected[0];
|
|
830
|
+
const model = provider === 'bankr' ? 'gateway managed' : (getConfiguredModel(provider) || getProviderDefaultModel(provider) || 'default');
|
|
831
|
+
return ` ${green}● AI ready${reset} ${dim}(${names} | ${provider}/${model})${reset}\r\n ${dim}Type ${gold}ai <question>${dim} to start chatting. Chat logs saved to ~/.darksol/chat-logs/${reset}\r\n\r\n`;
|
|
793
832
|
}
|
|
794
833
|
|
|
795
834
|
return [
|
|
@@ -836,7 +875,7 @@ export async function handleCommand(cmd, ws) {
|
|
|
836
875
|
case 'mail':
|
|
837
876
|
return await cmdMail(args, ws);
|
|
838
877
|
case 'config':
|
|
839
|
-
return await cmdConfig(ws);
|
|
878
|
+
return await cmdConfig(args, ws);
|
|
840
879
|
case 'oracle':
|
|
841
880
|
return await cmdOracle(args, ws);
|
|
842
881
|
case 'cards':
|
|
@@ -1794,11 +1833,20 @@ async function cmdFacilitator(args, ws) {
|
|
|
1794
1833
|
return {};
|
|
1795
1834
|
}
|
|
1796
1835
|
|
|
1797
|
-
async function cmdConfig(ws) {
|
|
1836
|
+
async function cmdConfig(args, ws) {
|
|
1837
|
+
const sub = args[0]?.toLowerCase();
|
|
1838
|
+
if (sub === 'model') {
|
|
1839
|
+
return showModelSelectionMenu(ws);
|
|
1840
|
+
}
|
|
1841
|
+
|
|
1798
1842
|
const chain = getConfig('chain') || 'base';
|
|
1799
1843
|
const wallet = getConfig('activeWallet') || '(none)';
|
|
1800
1844
|
const slippage = getConfig('slippage') || '0.5';
|
|
1801
1845
|
const email = getConfig('mailEmail') || '(none)';
|
|
1846
|
+
const provider = getConfig('llm.provider') || '(not set)';
|
|
1847
|
+
const model = provider === 'bankr'
|
|
1848
|
+
? 'gateway managed'
|
|
1849
|
+
: getConfiguredModel(provider === '(not set)' ? 'openai' : provider) || '(default)';
|
|
1802
1850
|
|
|
1803
1851
|
ws.sendLine(`${ANSI.gold} ◆ CONFIG${ANSI.reset}`);
|
|
1804
1852
|
ws.sendLine(`${ANSI.dim} ${'─'.repeat(50)}${ANSI.reset}`);
|
|
@@ -1806,12 +1854,15 @@ async function cmdConfig(ws) {
|
|
|
1806
1854
|
ws.sendLine(` ${ANSI.darkGold}Wallet${ANSI.reset} ${ANSI.white}${wallet}${ANSI.reset}`);
|
|
1807
1855
|
ws.sendLine(` ${ANSI.darkGold}Slippage${ANSI.reset} ${ANSI.white}${slippage}%${ANSI.reset}`);
|
|
1808
1856
|
ws.sendLine(` ${ANSI.darkGold}Mail${ANSI.reset} ${ANSI.white}${email}${ANSI.reset}`);
|
|
1857
|
+
ws.sendLine(` ${ANSI.darkGold}LLM Provider${ANSI.reset} ${ANSI.white}${provider}${ANSI.reset}`);
|
|
1858
|
+
ws.sendLine(` ${ANSI.darkGold}LLM Model${ANSI.reset} ${ANSI.white}${model}${ANSI.reset}`);
|
|
1809
1859
|
ws.sendLine(` ${ANSI.darkGold}AI${ANSI.reset} ${hasAnyLLM() ? `${ANSI.green}● Ready${ANSI.reset}` : `${ANSI.dim}○ Not configured${ANSI.reset}`}`);
|
|
1810
1860
|
ws.sendLine('');
|
|
1811
1861
|
|
|
1812
1862
|
// Offer interactive config
|
|
1813
1863
|
ws.sendMenu('config_action', '◆ Configure', [
|
|
1814
1864
|
{ value: 'chain', label: '🔗 Change chain', desc: `Currently: ${chain}` },
|
|
1865
|
+
{ value: 'model', label: '🧠 Change model', desc: `Currently: ${model}` },
|
|
1815
1866
|
{ value: 'keys', label: '🔑 LLM / API keys', desc: '' },
|
|
1816
1867
|
{ value: 'back', label: '← Back', desc: '' },
|
|
1817
1868
|
]);
|
|
@@ -1819,6 +1870,49 @@ async function cmdConfig(ws) {
|
|
|
1819
1870
|
return {};
|
|
1820
1871
|
}
|
|
1821
1872
|
|
|
1873
|
+
/**
|
|
1874
|
+
* Show model selection menu for current provider
|
|
1875
|
+
*/
|
|
1876
|
+
function showModelSelectionMenu(ws) {
|
|
1877
|
+
const provider = getConfig('llm.provider') || 'openai';
|
|
1878
|
+
const meta = getModelSelectionMeta(provider);
|
|
1879
|
+
|
|
1880
|
+
if (meta.managed) {
|
|
1881
|
+
ws.sendLine(` ${ANSI.dim}Bankr selects the backing model automatically.${ANSI.reset}`);
|
|
1882
|
+
ws.sendLine('');
|
|
1883
|
+
return {};
|
|
1884
|
+
}
|
|
1885
|
+
|
|
1886
|
+
if (meta.textInput) {
|
|
1887
|
+
ws.sendPrompt('config_model_input', 'Model:', { provider });
|
|
1888
|
+
return {};
|
|
1889
|
+
}
|
|
1890
|
+
|
|
1891
|
+
const items = (meta.choices || []).map(choice => ({
|
|
1892
|
+
value: choice.value,
|
|
1893
|
+
label: choice.value,
|
|
1894
|
+
desc: choice.desc,
|
|
1895
|
+
}));
|
|
1896
|
+
|
|
1897
|
+
if (meta.allowCustom) {
|
|
1898
|
+
items.push({ value: '__custom__', label: 'Custom model', desc: 'Type any model string' });
|
|
1899
|
+
}
|
|
1900
|
+
|
|
1901
|
+
items.push({ value: 'back', label: '← Back', desc: '' });
|
|
1902
|
+
ws.sendMenu('config_model', '🧠 Select Model', items);
|
|
1903
|
+
return {};
|
|
1904
|
+
}
|
|
1905
|
+
|
|
1906
|
+
/**
|
|
1907
|
+
* Save selected model to config
|
|
1908
|
+
*/
|
|
1909
|
+
function saveSelectedModel(model, provider = getConfig('llm.provider') || 'openai') {
|
|
1910
|
+
setConfig('llm.model', model);
|
|
1911
|
+
if (provider === 'ollama') {
|
|
1912
|
+
setConfig('ollamaModel', model);
|
|
1913
|
+
}
|
|
1914
|
+
}
|
|
1915
|
+
|
|
1822
1916
|
// ══════════════════════════════════════════════════
|
|
1823
1917
|
// AI CHAT — LLM-powered assistant in the web shell
|
|
1824
1918
|
// ══════════════════════════════════════════════════
|