0agent 1.0.22 → 1.0.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/chat.js +63 -35
- package/package.json +1 -1
package/bin/chat.js
CHANGED
|
@@ -57,6 +57,55 @@ const C = {
|
|
|
57
57
|
// Wrap `text` in the given ANSI escape `color` and append C.reset so styling does not leak.
const fmt = (color, text) => `${color}${text}${C.reset}`;
|
|
58
58
|
// Return the cursor to column 0 (\r) and erase the whole current line (ANSI CSI 2K).
const clearLine = () => process.stdout.write('\r\x1b[2K');
|
|
59
59
|
|
|
60
|
+
// ─── LLM ping — direct 1-token call, bypasses daemon, instant ────────────────

/**
 * Probe the configured LLM provider with a minimal 1-token request to verify
 * the API key / endpoint, bypassing the daemon entirely.
 *
 * @param {{provider: string, model: string, api_key?: string, base_url?: string}} provider
 * @returns {Promise<{ok: true, model: string} | {ok: false, error: string}>}
 *   Never rejects — network failures and the 8s timeout are reported via
 *   `{ ok: false, error }`.
 */
async function pingLLM(provider) {
  const key = provider.api_key ?? '';
  const model = provider.model;
  const sig = AbortSignal.timeout(8000); // hard cap so the CLI never hangs on a dead endpoint

  // Shared response handling for the JSON chat APIs (anthropic + the
  // OpenAI-compatible endpoints). The body is parsed defensively: proxies and
  // gateways can answer 5xx with an HTML page, and an unguarded `r.json()`
  // would surface that as a confusing "Unexpected token" parse error instead
  // of the HTTP status.
  const readChatResponse = async (r) => {
    let d = null;
    try { d = await r.json(); } catch { /* non-JSON body — fall back to status */ }
    if (!r.ok) return { ok: false, error: d?.error?.message ?? `HTTP ${r.status}` };
    return { ok: true, model: d?.model ?? model };
  };

  try {
    if (provider.provider === 'anthropic') {
      const r = await fetch('https://api.anthropic.com/v1/messages', {
        method: 'POST', signal: sig,
        headers: { 'x-api-key': key, 'anthropic-version': '2023-06-01', 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, max_tokens: 1, messages: [{ role: 'user', content: 'hi' }] }),
      });
      return await readChatResponse(r);
    }

    // These three expose an OpenAI-compatible /chat/completions endpoint.
    if (['openai', 'xai', 'gemini'].includes(provider.provider)) {
      const base = provider.provider === 'xai' ? 'https://api.x.ai/v1'
        : provider.provider === 'gemini' ? 'https://generativelanguage.googleapis.com/v1beta/openai'
        : 'https://api.openai.com/v1';
      const r = await fetch(`${base}/chat/completions`, {
        method: 'POST', signal: sig,
        headers: { Authorization: `Bearer ${key}`, 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, max_tokens: 1, messages: [{ role: 'user', content: 'hi' }] }),
      });
      return await readChatResponse(r);
    }

    if (provider.provider === 'ollama') {
      const base = provider.base_url ?? 'http://localhost:11434';
      const r = await fetch(`${base}/api/generate`, {
        method: 'POST', signal: sig,
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, prompt: 'hi', stream: false }),
      });
      if (!r.ok) return { ok: false, error: `Ollama HTTP ${r.status}` };
      return { ok: true, model };
    }

    return { ok: true, model }; // unknown provider — skip check
  } catch (e) {
    // Covers fetch network errors and the AbortSignal timeout.
    return { ok: false, error: e.message };
  }
}
|
|
108
|
+
|
|
60
109
|
// ─── Config management ────────────────────────────────────────────────────────
|
|
61
110
|
function loadConfig() {
|
|
62
111
|
if (!existsSync(CONFIG_PATH)) return null;
|
|
@@ -437,47 +486,26 @@ connectWS();
|
|
|
437
486
|
else { console.log(` ${fmt(C.red, '✗')} Daemon failed. Run: 0agent start`); rl.prompt(); return; }
|
|
438
487
|
}
|
|
439
488
|
|
|
440
|
-
// Step 2:
|
|
489
|
+
// Step 2: lightweight direct API ping (1 token — fast, no daemon involved)
|
|
441
490
|
const provider = getCurrentProvider(cfg);
|
|
442
|
-
if (provider?.api_key?.trim()
|
|
443
|
-
|
|
491
|
+
if (!provider?.api_key?.trim() && provider?.provider !== 'ollama') {
|
|
492
|
+
console.log(` ${fmt(C.yellow, '⚠')} No API key. Use: ${fmt(C.cyan, '/key ' + (provider?.provider ?? 'anthropic') + ' <key>')}\n`);
|
|
493
|
+
} else {
|
|
494
|
+
const llmSpin = new Spinner(`Checking ${provider.provider}/${provider.model}`);
|
|
444
495
|
llmSpin.start();
|
|
445
496
|
try {
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
const sid = s.session_id ?? s.id;
|
|
454
|
-
// Poll briefly for completion
|
|
455
|
-
for (let i = 0; i < 20; i++) {
|
|
456
|
-
await new Promise(r => setTimeout(r, 500));
|
|
457
|
-
const check = await fetch(`${BASE_URL}/api/sessions/${sid}`).then(r => r.json()).catch(() => null);
|
|
458
|
-
if (check?.status === 'completed') {
|
|
459
|
-
llmSpin.stop();
|
|
460
|
-
const model = check.result?.model ?? provider.model;
|
|
461
|
-
console.log(` ${fmt(C.green, '✓')} LLM connected — ${fmt(C.cyan, model)}\n`);
|
|
462
|
-
break;
|
|
463
|
-
}
|
|
464
|
-
if (check?.status === 'failed') {
|
|
465
|
-
llmSpin.stop();
|
|
466
|
-
console.log(` ${fmt(C.red, '✗')} LLM error: ${check.error}`);
|
|
467
|
-
console.log(` ${fmt(C.dim, 'Check your API key with: /key ' + provider.provider)}\n`);
|
|
468
|
-
break;
|
|
469
|
-
}
|
|
470
|
-
}
|
|
471
|
-
if (llmSpin.active) {
|
|
472
|
-
llmSpin.stop();
|
|
473
|
-
console.log(` ${fmt(C.yellow, '⚠')} LLM check timed out — it may still work\n`);
|
|
497
|
+
const result = await pingLLM(provider);
|
|
498
|
+
llmSpin.stop();
|
|
499
|
+
if (result.ok) {
|
|
500
|
+
console.log(` ${fmt(C.green, '✓')} ${fmt(C.cyan, result.model ?? provider.model)} is ready\n`);
|
|
501
|
+
} else {
|
|
502
|
+
console.log(` ${fmt(C.red, '✗')} LLM error: ${result.error}`);
|
|
503
|
+
console.log(` ${fmt(C.dim, 'Fix with: /key ' + provider.provider + ' <new-key>')}\n`);
|
|
474
504
|
}
|
|
475
|
-
} catch {
|
|
505
|
+
} catch (e) {
|
|
476
506
|
llmSpin.stop();
|
|
477
|
-
console.log(` ${fmt(C.yellow, '⚠')} Could not
|
|
507
|
+
console.log(` ${fmt(C.yellow, '⚠')} Could not reach ${provider.provider}: ${e.message}\n`);
|
|
478
508
|
}
|
|
479
|
-
} else {
|
|
480
|
-
console.log(` ${fmt(C.yellow, '⚠')} No API key set. Use: ${fmt(C.cyan, '/key ' + (provider?.provider ?? 'anthropic') + ' <your-key>')}\n`);
|
|
481
509
|
}
|
|
482
510
|
|
|
483
511
|
rl.prompt();
|