open-agents-ai 0.16.1 → 0.16.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +42 -0
- package/dist/index.js +200 -47
- package/package.json +1 -1
package/README.md
CHANGED
@@ -276,6 +276,48 @@ oa --backend vllm --backend-url http://localhost:8000/v1 "add tests"
 oa --backend-url http://10.0.0.5:11434 "refactor auth"
 ```

+## Supported Inference Providers
+
+Open Agents auto-detects your provider from the endpoint URL and configures auth + health checks accordingly. All providers use standard `Authorization: Bearer <key>` authentication.
+
+| Provider | Endpoint URL | API Key | Notes |
+|----------|-------------|---------|-------|
+| **Ollama** (local) | `http://localhost:11434` | None | Default. Auto-detects, auto-expands context window |
+| **vLLM** (local) | `http://localhost:8000` | Optional | Self-hosted OpenAI-compatible server |
+| **LM Studio** (local) | `http://localhost:1234` | None | Local model server with GUI |
+| **Chutes AI** | `https://llm.chutes.ai` | `cpk_...` | Bearer auth. Fast cloud inference |
+| **Together AI** | `https://api.together.xyz` | Required | Large model catalog |
+| **Groq** | `https://api.groq.com/openai` | `gsk_...` | Ultra-fast LPU inference |
+| **OpenRouter** | `https://openrouter.ai/api` | `sk-or-...` | Multi-provider routing |
+| **Fireworks AI** | `https://api.fireworks.ai/inference` | `fw_...` | Fast serverless inference |
+| **DeepInfra** | `https://api.deepinfra.com` | Required | Cost-effective inference |
+| **Mistral AI** | `https://api.mistral.ai` | Required | Mistral models |
+| **Cerebras** | `https://api.cerebras.ai` | `csk-...` | Wafer-scale inference |
+| **SambaNova** | `https://api.sambanova.ai` | Required | RDU-accelerated inference |
+| **NVIDIA NIM** | `https://integrate.api.nvidia.com` | `nvapi-...` | NVIDIA cloud inference |
+| **Hyperbolic** | `https://api.hyperbolic.xyz` | Required | GPU cloud inference |
+| **OpenAI** | `https://api.openai.com` | `sk-...` | GPT models (tool calling) |
+
+### Connecting to a Provider
+
+Use `/endpoint` in the TUI or pass via CLI:
+
+```bash
+# Chutes AI
+/endpoint https://llm.chutes.ai --auth cpk_your_key_here
+
+# Groq
+/endpoint https://api.groq.com/openai --auth gsk_your_key_here
+
+# Together AI
+/endpoint https://api.together.xyz --auth your_key_here
+
+# Self-hosted vLLM on LAN
+/endpoint http://10.0.0.5:8000
+```
+
+The agent auto-detects the provider, normalizes the URL (strips `/v1/chat/completions` if pasted), tests connectivity, and saves the configuration. You can paste full endpoint URLs — they'll be cleaned up automatically.
+
 ## Evaluation Suite

 23 evaluation tasks test the agent's autonomous capabilities across coding, web research, SDLC analysis, and tool creation:
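For orientation, the flow the new README section describes boils down to: clean the pasted URL, pick the provider's models path, and probe it with a Bearer header. A minimal standalone sketch of that idea, not code from the package; the Groq URL and `gsk_your_key_here` are placeholder examples, and the single regex stands in for the fuller `normalizeBaseUrl` added in `dist/index.js` below:

```js
// Sketch only: what "auto-detect, normalize, test connectivity" amounts to for one pasted URL.
const pasted = "https://api.groq.com/openai/v1/chat/completions"; // full endpoint pasted by the user
const baseUrl = pasted.replace(/\/v1\/chat\/completions$/, "");    // -> "https://api.groq.com/openai"
const healthUrl = `${baseUrl}/v1/models`;                          // OpenAI-style models listing
const resp = await fetch(healthUrl, {
  headers: { Authorization: "Bearer gsk_your_key_here" },          // placeholder key
  signal: AbortSignal.timeout(1e4),
});
console.log(resp.ok ? "endpoint reachable" : `HTTP ${resp.status}`);
```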
package/dist/index.js
CHANGED
@@ -493,6 +493,107 @@ var init_sleep = __esm({
   }
 });

+// packages/backend-vllm/dist/normalizeUrl.js
+function normalizeBaseUrl(url) {
+  let u = url.trim();
+  u = u.replace(/\/+$/, "");
+  u = u.replace(/\/chat\/completions$/, "");
+  u = u.replace(/\/completions$/, "");
+  u = u.replace(/\/embeddings$/, "");
+  u = u.replace(/\/models(?:\/.*)?$/, "");
+  u = u.replace(/\/+$/, "");
+  u = u.replace(/\/v1\/openai$/, "");
+  u = u.replace(/\/+$/, "");
+  u = u.replace(/\/v1$/, "");
+  u = u.replace(/\/+$/, "");
+  return u;
+}
+function detectProvider(url) {
+  const normalized = url.trim().toLowerCase();
+  for (const { match, info } of PROVIDERS) {
+    if (match(normalized))
+      return info;
+  }
+  const isLocal = /localhost|127\.0\.0\.1|0\.0\.0\.0/i.test(normalized);
+  return {
+    id: "unknown",
+    label: isLocal ? "Local OpenAI-compatible" : "OpenAI-compatible",
+    local: isLocal,
+    authRequired: !isLocal,
+    modelsPath: isLocal ? "/v1/models" : "/v1/models"
+  };
+}
+var PROVIDERS;
+var init_normalizeUrl = __esm({
+  "packages/backend-vllm/dist/normalizeUrl.js"() {
+    "use strict";
+    PROVIDERS = [
+      // --- Cloud providers (specific domains) ---
+      {
+        match: (u) => /api\.openai\.com/i.test(u),
+        info: { id: "openai", label: "OpenAI", local: false, authRequired: true, keyPrefix: "sk-", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.together\.xyz/i.test(u),
+        info: { id: "together", label: "Together AI", local: false, authRequired: true, modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.groq\.com/i.test(u),
+        info: { id: "groq", label: "Groq", local: false, authRequired: true, keyPrefix: "gsk_", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /openrouter\.ai/i.test(u),
+        info: { id: "openrouter", label: "OpenRouter", local: false, authRequired: true, keyPrefix: "sk-or-", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.fireworks\.ai/i.test(u),
+        info: { id: "fireworks", label: "Fireworks AI", local: false, authRequired: true, keyPrefix: "fw_", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.deepinfra\.com/i.test(u),
+        info: { id: "deepinfra", label: "DeepInfra", local: false, authRequired: true, modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.mistral\.ai/i.test(u),
+        info: { id: "mistral", label: "Mistral AI", local: false, authRequired: true, modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /llm\.chutes\.ai|chutes\.ai/i.test(u),
+        info: { id: "chutes", label: "Chutes AI", local: false, authRequired: true, keyPrefix: "cpk_", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.cerebras\.ai/i.test(u),
+        info: { id: "cerebras", label: "Cerebras", local: false, authRequired: true, keyPrefix: "csk-", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.sambanova\.ai/i.test(u),
+        info: { id: "sambanova", label: "SambaNova", local: false, authRequired: true, modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /integrate\.api\.nvidia\.com/i.test(u),
+        info: { id: "nvidia", label: "NVIDIA NIM", local: false, authRequired: true, keyPrefix: "nvapi-", modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /api\.hyperbolic\.xyz/i.test(u),
+        info: { id: "hyperbolic", label: "Hyperbolic", local: false, authRequired: true, modelsPath: "/v1/models" }
+      },
+      // --- Local providers (port-based detection) ---
+      {
+        match: (u) => /(?:localhost|127\.0\.0\.1|0\.0\.0\.0):11434/i.test(u),
+        info: { id: "ollama", label: "Ollama (local)", local: true, authRequired: false, modelsPath: "/api/tags" }
+      },
+      {
+        match: (u) => /(?:localhost|127\.0\.0\.1|0\.0\.0\.0):1234/i.test(u),
+        info: { id: "lmstudio", label: "LM Studio (local)", local: true, authRequired: false, modelsPath: "/v1/models" }
+      },
+      {
+        match: (u) => /(?:localhost|127\.0\.0\.1|0\.0\.0\.0):8000/i.test(u),
+        info: { id: "vllm", label: "vLLM (local)", local: true, authRequired: false, modelsPath: "/v1/models" }
+      }
+    ];
+  }
+});
+
 // packages/backend-vllm/dist/VllmBackend.js
 function agentMessageToChatMessage(msg) {
   return {
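As a quick sanity check on the new module, here is how the two helpers behave on a few made-up inputs, following the regexes and provider table above (illustrative only, not part of the bundle):

```js
normalizeBaseUrl("https://api.groq.com/openai/v1/chat/completions/");
// -> "https://api.groq.com/openai"  (trailing slashes, /chat/completions and /v1 stripped in turn)
normalizeBaseUrl("http://localhost:11434/v1/");
// -> "http://localhost:11434"

detectProvider("https://llm.chutes.ai/v1").label;    // -> "Chutes AI" (domain match)
detectProvider("http://localhost:11434").modelsPath; // -> "/api/tags" (Ollama health-check path)
detectProvider("http://192.168.1.20:5000");          // no match: falls through to the generic entry
// { id: "unknown", label: "OpenAI-compatible", local: false, authRequired: true, modelsPath: "/v1/models" }
```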
@@ -508,6 +609,7 @@ var init_VllmBackend = __esm({
   "packages/backend-vllm/dist/VllmBackend.js"() {
     "use strict";
     init_sleep();
+    init_normalizeUrl();
     VllmBackend = class {
       baseUrl;
       model;
@@ -516,7 +618,7 @@ var init_VllmBackend = __esm({
       maxRetries;
       sleepManager;
       constructor(config) {
-        this.baseUrl = config.baseUrl
+        this.baseUrl = normalizeBaseUrl(config.baseUrl);
         this.model = config.model;
         this.apiKey = config.apiKey ?? "dummy-key";
         this.timeoutMs = config.timeoutMs ?? 6e4;
@@ -661,6 +763,7 @@ var OllamaBackend;
 var init_OllamaBackend = __esm({
   "packages/backend-vllm/dist/OllamaBackend.js"() {
     "use strict";
+    init_normalizeUrl();
     OllamaBackend = class _OllamaBackend {
       baseUrl;
       model;
@@ -668,7 +771,7 @@ var init_OllamaBackend = __esm({
       maxRetries;
       static DEFAULT_BASE_URL = "http://localhost:11434";
       constructor(config) {
-        this.baseUrl = (config.baseUrl ?? _OllamaBackend.DEFAULT_BASE_URL)
+        this.baseUrl = normalizeBaseUrl(config.baseUrl ?? _OllamaBackend.DEFAULT_BASE_URL);
         this.model = config.model;
         this.timeoutMs = config.timeoutMs ?? 3e5;
         this.maxRetries = config.maxRetries ?? 3;
@@ -761,7 +864,7 @@ var init_OllamaBackend = __esm({
        * @returns Array of model name strings (e.g., ["qwen3.5:122b", "llama3:8b"])
        */
       static async listModels(baseUrl) {
-        const resolvedUrl = (baseUrl ?? _OllamaBackend.DEFAULT_BASE_URL)
+        const resolvedUrl = normalizeBaseUrl(baseUrl ?? _OllamaBackend.DEFAULT_BASE_URL);
         const url = `${resolvedUrl}/api/tags`;
         const response = await fetch(url, { method: "GET" });
         if (!response.ok) {
@@ -1091,6 +1194,7 @@ var init_dist = __esm({
     init_metrics();
     init_sleep();
     init_routing();
+    init_normalizeUrl();
   }
 });

@@ -7879,6 +7983,7 @@ var SYSTEM_PROMPT, AgenticRunner, OllamaAgenticBackend;
 var init_agenticRunner = __esm({
   "packages/orchestrator/dist/agenticRunner.js"() {
     "use strict";
+    init_dist();
     SYSTEM_PROMPT = `You are Open Agent, an expert coding assistant that works iteratively using tools. You solve coding tasks by exploring code, making precise changes, and validating results through test execution.

 ## Available Tools
@@ -9070,9 +9175,19 @@ ${newerSummary}` : newerSummary;
   OllamaAgenticBackend = class {
     baseUrl;
     model;
-
-
+    apiKey;
+    constructor(baseUrl, model, apiKey) {
+      this.baseUrl = normalizeBaseUrl(baseUrl);
       this.model = model;
+      this.apiKey = apiKey ?? "";
+    }
+    /** Build auth headers — all providers use standard Bearer token auth. */
+    authHeaders() {
+      const headers = { "Content-Type": "application/json" };
+      if (this.apiKey) {
+        headers["Authorization"] = `Bearer ${this.apiKey}`;
+      }
+      return headers;
     }
     async chatCompletion(request) {
       const body = {
@@ -9085,7 +9200,7 @@ ${newerSummary}` : newerSummary;
       };
       const resp = await fetch(`${this.baseUrl}/v1/chat/completions`, {
         method: "POST",
-        headers:
+        headers: this.authHeaders(),
         body: JSON.stringify(body),
         signal: AbortSignal.timeout(request.timeoutMs)
       });
@@ -9146,7 +9261,7 @@ ${newerSummary}` : newerSummary;
       };
       const resp = await fetch(`${this.baseUrl}/v1/chat/completions`, {
         method: "POST",
-        headers:
+        headers: this.authHeaders(),
         body: JSON.stringify(body),
         signal: AbortSignal.timeout(request.timeoutMs)
       });
@@ -9703,7 +9818,7 @@ var init_listen = __esm({

 // packages/cli/dist/tui/model-picker.js
 async function fetchOllamaModels(baseUrl) {
-  const url = `${baseUrl
+  const url = `${normalizeBaseUrl(baseUrl)}/api/tags`;
   const resp = await fetch(url, {
     signal: AbortSignal.timeout(1e4)
   });
@@ -9766,6 +9881,7 @@ function formatRelativeTime(iso) {
 var init_model_picker = __esm({
   "packages/cli/dist/tui/model-picker.js"() {
     "use strict";
+    init_dist();
   }
 });

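For reference, a hypothetical construction of the updated agentic backend and the headers it now attaches (the model id and key below are placeholders, not values from the package):

```js
// Hypothetical usage; values are placeholders.
const backend = new OllamaAgenticBackend(
  "https://api.together.xyz/v1", // normalizeBaseUrl() strips the trailing /v1
  "example-model-id",
  "example_api_key"
);
backend.authHeaders();
// -> { "Content-Type": "application/json", "Authorization": "Bearer example_api_key" }
// Without a key only Content-Type is sent, so unauthenticated local Ollama/vLLM servers still work.
```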
@@ -11010,7 +11126,7 @@ async function promptForCustomEndpoint(config, rl) {
     }
     return config.model;
   }
-  const cleanUrl = endpoint
+  const cleanUrl = normalizeBaseUrl(endpoint);
   const needsKey = await ask(rl, `
 ${c2.bold("Does this endpoint require an API key?")} (y/n) `);
   let apiKey = "";
@@ -11025,29 +11141,35 @@ async function promptForCustomEndpoint(config, rl) {
 `);
   const modelName = await ask(rl, ` ${c2.bold("Model name")} (Enter for ${c2.dim(config.model)}): `);
   const chosenModel = modelName || config.model;
+  const provider = detectProvider(endpoint);
   process.stdout.write(`
-  ${c2.cyan("\u25CF")}
+  ${c2.cyan("\u25CF")} Detected provider: ${c2.bold(provider.label)}
+`);
+  process.stdout.write(`  ${c2.cyan("\u25CF")} Testing endpoint ${c2.bold(cleanUrl)}...
 `);
   let testOk = false;
   try {
-    const testUrl =
+    const testUrl = `${cleanUrl}${provider.modelsPath}`;
     const headers = { "Content-Type": "application/json" };
-    if (apiKey)
+    if (apiKey) {
       headers["Authorization"] = `Bearer ${apiKey}`;
+    }
     const resp = await fetch(testUrl, { headers, signal: AbortSignal.timeout(1e4) });
     if (resp.ok) {
       process.stdout.write(`  ${c2.green("\u2714")} Endpoint reachable.
 `);
       testOk = true;
     } else {
-
-
-
-
+      if (provider.id !== "ollama") {
+        try {
+          const ollamaResp = await fetch(`${cleanUrl}/api/tags`, { signal: AbortSignal.timeout(1e4) });
+          if (ollamaResp.ok) {
+            process.stdout.write(`  ${c2.green("\u2714")} Ollama endpoint detected.
 `);
-
+            testOk = true;
+          }
+        } catch {
         }
-  } catch {
       }
       if (!testOk) {
         process.stdout.write(`  ${c2.yellow("\u26A0")} Endpoint returned HTTP ${resp.status}
@@ -11059,6 +11181,10 @@ async function promptForCustomEndpoint(config, rl) {
 `);
   }
   if (!testOk) {
+    if (provider.authRequired && !apiKey) {
+      process.stdout.write(`  ${c2.dim(`${provider.label} typically requires an API key.`)}
+`);
+    }
     const startAnyway = await ask(rl, `
 ${c2.bold("Endpoint unreachable. Start anyway?")} (y/n) `);
     if (startAnyway.toLowerCase() !== "y" && startAnyway.toLowerCase() !== "yes") {
@@ -11073,10 +11199,12 @@ async function promptForCustomEndpoint(config, rl) {
   if (apiKey) {
     setConfigValue("apiKey", apiKey);
   }
-  const backendType =
+  const backendType = provider.id === "ollama" ? "ollama" : "vllm";
   setConfigValue("backendType", backendType);
   process.stdout.write(`
 ${c2.green("\u2714")} Configured: ${c2.bold(chosenModel)} at ${c2.bold(cleanUrl)}
+`);
+  process.stdout.write(`  ${c2.green("\u2714")} Provider: ${c2.bold(provider.label)}
 `);
   if (apiKey)
     process.stdout.write(`  ${c2.green("\u2714")} API key saved.
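Two illustrative endpoints make the wizard's test URL concrete; if that first probe fails and the provider is not already Ollama, the code above retries `${cleanUrl}/api/tags` to catch Ollama servers it did not recognize by port (example URLs only):

```js
const groq = detectProvider("https://api.groq.com/openai");
`${normalizeBaseUrl("https://api.groq.com/openai")}${groq.modelsPath}`;
// -> "https://api.groq.com/openai/v1/models"

const ollama = detectProvider("http://localhost:11434");
`${normalizeBaseUrl("http://localhost:11434/v1")}${ollama.modelsPath}`;
// -> "http://localhost:11434/api/tags"
```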
@@ -11405,6 +11533,7 @@ var init_setup = __esm({
     init_model_picker();
     init_render();
     init_config();
+    init_dist();
     QWEN_VARIANTS = [
       { tag: "qwen3.5:0.8b", sizeGB: 1, label: "0.8B params (1.0 GB)", cloud: false },
       { tag: "qwen3.5:2b", sizeGB: 2.7, label: "2B params (2.7 GB)", cloud: false },
@@ -11696,9 +11825,12 @@ async function showModelPicker(ctx) {
 }
 async function handleEndpoint(arg, ctx, local = false) {
   if (!arg) {
+    const currentProvider = detectProvider(ctx.config.backendUrl);
     process.stdout.write(`
 ${c2.bold("Current endpoint:")}

+`);
+    process.stdout.write(`  ${c2.cyan("Provider".padEnd(12))} ${currentProvider.label}
 `);
     process.stdout.write(`  ${c2.cyan("URL".padEnd(12))} ${ctx.config.backendUrl}
 `);
@@ -11709,11 +11841,15 @@ async function handleEndpoint(arg, ctx, local = false) {
     process.stdout.write(`
 ${c2.dim("Usage: /endpoint <url> [--auth <token>]")}
 `);
-    process.stdout.write(` ${c2.dim(" /endpoint http://localhost:11434
+    process.stdout.write(` ${c2.dim(" /endpoint http://localhost:11434 Ollama")}
 `);
-    process.stdout.write(` ${c2.dim(" /endpoint
+    process.stdout.write(` ${c2.dim(" /endpoint https://llm.chutes.ai --auth cpk_... Chutes AI")}
 `);
-    process.stdout.write(` ${c2.dim(" /endpoint
+    process.stdout.write(` ${c2.dim(" /endpoint https://api.groq.com/openai --auth gsk_... Groq")}
+`);
+    process.stdout.write(` ${c2.dim(" /endpoint https://api.together.xyz --auth ... Together AI")}
+`);
+    process.stdout.write(` ${c2.dim(" /endpoint http://localhost:8000 vLLM")}

 `);
     return;
@@ -11731,17 +11867,19 @@ async function handleEndpoint(arg, ctx, local = false) {
     renderError(`Invalid URL: "${url}"`);
     return;
   }
-
-
-
-  }
+  const normalizedUrl = normalizeBaseUrl(url);
+  const provider = detectProvider(url);
+  const backendType = provider.id === "ollama" ? "ollama" : "vllm";
   process.stdout.write(`
-  ${c2.dim("
+  ${c2.dim("Detected:")} ${c2.bold(provider.label)}
+`);
+  process.stdout.write(`  ${c2.dim("Testing connection...")} `);
   try {
-    const healthUrl =
+    const healthUrl = `${normalizedUrl}${provider.modelsPath}`;
     const headers = {};
-    if (apiKey)
+    if (apiKey) {
       headers["Authorization"] = `Bearer ${apiKey}`;
+    }
     const resp = await fetch(healthUrl, {
       headers,
       signal: AbortSignal.timeout(1e4)
@@ -11754,14 +11892,17 @@ async function handleEndpoint(arg, ctx, local = false) {
     process.stdout.write(`${c2.yellow("\u26A0")} Could not verify
 `);
     renderWarning(`Endpoint may not be reachable: ${err instanceof Error ? err.message : String(err)}`);
+    if (provider.authRequired && !apiKey) {
+      renderInfo(`${provider.label} typically requires an API key. Use: /endpoint ${url} --auth <key>`);
+    }
     renderInfo("Setting endpoint anyway \u2014 it may come online later.");
   }
-  ctx.setEndpoint(
-  const endpointSettings = { backendUrl:
+  ctx.setEndpoint(normalizedUrl, backendType, apiKey);
+  const endpointSettings = { backendUrl: normalizedUrl, backendType, ...apiKey ? { apiKey } : {} };
   if (local) {
     ctx.saveLocalSettings(endpointSettings);
   } else {
-    setConfigValue("backendUrl",
+    setConfigValue("backendUrl", normalizedUrl);
     setConfigValue("backendType", backendType);
     if (apiKey) {
       setConfigValue("apiKey", apiKey);
@@ -11771,15 +11912,17 @@ async function handleEndpoint(arg, ctx, local = false) {
   process.stdout.write(`
 ${c2.green("\u2714")} Endpoint updated and saved${local ? " (project-local)" : ""}:
 `);
-  process.stdout.write(` ${c2.cyan("
+  process.stdout.write(`  ${c2.cyan("Provider".padEnd(12))} ${provider.label}
+`);
+  process.stdout.write(`  ${c2.cyan("URL".padEnd(12))} ${normalizedUrl}
 `);
-  process.stdout.write(` ${c2.cyan("Type".padEnd(
+  process.stdout.write(`  ${c2.cyan("Type".padEnd(12))} ${backendType}
 `);
   if (apiKey) {
-    process.stdout.write(` ${c2.cyan("Auth".padEnd(
+    process.stdout.write(`  ${c2.cyan("Auth".padEnd(12))} Bearer ${apiKey.slice(0, 8)}...
 `);
   } else {
-    process.stdout.write(` ${c2.cyan("Auth".padEnd(
+    process.stdout.write(`  ${c2.cyan("Auth".padEnd(12))} ${provider.authRequired ? c2.yellow("none (may be required)") : "none"}
 `);
   }
   process.stdout.write("\n");
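Net effect of the `/endpoint` changes: anything detected as Ollama keeps the native backend, every other provider is routed through the OpenAI-compatible vLLM backend, and the normalized URL, backend type, and optional key are what get persisted. Roughly, for two example invocations (values illustrative, shape taken from the `endpointSettings` object above):

```js
// /endpoint https://api.groq.com/openai --auth gsk_example
({ backendUrl: "https://api.groq.com/openai", backendType: "vllm", apiKey: "gsk_example" });

// /endpoint http://localhost:11434
({ backendUrl: "http://localhost:11434", backendType: "ollama" });
```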
@@ -11912,6 +12055,7 @@ var init_commands = __esm({
     init_updater();
     init_oa_directory();
     init_setup();
+    init_dist();
   }
 });

@@ -14575,7 +14719,7 @@ ${result.summary}`;
   }
   /** Run a dream agent with appropriate tools */
   async runDreamAgent(prompt, toolMode, onEvent) {
-    const backend = new OllamaAgenticBackend(this.config.backendUrl.
+    const backend = new OllamaAgenticBackend(this.config.backendUrl, this.config.model, this.config.apiKey);
     const projectCtx = buildProjectContext(this.repoRoot);
     const dynamicContext = formatContextForPrompt(projectCtx);
     const runner = new AgenticRunner(backend, {
@@ -15251,7 +15395,7 @@ function createSubAgentTool(config, repoRoot) {
   if (!task) {
     return { success: false, output: "", error: "task is required" };
   }
-  const backend = new OllamaAgenticBackend(config.backendUrl.
+  const backend = new OllamaAgenticBackend(config.backendUrl, config.model, config.apiKey);
   const subRunner = new AgenticRunner(backend, {
     maxTurns,
     maxTokens: 16384,
@@ -15306,7 +15450,7 @@ Use task_status("${taskId}") or task_output("${taskId}") to check progress.`
 function startTask(task, config, repoRoot, voice, stream, taskStores, bruteForce, statusBar, sudoCallback) {
   const projectCtx = buildProjectContext(repoRoot, taskStores?.contextStores);
   const dynamicContext = formatContextForPrompt(projectCtx);
-  const backend = new OllamaAgenticBackend(config.backendUrl.
+  const backend = new OllamaAgenticBackend(config.backendUrl, config.model, config.apiKey);
   const runner = new AgenticRunner(backend, {
     maxTurns: 60,
     maxTokens: 16384,
@@ -15553,16 +15697,20 @@ async function startInteractive(config, repoPath) {
   }
   if (!isResumed) {
     try {
-      const
+      const baseUrl = normalizeBaseUrl(config.backendUrl);
+      const provider = detectProvider(config.backendUrl);
+      const healthUrl = `${baseUrl}${provider.modelsPath}`;
       const headers = {};
-      if (config.apiKey)
+      if (config.apiKey) {
        headers["Authorization"] = `Bearer ${config.apiKey}`;
+      }
      const resp = await fetch(healthUrl, { headers, signal: AbortSignal.timeout(1e4) });
      if (!resp.ok)
        throw new Error(`HTTP ${resp.status}`);
     } catch {
-
-
+      const provider = detectProvider(config.backendUrl);
+      renderWarning(`Cannot reach ${provider.label} at ${config.backendUrl}`);
+      if (provider.id === "ollama") {
        renderInfo("Start Ollama with: ollama serve");
      }
      renderInfo("Use /endpoint to configure a different backend. Starting anyway...");
@@ -16169,16 +16317,20 @@ async function runWithTUI(task, config, repoPath) {
   }
   }
   try {
-    const
+    const baseUrl2 = normalizeBaseUrl(config.backendUrl);
+    const provider2 = detectProvider(config.backendUrl);
+    const healthUrl = `${baseUrl2}${provider2.modelsPath}`;
     const headers = {};
-    if (config.apiKey)
+    if (config.apiKey) {
      headers["Authorization"] = `Bearer ${config.apiKey}`;
+    }
    const resp = await fetch(healthUrl, { headers, signal: AbortSignal.timeout(1e4) });
    if (!resp.ok)
      throw new Error(`HTTP ${resp.status}`);
   } catch {
-
-
+    const provider2 = detectProvider(config.backendUrl);
+    renderWarning(`Cannot reach ${provider2.label} at ${config.backendUrl}`);
+    if (provider2.id === "ollama") {
      renderInfo("Start Ollama with: ollama serve");
    }
    renderInfo("The agent will retry when you submit a task. Use /endpoint to reconfigure.");
@@ -16199,6 +16351,7 @@ var init_interactive = __esm({
     "use strict";
     init_dist5();
     init_dist2();
+    init_dist();
     init_listen();
     init_config();
     init_updater();
package/package.json
CHANGED