open-agents-ai 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +526 -21
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -101,7 +101,7 @@ var init_config = __esm({
101
101
  "use strict";
102
102
  DEFAULT_CONFIG = Object.freeze({
103
103
  backendUrl: "http://localhost:11434",
104
- model: "qwen3.5:122b",
104
+ model: "qwen3.5:latest",
105
105
  backendType: "ollama",
106
106
  apiKey: "",
107
107
  maxRetries: 3,
@@ -5066,9 +5066,12 @@ function renderCompactHeader(model) {
5066
5066
  }
5067
5067
  function renderSlashHelp() {
5068
5068
  const commands = [
5069
- ["/model", "Select from available Ollama models"],
5069
+ ["/model", "Select from available models"],
5070
5070
  ["/model <name>", "Switch to a specific model"],
5071
- ["/models", "List all available Ollama models"],
5071
+ ["/models", "List all available models"],
5072
+ ["/endpoint", "Show current endpoint"],
5073
+ ["/endpoint <url>", "Set backend URL (auto-detects type)"],
5074
+ ["/endpoint <url> --auth <t>", "Set endpoint with Bearer auth"],
5072
5075
  ["/config", "Show current configuration"],
5073
5076
  ["/verbose", "Toggle verbose mode"],
5074
5077
  ["/clear", "Clear the screen"],
@@ -5260,6 +5263,10 @@ async function handleSlashCommand(input, ctx) {
5260
5263
  case "models":
5261
5264
  await listModels(ctx);
5262
5265
  return "handled";
5266
+ case "endpoint":
5267
+ case "ep":
5268
+ await handleEndpoint(arg, ctx);
5269
+ return "handled";
5263
5270
  default:
5264
5271
  renderWarning(`Unknown command: /${cmd}. Type /help for available commands.`);
5265
5272
  return "handled";
@@ -5285,6 +5292,90 @@ async function showModelPicker(ctx) {
5285
5292
  renderError(`Failed to fetch models: ${err instanceof Error ? err.message : String(err)}`);
5286
5293
  }
5287
5294
  }
5295
+ async function handleEndpoint(arg, ctx) {
5296
+ if (!arg) {
5297
+ process.stdout.write(`
5298
+ ${c2.bold("Current endpoint:")}
5299
+
5300
+ `);
5301
+ process.stdout.write(` ${c2.cyan("URL".padEnd(12))} ${ctx.config.backendUrl}
5302
+ `);
5303
+ process.stdout.write(` ${c2.cyan("Type".padEnd(12))} ${ctx.config.backendType}
5304
+ `);
5305
+ process.stdout.write(` ${c2.cyan("Auth".padEnd(12))} ${ctx.config.apiKey ? "Bearer token set" : "none"}
5306
+ `);
5307
+ process.stdout.write(`
5308
+ ${c2.dim("Usage: /endpoint <url> [--auth <token>]")}
5309
+ `);
5310
+ process.stdout.write(` ${c2.dim(" /endpoint http://localhost:11434 (Ollama, no auth)")}
5311
+ `);
5312
+ process.stdout.write(` ${c2.dim(" /endpoint http://remote:8000/v1 --auth sk-... (OpenAI-compatible)")}
5313
+ `);
5314
+ process.stdout.write(` ${c2.dim(" /endpoint http://remote:8000/v1 (OpenAI-compatible, no auth)")}
5315
+
5316
+ `);
5317
+ return;
5318
+ }
5319
+ const parts = arg.split(/\s+/);
5320
+ const url = parts[0];
5321
+ let apiKey;
5322
+ const authIdx = parts.indexOf("--auth");
5323
+ if (authIdx !== -1 && parts[authIdx + 1]) {
5324
+ apiKey = parts[authIdx + 1];
5325
+ }
5326
+ try {
5327
+ new URL(url);
5328
+ } catch {
5329
+ renderError(`Invalid URL: "${url}"`);
5330
+ return;
5331
+ }
5332
+ let backendType = "ollama";
5333
+ if (url.includes("/v1") || url.includes(":8000") || apiKey) {
5334
+ backendType = "vllm";
5335
+ }
5336
+ process.stdout.write(`
5337
+ ${c2.dim("Testing connection...")} `);
5338
+ try {
5339
+ const healthUrl = backendType === "ollama" ? `${url.replace(/\/$/, "")}/api/tags` : `${url.replace(/\/$/, "")}/models`;
5340
+ const headers = {};
5341
+ if (apiKey)
5342
+ headers["Authorization"] = `Bearer ${apiKey}`;
5343
+ const resp = await fetch(healthUrl, {
5344
+ headers,
5345
+ signal: AbortSignal.timeout(1e4)
5346
+ });
5347
+ if (!resp.ok)
5348
+ throw new Error(`HTTP ${resp.status}`);
5349
+ process.stdout.write(`${c2.green("\u2714")} Connected
5350
+ `);
5351
+ } catch (err) {
5352
+ process.stdout.write(`${c2.yellow("\u26A0")} Could not verify
5353
+ `);
5354
+ renderWarning(`Endpoint may not be reachable: ${err instanceof Error ? err.message : String(err)}`);
5355
+ renderInfo("Setting endpoint anyway \u2014 it may come online later.");
5356
+ }
5357
+ ctx.setEndpoint(url, backendType, apiKey);
5358
+ setConfigValue("backendUrl", url);
5359
+ setConfigValue("backendType", backendType);
5360
+ if (apiKey) {
5361
+ setConfigValue("apiKey", apiKey);
5362
+ }
5363
+ process.stdout.write(`
5364
+ ${c2.green("\u2714")} Endpoint updated and saved:
5365
+ `);
5366
+ process.stdout.write(` ${c2.cyan("URL".padEnd(8))} ${url}
5367
+ `);
5368
+ process.stdout.write(` ${c2.cyan("Type".padEnd(8))} ${backendType}
5369
+ `);
5370
+ if (apiKey) {
5371
+ process.stdout.write(` ${c2.cyan("Auth".padEnd(8))} Bearer ${apiKey.slice(0, 8)}...
5372
+ `);
5373
+ } else {
5374
+ process.stdout.write(` ${c2.cyan("Auth".padEnd(8))} none
5375
+ `);
5376
+ }
5377
+ process.stdout.write("\n");
5378
+ }
5288
5379
  async function switchModel(query, ctx) {
5289
5380
  try {
5290
5381
  const models = await fetchOllamaModels(ctx.config.backendUrl);
@@ -5309,11 +5400,399 @@ var init_commands = __esm({
5309
5400
  "use strict";
5310
5401
  init_model_picker();
5311
5402
  init_render();
5403
+ init_config();
5312
5404
  }
5313
5405
  });
5314
5406
 
5315
- // packages/cli/dist/tui/interactive.js
5407
+ // packages/cli/dist/tui/setup.js
5316
5408
  import * as readline from "node:readline";
5409
+ import { execSync } from "node:child_process";
5410
+ import { existsSync as existsSync2, writeFileSync as writeFileSync2, mkdirSync as mkdirSync2 } from "node:fs";
5411
+ import { join as join7 } from "node:path";
5412
+ import { homedir as homedir2 } from "node:os";
5413
+ function detectSystemSpecs() {
5414
+ let totalRamGB = 0;
5415
+ let availableRamGB = 0;
5416
+ let gpuVramGB = 0;
5417
+ let gpuName = "";
5418
+ try {
5419
+ const memInfo = execSync("free -b 2>/dev/null || sysctl -n hw.memsize 2>/dev/null", {
5420
+ encoding: "utf8",
5421
+ timeout: 5e3
5422
+ });
5423
+ if (memInfo.includes("Mem:")) {
5424
+ const match = memInfo.match(/^Mem:\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+(\d+)/m);
5425
+ if (match) {
5426
+ totalRamGB = parseInt(match[1], 10) / 1024 ** 3;
5427
+ availableRamGB = parseInt(match[2], 10) / 1024 ** 3;
5428
+ }
5429
+ } else {
5430
+ const bytes = parseInt(memInfo.trim(), 10);
5431
+ if (!isNaN(bytes)) {
5432
+ totalRamGB = bytes / 1024 ** 3;
5433
+ availableRamGB = totalRamGB * 0.7;
5434
+ }
5435
+ }
5436
+ } catch {
5437
+ }
5438
+ try {
5439
+ const nvidiaSmi = execSync("nvidia-smi --query-gpu=memory.total,name --format=csv,noheader,nounits 2>/dev/null", { encoding: "utf8", timeout: 5e3 });
5440
+ const lines = nvidiaSmi.trim().split("\n");
5441
+ if (lines.length > 0) {
5442
+ for (const line of lines) {
5443
+ const parts = line.split(",").map((s) => s.trim());
5444
+ const vramMB = parseInt(parts[0] ?? "0", 10);
5445
+ if (!isNaN(vramMB))
5446
+ gpuVramGB += vramMB / 1024;
5447
+ if (!gpuName && parts[1])
5448
+ gpuName = parts[1];
5449
+ }
5450
+ }
5451
+ } catch {
5452
+ }
5453
+ return {
5454
+ totalRamGB: Math.round(totalRamGB * 10) / 10,
5455
+ availableRamGB: Math.round(availableRamGB * 10) / 10,
5456
+ gpuVramGB: Math.round(gpuVramGB * 10) / 10,
5457
+ gpuName
5458
+ };
5459
+ }
5460
+ function recommendModel(specs) {
5461
+ const effectiveGB = Math.max(specs.gpuVramGB, specs.availableRamGB);
5462
+ const budget = effectiveGB * 0.8;
5463
+ const localVariants = QWEN_VARIANTS.filter((v) => !v.cloud);
5464
+ for (let i = localVariants.length - 1; i >= 0; i--) {
5465
+ if (localVariants[i].sizeGB <= budget) {
5466
+ return localVariants[i];
5467
+ }
5468
+ }
5469
+ return QWEN_VARIANTS.find((v) => v.tag === "qwen3.5:cloud");
5470
+ }
5471
+ function calculateContextWindow(specs, modelSizeGB) {
5472
+ const totalAvail = Math.max(specs.gpuVramGB, specs.totalRamGB);
5473
+ const remaining = totalAvail - modelSizeGB;
5474
+ if (remaining >= 200)
5475
+ return { numCtx: 131072, label: "128K" };
5476
+ if (remaining >= 100)
5477
+ return { numCtx: 65536, label: "64K" };
5478
+ if (remaining >= 50)
5479
+ return { numCtx: 32768, label: "32K" };
5480
+ if (remaining >= 20)
5481
+ return { numCtx: 16384, label: "16K" };
5482
+ if (remaining >= 8)
5483
+ return { numCtx: 8192, label: "8K" };
5484
+ return { numCtx: 4096, label: "4K" };
5485
+ }
5486
+ function modelSupportsToolCalling(modelName) {
5487
+ const lower = modelName.toLowerCase();
5488
+ for (const known of TOOL_CALLING_MODELS) {
5489
+ if (lower.startsWith(known) || lower.includes(known))
5490
+ return true;
5491
+ }
5492
+ return false;
5493
+ }
5494
+ function ask(rl, question) {
5495
+ return new Promise((resolve11) => {
5496
+ rl.question(question, (answer) => resolve11(answer.trim()));
5497
+ });
5498
+ }
5499
+ function pullModelWithAutoUpdate(tag) {
5500
+ try {
5501
+ execSync(`ollama pull ${tag}`, {
5502
+ stdio: "inherit",
5503
+ timeout: 36e5
5504
+ // 1 hour max
5505
+ });
5506
+ } catch (err) {
5507
+ const errMsg = err instanceof Error ? err.message : String(err);
5508
+ const stderr = err?.stderr?.toString?.() ?? errMsg;
5509
+ const combined = errMsg + "\n" + stderr;
5510
+ if (combined.includes("412") || combined.includes("newer version") || combined.includes("requires a newer version")) {
5511
+ process.stdout.write(`
5512
+ ${c2.yellow("\u26A0")} Ollama needs to be updated for this model.
5513
+ `);
5514
+ process.stdout.write(` ${c2.cyan("\u25CF")} Updating Ollama via official install script...
5515
+
5516
+ `);
5517
+ try {
5518
+ execSync("curl -fsSL https://ollama.com/install.sh | sh", {
5519
+ stdio: "inherit",
5520
+ timeout: 3e5
5521
+ // 5 min max for install
5522
+ });
5523
+ process.stdout.write(`
5524
+ ${c2.green("\u2714")} Ollama updated successfully.
5525
+ `);
5526
+ process.stdout.write(` ${c2.cyan("\u25CF")} Retrying pull of ${c2.bold(tag)}...
5527
+
5528
+ `);
5529
+ execSync(`ollama pull ${tag}`, {
5530
+ stdio: "inherit",
5531
+ timeout: 36e5
5532
+ });
5533
+ } catch (updateErr) {
5534
+ const updateMsg = updateErr instanceof Error ? updateErr.message : String(updateErr);
5535
+ throw new Error(`Failed to update Ollama and retry pull: ${updateMsg}
5536
+ Try manually:
5537
+ curl -fsSL https://ollama.com/install.sh | sh
5538
+ ollama pull ${tag}`);
5539
+ }
5540
+ } else {
5541
+ throw err;
5542
+ }
5543
+ }
5544
+ }
5545
+ async function runSetupWizard(config) {
5546
+ const rl = readline.createInterface({
5547
+ input: process.stdin,
5548
+ output: process.stdout,
5549
+ terminal: true
5550
+ });
5551
+ try {
5552
+ return await doSetup(config, rl);
5553
+ } finally {
5554
+ rl.close();
5555
+ }
5556
+ }
5557
+ async function doSetup(config, rl) {
5558
+ process.stdout.write(`
5559
+ ${c2.bold(c2.cyan("open-agents"))}
5560
+ `);
5561
+ process.stdout.write(` ${c2.dim("\u2500".repeat(60))}
5562
+ `);
5563
+ process.stdout.write(` ${c2.bold("First-run setup")}
5564
+
5565
+ `);
5566
+ process.stdout.write(` ${c2.cyan("\u25CF")} Detecting system specs...
5567
+ `);
5568
+ const specs = detectSystemSpecs();
5569
+ process.stdout.write(` ${c2.dim(" RAM:")} ${specs.totalRamGB.toFixed(1)} GB total, ${specs.availableRamGB.toFixed(1)} GB available
5570
+ `);
5571
+ if (specs.gpuVramGB > 0) {
5572
+ process.stdout.write(` ${c2.dim(" GPU:")} ${specs.gpuName || "NVIDIA"} \u2014 ${specs.gpuVramGB.toFixed(1)} GB VRAM
5573
+ `);
5574
+ } else {
5575
+ process.stdout.write(` ${c2.dim(" GPU:")} No NVIDIA GPU detected (CPU inference)
5576
+ `);
5577
+ }
5578
+ process.stdout.write("\n");
5579
+ let models = [];
5580
+ try {
5581
+ models = await fetchOllamaModels(config.backendUrl);
5582
+ } catch {
5583
+ renderError(`Cannot reach Ollama at ${config.backendUrl}`);
5584
+ renderInfo("Start Ollama with: ollama serve");
5585
+ renderInfo("Or use /endpoint to configure a remote backend after startup.");
5586
+ const answer = await ask(rl, `
5587
+ ${c2.bold("Continue without Ollama?")} (y/n) `);
5588
+ if (answer.toLowerCase() !== "y")
5589
+ return null;
5590
+ return config.model;
5591
+ }
5592
+ const currentModel = findModel(models, config.model);
5593
+ if (currentModel) {
5594
+ process.stdout.write(` ${c2.green("\u2714")} Model ${c2.bold(currentModel.name)} is available.
5595
+
5596
+ `);
5597
+ return currentModel.name;
5598
+ }
5599
+ process.stdout.write(` ${c2.yellow("\u26A0")} Default model ${c2.bold(config.model)} is not available.
5600
+
5601
+ `);
5602
+ const toolCallingModels = models.filter((m) => modelSupportsToolCalling(m.name));
5603
+ if (toolCallingModels.length > 0) {
5604
+ process.stdout.write(` ${c2.cyan("\u25CF")} Found ${toolCallingModels.length} model(s) with tool-calling support:
5605
+
5606
+ `);
5607
+ for (let i = 0; i < Math.min(toolCallingModels.length, 10); i++) {
5608
+ const m = toolCallingModels[i];
5609
+ process.stdout.write(` ${c2.bold(String(i + 1))}. ${m.name} ${c2.dim(`(${m.size})`)}
5610
+ `);
5611
+ }
5612
+ process.stdout.write(`
5613
+ ${c2.dim("0")}. Pull a new qwen3.5 model instead
5614
+ `);
5615
+ process.stdout.write("\n");
5616
+ const choice = await ask(rl, ` ${c2.bold("Select a model")} (1-${Math.min(toolCallingModels.length, 10)}, or 0 to pull new): `);
5617
+ const idx = parseInt(choice, 10);
5618
+ if (idx > 0 && idx <= toolCallingModels.length) {
5619
+ const selected = toolCallingModels[idx - 1];
5620
+ setConfigValue("model", selected.name);
5621
+ process.stdout.write(`
5622
+ ${c2.green("\u2714")} Selected ${c2.bold(selected.name)}. Saved to config.
5623
+
5624
+ `);
5625
+ return selected.name;
5626
+ }
5627
+ } else {
5628
+ process.stdout.write(` ${c2.yellow("\u26A0")} No tool-calling capable models found on this system.
5629
+
5630
+ `);
5631
+ }
5632
+ const recommended = recommendModel(specs);
5633
+ process.stdout.write(` ${c2.cyan("\u25CF")} Recommended model based on your system:
5634
+
5635
+ `);
5636
+ const localVariants = QWEN_VARIANTS.filter((v) => !v.cloud);
5637
+ for (let i = 0; i < localVariants.length; i++) {
5638
+ const v = localVariants[i];
5639
+ const fits = v.sizeGB <= Math.max(specs.gpuVramGB, specs.availableRamGB) * 0.8;
5640
+ const isRec = v.tag === recommended.tag;
5641
+ const marker = isRec ? c2.green("\u2192") : fits ? c2.dim(" ") : c2.red("\u2716");
5642
+ const name = isRec ? c2.bold(c2.green(v.tag)) : fits ? v.tag : c2.dim(v.tag);
5643
+ const label = isRec ? c2.bold(v.label) : c2.dim(v.label);
5644
+ const tooLarge = !fits && !v.cloud ? c2.red(" (exceeds available memory)") : "";
5645
+ process.stdout.write(` ${marker} ${String(i + 1).padStart(2)}. ${name.padEnd(isRec ? 45 : 25)} ${label}${tooLarge}
5646
+ `);
5647
+ }
5648
+ process.stdout.write(`
5649
+ ${c2.dim(" ")} ${String(localVariants.length + 1).padStart(2)}. ${c2.dim("qwen3.5:cloud")} ${c2.dim("Ollama Cloud")}
5650
+ `);
5651
+ process.stdout.write(` ${c2.dim(" ")} ${String(localVariants.length + 2).padStart(2)}. ${c2.dim("qwen3.5:397b-cloud")} ${c2.dim("397B Ollama Cloud")}
5652
+ `);
5653
+ process.stdout.write("\n");
5654
+ const pullChoice = await ask(rl, ` ${c2.bold("Select a model to pull")} (1-${localVariants.length + 2}, or Enter for recommended): `);
5655
+ const pullIdx = pullChoice ? parseInt(pullChoice, 10) : 0;
5656
+ let selectedVariant;
5657
+ if (pullIdx === 0 || isNaN(pullIdx)) {
5658
+ selectedVariant = recommended;
5659
+ } else if (pullIdx <= localVariants.length) {
5660
+ selectedVariant = localVariants[pullIdx - 1];
5661
+ } else if (pullIdx === localVariants.length + 1) {
5662
+ selectedVariant = QWEN_VARIANTS.find((v) => v.tag === "qwen3.5:cloud");
5663
+ } else {
5664
+ selectedVariant = QWEN_VARIANTS.find((v) => v.tag === "qwen3.5:397b-cloud");
5665
+ }
5666
+ const confirmPull = await ask(rl, `
5667
+ Pull ${c2.bold(selectedVariant.tag)} (${selectedVariant.label})? (Y/n) `);
5668
+ if (confirmPull.toLowerCase() === "n") {
5669
+ process.stdout.write(`
5670
+ ${c2.dim("Skipping model pull. You can pull manually with: ollama pull <model>")}
5671
+
5672
+ `);
5673
+ return config.model;
5674
+ }
5675
+ process.stdout.write(`
5676
+ ${c2.cyan("\u25CF")} Pulling ${c2.bold(selectedVariant.tag)}... (this may take a while)
5677
+ `);
5678
+ try {
5679
+ pullModelWithAutoUpdate(selectedVariant.tag);
5680
+ process.stdout.write(`
5681
+ ${c2.green("\u2714")} Model ${c2.bold(selectedVariant.tag)} pulled successfully.
5682
+
5683
+ `);
5684
+ } catch (err) {
5685
+ renderError(`Failed to pull model: ${err instanceof Error ? err.message : String(err)}`);
5686
+ renderInfo("Try manually: ollama pull " + selectedVariant.tag);
5687
+ return config.model;
5688
+ }
5689
+ if (!selectedVariant.cloud) {
5690
+ const ctx = calculateContextWindow(specs, selectedVariant.sizeGB);
5691
+ const customName = `open-agents-${selectedVariant.tag.replace(":", "-").replace(".", "")}`;
5692
+ process.stdout.write(` ${c2.cyan("\u25CF")} Context window recommendation: ${c2.bold(ctx.label)} (${ctx.numCtx} tokens)
5693
+ `);
5694
+ process.stdout.write(` ${c2.dim(`Based on ${specs.totalRamGB.toFixed(0)} GB RAM, ${selectedVariant.sizeGB} GB model`)}
5695
+
5696
+ `);
5697
+ const createModelfile = await ask(rl, ` Create optimized model "${c2.bold(customName)}" with ${ctx.label} context? (Y/n) `);
5698
+ if (createModelfile.toLowerCase() !== "n") {
5699
+ try {
5700
+ const modelfileContent = [
5701
+ `FROM ${selectedVariant.tag}`,
5702
+ `PARAMETER num_ctx ${ctx.numCtx}`,
5703
+ `PARAMETER temperature 0`,
5704
+ `PARAMETER num_predict 16384`,
5705
+ `PARAMETER stop "<|endoftext|>"`
5706
+ ].join("\n");
5707
+ const modelDir = join7(homedir2(), ".open-agents", "models");
5708
+ mkdirSync2(modelDir, { recursive: true });
5709
+ const modelfilePath = join7(modelDir, `Modelfile.${customName}`);
5710
+ writeFileSync2(modelfilePath, modelfileContent + "\n", "utf8");
5711
+ process.stdout.write(` ${c2.dim("Creating model...")} `);
5712
+ execSync(`ollama create ${customName} -f ${modelfilePath}`, {
5713
+ stdio: "pipe",
5714
+ timeout: 12e4
5715
+ });
5716
+ process.stdout.write(`${c2.green("\u2714")}
5717
+ `);
5718
+ setConfigValue("model", customName);
5719
+ process.stdout.write(`
5720
+ ${c2.green("\u2714")} Model ${c2.bold(customName)} created with ${ctx.label} context.
5721
+ `);
5722
+ process.stdout.write(` ${c2.green("\u2714")} Saved as default model in config.
5723
+
5724
+ `);
5725
+ return customName;
5726
+ } catch (err) {
5727
+ renderWarning(`Could not create custom model: ${err instanceof Error ? err.message : String(err)}`);
5728
+ renderInfo(`Using base model ${selectedVariant.tag} instead.`);
5729
+ }
5730
+ }
5731
+ setConfigValue("model", selectedVariant.tag);
5732
+ process.stdout.write(`
5733
+ ${c2.green("\u2714")} Saved ${c2.bold(selectedVariant.tag)} as default model.
5734
+
5735
+ `);
5736
+ return selectedVariant.tag;
5737
+ }
5738
+ setConfigValue("model", selectedVariant.tag);
5739
+ process.stdout.write(`
5740
+ ${c2.green("\u2714")} Saved ${c2.bold(selectedVariant.tag)} as default model.
5741
+
5742
+ `);
5743
+ return selectedVariant.tag;
5744
+ }
5745
+ async function isModelAvailable(config) {
5746
+ try {
5747
+ const models = await fetchOllamaModels(config.backendUrl);
5748
+ return !!findModel(models, config.model);
5749
+ } catch {
5750
+ return false;
5751
+ }
5752
+ }
5753
+ function isFirstRun() {
5754
+ try {
5755
+ return !existsSync2(join7(homedir2(), ".open-agents", "config.json"));
5756
+ } catch {
5757
+ return true;
5758
+ }
5759
+ }
5760
+ var QWEN_VARIANTS, TOOL_CALLING_MODELS;
5761
+ var init_setup = __esm({
5762
+ "packages/cli/dist/tui/setup.js"() {
5763
+ "use strict";
5764
+ init_model_picker();
5765
+ init_render();
5766
+ init_config();
5767
+ QWEN_VARIANTS = [
5768
+ { tag: "qwen3.5:0.8b", sizeGB: 1, label: "0.8B params (1.0 GB)", cloud: false },
5769
+ { tag: "qwen3.5:2b", sizeGB: 2.7, label: "2B params (2.7 GB)", cloud: false },
5770
+ { tag: "qwen3.5:4b", sizeGB: 3.4, label: "4B params (3.4 GB)", cloud: false },
5771
+ { tag: "qwen3.5:9b", sizeGB: 6.6, label: "9B params (6.6 GB) \u2014 recommended minimum", cloud: false },
5772
+ { tag: "qwen3.5:27b", sizeGB: 17, label: "27B params (17 GB)", cloud: false },
5773
+ { tag: "qwen3.5:35b", sizeGB: 24, label: "35B params (24 GB)", cloud: false },
5774
+ { tag: "qwen3.5:122b", sizeGB: 81, label: "122B params (81 GB) \u2014 best local", cloud: false },
5775
+ { tag: "qwen3.5:cloud", sizeGB: 0, label: "Cloud (Ollama Cloud)", cloud: true },
5776
+ { tag: "qwen3.5:397b-cloud", sizeGB: 0, label: "397B Cloud (Ollama Cloud)", cloud: true }
5777
+ ];
5778
+ TOOL_CALLING_MODELS = /* @__PURE__ */ new Set([
5779
+ "qwen3.5",
5780
+ "qwen3",
5781
+ "qwen2.5",
5782
+ "llama3.3",
5783
+ "llama3.1",
5784
+ "mistral",
5785
+ "mixtral",
5786
+ "command-r",
5787
+ "gemma3",
5788
+ "devstral",
5789
+ "deepseek"
5790
+ ]);
5791
+ }
5792
+ });
5793
+
5794
+ // packages/cli/dist/tui/interactive.js
5795
+ import * as readline2 from "node:readline";
5317
5796
  import { cwd } from "node:process";
5318
5797
  import { resolve as resolve9 } from "node:path";
5319
5798
  function adaptTool(tool) {
@@ -5402,6 +5881,14 @@ async function runTask(task, config, repoRoot) {
5402
5881
  }
5403
5882
  async function startInteractive(config, repoPath) {
5404
5883
  const repoRoot = resolve9(repoPath ?? cwd());
5884
+ const needsSetup = isFirstRun() || !await isModelAvailable(config);
5885
+ if (needsSetup && config.backendType === "ollama") {
5886
+ const setupModel = await runSetupWizard(config);
5887
+ if (setupModel === null) {
5888
+ process.exit(0);
5889
+ }
5890
+ config = { ...config, model: setupModel };
5891
+ }
5405
5892
  try {
5406
5893
  const healthUrl = config.backendType === "ollama" ? `${config.backendUrl}/api/tags` : `${config.backendUrl}/v1/models`;
5407
5894
  const resp = await fetch(healthUrl, { signal: AbortSignal.timeout(1e4) });
@@ -5412,11 +5899,12 @@ async function startInteractive(config, repoPath) {
5412
5899
  if (config.backendType === "ollama") {
5413
5900
  renderInfo("Start Ollama with: ollama serve");
5414
5901
  }
5902
+ renderInfo("Use /endpoint to configure a different backend.");
5415
5903
  process.exit(1);
5416
5904
  }
5417
5905
  renderHeader(config.model);
5418
5906
  let currentConfig = { ...config };
5419
- const rl = readline.createInterface({
5907
+ const rl = readline2.createInterface({
5420
5908
  input: process.stdin,
5421
5909
  output: process.stdout,
5422
5910
  prompt: `${c2.bold(c2.blue("> "))}`,
@@ -5433,6 +5921,14 @@ async function startInteractive(config, repoPath) {
5433
5921
  setVerbose(verbose) {
5434
5922
  currentConfig = { ...currentConfig, verbose };
5435
5923
  },
5924
+ setEndpoint(url, backendType, apiKey) {
5925
+ currentConfig = {
5926
+ ...currentConfig,
5927
+ backendUrl: url,
5928
+ backendType,
5929
+ ...apiKey !== void 0 ? { apiKey } : {}
5930
+ };
5931
+ },
5436
5932
  clearScreen() {
5437
5933
  process.stdout.write("\x1B[2J\x1B[H");
5438
5934
  renderCompactHeader(currentConfig.model);
@@ -5485,6 +5981,14 @@ ${c2.dim("(Use /quit to exit)")}
5485
5981
  }
5486
5982
  async function runWithTUI(task, config, repoPath) {
5487
5983
  const repoRoot = resolve9(repoPath ?? cwd());
5984
+ const needsSetup = isFirstRun() || !await isModelAvailable(config);
5985
+ if (needsSetup && config.backendType === "ollama") {
5986
+ const setupModel = await runSetupWizard(config);
5987
+ if (setupModel === null) {
5988
+ process.exit(0);
5989
+ }
5990
+ config = { ...config, model: setupModel };
5991
+ }
5488
5992
  try {
5489
5993
  const healthUrl = config.backendType === "ollama" ? `${config.backendUrl}/api/tags` : `${config.backendUrl}/v1/models`;
5490
5994
  const resp = await fetch(healthUrl, { signal: AbortSignal.timeout(1e4) });
@@ -5512,6 +6016,7 @@ var init_interactive = __esm({
5512
6016
  init_dist5();
5513
6017
  init_dist2();
5514
6018
  init_commands();
6019
+ init_setup();
5515
6020
  init_render();
5516
6021
  }
5517
6022
  });
@@ -5547,7 +6052,7 @@ import { glob } from "glob";
5547
6052
  import ignore from "ignore";
5548
6053
  import { readFile as readFile7, stat as stat2 } from "node:fs/promises";
5549
6054
  import { createHash } from "node:crypto";
5550
- import { join as join7, relative, extname as extname2, basename } from "node:path";
6055
+ import { join as join8, relative, extname as extname2, basename } from "node:path";
5551
6056
  var DEFAULT_EXCLUDE, LANGUAGE_MAP, CodebaseIndexer;
5552
6057
  var init_codebase_indexer = __esm({
5553
6058
  "packages/indexer/dist/codebase-indexer.js"() {
@@ -5591,7 +6096,7 @@ var init_codebase_indexer = __esm({
5591
6096
  const ig = ignore.default();
5592
6097
  if (this.config.respectGitignore) {
5593
6098
  try {
5594
- const gitignoreContent = await readFile7(join7(this.config.rootDir, ".gitignore"), "utf-8");
6099
+ const gitignoreContent = await readFile7(join8(this.config.rootDir, ".gitignore"), "utf-8");
5595
6100
  ig.add(gitignoreContent);
5596
6101
  } catch {
5597
6102
  }
@@ -5606,7 +6111,7 @@ var init_codebase_indexer = __esm({
5606
6111
  for (const relativePath of files) {
5607
6112
  if (ig.ignores(relativePath))
5608
6113
  continue;
5609
- const fullPath = join7(this.config.rootDir, relativePath);
6114
+ const fullPath = join8(this.config.rootDir, relativePath);
5610
6115
  try {
5611
6116
  const fileStat = await stat2(fullPath);
5612
6117
  if (fileStat.size > this.config.maxFileSize)
@@ -5652,7 +6157,7 @@ var init_codebase_indexer = __esm({
5652
6157
  if (!child) {
5653
6158
  child = {
5654
6159
  name: part,
5655
- path: join7(current.path, part),
6160
+ path: join8(current.path, part),
5656
6161
  type: "directory",
5657
6162
  children: []
5658
6163
  };
@@ -5727,13 +6232,13 @@ __export(index_repo_exports, {
5727
6232
  indexRepoCommand: () => indexRepoCommand
5728
6233
  });
5729
6234
  import { resolve as resolve10 } from "node:path";
5730
- import { existsSync as existsSync2, statSync as statSync2 } from "node:fs";
6235
+ import { existsSync as existsSync3, statSync as statSync2 } from "node:fs";
5731
6236
  import { cwd as cwd2 } from "node:process";
5732
6237
  async function indexRepoCommand(opts, _config) {
5733
6238
  const repoRoot = resolve10(opts.repoPath ?? cwd2());
5734
6239
  printHeader("Index Repository");
5735
6240
  printInfo(`Indexing: ${repoRoot}`);
5736
- if (!existsSync2(repoRoot)) {
6241
+ if (!existsSync3(repoRoot)) {
5737
6242
  printError(`Path does not exist: ${repoRoot}`);
5738
6243
  process.exit(1);
5739
6244
  }
@@ -5979,8 +6484,8 @@ var config_exports = {};
5979
6484
  __export(config_exports, {
5980
6485
  configCommand: () => configCommand
5981
6486
  });
5982
- import { join as join8 } from "node:path";
5983
- import { homedir as homedir2 } from "node:os";
6487
+ import { join as join9 } from "node:path";
6488
+ import { homedir as homedir3 } from "node:os";
5984
6489
  async function configCommand(opts, config) {
5985
6490
  if (opts.subCommand === "set") {
5986
6491
  return handleSet(opts, config);
@@ -6002,7 +6507,7 @@ function handleShow(opts, config) {
6002
6507
  printKeyValue("verbose", String(config.verbose), 2);
6003
6508
  printKeyValue("dbPath", config.dbPath, 2);
6004
6509
  printSection("Config File");
6005
- printInfo(`~/.open-agents/config.json (${join8(homedir2(), ".open-agents", "config.json")})`);
6510
+ printInfo(`~/.open-agents/config.json (${join9(homedir3(), ".open-agents", "config.json")})`);
6006
6511
  printSection("Environment Variables");
6007
6512
  printInfo("OPEN_AGENTS_BACKEND_URL \u2014 override backendUrl");
6008
6513
  printInfo("OPEN_AGENTS_MODEL \u2014 override model");
@@ -6234,8 +6739,8 @@ __export(eval_exports, {
6234
6739
  evalCommand: () => evalCommand
6235
6740
  });
6236
6741
  import { tmpdir } from "node:os";
6237
- import { mkdirSync as mkdirSync2, writeFileSync as writeFileSync2 } from "node:fs";
6238
- import { join as join9 } from "node:path";
6742
+ import { mkdirSync as mkdirSync3, writeFileSync as writeFileSync3 } from "node:fs";
6743
+ import { join as join10 } from "node:path";
6239
6744
  async function evalCommand(opts, config) {
6240
6745
  const suiteName = opts.suite ?? "basic";
6241
6746
  const suite = SUITES[suiteName];
@@ -6356,9 +6861,9 @@ async function evalCommand(opts, config) {
6356
6861
  process.exit(failed > 0 ? 1 : 0);
6357
6862
  }
6358
6863
  function createTempEvalRepo() {
6359
- const dir = join9(tmpdir(), `open-agents-eval-${Date.now()}`);
6360
- mkdirSync2(dir, { recursive: true });
6361
- writeFileSync2(join9(dir, "package.json"), JSON.stringify({ name: "eval-repo", version: "0.0.0" }, null, 2) + "\n", "utf8");
6864
+ const dir = join10(tmpdir(), `open-agents-eval-${Date.now()}`);
6865
+ mkdirSync3(dir, { recursive: true });
6866
+ writeFileSync3(join10(dir, "package.json"), JSON.stringify({ name: "eval-repo", version: "0.0.0" }, null, 2) + "\n", "utf8");
6362
6867
  return dir;
6363
6868
  }
6364
6869
  var BASIC_SUITE, FULL_SUITE, SUITES;
@@ -6417,7 +6922,7 @@ init_output();
6417
6922
  import { parseArgs as nodeParseArgs2 } from "node:util";
6418
6923
  import { createRequire } from "node:module";
6419
6924
  import { fileURLToPath } from "node:url";
6420
- import { dirname as dirname2, join as join10 } from "node:path";
6925
+ import { dirname as dirname2, join as join11 } from "node:path";
6421
6926
 
6422
6927
  // packages/cli/dist/cli.js
6423
6928
  import { createInterface } from "node:readline";
@@ -6524,7 +7029,7 @@ init_output();
6524
7029
  function getVersion() {
6525
7030
  try {
6526
7031
  const require2 = createRequire(import.meta.url);
6527
- const pkgPath = join10(dirname2(fileURLToPath(import.meta.url)), "..", "package.json");
7032
+ const pkgPath = join11(dirname2(fileURLToPath(import.meta.url)), "..", "package.json");
6528
7033
  const pkg = require2(pkgPath);
6529
7034
  return pkg.version;
6530
7035
  } catch {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "open-agents-ai",
3
- "version": "0.1.0",
3
+ "version": "0.2.1",
4
4
  "description": "AI coding agent powered by open-source models (Ollama/vLLM) — Claude Code-style TUI with agentic tool-calling loop",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",