open-agents-ai 0.187.312 → 0.187.313

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +94 -10
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -299376,8 +299376,26 @@ The session corrections MUST become hard rules in the SKILL.md Rules section.`;
299376
299376
  onEnter: (item, { done }) => {
299377
299377
  (async () => {
299378
299378
  if (item.key === "__kill__") {
299379
- await doFetch("/v1/scheduled/kill", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({}) });
299380
- renderInfo2("Kill signal sent to OA scheduler processes.");
299379
+ const resp = await doFetch("/v1/scheduled/kill", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({}) });
299380
+ try {
299381
+ const j = await resp.json();
299382
+ const kb = Array.isArray(j.killed) ? j.killed.length : 0;
299383
+ const ka = Array.isArray(j.additionally) ? j.additionally.length : 0;
299384
+ renderInfo2(`Killed ${kb + ka} processes (schedulers + active runs).`);
299385
+ const before = j.gpu_before?.[0];
299386
+ const after = j.gpu_after?.[0];
299387
+ if (before && after) {
299388
+ renderInfo2(`GPU util: ${before.gpu_pct}% → ${after.gpu_pct}%, VRAM: ${before.vram_used_gb}/${before.vram_total_gb} GB → ${after.vram_used_gb}/${after.vram_total_gb} GB`);
299389
+ }
299390
+ const rem = Array.isArray(j.procs_after) ? j.procs_after.length : 0;
299391
+ if (rem > 0) {
299392
+ renderWarning2(`Remaining matched processes: ${rem}`);
299393
+ } else {
299394
+ renderInfo2("No remaining matched processes.");
299395
+ }
299396
+ } catch {
299397
+ renderInfo2("Kill signal sent.");
299398
+ }
299381
299399
  done();
299382
299400
  return;
299383
299401
  }
@@ -321792,9 +321810,24 @@ async function loadScheduled() {
321792
321810
 
321793
321811
  (window as any).killScheduled = async function() {
321794
321812
  try {
321795
- await fetch('/v1/scheduled/kill', { method:'POST', headers: headers(), body: JSON.stringify({}) });
321796
- alert('Kill signal sent to OA scheduler processes');
321797
- } catch {}
321813
+ const r = await fetch('/v1/scheduled/kill', { method:'POST', headers: headers(), body: JSON.stringify({}) });
321814
+ const j = await r.json();
321815
+ const kb = Array.isArray(j.killed) ? j.killed.length : 0;
321816
+ const ka = Array.isArray(j.additionally) ? j.additionally.length : 0;
321817
+ const before = j.gpu_before && j.gpu_before[0] ? j.gpu_before[0] : null;
321818
+ const after = j.gpu_after && j.gpu_after[0] ? j.gpu_after[0] : null;
321819
+ const rem = Array.isArray(j.procs_after) ? j.procs_after.length : 0;
321820
+ let msg = 'Killed ' + (kb + ka) + ' processes.';
321821
+ if (before && after) {
321822
+ msg += '\nGPU util: ' + before.gpu_pct + '% → ' + after.gpu_pct + '%, VRAM: ' + before.vram_used_gb + '/' + before.vram_total_gb + ' GB → ' + after.vram_used_gb + '/' + after.vram_total_gb + ' GB';
321823
321824
+ }
321825
+ msg += rem > 0 ? ('\nRemaining matched processes: ' + rem) : '\nNo remaining matched processes.';
321826
321827
321828
+ alert(msg);
321829
+ loadScheduled();
321830
+ } catch (e) { alert('Kill failed: ' + (e && e.message || String(e))); }
321798
321831
  }
321799
321832
 
321800
321833
  (window as any).disableAllScheduled = async function() {
@@ -327541,6 +327574,8 @@ async function handleRequest(req2, res, ollamaUrl, verbose) {
327541
327574
  const body = await parseJsonBody(req2);
327542
327575
  const pids = Array.isArray(body?.pids) ? body.pids.filter((n2) => Number.isInteger(n2)) : void 0;
327543
327576
  const pattern = typeof body?.pattern === "string" && body.pattern.trim() ? body.pattern.trim() : "(/bin/oa|open-agents-ai|nexus-daemon|OPEN-AGENTS-SCHEDULED|ollama)";
327577
+ const procsBefore = listMatchingProcesses(pattern);
327578
+ const gpuBefore = sampleGpuUtil();
327544
327579
  const killed = killScheduledProcesses(pids, pattern);
327545
327580
  const additionally = [];
327546
327581
  try {
@@ -327553,23 +327588,35 @@ async function handleRequest(req2, res, ollamaUrl, verbose) {
327553
327588
  } catch {
327554
327589
  additionally.push({ pid, ok: false, signal: "TERM", run_id: rid });
327555
327590
  }
327556
- }
327557
- try {
327558
- if (pid > 0) {
327591
+ try {
327559
327592
  process.kill(pid, 0);
327560
327593
  try {
327561
327594
  process.kill(pid, "SIGKILL");
327562
327595
  additionally.push({ pid, ok: true, signal: "KILL", run_id: rid });
327563
327596
  } catch {
327564
327597
  }
327598
+ } catch {
327565
327599
  }
327566
- } catch {
327567
327600
  }
327568
327601
  runningProcesses.delete(rid);
327569
327602
  }
327570
327603
  } catch {
327571
327604
  }
327572
- jsonResponse(res, 200, { killed_count: killed.length + additionally.length, killed, additionally });
327605
+ try {
327606
+ await new Promise((r2) => setTimeout(r2, 600));
327607
+ } catch {
327608
+ }
327609
+ const procsAfter = listMatchingProcesses(pattern);
327610
+ const gpuAfter = sampleGpuUtil();
327611
+ jsonResponse(res, 200, {
327612
+ killed_count: killed.length + additionally.length,
327613
+ killed,
327614
+ additionally,
327615
+ procs_before: procsBefore,
327616
+ procs_after: procsAfter,
327617
+ gpu_before: gpuBefore,
327618
+ gpu_after: gpuAfter
327619
+ });
327573
327620
  return;
327574
327621
  }
327575
327622
  if ((pathname === "/v1/chat" || pathname === "/api/chat") && method === "POST") {
@@ -328577,6 +328624,43 @@ function killScheduledProcesses(pids, pattern) {
328577
328624
  }
328578
328625
  return killed;
328579
328626
  }
328627
+ function listMatchingProcesses(pattern) {
328628
+ const list = [];
328629
+ try {
328630
+ const { execSync: es } = __require("node:child_process");
328631
+ const re = new RegExp(pattern, "i");
328632
+ const ps = es("ps -eo pid,pcpu,pmem,command", { encoding: "utf8", stdio: "pipe" });
328633
+ for (const line of ps.split("\n")) {
328634
+ const m2 = line.trim().match(/^(\d+)\s+([0-9.]+)?\s+([0-9.]+)?\s+(.+)$/);
328635
+ if (!m2) continue;
328636
+ const pid = parseInt(m2[1], 10);
328637
+ const cpu = m2[2] ? parseFloat(m2[2]) : null;
328638
+ const mem = m2[3] ? parseFloat(m2[3]) : null;
328639
+ const cmd = m2[4] || "";
328640
+ if (!isFinite(pid)) continue;
328641
+ if (re.test(cmd)) list.push({ pid, cpu, mem, cmd });
328642
+ }
328643
+ } catch {
328644
+ }
328645
+ return list;
328646
+ }
328647
+ function sampleGpuUtil() {
328648
+ try {
328649
+ const { execSync: es } = __require("node:child_process");
328650
+ const out = es("nvidia-smi --query-gpu=utilization.gpu,memory.used,memory.total --format=csv,noheader,nounits", { encoding: "utf8", timeout: 3e3, stdio: "pipe" });
328651
+ const arr = [];
328652
+ for (const line of out.trim().split("\n")) {
328653
+ const [pctS, usedS, totalS] = line.split(",").map((s2) => s2.trim());
328654
+ const pct = parseInt(pctS, 10);
328655
+ const usedGb = Math.round(parseInt(usedS, 10) / 1024 * 10) / 10;
328656
+ const totalGb = Math.round(parseInt(totalS, 10) / 1024);
328657
+ if (!isNaN(pct) && !isNaN(usedGb) && !isNaN(totalGb)) arr.push({ gpu_pct: pct, vram_used_gb: usedGb, vram_total_gb: totalGb });
328658
+ }
328659
+ return arr;
328660
+ } catch {
328661
+ return null;
328662
+ }
328663
+ }
328580
328664
  function startApiServer(options2 = {}) {
328581
328665
  if (options2.quiet) setQuiet(true);
328582
328666
  const log22 = options2.quiet ? (_msg) => {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "open-agents-ai",
3
- "version": "0.187.312",
3
+ "version": "0.187.313",
4
4
  "description": "AI coding agent powered by open-source models (Ollama/vLLM) — interactive TUI with agentic tool-calling loop",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",