open-agents-ai 0.187.514 → 0.187.516

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -525083,14 +525083,31 @@ var init_errorClusterTracker = __esm({
525083
525083
  // Style B: file(line,col): severity CODE: message (TypeScript)
525084
525084
  {
525085
525085
  re: /^([^\s(]+):?\s*\((\d+),(\d+)\)\s*:?\s*(error|warning|note)\s+([A-Z]+\d+)\s*:\s*(.*)$/gm,
525086
- build: (m2) => ({
525087
- file: m2[1],
525088
- line: parseInt(m2[2], 10),
525089
- col: parseInt(m2[3], 10),
525090
- severity: m2[4],
525091
- code: m2[5],
525092
- message: (m2[6] ?? "").trim().slice(0, 240)
525093
- })
525086
+ build: (m2) => {
525087
+ const code8 = m2[5];
525088
+ const message2 = (m2[6] ?? "").trim().slice(0, 240);
525089
+ let hint;
525090
+ if ((code8 === "TS2305" || code8 === "TS2724") && process.env["OA_DISABLE_TS_HINT"] !== "1") {
525091
+ const dym = message2.match(/Did you mean ['"]([^'"]+)['"]\??/);
525092
+ if (dym) {
525093
+ hint = `TS suggests "${dym[1]}" exists in the imported module, but with a different identifier. Before retrying:
525094
+ (1) file_read the imported module's source
525095
+ (2) check whether the suggested name is declared with 'interface'/'type' (TYPE-only — cannot be used as a runtime value) or with 'class'/'function'/'const'/'let'/'var' (RUNTIME value).
525096
+ If your import expects a runtime value but the suggested name is a type, ADD a runtime export to the source module (e.g. 'export const X = ...' or 'export class X {...}'). Renaming the import will only fix the error if the suggested name is ALREADY a runtime value.`;
525097
+ }
525098
+ }
525099
+ const out = {
525100
+ file: m2[1],
525101
+ line: parseInt(m2[2], 10),
525102
+ col: parseInt(m2[3], 10),
525103
+ severity: m2[4],
525104
+ code: code8,
525105
+ message: message2
525106
+ };
525107
+ if (hint)
525108
+ out.hint = hint;
525109
+ return out;
525110
+ }
525094
525111
  },
525095
525112
  // Style C: error[CODE]: message ... --> file:line:col (Rust-style)
525096
525113
  {
@@ -525153,16 +525170,19 @@ var init_errorClusterTracker = __esm({
525153
525170
  for (const e2 of errors) {
525154
525171
  const key = clusterKey(toolKey, e2.file, e2.code);
525155
525172
  const cur = observedClusters.get(key);
525156
- if (cur)
525173
+ if (cur) {
525157
525174
  cur.count += 1;
525158
- else
525159
- observedClusters.set(key, { count: 1, sample: e2.message });
525175
+ if (!cur.hint && e2.hint)
525176
+ cur.hint = e2.hint;
525177
+ } else {
525178
+ observedClusters.set(key, { count: 1, sample: e2.message, hint: e2.hint });
525179
+ }
525160
525180
  }
525161
525181
  const changes = [];
525162
525182
  const lines = [];
525163
525183
  let hasCritical = false;
525164
525184
  const nowMs = this.now();
525165
- for (const [key, { count, sample }] of observedClusters) {
525185
+ for (const [key, { count, sample, hint }] of observedClusters) {
525166
525186
  const prior = this.clusters.get(key);
525167
525187
  if (!prior) {
525168
525188
  const ck = parseClusterKey(key);
@@ -525173,7 +525193,8 @@ var init_errorClusterTracker = __esm({
525173
525193
  observations: 1,
525174
525194
  attemptsSinceCountChange: 1,
525175
525195
  lastUpdated: nowMs,
525176
- previousCount: 0
525196
+ previousCount: 0,
525197
+ hint
525177
525198
  };
525178
525199
  this.clusters.set(key, entry);
525179
525200
  changes.push({ kind: "new", cluster: entry });
@@ -525182,6 +525203,8 @@ var init_errorClusterTracker = __esm({
525182
525203
  prior.observations++;
525183
525204
  prior.lastUpdated = nowMs;
525184
525205
  prior.sample = sample;
525206
+ if (hint)
525207
+ prior.hint = hint;
525185
525208
  if (count === prior.count) {
525186
525209
  prior.attemptsSinceCountChange++;
525187
525210
  } else {
@@ -525242,6 +525265,30 @@ var init_errorClusterTracker = __esm({
525242
525265
  this.lastEmittedAt.clear();
525243
525266
  this.observationCounter = 0;
525244
525267
  }
525268
+ /**
525269
+ * RCA-3 (root-cause from batch517 stax): top-N clusters by observation
525270
+ * count, returned as shallow snapshots. Used by REG-58/59/60 to surface
525271
+ * the specific repeating error in the structured replan inject. Caller
525272
+ * must not mutate.
525273
+ */
525274
+ topClusters(n2 = 3) {
525275
+ const all2 = [...this.clusters.values()];
525276
+ all2.sort((a2, b) => b.observations - a2.observations || b.lastUpdated - a2.lastUpdated);
525277
+ const lim = Math.max(1, Math.min(20, n2));
525278
+ return all2.slice(0, lim).map((c9) => {
525279
+ const out = {
525280
+ pattern: `${c9.key.code} ${c9.sample.slice(0, 80)}`.trim(),
525281
+ count: c9.count,
525282
+ observations: c9.observations,
525283
+ sample: c9.sample.slice(0, 240),
525284
+ file: c9.key.file,
525285
+ code: c9.key.code
525286
+ };
525287
+ if (c9.hint)
525288
+ out.hint = c9.hint;
525289
+ return out;
525290
+ });
525291
+ }
525245
525292
  evictIfNeeded() {
525246
525293
  if (this.clusters.size <= this.opts.maxClusters)
525247
525294
  return;
@@ -525785,6 +525832,15 @@ var init_agenticRunner = __esm({
525785
525832
  // etc. Fires stagnation independent of failure/variant thresholds because
525786
525833
  // those metrics get diluted by successful file_reads.
525787
525834
  _lastFileWriteTurn = -1;
525835
+ // RCA-2 (root-cause from batch517 osm slow-grind): rolling timestamps of
525836
+ // recent file_write events. REG-60 detects glacial progress that REG-58's
525837
+ // binary turn-gap check misses — an agent that writes 1 file every 25 turns
525838
+ // never trips REG-58 but is still effectively stuck.
525839
+ _fileWriteTimestamps = [];
525840
+ // RCA-1 (root-cause from batch517 zombies): set true when SIGTERM is received
525841
+ // (daemon timeout). End-of-Task block uses this to set finalStatus="timeout"
525842
+ // in the session_gist instead of "abandoned".
525843
+ _aborting = false;
525788
525844
  // MEM_PATH item #9: adaptive retrieval cache. When the (goalHash, recent-tool-sig)
525789
525845
  // hasn't changed since last retrieval, skip the PPR call entirely and reuse
525790
525846
  // the previous memoryLines.
@@ -526114,6 +526170,46 @@ ${graphSummary}`,
526114
526170
  * Returns null if no session id OR no file. Used by buildPlanSkeleton
526115
526171
  * and by the turn-counter reminder.
526116
526172
  */
526173
+ /**
526174
+ * RCA-3 Part A: build a "RECENT FAILURE PATTERN" preamble for stagnation
526175
+ * replan injects. Pulls the top cluster (by observation count) from the
526176
+ * ErrorClusterTracker and includes its sample + any TS-aware hint. Returns
526177
+ * empty string when no tracker is live or no clusters exist — caller
526178
+ * concatenates the result, so empty is a valid no-op.
526179
+ *
526180
+ * Generic: works for any error pattern the cluster tracker has seen, not
526181
+ * tied to any project-specific symbol or signature.
526182
+ */
526183
+ _buildErrorContextPreamble() {
526184
+ try {
526185
+ const ec = this._errorClusterTracker;
526186
+ if (!ec || typeof ec.topClusters !== "function")
526187
+ return "";
526188
+ const top = ec.topClusters(1);
526189
+ if (!Array.isArray(top) || top.length === 0)
526190
+ return "";
526191
+ const t2 = top[0];
526192
+ const obs = typeof t2.observations === "number" ? t2.observations : 0;
526193
+ const code8 = (t2.code || "").toString();
526194
+ const sample = (t2.sample || "").toString().slice(0, 240);
526195
+ let body = `
526196
+ RECENT FAILURE PATTERN (from error-cluster tracker, hit ${obs}× in this run):
526197
+ ${code8} ${sample}
526198
+ `;
526199
+ if (t2.hint) {
526200
+ body += `
526201
+ HINT (semantic, generic):
526202
+ ${t2.hint}
526203
+ `;
526204
+ }
526205
+ body += `
526206
+ Your hypotheses MUST address this specific error, not generic causes.
526207
+ `;
526208
+ return body;
526209
+ } catch {
526210
+ return "";
526211
+ }
526212
+ }
526117
526213
  readSessionTodos() {
526118
526214
  try {
526119
526215
  const sid = process.env["OA_SESSION_ID"] || this._sessionId || "default";
@@ -527925,6 +528021,19 @@ Respond with your assessment, then take action.`;
527925
528021
  this._runErrorPatterns = [];
527926
528022
  this._runWhatWorked = [];
527927
528023
  this._lastFileWriteTurn = -1;
528024
+ this._fileWriteTimestamps = [];
528025
+ this._aborting = false;
528026
+ if (!globalThis.__oa_rca1_sigterm_installed) {
528027
+ globalThis.__oa_rca1_sigterm_installed = true;
528028
+ const _sigtermHandler = () => {
528029
+ this._aborting = true;
528030
+ setTimeout(() => process.exit(124), 5e3).unref();
528031
+ };
528032
+ try {
528033
+ process.on("SIGTERM", _sigtermHandler);
528034
+ } catch {
528035
+ }
528036
+ }
527928
528037
  this._fileRegistry.clear();
527929
528038
  this._memexArchive.clear();
527930
528039
  this._sessionId = process.env["OA_SESSION_ID"] && String(process.env["OA_SESSION_ID"]) || `session-${Date.now()}`;
@@ -528340,9 +528449,10 @@ TASK: ${task}` : task;
528340
528449
  const REG58_NO_WRITE_BUDGET = 30;
528341
528450
  if (turn > stagnationCooldownUntilTurn && this._lastFileWriteTurn >= 0 && turn - this._lastFileWriteTurn >= REG58_NO_WRITE_BUDGET && process.env["OA_DISABLE_REG58"] !== "1") {
528342
528451
  const gap = turn - this._lastFileWriteTurn;
528452
+ const _errCtx = this._buildErrorContextPreamble();
528343
528453
  const replan = `[STAGNATION REPLAN — REG-58 no-write convergence]
528344
528454
  You have made ${gap} tool calls without a single file_write/file_edit/batch_edit/file_patch since turn ${this._lastFileWriteTurn}. That is exploration-without-action. STOP retrying the current approach.
528345
-
528455
+ ` + _errCtx + `
528346
528456
  Respond with EXACTLY this structure before your next tool call:
528347
528457
  HYPOTHESES (3 distinct theories why progress stalled):
528348
528458
  1. ...
@@ -528363,6 +528473,36 @@ If the hypothesis cannot be tested by a creative edit, ask the human via task_co
528363
528473
  });
528364
528474
  this._lastFileWriteTurn = turn;
528365
528475
  }
528476
+ const REG60_WINDOW_MS = 60 * 60 * 1e3;
528477
+ const REG60_MIN_WRITES = 3;
528478
+ const REG60_COOLDOWN_TURNS = 10;
528479
+ if (turn > stagnationCooldownUntilTurn && this._runStartTime > 0 && Date.now() - this._runStartTime >= REG60_WINDOW_MS && process.env["OA_DISABLE_REG60"] !== "1") {
528480
+ const cutoff = Date.now() - REG60_WINDOW_MS;
528481
+ const recentWrites = this._fileWriteTimestamps.filter((ts) => ts >= cutoff).length;
528482
+ if (recentWrites < REG60_MIN_WRITES) {
528483
+ const _errCtx60 = this._buildErrorContextPreamble();
528484
+ const _replan60 = `[STAGNATION REPLAN — REG-60 low-write-rate]
528485
+ Over the last 60 minutes you have made only ${recentWrites} creative edit(s) (file_write/file_edit/batch_edit/file_patch). That is a glacial pace — your current approach is producing exploration without action.
528486
+ ` + _errCtx60 + `
528487
+ Respond with EXACTLY this structure before your next tool call:
528488
+ HYPOTHESES (3 NEW theories why progress stalled — must be DIFFERENT from
528489
+ anything tried so far):
528490
+ 1. ...
528491
+ 2. ...
528492
+ 3. ...
528493
+ PICK: <number 1-3>
528494
+ WHY: <one sentence>
528495
+ FALSIFICATION: <observable signal that would refute the pick>
528496
+ NEXT ACTION: <a single creative edit — file_write / file_edit / batch_edit>`;
528497
+ messages2.push({ role: "system", content: _replan60 });
528498
+ stagnationCooldownUntilTurn = turn + REG60_COOLDOWN_TURNS;
528499
+ this.emit({
528500
+ type: "status",
528501
+ content: `REG-60 LOW-WRITE-RATE — ${recentWrites} creative edits in last 60 min (threshold ${REG60_MIN_WRITES})`,
528502
+ timestamp: (/* @__PURE__ */ new Date()).toISOString()
528503
+ });
528504
+ }
528505
+ }
528366
528506
  if (turn > stagnationCooldownUntilTurn && stagnationWindow.length >= STAG_MIN_SAMPLES) {
528367
528507
  const cutoffTurn = turn - STAG_WINDOW_TURNS;
528368
528508
  const cutoffTs = Date.now() - STAG_WINDOW_MS;
@@ -530281,11 +530421,12 @@ ${criticDecision.cachedResult.slice(0, 500)}` : `[BLOCKED — the observer confi
530281
530421
  if (obs.hasCritical) {
530282
530422
  messages2.push({ role: "system", content: obs.summaryLine });
530283
530423
  if (process.env["OA_DISABLE_REG59"] !== "1") {
530424
+ const _errCtx59 = this._buildErrorContextPreamble();
530284
530425
  messages2.push({
530285
530426
  role: "system",
530286
530427
  content: `[STAGNATION REPLAN — REG-59 same-error escalation]
530287
530428
  The error cluster you keep hitting has occurred 5+ times. Your current approach is NOT working. STOP retrying it.
530288
-
530429
+ ` + _errCtx59 + `
530289
530430
  Respond with EXACTLY this structure before your next tool call:
530290
530431
  HYPOTHESES (3 NEW theories — must be DIFFERENT from anything tried so far):
530291
530432
  1. ...
@@ -531006,6 +531147,10 @@ Respond with EXACTLY this structure before your next tool call:
531006
531147
  const creativeTools = ["file_write", "file_edit", "batch_edit", "file_patch"];
531007
531148
  if (creativeTools.includes(tc.name)) {
531008
531149
  this._lastFileWriteTurn = turn;
531150
+ this._fileWriteTimestamps.push(Date.now());
531151
+ if (this._fileWriteTimestamps.length > 200) {
531152
+ this._fileWriteTimestamps.shift();
531153
+ }
531009
531154
  }
531010
531155
  }
531011
531156
  if (result && result.success === false) {
@@ -532357,8 +532502,9 @@ Full content available via: repl_exec(code="data = retrieve('${handleId}')") or
532357
532502
  toolCounts.set(t2, (toolCounts.get(t2) ?? 0) + 1);
532358
532503
  const topToolNames = [...toolCounts.entries()].sort((a2, b) => b[1] - a2[1]).slice(0, 3).map(([n2]) => n2);
532359
532504
  const whatWorked = this._runLessons.slice(-3).map((l2) => l2.whatWorked).filter(Boolean);
532505
+ const _wasTimeout = this._aborting === true;
532360
532506
  const _wasStagnationExit = !!this._pendingStagnationEntry || /^\s*BLOCKED\b/i.test(summary || "");
532361
- const finalStatus = completed ? _wasStagnationExit ? "stagnation_exit" : "complete" : "abandoned";
532507
+ const finalStatus = _wasTimeout ? "timeout" : completed ? _wasStagnationExit ? "stagnation_exit" : "complete" : "abandoned";
532362
532508
  const gist2 = memMod.buildGist({
532363
532509
  goal,
532364
532510
  durationMs: durationMs2,
@@ -595852,6 +595998,159 @@ function checkConcurrentJobLimit(auth) {
595852
595998
  }
595853
595999
  return null;
595854
596000
  }
596001
+ function adoptHandoffRuns() {
596002
+ if (process.env["OA_DISABLE_RUN_HANDOFF"] === "1") return;
596003
+ const fs7 = require3("node:fs");
596004
+ const path8 = require3("node:path");
596005
+ const home = require3("node:os").homedir();
596006
+ const handoffPath2 = path8.join(home, ".open-agents", "runs-handoff.json");
596007
+ if (!fs7.existsSync(handoffPath2)) return;
596008
+ let handoff = {};
596009
+ try {
596010
+ const raw = fs7.readFileSync(handoffPath2, "utf-8");
596011
+ handoff = JSON.parse(raw);
596012
+ } catch (e2) {
596013
+ process.stderr.write(` WARN: handoff file unreadable (${e2.message}) — discarding.
596014
+ `);
596015
+ try {
596016
+ fs7.unlinkSync(handoffPath2);
596017
+ } catch {
596018
+ }
596019
+ return;
596020
+ }
596021
+ try {
596022
+ fs7.unlinkSync(handoffPath2);
596023
+ } catch {
596024
+ }
596025
+ const runs = handoff.runs ?? [];
596026
+ if (runs.length === 0) return;
596027
+ const adopted = [];
596028
+ const orphaned = [];
596029
+ for (const r2 of runs) {
596030
+ if (!r2.pid || r2.pid <= 0) continue;
596031
+ let alive = false;
596032
+ try {
596033
+ process.kill(r2.pid, 0);
596034
+ alive = true;
596035
+ } catch {
596036
+ alive = false;
596037
+ }
596038
+ const jobsDir3 = path8.join(home, ".open-agents", "jobs");
596039
+ const jobFile = path8.join(jobsDir3, `${r2.jobId}.json`);
596040
+ let job = null;
596041
+ try {
596042
+ if (fs7.existsSync(jobFile)) job = JSON.parse(fs7.readFileSync(jobFile, "utf-8"));
596043
+ } catch {
596044
+ }
596045
+ if (!alive) {
596046
+ orphaned.push(r2.jobId);
596047
+ if (job) {
596048
+ let finalJson = "";
596049
+ try {
596050
+ if (r2.outputFile && fs7.existsSync(r2.outputFile)) {
596051
+ const raw = fs7.readFileSync(r2.outputFile, "utf-8");
596052
+ const lines = raw.trim().split("\n");
596053
+ for (let i2 = lines.length - 1; i2 >= 0; i2--) {
596054
+ if (lines[i2].trimEnd() === "{" || lines[i2].startsWith("{") && !lines[i2].startsWith('{"type":"tool_call"')) {
596055
+ finalJson = lines.slice(i2).join("\n");
596056
+ break;
596057
+ }
596058
+ }
596059
+ }
596060
+ } catch {
596061
+ }
596062
+ try {
596063
+ const parsed = finalJson ? JSON.parse(finalJson) : null;
596064
+ if (parsed && parsed.status) job.status = parsed.status;
596065
+ else job.status = "failed";
596066
+ job.completedAt = (/* @__PURE__ */ new Date()).toISOString();
596067
+ if (parsed?.summary) job.summary = parsed.summary;
596068
+ if (parsed?.error) job.error = parsed.error;
596069
+ job.adoptedAs = "orphaned";
596070
+ } catch {
596071
+ job.status = "failed";
596072
+ job.completedAt = (/* @__PURE__ */ new Date()).toISOString();
596073
+ job.adoptedAs = "orphaned";
596074
+ }
596075
+ try {
596076
+ fs7.writeFileSync(jobFile, JSON.stringify(job, null, 2), "utf-8");
596077
+ } catch {
596078
+ }
596079
+ }
596080
+ continue;
596081
+ }
596082
+ adopted.push(r2.jobId);
596083
+ const adoptedShim = {
596084
+ pid: r2.pid,
596085
+ // Stub stdio (we never read from it again — the child writes directly
596086
+ // to the on-disk output file which /v1/runs/{id}/output already reads).
596087
+ stdout: { on: () => {
596088
+ } },
596089
+ stderr: { on: () => {
596090
+ } },
596091
+ __adopted: true
596092
+ };
596093
+ runningProcesses.set(r2.jobId, adoptedShim);
596094
+ if (job) {
596095
+ job.adoptedAs = "live";
596096
+ job.adoptedAt = (/* @__PURE__ */ new Date()).toISOString();
596097
+ try {
596098
+ fs7.writeFileSync(jobFile, JSON.stringify(job, null, 2), "utf-8");
596099
+ } catch {
596100
+ }
596101
+ }
596102
+ const pollHandle = setInterval(() => {
596103
+ let stillAlive = false;
596104
+ try {
596105
+ process.kill(r2.pid, 0);
596106
+ stillAlive = true;
596107
+ } catch {
596108
+ stillAlive = false;
596109
+ }
596110
+ if (stillAlive) return;
596111
+ clearInterval(pollHandle);
596112
+ runningProcesses.delete(r2.jobId);
596113
+ try {
596114
+ const jf = jobFile;
596115
+ if (!fs7.existsSync(jf)) return;
596116
+ const j = JSON.parse(fs7.readFileSync(jf, "utf-8"));
596117
+ if (j.status !== "running") return;
596118
+ let finalJson = "";
596119
+ try {
596120
+ if (r2.outputFile && fs7.existsSync(r2.outputFile)) {
596121
+ const raw = fs7.readFileSync(r2.outputFile, "utf-8");
596122
+ const lines = raw.trim().split("\n");
596123
+ for (let i2 = lines.length - 1; i2 >= 0; i2--) {
596124
+ if (lines[i2].trimEnd() === "{" || lines[i2].startsWith("{") && !lines[i2].startsWith('{"type":"tool_call"')) {
596125
+ finalJson = lines.slice(i2).join("\n");
596126
+ break;
596127
+ }
596128
+ }
596129
+ }
596130
+ } catch {
596131
+ }
596132
+ try {
596133
+ const parsed = finalJson ? JSON.parse(finalJson) : null;
596134
+ if (parsed && parsed.status) j.status = parsed.status;
596135
+ else j.status = "completed";
596136
+ j.completedAt = (/* @__PURE__ */ new Date()).toISOString();
596137
+ if (parsed?.summary) j.summary = parsed.summary;
596138
+ if (parsed?.error) j.error = parsed.error;
596139
+ fs7.writeFileSync(jf, JSON.stringify(j, null, 2), "utf-8");
596140
+ } catch {
596141
+ }
596142
+ } catch {
596143
+ }
596144
+ }, 3e3);
596145
+ pollHandle.unref();
596146
+ }
596147
+ process.stderr.write(` Adopted ${adopted.length} live run(s) from previous daemon, ${orphaned.length} orphaned.
596148
+ `);
596149
+ if (adopted.length > 0) process.stderr.write(` live: ${adopted.join(", ")}
596150
+ `);
596151
+ if (orphaned.length > 0) process.stderr.write(` orphaned: ${orphaned.join(", ")}
596152
+ `);
596153
+ }
595855
596154
  function incrementActiveJobs(user) {
595856
596155
  if (!user) return;
595857
596156
  getKeyUsage(user).activeJobs++;
@@ -597730,6 +598029,43 @@ async function handleV1Run(req2, res) {
597730
598029
  });
597731
598030
  } catch {
597732
598031
  }
598032
+ let _rca1DeadlineFired = false;
598033
+ const _rca1KillSwitch = process.env["OA_DISABLE_DAEMON_TIMEOUT_KILL"] === "1";
598034
+ const _rca1EffectiveTimeoutS = timeout2 && timeout2 > 0 ? timeout2 : activeProfile?.limits?.timeout_s && activeProfile.limits.timeout_s > 0 ? activeProfile.limits.timeout_s : 1800;
598035
+ const _rca1Deadline = _rca1KillSwitch ? null : setTimeout(() => {
598036
+ if (job.status !== "running") return;
598037
+ _rca1DeadlineFired = true;
598038
+ job.status = "timeout";
598039
+ job.completedAt = (/* @__PURE__ */ new Date()).toISOString();
598040
+ atomicJobWrite(dir, id, job);
598041
+ try {
598042
+ if (child.pid) process.kill(-child.pid, "SIGTERM");
598043
+ } catch {
598044
+ }
598045
+ try {
598046
+ if (child.pid) process.kill(child.pid, "SIGTERM");
598047
+ } catch {
598048
+ }
598049
+ setTimeout(() => {
598050
+ try {
598051
+ if (child.pid) process.kill(-child.pid, "SIGKILL");
598052
+ } catch {
598053
+ }
598054
+ try {
598055
+ if (child.pid) process.kill(child.pid, "SIGKILL");
598056
+ } catch {
598057
+ }
598058
+ }, 3e4).unref();
598059
+ try {
598060
+ publishEvent(
598061
+ "run.timeout",
598062
+ { run_id: id, deadline_s: _rca1EffectiveTimeoutS },
598063
+ { subject: authUser, aimsControl: "A.6.2.6" }
598064
+ );
598065
+ } catch {
598066
+ }
598067
+ }, (_rca1EffectiveTimeoutS + 60) * 1e3);
598068
+ if (_rca1Deadline) _rca1Deadline.unref();
597733
598069
  if (streamMode) {
597734
598070
  res.writeHead(200, {
597735
598071
  "Content-Type": "text/event-stream",
@@ -597752,7 +598088,9 @@ async function handleV1Run(req2, res) {
597752
598088
  `);
597753
598089
  });
597754
598090
  onChildExit(child, (code8) => {
597755
- job.status = code8 === 0 ? "completed" : "failed";
598091
+ if (_rca1Deadline) clearTimeout(_rca1Deadline);
598092
+ if (_rca1DeadlineFired) {
598093
+ } else job.status = code8 === 0 ? "completed" : "failed";
597756
598094
  job.completedAt = (/* @__PURE__ */ new Date()).toISOString();
597757
598095
  atomicJobWrite(dir, id, job);
597758
598096
  runningProcesses.delete(id);
@@ -597800,6 +598138,7 @@ async function handleV1Run(req2, res) {
597800
598138
  }
597801
598139
  });
597802
598140
  onChildExit(child, (code8) => {
598141
+ if (_rca1Deadline) clearTimeout(_rca1Deadline);
597803
598142
  try {
597804
598143
  const lines = output.trim().split("\n");
597805
598144
  let finalJson = "";
@@ -597810,14 +598149,18 @@ async function handleV1Run(req2, res) {
597810
598149
  }
597811
598150
  }
597812
598151
  const result = finalJson ? JSON.parse(finalJson) : { status: code8 === 0 ? "completed" : "failed" };
597813
- job.status = result["status"] === "completed" ? "completed" : code8 === 0 ? "completed" : "failed";
598152
+ if (!_rca1DeadlineFired) {
598153
+ job.status = result["status"] === "completed" ? "completed" : code8 === 0 ? "completed" : "failed";
598154
+ }
597814
598155
  job.completedAt = (/* @__PURE__ */ new Date()).toISOString();
597815
598156
  job.summary = result["summary"];
597816
598157
  job.output = result["assistant_text"] || result["text"] || result["summary"];
597817
598158
  job.durationMs = result["durationMs"];
597818
598159
  job.error = result["error"];
597819
598160
  } catch {
597820
- job.status = code8 === 0 ? "completed" : "failed";
598161
+ if (!_rca1DeadlineFired) {
598162
+ job.status = code8 === 0 ? "completed" : "failed";
598163
+ }
597821
598164
  job.completedAt = (/* @__PURE__ */ new Date()).toISOString();
597822
598165
  }
597823
598166
  atomicJobWrite(dir, id, job);
@@ -601424,6 +601767,12 @@ function startApiServer(options2 = {}) {
601424
601767
  `);
601425
601768
  log22(` Primary: ${config.backendUrl} (${config.backendType || "ollama"})
601426
601769
  `);
601770
+ try {
601771
+ adoptHandoffRuns();
601772
+ } catch (e2) {
601773
+ log22(` WARN: handoff adoption crashed: ${e2.message}
601774
+ `);
601775
+ }
601427
601776
  const _retCheck = process.env["OA_RUN_RETENTION_H"];
601428
601777
  const _retOff = _retCheck === "0";
601429
601778
  if (!_retOff) {
@@ -601470,8 +601819,96 @@ function startApiServer(options2 = {}) {
601470
601819
  }).catch(() => {
601471
601820
  });
601472
601821
  });
601473
- const shutdown = () => {
601474
- log22("\n Shutting down API server...\n");
601822
+ const shutdown = (signal) => {
601823
+ const hardKill = signal === "SIGINT" || process.env["OA_DAEMON_HARD_SHUTDOWN"] === "1";
601824
+ log22(`
601825
+ Shutting down API server (signal=${signal}, mode=${hardKill ? "hard-kill" : "graceful-handoff"}) ...
601826
+ `);
601827
+ if (!hardKill && runningProcesses.size > 0) {
601828
+ try {
601829
+ const handoffPath2 = (() => {
601830
+ try {
601831
+ const home = require3("node:os").homedir();
601832
+ return require3("node:path").join(home, ".open-agents", "runs-handoff.json");
601833
+ } catch {
601834
+ return "";
601835
+ }
601836
+ })();
601837
+ if (handoffPath2) {
601838
+ const handoff = [];
601839
+ for (const [id, child] of runningProcesses) {
601840
+ const pid = child.pid;
601841
+ if (!pid || pid <= 0) continue;
601842
+ let outputFile = null;
601843
+ let startedAt2 = null;
601844
+ let user = null;
601845
+ let scope = null;
601846
+ let cwd5 = null;
601847
+ try {
601848
+ const fs7 = require3("node:fs");
601849
+ const path8 = require3("node:path");
601850
+ const home = require3("node:os").homedir();
601851
+ const jobsDir3 = path8.join(home, ".open-agents", "jobs");
601852
+ const jobFile = path8.join(jobsDir3, `${id}.json`);
601853
+ if (fs7.existsSync(jobFile)) {
601854
+ const j = JSON.parse(fs7.readFileSync(jobFile, "utf-8"));
601855
+ outputFile = j.outputFile ?? path8.join(jobsDir3, `${id}.output`);
601856
+ startedAt2 = j.startedAt ?? null;
601857
+ user = j.user ?? null;
601858
+ scope = j.scope ?? null;
601859
+ cwd5 = j.cwd ?? null;
601860
+ }
601861
+ } catch {
601862
+ }
601863
+ handoff.push({ jobId: id, pid, outputFile, startedAt: startedAt2, user, scope, cwd: cwd5, handoffAt: (/* @__PURE__ */ new Date()).toISOString() });
601864
+ }
601865
+ if (handoff.length > 0) {
601866
+ try {
601867
+ const fs7 = require3("node:fs");
601868
+ const path8 = require3("node:path");
601869
+ const home = require3("node:os").homedir();
601870
+ const dir = path8.join(home, ".open-agents");
601871
+ fs7.mkdirSync(dir, { recursive: true });
601872
+ fs7.writeFileSync(handoffPath2, JSON.stringify({
601873
+ writtenAt: (/* @__PURE__ */ new Date()).toISOString(),
601874
+ fromPid: process.pid,
601875
+ runs: handoff
601876
+ }, null, 2), "utf-8");
601877
+ log22(` Wrote handoff for ${handoff.length} in-flight run(s) → ${handoffPath2}
601878
+ `);
601879
+ log22(` Children will continue running; next daemon will adopt them.
601880
+ `);
601881
+ } catch (e2) {
601882
+ log22(` WARN: failed to write handoff (${e2.message}) — falling back to hard-kill.
601883
+ `);
601884
+ for (const [id, child] of runningProcesses) {
601885
+ const pid = child.pid;
601886
+ if (pid && pid > 0) {
601887
+ try {
601888
+ process.kill(-pid, "SIGTERM");
601889
+ } catch {
601890
+ }
601891
+ try {
601892
+ process.kill(pid, "SIGTERM");
601893
+ } catch {
601894
+ }
601895
+ }
601896
+ runningProcesses.delete(id);
601897
+ }
601898
+ }
601899
+ }
601900
+ }
601901
+ } catch (e2) {
601902
+ log22(` WARN: handoff block crashed (${e2.message}) — proceeding to server.close()
601903
+ `);
601904
+ }
601905
+ server2.close(() => {
601906
+ log22(" Server stopped (children preserved for adoption).\n");
601907
+ process.exit(0);
601908
+ });
601909
+ setTimeout(() => process.exit(0), 8e3).unref();
601910
+ return;
601911
+ }
601475
601912
  for (const [id, child] of runningProcesses) {
601476
601913
  const pid = child.pid;
601477
601914
  if (pid && pid > 0) {
@@ -601507,8 +601944,8 @@ function startApiServer(options2 = {}) {
601507
601944
  });
601508
601945
  setTimeout(() => process.exit(1), 5e3).unref();
601509
601946
  };
601510
- process.on("SIGINT", shutdown);
601511
- process.on("SIGTERM", shutdown);
601947
+ process.on("SIGINT", () => shutdown("SIGINT"));
601948
+ process.on("SIGTERM", () => shutdown("SIGTERM"));
601512
601949
  return server2;
601513
601950
  }
601514
601951
  async function handleVisionEmbed(req2, res) {
@@ -145,6 +145,105 @@ function effectiveUser() {
145
145
  return process.env.USER || process.env.LOGNAME || os.userInfo().username;
146
146
  }
147
147
 
148
+ // ─── Force-kill port holder (regardless of how it was launched) ────────────
149
+ //
150
+ // Critical for upgrade correctness: when an OLD daemon (started before this
151
+ // install) is running, it holds port 11435 with stale in-memory code. The
152
+ // systemctl/launchctl `restart` calls below CANNOT reach it because the
153
+ // service manager doesn't own that process — it was started detached by
154
+ // `oa serve --daemon` from a previous TUI session.
155
+ //
156
+ // Result without this kill: postinstall tries to start a NEW systemd
157
+ // daemon, port-bind fails, old daemon stays alive serving stale code,
158
+ // and the user's runs continue with broken patches.
159
+ //
160
+ // Strategy (matches packages/cli/src/daemon.ts:forceKillDaemon):
161
+ // 1. SIGTERM via known PID files (~/.open-agents/daemon.pid + .oa/nexus/daemon.pid)
162
+ // 2. lsof / fuser port probe — find ANY other holder
163
+ // 3. 2s graceful grace, then SIGKILL stragglers
164
+ // 4. Poll /health up to 5s for confirmation
165
+ //
166
+ // Skips when OA_DISABLE_FORCE_KILL_DAEMON=1 (escape valve).
167
+ function forceKillPortHolder(port, cb) {
168
+ if (process.env.OA_DISABLE_FORCE_KILL_DAEMON === "1") {
169
+ log("OA_DISABLE_FORCE_KILL_DAEMON=1 — skipping port-holder kill (upgrades may not pick up new code).");
170
+ return cb(0);
171
+ }
172
+
173
+ var killed = 0;
174
+
175
+ // Step 1: SIGTERM via PID files (graceful)
176
+ var pidFiles = [
177
+ path.join(HOME, ".open-agents", "daemon.pid"),
178
+ path.join(process.cwd(), ".oa", "nexus", "daemon.pid"),
179
+ ];
180
+ pidFiles.forEach(function (pidFile) {
181
+ try {
182
+ if (!fs.existsSync(pidFile)) return;
183
+ var n = parseInt(fs.readFileSync(pidFile, "utf8").trim(), 10);
184
+ if (!n || n <= 0) return;
185
+ try { process.kill(n, "SIGTERM"); killed++; log("SIGTERM old daemon (pid " + n + ", from " + pidFile + ")"); } catch (e) { /* dead */ }
186
+ } catch (e) { /* */ }
187
+ });
188
+
189
+ // Step 2: port probe — lsof/fuser to find ANY pid holding the port
190
+ try {
191
+ var out = "";
192
+ try {
193
+ out = cp.execSync("lsof -ti :" + port + " 2>/dev/null || fuser " + port + "/tcp 2>/dev/null || true", {
194
+ encoding: "utf8", timeout: 3000,
195
+ }).trim();
196
+ } catch (e) { /* tools unavailable */ }
197
+ if (out) {
198
+ var pids = out.split(/[\s\n]+/).map(function (s) { return parseInt(s, 10); }).filter(function (n) {
199
+ return Number.isFinite(n) && n > 0 && n !== process.pid;
200
+ });
201
+ pids.forEach(function (otherPid) {
202
+ try { process.kill(otherPid, "SIGTERM"); killed++; log("SIGTERM port-holder (pid " + otherPid + ")"); } catch (e) { /* */ }
203
+ });
204
+ }
205
+ } catch (e) { /* */ }
206
+
207
+ if (killed === 0) {
208
+ // Nothing to kill — port was already free.
209
+ return cb(0);
210
+ }
211
+
212
+ // Step 3: 2s grace, then SIGKILL stragglers
213
+ setTimeout(function () {
214
+ try {
215
+ var out = cp.execSync("lsof -ti :" + port + " 2>/dev/null || fuser " + port + "/tcp 2>/dev/null || true", {
216
+ encoding: "utf8", timeout: 3000,
217
+ }).trim();
218
+ if (out) {
219
+ var pids = out.split(/[\s\n]+/).map(function (s) { return parseInt(s, 10); }).filter(function (n) {
220
+ return Number.isFinite(n) && n > 0 && n !== process.pid;
221
+ });
222
+ pids.forEach(function (otherPid) {
223
+ try { process.kill(otherPid, "SIGKILL"); log("SIGKILL straggler (pid " + otherPid + ")"); } catch (e) { /* */ }
224
+ });
225
+ }
226
+ } catch (e) { /* */ }
227
+
228
+ // Step 4: poll until port is free (up to 5s) — required so the next
229
+ // service-manager restart binds without conflict.
230
+ var pollStart = Date.now();
231
+ function pollFree() {
232
+ if (Date.now() - pollStart > 5000) return cb(killed);
233
+ tryHealth(port, function (alive) {
234
+ if (!alive) return cb(killed);
235
+ setTimeout(pollFree, 200);
236
+ });
237
+ }
238
+ pollFree();
239
+
240
+ // Clean up PID files now that the processes are dead
241
+ pidFiles.forEach(function (pidFile) {
242
+ try { if (fs.existsSync(pidFile)) fs.unlinkSync(pidFile); } catch (e) { /* */ }
243
+ });
244
+ }, 2000);
245
+ }
246
+
148
247
  // ─── Nexus cleanup (preserve prior postinstall behaviour) ──────────────────
149
248
 
150
249
  function cleanNexus() {
@@ -572,7 +671,25 @@ function main() {
572
671
  return safeExit(0);
573
672
  }
574
673
 
674
+ // Force-kill any process holding the daemon port BEFORE we try to install
675
+ // a service. This handles the orphan-detached-daemon case (started by
676
+ // `oa serve --daemon` from a previous TUI session) — systemctl/launchctl
677
+ // restart can't reach it, so we have to clean up explicitly. Without this,
678
+ // the new service-managed daemon fails to bind port 11435 and the user
679
+ // ends up running stale in-memory code from the previous version.
680
+ forceKillPortHolder(PORT, function (killedCount) {
681
+ if (killedCount > 0) {
682
+ log("Killed " + killedCount + " stale daemon process(es) holding port " + PORT + ".");
683
+ }
684
+ runMainAfterKill();
685
+ });
686
+ }
687
+
688
+ function runMainAfterKill() {
575
689
  // Fast path: daemon already answering /health on the target port.
690
+ // After forceKillPortHolder, this should be false unless a service manager
691
+ // already auto-restarted (rare race). We still check so the rest of the
692
+ // flow stays idempotent.
576
693
  tryHealth(PORT, function (alreadyUp) {
577
694
  var user = effectiveUser();
578
695
  var nodeBin = resolveNodeBinary();
@@ -584,10 +701,10 @@ function main() {
584
701
  }
585
702
 
586
703
  if (alreadyUp) {
587
- log("OA daemon already responding on port " + PORT + " — reinstalling service definition so it picks up the new build.");
588
- // Fall through to (re)install service files; the serve command's
589
- // daemon-mode /health pre-check will no-op if the existing daemon
590
- // is still healthy.
704
+ log("OA daemon answering /health on port " + PORT + " — re-checking version drift before service (re)install.");
705
+ // Note: we already force-killed any stale process; anything answering
706
+ // now is a fresh service-managed daemon. Service (re)install below is
707
+ // idempotent and ensures the unit file matches the current bundle.
591
708
  }
592
709
 
593
710
  log("Installing OA API daemon service for port " + PORT + " ...");
@@ -1,12 +1,12 @@
1
1
  {
2
2
  "name": "open-agents-ai",
3
- "version": "0.187.514",
3
+ "version": "0.187.516",
4
4
  "lockfileVersion": 3,
5
5
  "requires": true,
6
6
  "packages": {
7
7
  "": {
8
8
  "name": "open-agents-ai",
9
- "version": "0.187.514",
9
+ "version": "0.187.516",
10
10
  "hasInstallScript": true,
11
11
  "license": "CC-BY-NC-4.0",
12
12
  "dependencies": {
@@ -1570,20 +1570,29 @@
1570
1570
  "node": ">=8.0.0"
1571
1571
  }
1572
1572
  },
1573
+ "node_modules/@peculiar/utils": {
1574
+ "version": "2.0.2",
1575
+ "resolved": "https://registry.npmjs.org/@peculiar/utils/-/utils-2.0.2.tgz",
1576
+ "integrity": "sha512-lHhrK/1QAXGn0GUYkme7t4zo0mQ5QIp+/8YED6pzu8AQFdjA9bAXeNURAHk4sw7n9i89MMNQVom0LkuuLUZMog==",
1577
+ "license": "MIT",
1578
+ "dependencies": {
1579
+ "tslib": "^2.8.1"
1580
+ }
1581
+ },
1573
1582
  "node_modules/@peculiar/webcrypto": {
1574
- "version": "1.5.0",
1575
- "resolved": "https://registry.npmjs.org/@peculiar/webcrypto/-/webcrypto-1.5.0.tgz",
1576
- "integrity": "sha512-BRs5XUAwiyCDQMsVA9IDvDa7UBR9gAvPHgugOeGng3YN6vJ9JYonyDc0lNczErgtCWtucjR5N7VtaonboD/ezg==",
1583
+ "version": "1.7.0",
1584
+ "resolved": "https://registry.npmjs.org/@peculiar/webcrypto/-/webcrypto-1.7.0.tgz",
1585
+ "integrity": "sha512-04vwLd8VKfvsq3CsW5Xix6Xs9wAMK/xci/TO2T/VFLRGlYa06RGiUS9eClwnAx3z5XIjf0EdmHlmGcNzlBJLmA==",
1577
1586
  "license": "MIT",
1578
1587
  "dependencies": {
1579
- "@peculiar/asn1-schema": "^2.3.8",
1588
+ "@peculiar/asn1-schema": "^2.6.0",
1580
1589
  "@peculiar/json-schema": "^1.1.12",
1581
- "pvtsutils": "^1.3.5",
1582
- "tslib": "^2.6.2",
1583
- "webcrypto-core": "^1.8.0"
1590
+ "@peculiar/utils": "^2.0.1",
1591
+ "tslib": "^2.8.1",
1592
+ "webcrypto-core": "^1.9.0"
1584
1593
  },
1585
1594
  "engines": {
1586
- "node": ">=10.12.0"
1595
+ "node": ">=14.18.0"
1587
1596
  }
1588
1597
  },
1589
1598
  "node_modules/@peculiar/x509": {
@@ -2072,9 +2081,9 @@
2072
2081
  }
2073
2082
  },
2074
2083
  "node_modules/b4a": {
2075
- "version": "1.8.0",
2076
- "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.8.0.tgz",
2077
- "integrity": "sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg==",
2084
+ "version": "1.8.1",
2085
+ "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.8.1.tgz",
2086
+ "integrity": "sha512-aiqre1Nr0B/6DgE2N5vwTc+2/oQZ4Wh1t4NznYY4E00y8LCt6NqdRv81so00oo27D8MVKTpUa/MwUUtBLXCoDw==",
2078
2087
  "license": "Apache-2.0",
2079
2088
  "optional": true,
2080
2089
  "peerDependencies": {
@@ -3629,9 +3638,9 @@
3629
3638
  }
3630
3639
  },
3631
3640
  "node_modules/hono": {
3632
- "version": "4.12.15",
3633
- "resolved": "https://registry.npmjs.org/hono/-/hono-4.12.15.tgz",
3634
- "integrity": "sha512-qM0jDhFEaCBb4TxoW7f53Qrpv9RBiayUHo0S52JudprkhvpjIrGoU1mnnr29Fvd1U335ZFPZQY1wlkqgfGXyLg==",
3641
+ "version": "4.12.16",
3642
+ "resolved": "https://registry.npmjs.org/hono/-/hono-4.12.16.tgz",
3643
+ "integrity": "sha512-jN0ZewiNAWSe5khM3EyCmBb250+b40wWbwNILNfEvq84VREWwOIkuUsFONk/3i3nqkz7Oe1PcpM2mwQEK2L9Kg==",
3635
3644
  "license": "MIT",
3636
3645
  "engines": {
3637
3646
  "node": ">=16.9.0"
@@ -4780,9 +4789,9 @@
4780
4789
  "license": "Apache-2.0 OR MIT"
4781
4790
  },
4782
4791
  "node_modules/nanoid": {
4783
- "version": "5.1.9",
4784
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.9.tgz",
4785
- "integrity": "sha512-ZUvP7KeBLe3OZ1ypw6dI/TzYJuvHP77IM4Ry73waSQTLn8/g8rpdjfyVAh7t1/+FjBtG4lCP42MEbDxOsRpBMw==",
4792
+ "version": "5.1.11",
4793
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.11.tgz",
4794
+ "integrity": "sha512-v+KEsUv2ps74PaSKv0gHTxTCgMXOIfBEbaqa6w6ISIGC7ZsvHN4N9oJ8d4cmf0n5oTzQz2SLmThbQWhjd/8eKg==",
4786
4795
  "funding": [
4787
4796
  {
4788
4797
  "type": "github",
@@ -6065,9 +6074,9 @@
6065
6074
  }
6066
6075
  },
6067
6076
  "node_modules/sharp/node_modules/tar-stream": {
6068
- "version": "3.1.8",
6069
- "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.8.tgz",
6070
- "integrity": "sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==",
6077
+ "version": "3.2.0",
6078
+ "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.2.0.tgz",
6079
+ "integrity": "sha512-ojzvCvVaNp6aOTFmG7jaRD0meowIAuPc3cMMhSgKiVWws1GyHbGd/xvnyuRKcKlMpt3qvxx6r0hreCNITP9hIg==",
6071
6080
  "license": "MIT",
6072
6081
  "optional": true,
6073
6082
  "dependencies": {
@@ -6818,16 +6827,16 @@
6818
6827
  }
6819
6828
  },
6820
6829
  "node_modules/webcrypto-core": {
6821
- "version": "1.8.1",
6822
- "resolved": "https://registry.npmjs.org/webcrypto-core/-/webcrypto-core-1.8.1.tgz",
6823
- "integrity": "sha512-P+x1MvlNCXlKbLSOY4cYrdreqPG5hbzkmawbcXLKN/mf6DZW0SdNNkZ+sjwsqVkI4A4Ko2sPZmkZtCKY58w83A==",
6830
+ "version": "1.9.0",
6831
+ "resolved": "https://registry.npmjs.org/webcrypto-core/-/webcrypto-core-1.9.0.tgz",
6832
+ "integrity": "sha512-ULMJZdRaACcQym3tuBEVcFHRfIbO2sbgBd2eIzo+KYk5eRk9wNxB8td9mX3mmAytLrtE4MRfYSwE1irSw1iGAg==",
6824
6833
  "license": "MIT",
6825
6834
  "dependencies": {
6826
- "@peculiar/asn1-schema": "^2.3.13",
6835
+ "@peculiar/asn1-schema": "^2.6.0",
6827
6836
  "@peculiar/json-schema": "^1.1.12",
6828
- "asn1js": "^3.0.5",
6829
- "pvtsutils": "^1.3.5",
6830
- "tslib": "^2.7.0"
6837
+ "@peculiar/utils": "^2.0.1",
6838
+ "asn1js": "^3.0.10",
6839
+ "tslib": "^2.8.1"
6831
6840
  }
6832
6841
  },
6833
6842
  "node_modules/webidl-conversions": {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "open-agents-ai",
3
- "version": "0.187.514",
3
+ "version": "0.187.516",
4
4
  "description": "AI coding agent powered by open-source models (Ollama/vLLM) — interactive TUI with agentic tool-calling loop",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",