@pushpalsdev/cli 1.0.76 → 1.0.78

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2273,6 +2273,26 @@ function writeTextFileIfMissing(pathValue, text) {
2273
2273
  mkdirSync(dirname(pathValue), { recursive: true });
2274
2274
  writeFileSync(pathValue, text, "utf8");
2275
2275
  }
2276
/**
 * Escapes every character that is special inside a regular expression so the
 * given value can be embedded in a RegExp pattern as a literal string.
 *
 * @param {string} value - Raw text to escape.
 * @returns {string} The text with regex metacharacters backslash-escaped.
 */
function escapeRegExp(value) {
  const specials = /[.*+?^${}()|[\]\\]/;
  let escaped = "";
  for (const ch of value) {
    escaped += specials.test(ch) ? `\\${ch}` : ch;
  }
  return escaped;
}
2279
/**
 * Applies `transform` to the body of one top-level TOML section
 * (`[sectionName]`). The body spans from the line after the header up to the
 * next top-level `[` header or the end of the text. Text outside the section
 * is untouched; if the section is absent, `text` is returned unchanged.
 *
 * @param {string} text - Full TOML document text.
 * @param {string} sectionName - Section name without brackets.
 * @param {(body: string) => string} transform - Rewrites the section body.
 * @returns {string} The document with the section body transformed.
 */
function migrateEmbeddedRuntimeTomlSection(text, sectionName, transform) {
  const headerPart = `(^\\[${escapeRegExp(sectionName)}\\]\\r?\\n)`;
  const bodyPart = "([\\s\\S]*?)";
  // Stop at the next header line or at end-of-input ((?![\s\S]) matches only there).
  const stopPart = "(?=\\r?\\n\\[|(?![\\s\\S]))";
  const sectionPattern = new RegExp(headerPart + bodyPart + stopPart, "m");
  return text.replace(sectionPattern, (_whole, sectionHeader, sectionBody) => sectionHeader + transform(sectionBody));
}
2285
/**
 * Rewrites legacy OpenAI Codex defaults inside one TOML section body:
 * `model = "gpt-5.4"` becomes `"gpt-5.5"` (only when `opts.includeModel`),
 * and `reasoning_effort = "high"` becomes `"xhigh"`. When `opts.includeModel`
 * is true, the section is migrated only if it explicitly selects
 * `backend = "openai_codex"`; other backends are returned unchanged.
 *
 * @param {string} sectionBody - Body text of a TOML section.
 * @param {{ includeModel: boolean }} opts - Whether to also migrate the model key.
 * @returns {string} The (possibly) migrated section body.
 */
function migrateLegacyOpenAICodexDefaults(sectionBody, opts) {
  const usesOpenAICodexBackend = /^\s*backend\s*=\s*"openai_codex"\s*$/m.test(sectionBody);
  if (opts.includeModel && !usesOpenAICodexBackend) {
    return sectionBody;
  }
  let migrated = sectionBody;
  if (opts.includeModel) {
    migrated = migrated.replace(/^(\s*model\s*=\s*)"gpt-5\.4"\s*$/m, '$1"gpt-5.5"');
  }
  return migrated.replace(/^(\s*reasoning_effort\s*=\s*)"high"\s*$/m, '$1"xhigh"');
}
2276
2296
  function migrateEmbeddedRuntimeLocalToml(localTomlPath) {
2277
2297
  if (!existsSync5(localTomlPath))
2278
2298
  return;
@@ -2284,8 +2304,13 @@ function migrateEmbeddedRuntimeLocalToml(localTomlPath) {
2284
2304
  }
2285
2305
  const updated = original.replace(/^(\[remotebuddy\.autonomy\]\r?\n)(enabled\s*=\s*false\s*\r?\n)/m, `$1enabled = true
2286
2306
  `);
2287
- if (updated !== original) {
2288
- writeFileSync(localTomlPath, updated, "utf8");
2307
+ let migrated = updated;
2308
+ for (const sectionName of ["localbuddy.llm", "remotebuddy.llm", "workerpals.llm"]) {
2309
+ migrated = migrateEmbeddedRuntimeTomlSection(migrated, sectionName, (sectionBody) => migrateLegacyOpenAICodexDefaults(sectionBody, { includeModel: true }));
2310
+ }
2311
+ migrated = migrateEmbeddedRuntimeTomlSection(migrated, "workerpals.openai_codex", (sectionBody) => migrateLegacyOpenAICodexDefaults(sectionBody, { includeModel: false }));
2312
+ if (migrated !== original) {
2313
+ writeFileSync(localTomlPath, migrated, "utf8");
2289
2314
  }
2290
2315
  }
2291
2316
  function copyRuntimeAssetBundle(source, runtimeRoot, force) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pushpalsdev/cli",
3
- "version": "1.0.76",
3
+ "version": "1.0.78",
4
4
  "description": "PushPals terminal CLI for LocalBuddy -> RemoteBuddy orchestration",
5
5
  "license": "MIT",
6
6
  "repository": {
@@ -1736,6 +1736,7 @@ var DEFAULT_OLLAMA_ENDPOINT = "http://127.0.0.1:11434/api/chat";
1736
1736
  var DEFAULT_OPENAI_ENDPOINT = "https://api.openai.com/v1/chat/completions";
1737
1737
  var DEFAULT_MODEL = "local-model";
1738
1738
  var DEFAULT_CODEX_MODEL = "gpt-5.5";
1739
+ var LEGACY_CODEX_MODEL_FALLBACK = "gpt-5.4";
1739
1740
  var DEFAULT_CODEX_REASONING_EFFORT = "xhigh";
1740
1741
  var DEFAULT_CODEX_TIMEOUT_MS = 120000;
1741
1742
  var DEFAULT_LMSTUDIO_CONTEXT_WINDOW = 4096;
@@ -1860,6 +1861,55 @@ function codexReasoningEffort(configured, model) {
1860
1861
  }
1861
1862
  return defaultEffort;
1862
1863
  }
1864
/**
 * Reports whether `command` is empty or matches one of the built-in Codex
 * launcher invocations (`bun x --yes @openai/codex` or
 * `bunx --yes @openai/codex`), ignoring case, surrounding whitespace, and
 * blank arguments.
 *
 * @param {string[]} command - Launcher argv parts.
 * @returns {boolean}
 */
function isDefaultCodexLauncher(command) {
  const knownLaunchers = [
    ["bun", "x", "--yes", "@openai/codex"],
    ["bunx", "--yes", "@openai/codex"]
  ];
  const normalized = [];
  for (const part of command) {
    const cleaned = part.trim().toLowerCase();
    if (cleaned) {
      normalized.push(cleaned);
    }
  }
  if (normalized.length === 0) {
    return true;
  }
  // \x00 join makes the comparison unambiguous even if args contained spaces.
  const joined = normalized.join("\x00");
  return knownLaunchers.some((launcher) => launcher.join("\x00") === joined);
}
1868
/**
 * Extracts a semantic version from Codex CLI version output. Accepts optional
 * "codex" / "codex-cli" / "openai codex" prefixes and an optional leading "v".
 *
 * @param {string} text - A line of CLI version output.
 * @returns {{ major: number, minor: number, patch: number, prerelease: string } | null}
 *   Parsed version, or null when no version-shaped token is found.
 */
function parseCodexCliVersion(text) {
  const versionPattern = /(?:codex(?:-cli)?|openai\s+codex)?\s*v?(\d+)\.(\d+)\.(\d+)(?:-([0-9a-z.-]+))?/i;
  const found = versionPattern.exec(text);
  if (found === null) {
    return null;
  }
  const [, major, minor, patch, prerelease] = found;
  return {
    major: Number.parseInt(major, 10),
    minor: Number.parseInt(minor, 10),
    patch: Number.parseInt(patch, 10),
    prerelease: prerelease ?? ""
  };
}
1879
/**
 * Orders two parsed Codex versions (either may be null for "unknown").
 * A known version outranks an unknown one; otherwise major/minor/patch are
 * compared in order; a release (empty prerelease) outranks any prerelease;
 * remaining ties fall back to localeCompare of the prerelease strings.
 *
 * @param {{ major: number, minor: number, patch: number, prerelease: string } | null} a
 * @param {{ major: number, minor: number, patch: number, prerelease: string } | null} b
 * @returns {number} Negative if a < b, zero if equal, positive if a > b.
 */
function compareCodexVersions(a, b) {
  if (a && !b) {
    return 1;
  }
  if (!a && b) {
    return -1;
  }
  if (!a || !b) {
    return 0;
  }
  for (const field of ["major", "minor", "patch"]) {
    const left = a[field];
    const right = b[field];
    if (left !== right) {
      return left > right ? 1 : -1;
    }
  }
  if (a.prerelease === b.prerelease) {
    return 0;
  }
  // An empty prerelease marks a final release, which ranks above prereleases.
  if (!a.prerelease) {
    return 1;
  }
  if (!b.prerelease) {
    return -1;
  }
  return a.prerelease.localeCompare(b.prerelease);
}
1898
/**
 * Selects which successful CLI probe to use. With
 * `opts.preferNewestCompatible`, the probe reporting the highest version
 * (per compareCodexVersions) wins; otherwise the first probe is taken.
 *
 * @param {Array<{ command: string[], version: object | null, versionText?: string }>} probes
 * @param {{ preferNewestCompatible: boolean }} opts
 * @returns {object | null} The chosen probe, or null for an empty list.
 */
function chooseCodexCommandProbe(probes, opts) {
  if (probes.length === 0) {
    return null;
  }
  if (!opts.preferNewestCompatible) {
    return probes[0];
  }
  let best = probes[0];
  for (let i = 1; i < probes.length; i += 1) {
    if (compareCodexVersions(probes[i].version, best.version) > 0) {
      best = probes[i];
    }
  }
  return best;
}
1905
/**
 * Detects the Codex CLI failure mode where the requested model needs a newer
 * CLI release, by scanning the combined stdout/stderr text
 * (case-insensitively) for the known error phrasings.
 *
 * @param {string} stdout - Captured standard output.
 * @param {string} stderr - Captured standard error.
 * @returns {boolean}
 */
function requiresNewerCodexForModel(stdout, stderr) {
  const haystack = `${stdout}
${stderr}`.toLowerCase();
  if (haystack.includes("requires a newer version of codex")) {
    return true;
  }
  return haystack.includes("requires newer") && haystack.includes("codex");
}
1910
/**
 * Case- and whitespace-insensitive check for whether `model` names the
 * current default Codex model (DEFAULT_CODEX_MODEL).
 *
 * @param {string} model - Model identifier to test.
 * @returns {boolean}
 */
function isDefaultCodexModel(model) {
  const candidate = model.trim().toLowerCase();
  const expected = DEFAULT_CODEX_MODEL.toLowerCase();
  return candidate === expected;
}
1863
1913
  function normalizeCodexModel(rawModel) {
1864
1914
  const model = rawModel.trim();
1865
1915
  if (!model)
@@ -1888,6 +1938,63 @@ function normalizeOpenAiBaseFromEndpoint(rawEndpoint) {
1888
1938
  return trimmed;
1889
1939
  }
1890
1940
/**
 * Runs a child process, preferring Bun's native spawn when running under the
 * Bun runtime and falling back to the Node-based implementation otherwise.
 *
 * @param {string[]} command - Full argv (program plus arguments).
 * @param {object} opts - Spawn options (cwd, env, stdin, timeoutMs, ...).
 * @returns {Promise<object>} The runner's result object.
 */
async function runProcess(command, opts) {
  const canUseBunSpawn = typeof globalThis.Bun?.spawn === "function";
  return canUseBunSpawn ? runProcessWithBun(command, opts) : runProcessWithNode(command, opts);
}
1947
/**
 * Executes `command` via Bun.spawn with fully piped stdio.
 *
 * Behavior:
 * - Writes `opts.stdin` (when it is a string) to the child and closes stdin.
 * - When `opts.timeoutMs` > 0, sends SIGTERM at the deadline and escalates to
 *   SIGKILL one second later if the child is still running.
 * - Resolves with { code, signal, stdout, stderr, timedOut }; `signal` is
 *   always null on this path.
 *
 * @param {string[]} command - Full argv (program plus arguments).
 * @param {{ cwd?: string, env?: object, stdin?: string, timeoutMs?: number }} opts
 * @returns {Promise<{ code: number, signal: null, stdout: string, stderr: string, timedOut: boolean }>}
 */
async function runProcessWithBun(command, opts) {
  const runtime = globalThis.Bun;
  const limitMs = opts.timeoutMs ?? 0;
  let sawTimeout = false;
  let termTimer = null;
  let killTimer = null;
  const child = runtime.spawn(command, {
    cwd: opts.cwd,
    env: opts.env,
    stdin: "pipe",
    stdout: "pipe",
    stderr: "pipe"
  });
  // Begin collecting both output streams before waiting on exit.
  const collectStdout = new Response(child.stdout).text();
  const collectStderr = new Response(child.stderr).text();
  if (limitMs > 0) {
    termTimer = setTimeout(() => {
      sawTimeout = true;
      try {
        child.kill("SIGTERM");
      } catch {}
      // Escalate if the child ignores SIGTERM for one second.
      killTimer = setTimeout(() => {
        try {
          child.kill("SIGKILL");
        } catch {}
      }, 1000);
      killTimer.unref?.();
    }, limitMs);
  }
  try {
    if (typeof opts.stdin === "string") {
      child.stdin?.write(opts.stdin);
    }
    child.stdin?.end();
    const code = await child.exited;
    const [stdout, stderr] = await Promise.all([collectStdout, collectStderr]);
    return {
      code,
      signal: null,
      stdout,
      stderr,
      timedOut: sawTimeout
    };
  } finally {
    if (termTimer) {
      clearTimeout(termTimer);
    }
    if (killTimer) {
      clearTimeout(killTimer);
    }
  }
}
1997
+ async function runProcessWithNode(command, opts) {
1891
1998
  const timeoutMs = opts.timeoutMs ?? 0;
1892
1999
  return new Promise((resolve4, reject) => {
1893
2000
  const child = spawn(command[0], command.slice(1), {
@@ -1953,6 +2060,7 @@ async function resolveCodexCommandPrefix(configuredCommand) {
1953
2060
  if (cached)
1954
2061
  return cached;
1955
2062
  const preferred = override.length > 0 ? override : ["bun", "x", "--yes", "@openai/codex"];
2063
+ const preferNewestCompatible = isDefaultCodexLauncher(preferred);
1956
2064
  const candidates = [];
1957
2065
  const pushCandidate = (cmd) => {
1958
2066
  if (cmd.length === 0)
@@ -1976,6 +2084,7 @@ async function resolveCodexCommandPrefix(configuredCommand) {
1976
2084
  const cwd = process.cwd();
1977
2085
  const env = process.env;
1978
2086
  const attemptErrors = [];
2087
+ const successfulProbes = [];
1979
2088
  for (const candidate of candidates) {
1980
2089
  if (candidate.length === 0)
1981
2090
  continue;
@@ -1987,8 +2096,15 @@ async function resolveCodexCommandPrefix(configuredCommand) {
1987
2096
  timeoutMs: 15000
1988
2097
  });
1989
2098
  if (probe.code === 0) {
1990
- cachedCodexCommandPrefix.set(cacheKey, candidate);
1991
- return candidate;
2099
+ const versionText = (probe.stdout || probe.stderr || "").trim().split(/\r?\n/, 1)[0] ?? "";
2100
+ successfulProbes.push({
2101
+ command: candidate,
2102
+ version: parseCodexCliVersion(versionText),
2103
+ versionText
2104
+ });
2105
+ if (!preferNewestCompatible)
2106
+ break;
2107
+ continue;
1992
2108
  }
1993
2109
  const detail = (probe.stderr || probe.stdout || "").trim();
1994
2110
  attemptErrors.push(`${rendered} -> exit ${probe.code ?? "unknown"}${detail ? ` (${detail.split(/\r?\n/, 1)[0]})` : ""}`);
@@ -1996,6 +2112,12 @@ async function resolveCodexCommandPrefix(configuredCommand) {
1996
2112
  attemptErrors.push(`${rendered} -> ${String(err)}`);
1997
2113
  }
1998
2114
  }
2115
+ const selected = chooseCodexCommandProbe(successfulProbes, { preferNewestCompatible });
2116
+ if (selected) {
2117
+ cachedCodexCommandPrefix.set(cacheKey, selected.command);
2118
+ console.log(`[LLM] Resolved Codex CLI command: ${selected.command.join(" ")}${selected.versionText ? ` (${selected.versionText})` : ""}.`);
2119
+ return selected.command;
2120
+ }
1999
2121
  const details = attemptErrors.length > 0 ? ` Tried: ${attemptErrors.join("; ")}` : "";
2000
2122
  throw new Error("OpenAI Codex CLI is unavailable. Install/use Codex CLI (`bun x --yes @openai/codex` or `codex`) and retry." + details);
2001
2123
  }
@@ -2742,7 +2864,7 @@ class OpenAiCodexCliClient {
2742
2864
  service: this.service,
2743
2865
  sessionId: this.sessionTag || undefined,
2744
2866
  backend: "openai_codex",
2745
- modelId: this.model,
2867
+ modelId: usage.modelId ?? this.model,
2746
2868
  promptTokens: usage.promptTokens,
2747
2869
  completionTokens: usage.completionTokens,
2748
2870
  totalTokens: usage.promptTokens + usage.completionTokens,
@@ -2788,6 +2910,13 @@ class OpenAiCodexCliClient {
2788
2910
  }
2789
2911
  }
2790
2912
  async runCodexExec(prompt) {
2913
+ return this.runCodexExecAttempt(prompt, {
2914
+ model: this.model,
2915
+ modelCompatibilityRecoveryAttempt: 0
2916
+ });
2917
+ }
2918
+ async runCodexExecAttempt(prompt, opts) {
2919
+ const model = normalizeCodexModel(opts.model);
2791
2920
  const commandPrefix = await resolveCodexCommandPrefix(this.codexBin);
2792
2921
  const env = { ...process.env };
2793
2922
  env.PYTHONIOENCODING = "utf-8";
@@ -2821,7 +2950,7 @@ class OpenAiCodexCliClient {
2821
2950
  const command = [
2822
2951
  ...commandPrefix,
2823
2952
  "-c",
2824
- `model_reasoning_effort="${codexReasoningEffort(this.reasoningEffort, this.model)}"`,
2953
+ `model_reasoning_effort="${codexReasoningEffort(this.reasoningEffort, model)}"`,
2825
2954
  "-a",
2826
2955
  "never",
2827
2956
  "-s",
@@ -2832,8 +2961,8 @@ class OpenAiCodexCliClient {
2832
2961
  "--output-last-message",
2833
2962
  lastMessagePath
2834
2963
  ];
2835
- if (this.model) {
2836
- command.push("-m", this.model);
2964
+ if (model) {
2965
+ command.push("-m", model);
2837
2966
  }
2838
2967
  command.push("-");
2839
2968
  const result = await runProcess(command, {
@@ -2850,13 +2979,20 @@ class OpenAiCodexCliClient {
2850
2979
  const lastMessage = existsSync3(lastMessagePath) ? readFileSync4(lastMessagePath, "utf8").trim() : "";
2851
2980
  if (result.code !== 0) {
2852
2981
  const detail = stderr || stdout || "codex exec exited with non-zero status";
2982
+ if (opts.modelCompatibilityRecoveryAttempt < 1 && isDefaultCodexModel(model) && LEGACY_CODEX_MODEL_FALLBACK.trim().toLowerCase() !== DEFAULT_CODEX_MODEL.toLowerCase() && requiresNewerCodexForModel(stdout, stderr)) {
2983
+ console.warn(`[LLM] Codex CLI rejected default model ${DEFAULT_CODEX_MODEL}; retrying once with ${LEGACY_CODEX_MODEL_FALLBACK}. Upgrade Codex CLI to use ${DEFAULT_CODEX_MODEL}.`);
2984
+ return this.runCodexExecAttempt(prompt, {
2985
+ model: LEGACY_CODEX_MODEL_FALLBACK,
2986
+ modelCompatibilityRecoveryAttempt: opts.modelCompatibilityRecoveryAttempt + 1
2987
+ });
2988
+ }
2853
2989
  throw new Error(`Codex CLI request failed (exit ${result.code ?? "unknown"}): ${detail}`);
2854
2990
  }
2855
2991
  const text = lastMessage || stdout;
2856
2992
  if (!text) {
2857
2993
  throw new Error("Codex CLI completed without producing a response.");
2858
2994
  }
2859
- return { text, stderr };
2995
+ return { text, stderr, model };
2860
2996
  } finally {
2861
2997
  rmSync(tmp, { recursive: true, force: true });
2862
2998
  }
@@ -2874,7 +3010,7 @@ class OpenAiCodexCliClient {
2874
3010
  promptTokens: estimateTokensFromText(prompt),
2875
3011
  completionTokens: estimateTokensFromText(result.text)
2876
3012
  });
2877
- await this.maybeReportUsage(usage);
3013
+ await this.maybeReportUsage({ ...usage, modelId: result.model });
2878
3014
  return {
2879
3015
  text: result.text,
2880
3016
  usage: {