@pushpalsdev/cli 1.0.65 → 1.0.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
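At a glance: the package version moves from 1.0.65 to 1.0.67, and the default Codex model is bumped from gpt-5.4 to gpt-5.5 across the bundled [localbuddy.llm], [remotebuddy.llm], and [workerpals.llm] config sections and the executor's DEFAULT_CODEX_MODEL. The executor also gains a one-shot compatibility retry (falling back to the new LEGACY_CODEX_MODEL_FALLBACK, gpt-5.4, when the installed Codex CLI reports that the default model requires a newer Codex version) and now resolves the first segment of configured and fallback command prefixes through a PATH lookup; both behaviors are covered by new tests.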
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@pushpalsdev/cli",
-  "version": "1.0.65",
+  "version": "1.0.67",
   "description": "PushPals terminal CLI for LocalBuddy -> RemoteBuddy orchestration",
   "license": "MIT",
   "repository": {
@@ -8,7 +8,7 @@
 
 [localbuddy.llm]
 backend = "openai_codex"
-model = "gpt-5.4"
+model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
@@ -16,7 +16,7 @@ reasoning_effort = "high"
 
 [remotebuddy.llm]
 backend = "openai_codex"
-model = "gpt-5.4"
+model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
@@ -43,7 +43,7 @@ retention_days = 30
 
 [workerpals.llm]
 backend = "openai_codex"
-model = "gpt-5.4"
+model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
@@ -43,7 +43,8 @@ from executor_base import (
 )
 
 LOG_PREFIX = "[OpenAICodexExecutor]"
-DEFAULT_CODEX_MODEL = "gpt-5.4"
+DEFAULT_CODEX_MODEL = "gpt-5.5"
+LEGACY_CODEX_MODEL_FALLBACK = "gpt-5.4"
 _ACTIVE_CHILD: Optional[subprocess.Popen[str]] = None
 _INTERRUPTED_SIGNAL: Optional[int] = None
 log = Logger(LOG_PREFIX)
@@ -76,6 +77,10 @@ _CODEX_WORKAROUND_NEGATION_HINTS = (
     "codex cli is required infrastructure",
 )
 _REJECTED_EXEC_COMMAND_PATTERN = re.compile(r"exec_command failed for `([^`]+)`", re.IGNORECASE)
+_MODEL_REQUIRES_NEWER_CODEX_PATTERN = re.compile(
+    r"model requires a newer version of codex|requires a newer version of codex|upgrade to the latest app or cli",
+    re.IGNORECASE,
+)
 _DISALLOWED_SHELL_WRAPPER_PREFIXES = (
     "/bin/bash -lc ",
     "/bin/bash -c ",
@@ -210,6 +215,19 @@ def shutil_which(binary: str) -> str:
     return which(binary) or ""
 
 
+def _resolve_command_executable(binary: str) -> str:
+    value = str(binary or "").strip()
+    if not value or os.path.dirname(value) or os.path.isabs(value):
+        return value
+    return shutil_which(value) or value
+
+
+def _normalize_command_prefix(parts: List[str]) -> List[str]:
+    if not parts:
+        return []
+    return [_resolve_command_executable(parts[0]), *parts[1:]]
+
+
 def _truncate(text: str, max_chars: int = 4000) -> str:
     value = str(text or "")
     if len(value) <= max_chars:
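The two helpers above only rewrite the first command segment, and only when it is a bare name (no directory component, not absolute). A minimal standalone sketch of the same idea using only the standard library; the bun command and the C:\Tools\bun.CMD result are illustrative values taken from the tests further below, not shipped code:

from shutil import which

# Resolve a bare first segment through PATH lookup. On Windows this can turn
# "bun" into its full shim path (e.g. C:\Tools\bun.CMD), which subprocess can
# spawn directly, whereas the bare name may fail to resolve without a shell.
cmd = ["bun", "x", "--yes", "@openai/codex"]
cmd = [which(cmd[0]) or cmd[0], *cmd[1:]]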
@@ -376,7 +394,7 @@ def _resolve_codex_command_prefix(config: OpenAICodexRuntimeConfig) -> List[str]
             if isinstance(parsed, list):
                 parts = [str(p).strip() for p in parsed if str(p).strip()]
                 if parts:
-                    return parts
+                    return _normalize_command_prefix(parts)
         except Exception:
             log.info(
                 "Invalid PUSHPALS_OPENAI_CODEX_BIN_JSON; expected JSON array of command segments."
@@ -391,13 +409,15 @@ def _resolve_codex_command_prefix(config: OpenAICodexRuntimeConfig) -> List[str]
                 "Invalid PUSHPALS_OPENAI_CODEX_BIN value; expected a command string parseable by shlex."
             )
             return []
-        return parts
+        return _normalize_command_prefix(parts)
 
     # Prefer bunx to avoid requiring a separate node runtime in the container.
-    if shutil_which("bunx"):
-        return ["bunx", "--yes", "@openai/codex"]
-    if shutil_which("codex"):
-        return ["codex"]
+    bunx = shutil_which("bunx")
+    if bunx:
+        return [bunx, "--yes", "@openai/codex"]
+    codex = shutil_which("codex")
+    if codex:
+        return [codex]
     return []
 
 
@@ -964,6 +984,10 @@ def _safe_model_for_codex(raw_model: str, base_url: str) -> str:
     return DEFAULT_CODEX_MODEL
 
 
+def _requires_newer_codex_for_model(*texts: str) -> bool:
+    return any(_MODEL_REQUIRES_NEWER_CODEX_PATTERN.search(str(text or "")) for text in texts)
+
+
 def _build_instruction(instruction: str, supplemental_guidance: List[str]) -> str:
     system_prompt = (_load_prompt_template(_TASK_SYSTEM_PROMPT_PATH) or "").strip()
     if not system_prompt:
@@ -1343,6 +1367,8 @@ def _run_codex_task(
     supplemental_guidance: List[str],
     *,
     wrapper_recovery_attempt: int = 0,
+    model_compatibility_recovery_attempt: int = 0,
+    model_override: Optional[str] = None,
     baseline_changes: Optional[List[str]] = None,
 ) -> Dict[str, Any]:
     global _ACTIVE_CHILD, _INTERRUPTED_SIGNAL
@@ -1377,7 +1403,12 @@ def _run_codex_task(
     configured_model, api_key, base_url = resolve_llm_config(DEFAULT_CODEX_MODEL, logger=log)
     auth_mode_raw = runtime_config.auth_mode
     auth_mode_configured = _normalize_auth_mode(auth_mode_raw)
-    model = _safe_model_for_codex(configured_model, base_url)
+    model = str(model_override or "").strip() or _safe_model_for_codex(configured_model, base_url)
+    if model_override:
+        log.info(
+            f"Using Codex model compatibility override {model!r} instead of configured/default "
+            f"model {configured_model!r}."
+        )
     approval = _normalize_choice(
         runtime_config.approval_policy,
         _VALID_APPROVAL_POLICIES,
@@ -1743,6 +1774,8 @@ def _run_codex_task(
                 recovery_guidance,
             ],
             wrapper_recovery_attempt=wrapper_recovery_attempt + 1,
+            model_compatibility_recovery_attempt=model_compatibility_recovery_attempt,
+            model_override=model_override,
             baseline_changes=baseline_snapshot,
         )
         retry_result["usage"] = _merge_usage_records(usage, retry_result.get("usage"))
@@ -1808,6 +1841,36 @@ def _run_codex_task(
     exit_code = int(return_code)
 
     if exit_code != 0:
+        if (
+            model_compatibility_recovery_attempt < 1
+            and model.strip().lower() == DEFAULT_CODEX_MODEL.lower()
+            and LEGACY_CODEX_MODEL_FALLBACK.strip().lower() != DEFAULT_CODEX_MODEL.lower()
+            and _requires_newer_codex_for_model(stdout, stderr)
+        ):
+            log.warning(
+                f"Codex CLI rejected default model {DEFAULT_CODEX_MODEL}; retrying once with "
+                f"{LEGACY_CODEX_MODEL_FALLBACK}. Upgrade Codex CLI to use {DEFAULT_CODEX_MODEL}."
+            )
+            retry_result = _run_codex_task(
+                repo,
+                instruction,
+                effective_supplemental_guidance,
+                wrapper_recovery_attempt=wrapper_recovery_attempt,
+                model_compatibility_recovery_attempt=model_compatibility_recovery_attempt + 1,
+                model_override=LEGACY_CODEX_MODEL_FALLBACK,
+                baseline_changes=baseline_snapshot,
+            )
+            retry_result["usage"] = _merge_usage_records(usage, retry_result.get("usage"))
+            if retry_result.get("ok"):
+                recovered_stdout = str(retry_result.get("stdout") or "").strip()
+                retry_result["stdout"] = _truncate(
+                    (
+                        f"Codex CLI rejected default model {DEFAULT_CODEX_MODEL} because it "
+                        "requires a newer Codex version; recovered by retrying with "
+                        f"{LEGACY_CODEX_MODEL_FALLBACK}.\n\n{recovered_stdout}"
+                    ).strip()
+                )
+            return retry_result
         detail = stderr.strip() or stdout.strip() or "codex exec exited with a non-zero status"
         if last_message:
             detail = f"{detail}\nLast assistant message:\n{last_message}"
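Taken together, the executor changes retry at most once (model_compatibility_recovery_attempt < 1), and only when the failing model is the default and the CLI output matches the new pattern. A quick standalone check of that detection, reusing the error string emitted by the test stub further below; this is a sketch, not the shipped module:

import re

# Same alternation as _MODEL_REQUIRES_NEWER_CODEX_PATTERN in the hunk above.
pattern = re.compile(
    r"model requires a newer version of codex"
    r"|requires a newer version of codex"
    r"|upgrade to the latest app or cli",
    re.IGNORECASE,
)

stderr = (
    "ERROR: The 'gpt-5.5' model requires a newer version of Codex. "
    "Please upgrade to the latest app or CLI and try again."
)
assert pattern.search(stderr)  # the condition that triggers the single gpt-5.4 retry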
@@ -34,6 +34,7 @@ from openai_codex_executor import (
     _extract_usage_counts,
     _load_prompt_template,
     _repo_root_for_prompt_loading,
+    _resolve_codex_command_prefix,
     _unwrap_shell_wrapper_command,
     _usage_from_trace_or_estimate,
 )
@@ -79,14 +80,52 @@ class OpenAICodexRuntimeConfigTests(unittest.TestCase):
         self.assertEqual(cfg.reasoning_effort, "high")
         self.assertFalse(cfg.json_output)
 
-    def test_reasoning_effort_caps_extra_high_for_gpt_5_4(self) -> None:
+    def test_resolve_codex_command_prefix_resolves_configured_executable(self) -> None:
+        cfg = OpenAICodexRuntimeConfig.from_sources(
+            SettingsResolver(
+                env={"PUSHPALS_OPENAI_CODEX_BIN": "bun x --yes @openai/codex"},
+                config_loader=lambda: {},
+            ),
+        )
+        with mock.patch(
+            "openai_codex_executor.shutil_which",
+            side_effect=lambda binary: {"bun": r"C:\Tools\bun.CMD"}.get(binary, ""),
+        ):
+            self.assertEqual(
+                _resolve_codex_command_prefix(cfg),
+                [r"C:\Tools\bun.CMD", "x", "--yes", "@openai/codex"],
+            )
+
+    def test_resolve_codex_command_prefix_resolves_fallback_executable(self) -> None:
+        cfg = OpenAICodexRuntimeConfig.from_sources(
+            SettingsResolver(env={}, config_loader=lambda: {}),
+        )
+        with mock.patch(
+            "openai_codex_executor.shutil_which",
+            side_effect=lambda binary: {"bunx": "/usr/local/bin/bunx"}.get(binary, ""),
+        ):
+            self.assertEqual(
+                _resolve_codex_command_prefix(cfg),
+                ["/usr/local/bin/bunx", "--yes", "@openai/codex"],
+            )
+
+    def test_reasoning_effort_caps_extra_high_for_legacy_gpt_5_4(self) -> None:
+        cfg = OpenAICodexRuntimeConfig.from_sources(
+            SettingsResolver(
+                env={"WORKERPALS_OPENAI_CODEX_REASONING_EFFORT": "extra high"},
+                config_loader=lambda: {},
+            ),
+        )
+        self.assertEqual(_resolve_reasoning_effort(cfg, model="gpt-5.4"), "high")
+
+    def test_reasoning_effort_preserves_extra_high_for_default_gpt_5_5(self) -> None:
         cfg = OpenAICodexRuntimeConfig.from_sources(
             SettingsResolver(
                 env={"WORKERPALS_OPENAI_CODEX_REASONING_EFFORT": "extra high"},
                 config_loader=lambda: {},
             ),
         )
-        self.assertEqual(_resolve_reasoning_effort(cfg), "high")
+        self.assertEqual(_resolve_reasoning_effort(cfg), "xhigh")
 
     def test_reasoning_effort_preserves_extra_high_for_future_models(self) -> None:
         cfg = OpenAICodexRuntimeConfig.from_sources(
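Worth noting in the test rename above: with the default now on gpt-5.5, an "extra high" reasoning-effort setting maps to xhigh, while the cap down to high now applies only when the legacy gpt-5.4 model is selected explicitly.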
@@ -436,6 +475,88 @@ class OpenAICodexRuntimeConfigTests(unittest.TestCase):
         self.assertIn("strict wrapper recovery", str(result.get("stdout") or "").lower())
         self.assertIn("backend-supplied direct command bootstrap", str(result.get("stdout") or ""))
 
+    def test_run_codex_task_recovers_when_default_model_requires_newer_codex(self) -> None:
+        with tempfile.TemporaryDirectory(prefix="pushpals-codex-model-compat-") as temp_dir:
+            repo = Path(temp_dir) / "repo"
+            repo.mkdir(parents=True, exist_ok=True)
+            (repo / "README.md").write_text("# model compatibility test\n", encoding="utf-8")
+            subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True, text=True)
+            subprocess.run(
+                ["git", "config", "user.name", "PushPals Test"],
+                cwd=repo,
+                check=True,
+                capture_output=True,
+                text=True,
+            )
+            subprocess.run(
+                ["git", "config", "user.email", "pushpals-tests@example.com"],
+                cwd=repo,
+                check=True,
+                capture_output=True,
+                text=True,
+            )
+            subprocess.run(["git", "add", "README.md"], cwd=repo, check=True, capture_output=True, text=True)
+            subprocess.run(
+                ["git", "commit", "-m", "chore: seed model compatibility repo"],
+                cwd=repo,
+                check=True,
+                capture_output=True,
+                text=True,
+            )
+
+            stub_path = Path(temp_dir) / "fake_codex_model_compat.py"
+            stub_path.write_text(
+                "\n".join(
+                    [
+                        "from pathlib import Path",
+                        "import sys",
+                        "",
+                        "argv = sys.argv[1:]",
+                        "model = ''",
+                        "last_message_path = None",
+                        "for index, arg in enumerate(argv):",
+                        "    if arg == '-m' and index + 1 < len(argv):",
+                        "        model = argv[index + 1]",
+                        "    if arg == '--output-last-message' and index + 1 < len(argv):",
+                        "        last_message_path = argv[index + 1]",
+                        "",
+                        "if model == 'gpt-5.5':",
+                        "    print(\"ERROR: {'detail': \\\"The 'gpt-5.5' model requires a newer version of Codex. Please upgrade to the latest app or CLI and try again.\\\"}\", file=sys.stderr)",
+                        "    sys.exit(1)",
+                        "",
+                        "if model == 'gpt-5.4':",
+                        "    if last_message_path:",
+                        "        Path(last_message_path).write_text('Recovered on legacy model fallback.', encoding='utf-8')",
+                        "    print('item.completed | Used legacy model fallback.', flush=True)",
+                        "    sys.exit(0)",
+                        "",
+                        "print(f'unexpected model {model}', file=sys.stderr)",
+                        "sys.exit(2)",
+                    ]
+                ),
+                encoding="utf-8",
+            )
+
+            env_overrides = {
+                "PUSHPALS_OPENAI_CODEX_BIN_JSON": json.dumps([sys.executable, str(stub_path)]),
+                "PUSHPALS_OPENAI_CODEX_AUTH_MODE": "api_key",
+                "OPENAI_API_KEY": "pushpals-model-compat-test-key",
+                "WORKERPALS_OPENAI_CODEX_TIMEOUT_S": "10",
+                "WORKERPALS_OPENAI_CODEX_PROGRESS_LOG_INTERVAL_S": "1",
+            }
+            with mock.patch.dict(os.environ, env_overrides, clear=False):
+                result = _run_codex_task(
+                    str(repo),
+                    "Use the configured Codex model.",
+                    [],
+                )
+
+            self.assertTrue(result.get("ok"), result)
+            stdout = str(result.get("stdout") or "")
+            self.assertIn("rejected default model gpt-5.5", stdout.lower())
+            self.assertIn("gpt-5.4", stdout)
+            self.assertIn("Recovered on legacy model fallback.", stdout)
+
     def test_usage_falls_back_to_estimate_when_trace_has_no_usage(self) -> None:
         usage = _usage_from_trace_or_estimate({}, "abc" * 30, "done", model="gpt-5.4")
         self.assertTrue(usage["estimated"])