@pushpalsdev/cli 1.0.68 → 1.0.70
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/pushpals-cli.js +31 -3
- package/package.json +1 -1
- package/runtime/configs/default.toml +1 -1
- package/runtime/configs/local.example.toml +4 -4
- package/runtime/sandbox/.pushpals-remotebuddy-fallback.js +19 -7
- package/runtime/sandbox/apps/workerpals/src/backends/openai_codex/openai_codex_executor.py +4 -3
- package/runtime/sandbox/apps/workerpals/src/backends/openai_codex/test_openai_codex_runtime_config.py +7 -1
- package/runtime/sandbox/apps/workerpals/src/execute_job.ts +2 -1
- package/runtime/sandbox/configs/default.toml +1 -1
- package/runtime/sandbox/configs/local.example.toml +4 -4
- package/runtime/sandbox/packages/shared/src/config.ts +14 -6

package/dist/pushpals-cli.js
CHANGED

@@ -456,6 +456,8 @@ var DEFAULT_REMOTEBUDDY_MEMORY_MAX_RECALL_ITEMS = 12;
 var DEFAULT_REMOTEBUDDY_MEMORY_MAX_RECALL_CHARS = 2400;
 var DEFAULT_REMOTEBUDDY_MEMORY_MAX_SUMMARY_CHARS = 420;
 var DEFAULT_REMOTEBUDDY_MEMORY_RETENTION_DAYS = 30;
+var DEFAULT_OPENAI_CODEX_MODEL = "gpt-5.5";
+var DEFAULT_OPENAI_CODEX_REASONING_EFFORT = "xhigh";
 var cachedConfig = null;
 var cachedConfigKey = "";
 function firstNonEmpty(...values) {
@@ -486,7 +488,7 @@ function parseIntEnv(name) {
 function parseTomlFile(path) {
   if (!existsSync2(path))
     return {};
-  const raw = readFileSync(path, "utf-8");
+  const raw = readFileSync(path, "utf-8").replace(/^\uFEFF/, "");
   const parsed = Bun.TOML.parse(raw);
   if (!parsed || typeof parsed !== "object" || Array.isArray(parsed))
     return {};
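
A UTF-8 byte-order mark at the start of a config file decodes to a single U+FEFF character glued onto the first TOML key, which can make an otherwise valid file fail to parse. A standalone sketch of the guard added above (stripBom and the sample input are illustrative, not part of the bundle):

    // Strip a leading UTF-8 BOM (U+FEFF) before handing text to a TOML parser.
    function stripBom(text: string): string {
      return text.replace(/^\uFEFF/, "");
    }

    const raw = '\uFEFFreasoning_effort = "xhigh"\n';
    console.log(raw.charCodeAt(0).toString(16));              // "feff": the BOM survives a utf-8 read
    console.log(stripBom(raw).startsWith("reasoning_effort")); // true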
@@ -651,10 +653,14 @@ function resolveLlmConfig(serviceNode, envPrefix, defaults, globalSessionId) {
   const llmNode = getObject(serviceNode, "llm");
   const backend = normalizeBackend(firstNonEmpty(process.env[`${envPrefix}_LLM_BACKEND`], asString(llmNode.backend, defaults.backend), defaults.backend));
   const endpoint = firstNonEmpty(process.env[`${envPrefix}_LLM_ENDPOINT`], asString(llmNode.endpoint, defaults.endpoint), defaults.endpoint);
-  const
+  const envModel = firstNonEmpty(process.env[`${envPrefix}_LLM_MODEL`]);
+  const configuredFileModel = firstNonEmpty(asString(llmNode.model, ""));
+  const configuredModel = firstNonEmpty(envModel, configuredFileModel);
+  const modelFallback = backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_MODEL : defaults.model;
+  const model = backend === "openai_codex" && !envModel && (!configuredFileModel || configuredFileModel === defaults.model) ? DEFAULT_OPENAI_CODEX_MODEL : firstNonEmpty(configuredModel, modelFallback) ?? modelFallback;
   const sessionId = firstNonEmpty(process.env[`${envPrefix}_LLM_SESSION_ID`], asString(llmNode.session_id, defaults.sessionId), process.env.PUSHPALS_LLM_SESSION_ID, globalSessionId);
   const apiKey = firstNonEmpty(process.env[`${envPrefix}_LLM_API_KEY`], defaultApiKeyForBackend(backend, endpoint));
-  const reasoningEffort = firstNonEmpty(process.env[`${envPrefix}_LLM_REASONING_EFFORT`], asString(llmNode.reasoning_effort, ""));
+  const reasoningEffort = firstNonEmpty(process.env[`${envPrefix}_LLM_REASONING_EFFORT`], asString(llmNode.reasoning_effort, ""), backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_REASONING_EFFORT : "");
   const codexAuthMode = firstNonEmpty(process.env[`${envPrefix}_LLM_CODEX_AUTH_MODE`], asString(llmNode.codex_auth_mode, ""));
   const codexBin = firstNonEmpty(process.env[`${envPrefix}_LLM_CODEX_BIN`], asString(llmNode.codex_bin, ""));
   const codexTimeoutMs = Math.max(1e4, asInt(parseIntEnv(`${envPrefix}_LLM_CODEX_TIMEOUT_MS`) ?? llmNode.codex_timeout_ms, 120000));
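
The rewritten model selection resolves in three layers: an explicit `${envPrefix}_LLM_MODEL` environment variable always wins; a model from the config file wins unless it merely repeats the generic default; otherwise an openai_codex backend falls back to the new codex default. A hedged restatement with a simplified firstNonEmpty (resolveModel and the sample values are illustrative, not the bundle's API):

    const DEFAULT_OPENAI_CODEX_MODEL = "gpt-5.5";

    function firstNonEmpty(...values: Array<string | undefined>): string | undefined {
      return values.find((v) => typeof v === "string" && v.trim() !== "");
    }

    function resolveModel(
      envModel: string | undefined,  // ${envPrefix}_LLM_MODEL
      fileModel: string | undefined, // llm.model from the TOML file
      backend: string,
      defaultsModel: string,         // generic default, e.g. "local-model"
    ): string {
      const fallback = backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_MODEL : defaultsModel;
      // A file value that just mirrors the generic default counts as unset for
      // codex backends, so they are upgraded to the codex default model.
      if (backend === "openai_codex" && !envModel && (!fileModel || fileModel === defaultsModel)) {
        return DEFAULT_OPENAI_CODEX_MODEL;
      }
      return firstNonEmpty(envModel, fileModel, fallback) ?? fallback;
    }

    resolveModel(undefined, undefined, "openai_codex", "local-model");      // "gpt-5.5"
    resolveModel(undefined, "local-model", "openai_codex", "local-model");  // "gpt-5.5"
    resolveModel("gpt-5.4", undefined, "openai_codex", "local-model");      // "gpt-5.4" (env wins)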
@@ -2183,6 +2189,8 @@ function parseSemverFromPackageVersion(value) {
   const raw = String(value ?? "").trim();
   if (!raw)
     return "";
+  if (raw === "0.0.0-dev")
+    return "";
   const match = raw.match(/^\d+\.\d+\.\d+(?:[-.][0-9A-Za-z.-]+)?$/);
   return match ? raw : "";
 }
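
Without the early return, "0.0.0-dev" would slip through: the trailing "-dev" is a syntactically valid prerelease tag, so the regex accepts it. The explicit check makes dev builds report "no version" instead. A small sketch (parseSemver is an illustrative stand-in for the bundle's parseSemverFromPackageVersion):

    function parseSemver(value: unknown): string {
      const raw = String(value ?? "").trim();
      if (!raw) return "";
      if (raw === "0.0.0-dev") return ""; // dev placeholder, not a comparable version
      const match = raw.match(/^\d+\.\d+\.\d+(?:[-.][0-9A-Za-z.-]+)?$/);
      return match ? raw : "";
    }

    parseSemver("1.0.70");       // "1.0.70"
    parseSemver("0.0.0-dev");    // "" (would otherwise match the regex)
    parseSemver("not-a-semver"); // ""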
@@ -3053,6 +3061,13 @@ async function cleanupLingeringWorkerpalWarmContainers(opts) {
   ], opts.repoRoot, opts.env, commandTimeoutMs);
   if (!list.ok) {
     const detail = list.stderr || list.stdout || `exit ${list.exitCode}`;
+    if (isDockerUnavailableDetail(detail)) {
+      return {
+        ok: true,
+        detail: `docker unavailable; skipped WorkerPal warm-container cleanup: ${detail}`,
+        removed: 0
+      };
+    }
     return {
       ok: false,
       detail: `failed to inspect lingering WorkerPal warm containers: ${detail}`,
@@ -3106,6 +3121,14 @@ async function cleanupLocalWorkerpalSandboxImage(opts) {
       imageName
     };
   }
+  if (isDockerUnavailableDetail(detail)) {
+    return {
+      ok: true,
+      detail: `docker unavailable; skipped WorkerPal sandbox image cleanup: ${detail}`,
+      removed: false,
+      imageName
+    };
+  }
   return {
     ok: false,
     detail: `failed to remove local WorkerPal sandbox image ${imageName}: ${detail}`,
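
Both cleanup helpers now apply the same policy: when the failure text indicates Docker itself is unreachable, report a successful no-op rather than a cleanup error, since there is nothing to clean up without a daemon. The predicate they call is defined in the next hunk. A minimal sketch of the pattern (the result shape and names here are illustrative):

    interface CleanupResult {
      ok: boolean;
      detail: string;
      removed: number;
    }

    function classifyCleanupFailure(
      detail: string,
      dockerUnavailable: (d: string) => boolean,
    ): CleanupResult {
      if (dockerUnavailable(detail)) {
        // No daemon means no lingering containers: a successful no-op.
        return { ok: true, detail: `docker unavailable; skipped cleanup: ${detail}`, removed: 0 };
      }
      return { ok: false, detail: `cleanup failed: ${detail}`, removed: 0 };
    }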
@@ -3205,6 +3228,10 @@ async function cleanupLingeringPushPalsGitWorktrees(opts) {
 function isMissingDockerImageDetail(detail) {
   return /\b(no such object|no such image|not found)\b/i.test(String(detail ?? ""));
 }
+function isDockerUnavailableDetail(detail) {
+  const text = String(detail ?? "");
+  return /cannot connect to (the )?docker daemon/i.test(text) || /docker daemon is not running/i.test(text) || /failed to connect to the docker api/i.test(text) || /docker_engine/i.test(text) || /is the docker daemon running/i.test(text) || /docker(?:\.exe)?: command not found/i.test(text) || /spawn\s+docker(?:\.exe)?\s+ENOENT/i.test(text) || /docker(?:\.exe)?'?\s+is not recognized as an internal or external command/i.test(text);
+}
 async function inspectDockerImageRuntimeTag(dockerExecutable, imageName, cwd, env, timeoutMs = WORKERPAL_IMAGE_INSPECT_TIMEOUT_MS) {
   const inspect = await runCommandWithEnv([
     dockerExecutable,
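
The predicate ORs together the daemon-unreachable and docker-not-installed messages commonly seen on Linux, macOS, and Windows (including the //./pipe/docker_engine named-pipe error and cmd.exe's "is not recognized"). A self-contained copy with representative inputs (the sample strings are illustrative, not captured output):

    function isDockerUnavailableDetail(detail: unknown): boolean {
      const text = String(detail ?? "");
      return (
        /cannot connect to (the )?docker daemon/i.test(text) ||
        /docker daemon is not running/i.test(text) ||
        /failed to connect to the docker api/i.test(text) ||
        /docker_engine/i.test(text) ||
        /is the docker daemon running/i.test(text) ||
        /docker(?:\.exe)?: command not found/i.test(text) ||
        /spawn\s+docker(?:\.exe)?\s+ENOENT/i.test(text) ||
        /docker(?:\.exe)?'?\s+is not recognized as an internal or external command/i.test(text)
      );
    }

    isDockerUnavailableDetail(
      "Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?",
    );                                                // true (Linux/macOS daemon down)
    isDockerUnavailableDetail("spawn docker ENOENT"); // true (binary missing)
    isDockerUnavailableDetail("No such image: foo");  // false: image errors stay with isMissingDockerImageDetail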
@@ -5413,6 +5440,7 @@ export {
   normalizeRepoPathForComparison,
   normalizeCliInteractiveMessage,
   normalizeChildProcessEnv,
+  isDockerUnavailableDetail,
   isCliExitCommand,
   injectMonitoringHubBootstrap,
   formatWorkerExecutionReadinessLines,
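
Exporting the predicate makes it reachable from code that consumes the bundle, such as tests. A hypothetical consumer, assuming the dist bundle is resolvable as a module from the caller's location:

    // Hypothetical import path; adjust to wherever the bundle resolves.
    import { isDockerUnavailableDetail } from "./dist/pushpals-cli.js";

    if (isDockerUnavailableDetail("Cannot connect to the Docker daemon")) {
      console.log("skipping docker-dependent cleanup");
    }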

package/runtime/configs/default.toml
CHANGED

@@ -199,7 +199,7 @@ session_id = "workerpals-dev"
 [workerpals.openai_codex]
 timeout_ms = 7200000
 progress_log_interval_s = 30
-reasoning_effort = "
+reasoning_effort = "xhigh"
 approval_policy = "never"
 sandbox = "workspace-write"
 color = "never"

package/runtime/configs/local.example.toml
CHANGED

@@ -12,7 +12,7 @@ model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
-reasoning_effort = "
+reasoning_effort = "xhigh"
 
 [remotebuddy.llm]
 backend = "openai_codex"
@@ -20,7 +20,7 @@ model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
-reasoning_effort = "
+reasoning_effort = "xhigh"
 
 [remotebuddy]
 min_workerpals = 1
@@ -47,7 +47,7 @@ model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
-reasoning_effort = "
+reasoning_effort = "xhigh"
 
 [workerpals]
 executor = "openai_codex"
@@ -93,7 +93,7 @@ bin = "bun x --yes @openai/codex"
 timeout_ms = 7200000
 progress_log_interval_s = 30
 # timeout_s = 120 # optional; if set, overrides timeout_ms
-reasoning_effort = "
+reasoning_effort = "xhigh"
 approval_policy = "never"
 sandbox = "workspace-write"
 color = "never"

package/runtime/sandbox/.pushpals-remotebuddy-fallback.js
CHANGED

@@ -749,6 +749,8 @@ var DEFAULT_REMOTEBUDDY_MEMORY_MAX_RECALL_ITEMS = 12;
 var DEFAULT_REMOTEBUDDY_MEMORY_MAX_RECALL_CHARS = 2400;
 var DEFAULT_REMOTEBUDDY_MEMORY_MAX_SUMMARY_CHARS = 420;
 var DEFAULT_REMOTEBUDDY_MEMORY_RETENTION_DAYS = 30;
+var DEFAULT_OPENAI_CODEX_MODEL = "gpt-5.5";
+var DEFAULT_OPENAI_CODEX_REASONING_EFFORT = "xhigh";
 var REDACTED_LOG_VALUE = "[REDACTED]";
 var SENSITIVE_CONFIG_KEY_PATTERN = /(token|secret|password|api[_-]?key|private[_-]?key|access[_-]?key)/i;
 var cachedConfig = null;
@@ -781,7 +783,7 @@ function parseIntEnv(name) {
 function parseTomlFile(path) {
   if (!existsSync2(path))
     return {};
-  const raw = readFileSync3(path, "utf-8");
+  const raw = readFileSync3(path, "utf-8").replace(/^\uFEFF/, "");
   const parsed = Bun.TOML.parse(raw);
   if (!parsed || typeof parsed !== "object" || Array.isArray(parsed))
     return {};
@@ -946,10 +948,14 @@ function resolveLlmConfig(serviceNode, envPrefix, defaults, globalSessionId) {
   const llmNode = getObject(serviceNode, "llm");
   const backend = normalizeBackend(firstNonEmpty(process.env[`${envPrefix}_LLM_BACKEND`], asString(llmNode.backend, defaults.backend), defaults.backend));
   const endpoint = firstNonEmpty(process.env[`${envPrefix}_LLM_ENDPOINT`], asString(llmNode.endpoint, defaults.endpoint), defaults.endpoint);
-  const
+  const envModel = firstNonEmpty(process.env[`${envPrefix}_LLM_MODEL`]);
+  const configuredFileModel = firstNonEmpty(asString(llmNode.model, ""));
+  const configuredModel = firstNonEmpty(envModel, configuredFileModel);
+  const modelFallback = backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_MODEL : defaults.model;
+  const model = backend === "openai_codex" && !envModel && (!configuredFileModel || configuredFileModel === defaults.model) ? DEFAULT_OPENAI_CODEX_MODEL : firstNonEmpty(configuredModel, modelFallback) ?? modelFallback;
   const sessionId = firstNonEmpty(process.env[`${envPrefix}_LLM_SESSION_ID`], asString(llmNode.session_id, defaults.sessionId), process.env.PUSHPALS_LLM_SESSION_ID, globalSessionId);
   const apiKey = firstNonEmpty(process.env[`${envPrefix}_LLM_API_KEY`], defaultApiKeyForBackend(backend, endpoint));
-  const reasoningEffort = firstNonEmpty(process.env[`${envPrefix}_LLM_REASONING_EFFORT`], asString(llmNode.reasoning_effort, ""));
+  const reasoningEffort = firstNonEmpty(process.env[`${envPrefix}_LLM_REASONING_EFFORT`], asString(llmNode.reasoning_effort, ""), backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_REASONING_EFFORT : "");
   const codexAuthMode = firstNonEmpty(process.env[`${envPrefix}_LLM_CODEX_AUTH_MODE`], asString(llmNode.codex_auth_mode, ""));
   const codexBin = firstNonEmpty(process.env[`${envPrefix}_LLM_CODEX_BIN`], asString(llmNode.codex_bin, ""));
   const codexTimeoutMs = Math.max(1e4, asInt(parseIntEnv(`${envPrefix}_LLM_CODEX_TIMEOUT_MS`) ?? llmNode.codex_timeout_ms, 120000));
@@ -1727,7 +1733,8 @@ var DEFAULT_LMSTUDIO_ENDPOINT = "http://127.0.0.1:1234";
 var DEFAULT_OLLAMA_ENDPOINT = "http://127.0.0.1:11434/api/chat";
 var DEFAULT_OPENAI_ENDPOINT = "https://api.openai.com/v1/chat/completions";
 var DEFAULT_MODEL = "local-model";
-var DEFAULT_CODEX_MODEL = "gpt-5.
+var DEFAULT_CODEX_MODEL = "gpt-5.5";
+var DEFAULT_CODEX_REASONING_EFFORT = "xhigh";
 var DEFAULT_CODEX_TIMEOUT_MS = 120000;
 var DEFAULT_LMSTUDIO_CONTEXT_WINDOW = 4096;
 var DEFAULT_LMSTUDIO_MIN_OUTPUT_TOKENS = 256;
@@ -1842,13 +1849,14 @@ function codexTimeoutMs(configuredTimeoutMs) {
 function codexReasoningEffort(configured, model) {
   const raw = (configured ?? "").trim().toLowerCase();
   const supportsExtraHigh = !/^(gpt-5\.4(?:$|-)|codex-1p(?:$|-))/i.test(model.trim());
+  const defaultEffort = supportsExtraHigh ? DEFAULT_CODEX_REASONING_EFFORT : "high";
   if (raw === "low" || raw === "medium" || raw === "high" || raw === "xhigh") {
     return raw === "xhigh" && !supportsExtraHigh ? "high" : raw;
   }
   if (raw === "extra high" || raw === "extra-high" || raw === "extrahigh" || raw === "x-high") {
     return supportsExtraHigh ? "xhigh" : "high";
   }
-  return
+  return defaultEffort;
 }
 function normalizeCodexModel(rawModel) {
   const model = rawModel.trim();
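
codexReasoningEffort now always returns a usable value: canonical efforts pass through, "extra high" spellings collapse to "xhigh", models that cannot run at xhigh (the gpt-5.4 and codex-1p prefixes) are capped at "high", and anything unrecognized falls back to a model-dependent default instead of returning undefined. A standalone restatement for illustration:

    function codexReasoningEffort(configured: string | undefined, model: string): string {
      const raw = (configured ?? "").trim().toLowerCase();
      const supportsExtraHigh = !/^(gpt-5\.4(?:$|-)|codex-1p(?:$|-))/i.test(model.trim());
      const defaultEffort = supportsExtraHigh ? "xhigh" : "high";
      if (raw === "low" || raw === "medium" || raw === "high" || raw === "xhigh") {
        return raw === "xhigh" && !supportsExtraHigh ? "high" : raw;
      }
      if (raw === "extra high" || raw === "extra-high" || raw === "extrahigh" || raw === "x-high") {
        return supportsExtraHigh ? "xhigh" : "high";
      }
      return defaultEffort;
    }

    codexReasoningEffort("extra-high", "gpt-5.5"); // "xhigh"
    codexReasoningEffort("xhigh", "gpt-5.4");      // "high" (model cap)
    codexReasoningEffort(undefined, "gpt-5.5");    // "xhigh" (new default)
    codexReasoningEffort(undefined, "codex-1p");   // "high"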
@@ -2046,7 +2054,11 @@ function resolveServiceLlmConfig(opts = {}) {
   const fallbackEndpoint = explicitBackend === "ollama" ? DEFAULT_OLLAMA_ENDPOINT : explicitBackend === "openai" || explicitBackend === "openai_codex" ? DEFAULT_OPENAI_ENDPOINT : DEFAULT_LMSTUDIO_ENDPOINT;
   const endpoint = firstNonEmpty2(opts.endpoint, serviceLlmConfig.endpoint, fallbackEndpoint);
   let backend = configuredBackend(endpoint ?? "", explicitBackend);
-  const
+  const configuredModel = firstNonEmpty2(opts.model, serviceLlmConfig.model, "");
+  let model = firstNonEmpty2(configuredModel, backend === "openai_codex" ? DEFAULT_CODEX_MODEL : DEFAULT_MODEL) ?? DEFAULT_MODEL;
+  if (backend === "openai_codex" && model === DEFAULT_MODEL) {
+    model = DEFAULT_CODEX_MODEL;
+  }
   const requestedCodexAuthMode = firstNonEmpty2(opts.codexAuthMode, serviceLlmConfig.codexAuthMode, "") ?? "";
   const openAiApiKey = (process.env.OPENAI_API_KEY ?? "").trim();
   const apiKey = firstNonEmpty2(opts.apiKey, serviceLlmConfig.apiKey, backend === "lmstudio" ? "lmstudio" : backend === "openai" || backend === "openai_codex" ? openAiApiKey : "") ?? "";
@@ -2061,7 +2073,7 @@ function resolveServiceLlmConfig(opts = {}) {
     model,
     apiKey,
     sessionId,
-    reasoningEffort: firstNonEmpty2(opts.reasoningEffort, serviceLlmConfig.reasoningEffort, "") ?? "",
+    reasoningEffort: firstNonEmpty2(opts.reasoningEffort, serviceLlmConfig.reasoningEffort, backend === "openai_codex" ? DEFAULT_CODEX_REASONING_EFFORT : "") ?? "",
     codexAuthMode: requestedCodexAuthMode,
     codexBin: firstNonEmpty2(opts.codexBin, serviceLlmConfig.codexBin, "") ?? "",
     codexTimeoutMs: opts.codexTimeoutMs ?? serviceLlmConfig.codexTimeoutMs,

package/runtime/sandbox/apps/workerpals/src/backends/openai_codex/openai_codex_executor.py
CHANGED

@@ -186,7 +186,7 @@ class OpenAICodexRuntimeConfig:
         reasoning_effort=cfg.get_str(
             env_names=("WORKERPALS_LLM_REASONING_EFFORT", "WORKERPALS_OPENAI_CODEX_REASONING_EFFORT"),
             config_paths=("workerpals.llm.reasoning_effort", "workerpals.openai_codex.reasoning_effort"),
-            default="
+            default="xhigh",
         ),
         approval_policy=cfg.get_str(
             env_names=("WORKERPALS_OPENAI_CODEX_APPROVAL_POLICY",),
@@ -441,6 +441,7 @@ def _resolve_communicate_timeout_seconds(config: OpenAICodexRuntimeConfig) -> Op
 def _resolve_reasoning_effort(config: OpenAICodexRuntimeConfig, model: str = DEFAULT_CODEX_MODEL) -> str:
     raw = config.reasoning_effort
     normalized = str(raw).strip().lower()
+    default_effort = "xhigh" if _model_supports_xhigh_reasoning(model) else "high"
     if normalized in {"extra high", "extra-high", "extrahigh", "x-high"}:
         normalized = "xhigh"
     if normalized == "xhigh" and not _model_supports_xhigh_reasoning(model):
@@ -452,9 +453,9 @@ def _resolve_reasoning_effort(config: OpenAICodexRuntimeConfig, model: str = DEF
         return normalized
     log.info(
         "Invalid workerpals.openai_codex.reasoning_effort="
-        f"{raw!r}; using default
+        f"{raw!r}; using default {default_effort!r}. Allowed: low, medium, high, xhigh."
     )
-    return
+    return default_effort
 
 
 def _resolve_progress_log_interval_seconds(config: OpenAICodexRuntimeConfig) -> int:

package/runtime/sandbox/apps/workerpals/src/backends/openai_codex/test_openai_codex_runtime_config.py
CHANGED

@@ -77,9 +77,15 @@ class OpenAICodexRuntimeConfigTests(unittest.TestCase):
         self.assertEqual(cfg.approval_policy, "never")
         self.assertEqual(cfg.sandbox, "workspace-write")
         self.assertEqual(cfg.color, "never")
-        self.assertEqual(cfg.reasoning_effort, "
+        self.assertEqual(cfg.reasoning_effort, "xhigh")
         self.assertFalse(cfg.json_output)
 
+    def test_reasoning_effort_defaults_to_extra_high_for_default_gpt_5_5(self) -> None:
+        cfg = OpenAICodexRuntimeConfig.from_sources(
+            SettingsResolver(env={}, config_loader=lambda: {}),
+        )
+        self.assertEqual(_resolve_reasoning_effort(cfg), "xhigh")
+
     def test_resolve_codex_command_prefix_resolves_configured_executable(self) -> None:
         cfg = OpenAICodexRuntimeConfig.from_sources(
             SettingsResolver(

package/runtime/sandbox/apps/workerpals/src/execute_job.ts
CHANGED

@@ -2850,6 +2850,7 @@ function normalizeCodexReasoningEffort(
     .trim()
     .toLowerCase();
   const supportsExtraHigh = !/^(gpt-5\.4(?:$|-)|codex-1p(?:$|-))/i.test(String(model ?? "").trim());
+  const defaultEffort = supportsExtraHigh ? "xhigh" : "high";
   if (
     normalized === "low" ||
     normalized === "medium" ||
@@ -2866,7 +2867,7 @@ function normalizeCodexReasoningEffort(
   ) {
     return supportsExtraHigh ? "xhigh" : "high";
   }
-  return
+  return defaultEffort;
 }
 
 async function generateCommitMessageFromDiff(

package/runtime/sandbox/configs/default.toml
CHANGED

@@ -199,7 +199,7 @@ session_id = "workerpals-dev"
 [workerpals.openai_codex]
 timeout_ms = 7200000
 progress_log_interval_s = 30
-reasoning_effort = "
+reasoning_effort = "xhigh"
 approval_policy = "never"
 sandbox = "workspace-write"
 color = "never"

package/runtime/sandbox/configs/local.example.toml
CHANGED

@@ -12,7 +12,7 @@ model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
-reasoning_effort = "
+reasoning_effort = "xhigh"
 
 [remotebuddy.llm]
 backend = "openai_codex"
@@ -20,7 +20,7 @@ model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
-reasoning_effort = "
+reasoning_effort = "xhigh"
 
 [remotebuddy]
 min_workerpals = 1
@@ -47,7 +47,7 @@ model = "gpt-5.5"
 codex_auth_mode = "chatgpt"
 codex_bin = "bun x --yes @openai/codex"
 codex_timeout_ms = 120000
-reasoning_effort = "
+reasoning_effort = "xhigh"
 
 [workerpals]
 executor = "openai_codex"
@@ -93,7 +93,7 @@ bin = "bun x --yes @openai/codex"
 timeout_ms = 7200000
 progress_log_interval_s = 30
 # timeout_s = 120 # optional; if set, overrides timeout_ms
-reasoning_effort = "
+reasoning_effort = "xhigh"
 approval_policy = "never"
 sandbox = "workspace-write"
 color = "never"

package/runtime/sandbox/packages/shared/src/config.ts
CHANGED

@@ -29,6 +29,8 @@ const DEFAULT_REMOTEBUDDY_MEMORY_MAX_RECALL_ITEMS = 12;
 const DEFAULT_REMOTEBUDDY_MEMORY_MAX_RECALL_CHARS = 2400;
 const DEFAULT_REMOTEBUDDY_MEMORY_MAX_SUMMARY_CHARS = 420;
 const DEFAULT_REMOTEBUDDY_MEMORY_RETENTION_DAYS = 30;
+const DEFAULT_OPENAI_CODEX_MODEL = "gpt-5.5";
+const DEFAULT_OPENAI_CODEX_REASONING_EFFORT = "xhigh";
 const REDACTED_LOG_VALUE = "[REDACTED]";
 const SENSITIVE_CONFIG_KEY_PATTERN =
   /(token|secret|password|api[_-]?key|private[_-]?key|access[_-]?key)/i;
@@ -317,7 +319,7 @@ function parseIntEnv(name: string): number | null {
 
 function parseTomlFile(path: string): TomlObject {
   if (!existsSync(path)) return {};
-  const raw = readFileSync(path, "utf-8");
+  const raw = readFileSync(path, "utf-8").replace(/^\uFEFF/, "");
   const parsed = Bun.TOML.parse(raw) as unknown;
   if (!parsed || typeof parsed !== "object" || Array.isArray(parsed)) return {};
   return parsed as TomlObject;
@@ -503,11 +505,16 @@ function resolveLlmConfig(
     asString(llmNode.endpoint, defaults.endpoint),
     defaults.endpoint,
   );
-  const
-
-
-
-
+  const envModel = firstNonEmpty(process.env[`${envPrefix}_LLM_MODEL`]);
+  const configuredFileModel = firstNonEmpty(asString(llmNode.model, ""));
+  const configuredModel = firstNonEmpty(envModel, configuredFileModel);
+  const modelFallback = backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_MODEL : defaults.model;
+  const model =
+    backend === "openai_codex" &&
+    !envModel &&
+    (!configuredFileModel || configuredFileModel === defaults.model)
+      ? DEFAULT_OPENAI_CODEX_MODEL
+      : (firstNonEmpty(configuredModel, modelFallback) ?? modelFallback);
   const sessionId = firstNonEmpty(
     process.env[`${envPrefix}_LLM_SESSION_ID`],
     asString(llmNode.session_id, defaults.sessionId),
@@ -521,6 +528,7 @@ function resolveLlmConfig(
   const reasoningEffort = firstNonEmpty(
     process.env[`${envPrefix}_LLM_REASONING_EFFORT`],
    asString(llmNode.reasoning_effort, ""),
+    backend === "openai_codex" ? DEFAULT_OPENAI_CODEX_REASONING_EFFORT : "",
   );
   const codexAuthMode = firstNonEmpty(
     process.env[`${envPrefix}_LLM_CODEX_AUTH_MODE`],