codex-exec-json 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,75 @@
1
+ # codex-exec-json
2
+
3
+ This is a tiny wrapper around `codex exec`. It runs a one-shot, non-interactive Codex task, forces a JSON-shaped final answer, writes that JSON to a file, prints the file path, and exits. It is meant for agents and automations that want a predictable machine-readable output and no long-lived session.
4
+
5
+ ## Install / npx
6
+
7
+ ```bash
8
+ npx -y codex-exec-json "summarize this repo"
9
+ ```
10
+
11
+ ## Usage
12
+
13
+ ```bash
14
+ codex-exec-json [options] <prompt...>
15
+ ```
16
+
17
+ Options are `--schema <json>` for an inline JSON schema string, `--schema-file <path>` to load a schema from disk, `--out <path>` to control the output file path (default is `<tmpdir>/.codex-exec.<uuid>.json` in the system temporary directory), `--model <name>` to select a model, `--reasoning <level>` to set reasoning effort, `--codex-bin <path>` to point at a different Codex binary, `--yolo` to bypass approvals and sandboxing, `--keep-trace` to keep Codex session files, `--models` to print available models, `--models-json` to print models as JSON, and `--help` to show usage.
18
+
19
+ Pass through args to Codex after `--` (note that the prompt itself must appear before the `--` separator; everything after it is forwarded to Codex untouched):
20
+
21
+ ```bash
22
+ codex-exec-json "update the README" -- --add-dir /tmp
23
+ ```
24
+
25
+ ## For LLMs and agents
26
+
27
+ The point is to return a JSON file and exit. Treat stdout as the file path and read the JSON from disk.
28
+
29
+ ```bash
30
+ npx -y codex-exec-json --schema '{"type":"object","properties":{"summary":{"type":"string"},"next":{"type":"array","items":{"type":"string"}}},"required":["summary","next"]}' "Summarize and list next steps"
31
+ ```
32
+
33
+ Example flow:
34
+
35
+ ```bash
36
+ OUT=$(npx -y codex-exec-json "Describe the repo in JSON")
37
+ cat "$OUT"
38
+ ```
39
+
40
+ ## Models and reasoning levels
41
+
42
+ To list available models and their supported reasoning levels with descriptions, run:
43
+
44
+ ```bash
45
+ npx -y codex-exec-json --models
46
+ ```
47
+
48
+ For JSON output:
49
+
50
+ ```bash
51
+ npx -y codex-exec-json --models-json
52
+ ```
53
+
54
+ This uses your Codex auth and local cache. If the API is unavailable, it falls back to the built-in preset list.
55
+
56
+ ## Default schema
57
+
58
+ If no schema is provided, this default is used, so you always get a predictable JSON object:
59
+
60
+ ```json
61
+ {
62
+ "type": "object",
63
+ "additionalProperties": false,
64
+ "properties": {
65
+ "ok": { "type": "boolean" },
66
+ "result": { "type": "string" },
67
+ "error": { "type": "string" }
68
+ },
69
+ "required": ["ok", "result"]
70
+ }
71
+ ```
72
+
73
+ ## Skill
74
+
75
+ If you use the Codex skills format, a ready-to-pull skill is included at `skills/codex-exec-json/SKILL.md`.
package/bin/cli.js ADDED
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ import "../dist/index.js";
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ export {};
package/dist/index.js ADDED
@@ -0,0 +1,639 @@
1
+ #!/usr/bin/env node
2
+ import { spawn } from "node:child_process";
3
+ import { readFile, writeFile, readdir, rm } from "node:fs/promises";
4
+ import { existsSync } from "node:fs";
5
+ import os from "node:os";
6
+ import path from "node:path";
7
+ import crypto from "node:crypto";
// Schema applied when the caller supplies neither --schema nor --schema-file.
// Guarantees a predictable { ok, result, error? } object in the output file.
const DEFAULT_SCHEMA = {
  type: "object",
  additionalProperties: false,
  properties: {
    ok: { type: "boolean" },
    result: { type: "string" },
    error: { type: "string" },
  },
  required: ["ok", "result"],
};
// Built-in model presets used when neither the local cache nor the API can
// supply a models list. The reasoning-level descriptions repeat across
// presets, so they are produced by small builders; each call returns fresh
// objects so no preset aliases another.
const FALLBACK_MODELS = (() => {
  const level = (effort, description) => ({ effort, description });
  const lowGeneral = () => level("low", "Balances speed with some reasoning; useful for straightforward queries and short explanations");
  const mediumGeneral = () => level("medium", "Provides a solid balance of reasoning depth and latency for general-purpose tasks");
  const highMax = () => level("high", "Maximizes reasoning depth for complex or ambiguous problems");
  const xhigh = () => level("xhigh", "Extra high reasoning depth for complex problems");
  // Levels advertised by the codex-tuned frontier models.
  const codexLevels = () => [
    level("low", "Fast responses with lighter reasoning"),
    level("medium", "Balances speed and reasoning depth for everyday tasks"),
    level("high", "Greater reasoning depth for complex problems"),
    xhigh(),
  ];
  // Levels advertised by the "mini" codex variants.
  const miniLevels = () => [
    level("medium", "Dynamically adjusts reasoning based on the task"),
    highMax(),
  ];
  // Levels advertised by the earlier codex generations.
  const legacyCodexLevels = () => [
    level("low", "Fastest responses with limited reasoning"),
    level("medium", "Dynamically adjusts reasoning based on the task"),
    highMax(),
  ];
  // Levels advertised by the general-purpose models.
  const generalLevels = () => [lowGeneral(), mediumGeneral(), highMax()];
  const preset = (slug, description, levels) => ({
    slug,
    display_name: slug,
    description,
    default_reasoning_level: "medium",
    supported_reasoning_levels: levels,
    supported_in_api: true,
  });
  return [
    preset("gpt-5.2-codex", "Latest frontier agentic coding model.", codexLevels()),
    preset("gpt-5.1-codex-max", "Codex-optimized flagship for deep and fast reasoning.", codexLevels()),
    preset("gpt-5.1-codex-mini", "Optimized for codex. Cheaper, faster, but less capable.", miniLevels()),
    preset("gpt-5.2", "Latest frontier model with improvements across knowledge, reasoning and coding", [...generalLevels(), xhigh()]),
    preset("bengalfox", "bengalfox", codexLevels()),
    preset("boomslang", "boomslang", [...generalLevels(), xhigh()]),
    preset("gpt-5-codex", "Optimized for codex.", legacyCodexLevels()),
    preset("gpt-5-codex-mini", "Optimized for codex. Cheaper, faster, but less capable.", miniLevels()),
    preset("gpt-5.1-codex", "Optimized for codex.", legacyCodexLevels()),
    preset("gpt-5", "Broad world knowledge with strong general reasoning.", [
      level("minimal", "Fastest responses with little reasoning"),
      ...generalLevels(),
    ]),
    preset("gpt-5.1", "Broad world knowledge with strong general reasoning.", generalLevels()),
  ];
})();
// Help text printed for --help and when no prompt is given.
// Fixes: (1) the passthrough example previously placed the prompt AFTER "--",
// but parseArgs routes everything after "--" to codex untouched, so the
// wrapper would exit with "Missing prompt" — the prompt must come first.
// (2) The default output path uses os.tmpdir(), not literally /tmp.
const USAGE = `codex-exec-json [options] <prompt...>

Options:
  --schema <json>       Inline JSON schema string
  --schema-file <path>  Path to JSON schema file
  --out <path>          Output file path (default: <tmpdir>/.codex-exec.<uuid>.json)
  --model <name>        Codex model to use
  --reasoning <level>   Reasoning effort (none|minimal|low|medium|high|xhigh)
  --codex-bin <path>    Path to codex binary (default: codex)
  --yolo                Pass --dangerously-bypass-approvals-and-sandbox
  --keep-trace          Do not delete new session files
  --models              Print available models and reasoning levels
  --models-json         Print available models as JSON
  --help                Show help

Pass through args to codex after "--" (the prompt itself must come before "--"):
  codex-exec-json "do the thing" -- --add-dir /tmp`;
/**
 * Parse CLI arguments into { options, promptParts }.
 * Everything after a literal "--" is forwarded to codex untouched via
 * options.passThroughArgs; the remaining non-option tokens become the prompt.
 * --help/-h prints USAGE and exits 0; unknown options throw.
 */
function parseArgs(argv) {
  const options = {
    yolo: false,
    keepTrace: false,
    codexBin: "codex",
    showModels: false,
    showModelsJson: false,
    passThroughArgs: [],
  };
  const splitAt = argv.indexOf("--");
  const ownArgs = splitAt === -1 ? argv : argv.slice(0, splitAt);
  options.passThroughArgs = splitAt === -1 ? [] : argv.slice(splitAt + 1);

  // Options that consume the following token, mapped to the key they set.
  const valueOptions = new Map([
    ["--schema", "schemaJson"],
    ["--schema-file", "schemaFile"],
    ["--out", "outPath"],
    ["--model", "model"],
    ["--reasoning", "reasoning"],
    ["--codex-bin", "codexBin"],
  ]);
  // Boolean flags, mapped to the key they enable.
  const flagOptions = new Map([
    ["--yolo", "yolo"],
    ["--keep-trace", "keepTrace"],
    ["--models", "showModels"],
    ["--models-json", "showModelsJson"],
  ]);

  const promptParts = [];
  let index = 0;
  while (index < ownArgs.length) {
    const arg = ownArgs[index];
    if (arg === "--help" || arg === "-h") {
      console.log(USAGE);
      process.exit(0);
    }
    const valueKey = valueOptions.get(arg);
    if (valueKey !== undefined) {
      const value = ownArgs[index + 1];
      if (!value) {
        throw new Error(`${arg} requires a value`);
      }
      options[valueKey] = value;
      index += 2;
      continue;
    }
    const flagKey = flagOptions.get(arg);
    if (flagKey !== undefined) {
      options[flagKey] = true;
      index += 1;
      continue;
    }
    if (arg.startsWith("-")) {
      throw new Error(`Unknown option: ${arg}`);
    }
    promptParts.push(arg);
    index += 1;
  }
  return { options, promptParts };
}
// Read the entire stdin stream as UTF-8.
// Returns "" immediately when attached to a TTY, so interactive shells are
// never blocked waiting for piped input.
async function readStdin() {
  if (process.stdin.isTTY) {
    return "";
  }
  const collected = [];
  for await (const piece of process.stdin) {
    collected.push(Buffer.from(piece));
  }
  return Buffer.concat(collected).toString("utf8");
}
/**
 * Resolve the JSON schema to enforce on the model's final message.
 * Precedence: --schema-file, then --schema, then the built-in DEFAULT_SCHEMA.
 * Returns both the parsed object and its serialized form.
 * Throws if both --schema and --schema-file were given, or on invalid JSON.
 */
async function resolveSchema(options) {
  if (options.schemaJson && options.schemaFile) {
    throw new Error("Use only one of --schema or --schema-file");
  }
  let schemaObj;
  if (options.schemaFile) {
    schemaObj = JSON.parse(await readFile(options.schemaFile, "utf8"));
  } else if (options.schemaJson) {
    schemaObj = JSON.parse(options.schemaJson);
  } else {
    schemaObj = DEFAULT_SCHEMA;
  }
  return { schemaObj, schemaString: JSON.stringify(schemaObj) };
}
// Location of the Codex home directory; honors the CODEX_HOME override and
// falls back to ~/.codex.
function getCodexHome() {
  const override = process.env.CODEX_HOME;
  return override ? override : path.join(os.homedir(), ".codex");
}
/**
 * Recursively collect every file path under <codexHome>/sessions.
 * A missing sessions directory yields []; directories that cannot be read
 * (permissions, removed mid-walk) are silently skipped.
 */
async function listSessionFiles(codexHome) {
  const sessionsDir = path.join(codexHome, "sessions");
  if (!existsSync(sessionsDir)) {
    return [];
  }
  const files = [];
  const pending = [sessionsDir];
  while (pending.length > 0) {
    const current = pending.pop();
    if (!current) {
      break;
    }
    let entries;
    try {
      entries = await readdir(current, { withFileTypes: true });
    } catch {
      continue; // unreadable directory: skip it
    }
    for (const entry of entries) {
      const entryPath = path.join(current, entry.name);
      if (entry.isDirectory()) {
        pending.push(entryPath);
      } else if (entry.isFile()) {
        files.push(entryPath);
      }
    }
  }
  return files;
}
/**
 * Remove session files that appeared between the `before` and `after`
 * snapshots, then prune any directories left empty, walking upward but never
 * removing <codexHome>/sessions itself. All removals are best-effort.
 */
async function deleteNewSessionFiles(before, after, codexHome) {
  const known = new Set(before);
  const created = after.filter((file) => !known.has(file));
  for (const file of created) {
    try {
      await rm(file, { force: true });
    } catch {
      // best-effort cleanup; ignore failures
    }
  }
  const sessionsDir = path.join(codexHome, "sessions");
  for (const file of created) {
    let parent = path.dirname(file);
    while (parent !== sessionsDir && parent.startsWith(sessionsDir)) {
      let removedEmptyDir = false;
      try {
        const contents = await readdir(parent);
        if (contents.length === 0) {
          await rm(parent, { force: true, recursive: true });
          removedEmptyDir = true;
        }
      } catch {
        // unreadable dir: stop pruning this branch
      }
      if (!removedEmptyDir) {
        break;
      }
      parent = path.dirname(parent);
    }
  }
}
/**
 * Run `<codexBin> --version` and extract a semver triple from its output.
 * Returns "0.0.0" when no version-like string is found.
 * Fix: handle the spawn "error" event — previously a missing binary (ENOENT)
 * raised an unhandled 'error' event on the ChildProcess and crashed the whole
 * process instead of surfacing a readable error message.
 */
async function getCodexVersion(codexBin) {
  const child = spawn(codexBin, ["--version"], { stdio: ["ignore", "pipe", "pipe"] });
  let stdout = "";
  let stderr = "";
  child.stdout.on("data", (d) => {
    stdout += d.toString();
  });
  child.stderr.on("data", (d) => {
    stderr += d.toString();
  });
  const exitCode = await new Promise((resolve, reject) => {
    child.on("error", (err) => reject(new Error(`Failed to run ${codexBin}: ${err.message}`)));
    child.on("close", resolve);
  });
  if (exitCode !== 0) {
    if (stderr.trim()) {
      throw new Error(stderr.trim());
    }
    throw new Error("Failed to get codex version");
  }
  // Tolerates prefixes like "codex-cli 0.1.0" or "v0.1.0".
  const match = stdout.trim().match(/(\d+\.\d+\.\d+)/);
  return match ? match[1] : "0.0.0";
}
/**
 * Read a top-level string value (`key = "value"`) from the Codex config.toml.
 * This is a deliberately minimal line scan, not a full TOML parser; returns
 * null when the file or key is absent.
 * NOTE: `key` is interpolated into a RegExp — callers pass internal literals
 * only, never user input.
 */
async function readConfigValue(key) {
  const configPath = path.join(getCodexHome(), "config.toml");
  if (!existsSync(configPath)) {
    return null;
  }
  const contents = await readFile(configPath, "utf8");
  const pattern = new RegExp(`^\\s*${key}\\s*=\\s*"([^"]+)"`, "m");
  const found = contents.match(pattern);
  return found ? found[1] : null;
}
/**
 * Load credentials from <codexHome>/auth.json.
 * Returns whichever of { accessToken, accountId, apiKey } are present as
 * strings; an absent file yields {}. A malformed file throws (JSON.parse),
 * matching the original behavior.
 */
async function loadAuth() {
  const authPath = path.join(getCodexHome(), "auth.json");
  if (!existsSync(authPath)) {
    return {};
  }
  const parsed = JSON.parse(await readFile(authPath, "utf8"));
  const tokens = parsed.tokens;
  const pickString = (value) => (typeof value === "string" ? value : undefined);
  return {
    accessToken: pickString(tokens?.access_token),
    accountId: pickString(tokens?.account_id),
    apiKey: pickString(parsed.OPENAI_API_KEY),
  };
}
/**
 * Load the models list from <codexHome>/models_cache.json.
 * Returns the array only when the file exists and holds a non-empty
 * `models` array; any read/parse problem yields null so callers can fall
 * back to other sources.
 */
async function loadModelsCache(codexHome) {
  const cachePath = path.join(codexHome, "models_cache.json");
  if (!existsSync(cachePath)) {
    return null;
  }
  try {
    const parsed = JSON.parse(await readFile(cachePath, "utf8"));
    const models = parsed.models;
    if (Array.isArray(models) && models.length > 0) {
      return models;
    }
  } catch {
    return null;
  }
  return null;
}
/**
 * Run a trivial `codex exec` task purely so the Codex CLI refreshes its
 * models_cache.json; the task's own output is discarded and the session
 * files it creates are removed.
 * Fix: a spawn failure (e.g. codex binary missing) previously raised an
 * unhandled 'error' event and crashed the process; priming is best-effort,
 * so spawn errors are now swallowed and cleanup still runs.
 */
async function primeModelsCache(codexBin, codexHome) {
  const uuid = crypto.randomUUID();
  const outputPath = path.join(os.tmpdir(), `.codex-exec-models.${uuid}.json`);
  const schemaPath = path.join(os.tmpdir(), `.codex-exec-models-schema.${uuid}.json`);
  // Minimal schema: we only need Codex to run, not to produce useful output.
  const schema = {
    type: "object",
    additionalProperties: false,
    properties: { ok: { type: "boolean" } },
    required: ["ok"]
  };
  await writeFile(schemaPath, JSON.stringify(schema), "utf8");
  const before = await listSessionFiles(codexHome);
  const args = [
    "exec",
    "--skip-git-repo-check",
    "--output-last-message",
    outputPath,
    "--output-schema",
    schemaPath,
    "--color",
    "never",
    "Return {\"ok\": true}"
  ];
  const child = spawn(codexBin, args, { stdio: ["ignore", "pipe", "pipe"] });
  child.stdout.on("data", () => { });
  child.stderr.on("data", () => { });
  await new Promise((resolve) => {
    child.on("error", () => resolve(null)); // best-effort: ignore spawn failure
    child.on("close", resolve);
  });
  // Remove the session files this throwaway run created, plus our temp files.
  const after = await listSessionFiles(codexHome);
  await deleteNewSessionFiles(before, after, codexHome);
  for (const tempFile of [outputPath, schemaPath]) {
    try {
      await rm(tempFile, { force: true });
    } catch {
      // ignore
    }
  }
}
/**
 * Resolve the list of available models, in order of preference:
 * local cache → ChatGPT backend API (OAuth token) → OpenAI API (API key) →
 * cache primed via a throwaway codex run → built-in FALLBACK_MODELS.
 * Returns { models, source } where source is "cache" | "api" | "fallback".
 * Fix: a rejected fetch (network down, DNS failure) previously propagated
 * and aborted the command, contradicting the documented behavior of falling
 * back to the built-in preset list when the API is unavailable; network
 * attempts are now caught and treated as "try the next source".
 */
async function fetchModels(codexBin) {
  const codexHome = getCodexHome();
  const cached = await loadModelsCache(codexHome);
  if (cached && cached.length > 0) {
    return { models: cached, source: "cache" };
  }
  const version = await getCodexVersion(codexBin);
  const { accessToken, accountId, apiKey } = await loadAuth();
  const chatgptBase = (await readConfigValue("chatgpt_base_url")) || "https://chatgpt.com/backend-api/";
  const chatgptBaseClean = chatgptBase.replace(/\/+$/, "");
  const codexBase = chatgptBaseClean.endsWith("/codex") ? chatgptBaseClean : `${chatgptBaseClean}/codex`;
  if (accessToken) {
    try {
      const url = new URL(`${codexBase}/models`);
      url.searchParams.set("client_version", version);
      const headers = {
        Authorization: `Bearer ${accessToken}`
      };
      if (accountId) {
        headers["ChatGPT-Account-ID"] = accountId;
      }
      const res = await fetch(url, { headers });
      if (res.ok) {
        const body = (await res.json());
        if (Array.isArray(body.models)) {
          return { models: body.models, source: "api" };
        }
      }
    } catch {
      // network/API unavailable: fall through to the next source
    }
  }
  if (apiKey) {
    try {
      const url = new URL("https://api.openai.com/v1/models");
      const headers = { Authorization: `Bearer ${apiKey}` };
      const res = await fetch(url, { headers });
      if (res.ok) {
        const body = (await res.json());
        if (Array.isArray(body.data)) {
          return {
            models: body.data.map((item) => ({
              slug: item.id,
              display_name: item.id,
              description: "OpenAI model",
              supported_reasoning_levels: []
            })),
            source: "api"
          };
        }
      }
    } catch {
      // network/API unavailable: fall through to the next source
    }
  }
  await primeModelsCache(codexBin, codexHome);
  const cachedAfter = await loadModelsCache(codexHome);
  if (cachedAfter && cachedAfter.length > 0) {
    return { models: cachedAfter, source: "cache" };
  }
  return { models: FALLBACK_MODELS, source: "fallback" };
}
/**
 * Render the model list as human-readable text: one stanza per model
 * (slug, optional description, reasoning default and levels), with blank
 * lines between stanzas.
 */
function formatModelsText(models) {
  const stanzas = models.map((model) => {
    const parts = [`${model.slug} (${model.display_name})`];
    if (model.description) {
      parts.push(model.description);
    }
    const levels = model.supported_reasoning_levels || [];
    if (levels.length > 0) {
      parts.push(`Reasoning default: ${model.default_reasoning_level ?? "default"}`);
      for (const { effort, description } of levels) {
        parts.push(`- ${effort}: ${description}`);
      }
    }
    return parts.join("\n");
  });
  return stanzas.join("\n\n").trim();
}
/**
 * Invoke `codex exec` with schema enforcement and wait for completion.
 * The model's final message is written to `outputPath` via
 * --output-last-message; the temp schema file is always cleaned up, and
 * (unless --keep-trace) any session files the run created are deleted.
 * Throws on non-zero exit, echoing codex's stderr/stdout first.
 * Fix: handle the spawn "error" event — previously a missing codex binary
 * raised an unhandled 'error' event and crashed the process; it now surfaces
 * as a normal Error after cleanup has run.
 */
async function runCodex(options, prompt, schemaString, outputPath) {
  const schemaPath = path.join(os.tmpdir(), `.codex-exec-schema.${crypto.randomUUID()}.json`);
  await writeFile(schemaPath, schemaString, "utf8");
  const preprompt = [
    "You are running in exec mode.",
    "Return only valid JSON that matches the provided schema.",
    "Do not include extra text."
  ].join(" ");
  const finalPrompt = `${preprompt}\n\nTask: ${prompt}`;
  const args = [
    "exec",
    "--skip-git-repo-check",
    "--output-last-message",
    outputPath,
    "--output-schema",
    schemaPath,
    "--color",
    "never"
  ];
  if (options.model) {
    args.push("--model", options.model);
  }
  if (options.reasoning) {
    args.push("--config", `model_reasoning_effort="${options.reasoning}"`);
  }
  if (options.yolo) {
    args.push("--dangerously-bypass-approvals-and-sandbox");
  }
  args.push(...options.passThroughArgs, finalPrompt);
  const codexHome = getCodexHome();
  const beforeSessions = options.keepTrace ? [] : await listSessionFiles(codexHome);
  const child = spawn(options.codexBin, args, { stdio: ["ignore", "pipe", "pipe"] });
  let stdout = "";
  let stderr = "";
  child.stdout.on("data", (data) => {
    stdout += data.toString();
  });
  child.stderr.on("data", (data) => {
    stderr += data.toString();
  });
  // Resolve on either outcome so the cleanup below always runs.
  const outcome = await new Promise((resolve) => {
    child.on("error", (err) => resolve({ exitCode: null, spawnError: err }));
    child.on("close", (code) => resolve({ exitCode: code, spawnError: null }));
  });
  if (!options.keepTrace) {
    const afterSessions = await listSessionFiles(codexHome);
    await deleteNewSessionFiles(beforeSessions, afterSessions, codexHome);
  }
  try {
    await rm(schemaPath, { force: true });
  } catch {
    // ignore
  }
  if (outcome.spawnError) {
    throw new Error(`Failed to run ${options.codexBin}: ${outcome.spawnError.message}`);
  }
  if (outcome.exitCode !== 0) {
    if (stderr.trim()) {
      console.error(stderr.trim());
    } else if (stdout.trim()) {
      console.error(stdout.trim());
    }
    throw new Error(`codex exited with code ${outcome.exitCode}`);
  }
}
/**
 * CLI entry point: handle --models/--models-json, gather the prompt
 * (arguments first, stdin as fallback), run codex, and print the output
 * file path — stdout is exactly that path so callers can `cat "$(...)"`.
 */
async function run() {
  const { options, promptParts } = parseArgs(process.argv.slice(2));
  if (options.showModels || options.showModelsJson) {
    const { models, source } = await fetchModels(options.codexBin);
    if (options.showModelsJson) {
      console.log(JSON.stringify({ source, models }, null, 2));
    } else {
      const header = source === "fallback"
        ? "Models (fallback list)"
        : source === "cache"
          ? "Models (cached)"
          : "Models";
      // Fix: was `\\n\\n` (escaped backslashes), which printed a literal
      // "\n\n" between header and body instead of a blank line.
      console.log(`${header}\n\n${formatModelsText(models)}`.trim());
    }
    return;
  }
  const stdinPrompt = await readStdin();
  const prompt = promptParts.join(" ").trim() || stdinPrompt.trim();
  if (!prompt) {
    console.error("Missing prompt. Provide a prompt as arguments or via stdin.\n");
    console.error(USAGE);
    process.exit(1);
  }
  const uuid = crypto.randomUUID();
  const outputPath = options.outPath ?? path.join(os.tmpdir(), `.codex-exec.${uuid}.json`);
  const { schemaString } = await resolveSchema(options);
  await runCodex(options, prompt, schemaString, outputPath);
  console.log(outputPath);
}
// Surface any failure as a plain message and a non-zero exit code.
run().catch((error) => {
  const message = error instanceof Error ? error.message : String(error);
  console.error(message);
  process.exit(1);
});
package/package.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "name": "codex-exec-json",
3
+ "version": "0.1.0",
4
+ "description": "Run Codex exec in non-persistent mode and write structured JSON output to a temp file",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "bin": {
9
+ "codex-exec-json": "bin/cli.js"
10
+ },
11
+ "files": [
12
+ "bin",
13
+ "dist"
14
+ ],
15
+ "scripts": {
16
+ "build": "tsc -p tsconfig.json",
17
+ "dev": "tsc -p tsconfig.json --watch",
18
+ "lint": "tsc -p tsconfig.json --noEmit",
19
+ "prepublishOnly": "npm run build"
20
+ },
21
+ "keywords": [
22
+ "codex",
23
+ "cli",
24
+ "json",
25
+ "exec"
26
+ ],
27
+ "author": "pp",
28
+ "license": "MIT",
29
+ "engines": {
30
+ "node": ">=18"
31
+ },
32
+ "devDependencies": {
33
+ "@types/node": "^20.11.30",
34
+ "typescript": "^5.6.3"
35
+ }
36
+ }