ai-sdk-provider-codex-cli 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -3
- package/dist/index.cjs +2 -6
- package/dist/index.d.cts +2 -8
- package/dist/index.d.ts +2 -8
- package/dist/index.js +2 -6
- package/package.json +2 -2
package/README.md
CHANGED

@@ -10,7 +10,7 @@
 [](https://github.com/ben-vargas/ai-sdk-provider-codex-cli/issues)
 [](https://github.com/ben-vargas/ai-sdk-provider-codex-cli/releases/latest)

-A community provider for Vercel AI SDK v6 that uses OpenAI's Codex CLI (non‑interactive `codex exec`) to talk to GPT‑5.1 class models (`gpt-5.1`, the Codex-specific `gpt-5.1-codex` …
+A community provider for Vercel AI SDK v6 that uses OpenAI's Codex CLI (non‑interactive `codex exec`) to talk to GPT‑5.1 / GPT‑5.2 class models (`gpt-5.1`, `gpt-5.2`, the Codex-specific `gpt-5.1-codex` / `gpt-5.2-codex`, the flagship `*-codex-max`, and the lightweight `*-codex-mini` slugs) with your ChatGPT Plus/Pro subscription. The provider spawns the Codex CLI process, parses its JSONL output, and adapts it to the AI SDK LanguageModelV3 interface. Legacy GPT-5 / GPT-5-codex slugs remain compatible for existing workflows.

 - Works with `generateText`, `streamText`, and `generateObject` (native JSON Schema support via `--output-schema`)
 - Uses ChatGPT OAuth from `codex login` (tokens in `~/.codex/auth.json`) or `OPENAI_API_KEY`
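For orientation, a minimal usage sketch of the provider described above. The import path and model slug are assumptions based on the package name and the `codexCli()` factory shown later in this diff; `generateText` is the AI SDK call the README says is supported:

```ts
// Sketch only, not copied from the README.
import { generateText } from 'ai';
import { codexCli } from 'ai-sdk-provider-codex-cli';

const { text } = await generateText({
  model: codexCli('gpt-5.1-codex'),
  prompt: 'Summarize this repository in two sentences.',
});

console.log(text);
```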
@@ -307,13 +307,12 @@ const model = codexCli('gpt-5.1-codex', {
   addDirs: ['../shared'],

   // Reasoning & verbosity
-  reasoningEffort: 'medium', // minimal | low | medium | high | xhigh (xhigh …
+  reasoningEffort: 'medium', // none | minimal | low | medium | high | xhigh (xhigh on codex-max and newer models that expose it)
   reasoningSummary: 'auto', // auto | detailed (Note: 'concise' and 'none' are rejected by API)
   reasoningSummaryFormat: 'none', // none | experimental
   modelVerbosity: 'high', // low | medium | high

   // Advanced features
-  includePlanTool: true, // adds --include-plan-tool
   profile: 'production', // adds --profile production
   oss: false, // adds --oss when true
   webSearch: true, // maps to -c tools.web_search=true
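The practical upshot of this hunk: `reasoningEffort` gains a `none` level and `includePlanTool` disappears from the documented options. A small configuration sketch under those assumptions (hypothetical values, same `codexCli` factory as above):

```ts
import { codexCli } from 'ai-sdk-provider-codex-cli';

// As of 1.0.2, 'none' is accepted as an effort level for GPT-5.1+ models,
// while 'minimal' remains for legacy GPT-5 slugs. `includePlanTool` is no
// longer a recognized setting.
const model = codexCli('gpt-5.1', {
  reasoningEffort: 'none',  // none | minimal | low | medium | high | xhigh
  modelVerbosity: 'low',    // low | medium | high
  webSearch: true,          // maps to -c tools.web_search=true
});
```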
package/dist/index.cjs
CHANGED

@@ -111,13 +111,12 @@ var settingsSchema = zod.z.object({
   verbose: zod.z.boolean().optional(),
   logger: zod.z.union([zod.z.literal(false), loggerFunctionSchema]).optional(),
   // NEW: Reasoning & Verbosity
-  reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+  reasoningEffort: zod.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
   // Note: API rejects 'concise' and 'none' despite error messages claiming they're valid
   reasoningSummary: zod.z.enum(["auto", "detailed"]).optional(),
   reasoningSummaryFormat: zod.z.enum(["none", "experimental"]).optional(),
   modelVerbosity: zod.z.enum(["low", "medium", "high"]).optional(),
   // NEW: Advanced features
-  includePlanTool: zod.z.boolean().optional(),
   profile: zod.z.string().optional(),
   oss: zod.z.boolean().optional(),
   webSearch: zod.z.boolean().optional(),

@@ -392,7 +391,7 @@ function mapCodexCliFinishReason(reason) {
   }
 }
 var codexCliProviderOptionsSchema = zod.z.object({
-  reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+  reasoningEffort: zod.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
   reasoningSummary: zod.z.enum(["auto", "detailed"]).optional(),
   reasoningSummaryFormat: zod.z.enum(["none", "experimental"]).optional(),
   textVerbosity: zod.z.enum(["low", "medium", "high"]).optional(),

@@ -562,9 +561,6 @@ var CodexCliLanguageModel = class {
     if (settings.modelVerbosity) {
       args.push("-c", `model_verbosity=${settings.modelVerbosity}`);
     }
-    if (settings.includePlanTool) {
-      args.push("--include-plan-tool");
-    }
     if (settings.profile) {
       args.push("--profile", settings.profile);
     }
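To see what the widened enum accepts, a standalone sketch that mirrors the `reasoningEffort` field of `settingsSchema` above (zod is already used by the bundle; this snippet is illustrative and not part of the package):

```ts
import { z } from 'zod';

// Mirrors the 1.0.2 schema: 'none' is now a valid effort level.
const reasoningEffort = z.enum(['none', 'minimal', 'low', 'medium', 'high', 'xhigh']);

reasoningEffort.parse('none');               // passes as of 1.0.2
reasoningEffort.safeParse('ultra').success;  // false: values outside the enum are rejected
```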
package/dist/index.d.cts
CHANGED

@@ -37,7 +37,7 @@ interface Logger {
 }
 type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
 type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
-type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
 /**
  * Reasoning summary detail level.
  * Note: The API error messages claim 'concise' and 'none' are valid, but they are

@@ -116,8 +116,8 @@ interface CodexCliSettings {
     * and legacy GPT-5 slugs). Higher effort produces more thorough reasoning at the cost of latency.
     *
     * Codex CLI model presets currently expose `low`/`medium`/`high` for `gpt-5.1` and `gpt-5.1-codex`.
+    * Per OpenAI API docs, GPT‑5.1+ models support a `none` level (no extra reasoning); older GPT‑5 slugs used `minimal` instead.
     * `gpt-5.1-codex-max` additionally supports `xhigh`. `gpt-5.1-codex-mini` only offers `medium`/`high`.
-    * The legacy `gpt-5` slug still allowed `minimal`, but GPT-5.1 rejects it.
     *
     * Maps to: `-c model_reasoning_effort=<value>`
     * @see https://platform.openai.com/docs/guides/reasoning

@@ -158,12 +158,6 @@ interface CodexCliSettings {
     * Maps to: `-c features.rmcp_client=true`
     */
    rmcpClient?: boolean;
-   /**
-    * Include experimental plan tool that the model can use to update its current plan.
-    *
-    * Maps to: `--include-plan-tool`
-    */
-   includePlanTool?: boolean;
    /**
     * Configuration profile from config.toml to specify default options.
     *
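One way to read the updated doc comment: `none` is the "no extra reasoning" level on GPT-5.1-era slugs, while legacy GPT-5 slugs keep `minimal`. A hypothetical helper (not part of the package) that encodes that rule against the widened type:

```ts
type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';

// Hypothetical: picks the "skip extra reasoning" level per the doc comment above.
// Model-specific support for other levels (e.g. xhigh on codex-max only) still
// has to be checked against the Codex CLI presets.
function lowestReasoningEffort(modelId: string): ReasoningEffort {
  return /^gpt-5\.\d/.test(modelId) ? 'none' : 'minimal';
}

lowestReasoningEffort('gpt-5.1-codex'); // 'none'
lowestReasoningEffort('gpt-5');         // 'minimal'
```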
package/dist/index.d.ts
CHANGED

@@ -37,7 +37,7 @@ interface Logger {
 }
 type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
 type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
-type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
 /**
  * Reasoning summary detail level.
  * Note: The API error messages claim 'concise' and 'none' are valid, but they are

@@ -116,8 +116,8 @@ interface CodexCliSettings {
     * and legacy GPT-5 slugs). Higher effort produces more thorough reasoning at the cost of latency.
     *
     * Codex CLI model presets currently expose `low`/`medium`/`high` for `gpt-5.1` and `gpt-5.1-codex`.
+    * Per OpenAI API docs, GPT‑5.1+ models support a `none` level (no extra reasoning); older GPT‑5 slugs used `minimal` instead.
     * `gpt-5.1-codex-max` additionally supports `xhigh`. `gpt-5.1-codex-mini` only offers `medium`/`high`.
-    * The legacy `gpt-5` slug still allowed `minimal`, but GPT-5.1 rejects it.
     *
     * Maps to: `-c model_reasoning_effort=<value>`
     * @see https://platform.openai.com/docs/guides/reasoning

@@ -158,12 +158,6 @@ interface CodexCliSettings {
     * Maps to: `-c features.rmcp_client=true`
     */
    rmcpClient?: boolean;
-   /**
-    * Include experimental plan tool that the model can use to update its current plan.
-    *
-    * Maps to: `--include-plan-tool`
-    */
-   includePlanTool?: boolean;
    /**
     * Configuration profile from config.toml to specify default options.
     *
package/dist/index.js
CHANGED

@@ -108,13 +108,12 @@ var settingsSchema = z.object({
   verbose: z.boolean().optional(),
   logger: z.union([z.literal(false), loggerFunctionSchema]).optional(),
   // NEW: Reasoning & Verbosity
-  reasoningEffort: z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+  reasoningEffort: z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
   // Note: API rejects 'concise' and 'none' despite error messages claiming they're valid
   reasoningSummary: z.enum(["auto", "detailed"]).optional(),
   reasoningSummaryFormat: z.enum(["none", "experimental"]).optional(),
   modelVerbosity: z.enum(["low", "medium", "high"]).optional(),
   // NEW: Advanced features
-  includePlanTool: z.boolean().optional(),
   profile: z.string().optional(),
   oss: z.boolean().optional(),
   webSearch: z.boolean().optional(),

@@ -389,7 +388,7 @@ function mapCodexCliFinishReason(reason) {
   }
 }
 var codexCliProviderOptionsSchema = z.object({
-  reasoningEffort: z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+  reasoningEffort: z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
   reasoningSummary: z.enum(["auto", "detailed"]).optional(),
   reasoningSummaryFormat: z.enum(["none", "experimental"]).optional(),
   textVerbosity: z.enum(["low", "medium", "high"]).optional(),

@@ -559,9 +558,6 @@ var CodexCliLanguageModel = class {
     if (settings.modelVerbosity) {
       args.push("-c", `model_verbosity=${settings.modelVerbosity}`);
     }
-    if (settings.includePlanTool) {
-      args.push("--include-plan-tool");
-    }
     if (settings.profile) {
       args.push("--profile", settings.profile);
     }
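The only runtime change in this bundle is the removal of the `--include-plan-tool` branch from argument construction. For illustration, a simplified sketch (not the package's internal code) of how a few of the settings touched in this diff map onto `codex exec` arguments, using only the flag mappings that appear in the diff and doc comments above:

```ts
interface FlagSettings {
  reasoningEffort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
  modelVerbosity?: 'low' | 'medium' | 'high';
  profile?: string;
}

// Simplified re-creation of the argument building shown above; the real
// CodexCliLanguageModel handles many more settings.
function toCodexArgs(settings: FlagSettings): string[] {
  const args: string[] = [];
  if (settings.reasoningEffort) {
    args.push('-c', `model_reasoning_effort=${settings.reasoningEffort}`);
  }
  if (settings.modelVerbosity) {
    args.push('-c', `model_verbosity=${settings.modelVerbosity}`);
  }
  if (settings.profile) {
    args.push('--profile', settings.profile);
  }
  // As of 1.0.2 there is no `--include-plan-tool` branch anymore.
  return args;
}
```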
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "ai-sdk-provider-codex-cli",
-  "version": "1.0.0",
+  "version": "1.0.2",
   "description": "AI SDK v6 provider for OpenAI Codex CLI (use ChatGPT Plus/Pro subscription)",
   "keywords": [
     "ai-sdk",

@@ -64,7 +64,7 @@
     "jsonc-parser": "^3.3.1"
   },
   "optionalDependencies": {
-    "@openai/codex": "^0. …
+    "@openai/codex": "^0.77.0"
   },
   "devDependencies": {
     "@eslint/js": "^9.14.0",