ai-sdk-provider-codex-cli 0.5.0 → 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -10,7 +10,7 @@
10
10
  [![PRs welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/ben-vargas/ai-sdk-provider-codex-cli/issues)
11
11
  [![Latest Release](https://img.shields.io/github/v/release/ben-vargas/ai-sdk-provider-codex-cli?display_name=tag)](https://github.com/ben-vargas/ai-sdk-provider-codex-cli/releases/latest)
12
12
 
13
- A community provider for Vercel AI SDK v5 that uses OpenAI’s Codex CLI (non‑interactive `codex exec`) to talk to GPT‑5 class models (`gpt-5` and the Codex-specific `gpt-5-codex` slug) with your ChatGPT Plus/Pro subscription. The provider spawns the Codex CLI process, parses its JSONL output, and adapts it to the AI SDK LanguageModelV2 interface.
13
+ A community provider for Vercel AI SDK v5 that uses OpenAI’s Codex CLI (non‑interactive `codex exec`) to talk to GPT‑5.1 class models (`gpt-5.1`, the Codex-specific `gpt-5.1-codex`, the flagship `gpt-5.1-codex-max`, and the lightweight `gpt-5.1-codex-mini` slugs) with your ChatGPT Plus/Pro subscription. The provider spawns the Codex CLI process, parses its JSONL output, and adapts it to the AI SDK LanguageModelV2 interface. Legacy GPT-5 / GPT-5-codex slugs remain compatible for existing workflows.
14
14
 
15
15
  - Works with `generateText`, `streamText`, and `generateObject` (native JSON Schema support via `--output-schema`)
16
16
  - Uses ChatGPT OAuth from `codex login` (tokens in `~/.codex/auth.json`) or `OPENAI_API_KEY`
@@ -28,7 +28,7 @@ npm i -g @openai/codex
28
28
  codex login # or set OPENAI_API_KEY
29
29
  ```
30
30
 
31
- > **⚠️ Version Requirement**: Requires Codex CLI **>= 0.42.0** for `--experimental-json` and `--output-schema` support. **>= 0.44.0 recommended** for full usage tracking and tool streaming support. Check your version with `codex --version` and upgrade if needed:
31
+ > **⚠️ Version Requirement**: Requires Codex CLI **>= 0.42.0** for `--experimental-json` and `--output-schema` support. **>= 0.60.0 recommended** for `gpt-5.1-codex-max` and `xhigh` reasoning effort. If you supply your own Codex CLI (global install or custom `codexPath`/`allowNpx`), check it with `codex --version` and upgrade if needed. The optional dependency `@openai/codex` in this package pulls a compatible version automatically.
32
32
  >
33
33
  > ```bash
34
34
  > npm i -g @openai/codex@latest
@@ -48,7 +48,7 @@ Text generation
48
48
  import { generateText } from 'ai';
49
49
  import { codexCli } from 'ai-sdk-provider-codex-cli';
50
50
 
51
- const model = codexCli('gpt-5-codex', {
51
+ const model = codexCli('gpt-5.1-codex', {
52
52
  allowNpx: true,
53
53
  skipGitRepoCheck: true,
54
54
  approvalMode: 'on-failure',
@@ -68,10 +68,10 @@ Streaming
68
68
  import { streamText } from 'ai';
69
69
  import { codexCli } from 'ai-sdk-provider-codex-cli';
70
70
 
71
- // The provider works with both `gpt-5` and `gpt-5-codex`; use the latter for
72
- // the Codex CLI specific slug.
71
+ // The provider works with both `gpt-5.1` and `gpt-5.1-codex`; use the latter for
72
+ // the Codex CLI specific slug. Legacy `gpt-5` slugs still work if you need them.
73
73
  const { textStream } = await streamText({
74
- model: codexCli('gpt-5-codex', { allowNpx: true, skipGitRepoCheck: true }),
74
+ model: codexCli('gpt-5.1-codex', { allowNpx: true, skipGitRepoCheck: true }),
75
75
  prompt: 'Write two short lines of encouragement.',
76
76
  });
77
77
  for await (const chunk of textStream) process.stdout.write(chunk);
@@ -86,7 +86,7 @@ import { codexCli } from 'ai-sdk-provider-codex-cli';
86
86
 
87
87
  const schema = z.object({ name: z.string(), age: z.number().int() });
88
88
  const { object } = await generateObject({
89
- model: codexCli('gpt-5-codex', { allowNpx: true, skipGitRepoCheck: true }),
89
+ model: codexCli('gpt-5.1-codex', { allowNpx: true, skipGitRepoCheck: true }),
90
90
  schema,
91
91
  prompt: 'Generate a small user profile.',
92
92
  });
@@ -114,7 +114,7 @@ import { streamText } from 'ai';
114
114
  import { codexCli } from 'ai-sdk-provider-codex-cli';
115
115
 
116
116
  const result = await streamText({
117
- model: codexCli('gpt-5-codex', { allowNpx: true, skipGitRepoCheck: true }),
117
+ model: codexCli('gpt-5.1-codex', { allowNpx: true, skipGitRepoCheck: true }),
118
118
  prompt: 'List files and count lines in the largest one',
119
119
  });
120
120
 
@@ -145,20 +145,20 @@ Control logging verbosity and integrate with your observability stack:
145
145
  import { codexCli } from 'ai-sdk-provider-codex-cli';
146
146
 
147
147
  // Default: warn/error only (clean production output)
148
- const model = codexCli('gpt-5-codex', {
148
+ const model = codexCli('gpt-5.1-codex', {
149
149
  allowNpx: true,
150
150
  skipGitRepoCheck: true,
151
151
  });
152
152
 
153
153
  // Verbose mode: enable debug/info logs for troubleshooting
154
- const verboseModel = codexCli('gpt-5-codex', {
154
+ const verboseModel = codexCli('gpt-5.1-codex', {
155
155
  allowNpx: true,
156
156
  skipGitRepoCheck: true,
157
157
  verbose: true, // Shows all log levels
158
158
  });
159
159
 
160
160
  // Custom logger: integrate with Winston, Pino, Datadog, etc.
161
- const customModel = codexCli('gpt-5-codex', {
161
+ const customModel = codexCli('gpt-5.1-codex', {
162
162
  allowNpx: true,
163
163
  skipGitRepoCheck: true,
164
164
  verbose: true,
@@ -171,7 +171,7 @@ const customModel = codexCli('gpt-5-codex', {
171
171
  });
172
172
 
173
173
  // Silent: disable all logging
174
- const silentModel = codexCli('gpt-5-codex', {
174
+ const silentModel = codexCli('gpt-5.1-codex', {
175
175
  allowNpx: true,
176
176
  skipGitRepoCheck: true,
177
177
  logger: false, // No logs at all
@@ -243,12 +243,12 @@ Control reasoning effort, verbosity, and advanced Codex features at model creati
243
243
  ```ts
244
244
  import { codexCli } from 'ai-sdk-provider-codex-cli';
245
245
 
246
- const model = codexCli('gpt-5-codex', {
246
+ const model = codexCli('gpt-5.1-codex', {
247
247
  allowNpx: true,
248
248
  skipGitRepoCheck: true,
249
249
 
250
250
  // Reasoning & verbosity
251
- reasoningEffort: 'medium', // minimal | low | medium | high
251
+ reasoningEffort: 'medium', // minimal | low | medium | high | xhigh (xhigh only on gpt-5.1-codex-max)
252
252
  reasoningSummary: 'auto', // auto | detailed (Note: 'concise' and 'none' are rejected by API)
253
253
  reasoningSummaryFormat: 'none', // none | experimental
254
254
  modelVerbosity: 'high', // low | medium | high
@@ -279,7 +279,7 @@ values take precedence over constructor defaults while leaving other settings in
279
279
  import { generateText } from 'ai';
280
280
  import { codexCli } from 'ai-sdk-provider-codex-cli';
281
281
 
282
- const model = codexCli('gpt-5-codex', {
282
+ const model = codexCli('gpt-5.1-codex', {
283
283
  allowNpx: true,
284
284
  reasoningEffort: 'medium',
285
285
  modelVerbosity: 'medium',
package/dist/index.cjs CHANGED
@@ -82,7 +82,7 @@ var settingsSchema = zod.z.object({
82
82
  verbose: zod.z.boolean().optional(),
83
83
  logger: zod.z.union([zod.z.literal(false), loggerFunctionSchema]).optional(),
84
84
  // NEW: Reasoning & Verbosity
85
- reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high"]).optional(),
85
+ reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
86
86
  // Note: API rejects 'concise' and 'none' despite error messages claiming they're valid
87
87
  reasoningSummary: zod.z.enum(["auto", "detailed"]).optional(),
88
88
  reasoningSummaryFormat: zod.z.enum(["none", "experimental"]).optional(),
@@ -231,7 +231,7 @@ function isAuthenticationError(err) {
231
231
 
232
232
  // src/codex-cli-language-model.ts
233
233
  var codexCliProviderOptionsSchema = zod.z.object({
234
- reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high"]).optional(),
234
+ reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
235
235
  reasoningSummary: zod.z.enum(["auto", "detailed"]).optional(),
236
236
  reasoningSummaryFormat: zod.z.enum(["none", "experimental"]).optional(),
237
237
  textVerbosity: zod.z.enum(["low", "medium", "high"]).optional(),
package/dist/index.d.cts CHANGED
@@ -37,7 +37,7 @@ interface Logger {
37
37
  }
38
38
  type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
39
39
  type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
40
- type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
40
+ type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
41
41
  /**
42
42
  * Reasoning summary detail level.
43
43
  * Note: The API error messages claim 'concise' and 'none' are valid, but they are
@@ -61,8 +61,12 @@ interface CodexCliSettings {
61
61
  verbose?: boolean;
62
62
  logger?: Logger | false;
63
63
  /**
64
- * Controls reasoning effort for reasoning-capable models (o3, o4-mini, gpt-5, gpt-5-codex).
65
- * Higher effort produces more thorough reasoning at the cost of latency.
64
+ * Controls reasoning effort for reasoning-capable models (o3, o4-mini, the GPT-5.1 family,
65
+ * and legacy GPT-5 slugs). Higher effort produces more thorough reasoning at the cost of latency.
66
+ *
67
+ * Codex CLI model presets currently expose `low`/`medium`/`high` for `gpt-5.1` and `gpt-5.1-codex`.
68
+ * `gpt-5.1-codex-max` additionally supports `xhigh`. `gpt-5.1-codex-mini` only offers `medium`/`high`.
69
+ * The legacy `gpt-5` slug still accepts `minimal`, but GPT-5.1 rejects it.
66
70
  *
67
71
  * Maps to: `-c model_reasoning_effort=<value>`
68
72
  * @see https://platform.openai.com/docs/guides/reasoning
@@ -86,7 +90,8 @@ interface CodexCliSettings {
86
90
  */
87
91
  reasoningSummaryFormat?: ReasoningSummaryFormat;
88
92
  /**
89
- * Controls output length/detail for GPT-5 family models.
93
+ * Controls output length/detail for GPT-5.1 (non-Codex) and legacy GPT-5 models.
94
+ * Codex-specific slugs ignore this flag because the CLI disables verbosity for them.
90
95
  * Only applies to models using the Responses API.
91
96
  *
92
97
  * Maps to: `-c model_verbosity=<value>`
package/dist/index.d.ts CHANGED
@@ -37,7 +37,7 @@ interface Logger {
37
37
  }
38
38
  type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
39
39
  type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
40
- type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
40
+ type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
41
41
  /**
42
42
  * Reasoning summary detail level.
43
43
  * Note: The API error messages claim 'concise' and 'none' are valid, but they are
@@ -61,8 +61,12 @@ interface CodexCliSettings {
61
61
  verbose?: boolean;
62
62
  logger?: Logger | false;
63
63
  /**
64
- * Controls reasoning effort for reasoning-capable models (o3, o4-mini, gpt-5, gpt-5-codex).
65
- * Higher effort produces more thorough reasoning at the cost of latency.
64
+ * Controls reasoning effort for reasoning-capable models (o3, o4-mini, the GPT-5.1 family,
65
+ * and legacy GPT-5 slugs). Higher effort produces more thorough reasoning at the cost of latency.
66
+ *
67
+ * Codex CLI model presets currently expose `low`/`medium`/`high` for `gpt-5.1` and `gpt-5.1-codex`.
68
+ * `gpt-5.1-codex-max` additionally supports `xhigh`. `gpt-5.1-codex-mini` only offers `medium`/`high`.
69
+ * The legacy `gpt-5` slug still accepts `minimal`, but GPT-5.1 rejects it.
66
70
  *
67
71
  * Maps to: `-c model_reasoning_effort=<value>`
68
72
  * @see https://platform.openai.com/docs/guides/reasoning
@@ -86,7 +90,8 @@ interface CodexCliSettings {
86
90
  */
87
91
  reasoningSummaryFormat?: ReasoningSummaryFormat;
88
92
  /**
89
- * Controls output length/detail for GPT-5 family models.
93
+ * Controls output length/detail for GPT-5.1 (non-Codex) and legacy GPT-5 models.
94
+ * Codex-specific slugs ignore this flag because the CLI disables verbosity for them.
90
95
  * Only applies to models using the Responses API.
91
96
  *
92
97
  * Maps to: `-c model_verbosity=<value>`
package/dist/index.js CHANGED
@@ -79,7 +79,7 @@ var settingsSchema = z.object({
79
79
  verbose: z.boolean().optional(),
80
80
  logger: z.union([z.literal(false), loggerFunctionSchema]).optional(),
81
81
  // NEW: Reasoning & Verbosity
82
- reasoningEffort: z.enum(["minimal", "low", "medium", "high"]).optional(),
82
+ reasoningEffort: z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
83
83
  // Note: API rejects 'concise' and 'none' despite error messages claiming they're valid
84
84
  reasoningSummary: z.enum(["auto", "detailed"]).optional(),
85
85
  reasoningSummaryFormat: z.enum(["none", "experimental"]).optional(),
@@ -228,7 +228,7 @@ function isAuthenticationError(err) {
228
228
 
229
229
  // src/codex-cli-language-model.ts
230
230
  var codexCliProviderOptionsSchema = z.object({
231
- reasoningEffort: z.enum(["minimal", "low", "medium", "high"]).optional(),
231
+ reasoningEffort: z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
232
232
  reasoningSummary: z.enum(["auto", "detailed"]).optional(),
233
233
  reasoningSummaryFormat: z.enum(["none", "experimental"]).optional(),
234
234
  textVerbosity: z.enum(["low", "medium", "high"]).optional(),
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ai-sdk-provider-codex-cli",
3
- "version": "0.5.0",
3
+ "version": "0.5.2",
4
4
  "description": "AI SDK v5 provider for OpenAI Codex CLI with native JSON Schema support",
5
5
  "keywords": [
6
6
  "ai-sdk",
@@ -9,6 +9,9 @@
9
9
  "cli",
10
10
  "language-model",
11
11
  "gpt-5",
12
+ "gpt-5.1",
13
+ "gpt-5.1-codex",
14
+ "gpt-5.1-codex-max",
12
15
  "provider"
13
16
  ],
14
17
  "homepage": "https://github.com/ben-vargas/ai-sdk-provider-codex-cli",
@@ -61,20 +64,20 @@
61
64
  "jsonc-parser": "^3.3.1"
62
65
  },
63
66
  "optionalDependencies": {
64
- "@openai/codex": "^0.44.0"
67
+ "@openai/codex": "^0.60.1"
65
68
  },
66
69
  "devDependencies": {
67
70
  "@eslint/js": "^9.14.0",
68
- "@types/node": "20.17.24",
71
+ "@types/node": "20.19.6",
69
72
  "@vitest/coverage-v8": "^3.2.4",
70
- "ai": "5.0.14",
73
+ "ai": "5.0.93",
71
74
  "eslint": "^9.14.0",
72
75
  "prettier": "^3.3.3",
73
76
  "tsup": "8.5.0",
74
77
  "typescript": "5.6.3",
75
78
  "vitest": "^3.2.4",
76
79
  "typescript-eslint": "^8.6.0",
77
- "zod": "^4.0.17"
80
+ "zod": "^4.1.8"
78
81
  },
79
82
  "peerDependencies": {
80
83
  "zod": "^3.0.0 || ^4.0.0"