ai-sdk-provider-codex-cli 1.0.1 → 1.0.2

package/README.md CHANGED
@@ -10,7 +10,7 @@
  [![PRs welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/ben-vargas/ai-sdk-provider-codex-cli/issues)
  [![Latest Release](https://img.shields.io/github/v/release/ben-vargas/ai-sdk-provider-codex-cli?display_name=tag)](https://github.com/ben-vargas/ai-sdk-provider-codex-cli/releases/latest)
 
- A community provider for Vercel AI SDK v6 that uses OpenAI's Codex CLI (non‑interactive `codex exec`) to talk to GPT‑5.1 class models (`gpt-5.1`, the Codex-specific `gpt-5.1-codex`, the flagship `gpt-5.1-codex-max`, and the lightweight `gpt-5.1-codex-mini` slugs) with your ChatGPT Plus/Pro subscription. The provider spawns the Codex CLI process, parses its JSONL output, and adapts it to the AI SDK LanguageModelV3 interface. Legacy GPT-5 / GPT-5-codex slugs remain compatible for existing workflows.
+ A community provider for Vercel AI SDK v6 that uses OpenAI's Codex CLI (non‑interactive `codex exec`) to talk to GPT‑5.1 / GPT‑5.2 class models (`gpt-5.1`, `gpt-5.2`, the Codex-specific `gpt-5.1-codex` / `gpt-5.2-codex`, the flagship `*-codex-max`, and the lightweight `*-codex-mini` slugs) with your ChatGPT Plus/Pro subscription. The provider spawns the Codex CLI process, parses its JSONL output, and adapts it to the AI SDK LanguageModelV3 interface. Legacy GPT-5 / GPT-5-codex slugs remain compatible for existing workflows.
 
  - Works with `generateText`, `streamText`, and `generateObject` (native JSON Schema support via `--output-schema`)
  - Uses ChatGPT OAuth from `codex login` (tokens in `~/.codex/auth.json`) or `OPENAI_API_KEY`
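To make the updated model list concrete, here is a minimal usage sketch. It assumes the `codexCli` provider factory that the README examples below use and the AI SDK's `generateText`; the slug you pass should be one your Codex CLI installation actually exposes.

```ts
import { generateText } from 'ai';
import { codexCli } from 'ai-sdk-provider-codex-cli';

// Sketch only: assumes `codexCli` is the provider factory exported by this
// package and that the chosen slug is available to your Codex CLI login.
const { text } = await generateText({
  model: codexCli('gpt-5.1-codex'),
  prompt: 'Summarize the last commit message in one sentence.',
});

console.log(text);
```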
@@ -307,7 +307,7 @@ const model = codexCli('gpt-5.1-codex', {
  addDirs: ['../shared'],
 
  // Reasoning & verbosity
- reasoningEffort: 'medium', // minimal | low | medium | high | xhigh (xhigh only on gpt-5.1-codex-max)
+ reasoningEffort: 'medium', // none | minimal | low | medium | high | xhigh (xhigh on codex-max and newer models that expose it)
  reasoningSummary: 'auto', // auto | detailed (Note: 'concise' and 'none' are rejected by API)
  reasoningSummaryFormat: 'none', // none | experimental
  modelVerbosity: 'high', // low | medium | high
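The new `none` level slots into the same settings object the README documents above. A hedged sketch, using the option names shown in this hunk (`reasoningEffort`, `reasoningSummary`, `modelVerbosity`) and the `codexCli(model, settings)` call shape from the README:

```ts
import { codexCli } from 'ai-sdk-provider-codex-cli';

// Sketch: option names taken from the README diff above. Whether a given
// model accepts 'none' or 'xhigh' depends on the Codex CLI preset for that slug.
const fastModel = codexCli('gpt-5.1', {
  reasoningEffort: 'none',   // new in 1.0.2: skip extra reasoning where the model supports it
  reasoningSummary: 'auto',
  modelVerbosity: 'low',
});

const deepModel = codexCli('gpt-5.1-codex-max', {
  reasoningEffort: 'xhigh',  // xhigh is documented for the codex-max class models
});
```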
package/dist/index.cjs CHANGED
@@ -111,7 +111,7 @@ var settingsSchema = zod.z.object({
  verbose: zod.z.boolean().optional(),
  logger: zod.z.union([zod.z.literal(false), loggerFunctionSchema]).optional(),
  // NEW: Reasoning & Verbosity
- reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+ reasoningEffort: zod.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
  // Note: API rejects 'concise' and 'none' despite error messages claiming they're valid
  reasoningSummary: zod.z.enum(["auto", "detailed"]).optional(),
  reasoningSummaryFormat: zod.z.enum(["none", "experimental"]).optional(),
@@ -391,7 +391,7 @@ function mapCodexCliFinishReason(reason) {
  }
  }
  var codexCliProviderOptionsSchema = zod.z.object({
- reasoningEffort: zod.z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+ reasoningEffort: zod.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
  reasoningSummary: zod.z.enum(["auto", "detailed"]).optional(),
  reasoningSummaryFormat: zod.z.enum(["none", "experimental"]).optional(),
  textVerbosity: zod.z.enum(["low", "medium", "high"]).optional(),
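Both the settings schema and the per-call provider-options schema gain the `none` member. A small standalone sketch of the validation behavior this implies, written against plain `zod` rather than the package's internal schema objects:

```ts
import { z } from 'zod';

// Mirrors the enum shape shown in the diff above; an illustration,
// not the package's exported schema.
const reasoningEffort = z
  .enum(['none', 'minimal', 'low', 'medium', 'high', 'xhigh'])
  .optional();

reasoningEffort.parse('none');     // accepted as of 1.0.2
reasoningEffort.parse(undefined);  // still optional
// reasoningEffort.parse('ultra'); // would throw a ZodError
```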
package/dist/index.d.cts CHANGED
@@ -37,7 +37,7 @@ interface Logger {
  }
  type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
  type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
- type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+ type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
  /**
   * Reasoning summary detail level.
   * Note: The API error messages claim 'concise' and 'none' are valid, but they are
@@ -116,8 +116,8 @@ interface CodexCliSettings {
  * and legacy GPT-5 slugs). Higher effort produces more thorough reasoning at the cost of latency.
  *
  * Codex CLI model presets currently expose `low`/`medium`/`high` for `gpt-5.1` and `gpt-5.1-codex`.
+ * Per OpenAI API docs, GPT‑5.1+ models support a `none` level (no extra reasoning); older GPT‑5 slugs used `minimal` instead.
  * `gpt-5.1-codex-max` additionally supports `xhigh`. `gpt-5.1-codex-mini` only offers `medium`/`high`.
- * The legacy `gpt-5` slug still allowed `minimal`, but GPT-5.1 rejects it.
  *
  * Maps to: `-c model_reasoning_effort=<value>`
  * @see https://platform.openai.com/docs/guides/reasoning
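On the type level, the widened `ReasoningEffort` union means `'none'` now type-checks wherever the setting is accepted. A sketch, assuming the type is re-exported from the package entry point:

```ts
import type { ReasoningEffort } from 'ai-sdk-provider-codex-cli';

// 'none' is now part of the union; a typo such as 'nonE' fails to compile.
const effort: ReasoningEffort = 'none';

// Per the JSDoc above, the chosen value ultimately maps to the CLI flag
// `-c model_reasoning_effort=<value>`.
```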
package/dist/index.d.ts CHANGED
@@ -37,7 +37,7 @@ interface Logger {
  }
  type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
  type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
- type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+ type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
  /**
   * Reasoning summary detail level.
   * Note: The API error messages claim 'concise' and 'none' are valid, but they are
@@ -116,8 +116,8 @@ interface CodexCliSettings {
  * and legacy GPT-5 slugs). Higher effort produces more thorough reasoning at the cost of latency.
  *
  * Codex CLI model presets currently expose `low`/`medium`/`high` for `gpt-5.1` and `gpt-5.1-codex`.
+ * Per OpenAI API docs, GPT‑5.1+ models support a `none` level (no extra reasoning); older GPT‑5 slugs used `minimal` instead.
  * `gpt-5.1-codex-max` additionally supports `xhigh`. `gpt-5.1-codex-mini` only offers `medium`/`high`.
- * The legacy `gpt-5` slug still allowed `minimal`, but GPT-5.1 rejects it.
  *
  * Maps to: `-c model_reasoning_effort=<value>`
  * @see https://platform.openai.com/docs/guides/reasoning
package/dist/index.js CHANGED
@@ -108,7 +108,7 @@ var settingsSchema = z.object({
  verbose: z.boolean().optional(),
  logger: z.union([z.literal(false), loggerFunctionSchema]).optional(),
  // NEW: Reasoning & Verbosity
- reasoningEffort: z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+ reasoningEffort: z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
  // Note: API rejects 'concise' and 'none' despite error messages claiming they're valid
  reasoningSummary: z.enum(["auto", "detailed"]).optional(),
  reasoningSummaryFormat: z.enum(["none", "experimental"]).optional(),
@@ -388,7 +388,7 @@ function mapCodexCliFinishReason(reason) {
  }
  }
  var codexCliProviderOptionsSchema = z.object({
- reasoningEffort: z.enum(["minimal", "low", "medium", "high", "xhigh"]).optional(),
+ reasoningEffort: z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
  reasoningSummary: z.enum(["auto", "detailed"]).optional(),
  reasoningSummaryFormat: z.enum(["none", "experimental"]).optional(),
  textVerbosity: z.enum(["low", "medium", "high"]).optional(),
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "ai-sdk-provider-codex-cli",
- "version": "1.0.1",
+ "version": "1.0.2",
  "description": "AI SDK v6 provider for OpenAI Codex CLI (use ChatGPT Plus/Pro subscription)",
  "keywords": [
  "ai-sdk",