oc-chatgpt-multi-auth 5.1.0 → 5.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/README.md +68 -5
  2. package/config/README.md +50 -76
  3. package/dist/index.d.ts.map +1 -1
  4. package/dist/index.js +64 -14
  5. package/dist/index.js.map +1 -1
  6. package/dist/lib/accounts.d.ts.map +1 -1
  7. package/dist/lib/accounts.js +2 -0
  8. package/dist/lib/accounts.js.map +1 -1
  9. package/dist/lib/config.d.ts +4 -0
  10. package/dist/lib/config.d.ts.map +1 -1
  11. package/dist/lib/config.js +79 -1
  12. package/dist/lib/config.js.map +1 -1
  13. package/dist/lib/prompts/codex.d.ts +1 -1
  14. package/dist/lib/prompts/codex.d.ts.map +1 -1
  15. package/dist/lib/prompts/codex.js +8 -0
  16. package/dist/lib/prompts/codex.js.map +1 -1
  17. package/dist/lib/request/fetch-helpers.d.ts +18 -0
  18. package/dist/lib/request/fetch-helpers.d.ts.map +1 -1
  19. package/dist/lib/request/fetch-helpers.js +124 -16
  20. package/dist/lib/request/fetch-helpers.js.map +1 -1
  21. package/dist/lib/request/helpers/model-map.d.ts.map +1 -1
  22. package/dist/lib/request/helpers/model-map.js +8 -0
  23. package/dist/lib/request/helpers/model-map.js.map +1 -1
  24. package/dist/lib/request/request-transformer.d.ts.map +1 -1
  25. package/dist/lib/request/request-transformer.js +83 -15
  26. package/dist/lib/request/request-transformer.js.map +1 -1
  27. package/dist/lib/schemas.d.ts +9 -0
  28. package/dist/lib/schemas.d.ts.map +1 -1
  29. package/dist/lib/schemas.js +3 -0
  30. package/dist/lib/schemas.js.map +1 -1
  31. package/dist/lib/storage/migrations.d.ts.map +1 -1
  32. package/dist/lib/storage/migrations.js +1 -0
  33. package/dist/lib/storage/migrations.js.map +1 -1
  34. package/dist/lib/types.d.ts +1 -1
  35. package/dist/lib/types.d.ts.map +1 -1
  36. package/package.json +1 -1
package/README.md CHANGED
@@ -5,7 +5,7 @@
5
5
  [![Tests](https://github.com/ndycode/oc-chatgpt-multi-auth/actions/workflows/ci.yml/badge.svg)](https://github.com/ndycode/oc-chatgpt-multi-auth/actions)
6
6
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
7
7
 
8
- OAuth plugin for OpenCode that lets you use ChatGPT Plus/Pro rate limits with models like `gpt-5.2`, `gpt-5.3-codex`, and `gpt-5.1-codex-max`.
8
+ OAuth plugin for OpenCode that lets you use ChatGPT Plus/Pro rate limits with models like `gpt-5.2`, `gpt-5.3-codex`, and `gpt-5.1-codex-max` (plus optional `gpt-5.3-codex-spark` IDs when entitled).
9
9
 
10
10
  > [!NOTE]
11
11
  > **Renamed from `opencode-openai-codex-auth-multi`** — If you were using the old package, update your config to use `oc-chatgpt-multi-auth` instead. The rename was necessary because OpenCode blocks plugins containing `opencode-openai-codex-auth` in the name.
@@ -18,7 +18,7 @@ OAuth plugin for OpenCode that lets you use ChatGPT Plus/Pro rate limits with mo
18
18
  - **Click-to-switch** — Switch accounts directly from the OpenCode TUI
19
19
  - **Strict tool validation** — Automatically cleans schemas for compatibility with strict models
20
20
  - **Auto-update notifications** — Get notified when a new version is available
21
- - **22 model presets** — Full variant system with reasoning levels (none/low/medium/high/xhigh)
21
+ - **22 template model presets** — Full variant system with reasoning levels (none/low/medium/high/xhigh)
22
22
  - **Prompt caching** — Session-based caching for faster multi-turn conversations
23
23
  - **Usage-aware errors** — Friendly messages with rate limit reset timing
24
24
  - **Plugin compatible** — Works alongside other OpenCode plugins (oh-my-opencode, dcp, etc.)
@@ -131,11 +131,14 @@ opencode run "Hello" --model=openai/gpt-5.2 --variant=medium
131
131
  |-------|----------|-------|
132
132
  | `gpt-5.2` | none, low, medium, high, xhigh | Latest GPT-5.2 with reasoning levels |
133
133
  | `gpt-5.3-codex` | low, medium, high, xhigh | Latest GPT-5.3 Codex for code generation (default: xhigh) |
134
+ | `gpt-5.3-codex-spark` | low, medium, high, xhigh | Spark IDs are supported by the plugin, but access is entitlement-gated by account/workspace |
134
135
  | `gpt-5.1-codex-max` | low, medium, high, xhigh | Maximum context Codex |
135
136
  | `gpt-5.1-codex` | low, medium, high | Standard Codex |
136
137
  | `gpt-5.1-codex-mini` | medium, high | Lightweight Codex |
137
138
  | `gpt-5.1` | none, low, medium, high | GPT-5.1 base model |
138
139
 
140
+ Config templates intentionally omit Spark model IDs by default to reduce entitlement failures on unsupported accounts. Add Spark manually only if your workspace is entitled.
141
+
139
142
  **Using variants:**
140
143
  ```bash
141
144
  # Modern OpenCode (v1.0.210+)
@@ -238,6 +241,21 @@ Add this to your `~/.config/opencode/opencode.json`:
238
241
  }
239
242
  ```
240
243
 
244
+ Optional Spark model block (manual add only when entitled):
245
+ ```json
246
+ "gpt-5.3-codex-spark": {
247
+ "name": "GPT 5.3 Codex Spark (OAuth)",
248
+ "limit": { "context": 272000, "output": 128000 },
249
+ "modalities": { "input": ["text", "image", "pdf"], "output": ["text"] },
250
+ "variants": {
251
+ "low": { "reasoningEffort": "low" },
252
+ "medium": { "reasoningEffort": "medium" },
253
+ "high": { "reasoningEffort": "high" },
254
+ "xhigh": { "reasoningEffort": "xhigh" }
255
+ }
256
+ }
257
+ ```
258
+
241
259
  For legacy OpenCode (v1.0.209 and below), use `config/opencode-legacy.json` which has individual model entries like `gpt-5.2-low`, `gpt-5.2-medium`, etc.
242
260
 
243
261
  </details>
@@ -557,6 +575,42 @@ OpenCode uses `~/.config/opencode/` on **all platforms** including Windows.
557
575
 
558
576
  </details>
559
577
 
578
+ <details>
579
+ <summary><b>Unsupported Codex Model for ChatGPT Account</b></summary>
580
+
581
+ **Error example:** `Bad Request: {"detail":"The 'gpt-5.3-codex-spark' model is not supported when using Codex with a ChatGPT account."}`
582
+
583
+ **Cause:** Active workspace/account is not entitled for the requested Codex model.
584
+
585
+ **Solutions:**
586
+ 1. Re-auth to refresh workspace selection (most common Spark fix):
587
+ ```bash
588
+ opencode auth login
589
+ ```
590
+ 2. Add another entitled account/workspace. The plugin will try the remaining accounts/workspaces before falling back to another model.
591
+ 3. Enable automatic fallback only if you want degraded-model retries when Spark is not entitled:
592
+ ```bash
593
+ CODEX_AUTH_UNSUPPORTED_MODEL_POLICY=fallback opencode
594
+ ```
595
+ 4. Use a custom fallback chain in `~/.opencode/openai-codex-auth-config.json`:
596
+ ```json
597
+ {
598
+ "unsupportedCodexPolicy": "fallback",
599
+ "fallbackOnUnsupportedCodexModel": true,
600
+ "unsupportedCodexFallbackChain": {
601
+ "gpt-5.3-codex": ["gpt-5.2-codex"],
602
+ "gpt-5.3-codex-spark": ["gpt-5.3-codex", "gpt-5.2-codex"]
603
+ }
604
+ }
605
+ ```
606
+ 5. Verify effective upstream model when needed:
607
+ ```bash
608
+ ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "ping" --model=openai/gpt-5.3-codex-spark
609
+ ```
610
+ The UI may continue to display your selected model even while fallback is applied internally.
611
+
612
+ </details>
613
+
560
614
  <details>
561
615
  <summary><b>Rate Limit Exceeded</b></summary>
562
616
 
@@ -659,7 +713,7 @@ Create `~/.opencode/openai-codex-auth-config.json` for optional settings:
659
713
  | `codexTuiV2` | `true` | Enables Codex-style terminal UI output (set `false` for legacy output) |
660
714
  | `codexTuiColorProfile` | `truecolor` | Terminal color profile for Codex UI (`truecolor`, `ansi256`, `ansi16`) |
661
715
  | `codexTuiGlyphMode` | `ascii` | Glyph mode for Codex UI (`ascii`, `unicode`, `auto`) |
662
- | `fastSession` | `false` | Forces low-latency settings per request (`reasoningEffort=none/low`, `reasoningSummary=off`, `textVerbosity=low`) |
716
+ | `fastSession` | `false` | Forces low-latency settings per request (`reasoningEffort=none/low`, `reasoningSummary=auto`, `textVerbosity=low`) |
663
717
  | `fastSessionStrategy` | `hybrid` | `hybrid` speeds simple turns but keeps full-depth on complex prompts; `always` forces fast tuning on every turn |
664
718
  | `fastSessionMaxInputItems` | `30` | Max input items kept when fast tuning is applied |
665
719
 
@@ -677,10 +731,17 @@ Create `~/.opencode/openai-codex-auth-config.json` for optional settings:
677
731
  | `retryAllAccountsRateLimited` | `true` | Wait and retry when all accounts are rate-limited |
678
732
  | `retryAllAccountsMaxWaitMs` | `0` | Max wait time (0 = unlimited) |
679
733
  | `retryAllAccountsMaxRetries` | `Infinity` | Max retry attempts |
680
- | `fallbackToGpt52OnUnsupportedGpt53` | `true` | Automatically retry once with `gpt-5.2-codex` when `gpt-5.3-codex` is rejected for ChatGPT Codex OAuth entitlement |
734
+ | `unsupportedCodexPolicy` | `strict` | Unsupported-model behavior: `strict` (return entitlement error) or `fallback` (retry next model in fallback chain) |
735
+ | `fallbackOnUnsupportedCodexModel` | `false` | Legacy fallback toggle mapped to `unsupportedCodexPolicy` (prefer using `unsupportedCodexPolicy`) |
736
+ | `fallbackToGpt52OnUnsupportedGpt53` | `true` | Legacy compatibility toggle for the `gpt-5.3-codex -> gpt-5.2-codex` edge when generic fallback is enabled |
737
+ | `unsupportedCodexFallbackChain` | `{}` | Optional per-model fallback-chain override (map of `model -> [fallback1, fallback2, ...]`) |
681
738
  | `fetchTimeoutMs` | `60000` | Request timeout to Codex backend (ms) |
682
739
  | `streamStallTimeoutMs` | `45000` | Abort non-stream parsing if SSE stalls (ms) |
683
740
 
741
+ Default unsupported-model fallback chain (used when `unsupportedCodexPolicy` is `fallback`):
742
+ - `gpt-5.3-codex -> gpt-5.2-codex`
743
+ - `gpt-5.3-codex-spark -> gpt-5.3-codex -> gpt-5.2-codex` (applies if you manually select Spark model IDs)
744
+
684
745
  ### Environment Variables
685
746
 
686
747
  ```bash
@@ -695,7 +756,9 @@ CODEX_AUTH_PREWARM=0 opencode # Disable startup prewarm (prom
695
756
  CODEX_AUTH_FAST_SESSION=1 opencode # Enable faster response defaults
696
757
  CODEX_AUTH_FAST_SESSION_STRATEGY=always opencode # Force fast mode for all prompts
697
758
  CODEX_AUTH_FAST_SESSION_MAX_INPUT_ITEMS=24 opencode # Tune fast-mode history window
698
- CODEX_AUTH_FALLBACK_GPT53_TO_GPT52=0 opencode # Disable gpt-5.3 -> gpt-5.2 fallback (strict mode)
759
+ CODEX_AUTH_UNSUPPORTED_MODEL_POLICY=fallback opencode # Enable generic unsupported-model fallback
760
+ CODEX_AUTH_FALLBACK_UNSUPPORTED_MODEL=1 opencode # Legacy fallback toggle (prefer policy var above)
761
+ CODEX_AUTH_FALLBACK_GPT53_TO_GPT52=0 opencode # Disable only the legacy gpt-5.3 -> gpt-5.2 edge
699
762
  CODEX_AUTH_FETCH_TIMEOUT_MS=120000 opencode # Override request timeout
700
763
  CODEX_AUTH_STREAM_STALL_TIMEOUT_MS=60000 opencode # Override SSE stall timeout
701
764
  ```
package/config/README.md CHANGED
@@ -1,110 +1,84 @@
1
1
  # Configuration
2
2
 
3
- This directory contains the official opencode configuration files for the OpenAI Codex OAuth plugin.
3
+ This directory contains the official OpenCode config templates for the ChatGPT Codex OAuth plugin.
4
4
 
5
- ## ⚠️ REQUIRED: Choose the Right Configuration
5
+ ## Required: choose the right config file
6
6
 
7
- **Two configuration files are available based on your OpenCode version:**
8
-
9
- | File | OpenCode Version | Description |
7
+ | File | OpenCode version | Description |
10
8
  |------|------------------|-------------|
11
- | [`opencode-modern.json`](./opencode-modern.json) | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants |
12
- | [`opencode-legacy.json`](./opencode-legacy.json) | **v1.0.209 and below** | Extended config with separate model entries for each reasoning level - 20+ individual model definitions |
9
+ | [`opencode-modern.json`](./opencode-modern.json) | **v1.0.210+** | Variant-based config: 6 base models with 22 total presets |
10
+ | [`opencode-legacy.json`](./opencode-legacy.json) | **v1.0.209 and below** | Legacy explicit entries: 22 individual model definitions |
11
+
12
+ ## Quick pick
13
13
 
14
- ### Which one should I use?
14
+ If your OpenCode version is v1.0.210 or newer:
15
15
 
16
- **If you have OpenCode v1.0.210 or newer** (check with `opencode --version`):
17
16
  ```bash
18
17
  cp config/opencode-modern.json ~/.config/opencode/opencode.json
19
18
  ```
20
19
 
21
- **If you have OpenCode v1.0.209 or older**:
20
+ If your OpenCode version is v1.0.209 or older:
21
+
22
22
  ```bash
23
23
  cp config/opencode-legacy.json ~/.config/opencode/opencode.json
24
24
  ```
25
25
 
26
- ### Why two configs?
27
-
28
- OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoning effort levels as variants under a single model. This reduces config size from 572 lines to ~150 lines while maintaining the same functionality.
29
-
30
- **What you get:**
31
-
32
- | Config File | Model Families | Reasoning Variants | Total Models |
33
- |------------|----------------|-------------------|--------------|
34
- | `opencode-modern.json` | 6 | Built-in variants (low/medium/high/xhigh) | 6 base models with 22 total variants |
35
- | `opencode-legacy.json` | 6 | Separate model entries | 22 individual model definitions |
36
-
37
- Both configs provide:
38
- - ✅ All supported GPT 5.3/5.2/5.1 variants: gpt-5.2, gpt-5.3-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini
39
- - ✅ Proper reasoning effort settings for each variant (including `xhigh` for Codex Max/5.2)
40
- - ✅ Context limits (272k context / 128k output for all Codex families)
41
- - ✅ Required options: `store: false`, `include: ["reasoning.encrypted_content"]`
42
- - ✅ Image input support for all models
43
- - ✅ All required metadata for OpenCode features
44
-
45
- ### Modern Config Benefits (v1.0.210+)
26
+ Check your version with:
46
27
 
47
- - **74% smaller**: 150 lines vs 572 lines
48
- - **DRY**: Common options defined once at provider level
49
- - **Variant cycling**: Built-in support for `Ctrl+T` to switch reasoning levels
50
- - **Easier maintenance**: Add new variants without copying model definitions
51
-
52
- ## Usage
53
-
54
- 1. **Check your OpenCode version**:
55
- ```bash
56
- opencode --version
57
- ```
28
+ ```bash
29
+ opencode --version
30
+ ```
58
31
 
59
- 2. **Copy the appropriate config** based on your version:
60
- ```bash
61
- # For v1.0.210+ (recommended):
62
- cp config/opencode-modern.json ~/.config/opencode/opencode.json
32
+ ## Why there are two templates
63
33
 
64
- # For older versions:
65
- cp config/opencode-legacy.json ~/.config/opencode/opencode.json
66
- ```
34
+ OpenCode v1.0.210+ added model `variants`, so one model entry can expose multiple reasoning levels. That keeps modern config much smaller while preserving the same effective presets.
67
35
 
68
- 3. **Run opencode**:
69
- ```bash
70
- # Modern config (v1.0.210+):
71
- opencode run "task" --model=openai/gpt-5.2 --variant=medium
72
- opencode run "task" --model=openai/gpt-5.2 --variant=high
36
+ Both templates include:
37
+ - GPT-5.2, GPT-5.3 Codex, GPT-5.1, GPT-5.1 Codex, GPT-5.1 Codex Max, GPT-5.1 Codex Mini
38
+ - Reasoning variants per model family
39
+ - `store: false` and `include: ["reasoning.encrypted_content"]`
40
+ - Context metadata (272k context / 128k output)
73
41
 
74
- # Legacy config:
75
- opencode run "task" --model=openai/gpt-5.2-medium
76
- opencode run "task" --model=openai/gpt-5.2-high
77
- ```
42
+ ## Spark model note
78
43
 
79
- > **⚠️ Important**: Use the config file appropriate for your OpenCode version. Using the modern config with an older OpenCode version (v1.0.209 or below) will not work correctly.
44
+ The templates intentionally do **not** include `gpt-5.3-codex-spark` by default. Spark is often entitlement-gated at the account/workspace level, so shipping it by default causes avoidable startup failures for many users.
80
45
 
81
- > **Note**: The config templates use an **unversioned** plugin entry (`oc-chatgpt-multi-auth`) so the installer can always pull the latest release. If you need reproducibility, pin a specific version manually.
46
+ If your workspace is entitled, you can add Spark model IDs manually.
82
47
 
83
- ### Minimal config (advanced)
48
+ ## Usage examples
84
49
 
85
- A barebones example is available at [`minimal-opencode.json`](./minimal-opencode.json). It’s intended for debugging and does not include the full GPT‑5.x/Codex preset/variant definitions.
50
+ Modern template (v1.0.210+):
86
51
 
87
- For normal usage, prefer `opencode-modern.json` (v1.0.210+) or `opencode-legacy.json` (v1.0.209 and below).
52
+ ```bash
53
+ opencode run "task" --model=openai/gpt-5.2 --variant=medium
54
+ opencode run "task" --model=openai/gpt-5.3-codex --variant=high
55
+ ```
88
56
 
89
- ## Available Models
57
+ Legacy template (v1.0.209 and below):
90
58
 
59
+ ```bash
60
+ opencode run "task" --model=openai/gpt-5.2-medium
61
+ opencode run "task" --model=openai/gpt-5.3-codex-high
62
+ ```
91
63
 
92
- Both configs provide access to the same model families:
64
+ ## Minimal config (advanced)
93
65
 
94
- - **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support
95
- - **gpt-5.3-codex** (low/medium/high/xhigh) - Latest GPT 5.3 Codex presets
96
- - **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets
97
- - **gpt-5.1-codex** (low/medium/high) - Codex model presets
98
- - **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets
99
- - **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets
66
+ A barebones debug template is available at [`minimal-opencode.json`](./minimal-opencode.json). It omits the full preset catalog.
100
67
 
101
- All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc.
68
+ ## Unsupported-model behavior
102
69
 
103
- ## Configuration Options
70
+ Current defaults are strict entitlement handling:
71
+ - `unsupportedCodexPolicy: "strict"` returns entitlement errors directly
72
+ - set `unsupportedCodexPolicy: "fallback"` (or `CODEX_AUTH_UNSUPPORTED_MODEL_POLICY=fallback`) to enable automatic fallback retries
73
+ - `fallbackToGpt52OnUnsupportedGpt53: true` keeps the legacy `gpt-5.3-codex -> gpt-5.2-codex` edge inside fallback mode
74
+ - `unsupportedCodexFallbackChain` lets you override fallback order per model
104
75
 
105
- See the main [README.md](../README.md#configuration) for detailed documentation of all configuration options.
76
+ Default fallback chain (when policy is `fallback`):
77
+ - `gpt-5.3-codex -> gpt-5.2-codex`
78
+ - `gpt-5.3-codex-spark -> gpt-5.3-codex -> gpt-5.2-codex` (only relevant if Spark IDs are added manually)
106
79
 
107
- ## Version History
80
+ ## Additional docs
108
81
 
109
- - **January 2026 (v1.0.210+)**: Introduced variant system support. Use `opencode-modern.json`
110
- - **December 2025 and earlier**: Use `opencode-legacy.json`
82
+ - Main config reference: [`docs/configuration.md`](../docs/configuration.md)
83
+ - Getting started: [`docs/getting-started.md`](../docs/getting-started.md)
84
+ - Troubleshooting: [`docs/troubleshooting.md`](../docs/troubleshooting.md)
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;GAuBG;AAGH,OAAO,KAAK,EAAE,MAAM,EAAe,MAAM,qBAAqB,CAAC;AAmI/D;;;;;;;;;;;;;;;GAeG;AAEH,eAAO,MAAM,iBAAiB,EAAE,MAs+E/B,CAAC;AAEF,eAAO,MAAM,gBAAgB,QAAoB,CAAC;AAElD,eAAe,iBAAiB,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;GAuBG;AAGH,OAAO,KAAK,EAAE,MAAM,EAAe,MAAM,qBAAqB,CAAC;AAsI/D;;;;;;;;;;;;;;;GAeG;AAEH,eAAO,MAAM,iBAAiB,EAAE,MA0iF/B,CAAC;AAEF,eAAO,MAAM,gBAAgB,QAAoB,CAAC;AAElD,eAAe,iBAAiB,CAAC"}
package/dist/index.js CHANGED
@@ -28,14 +28,14 @@ import { queuedRefresh } from "./lib/refresh-queue.js";
28
28
  import { openBrowserUrl } from "./lib/auth/browser.js";
29
29
  import { startLocalOAuthServer } from "./lib/auth/server.js";
30
30
  import { promptAddAnotherAccount, promptLoginMode } from "./lib/cli.js";
31
- import { getCodexMode, getFastSession, getFastSessionStrategy, getFastSessionMaxInputItems, getRateLimitToastDebounceMs, getRetryAllAccountsMaxRetries, getRetryAllAccountsMaxWaitMs, getRetryAllAccountsRateLimited, getFallbackToGpt52OnUnsupportedGpt53, getTokenRefreshSkewMs, getSessionRecovery, getAutoResume, getToastDurationMs, getPerProjectAccounts, getEmptyResponseMaxRetries, getEmptyResponseRetryDelayMs, getPidOffsetEnabled, getFetchTimeoutMs, getStreamStallTimeoutMs, getCodexTuiV2, getCodexTuiColorProfile, getCodexTuiGlyphMode, loadPluginConfig, } from "./lib/config.js";
31
+ import { getCodexMode, getFastSession, getFastSessionStrategy, getFastSessionMaxInputItems, getRateLimitToastDebounceMs, getRetryAllAccountsMaxRetries, getRetryAllAccountsMaxWaitMs, getRetryAllAccountsRateLimited, getFallbackToGpt52OnUnsupportedGpt53, getUnsupportedCodexPolicy, getUnsupportedCodexFallbackChain, getTokenRefreshSkewMs, getSessionRecovery, getAutoResume, getToastDurationMs, getPerProjectAccounts, getEmptyResponseMaxRetries, getEmptyResponseRetryDelayMs, getPidOffsetEnabled, getFetchTimeoutMs, getStreamStallTimeoutMs, getCodexTuiV2, getCodexTuiColorProfile, getCodexTuiGlyphMode, loadPluginConfig, } from "./lib/config.js";
32
32
  import { AUTH_LABELS, CODEX_BASE_URL, DUMMY_API_KEY, LOG_STAGES, PLUGIN_NAME, PROVIDER_ID, ACCOUNT_LIMITS, } from "./lib/constants.js";
33
33
  import { initLogger, logRequest, logDebug, logInfo, logWarn, logError, setCorrelationId, clearCorrelationId, } from "./lib/logger.js";
34
34
  import { checkAndNotify } from "./lib/auto-update-checker.js";
35
35
  import { handleContextOverflow } from "./lib/context-overflow.js";
36
36
  import { AccountManager, getAccountIdCandidates, extractAccountEmail, extractAccountId, formatAccountLabel, formatCooldown, formatWaitTime, sanitizeEmail, selectBestAccountCandidate, shouldUpdateAccountIdFromToken, resolveRequestAccountId, parseRateLimitReason, } from "./lib/accounts.js";
37
37
  import { getStoragePath, loadAccounts, saveAccounts, clearAccounts, setStoragePath, exportAccounts, importAccounts, loadFlaggedAccounts, saveFlaggedAccounts, clearFlaggedAccounts, StorageError, formatStorageErrorHint, } from "./lib/storage.js";
38
- import { createCodexHeaders, extractRequestUrl, handleErrorResponse, handleSuccessResponse, shouldFallbackToGpt52OnUnsupportedGpt53, refreshAndUpdateToken, rewriteUrlForCodex, shouldRefreshToken, transformRequestForCodex, } from "./lib/request/fetch-helpers.js";
38
+ import { createCodexHeaders, extractRequestUrl, handleErrorResponse, handleSuccessResponse, getUnsupportedCodexModelInfo, resolveUnsupportedCodexFallbackModel, refreshAndUpdateToken, rewriteUrlForCodex, shouldRefreshToken, transformRequestForCodex, } from "./lib/request/fetch-helpers.js";
39
39
  import { applyFastSessionDefaults } from "./lib/request/request-transformer.js";
40
40
  import { getRateLimitBackoff, RATE_LIMIT_SHORT_RETRY_THRESHOLD_MS, resetRateLimitBackoff, } from "./lib/request/rate-limit-backoff.js";
41
41
  import { isEmptyResponse } from "./lib/request/response-handler.js";
@@ -477,12 +477,12 @@ export const OpenAIOAuthPlugin = async ({ client }) => {
477
477
  if (auth.type !== "oauth") {
478
478
  return {};
479
479
  }
480
- // Only handle multi-account auth (identified by multiAccount flag)
481
- // If auth was created by built-in plugin, let built-in handle it
480
+ // Prefer multi-account auth metadata when available, but still handle
481
+ // plain OAuth credentials (for OpenCode versions that inject internal
482
+ // Codex auth first and omit the multiAccount marker).
482
483
  const authWithMulti = auth;
483
484
  if (!authWithMulti.multiAccount) {
484
- logDebug(`[${PLUGIN_NAME}] Auth is not multi-account, skipping loader`);
485
- return {};
485
+ logDebug(`[${PLUGIN_NAME}] Auth is missing multiAccount marker; continuing with single-account compatibility mode`);
486
486
  }
487
487
  // Acquire mutex for thread-safe initialization
488
488
  // Use while loop to handle multiple concurrent waiters correctly
@@ -528,7 +528,10 @@ export const OpenAIOAuthPlugin = async ({ client }) => {
528
528
  const retryAllAccountsRateLimited = getRetryAllAccountsRateLimited(pluginConfig);
529
529
  const retryAllAccountsMaxWaitMs = getRetryAllAccountsMaxWaitMs(pluginConfig);
530
530
  const retryAllAccountsMaxRetries = getRetryAllAccountsMaxRetries(pluginConfig);
531
+ const unsupportedCodexPolicy = getUnsupportedCodexPolicy(pluginConfig);
532
+ const fallbackOnUnsupportedCodexModel = unsupportedCodexPolicy === "fallback";
531
533
  const fallbackToGpt52OnUnsupportedGpt53 = getFallbackToGpt52OnUnsupportedGpt53(pluginConfig);
534
+ const unsupportedCodexFallbackChain = getUnsupportedCodexFallbackChain(pluginConfig);
532
535
  const toastDurationMs = getToastDurationMs(pluginConfig);
533
536
  const perProjectAccounts = getPerProjectAccounts(pluginConfig);
534
537
  const fetchTimeoutMs = getFetchTimeoutMs(pluginConfig);
@@ -547,7 +550,7 @@ export const OpenAIOAuthPlugin = async ({ client }) => {
547
550
  if (fastSessionEnabled) {
548
551
  logDebug("Fast session mode enabled", {
549
552
  reasoningEffort: "none/low",
550
- reasoningSummary: "off",
553
+ reasoningSummary: "auto",
551
554
  textVerbosity: "low",
552
555
  fastSessionStrategy,
553
556
  fastSessionMaxInputItems,
@@ -710,7 +713,10 @@ export const OpenAIOAuthPlugin = async ({ client }) => {
710
713
  };
711
714
  let allRateLimitedRetries = 0;
712
715
  let emptyResponseRetries = 0;
713
- let attemptedGpt53Fallback = false;
716
+ const attemptedUnsupportedFallbackModels = new Set();
717
+ if (model) {
718
+ attemptedUnsupportedFallbackModels.add(model);
719
+ }
714
720
  while (true) {
715
721
  const accountCount = accountManager.getAccountCount();
716
722
  const attempted = new Set();
@@ -834,14 +840,40 @@ export const OpenAIOAuthPlugin = async ({ client }) => {
834
840
  requestCorrelationId,
835
841
  threadId: threadIdCandidate,
836
842
  });
837
- if (fallbackToGpt52OnUnsupportedGpt53 &&
838
- !attemptedGpt53Fallback &&
839
- shouldFallbackToGpt52OnUnsupportedGpt53(model, errorBody)) {
843
+ const unsupportedModelInfo = getUnsupportedCodexModelInfo(errorBody);
844
+ const hasRemainingAccounts = attempted.size < Math.max(1, accountCount);
845
+ // Entitlements can differ by account/workspace, so try remaining
846
+ // accounts before degrading the model via fallback.
847
+ if (unsupportedModelInfo.isUnsupported && hasRemainingAccounts) {
848
+ const blockedModel = unsupportedModelInfo.unsupportedModel ?? model ?? "requested model";
849
+ accountManager.refundToken(account, modelFamily, model);
850
+ accountManager.recordFailure(account, modelFamily, model);
851
+ account.lastSwitchReason = "rotation";
852
+ runtimeMetrics.lastError = `Unsupported model on account ${account.index + 1}: ${blockedModel}`;
853
+ logWarn(`Model ${blockedModel} is unsupported for account ${account.index + 1}. Trying next account/workspace before fallback.`, {
854
+ unsupportedCodexPolicy,
855
+ requestedModel: blockedModel,
856
+ effectiveModel: blockedModel,
857
+ fallbackApplied: false,
858
+ fallbackReason: "unsupported-model-entitlement",
859
+ });
860
+ break;
861
+ }
862
+ const fallbackModel = resolveUnsupportedCodexFallbackModel({
863
+ requestedModel: model,
864
+ errorBody,
865
+ attemptedModels: attemptedUnsupportedFallbackModels,
866
+ fallbackOnUnsupportedCodexModel,
867
+ fallbackToGpt52OnUnsupportedGpt53,
868
+ customChain: unsupportedCodexFallbackChain,
869
+ });
870
+ if (fallbackModel) {
840
871
  const previousModel = model ?? "gpt-5.3-codex";
841
872
  const previousModelFamily = modelFamily;
842
- attemptedGpt53Fallback = true;
873
+ attemptedUnsupportedFallbackModels.add(previousModel);
874
+ attemptedUnsupportedFallbackModels.add(fallbackModel);
843
875
  accountManager.refundToken(account, previousModelFamily, previousModel);
844
- model = "gpt-5.2-codex";
876
+ model = fallbackModel;
845
877
  modelFamily = getModelFamily(model);
846
878
  quotaKey = `${modelFamily}:${model}`;
847
879
  if (transformedBody && typeof transformedBody === "object") {
@@ -870,10 +902,28 @@ export const OpenAIOAuthPlugin = async ({ client }) => {
870
902
  });
871
903
  accountManager.consumeToken(account, modelFamily, model);
872
904
  runtimeMetrics.lastError = `Model fallback: ${previousModel} -> ${model}`;
873
- logWarn(`Model ${previousModel} is unsupported for this ChatGPT account. Falling back to ${model}.`);
905
+ logWarn(`Model ${previousModel} is unsupported for this ChatGPT account. Falling back to ${model}.`, {
906
+ unsupportedCodexPolicy,
907
+ requestedModel: previousModel,
908
+ effectiveModel: model,
909
+ fallbackApplied: true,
910
+ fallbackReason: "unsupported-model-entitlement",
911
+ });
874
912
  await showToast(`Model ${previousModel} is not available for this account. Retrying with ${model}.`, "warning", { duration: toastDurationMs });
875
913
  continue;
876
914
  }
915
+ if (unsupportedModelInfo.isUnsupported && !fallbackOnUnsupportedCodexModel) {
916
+ const blockedModel = unsupportedModelInfo.unsupportedModel ?? model ?? "requested model";
917
+ runtimeMetrics.lastError = `Unsupported model (strict): ${blockedModel}`;
918
+ logWarn(`Model ${blockedModel} is unsupported for this ChatGPT account. Strict policy blocks automatic fallback.`, {
919
+ unsupportedCodexPolicy,
920
+ requestedModel: blockedModel,
921
+ effectiveModel: blockedModel,
922
+ fallbackApplied: false,
923
+ fallbackReason: "unsupported-model-entitlement",
924
+ });
925
+ await showToast(`Model ${blockedModel} is not available for this account. Strict policy blocked automatic fallback.`, "warning", { duration: toastDurationMs });
926
+ }
877
927
  if (recoveryHook && errorBody && isRecoverableError(errorBody)) {
878
928
  const errorType = detectErrorType(errorBody);
879
929
  const toastContent = getRecoveryToastContent(errorType);