ai-cli-mcp 2.14.0 → 2.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. package/.github/dependabot.yml +28 -0
  2. package/.github/workflows/ci.yml +4 -1
  3. package/.github/workflows/dependency-review.yml +22 -0
  4. package/CHANGELOG.md +14 -0
  5. package/README.ja.md +25 -6
  6. package/README.md +25 -7
  7. package/dist/__tests__/app-cli.test.js +24 -4
  8. package/dist/__tests__/cli-bin-smoke.test.js +43 -0
  9. package/dist/__tests__/cli-builder.test.js +92 -14
  10. package/dist/__tests__/cli-process-service.test.js +187 -0
  11. package/dist/__tests__/cli-utils.test.js +31 -0
  12. package/dist/__tests__/e2e.test.js +77 -51
  13. package/dist/__tests__/mcp-contract.test.js +154 -0
  14. package/dist/__tests__/parsers.test.js +62 -1
  15. package/dist/__tests__/process-management.test.js +1 -1
  16. package/dist/__tests__/server.test.js +35 -6
  17. package/dist/__tests__/utils/opencode-mock.js +91 -0
  18. package/dist/__tests__/validation.test.js +40 -2
  19. package/dist/app/cli.js +4 -4
  20. package/dist/app/mcp.js +8 -4
  21. package/dist/cli-builder.js +66 -27
  22. package/dist/cli-parse.js +11 -5
  23. package/dist/cli-process-service.js +158 -25
  24. package/dist/cli-utils.js +14 -23
  25. package/dist/cli.js +6 -4
  26. package/dist/model-catalog.js +13 -1
  27. package/dist/parsers.js +57 -26
  28. package/dist/process-result.js +9 -2
  29. package/dist/process-service.js +23 -17
  30. package/dist/server.js +1 -2
  31. package/package.json +9 -6
  32. package/src/__tests__/app-cli.test.ts +24 -4
  33. package/src/__tests__/cli-bin-smoke.test.ts +62 -1
  34. package/src/__tests__/cli-builder.test.ts +110 -14
  35. package/src/__tests__/cli-process-service.test.ts +217 -0
  36. package/src/__tests__/cli-utils.test.ts +34 -0
  37. package/src/__tests__/e2e.test.ts +85 -54
  38. package/src/__tests__/mcp-contract.test.ts +179 -0
  39. package/src/__tests__/parsers.test.ts +73 -1
  40. package/src/__tests__/process-management.test.ts +1 -1
  41. package/src/__tests__/server.test.ts +45 -10
  42. package/src/__tests__/utils/opencode-mock.ts +108 -0
  43. package/src/__tests__/validation.test.ts +48 -2
  44. package/src/app/cli.ts +4 -4
  45. package/src/app/mcp.ts +8 -4
  46. package/src/cli-builder.ts +90 -31
  47. package/src/cli-parse.ts +11 -5
  48. package/src/cli-process-service.ts +193 -22
  49. package/src/cli-utils.ts +37 -33
  50. package/src/cli.ts +6 -4
  51. package/src/model-catalog.ts +24 -1
  52. package/src/parsers.ts +77 -31
  53. package/src/process-result.ts +11 -2
  54. package/src/process-service.ts +28 -15
  55. package/src/server.ts +2 -2
  56. package/vitest.config.unit.ts +2 -3
@@ -0,0 +1,28 @@
1
+ version: 2
2
+ updates:
3
+ - package-ecosystem: "npm"
4
+ directory: "/"
5
+ target-branch: "develop"
6
+ schedule:
7
+ interval: "weekly"
8
+ open-pull-requests-limit: 5
9
+ groups:
10
+ npm-production:
11
+ dependency-type: "production"
12
+ patterns:
13
+ - "*"
14
+ npm-development:
15
+ dependency-type: "development"
16
+ patterns:
17
+ - "*"
18
+
19
+ - package-ecosystem: "github-actions"
20
+ directory: "/"
21
+ target-branch: "develop"
22
+ schedule:
23
+ interval: "weekly"
24
+ open-pull-requests-limit: 2
25
+ groups:
26
+ github-actions:
27
+ patterns:
28
+ - "*"
@@ -12,7 +12,7 @@ jobs:
12
12
 
13
13
  strategy:
14
14
  matrix:
15
- node-version: [20.x, 22.x]
15
+ node-version: [20.19.0, 22.12.0, 24.x]
16
16
 
17
17
  steps:
18
18
  - name: Checkout repository
@@ -29,3 +29,6 @@ jobs:
29
29
 
30
30
  - name: Build project
31
31
  run: npm run build
32
+
33
+ - name: Run unit tests
34
+ run: npm run test:unit
@@ -0,0 +1,22 @@
1
+ name: Dependency Review
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - develop
7
+
8
+ permissions:
9
+ contents: read
10
+
11
+ jobs:
12
+ dependency-review:
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - name: Checkout
16
+ uses: actions/checkout@v4
17
+
18
+ - name: Dependency Review
19
+ uses: actions/dependency-review-action@v4
20
+ with:
21
+ fail-on-severity: moderate
22
+ fail-on-scopes: runtime
package/CHANGELOG.md CHANGED
@@ -1,3 +1,17 @@
1
+ # [2.15.0](https://github.com/mkXultra/ai-cli-mcp/compare/v2.14.1...v2.15.0) (2026-04-09)
2
+
3
+
4
+ ### Features
5
+
6
+ * OpenCode CLIを新しいAIバックエンドとして追加 ([0677c57](https://github.com/mkXultra/ai-cli-mcp/commit/0677c57659b36fd1083cd96166c2c608c45038b3))
7
+
8
+ ## [2.14.1](https://github.com/mkXultra/ai-cli-mcp/compare/v2.14.0...v2.14.1) (2026-04-07)
9
+
10
+
11
+ ### Bug Fixes
12
+
13
+ * 削除済み作業フォルダでのプロセス操作クラッシュを修正 ([02d765f](https://github.com/mkXultra/ai-cli-mcp/commit/02d765ff76ebd295118e16a112ea3e7ac6fab111))
14
+
1
15
  # [2.14.0](https://github.com/mkXultra/ai-cli-mcp/compare/v2.13.0...v2.14.0) (2026-04-07)
2
16
 
3
17
 
package/README.ja.md CHANGED
@@ -5,7 +5,7 @@
5
5
 
6
6
  > **📦 パッケージ移行のお知らせ**: 本パッケージは旧名 `@mkxultra/claude-code-mcp` から `ai-cli-mcp` に名称変更されました。これは、複数のAI CLIツールのサポート拡大を反映したものです。
7
7
 
8
- AI CLIツール(Claude, Codex, Gemini, Forge)をバックグラウンドプロセスとして実行し、権限処理を自動化するMCP(Model Context Protocol)サーバーです。
8
+ AI CLIツール(Claude, Codex, Gemini, Forge, OpenCode)をバックグラウンドプロセスとして実行し、権限処理を自動化するMCP(Model Context Protocol)サーバーです。
9
9
 
10
10
  Cursorなどのエディタが、複雑な手順を伴う編集や操作に苦戦していることに気づいたことはありませんか?このサーバーは、強力な統合 `run` ツールを提供し、複数のAIエージェントを活用してコーディングタスクをより効果的に処理できるようにします。
11
11
 
@@ -21,11 +21,13 @@ Cursorなどのエディタが、複雑な手順を伴う編集や操作に苦
21
21
  - 自動承認モードでCodex CLIを実行(`--full-auto` を使用)
22
22
  - 自動承認モードでGemini CLIを実行(`-y` を使用)
23
23
  - Forge CLI を非対話モードで実行(`forge -C <workFolder> -p <prompt>` を使用)
24
+ - OpenCode を非対話 JSON モードで実行(`opencode run --format json --dir <workFolder> <prompt>` を使用)
24
25
  - 複数のAIモデルのサポート:
25
26
  - Claude (sonnet, sonnet[1m], opus, opusplan, haiku)
26
27
  - Codex (gpt-5.4, gpt-5.3-codex, gpt-5.2-codex, gpt-5.1-codex-mini, gpt-5.1-codex-max, など)
27
28
  - Gemini (gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview)
28
29
  - Forge (`forge`)
30
+ - OpenCode (`opencode` と `oc-<provider/model>` ラッパー。例: `oc-openai/gpt-5.4`)
29
31
  - PID追跡によるバックグラウンドプロセスの管理
30
32
  - ツールからの構造化された出力の解析と返却
31
33
 
@@ -67,6 +69,7 @@ Cursorなどのエディタが、複雑な手順を伴う編集や操作に苦
67
69
  - **Codex CLI**(オプション): インストール済みで、ログインなどの初期設定が完了していること。
68
70
  - **Gemini CLI**(オプション): インストール済みで、ログインなどの初期設定が完了していること。
69
71
  - **Forge CLI**(オプション): インストール済みで、初期設定が完了していること。
72
+ - **OpenCode**(オプション): インストール済みで、設定が完了していること。この統合では `opencode run --format json` を使用し、明示的なモデル指定は `ai-cli models` が公開する `oc-<provider/model>` 構文に従います。
70
73
 
71
74
  ## インストールと使い方
72
75
 
@@ -116,6 +119,8 @@ npm install -g ai-cli-mcp
116
119
  ai-cli doctor
117
120
  ai-cli models
118
121
  ai-cli run --cwd "$PWD" --model sonnet --prompt "summarize this repository"
122
+ ai-cli run --cwd "$PWD" --model opencode --prompt "OpenCode のデフォルト設定でこのリポジトリを要約して"
123
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --session-id ses_123 --prompt "明示モデル付きでこの OpenCode セッションを続けて"
119
124
  ai-cli ps
120
125
  ai-cli result 12345
121
126
  ai-cli result 12345 --verbose
@@ -132,6 +137,7 @@ ai-cli-mcp
132
137
 
133
138
  ```bash
134
139
  npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model sonnet --prompt "hello"
140
+ npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "OpenCode で hello"
135
141
  ```
136
142
 
137
143
  ## 重要な初回セットアップ
@@ -185,6 +191,8 @@ macOSでは、これらのツールを初めて実行する際にフォルダへ
185
191
  ai-cli doctor
186
192
  ai-cli models
187
193
  ai-cli run --cwd "$PWD" --model codex-ultra --prompt "fix failing tests"
194
+ ai-cli run --cwd "$PWD" --model opencode --session-id ses_existing --prompt "この OpenCode セッションを継続して"
195
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "明示的な OpenCode モデルで実行"
188
196
  ai-cli ps
189
197
  ai-cli wait 12345
190
198
  ai-cli wait 12345 --verbose
@@ -195,6 +203,13 @@ ai-cli cleanup
195
203
 
196
204
  `run` の作業ディレクトリ指定は `--cwd` が基本です。互換性のために `--workFolder` / `--work-folder` も受け付けます。
197
205
 
206
+ OpenCode のモデル指定は次の 2 つを受け付けます。
207
+
208
+ - `opencode`: OpenCode 側で設定されたデフォルトモデルを使用
209
+ - `oc-<provider/model>`: 明示的な OpenCode の provider/model を指定。例: `oc-openai/gpt-5.4`
210
+
211
+ `ai-cli models` は OpenCode を機械可読に `opencode: ["opencode"]` と `dynamicModelBackends.opencode` で公開します。実際に利用可能なバックエンドネイティブなモデル一覧は `opencode models` で確認してください。
212
+
198
213
  `doctor` は CLI バイナリの存在確認と path 解決だけを行います。ログイン状態や利用規約同意までは確認しません。
199
214
 
200
215
  ## CLI の状態保存先
@@ -210,12 +225,13 @@ ai-cli cleanup
210
225
  - `meta.json`
211
226
  - `stdout.log`
212
227
  - `stderr.log`
228
+ - `exit-status.json`(detached な OpenCode 実行用)
213
229
 
214
230
  完了済み・失敗済みの実行は `ai-cli cleanup` で削除できます。`running` のものは保持されます。
215
231
 
216
232
  ## 既知の制約
217
233
 
218
- detached 実行された `ai-cli` の自然終了 exit code は、まだ永続化していません。そのため、CLI は出力と running/completed 状態は返せますが、自然終了したバックグラウンド実行の `exitCode` は現時点では保証しません。
234
+ detached 実行された `ai-cli` では、OpenCode バックエンドに限り自然終了時の exit status を永続化します。そのため OpenCode の失敗終了は非ゼロ exit code を含めて `failed` として扱われ、結果では生の `stdout` / `stderr` を保持します。一方、他の detached バックエンドでは従来どおり、より広い exit-status 追跡が追加されるまでは自然終了した実行が信頼できる exit code なしで `completed` と見なされる制約が残ります。
219
235
 
220
236
  ## MCPクライアントへの接続
221
237
 
@@ -229,7 +245,7 @@ detached 実行された `ai-cli` の自然終了 exit code は、まだ永続
229
245
 
230
246
  ### `run`
231
247
 
232
- Claude CLI、Codex CLI、Gemini CLI、または Forge CLI を使用してプロンプトを実行します。モデル名に基づいて適切なCLIが自動的に選択されます。
248
+ Claude CLI、Codex CLI、Gemini CLI、Forge CLI、または OpenCode を使用してプロンプトを実行します。モデル名に基づいて適切なCLIが自動的に選択されます。
233
249
 
234
250
  **引数:**
235
251
  - `prompt` (string, 任意): AIエージェントに送信するプロンプト。`prompt` または `prompt_file` のいずれかが必須です。
@@ -241,8 +257,9 @@ Claude CLI、Codex CLI、Gemini CLI、または Forge CLI を使用してプロ
241
257
  - Codex: `gpt-5.4`, `gpt-5.3-codex`, `gpt-5.2-codex`, `gpt-5.1-codex-mini`, `gpt-5.1-codex-max`, `gpt-5.2`, `gpt-5.1`, `gpt-5`
242
258
  - Gemini: `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-3.1-pro-preview`, `gemini-3-pro-preview`, `gemini-3-flash-preview`
243
259
  - Forge: `forge`
244
- - `reasoning_effort` (string, 任意): Claude と Codex の推論制御。Claude では `--effort` を使います(許容値: "low", "medium", "high")。Codex では `model_reasoning_effort` を使います(許容値: "low", "medium", "high", "xhigh")。Forge では `reasoning_effort` はサポートしません。
245
- - `session_id` (string, 任意): 以前のセッションを再開するためのセッションID。対応モデル: haiku, sonnet, opus, gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview, forge。
260
+ - OpenCode: `opencode`(設定済みのデフォルトモデル)および `oc-openai/gpt-5.4` のような明示ラッパー
261
+ - `reasoning_effort` (string, 任意): Claude と Codex の推論制御。Claude では `--effort` を使います(許容値: "low", "medium", "high")。Codex では `model_reasoning_effort` を使います(許容値: "low", "medium", "high", "xhigh")。Gemini、Forge、OpenCode では `reasoning_effort` はサポートしません。
262
+ - `session_id` (string, 任意): 以前のセッションを再開するためのセッションID。Claude、Codex、Gemini、Forge、OpenCode でサポートされます。OpenCode は `--session` による in-place resume で再開し、`oc-<provider/model>` の明示指定と併用できます。
246
263
 
247
264
  ### `wait`
248
265
 
@@ -311,6 +328,7 @@ npm run test:e2e
311
328
  - `CODEX_CLI_NAME`: Codex CLIのバイナリ名または絶対パスを上書き(デフォルト: `codex`)
312
329
  - `GEMINI_CLI_NAME`: Gemini CLIのバイナリ名または絶対パスを上書き(デフォルト: `gemini`)
313
330
  - `FORGE_CLI_NAME`: Forge CLIのバイナリ名または絶対パスを上書き(デフォルト: `forge`)
331
+ - `OPENCODE_CLI_NAME`: OpenCode CLIのバイナリ名または絶対パスを上書き(デフォルト: `opencode`)
314
332
  - `MCP_CLAUDE_DEBUG`: デバッグログを有効化(`true` に設定すると詳細な出力が表示されます)
315
333
 
316
334
  **CLI名の指定方法:**
@@ -329,7 +347,8 @@ npm run test:e2e
329
347
  ],
330
348
  "env": {
331
349
  "CLAUDE_CLI_NAME": "claude-custom",
332
- "CODEX_CLI_NAME": "codex-custom"
350
+ "CODEX_CLI_NAME": "codex-custom",
351
+ "OPENCODE_CLI_NAME": "opencode-custom"
333
352
  }
334
353
  },
335
354
  ```
package/README.md CHANGED
@@ -7,7 +7,7 @@
7
7
 
8
8
  > **📦 Package Migration Notice**: This package was formerly `@mkxultra/claude-code-mcp` and has been renamed to `ai-cli-mcp` to reflect its expanded support for multiple AI CLI tools.
9
9
 
10
- An MCP (Model Context Protocol) server that allows running AI CLI tools (Claude, Codex, Gemini, and Forge) in background processes with automatic permission handling.
10
+ An MCP (Model Context Protocol) server that allows running AI CLI tools (Claude, Codex, Gemini, Forge, and OpenCode) in background processes with automatic permission handling.
11
11
 
12
12
  Did you notice that Cursor sometimes struggles with complex, multi-step edits or operations? This server, with its powerful unified `run` tool, enables multiple AI agents to handle your coding tasks more effectively.
13
13
 
@@ -23,7 +23,8 @@ This MCP server provides tools that can be used by LLMs to interact with AI CLI
23
23
  - Execute Codex CLI with automatic approval mode (using `--full-auto`)
24
24
  - Execute Gemini CLI with automatic approval mode (using `-y`)
25
25
  - Execute Forge CLI in non-interactive mode (using `forge -C <workFolder> -p <prompt>`)
26
- - Support multiple AI models: Claude (sonnet, sonnet[1m], opus, opusplan, haiku), Codex (gpt-5.4, gpt-5.3-codex, gpt-5.2-codex, gpt-5.1-codex-mini, gpt-5.1-codex-max, gpt-5.2, gpt-5.1, gpt-5.1-codex, gpt-5-codex, gpt-5-codex-mini, gpt-5), Gemini (gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview), and Forge (`forge`)
26
+ - Execute OpenCode in non-interactive JSON mode (using `opencode run --format json --dir <workFolder> <prompt>`)
27
+ - Support multiple AI models: Claude (sonnet, sonnet[1m], opus, opusplan, haiku), Codex (gpt-5.4, gpt-5.3-codex, gpt-5.2-codex, gpt-5.1-codex-mini, gpt-5.1-codex-max, gpt-5.2, gpt-5.1, gpt-5.1-codex, gpt-5-codex, gpt-5-codex-mini, gpt-5), Gemini (gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview), Forge (`forge`), and OpenCode (`opencode` plus explicit `oc-<provider/model>` wrappers such as `oc-openai/gpt-5.4`)
27
28
  - Manage background processes with PID tracking
28
29
  - Parse and return structured outputs from both tools
29
30
 
@@ -65,6 +66,7 @@ The only prerequisite is that the AI CLI tools you want to use are locally insta
65
66
  - **Codex CLI** (Optional): Installed and initial setup (login etc.) completed.
66
67
  - **Gemini CLI** (Optional): Installed and initial setup (login etc.) completed.
67
68
  - **Forge CLI** (Optional): Installed and initial setup completed.
69
+ - **OpenCode** (Optional): Installed and configured. This integration uses `opencode run --format json`, and explicit provider/model selection follows the `oc-<provider/model>` wrapper syntax exposed by `ai-cli models`.
68
70
 
69
71
  ## Installation & Usage
70
72
 
@@ -114,6 +116,8 @@ Examples:
114
116
  ai-cli doctor
115
117
  ai-cli models
116
118
  ai-cli run --cwd "$PWD" --model sonnet --prompt "summarize this repository"
119
+ ai-cli run --cwd "$PWD" --model opencode --prompt "summarize this repository with OpenCode defaults"
120
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --session-id ses_123 --prompt "continue this session with an explicit OpenCode model"
117
121
  ai-cli ps
118
122
  ai-cli result 12345
119
123
  ai-cli result 12345 --verbose
@@ -130,6 +134,7 @@ Because the published package name is still `ai-cli-mcp`, the shortest `npx` for
130
134
 
131
135
  ```bash
132
136
  npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model sonnet --prompt "hello"
137
+ npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "hello from OpenCode"
133
138
  ```
134
139
 
135
140
  ## Important First-Time Setup
@@ -183,6 +188,8 @@ Example flow:
183
188
  ai-cli doctor
184
189
  ai-cli models
185
190
  ai-cli run --cwd "$PWD" --model codex-ultra --prompt "fix failing tests"
191
+ ai-cli run --cwd "$PWD" --model opencode --session-id ses_existing --prompt "continue this OpenCode session"
192
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "run with an explicit OpenCode backend model"
186
193
  ai-cli ps
187
194
  ai-cli wait 12345
188
195
  ai-cli wait 12345 --verbose
@@ -193,6 +200,13 @@ ai-cli cleanup
193
200
 
194
201
  `run` accepts `--cwd` as the primary working-directory flag and also accepts the older aliases `--workFolder` / `--work-folder` for compatibility.
195
202
 
203
+ OpenCode model selection accepts either:
204
+
205
+ - `opencode` for the CLI's configured default model
206
+ - `oc-<provider/model>` for an explicit OpenCode provider/model, for example `oc-openai/gpt-5.4`
207
+
208
+ `ai-cli models` exposes OpenCode machine-readably via `opencode: ["opencode"]` plus `dynamicModelBackends.opencode`, which points users to `opencode models` for backend-native discovery.
209
+
196
210
  `doctor` checks only binary existence and path resolution. It does not verify login state or terms acceptance.
197
211
 
198
212
  ## CLI State Storage
@@ -208,12 +222,13 @@ Each PID directory contains:
208
222
  - `meta.json`
209
223
  - `stdout.log`
210
224
  - `stderr.log`
225
+ - `exit-status.json` for detached OpenCode runs
211
226
 
212
227
  Use `ai-cli cleanup` to remove completed and failed runs. Running processes are preserved.
213
228
 
214
229
  ## Known Limitation
215
230
 
216
- Detached `ai-cli` runs do not currently persist natural process exit codes. As a result, the CLI can report process output and running/completed state, but it does not yet guarantee `exitCode` for naturally finished background runs.
231
+ Detached `ai-cli` runs persist natural exit status for OpenCode-backed runs, including failed exit codes used to preserve raw OpenCode stdout/stderr in result output. Other detached backends still keep the pre-existing limitation: naturally finished runs may be surfaced as completed without a reliable persisted exit code until broader exit tracking is added.
217
232
 
218
233
  ## Connecting to Your MCP Client
219
234
 
@@ -227,7 +242,7 @@ This server exposes the following tools:
227
242
 
228
243
  ### `run`
229
244
 
230
- Executes a prompt using Claude CLI, Codex CLI, Gemini CLI, or Forge CLI. The appropriate CLI is automatically selected based on the model name.
245
+ Executes a prompt using Claude CLI, Codex CLI, Gemini CLI, Forge CLI, or OpenCode. The appropriate CLI is automatically selected based on the model name.
231
246
 
232
247
  **Arguments:**
233
248
  - `prompt` (string, optional): The prompt to send to the AI agent. Either `prompt` or `prompt_file` is required.
@@ -239,8 +254,9 @@ Executes a prompt using Claude CLI, Codex CLI, Gemini CLI, or Forge CLI. The app
239
254
  - Codex: `gpt-5.4`, `gpt-5.3-codex`, `gpt-5.2-codex`, `gpt-5.1-codex-mini`, `gpt-5.1-codex-max`, `gpt-5.2`, `gpt-5.1`, `gpt-5`
240
255
  - Gemini: `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-3.1-pro-preview`, `gemini-3-pro-preview`, `gemini-3-flash-preview`
241
256
  - Forge: `forge`
242
- - `reasoning_effort` (string, optional): Reasoning control for Claude and Codex. Claude uses `--effort` (allowed: "low", "medium", "high"). Codex uses `model_reasoning_effort` (allowed: "low", "medium", "high", "xhigh"). Forge does not support `reasoning_effort`.
243
- - `session_id` (string, optional): Optional session ID to resume a previous session. Supported for: haiku, sonnet, opus, gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview, forge.
257
+ - OpenCode: `opencode` for the configured default backend model, plus explicit wrappers like `oc-openai/gpt-5.4`
258
+ - `reasoning_effort` (string, optional): Reasoning control for Claude and Codex. Claude uses `--effort` (allowed: "low", "medium", "high"). Codex uses `model_reasoning_effort` (allowed: "low", "medium", "high", "xhigh"). Gemini, Forge, and OpenCode do not support `reasoning_effort`.
259
+ - `session_id` (string, optional): Optional session ID to resume a previous session. Supported for Claude, Codex, Gemini, Forge, and OpenCode. OpenCode resumes in place via `--session` and may also be combined with an explicit `oc-<provider/model>` selection.
244
260
 
245
261
  ### `wait`
246
262
 
@@ -295,6 +311,7 @@ Normally not required, but useful for customizing CLI paths or debugging.
295
311
  - `CODEX_CLI_NAME`: Override the Codex CLI binary name or provide an absolute path (default: `codex`)
296
312
  - `GEMINI_CLI_NAME`: Override the Gemini CLI binary name or provide an absolute path (default: `gemini`)
297
313
  - `FORGE_CLI_NAME`: Override the Forge CLI binary name or provide an absolute path (default: `forge`)
314
+ - `OPENCODE_CLI_NAME`: Override the OpenCode CLI binary name or provide an absolute path (default: `opencode`)
298
315
  - `MCP_CLAUDE_DEBUG`: Enable debug logging (set to `true` for verbose output)
299
316
 
300
317
  **CLI Name Specification:**
@@ -313,7 +330,8 @@ Normally not required, but useful for customizing CLI paths or debugging.
313
330
  ],
314
331
  "env": {
315
332
  "CLAUDE_CLI_NAME": "claude-custom",
316
- "CODEX_CLI_NAME": "codex-custom"
333
+ "CODEX_CLI_NAME": "codex-custom",
334
+ "OPENCODE_CLI_NAME": "opencode-custom"
317
335
  }
318
336
  },
319
337
  ```
@@ -180,11 +180,21 @@ describe('ai-cli app', () => {
180
180
  const stdout = vi.fn();
181
181
  const stderr = vi.fn();
182
182
  const exitCode = await runCli(['models'], { stdout, stderr });
183
+ const payload = JSON.parse(stdout.mock.calls[0][0]);
183
184
  expect(exitCode).toBe(0);
184
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"aliases"'));
185
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"claude-ultra"'));
186
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"gpt-5.4"'));
187
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"forge"'));
185
+ expect(payload.aliases).toEqual(expect.any(Array));
186
+ expect(payload.claude).toContain('sonnet');
187
+ expect(payload.codex).toContain('gpt-5.4');
188
+ expect(payload.forge).toEqual(['forge']);
189
+ expect(payload.opencode).toEqual(['opencode']);
190
+ expect(payload.dynamicModelBackends).toEqual({
191
+ opencode: {
192
+ explicitPrefix: 'oc-',
193
+ explicitPattern: 'oc-<provider/model>',
194
+ discoveryCommand: 'opencode models',
195
+ modelsAreDynamic: true,
196
+ },
197
+ });
188
198
  expect(stderr).not.toHaveBeenCalled();
189
199
  });
190
200
  it('prints doctor status as structured json', async () => {
@@ -215,6 +225,12 @@ describe('ai-cli app', () => {
215
225
  available: true,
216
226
  lookup: 'path',
217
227
  },
228
+ opencode: {
229
+ configuredCommand: 'opencode',
230
+ resolvedPath: '/tmp/bin/opencode',
231
+ available: true,
232
+ lookup: 'path',
233
+ },
218
234
  });
219
235
  const exitCode = await runCli(['doctor'], { stdout, stderr, getDoctorStatus });
220
236
  expect(exitCode).toBe(0);
@@ -241,6 +257,8 @@ describe('ai-cli app', () => {
241
257
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('gpt-5.2-codex'));
242
258
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('gemini-2.5-pro'));
243
259
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('forge'));
260
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('opencode'));
261
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('oc-openai/gpt-5.4'));
244
262
  expect(stderr).not.toHaveBeenCalled();
245
263
  });
246
264
  it('prints detailed help for result --help', async () => {
@@ -277,6 +295,7 @@ describe('ai-cli app', () => {
277
295
  const exitCode = await runCli(['doctor', '--help'], { stdout, stderr });
278
296
  expect(exitCode).toBe(0);
279
297
  expect(stdout).toHaveBeenCalledWith(DOCTOR_HELP_TEXT);
298
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('OpenCode'));
280
299
  expect(stderr).not.toHaveBeenCalled();
281
300
  });
282
301
  it('prints detailed help for doctor -h', async () => {
@@ -285,6 +304,7 @@ describe('ai-cli app', () => {
285
304
  const exitCode = await runCli(['doctor', '-h'], { stdout, stderr });
286
305
  expect(exitCode).toBe(0);
287
306
  expect(stdout).toHaveBeenCalledWith(DOCTOR_HELP_TEXT);
307
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('OpenCode'));
288
308
  expect(stderr).not.toHaveBeenCalled();
289
309
  });
290
310
  it('prints help for --help', async () => {
@@ -19,6 +19,44 @@ afterEach(() => {
19
19
  rmSync(dir, { recursive: true, force: true });
20
20
  }
21
21
  });
22
+ describe('cli helper entrypoint smoke', () => {
23
+ it('prints help for cli.run with OpenCode examples', () => {
24
+ const output = execFileSync('node', ['--import', 'tsx', 'src/cli.ts', '--help'], {
25
+ cwd: process.cwd(),
26
+ encoding: 'utf8',
27
+ env: process.env,
28
+ });
29
+ expect(output).toContain('Usage: npm run -s cli.run -- --model <model> --workFolder <path> --prompt "..." [options]');
30
+ expect(output).toContain('opencode');
31
+ expect(output).toContain('oc-openai/gpt-5.4');
32
+ expect(output).toContain('OpenCode');
33
+ expect(output).toContain('npm run -s cli.run.parse -- --agent opencode < raw.txt');
34
+ });
35
+ it('prints help for cli.run.parse with OpenCode agent support', () => {
36
+ const output = execFileSync('node', ['--import', 'tsx', 'src/cli-parse.ts', '--help'], {
37
+ cwd: process.cwd(),
38
+ encoding: 'utf8',
39
+ env: process.env,
40
+ });
41
+ expect(output).toContain('Usage: npm run -s cli.run.parse -- --agent <claude|codex|gemini|forge|opencode>');
42
+ expect(output).toContain('Agent type: claude, codex, gemini, forge, or opencode');
43
+ expect(output).toContain('npm run -s cli.run.parse -- --agent opencode < raw.txt');
44
+ });
45
+ it('parses OpenCode NDJSON through cli.run.parse', () => {
46
+ const output = execFileSync('node', ['--import', 'tsx', 'src/cli-parse.ts', '--agent', 'opencode'], {
47
+ cwd: process.cwd(),
48
+ encoding: 'utf8',
49
+ env: process.env,
50
+ input: '{"type":"step_start","sessionID":"ses_cli_parse"}\n{"type":"text","sessionID":"ses_cli_parse","part":{"type":"text","text":"Hello from cli.parse"}}\n{"type":"step_finish","sessionID":"ses_cli_parse","part":{"type":"step-finish","tokens":{"total":9},"cost":1}}\n',
51
+ });
52
+ expect(JSON.parse(output)).toEqual({
53
+ message: 'Hello from cli.parse',
54
+ session_id: 'ses_cli_parse',
55
+ tokens: { total: 9 },
56
+ cost: 1,
57
+ });
58
+ });
59
+ });
22
60
  describe('ai-cli entrypoint smoke', () => {
23
61
  it('prints doctor output for the ai-cli entrypoint', () => {
24
62
  const fakeBinDir = makeTempDir('ai-cli-bin-');
@@ -26,6 +64,7 @@ describe('ai-cli entrypoint smoke', () => {
26
64
  writeExecutable(fakeBinDir, 'codex');
27
65
  writeExecutable(fakeBinDir, 'gemini');
28
66
  writeExecutable(fakeBinDir, 'forge');
67
+ writeExecutable(fakeBinDir, 'opencode');
29
68
  const output = execFileSync('node', ['--import', 'tsx', 'src/bin/ai-cli.ts', 'doctor'], {
30
69
  cwd: process.cwd(),
31
70
  encoding: 'utf8',
@@ -36,12 +75,14 @@ describe('ai-cli entrypoint smoke', () => {
36
75
  CODEX_CLI_NAME: 'codex',
37
76
  GEMINI_CLI_NAME: 'gemini',
38
77
  FORGE_CLI_NAME: 'forge',
78
+ OPENCODE_CLI_NAME: 'opencode',
39
79
  },
40
80
  });
41
81
  expect(output).toContain('"claude"');
42
82
  expect(output).toContain('"codex"');
43
83
  expect(output).toContain('"gemini"');
44
84
  expect(output).toContain('"forge"');
85
+ expect(output).toContain('"opencode"');
45
86
  expect(output).toContain('"available": true');
46
87
  });
47
88
  it('prints run help for the ai-cli entrypoint', () => {
@@ -54,5 +95,7 @@ describe('ai-cli entrypoint smoke', () => {
54
95
  expect(output).toContain('--model <model>');
55
96
  expect(output).toContain('claude-ultra');
56
97
  expect(output).toContain('forge');
98
+ expect(output).toContain('opencode');
99
+ expect(output).toContain('oc-openai/gpt-5.4');
57
100
  });
58
101
  });
@@ -15,6 +15,7 @@ const DEFAULT_CLI_PATHS = {
15
15
  codex: '/usr/bin/codex',
16
16
  gemini: '/usr/bin/gemini',
17
17
  forge: '/usr/bin/forge',
18
+ opencode: '/usr/bin/opencode',
18
19
  };
19
20
  describe('cli-builder', () => {
20
21
  beforeEach(() => {
@@ -74,6 +75,10 @@ describe('cli-builder', () => {
74
75
  it('should reject reasoning_effort for forge explicitly', () => {
75
76
  expect(() => getReasoningEffort('forge', 'high')).toThrow('reasoning_effort is not supported for forge.');
76
77
  });
78
+ it('should reject reasoning_effort for opencode explicitly', () => {
79
+ expect(() => getReasoningEffort('opencode', 'high')).toThrow('reasoning_effort is not supported for opencode.');
80
+ expect(() => getReasoningEffort('oc-openai/gpt-5.4', 'high')).toThrow('reasoning_effort is not supported for opencode.');
81
+ });
77
82
  });
78
83
  describe('buildCliCommand', () => {
79
84
  describe('validation', () => {
@@ -326,37 +331,110 @@ describe('cli-builder', () => {
326
331
  expect(cmd.resolvedModel).toBe('gemini-3.1-pro-preview');
327
332
  });
328
333
  });
329
- describe('forge agent', () => {
330
- it('should build forge command without model flags', () => {
334
+ describe('opencode agent', () => {
335
+ it('should build default opencode command without --model', () => {
331
336
  const cmd = buildCliCommand({
332
337
  prompt: 'test',
333
338
  workFolder: '/tmp',
334
- model: 'forge',
339
+ model: 'opencode',
335
340
  cliPaths: DEFAULT_CLI_PATHS,
336
341
  });
337
- expect(cmd.agent).toBe('forge');
338
- expect(cmd.cliPath).toBe('/usr/bin/forge');
339
- expect(cmd.resolvedModel).toBe('forge');
340
- expect(cmd.args).toEqual(['-C', '/tmp', '-p', 'test']);
342
+ expect(cmd.agent).toBe('opencode');
343
+ expect(cmd.cliPath).toBe('/usr/bin/opencode');
344
+ expect(cmd.cwd).toBe('/tmp');
345
+ expect(cmd.args).toEqual(['run', '--format', 'json', '--dir', '/tmp', 'test']);
346
+ expect(cmd.args).not.toContain('--model');
341
347
  });
342
- it('should map session_id to --conversation-id for forge', () => {
348
+ it('should route valid explicit OpenCode model syntax', () => {
343
349
  const cmd = buildCliCommand({
344
350
  prompt: 'test',
345
351
  workFolder: '/tmp',
346
- model: 'forge',
347
- session_id: 'forge-conv-123',
352
+ model: 'oc-openai/gpt-5.4',
348
353
  cliPaths: DEFAULT_CLI_PATHS,
349
354
  });
350
- expect(cmd.args).toEqual(['-C', '/tmp', '--conversation-id', 'forge-conv-123', '-p', 'test']);
355
+ expect(cmd.agent).toBe('opencode');
356
+ expect(cmd.resolvedModel).toBe('oc-openai/gpt-5.4');
357
+ expect(cmd.args).toEqual([
358
+ 'run',
359
+ '--format',
360
+ 'json',
361
+ '--dir',
362
+ '/tmp',
363
+ '--model',
364
+ 'openai/gpt-5.4',
365
+ 'test',
366
+ ]);
367
+ });
368
+ it.each([
369
+ 'oc-',
370
+ 'oc-openai',
371
+ 'oc-/gpt-5.4',
372
+ 'oc-openai/',
373
+ ])('should reject invalid explicit OpenCode syntax: %s', (model) => {
374
+ expect(() => buildCliCommand({
375
+ prompt: 'test',
376
+ workFolder: '/tmp',
377
+ model,
378
+ cliPaths: DEFAULT_CLI_PATHS,
379
+ })).toThrow('Invalid OpenCode model. Expected exact syntax oc-<provider/model>.');
351
380
  });
352
- it('should reject reasoning_effort for forge in command building', () => {
381
+ it.each([' oc-openai/gpt-5.4', 'oc-openai/gpt-5.4 '])('should reject explicit OpenCode models with surrounding whitespace: %s', (model) => {
353
382
  expect(() => buildCliCommand({
354
383
  prompt: 'test',
355
384
  workFolder: '/tmp',
356
- model: 'forge',
385
+ model,
386
+ cliPaths: DEFAULT_CLI_PATHS,
387
+ })).toThrow('Invalid OpenCode model. Expected exact syntax oc-<provider/model>.');
388
+ });
389
+ it('should reject reasoning_effort for OpenCode in command building', () => {
390
+ expect(() => buildCliCommand({
391
+ prompt: 'test',
392
+ workFolder: '/tmp',
393
+ model: 'opencode',
357
394
  reasoning_effort: 'high',
358
395
  cliPaths: DEFAULT_CLI_PATHS,
359
- })).toThrow('reasoning_effort is not supported for forge.');
396
+ })).toThrow('reasoning_effort is not supported for opencode.');
397
+ });
398
+ it('should build resumed default OpenCode command', () => {
399
+ const cmd = buildCliCommand({
400
+ prompt: 'resume prompt',
401
+ workFolder: '/tmp',
402
+ model: 'opencode',
403
+ session_id: 'ses-123',
404
+ cliPaths: DEFAULT_CLI_PATHS,
405
+ });
406
+ expect(cmd.args).toEqual([
407
+ 'run',
408
+ '--format',
409
+ 'json',
410
+ '--dir',
411
+ '/tmp',
412
+ '--session',
413
+ 'ses-123',
414
+ 'resume prompt',
415
+ ]);
416
+ expect(cmd.args).not.toContain('--model');
417
+ });
418
+ it('should build resumed explicit OpenCode command', () => {
419
+ const cmd = buildCliCommand({
420
+ prompt: 'resume prompt',
421
+ workFolder: '/tmp',
422
+ model: 'oc-openai/gpt-5.4',
423
+ session_id: 'ses-456',
424
+ cliPaths: DEFAULT_CLI_PATHS,
425
+ });
426
+ expect(cmd.args).toEqual([
427
+ 'run',
428
+ '--format',
429
+ 'json',
430
+ '--dir',
431
+ '/tmp',
432
+ '--session',
433
+ 'ses-456',
434
+ '--model',
435
+ 'openai/gpt-5.4',
436
+ 'resume prompt',
437
+ ]);
360
438
  });
361
439
  });
362
440
  });