ai-cli-mcp 2.14.1 → 2.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60) hide show
  1. package/.github/dependabot.yml +28 -0
  2. package/.github/workflows/ci.yml +4 -1
  3. package/.github/workflows/dependency-review.yml +22 -0
  4. package/CHANGELOG.md +14 -0
  5. package/README.ja.md +83 -6
  6. package/README.md +83 -7
  7. package/dist/__tests__/app-cli.test.js +80 -5
  8. package/dist/__tests__/cli-bin-smoke.test.js +43 -0
  9. package/dist/__tests__/cli-builder.test.js +93 -15
  10. package/dist/__tests__/cli-process-service.test.js +162 -0
  11. package/dist/__tests__/cli-utils.test.js +31 -0
  12. package/dist/__tests__/e2e.test.js +79 -52
  13. package/dist/__tests__/mcp-contract.test.js +162 -0
  14. package/dist/__tests__/parsers.test.js +224 -1
  15. package/dist/__tests__/peek.test.js +35 -0
  16. package/dist/__tests__/process-management.test.js +160 -1
  17. package/dist/__tests__/server.test.js +39 -9
  18. package/dist/__tests__/utils/opencode-mock.js +91 -0
  19. package/dist/__tests__/validation.test.js +40 -2
  20. package/dist/app/cli.js +47 -5
  21. package/dist/app/mcp.js +53 -4
  22. package/dist/cli-builder.js +67 -28
  23. package/dist/cli-parse.js +11 -5
  24. package/dist/cli-process-service.js +241 -20
  25. package/dist/cli-utils.js +14 -23
  26. package/dist/cli.js +6 -4
  27. package/dist/model-catalog.js +13 -1
  28. package/dist/parsers.js +242 -28
  29. package/dist/peek.js +56 -0
  30. package/dist/process-result.js +9 -2
  31. package/dist/process-service.js +103 -17
  32. package/dist/server.js +1 -2
  33. package/package.json +9 -6
  34. package/src/__tests__/app-cli.test.ts +95 -4
  35. package/src/__tests__/cli-bin-smoke.test.ts +62 -1
  36. package/src/__tests__/cli-builder.test.ts +111 -15
  37. package/src/__tests__/cli-process-service.test.ts +180 -0
  38. package/src/__tests__/cli-utils.test.ts +34 -0
  39. package/src/__tests__/e2e.test.ts +87 -55
  40. package/src/__tests__/mcp-contract.test.ts +188 -0
  41. package/src/__tests__/parsers.test.ts +260 -1
  42. package/src/__tests__/peek.test.ts +43 -0
  43. package/src/__tests__/process-management.test.ts +185 -1
  44. package/src/__tests__/server.test.ts +49 -13
  45. package/src/__tests__/utils/opencode-mock.ts +108 -0
  46. package/src/__tests__/validation.test.ts +48 -2
  47. package/src/app/cli.ts +52 -4
  48. package/src/app/mcp.ts +54 -4
  49. package/src/cli-builder.ts +91 -32
  50. package/src/cli-parse.ts +11 -5
  51. package/src/cli-process-service.ts +304 -17
  52. package/src/cli-utils.ts +37 -33
  53. package/src/cli.ts +6 -4
  54. package/src/model-catalog.ts +24 -1
  55. package/src/parsers.ts +299 -33
  56. package/src/peek.ts +88 -0
  57. package/src/process-result.ts +11 -2
  58. package/src/process-service.ts +134 -15
  59. package/src/server.ts +2 -2
  60. package/vitest.config.unit.ts +2 -3
@@ -0,0 +1,28 @@
1
+ version: 2
2
+ updates:
3
+ - package-ecosystem: "npm"
4
+ directory: "/"
5
+ target-branch: "develop"
6
+ schedule:
7
+ interval: "weekly"
8
+ open-pull-requests-limit: 5
9
+ groups:
10
+ npm-production:
11
+ dependency-type: "production"
12
+ patterns:
13
+ - "*"
14
+ npm-development:
15
+ dependency-type: "development"
16
+ patterns:
17
+ - "*"
18
+
19
+ - package-ecosystem: "github-actions"
20
+ directory: "/"
21
+ target-branch: "develop"
22
+ schedule:
23
+ interval: "weekly"
24
+ open-pull-requests-limit: 2
25
+ groups:
26
+ github-actions:
27
+ patterns:
28
+ - "*"
@@ -12,7 +12,7 @@ jobs:
12
12
 
13
13
  strategy:
14
14
  matrix:
15
- node-version: [20.x, 22.x]
15
+ node-version: [20.19.0, 22.12.0, 24.x]
16
16
 
17
17
  steps:
18
18
  - name: Checkout repository
@@ -29,3 +29,6 @@ jobs:
29
29
 
30
30
  - name: Build project
31
31
  run: npm run build
32
+
33
+ - name: Run unit tests
34
+ run: npm run test:unit
@@ -0,0 +1,22 @@
1
+ name: Dependency Review
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - develop
7
+
8
+ permissions:
9
+ contents: read
10
+
11
+ jobs:
12
+ dependency-review:
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - name: Checkout
16
+ uses: actions/checkout@v4
17
+
18
+ - name: Dependency Review
19
+ uses: actions/dependency-review-action@v4
20
+ with:
21
+ fail-on-severity: moderate
22
+ fail-on-scopes: runtime
package/CHANGELOG.md CHANGED
@@ -1,3 +1,17 @@
1
+ # [2.16.0](https://github.com/mkXultra/ai-cli-mcp/compare/v2.15.0...v2.16.0) (2026-04-11)
2
+
3
+
4
+ ### Features
5
+
6
+ * peekコマンドを追加 — 実行中エージェントの自然言語メッセージをワンショット観測 ([c12fd4c](https://github.com/mkXultra/ai-cli-mcp/commit/c12fd4cbe374a05b5223191e10fb2144b5d86bd0))
7
+
8
+ # [2.15.0](https://github.com/mkXultra/ai-cli-mcp/compare/v2.14.1...v2.15.0) (2026-04-09)
9
+
10
+
11
+ ### Features
12
+
13
+ * OpenCode CLIを新しいAIバックエンドとして追加 ([0677c57](https://github.com/mkXultra/ai-cli-mcp/commit/0677c57659b36fd1083cd96166c2c608c45038b3))
14
+
1
15
  ## [2.14.1](https://github.com/mkXultra/ai-cli-mcp/compare/v2.14.0...v2.14.1) (2026-04-07)
2
16
 
3
17
 
package/README.ja.md CHANGED
@@ -5,7 +5,7 @@
5
5
 
6
6
  > **📦 パッケージ移行のお知らせ**: 本パッケージは旧名 `@mkxultra/claude-code-mcp` から `ai-cli-mcp` に名称変更されました。これは、複数のAI CLIツールのサポート拡大を反映したものです。
7
7
 
8
- AI CLIツール(Claude, Codex, Gemini, Forge)をバックグラウンドプロセスとして実行し、権限処理を自動化するMCP(Model Context Protocol)サーバーです。
8
+ AI CLIツール(Claude, Codex, Gemini, Forge, OpenCode)をバックグラウンドプロセスとして実行し、権限処理を自動化するMCP(Model Context Protocol)サーバーです。
9
9
 
10
10
  Cursorなどのエディタが、複雑な手順を伴う編集や操作に苦戦していることに気づいたことはありませんか?このサーバーは、強力な統合 `run` ツールを提供し、複数のAIエージェントを活用してコーディングタスクをより効果的に処理できるようにします。
11
11
 
@@ -21,11 +21,13 @@ Cursorなどのエディタが、複雑な手順を伴う編集や操作に苦
21
21
  - 自動承認モードでCodex CLIを実行(`--full-auto` を使用)
22
22
  - 自動承認モードでGemini CLIを実行(`-y` を使用)
23
23
  - Forge CLI を非対話モードで実行(`forge -C <workFolder> -p <prompt>` を使用)
24
+ - OpenCode を非対話 JSON モードで実行(`opencode run --format json --dir <workFolder> <prompt>` を使用)
24
25
  - 複数のAIモデルのサポート:
25
26
  - Claude (sonnet, sonnet[1m], opus, opusplan, haiku)
26
27
  - Codex (gpt-5.4, gpt-5.3-codex, gpt-5.2-codex, gpt-5.1-codex-mini, gpt-5.1-codex-max, など)
27
28
  - Gemini (gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview)
28
29
  - Forge (`forge`)
30
+ - OpenCode (`opencode` と `oc-<provider/model>` ラッパー。例: `oc-openai/gpt-5.4`)
29
31
  - PID追跡によるバックグラウンドプロセスの管理
30
32
  - ツールからの構造化された出力の解析と返却
31
33
 
@@ -67,6 +69,7 @@ Cursorなどのエディタが、複雑な手順を伴う編集や操作に苦
67
69
  - **Codex CLI**(オプション): インストール済みで、ログインなどの初期設定が完了していること。
68
70
  - **Gemini CLI**(オプション): インストール済みで、ログインなどの初期設定が完了していること。
69
71
  - **Forge CLI**(オプション): インストール済みで、初期設定が完了していること。
72
+ - **OpenCode**(オプション): インストール済みで、設定が完了していること。この統合では `opencode run --format json` を使用し、明示的なモデル指定は `ai-cli models` が公開する `oc-<provider/model>` 構文に従います。
70
73
 
71
74
  ## インストールと使い方
72
75
 
@@ -116,9 +119,12 @@ npm install -g ai-cli-mcp
116
119
  ai-cli doctor
117
120
  ai-cli models
118
121
  ai-cli run --cwd "$PWD" --model sonnet --prompt "summarize this repository"
122
+ ai-cli run --cwd "$PWD" --model opencode --prompt "OpenCode のデフォルト設定でこのリポジトリを要約して"
123
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --session-id ses_123 --prompt "明示モデル付きでこの OpenCode セッションを続けて"
119
124
  ai-cli ps
120
125
  ai-cli result 12345
121
126
  ai-cli result 12345 --verbose
127
+ ai-cli peek 12345 --time 10
122
128
  ai-cli wait 12345 --timeout 300
123
129
  ai-cli wait 12345 --verbose
124
130
  ai-cli kill 12345
@@ -132,6 +138,7 @@ ai-cli-mcp
132
138
 
133
139
  ```bash
134
140
  npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model sonnet --prompt "hello"
141
+ npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "OpenCode で hello"
135
142
  ```
136
143
 
137
144
  ## 重要な初回セットアップ
@@ -172,6 +179,7 @@ macOSでは、これらのツールを初めて実行する際にフォルダへ
172
179
  - `run`
173
180
  - `ps`
174
181
  - `result`
182
+ - `peek`
175
183
  - `wait`
176
184
  - `kill`
177
185
  - `cleanup`
@@ -185,7 +193,11 @@ macOSでは、これらのツールを初めて実行する際にフォルダへ
185
193
  ai-cli doctor
186
194
  ai-cli models
187
195
  ai-cli run --cwd "$PWD" --model codex-ultra --prompt "fix failing tests"
196
+ ai-cli run --cwd "$PWD" --model opencode --session-id ses_existing --prompt "この OpenCode セッションを継続して"
197
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "明示的な OpenCode モデルで実行"
188
198
  ai-cli ps
199
+ ai-cli peek 12345 --time 10
200
+ ai-cli peek 12345 12346 --time 10
189
201
  ai-cli wait 12345
190
202
  ai-cli wait 12345 --verbose
191
203
  ai-cli result 12345
@@ -195,6 +207,13 @@ ai-cli cleanup
195
207
 
196
208
  `run` の作業ディレクトリ指定は `--cwd` が基本です。互換性のために `--workFolder` / `--work-folder` も受け付けます。
197
209
 
210
+ OpenCode のモデル指定は次の 2 つを受け付けます。
211
+
212
+ - `opencode`: OpenCode 側で設定されたデフォルトモデルを使用
213
+ - `oc-<provider/model>`: 明示的な OpenCode の provider/model を指定。例: `oc-openai/gpt-5.4`
214
+
215
+ `ai-cli models` は OpenCode を機械可読に `opencode: ["opencode"]` と `dynamicModelBackends.opencode` で公開します。実際に利用可能なバックエンドネイティブなモデル一覧は `opencode models` で確認してください。
216
+
198
217
  `doctor` は CLI バイナリの存在確認と path 解決だけを行います。ログイン状態や利用規約同意までは確認しません。
199
218
 
200
219
  ## CLI の状態保存先
@@ -210,12 +229,13 @@ ai-cli cleanup
210
229
  - `meta.json`
211
230
  - `stdout.log`
212
231
  - `stderr.log`
232
+ - `exit-status.json`(detached な OpenCode 実行用)
213
233
 
214
234
  完了済み・失敗済みの実行は `ai-cli cleanup` で削除できます。`running` のものは保持されます。
215
235
 
216
236
  ## 既知の制約
217
237
 
218
- detached 実行された `ai-cli` の自然終了 exit code は、まだ永続化していません。そのため、CLI は出力と running/completed 状態は返せますが、自然終了したバックグラウンド実行の `exitCode` は現時点では保証しません。
238
+ detached 実行された `ai-cli` では、OpenCode バックエンドに限り自然終了時の exit status を永続化します。そのため OpenCode の失敗終了は非ゼロ exit code を含めて `failed` として扱われ、結果では生の `stdout` / `stderr` を保持します。一方、他の detached バックエンドでは従来どおり、より広い exit-status 追跡が追加されるまでは自然終了した実行が信頼できる exit code なしで `completed` と見なされる制約が残ります。
219
239
 
220
240
  ## MCPクライアントへの接続
221
241
 
@@ -229,7 +249,7 @@ detached 実行された `ai-cli` の自然終了 exit code は、まだ永続
229
249
 
230
250
  ### `run`
231
251
 
232
- Claude CLI、Codex CLI、Gemini CLI、または Forge CLI を使用してプロンプトを実行します。モデル名に基づいて適切なCLIが自動的に選択されます。
252
+ Claude CLI、Codex CLI、Gemini CLI、Forge CLI、または OpenCode を使用してプロンプトを実行します。モデル名に基づいて適切なCLIが自動的に選択されます。
233
253
 
234
254
  **引数:**
235
255
  - `prompt` (string, 任意): AIエージェントに送信するプロンプト。`prompt` または `prompt_file` のいずれかが必須です。
@@ -241,8 +261,9 @@ Claude CLI、Codex CLI、Gemini CLI、または Forge CLI を使用してプロ
241
261
  - Codex: `gpt-5.4`, `gpt-5.3-codex`, `gpt-5.2-codex`, `gpt-5.1-codex-mini`, `gpt-5.1-codex-max`, `gpt-5.2`, `gpt-5.1`, `gpt-5`
242
262
  - Gemini: `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-3.1-pro-preview`, `gemini-3-pro-preview`, `gemini-3-flash-preview`
243
263
  - Forge: `forge`
244
- - `reasoning_effort` (string, 任意): Claude と Codex の推論制御。Claude では `--effort` を使います(許容値: "low", "medium", "high")。Codex では `model_reasoning_effort` を使います(許容値: "low", "medium", "high", "xhigh")。Forge では `reasoning_effort` はサポートしません。
245
- - `session_id` (string, 任意): 以前のセッションを再開するためのセッションID。対応モデル: haiku, sonnet, opus, gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview, forge。
264
+ - OpenCode: `opencode`(設定済みのデフォルトモデル)および `oc-openai/gpt-5.4` のような明示ラッパー
265
+ - `reasoning_effort` (string, 任意): Claude と Codex の推論制御。Claude では `--effort` を使います(許容値: "low", "medium", "high")。Codex では `model_reasoning_effort` を使います(許容値: "low", "medium", "high", "xhigh")。Gemini、Forge、OpenCode では `reasoning_effort` はサポートしません。
266
+ - `session_id` (string, 任意): 以前のセッションを再開するためのセッションID。Claude、Codex、Gemini、Forge、OpenCode でサポートされます。OpenCode は `--session` による in-place resume で再開し、`oc-<provider/model>` の明示指定と併用できます。
246
267
 
247
268
  ### `wait`
248
269
 
@@ -255,6 +276,60 @@ Claude CLI、Codex CLI、Gemini CLI、または Forge CLI を使用してプロ
255
276
  - `timeout` (number, 任意): 最大待機時間(秒)。デフォルトは180秒(3分)です。
256
277
  - `verbose` (boolean, 任意): `true` の場合、各結果項目を full 形で返します。デフォルトは `false` です。
257
278
 
279
+ ### `peek`
280
+
281
+ 実行中の子エージェントを短時間だけ観測し、その `peek` 呼び出しの観測ウィンドウ内で ai-cli-mcp が受理した自然言語メッセージだけを返します。履歴APIではなく、欠落のないストリーミングでもなく、シェルの `stdout` / `stderr` tail でもありません。別々の `peek` 呼び出しの間に出たメッセージは取得できない場合があります。v1 では `--follow` はありません。
282
+
283
+ CLI v1:
284
+
285
+ ```bash
286
+ ai-cli peek 123 --time 10
287
+ ai-cli peek 123 456 --time 10
288
+ ```
289
+
290
+ **引数:**
291
+ - `pids` (array of numbers, 必須): `run` が返したプロセスIDを 1..32 件指定します。重複したPIDはサーバー側で重複排除され、最初に出た順序が維持されます。未知または管理外のPIDは、呼び出し全体の失敗ではなく、プロセスごとに `not_found` として返されます。
292
+ - `peek_time_sec` (number, 任意): 観測時間(秒)の正の整数です。デフォルトは10秒、最大60秒です。`0`、負数、小数は無効です。
293
+
294
+ **観測とフィルタリング:**
295
+ - `peek_started_at` と `messages[].ts` は、ai-cli-mcp サーバー側の UTC RFC3339 タイムスタンプです。`peek_started_at` は検証とリスナー登録後に観測ウィンドウが始まった時刻、`messages[].ts` は ai-cli-mcp がメッセージを観測して受理した時刻です。
296
+ - 観測ウィンドウは `peek_time_sec` が経過するか、対象プロセスがすべて終端状態になった時点で終了します。
297
+ - 観測開始前のメッセージは返しません。同じPIDへの同時 `peek` は可能で、それぞれ独立した観測ウィンドウを持つため、メッセージが重複して返ることがあります。
298
+ - 返すのは認識済みの自然言語メッセージだけです。Codex の `agent_message` text、Claude assistant の text content、OpenCode の `type: "text"` かつ `part.type` が `"text"` のイベント、Gemini stream-json の `role` が `"assistant"` の `message` イベントを含めます。raw `stdout` / `stderr`、raw JSONL、reasoning、`tool_use`、`tool_result`、コマンドの `stdout` / `stderr`、command execution メタデータ、token usage、verbose メタデータは除外します。
299
+ - 未知のイベント形状はデフォルトで拒否します。Forge など、自然言語抽出がまだ明示対応されていない管理対象エージェントは、実際のプロセス状態を返しつつ、`messages: []`、`truncated: false`、`error: null` にします。
300
+ - 各PIDごとに、観測ウィンドウ内で最初に観測された50件までを保持します。それ以降のメッセージを捨てた場合は `truncated` が `true` になります。
301
+ - `status` は `running`、`completed`、`failed`、`not_found` のいずれかで、観測ウィンドウ終了時点の状態を表します。
302
+ - `agent` は `claude`、`codex`、`gemini`、`forge`、`opencode`、将来追加される追跡済みエージェント文字列、または `null` です。`null` はプロセスが見つからない、またはエージェント種別を判断できない場合を表します。
303
+
304
+ レスポンス例:
305
+
306
+ ```json
307
+ {
308
+ "peek_started_at": "2026-04-11T12:34:56.789Z",
309
+ "observed_duration_sec": 10.01,
310
+ "processes": [
311
+ {
312
+ "pid": 123,
313
+ "agent": "codex",
314
+ "status": "running",
315
+ "messages": [
316
+ { "ts": "2026-04-11T12:34:59.120Z", "text": "I'm checking the implementation." }
317
+ ],
318
+ "truncated": false,
319
+ "error": null
320
+ },
321
+ {
322
+ "pid": 999,
323
+ "agent": null,
324
+ "status": "not_found",
325
+ "messages": [],
326
+ "truncated": false,
327
+ "error": "process not found"
328
+ }
329
+ ]
330
+ }
331
+ ```
332
+
258
333
  ### `list_processes`
259
334
 
260
335
  実行中および完了したすべてのAIエージェントプロセスを、ステータス、PID、基本情報とともにリストアップします。
@@ -311,6 +386,7 @@ npm run test:e2e
311
386
  - `CODEX_CLI_NAME`: Codex CLIのバイナリ名または絶対パスを上書き(デフォルト: `codex`)
312
387
  - `GEMINI_CLI_NAME`: Gemini CLIのバイナリ名または絶対パスを上書き(デフォルト: `gemini`)
313
388
  - `FORGE_CLI_NAME`: Forge CLIのバイナリ名または絶対パスを上書き(デフォルト: `forge`)
389
+ - `OPENCODE_CLI_NAME`: OpenCode CLIのバイナリ名または絶対パスを上書き(デフォルト: `opencode`)
314
390
  - `MCP_CLAUDE_DEBUG`: デバッグログを有効化(`true` に設定すると詳細な出力が表示されます)
315
391
 
316
392
  **CLI名の指定方法:**
@@ -329,7 +405,8 @@ npm run test:e2e
329
405
  ],
330
406
  "env": {
331
407
  "CLAUDE_CLI_NAME": "claude-custom",
332
- "CODEX_CLI_NAME": "codex-custom"
408
+ "CODEX_CLI_NAME": "codex-custom",
409
+ "OPENCODE_CLI_NAME": "opencode-custom"
333
410
  }
334
411
  },
335
412
  ```
package/README.md CHANGED
@@ -7,7 +7,7 @@
7
7
 
8
8
  > **📦 Package Migration Notice**: This package was formerly `@mkxultra/claude-code-mcp` and has been renamed to `ai-cli-mcp` to reflect its expanded support for multiple AI CLI tools.
9
9
 
10
- An MCP (Model Context Protocol) server that allows running AI CLI tools (Claude, Codex, Gemini, and Forge) in background processes with automatic permission handling.
10
+ An MCP (Model Context Protocol) server that allows running AI CLI tools (Claude, Codex, Gemini, Forge, and OpenCode) in background processes with automatic permission handling.
11
11
 
12
12
  Did you notice that Cursor sometimes struggles with complex, multi-step edits or operations? This server, with its powerful unified `run` tool, enables multiple AI agents to handle your coding tasks more effectively.
13
13
 
@@ -23,7 +23,8 @@ This MCP server provides tools that can be used by LLMs to interact with AI CLI
23
23
  - Execute Codex CLI with automatic approval mode (using `--full-auto`)
24
24
  - Execute Gemini CLI with automatic approval mode (using `-y`)
25
25
  - Execute Forge CLI in non-interactive mode (using `forge -C <workFolder> -p <prompt>`)
26
- - Support multiple AI models: Claude (sonnet, sonnet[1m], opus, opusplan, haiku), Codex (gpt-5.4, gpt-5.3-codex, gpt-5.2-codex, gpt-5.1-codex-mini, gpt-5.1-codex-max, gpt-5.2, gpt-5.1, gpt-5.1-codex, gpt-5-codex, gpt-5-codex-mini, gpt-5), Gemini (gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview), and Forge (`forge`)
26
+ - Execute OpenCode in non-interactive JSON mode (using `opencode run --format json --dir <workFolder> <prompt>`)
27
+ - Support multiple AI models: Claude (sonnet, sonnet[1m], opus, opusplan, haiku), Codex (gpt-5.4, gpt-5.3-codex, gpt-5.2-codex, gpt-5.1-codex-mini, gpt-5.1-codex-max, gpt-5.2, gpt-5.1, gpt-5.1-codex, gpt-5-codex, gpt-5-codex-mini, gpt-5), Gemini (gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview), Forge (`forge`), and OpenCode (`opencode` plus explicit `oc-<provider/model>` wrappers such as `oc-openai/gpt-5.4`)
27
28
  - Manage background processes with PID tracking
28
29
  - Parse and return structured outputs from both tools
29
30
 
@@ -65,6 +66,7 @@ The only prerequisite is that the AI CLI tools you want to use are locally insta
65
66
  - **Codex CLI** (Optional): Installed and initial setup (login etc.) completed.
66
67
  - **Gemini CLI** (Optional): Installed and initial setup (login etc.) completed.
67
68
  - **Forge CLI** (Optional): Installed and initial setup completed.
69
+ - **OpenCode** (Optional): Installed and configured. This integration uses `opencode run --format json`, and explicit provider/model selection follows the `oc-<provider/model>` wrapper syntax exposed by `ai-cli models`.
68
70
 
69
71
  ## Installation & Usage
70
72
 
@@ -114,9 +116,12 @@ Examples:
114
116
  ai-cli doctor
115
117
  ai-cli models
116
118
  ai-cli run --cwd "$PWD" --model sonnet --prompt "summarize this repository"
119
+ ai-cli run --cwd "$PWD" --model opencode --prompt "summarize this repository with OpenCode defaults"
120
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --session-id ses_123 --prompt "continue this session with an explicit OpenCode model"
117
121
  ai-cli ps
118
122
  ai-cli result 12345
119
123
  ai-cli result 12345 --verbose
124
+ ai-cli peek 12345 --time 10
120
125
  ai-cli wait 12345 --timeout 300
121
126
  ai-cli wait 12345 --verbose
122
127
  ai-cli kill 12345
@@ -130,6 +135,7 @@ Because the published package name is still `ai-cli-mcp`, the shortest `npx` for
130
135
 
131
136
  ```bash
132
137
  npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model sonnet --prompt "hello"
138
+ npx -y --package ai-cli-mcp@latest ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "hello from OpenCode"
133
139
  ```
134
140
 
135
141
  ## Important First-Time Setup
@@ -170,6 +176,7 @@ macOS might ask for folder permissions the first time any of these tools run. If
170
176
  - `run`
171
177
  - `ps`
172
178
  - `result`
179
+ - `peek`
173
180
  - `wait`
174
181
  - `kill`
175
182
  - `cleanup`
@@ -183,7 +190,11 @@ Example flow:
183
190
  ai-cli doctor
184
191
  ai-cli models
185
192
  ai-cli run --cwd "$PWD" --model codex-ultra --prompt "fix failing tests"
193
+ ai-cli run --cwd "$PWD" --model opencode --session-id ses_existing --prompt "continue this OpenCode session"
194
+ ai-cli run --cwd "$PWD" --model oc-openai/gpt-5.4 --prompt "run with an explicit OpenCode backend model"
186
195
  ai-cli ps
196
+ ai-cli peek 12345 --time 10
197
+ ai-cli peek 12345 12346 --time 10
187
198
  ai-cli wait 12345
188
199
  ai-cli wait 12345 --verbose
189
200
  ai-cli result 12345
@@ -193,6 +204,13 @@ ai-cli cleanup
193
204
 
194
205
  `run` accepts `--cwd` as the primary working-directory flag and also accepts the older aliases `--workFolder` / `--work-folder` for compatibility.
195
206
 
207
+ OpenCode model selection accepts either:
208
+
209
+ - `opencode` for the CLI's configured default model
210
+ - `oc-<provider/model>` for an explicit OpenCode provider/model, for example `oc-openai/gpt-5.4`
211
+
212
+ `ai-cli models` exposes OpenCode machine-readably via `opencode: ["opencode"]` plus `dynamicModelBackends.opencode`, which points users to `opencode models` for backend-native discovery.
213
+
196
214
  `doctor` checks only binary existence and path resolution. It does not verify login state or terms acceptance.
197
215
 
198
216
  ## CLI State Storage
@@ -208,12 +226,13 @@ Each PID directory contains:
208
226
  - `meta.json`
209
227
  - `stdout.log`
210
228
  - `stderr.log`
229
+ - `exit-status.json` for detached OpenCode runs
211
230
 
212
231
  Use `ai-cli cleanup` to remove completed and failed runs. Running processes are preserved.
213
232
 
214
233
  ## Known Limitation
215
234
 
216
- Detached `ai-cli` runs do not currently persist natural process exit codes. As a result, the CLI can report process output and running/completed state, but it does not yet guarantee `exitCode` for naturally finished background runs.
235
+ Detached `ai-cli` runs persist natural exit status for OpenCode-backed runs, including failed exit codes used to preserve raw OpenCode stdout/stderr in result output. Other detached backends still keep the pre-existing limitation: naturally finished runs may be surfaced as completed without a reliable persisted exit code until broader exit tracking is added.
217
236
 
218
237
  ## Connecting to Your MCP Client
219
238
 
@@ -227,7 +246,7 @@ This server exposes the following tools:
227
246
 
228
247
  ### `run`
229
248
 
230
- Executes a prompt using Claude CLI, Codex CLI, Gemini CLI, or Forge CLI. The appropriate CLI is automatically selected based on the model name.
249
+ Executes a prompt using Claude CLI, Codex CLI, Gemini CLI, Forge CLI, or OpenCode. The appropriate CLI is automatically selected based on the model name.
231
250
 
232
251
  **Arguments:**
233
252
  - `prompt` (string, optional): The prompt to send to the AI agent. Either `prompt` or `prompt_file` is required.
@@ -239,8 +258,9 @@ Executes a prompt using Claude CLI, Codex CLI, Gemini CLI, or Forge CLI. The app
239
258
  - Codex: `gpt-5.4`, `gpt-5.3-codex`, `gpt-5.2-codex`, `gpt-5.1-codex-mini`, `gpt-5.1-codex-max`, `gpt-5.2`, `gpt-5.1`, `gpt-5`
240
259
  - Gemini: `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-3.1-pro-preview`, `gemini-3-pro-preview`, `gemini-3-flash-preview`
241
260
  - Forge: `forge`
242
- - `reasoning_effort` (string, optional): Reasoning control for Claude and Codex. Claude uses `--effort` (allowed: "low", "medium", "high"). Codex uses `model_reasoning_effort` (allowed: "low", "medium", "high", "xhigh"). Forge does not support `reasoning_effort`.
243
- - `session_id` (string, optional): Optional session ID to resume a previous session. Supported for: haiku, sonnet, opus, gemini-2.5-pro, gemini-2.5-flash, gemini-3.1-pro-preview, gemini-3-pro-preview, gemini-3-flash-preview, forge.
261
+ - OpenCode: `opencode` for the configured default backend model, plus explicit wrappers like `oc-openai/gpt-5.4`
262
+ - `reasoning_effort` (string, optional): Reasoning control for Claude and Codex. Claude uses `--effort` (allowed: "low", "medium", "high"). Codex uses `model_reasoning_effort` (allowed: "low", "medium", "high", "xhigh"). Gemini, Forge, and OpenCode do not support `reasoning_effort`.
263
+ - `session_id` (string, optional): Optional session ID to resume a previous session. Supported for Claude, Codex, Gemini, Forge, and OpenCode. OpenCode resumes in place via `--session` and may also be combined with an explicit `oc-<provider/model>` selection.
244
264
 
245
265
  ### `wait`
246
266
 
@@ -253,6 +273,60 @@ By default, each returned result item uses the compact shape shared with `get_re
253
273
  - `timeout` (number, optional): Maximum wait time in seconds. Defaults to 180 (3 minutes).
254
274
  - `verbose` (boolean, optional): If `true`, each result item uses the full result shape. Defaults to `false`.
255
275
 
276
+ ### `peek`
277
+
278
+ Starts a one-shot short observation window for running child agents and returns only natural-language agent messages observed during that specific call. It is not a history API, not gapless streaming, and not shell stdout/stderr tailing. Separate `peek` calls may miss messages emitted between calls; `--follow` is intentionally not part of v1.
279
+
280
+ CLI v1:
281
+
282
+ ```bash
283
+ ai-cli peek 123 --time 10
284
+ ai-cli peek 123 456 --time 10
285
+ ```
286
+
287
+ **Arguments:**
288
+ - `pids` (array of numbers, required): 1..32 process IDs returned by `run`. Duplicate PIDs are deduplicated server-side, preserving first occurrence order. Unknown or unmanaged PIDs are returned per process as `not_found`, not as a whole-call failure.
289
+ - `peek_time_sec` (number, optional): Positive integer observation length in seconds. Defaults to 10 and is capped at 60. `0`, negative values, and fractional values are invalid.
290
+
291
+ **Observation and filtering:**
292
+ - `peek_started_at` and `messages[].ts` are ai-cli-mcp server-side UTC RFC3339 timestamps. `peek_started_at` is when the observation window starts after validation and listener registration; `messages[].ts` is when ai-cli-mcp observed and accepted the message.
293
+ - The window ends when `peek_time_sec` elapses or all target processes reach a terminal state, whichever comes first.
294
+ - Messages emitted before the window starts are not returned. Concurrent `peek` calls for the same PID are allowed; each has an independent window and may return overlapping messages.
295
+ - Only recognized natural-language agent messages are returned: Codex `agent_message` text, Claude assistant text content, OpenCode `type: "text"` events where `part.type` is `"text"`, and Gemini stream-json `message` events where `role` is `"assistant"`. Raw stdout/stderr, raw JSONL, reasoning, `tool_use`, `tool_result`, command stdout/stderr, command execution metadata, token usage, and verbose metadata are excluded.
296
+ - Unknown event shapes are denied by default. Managed agents without supported natural-language extraction, such as Forge until explicitly supported, return their real process status with `messages: []`, `truncated: false`, and `error: null`.
297
+ - Each PID keeps the first 50 messages observed in the window. If later messages are dropped, `truncated` is `true`.
298
+ - `status` is one of `running`, `completed`, `failed`, or `not_found`, and reflects state when the observation window closes.
299
+ - `agent` is `claude`, `codex`, `gemini`, `forge`, `opencode`, a future tracked string value, or `null` when the process is not found or the agent cannot be determined.
300
+
301
+ Example response:
302
+
303
+ ```json
304
+ {
305
+ "peek_started_at": "2026-04-11T12:34:56.789Z",
306
+ "observed_duration_sec": 10.01,
307
+ "processes": [
308
+ {
309
+ "pid": 123,
310
+ "agent": "codex",
311
+ "status": "running",
312
+ "messages": [
313
+ { "ts": "2026-04-11T12:34:59.120Z", "text": "I'm checking the implementation." }
314
+ ],
315
+ "truncated": false,
316
+ "error": null
317
+ },
318
+ {
319
+ "pid": 999,
320
+ "agent": null,
321
+ "status": "not_found",
322
+ "messages": [],
323
+ "truncated": false,
324
+ "error": "process not found"
325
+ }
326
+ ]
327
+ }
328
+ ```
329
+
256
330
  ### `list_processes`
257
331
 
258
332
  Lists all running and completed AI agent processes with their status, PID, and basic info.
@@ -295,6 +369,7 @@ Normally not required, but useful for customizing CLI paths or debugging.
295
369
  - `CODEX_CLI_NAME`: Override the Codex CLI binary name or provide an absolute path (default: `codex`)
296
370
  - `GEMINI_CLI_NAME`: Override the Gemini CLI binary name or provide an absolute path (default: `gemini`)
297
371
  - `FORGE_CLI_NAME`: Override the Forge CLI binary name or provide an absolute path (default: `forge`)
372
+ - `OPENCODE_CLI_NAME`: Override the OpenCode CLI binary name or provide an absolute path (default: `opencode`)
298
373
  - `MCP_CLAUDE_DEBUG`: Enable debug logging (set to `true` for verbose output)
299
374
 
300
375
  **CLI Name Specification:**
@@ -313,7 +388,8 @@ Normally not required, but useful for customizing CLI paths or debugging.
313
388
  ],
314
389
  "env": {
315
390
  "CLAUDE_CLI_NAME": "claude-custom",
316
- "CODEX_CLI_NAME": "codex-custom"
391
+ "CODEX_CLI_NAME": "codex-custom",
392
+ "OPENCODE_CLI_NAME": "opencode-custom"
317
393
  }
318
394
  },
319
395
  ```
@@ -1,5 +1,5 @@
1
1
  import { describe, expect, it, vi } from 'vitest';
2
- import { CLI_HELP_TEXT, DOCTOR_HELP_TEXT, MODELS_HELP_TEXT, RESULT_HELP_TEXT, RUN_HELP_TEXT, WAIT_HELP_TEXT, runCli, } from '../app/cli.js';
2
+ import { CLI_HELP_TEXT, DOCTOR_HELP_TEXT, MODELS_HELP_TEXT, PEEK_HELP_TEXT, RESULT_HELP_TEXT, RUN_HELP_TEXT, WAIT_HELP_TEXT, runCli, } from '../app/cli.js';
3
3
  describe('ai-cli app', () => {
4
4
  it('prints help and exits successfully when no subcommand is provided', async () => {
5
5
  const stdout = vi.fn();
@@ -151,6 +151,52 @@ describe('ai-cli app', () => {
151
151
  expect(stdout).toHaveBeenCalledWith(CLI_HELP_TEXT);
152
152
  expect(waitForProcesses).not.toHaveBeenCalled();
153
153
  });
154
+ it('dispatches peek with deduped pid arguments and time', async () => {
155
+ const stdout = vi.fn();
156
+ const stderr = vi.fn();
157
+ const peekProcesses = vi.fn().mockResolvedValue({
158
+ peek_started_at: '2026-04-11T12:34:56.789Z',
159
+ observed_duration_sec: 0.01,
160
+ processes: [],
161
+ });
162
+ const exitCode = await runCli(['peek', '123', '456', '123', '--time', '5'], {
163
+ stdout,
164
+ stderr,
165
+ peekProcesses,
166
+ });
167
+ expect(exitCode).toBe(0);
168
+ expect(peekProcesses).toHaveBeenCalledWith([123, 456], 5);
169
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"peek_started_at"'));
170
+ expect(stderr).not.toHaveBeenCalled();
171
+ });
172
+ it('defaults peek time and rejects --follow', async () => {
173
+ const stdout = vi.fn();
174
+ const stderr = vi.fn();
175
+ const peekProcesses = vi.fn().mockResolvedValue({
176
+ peek_started_at: '2026-04-11T12:34:56.789Z',
177
+ observed_duration_sec: 0.01,
178
+ processes: [],
179
+ });
180
+ const defaultExitCode = await runCli(['peek', '123'], { stdout, stderr, peekProcesses });
181
+ expect(defaultExitCode).toBe(0);
182
+ expect(peekProcesses).toHaveBeenCalledWith([123], 10);
183
+ const followExitCode = await runCli(['peek', '123', '--follow'], { stdout, stderr, peekProcesses });
184
+ expect(followExitCode).toBe(1);
185
+ expect(stderr).toHaveBeenCalledWith('peek does not support --follow in v1\n');
186
+ });
187
+ it('rejects invalid peek time values', async () => {
188
+ const stdout = vi.fn();
189
+ const stderr = vi.fn();
190
+ const peekProcesses = vi.fn();
191
+ const exitCode = await runCli(['peek', '123', '--time', '1.5'], {
192
+ stdout,
193
+ stderr,
194
+ peekProcesses,
195
+ });
196
+ expect(exitCode).toBe(1);
197
+ expect(stderr).toHaveBeenCalledWith(expect.stringContaining('peek_time_sec must be a positive integer'));
198
+ expect(peekProcesses).not.toHaveBeenCalled();
199
+ });
154
200
  it('dispatches ps, result, and kill', async () => {
155
201
  const stdout = vi.fn();
156
202
  const stderr = vi.fn();
@@ -180,11 +226,21 @@ describe('ai-cli app', () => {
180
226
  const stdout = vi.fn();
181
227
  const stderr = vi.fn();
182
228
  const exitCode = await runCli(['models'], { stdout, stderr });
229
+ const payload = JSON.parse(stdout.mock.calls[0][0]);
183
230
  expect(exitCode).toBe(0);
184
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"aliases"'));
185
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"claude-ultra"'));
186
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"gpt-5.4"'));
187
- expect(stdout).toHaveBeenCalledWith(expect.stringContaining('"forge"'));
231
+ expect(payload.aliases).toEqual(expect.any(Array));
232
+ expect(payload.claude).toContain('sonnet');
233
+ expect(payload.codex).toContain('gpt-5.4');
234
+ expect(payload.forge).toEqual(['forge']);
235
+ expect(payload.opencode).toEqual(['opencode']);
236
+ expect(payload.dynamicModelBackends).toEqual({
237
+ opencode: {
238
+ explicitPrefix: 'oc-',
239
+ explicitPattern: 'oc-<provider/model>',
240
+ discoveryCommand: 'opencode models',
241
+ modelsAreDynamic: true,
242
+ },
243
+ });
188
244
  expect(stderr).not.toHaveBeenCalled();
189
245
  });
190
246
  it('prints doctor status as structured json', async () => {
@@ -215,6 +271,12 @@ describe('ai-cli app', () => {
215
271
  available: true,
216
272
  lookup: 'path',
217
273
  },
274
+ opencode: {
275
+ configuredCommand: 'opencode',
276
+ resolvedPath: '/tmp/bin/opencode',
277
+ available: true,
278
+ lookup: 'path',
279
+ },
218
280
  });
219
281
  const exitCode = await runCli(['doctor'], { stdout, stderr, getDoctorStatus });
220
282
  expect(exitCode).toBe(0);
@@ -241,6 +303,8 @@ describe('ai-cli app', () => {
241
303
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('gpt-5.2-codex'));
242
304
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('gemini-2.5-pro'));
243
305
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('forge'));
306
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('opencode'));
307
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('oc-openai/gpt-5.4'));
244
308
  expect(stderr).not.toHaveBeenCalled();
245
309
  });
246
310
  it('prints detailed help for result --help', async () => {
@@ -263,6 +327,15 @@ describe('ai-cli app', () => {
263
327
  expect(stdout).toHaveBeenCalledWith(expect.stringContaining('--verbose'));
264
328
  expect(stderr).not.toHaveBeenCalled();
265
329
  });
330
+ it('prints detailed help for peek --help', async () => {
331
+ const stdout = vi.fn();
332
+ const stderr = vi.fn();
333
+ const exitCode = await runCli(['peek', '--help'], { stdout, stderr });
334
+ expect(exitCode).toBe(0);
335
+ expect(stdout).toHaveBeenCalledWith(PEEK_HELP_TEXT);
336
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('No --follow mode'));
337
+ expect(stderr).not.toHaveBeenCalled();
338
+ });
266
339
  it('prints detailed help for models --help', async () => {
267
340
  const stdout = vi.fn();
268
341
  const stderr = vi.fn();
@@ -277,6 +350,7 @@ describe('ai-cli app', () => {
277
350
  const exitCode = await runCli(['doctor', '--help'], { stdout, stderr });
278
351
  expect(exitCode).toBe(0);
279
352
  expect(stdout).toHaveBeenCalledWith(DOCTOR_HELP_TEXT);
353
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('OpenCode'));
280
354
  expect(stderr).not.toHaveBeenCalled();
281
355
  });
282
356
  it('prints detailed help for doctor -h', async () => {
@@ -285,6 +359,7 @@ describe('ai-cli app', () => {
285
359
  const exitCode = await runCli(['doctor', '-h'], { stdout, stderr });
286
360
  expect(exitCode).toBe(0);
287
361
  expect(stdout).toHaveBeenCalledWith(DOCTOR_HELP_TEXT);
362
+ expect(stdout).toHaveBeenCalledWith(expect.stringContaining('OpenCode'));
288
363
  expect(stderr).not.toHaveBeenCalled();
289
364
  });
290
365
  it('prints help for --help', async () => {