@oh-my-pi/pi-coding-agent 12.7.5 → 12.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +37 -37
- package/README.md +9 -1052
- package/package.json +7 -7
- package/src/cli/args.ts +1 -0
- package/src/cli/update-cli.ts +49 -35
- package/src/cli/web-search-cli.ts +3 -2
- package/src/commands/web-search.ts +1 -0
- package/src/config/model-registry.ts +6 -0
- package/src/config/model-resolver.ts +2 -0
- package/src/config/settings-schema.ts +25 -3
- package/src/config/settings.ts +1 -0
- package/src/extensibility/extensions/wrapper.ts +20 -13
- package/src/extensibility/slash-commands.ts +12 -91
- package/src/lsp/client.ts +24 -27
- package/src/lsp/index.ts +92 -42
- package/src/mcp/config-writer.ts +33 -0
- package/src/mcp/config.ts +6 -1
- package/src/mcp/types.ts +1 -0
- package/src/modes/components/custom-editor.ts +8 -5
- package/src/modes/components/settings-defs.ts +2 -1
- package/src/modes/controllers/command-controller.ts +12 -6
- package/src/modes/controllers/input-controller.ts +21 -186
- package/src/modes/controllers/mcp-command-controller.ts +60 -3
- package/src/modes/interactive-mode.ts +2 -2
- package/src/modes/types.ts +1 -1
- package/src/sdk.ts +23 -1
- package/src/secrets/index.ts +116 -0
- package/src/secrets/obfuscator.ts +269 -0
- package/src/secrets/regex.ts +21 -0
- package/src/session/agent-session.ts +143 -21
- package/src/session/compaction/branch-summarization.ts +2 -2
- package/src/session/compaction/compaction.ts +10 -3
- package/src/session/compaction/utils.ts +25 -1
- package/src/slash-commands/builtin-registry.ts +419 -0
- package/src/web/scrapers/github.ts +50 -12
- package/src/web/search/index.ts +5 -5
- package/src/web/search/provider.ts +13 -2
- package/src/web/search/providers/brave.ts +165 -0
- package/src/web/search/types.ts +1 -1
- package/docs/compaction.md +0 -436
- package/docs/config-usage.md +0 -176
- package/docs/custom-tools.md +0 -585
- package/docs/environment-variables.md +0 -257
- package/docs/extension-loading.md +0 -106
- package/docs/extensions.md +0 -1342
- package/docs/fs-scan-cache-architecture.md +0 -50
- package/docs/hooks.md +0 -906
- package/docs/models.md +0 -234
- package/docs/python-repl.md +0 -110
- package/docs/rpc.md +0 -1173
- package/docs/sdk.md +0 -1039
- package/docs/session-tree-plan.md +0 -84
- package/docs/session.md +0 -368
- package/docs/skills.md +0 -254
- package/docs/theme.md +0 -696
- package/docs/tree.md +0 -206
- package/docs/tui.md +0 -487
package/docs/rpc.md
DELETED
|
@@ -1,1173 +0,0 @@
|
|
|
1
|
-
# RPC Mode
|
|
2
|
-
|
|
3
|
-
RPC mode enables headless operation of the coding agent via a JSON protocol over stdin/stdout. This is useful for embedding the agent in other applications, IDEs, or custom UIs.
|
|
4
|
-
|
|
5
|
-
**Note for Node.js/TypeScript users**: If you're building a Node.js application, consider using `createAgentSession()` from `@oh-my-pi/pi-coding-agent` instead of spawning a subprocess. See [`src/sdk.ts`](../src/sdk.ts) for the SDK API. For a subprocess-based TypeScript client, see [`src/modes/rpc/rpc-client.ts`](../src/modes/rpc/rpc-client.ts).
|
|
6
|
-
|
|
7
|
-
## Starting RPC Mode
|
|
8
|
-
|
|
9
|
-
```bash
|
|
10
|
-
omp --mode rpc [options]
|
|
11
|
-
```
|
|
12
|
-
|
|
13
|
-
Common options:
|
|
14
|
-
|
|
15
|
-
- `--provider <name>`: Set the LLM provider (anthropic, openai, google, etc.)
|
|
16
|
-
- `--model <id>`: Set the model ID
|
|
17
|
-
- `--no-session`: Disable session persistence
|
|
18
|
-
- `--session-dir <path>`: Custom session storage directory
|
|
19
|
-
|
|
20
|
-
## Protocol Overview
|
|
21
|
-
|
|
22
|
-
- **Commands**: JSON objects sent to stdin, one per line
|
|
23
|
-
- **Responses**: JSON objects with `type: "response"` indicating command success/failure
|
|
24
|
-
- **Events**: Agent events streamed to stdout as JSON lines
|
|
25
|
-
|
|
26
|
-
If you're consuming output in Bun, prefer `Bun.JSONL.parse(text)` for buffered JSONL or `Bun.JSONL.parseChunk()` for streaming output instead of splitting and `JSON.parse`.
|
|
27
|
-
|
|
28
|
-
All commands support an optional `id` field for request/response correlation. If provided, the corresponding response will include the same `id`.
|
|
29
|
-
|
|
30
|
-
## Commands
|
|
31
|
-
|
|
32
|
-
### Prompting
|
|
33
|
-
|
|
34
|
-
#### prompt
|
|
35
|
-
|
|
36
|
-
Send a user prompt to the agent. Returns immediately; events stream asynchronously.
|
|
37
|
-
|
|
38
|
-
```json
|
|
39
|
-
{ "id": "req-1", "type": "prompt", "message": "Hello, world!" }
|
|
40
|
-
```
|
|
41
|
-
|
|
42
|
-
With images:
|
|
43
|
-
|
|
44
|
-
```json
|
|
45
|
-
{
|
|
46
|
-
"type": "prompt",
|
|
47
|
-
"message": "What's in this image?",
|
|
48
|
-
"images": [{ "type": "image", "source": { "type": "base64", "mediaType": "image/png", "data": "..." } }]
|
|
49
|
-
}
|
|
50
|
-
```
|
|
51
|
-
|
|
52
|
-
Response:
|
|
53
|
-
|
|
54
|
-
```json
|
|
55
|
-
{ "id": "req-1", "type": "response", "command": "prompt", "success": true }
|
|
56
|
-
```
|
|
57
|
-
|
|
58
|
-
The `images` field is optional. Each image uses `ImageContent` format with base64 or URL source.
|
|
59
|
-
When prompting during streaming, set `"streamingBehavior": "steer"` or `"followUp"` to queue the message.
|
|
60
|
-
|
|
61
|
-
#### steer
|
|
62
|
-
|
|
63
|
-
Queue a steering message to interrupt the agent mid-run. Useful for injecting corrections while streaming.
|
|
64
|
-
|
|
65
|
-
```json
|
|
66
|
-
{ "type": "steer", "message": "Additional context" }
|
|
67
|
-
```
|
|
68
|
-
|
|
69
|
-
Response:
|
|
70
|
-
|
|
71
|
-
```json
|
|
72
|
-
{ "type": "response", "command": "steer", "success": true }
|
|
73
|
-
```
|
|
74
|
-
|
|
75
|
-
#### follow_up
|
|
76
|
-
|
|
77
|
-
Queue a follow-up message to be processed after the current run completes.
|
|
78
|
-
|
|
79
|
-
```json
|
|
80
|
-
{ "type": "follow_up", "message": "Additional context" }
|
|
81
|
-
```
|
|
82
|
-
|
|
83
|
-
Response:
|
|
84
|
-
|
|
85
|
-
```json
|
|
86
|
-
{ "type": "response", "command": "follow_up", "success": true }
|
|
87
|
-
```
|
|
88
|
-
|
|
89
|
-
See [set_steering_mode](#set_steering_mode), [set_follow_up_mode](#set_follow_up_mode), and
|
|
90
|
-
[set_interrupt_mode](#set_interrupt_mode) for controlling queued message handling.
|
|
91
|
-
|
|
92
|
-
#### abort
|
|
93
|
-
|
|
94
|
-
Abort the current agent operation.
|
|
95
|
-
|
|
96
|
-
```json
|
|
97
|
-
{ "type": "abort" }
|
|
98
|
-
```
|
|
99
|
-
|
|
100
|
-
Response:
|
|
101
|
-
|
|
102
|
-
```json
|
|
103
|
-
{ "type": "response", "command": "abort", "success": true }
|
|
104
|
-
```
|
|
105
|
-
|
|
106
|
-
#### new_session
|
|
107
|
-
|
|
108
|
-
Start a fresh session. Can be cancelled by a `session_before_switch` extension handler.
|
|
109
|
-
|
|
110
|
-
```json
|
|
111
|
-
{ "type": "new_session" }
|
|
112
|
-
```
|
|
113
|
-
|
|
114
|
-
With optional parent session tracking:
|
|
115
|
-
|
|
116
|
-
```json
|
|
117
|
-
{ "type": "new_session", "parentSession": "/path/to/parent-session.jsonl" }
|
|
118
|
-
```
|
|
119
|
-
|
|
120
|
-
Response:
|
|
121
|
-
|
|
122
|
-
```json
|
|
123
|
-
{ "type": "response", "command": "new_session", "success": true, "data": { "cancelled": false } }
|
|
124
|
-
```
|
|
125
|
-
|
|
126
|
-
If an extension cancelled:
|
|
127
|
-
|
|
128
|
-
```json
|
|
129
|
-
{ "type": "response", "command": "new_session", "success": true, "data": { "cancelled": true } }
|
|
130
|
-
```
|
|
131
|
-
|
|
132
|
-
### State
|
|
133
|
-
|
|
134
|
-
#### get_state
|
|
135
|
-
|
|
136
|
-
Get current session state.
|
|
137
|
-
|
|
138
|
-
```json
|
|
139
|
-
{ "type": "get_state" }
|
|
140
|
-
```
|
|
141
|
-
|
|
142
|
-
Response:
|
|
143
|
-
|
|
144
|
-
```json
|
|
145
|
-
{
|
|
146
|
-
"type": "response",
|
|
147
|
-
"command": "get_state",
|
|
148
|
-
"success": true,
|
|
149
|
-
"data": {
|
|
150
|
-
"model": {...},
|
|
151
|
-
"thinkingLevel": "medium",
|
|
152
|
-
"isStreaming": false,
|
|
153
|
-
"isCompacting": false,
|
|
154
|
-
"steeringMode": "all",
|
|
155
|
-
"followUpMode": "one-at-a-time",
|
|
156
|
-
"interruptMode": "immediate",
|
|
157
|
-
"sessionFile": "/path/to/session.jsonl",
|
|
158
|
-
"sessionId": "abc123",
|
|
159
|
-
"sessionName": "my-session",
|
|
160
|
-
"autoCompactionEnabled": true,
|
|
161
|
-
"messageCount": 5,
|
|
162
|
-
"queuedMessageCount": 0
|
|
163
|
-
}
|
|
164
|
-
}
|
|
165
|
-
```
|
|
166
|
-
|
|
167
|
-
The `model` field is a full [Model](#model) object or `null`.
|
|
168
|
-
|
|
169
|
-
#### get_messages
|
|
170
|
-
|
|
171
|
-
Get all messages in the conversation.
|
|
172
|
-
|
|
173
|
-
```json
|
|
174
|
-
{ "type": "get_messages" }
|
|
175
|
-
```
|
|
176
|
-
|
|
177
|
-
Response:
|
|
178
|
-
|
|
179
|
-
```json
|
|
180
|
-
{
|
|
181
|
-
"type": "response",
|
|
182
|
-
"command": "get_messages",
|
|
183
|
-
"success": true,
|
|
184
|
-
"data": {"messages": [...]}
|
|
185
|
-
}
|
|
186
|
-
```
|
|
187
|
-
|
|
188
|
-
Messages are `AgentMessage` objects (see [Message Types](#message-types)).
|
|
189
|
-
|
|
190
|
-
### Model
|
|
191
|
-
|
|
192
|
-
#### set_model
|
|
193
|
-
|
|
194
|
-
Switch to a specific model.
|
|
195
|
-
|
|
196
|
-
```json
|
|
197
|
-
{ "type": "set_model", "provider": "anthropic", "modelId": "claude-sonnet-4-20250514" }
|
|
198
|
-
```
|
|
199
|
-
|
|
200
|
-
Response contains the full [Model](#model) object:
|
|
201
|
-
|
|
202
|
-
```json
|
|
203
|
-
{
|
|
204
|
-
"type": "response",
|
|
205
|
-
"command": "set_model",
|
|
206
|
-
"success": true,
|
|
207
|
-
"data": {...}
|
|
208
|
-
}
|
|
209
|
-
```
|
|
210
|
-
|
|
211
|
-
#### cycle_model
|
|
212
|
-
|
|
213
|
-
Cycle to the next available model. Returns `null` data if only one model available.
|
|
214
|
-
|
|
215
|
-
```json
|
|
216
|
-
{ "type": "cycle_model" }
|
|
217
|
-
```
|
|
218
|
-
|
|
219
|
-
Response:
|
|
220
|
-
|
|
221
|
-
```json
|
|
222
|
-
{
|
|
223
|
-
"type": "response",
|
|
224
|
-
"command": "cycle_model",
|
|
225
|
-
"success": true,
|
|
226
|
-
"data": {
|
|
227
|
-
"model": {...},
|
|
228
|
-
"thinkingLevel": "medium",
|
|
229
|
-
"isScoped": false
|
|
230
|
-
}
|
|
231
|
-
}
|
|
232
|
-
```
|
|
233
|
-
|
|
234
|
-
The `model` field is a full [Model](#model) object.
|
|
235
|
-
|
|
236
|
-
#### get_available_models
|
|
237
|
-
|
|
238
|
-
List all configured models.
|
|
239
|
-
|
|
240
|
-
```json
|
|
241
|
-
{ "type": "get_available_models" }
|
|
242
|
-
```
|
|
243
|
-
|
|
244
|
-
Response contains an array of full [Model](#model) objects:
|
|
245
|
-
|
|
246
|
-
```json
|
|
247
|
-
{
|
|
248
|
-
"type": "response",
|
|
249
|
-
"command": "get_available_models",
|
|
250
|
-
"success": true,
|
|
251
|
-
"data": {
|
|
252
|
-
"models": [...]
|
|
253
|
-
}
|
|
254
|
-
}
|
|
255
|
-
```
|
|
256
|
-
|
|
257
|
-
### Thinking
|
|
258
|
-
|
|
259
|
-
#### set_thinking_level
|
|
260
|
-
|
|
261
|
-
Set the reasoning/thinking level for models that support it.
|
|
262
|
-
|
|
263
|
-
```json
|
|
264
|
-
{ "type": "set_thinking_level", "level": "high" }
|
|
265
|
-
```
|
|
266
|
-
|
|
267
|
-
Levels: `"off"`, `"minimal"`, `"low"`, `"medium"`, `"high"`, `"xhigh"`
|
|
268
|
-
|
|
269
|
-
Note: `"xhigh"` is only supported by OpenAI codex-max models.
|
|
270
|
-
|
|
271
|
-
Response:
|
|
272
|
-
|
|
273
|
-
```json
|
|
274
|
-
{ "type": "response", "command": "set_thinking_level", "success": true }
|
|
275
|
-
```
|
|
276
|
-
|
|
277
|
-
#### cycle_thinking_level
|
|
278
|
-
|
|
279
|
-
Cycle through available thinking levels. Returns `null` data if model doesn't support thinking.
|
|
280
|
-
|
|
281
|
-
```json
|
|
282
|
-
{ "type": "cycle_thinking_level" }
|
|
283
|
-
```
|
|
284
|
-
|
|
285
|
-
Response:
|
|
286
|
-
|
|
287
|
-
```json
|
|
288
|
-
{
|
|
289
|
-
"type": "response",
|
|
290
|
-
"command": "cycle_thinking_level",
|
|
291
|
-
"success": true,
|
|
292
|
-
"data": { "level": "high" }
|
|
293
|
-
}
|
|
294
|
-
```
|
|
295
|
-
|
|
296
|
-
### Queue Modes
|
|
297
|
-
|
|
298
|
-
#### set_steering_mode
|
|
299
|
-
|
|
300
|
-
Control how steering messages are injected into the conversation.
|
|
301
|
-
|
|
302
|
-
```json
|
|
303
|
-
{ "type": "set_steering_mode", "mode": "one-at-a-time" }
|
|
304
|
-
```
|
|
305
|
-
|
|
306
|
-
Modes:
|
|
307
|
-
|
|
308
|
-
- `"all"`: Inject all steering messages at the next turn
|
|
309
|
-
- `"one-at-a-time"`: Inject one steering message per turn (default)
|
|
310
|
-
|
|
311
|
-
Response:
|
|
312
|
-
|
|
313
|
-
```json
|
|
314
|
-
{ "type": "response", "command": "set_steering_mode", "success": true }
|
|
315
|
-
```
|
|
316
|
-
|
|
317
|
-
#### set_follow_up_mode
|
|
318
|
-
|
|
319
|
-
Control how follow-up messages are injected into the conversation.
|
|
320
|
-
|
|
321
|
-
```json
|
|
322
|
-
{ "type": "set_follow_up_mode", "mode": "one-at-a-time" }
|
|
323
|
-
```
|
|
324
|
-
|
|
325
|
-
Modes:
|
|
326
|
-
|
|
327
|
-
- `"all"`: Inject all follow-up messages at the next turn
|
|
328
|
-
- `"one-at-a-time"`: Inject one follow-up message per turn (default)
|
|
329
|
-
|
|
330
|
-
Response:
|
|
331
|
-
|
|
332
|
-
```json
|
|
333
|
-
{ "type": "response", "command": "set_follow_up_mode", "success": true }
|
|
334
|
-
```
|
|
335
|
-
|
|
336
|
-
#### set_interrupt_mode
|
|
337
|
-
|
|
338
|
-
Control how the agent handles incoming steering messages while streaming.
|
|
339
|
-
|
|
340
|
-
```json
|
|
341
|
-
{ "type": "set_interrupt_mode", "mode": "wait" }
|
|
342
|
-
```
|
|
343
|
-
|
|
344
|
-
Modes:
|
|
345
|
-
|
|
346
|
-
- `"immediate"`: Interrupt immediately when steering arrives
|
|
347
|
-
- `"wait"`: Wait to apply steering until current tool call completes
|
|
348
|
-
|
|
349
|
-
Response:
|
|
350
|
-
|
|
351
|
-
```json
|
|
352
|
-
{ "type": "response", "command": "set_interrupt_mode", "success": true }
|
|
353
|
-
```
|
|
354
|
-
|
|
355
|
-
### Compaction
|
|
356
|
-
|
|
357
|
-
#### compact
|
|
358
|
-
|
|
359
|
-
Manually compact conversation context to reduce token usage.
|
|
360
|
-
|
|
361
|
-
```json
|
|
362
|
-
{ "type": "compact" }
|
|
363
|
-
```
|
|
364
|
-
|
|
365
|
-
With custom instructions:
|
|
366
|
-
|
|
367
|
-
```json
|
|
368
|
-
{ "type": "compact", "customInstructions": "Focus on code changes" }
|
|
369
|
-
```
|
|
370
|
-
|
|
371
|
-
Response:
|
|
372
|
-
|
|
373
|
-
```json
|
|
374
|
-
{
|
|
375
|
-
"type": "response",
|
|
376
|
-
"command": "compact",
|
|
377
|
-
"success": true,
|
|
378
|
-
"data": {
|
|
379
|
-
"summary": "Summary of conversation...",
|
|
380
|
-
"firstKeptEntryId": "abc123",
|
|
381
|
-
"tokensBefore": 150000,
|
|
382
|
-
"details": {}
|
|
383
|
-
}
|
|
384
|
-
}
|
|
385
|
-
```
|
|
386
|
-
|
|
387
|
-
#### set_auto_compaction
|
|
388
|
-
|
|
389
|
-
Enable or disable automatic compaction when context is nearly full.
|
|
390
|
-
|
|
391
|
-
```json
|
|
392
|
-
{ "type": "set_auto_compaction", "enabled": true }
|
|
393
|
-
```
|
|
394
|
-
|
|
395
|
-
Response:
|
|
396
|
-
|
|
397
|
-
```json
|
|
398
|
-
{ "type": "response", "command": "set_auto_compaction", "success": true }
|
|
399
|
-
```
|
|
400
|
-
|
|
401
|
-
### Retry
|
|
402
|
-
|
|
403
|
-
#### set_auto_retry
|
|
404
|
-
|
|
405
|
-
Enable or disable automatic retry on transient errors (overloaded, rate limit, 5xx).
|
|
406
|
-
|
|
407
|
-
```json
|
|
408
|
-
{ "type": "set_auto_retry", "enabled": true }
|
|
409
|
-
```
|
|
410
|
-
|
|
411
|
-
Response:
|
|
412
|
-
|
|
413
|
-
```json
|
|
414
|
-
{ "type": "response", "command": "set_auto_retry", "success": true }
|
|
415
|
-
```
|
|
416
|
-
|
|
417
|
-
#### abort_retry
|
|
418
|
-
|
|
419
|
-
Abort an in-progress retry (cancel the delay and stop retrying).
|
|
420
|
-
|
|
421
|
-
```json
|
|
422
|
-
{ "type": "abort_retry" }
|
|
423
|
-
```
|
|
424
|
-
|
|
425
|
-
Response:
|
|
426
|
-
|
|
427
|
-
```json
|
|
428
|
-
{ "type": "response", "command": "abort_retry", "success": true }
|
|
429
|
-
```
|
|
430
|
-
|
|
431
|
-
### Bash
|
|
432
|
-
|
|
433
|
-
#### bash
|
|
434
|
-
|
|
435
|
-
Execute a shell command and add output to conversation context.
|
|
436
|
-
|
|
437
|
-
```json
|
|
438
|
-
{ "type": "bash", "command": "ls -la" }
|
|
439
|
-
```
|
|
440
|
-
|
|
441
|
-
Response:
|
|
442
|
-
|
|
443
|
-
```json
|
|
444
|
-
{
|
|
445
|
-
"type": "response",
|
|
446
|
-
"command": "bash",
|
|
447
|
-
"success": true,
|
|
448
|
-
"data": {
|
|
449
|
-
"output": "total 48\ndrwxr-xr-x ...",
|
|
450
|
-
"exitCode": 0,
|
|
451
|
-
"cancelled": false,
|
|
452
|
-
"truncated": false,
|
|
453
|
-
"totalLines": 48,
|
|
454
|
-
"totalBytes": 2048,
|
|
455
|
-
"outputLines": 48,
|
|
456
|
-
"outputBytes": 2048
|
|
457
|
-
}
|
|
458
|
-
}
|
|
459
|
-
```
|
|
460
|
-
|
|
461
|
-
If output was truncated, includes `artifactId`:
|
|
462
|
-
|
|
463
|
-
```json
|
|
464
|
-
{
|
|
465
|
-
"type": "response",
|
|
466
|
-
"command": "bash",
|
|
467
|
-
"success": true,
|
|
468
|
-
"data": {
|
|
469
|
-
"output": "truncated output...",
|
|
470
|
-
"exitCode": 0,
|
|
471
|
-
"cancelled": false,
|
|
472
|
-
"truncated": true,
|
|
473
|
-
"totalLines": 5000,
|
|
474
|
-
"totalBytes": 102400,
|
|
475
|
-
"outputLines": 2000,
|
|
476
|
-
"outputBytes": 51200,
|
|
477
|
-
"artifactId": "abc123"
|
|
478
|
-
}
|
|
479
|
-
}
|
|
480
|
-
```
|
|
481
|
-
|
|
482
|
-
**How bash results reach the LLM:**
|
|
483
|
-
|
|
484
|
-
The `bash` command executes immediately and returns a `BashResult`. Internally, a `BashExecutionMessage` is created and stored in the agent's message state. This message does NOT emit an event.
|
|
485
|
-
|
|
486
|
-
When the next `prompt` command is sent, all messages (including `BashExecutionMessage`) are transformed before being sent to the LLM. The `BashExecutionMessage` is converted to a `UserMessage` with this format:
|
|
487
|
-
|
|
488
|
-
```
|
|
489
|
-
Ran `ls -la`
|
|
490
|
-
\`\`\`
|
|
491
|
-
total 48
|
|
492
|
-
drwxr-xr-x ...
|
|
493
|
-
\`\`\`
|
|
494
|
-
```
|
|
495
|
-
|
|
496
|
-
This means:
|
|
497
|
-
|
|
498
|
-
1. Bash output is included in the LLM context on the **next prompt**, not immediately
|
|
499
|
-
2. Multiple bash commands can be executed before a prompt; all outputs will be included
|
|
500
|
-
3. No event is emitted for the `BashExecutionMessage` itself
|
|
501
|
-
|
|
502
|
-
#### abort_bash
|
|
503
|
-
|
|
504
|
-
Abort a running bash command.
|
|
505
|
-
|
|
506
|
-
```json
|
|
507
|
-
{ "type": "abort_bash" }
|
|
508
|
-
```
|
|
509
|
-
|
|
510
|
-
Response:
|
|
511
|
-
|
|
512
|
-
```json
|
|
513
|
-
{ "type": "response", "command": "abort_bash", "success": true }
|
|
514
|
-
```
|
|
515
|
-
|
|
516
|
-
### Session
|
|
517
|
-
|
|
518
|
-
#### get_session_stats
|
|
519
|
-
|
|
520
|
-
Get token usage and cost statistics.
|
|
521
|
-
|
|
522
|
-
```json
|
|
523
|
-
{ "type": "get_session_stats" }
|
|
524
|
-
```
|
|
525
|
-
|
|
526
|
-
Response:
|
|
527
|
-
|
|
528
|
-
```json
|
|
529
|
-
{
|
|
530
|
-
"type": "response",
|
|
531
|
-
"command": "get_session_stats",
|
|
532
|
-
"success": true,
|
|
533
|
-
"data": {
|
|
534
|
-
"sessionFile": "/path/to/session.jsonl",
|
|
535
|
-
"sessionId": "abc123",
|
|
536
|
-
"userMessages": 5,
|
|
537
|
-
"assistantMessages": 5,
|
|
538
|
-
"toolCalls": 12,
|
|
539
|
-
"toolResults": 12,
|
|
540
|
-
"totalMessages": 22,
|
|
541
|
-
"tokens": {
|
|
542
|
-
"input": 50000,
|
|
543
|
-
"output": 10000,
|
|
544
|
-
"cacheRead": 40000,
|
|
545
|
-
"cacheWrite": 5000,
|
|
546
|
-
"total": 105000
|
|
547
|
-
},
|
|
548
|
-
"cost": 0.45
|
|
549
|
-
}
|
|
550
|
-
}
|
|
551
|
-
```
|
|
552
|
-
|
|
553
|
-
#### export_html
|
|
554
|
-
|
|
555
|
-
Export session to an HTML file.
|
|
556
|
-
|
|
557
|
-
```json
|
|
558
|
-
{ "type": "export_html" }
|
|
559
|
-
```
|
|
560
|
-
|
|
561
|
-
With custom path:
|
|
562
|
-
|
|
563
|
-
```json
|
|
564
|
-
{ "type": "export_html", "outputPath": "/tmp/session.html" }
|
|
565
|
-
```
|
|
566
|
-
|
|
567
|
-
Response:
|
|
568
|
-
|
|
569
|
-
```json
|
|
570
|
-
{
|
|
571
|
-
"type": "response",
|
|
572
|
-
"command": "export_html",
|
|
573
|
-
"success": true,
|
|
574
|
-
"data": { "path": "/tmp/session.html" }
|
|
575
|
-
}
|
|
576
|
-
```
|
|
577
|
-
|
|
578
|
-
#### switch_session
|
|
579
|
-
|
|
580
|
-
Load a different session file. Can be cancelled by a `session_before_switch` extension handler.
|
|
581
|
-
|
|
582
|
-
```json
|
|
583
|
-
{ "type": "switch_session", "sessionPath": "/path/to/session.jsonl" }
|
|
584
|
-
```
|
|
585
|
-
|
|
586
|
-
Response:
|
|
587
|
-
|
|
588
|
-
```json
|
|
589
|
-
{ "type": "response", "command": "switch_session", "success": true, "data": { "cancelled": false } }
|
|
590
|
-
```
|
|
591
|
-
|
|
592
|
-
If an extension cancelled the switch:
|
|
593
|
-
|
|
594
|
-
```json
|
|
595
|
-
{ "type": "response", "command": "switch_session", "success": true, "data": { "cancelled": true } }
|
|
596
|
-
```
|
|
597
|
-
|
|
598
|
-
#### branch
|
|
599
|
-
|
|
600
|
-
Create a new branch from a previous user message. Can be cancelled by a `session_before_branch` extension handler. Returns the text of the message being branched from.
|
|
601
|
-
|
|
602
|
-
```json
|
|
603
|
-
{ "type": "branch", "entryId": "abc123" }
|
|
604
|
-
```
|
|
605
|
-
|
|
606
|
-
Response:
|
|
607
|
-
|
|
608
|
-
```json
|
|
609
|
-
{
|
|
610
|
-
"type": "response",
|
|
611
|
-
"command": "branch",
|
|
612
|
-
"success": true,
|
|
613
|
-
"data": { "text": "The original prompt text...", "cancelled": false }
|
|
614
|
-
}
|
|
615
|
-
```
|
|
616
|
-
|
|
617
|
-
If an extension cancelled the branch:
|
|
618
|
-
|
|
619
|
-
```json
|
|
620
|
-
{
|
|
621
|
-
"type": "response",
|
|
622
|
-
"command": "branch",
|
|
623
|
-
"success": true,
|
|
624
|
-
"data": { "text": "The original prompt text...", "cancelled": true }
|
|
625
|
-
}
|
|
626
|
-
```
|
|
627
|
-
|
|
628
|
-
#### get_branch_messages
|
|
629
|
-
|
|
630
|
-
Get user messages available for branching.
|
|
631
|
-
|
|
632
|
-
```json
|
|
633
|
-
{ "type": "get_branch_messages" }
|
|
634
|
-
```
|
|
635
|
-
|
|
636
|
-
Response:
|
|
637
|
-
|
|
638
|
-
```json
|
|
639
|
-
{
|
|
640
|
-
"type": "response",
|
|
641
|
-
"command": "get_branch_messages",
|
|
642
|
-
"success": true,
|
|
643
|
-
"data": {
|
|
644
|
-
"messages": [
|
|
645
|
-
{ "entryId": "abc123", "text": "First prompt..." },
|
|
646
|
-
{ "entryId": "def456", "text": "Second prompt..." }
|
|
647
|
-
]
|
|
648
|
-
}
|
|
649
|
-
}
|
|
650
|
-
```
|
|
651
|
-
|
|
652
|
-
#### get_last_assistant_text
|
|
653
|
-
|
|
654
|
-
Get the text content of the last assistant message.
|
|
655
|
-
|
|
656
|
-
```json
|
|
657
|
-
{ "type": "get_last_assistant_text" }
|
|
658
|
-
```
|
|
659
|
-
|
|
660
|
-
Response:
|
|
661
|
-
|
|
662
|
-
```json
|
|
663
|
-
{
|
|
664
|
-
"type": "response",
|
|
665
|
-
"command": "get_last_assistant_text",
|
|
666
|
-
"success": true,
|
|
667
|
-
"data": { "text": "The assistant's response..." }
|
|
668
|
-
}
|
|
669
|
-
```
|
|
670
|
-
|
|
671
|
-
Returns `{"text": null}` if no assistant messages exist.
|
|
672
|
-
|
|
673
|
-
#### set_session_name
|
|
674
|
-
|
|
675
|
-
Set a display name for the current session.
|
|
676
|
-
|
|
677
|
-
```json
|
|
678
|
-
{ "type": "set_session_name", "name": "my-session" }
|
|
679
|
-
```
|
|
680
|
-
|
|
681
|
-
Response:
|
|
682
|
-
|
|
683
|
-
```json
|
|
684
|
-
{ "type": "response", "command": "set_session_name", "success": true }
|
|
685
|
-
```
|
|
686
|
-
|
|
687
|
-
Returns an error if the name is empty.
|
|
688
|
-
|
|
689
|
-
## Extension UI (stdout)
|
|
690
|
-
|
|
691
|
-
In RPC mode, extensions receive an [`ExtensionUIContext`](./extensions.md#custom-ui) backed by an extension UI sub-protocol.
|
|
692
|
-
When an extension calls a dialog or UI method, the agent emits an `extension_ui_request` JSON line on stdout. The host must
|
|
693
|
-
respond by writing an `extension_ui_response` JSON line on stdin.
|
|
694
|
-
|
|
695
|
-
If a dialog request includes a `timeout` field, the agent auto-resolves it with a default value when the timeout expires.
|
|
696
|
-
The host does not need to track or enforce timeouts.
|
|
697
|
-
|
|
698
|
-
Example request (stdout):
|
|
699
|
-
|
|
700
|
-
```json
|
|
701
|
-
{ "type": "extension_ui_request", "id": "req-123", "method": "confirm", "title": "Confirm", "message": "Continue?", "timeout": 30000 }
|
|
702
|
-
```
|
|
703
|
-
|
|
704
|
-
Example response (stdin):
|
|
705
|
-
|
|
706
|
-
```json
|
|
707
|
-
{ "type": "extension_ui_response", "id": "req-123", "confirmed": true }
|
|
708
|
-
```
|
|
709
|
-
|
|
710
|
-
### Unsupported / degraded UI methods
|
|
711
|
-
|
|
712
|
-
Some `ExtensionUIContext` methods are not supported or degraded in RPC mode because they require direct TUI access:
|
|
713
|
-
|
|
714
|
-
- `custom()` returns `undefined`
|
|
715
|
-
- `setWorkingMessage()`, `setFooter()`, `setHeader()`, `setEditorComponent()`, `setToolsExpanded()` are no-ops
|
|
716
|
-
- `getEditorText()` returns `""`
|
|
717
|
-
- `getToolsExpanded()` returns `false`
|
|
718
|
-
- `setWidget()` only supports `string[]` (factory functions/components are ignored)
|
|
719
|
-
- `getAllThemes()` returns `[]`
|
|
720
|
-
- `getTheme()` returns `undefined`
|
|
721
|
-
- `setTheme()` returns `{ success: false, error: "Theme switching not supported in RPC mode" }`
|
|
722
|
-
|
|
723
|
-
Note: `ctx.hasUI` is `true` in RPC mode because dialog and fire-and-forget UI methods are functional via this sub-protocol.
|
|
724
|
-
|
|
725
|
-
## Events
|
|
726
|
-
|
|
727
|
-
Events are streamed to stdout as JSON lines during agent operation. Events do NOT include an `id` field (only responses do).
|
|
728
|
-
|
|
729
|
-
### Event Types
|
|
730
|
-
|
|
731
|
-
| Event | Description |
|
|
732
|
-
| ----------------------- | ------------------------------------------------------------ |
|
|
733
|
-
| `agent_start` | Agent begins processing |
|
|
734
|
-
| `agent_end` | Agent completes (includes all generated messages) |
|
|
735
|
-
| `turn_start` | New turn begins |
|
|
736
|
-
| `turn_end` | Turn completes (includes assistant message and tool results) |
|
|
737
|
-
| `message_start` | Message begins |
|
|
738
|
-
| `message_update` | Streaming update (text/thinking/toolcall deltas) |
|
|
739
|
-
| `message_end` | Message completes |
|
|
740
|
-
| `tool_execution_start` | Tool begins execution |
|
|
741
|
-
| `tool_execution_update` | Tool execution progress (streaming output) |
|
|
742
|
-
| `tool_execution_end` | Tool completes |
|
|
743
|
-
| `auto_compaction_start` | Auto-compaction begins |
|
|
744
|
-
| `auto_compaction_end` | Auto-compaction completes |
|
|
745
|
-
| `auto_retry_start` | Auto-retry begins (after transient error) |
|
|
746
|
-
| `auto_retry_end` | Auto-retry completes (success or final failure) |
|
|
747
|
-
| `extension_error` | Extension threw an error |
|
|
748
|
-
|
|
749
|
-
### agent_start
|
|
750
|
-
|
|
751
|
-
Emitted when the agent begins processing a prompt.
|
|
752
|
-
|
|
753
|
-
```json
|
|
754
|
-
{ "type": "agent_start" }
|
|
755
|
-
```
|
|
756
|
-
|
|
757
|
-
### agent_end
|
|
758
|
-
|
|
759
|
-
Emitted when the agent completes. Contains all messages generated during this run.
|
|
760
|
-
|
|
761
|
-
```json
|
|
762
|
-
{
|
|
763
|
-
"type": "agent_end",
|
|
764
|
-
"messages": [...]
|
|
765
|
-
}
|
|
766
|
-
```
|
|
767
|
-
|
|
768
|
-
### turn_start / turn_end
|
|
769
|
-
|
|
770
|
-
A turn consists of one assistant response plus any resulting tool calls and results.
|
|
771
|
-
|
|
772
|
-
```json
|
|
773
|
-
{ "type": "turn_start" }
|
|
774
|
-
```
|
|
775
|
-
|
|
776
|
-
```json
|
|
777
|
-
{
|
|
778
|
-
"type": "turn_end",
|
|
779
|
-
"message": {...},
|
|
780
|
-
"toolResults": [...]
|
|
781
|
-
}
|
|
782
|
-
```
|
|
783
|
-
|
|
784
|
-
### message_start / message_end
|
|
785
|
-
|
|
786
|
-
Emitted when a message begins and completes. The `message` field contains an `AgentMessage`.
|
|
787
|
-
|
|
788
|
-
```json
|
|
789
|
-
{"type": "message_start", "message": {...}}
|
|
790
|
-
{"type": "message_end", "message": {...}}
|
|
791
|
-
```
|
|
792
|
-
|
|
793
|
-
### message_update (Streaming)
|
|
794
|
-
|
|
795
|
-
Emitted during streaming of assistant messages. Contains both the partial message and a streaming delta event.
|
|
796
|
-
|
|
797
|
-
```json
|
|
798
|
-
{
|
|
799
|
-
"type": "message_update",
|
|
800
|
-
"message": {...},
|
|
801
|
-
"assistantMessageEvent": {
|
|
802
|
-
"type": "text_delta",
|
|
803
|
-
"contentIndex": 0,
|
|
804
|
-
"delta": "Hello ",
|
|
805
|
-
"partial": {...}
|
|
806
|
-
}
|
|
807
|
-
}
|
|
808
|
-
```
|
|
809
|
-
|
|
810
|
-
The `assistantMessageEvent` field contains one of these delta types:
|
|
811
|
-
|
|
812
|
-
| Type | Description |
|
|
813
|
-
| ---------------- | ------------------------------------------------------------ |
|
|
814
|
-
| `start` | Message generation started |
|
|
815
|
-
| `text_start` | Text content block started |
|
|
816
|
-
| `text_delta` | Text content chunk |
|
|
817
|
-
| `text_end` | Text content block ended |
|
|
818
|
-
| `thinking_start` | Thinking block started |
|
|
819
|
-
| `thinking_delta` | Thinking content chunk |
|
|
820
|
-
| `thinking_end` | Thinking block ended |
|
|
821
|
-
| `toolcall_start` | Tool call started |
|
|
822
|
-
| `toolcall_delta` | Tool call arguments chunk |
|
|
823
|
-
| `toolcall_end` | Tool call ended (includes full `toolCall` object) |
|
|
824
|
-
| `done` | Message complete (reason: `"stop"`, `"length"`, `"toolUse"`) |
|
|
825
|
-
| `error` | Error occurred (reason: `"aborted"`, `"error"`) |
|
|
826
|
-
|
|
827
|
-
Example streaming a text response:
|
|
828
|
-
|
|
829
|
-
```json
|
|
830
|
-
{"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_start","contentIndex":0,"partial":{...}}}
|
|
831
|
-
{"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_delta","contentIndex":0,"delta":"Hello","partial":{...}}}
|
|
832
|
-
{"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_delta","contentIndex":0,"delta":" world","partial":{...}}}
|
|
833
|
-
{"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_end","contentIndex":0,"content":"Hello world","partial":{...}}}
|
|
834
|
-
```
|
|
835
|
-
|
|
836
|
-
### tool_execution_start / tool_execution_update / tool_execution_end
|
|
837
|
-
|
|
838
|
-
Emitted when a tool begins, streams progress, and completes execution.
|
|
839
|
-
|
|
840
|
-
```json
|
|
841
|
-
{
|
|
842
|
-
"type": "tool_execution_start",
|
|
843
|
-
"toolCallId": "call_abc123",
|
|
844
|
-
"toolName": "bash",
|
|
845
|
-
"args": { "command": "ls -la" }
|
|
846
|
-
}
|
|
847
|
-
```
|
|
848
|
-
|
|
849
|
-
During execution, `tool_execution_update` events stream partial results (e.g., bash output as it arrives):
|
|
850
|
-
|
|
851
|
-
```json
|
|
852
|
-
{
|
|
853
|
-
"type": "tool_execution_update",
|
|
854
|
-
"toolCallId": "call_abc123",
|
|
855
|
-
"toolName": "bash",
|
|
856
|
-
"args": { "command": "ls -la" },
|
|
857
|
-
"partialResult": {
|
|
858
|
-
"content": [{ "type": "text", "text": "partial output so far..." }],
|
|
859
|
-
"details": {...}
|
|
860
|
-
}
|
|
861
|
-
}
|
|
862
|
-
```
|
|
863
|
-
|
|
864
|
-
When complete:
|
|
865
|
-
|
|
866
|
-
```json
|
|
867
|
-
{
|
|
868
|
-
"type": "tool_execution_end",
|
|
869
|
-
"toolCallId": "call_abc123",
|
|
870
|
-
"toolName": "bash",
|
|
871
|
-
"result": {
|
|
872
|
-
"content": [{"type": "text", "text": "total 48\n..."}],
|
|
873
|
-
"details": {...}
|
|
874
|
-
},
|
|
875
|
-
"isError": false
|
|
876
|
-
}
|
|
877
|
-
```
|
|
878
|
-
|
|
879
|
-
Use `toolCallId` to correlate events. The `partialResult` in `tool_execution_update` contains the accumulated output so far (not just the delta), allowing clients to simply replace their display on each update.

### auto_compaction_start / auto_compaction_end

Emitted when automatic compaction runs (when context is nearly full).

```json
{ "type": "auto_compaction_start", "reason": "threshold" }
```

The `reason` field is `"threshold"` (context getting large) or `"overflow"` (context exceeded limit).

```json
{
  "type": "auto_compaction_end",
  "result": {
    "summary": "Summary of conversation...",
    "firstKeptEntryId": "abc123",
    "tokensBefore": 150000,
    "details": {}
  },
  "aborted": false,
  "willRetry": false
}
```

If `reason` was `"overflow"` and compaction succeeds, `willRetry` is `true` and the agent will automatically retry the prompt.

If compaction was aborted, `result` is `null` and `aborted` is `true`.

### auto_retry_start / auto_retry_end

Emitted when automatic retry is triggered after a transient error (overloaded, rate limit, 5xx).

```json
{
  "type": "auto_retry_start",
  "attempt": 1,
  "maxAttempts": 3,
  "delayMs": 2000,
  "errorMessage": "529 {\"type\":\"error\",\"error\":{\"type\":\"overloaded_error\",\"message\":\"Overloaded\"}}"
}
```

```json
{
  "type": "auto_retry_end",
  "success": true,
  "attempt": 2
}
```

On final failure (max retries exceeded):

```json
{
  "type": "auto_retry_end",
  "success": false,
  "attempt": 3,
  "finalError": "529 overloaded_error: Overloaded"
}
```

### extension_error

Emitted when an extension throws an error.

```json
{
  "type": "extension_error",
  "extensionPath": "/path/to/extension.ts",
  "event": "turn_start",
  "error": "Error message..."
}
```

## Error Handling

Failed commands return a response with `success: false`:

```json
{
  "type": "response",
  "command": "set_model",
  "success": false,
  "error": "Model not found: invalid/model"
}
```

Parse errors:

```json
{
  "type": "response",
  "command": "parse",
  "success": false,
  "error": "Failed to parse command: Unexpected token..."
}
```

## Types

Source files:

- [`packages/ai/src/types.ts`](../../ai/src/types.ts) - `Model`, `UserMessage`, `AssistantMessage`, `ToolResultMessage`
- [`packages/agent/src/types.ts`](../../agent/src/types.ts) - `AgentMessage`, `AgentEvent`
- [`src/session/messages.ts`](../src/session/messages.ts) - `BashExecutionMessage`
- [`src/modes/rpc/rpc-types.ts`](../src/modes/rpc/rpc-types.ts) - RPC command/response types

### Model

```json
{
  "id": "claude-sonnet-4-20250514",
  "name": "Claude Sonnet 4",
  "api": "anthropic-messages",
  "provider": "anthropic",
  "baseUrl": "https://api.anthropic.com",
  "reasoning": true,
  "input": ["text", "image"],
  "contextWindow": 200000,
  "maxTokens": 16384,
  "cost": {
    "input": 3.0,
    "output": 15.0,
    "cacheRead": 0.3,
    "cacheWrite": 3.75
  }
}
```

### UserMessage

```json
{
  "role": "user",
  "content": "Hello!",
  "timestamp": 1733234567890,
  "attachments": []
}
```

The `content` field can be a string or an array of `TextContent`/`ImageContent` blocks.

### AssistantMessage

```json
{
  "role": "assistant",
  "content": [
    { "type": "text", "text": "Hello! How can I help?" },
    { "type": "thinking", "thinking": "User is greeting me..." },
    { "type": "toolCall", "id": "call_123", "name": "bash", "arguments": { "command": "ls" } }
  ],
  "api": "anthropic-messages",
  "provider": "anthropic",
  "model": "claude-sonnet-4-20250514",
  "usage": {
    "input": 100,
    "output": 50,
    "cacheRead": 0,
    "cacheWrite": 0,
    "cost": { "input": 0.0003, "output": 0.00075, "cacheRead": 0, "cacheWrite": 0, "total": 0.00105 }
  },
  "stopReason": "stop",
  "timestamp": 1733234567890
}
```

Stop reasons: `"stop"`, `"length"`, `"toolUse"`, `"error"`, `"aborted"`

### ToolResultMessage

```json
{
  "role": "toolResult",
  "toolCallId": "call_123",
  "toolName": "bash",
  "content": [{ "type": "text", "text": "total 48\ndrwxr-xr-x ..." }],
  "isError": false,
  "timestamp": 1733234567890
}
```

### BashExecutionMessage

Created by the `bash` RPC command (not by LLM tool calls):

```json
{
  "role": "bashExecution",
  "command": "ls -la",
  "output": "total 48\ndrwxr-xr-x ...",
  "exitCode": 0,
  "cancelled": false,
  "truncated": false,
  "timestamp": 1733234567890
}
```

### Attachment

```json
{
  "id": "img1",
  "type": "image",
  "fileName": "photo.jpg",
  "mimeType": "image/jpeg",
  "size": 102400,
  "content": "base64-encoded-data...",
  "extractedText": null,
  "preview": null
}
```

## Example: Basic Client (Python)

```python
import subprocess
import json
import jsonlines

proc = subprocess.Popen(
    ["omp", "--mode", "rpc", "--no-session"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True
)

def send(cmd):
    proc.stdin.write(json.dumps(cmd) + "\n")
    proc.stdin.flush()

def read_events():
    with jsonlines.Reader(proc.stdout) as reader:
        for event in reader:
            yield event

# Send prompt
send({"type": "prompt", "message": "Hello!"})

# Process events
for event in read_events():
    if event.get("type") == "message_update":
        delta = event.get("assistantMessageEvent", {})
        if delta.get("type") == "text_delta":
            print(delta["delta"], end="", flush=True)

    if event.get("type") == "agent_end":
        print()
        break
```

## Example: Interactive Client (Bun)

See [`test/rpc-example.ts`](../test/rpc-example.ts) for a complete interactive example, or [`src/modes/rpc/rpc-client.ts`](../src/modes/rpc/rpc-client.ts) for a typed client implementation.

```javascript
const agent = Bun.spawn(["omp", "--mode", "rpc", "--no-session"], {
  stdin: "pipe",
  stdout: "pipe",
});

const decoder = new TextDecoder();
let buffer = "";

async function readEvents() {
  const reader = agent.stdout.getReader();
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const result = Bun.JSONL.parseChunk(buffer);
    buffer = buffer.slice(result.read);
    for (const event of result.values) {
      if (event.type === "message_update") {
        const { assistantMessageEvent } = event;
        if (assistantMessageEvent.type === "text_delta") {
          process.stdout.write(assistantMessageEvent.delta);
        }
      }
    }
  }
}

readEvents();

// Send prompt
agent.stdin.write(JSON.stringify({ type: "prompt", message: "Hello" }) + "\n");

// Abort on Ctrl+C
process.on("SIGINT", () => {
  agent.stdin.write(JSON.stringify({ type: "abort" }) + "\n");
});
```