@mariozechner/pi-coding-agent 0.16.0 → 0.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. package/CHANGELOG.md +38 -0
  2. package/README.md +58 -1
  3. package/dist/cli/args.d.ts +1 -0
  4. package/dist/cli/args.d.ts.map +1 -1
  5. package/dist/cli/args.js +5 -0
  6. package/dist/cli/args.js.map +1 -1
  7. package/dist/config.d.ts +2 -0
  8. package/dist/config.d.ts.map +1 -1
  9. package/dist/config.js +4 -0
  10. package/dist/config.js.map +1 -1
  11. package/dist/core/agent-session.d.ts +30 -2
  12. package/dist/core/agent-session.d.ts.map +1 -1
  13. package/dist/core/agent-session.js +181 -21
  14. package/dist/core/agent-session.js.map +1 -1
  15. package/dist/core/compaction.d.ts +30 -5
  16. package/dist/core/compaction.d.ts.map +1 -1
  17. package/dist/core/compaction.js +194 -61
  18. package/dist/core/compaction.js.map +1 -1
  19. package/dist/core/hooks/index.d.ts +5 -0
  20. package/dist/core/hooks/index.d.ts.map +1 -0
  21. package/dist/core/hooks/index.js +4 -0
  22. package/dist/core/hooks/index.js.map +1 -0
  23. package/dist/core/hooks/loader.d.ts +56 -0
  24. package/dist/core/hooks/loader.d.ts.map +1 -0
  25. package/dist/core/hooks/loader.js +158 -0
  26. package/dist/core/hooks/loader.js.map +1 -0
  27. package/dist/core/hooks/runner.d.ts +69 -0
  28. package/dist/core/hooks/runner.d.ts.map +1 -0
  29. package/dist/core/hooks/runner.js +203 -0
  30. package/dist/core/hooks/runner.js.map +1 -0
  31. package/dist/core/hooks/tool-wrapper.d.ts +16 -0
  32. package/dist/core/hooks/tool-wrapper.d.ts.map +1 -0
  33. package/dist/core/hooks/tool-wrapper.js +71 -0
  34. package/dist/core/hooks/tool-wrapper.js.map +1 -0
  35. package/dist/core/hooks/types.d.ts +220 -0
  36. package/dist/core/hooks/types.d.ts.map +1 -0
  37. package/dist/core/hooks/types.js +8 -0
  38. package/dist/core/hooks/types.js.map +1 -0
  39. package/dist/core/index.d.ts +1 -0
  40. package/dist/core/index.d.ts.map +1 -1
  41. package/dist/core/index.js +1 -0
  42. package/dist/core/index.js.map +1 -1
  43. package/dist/core/session-manager.d.ts +10 -3
  44. package/dist/core/session-manager.d.ts.map +1 -1
  45. package/dist/core/session-manager.js +78 -28
  46. package/dist/core/session-manager.js.map +1 -1
  47. package/dist/core/settings-manager.d.ts +6 -0
  48. package/dist/core/settings-manager.d.ts.map +1 -1
  49. package/dist/core/settings-manager.js +14 -0
  50. package/dist/core/settings-manager.js.map +1 -1
  51. package/dist/core/system-prompt.d.ts.map +1 -1
  52. package/dist/core/system-prompt.js +5 -3
  53. package/dist/core/system-prompt.js.map +1 -1
  54. package/dist/core/tools/truncate.d.ts +6 -2
  55. package/dist/core/tools/truncate.d.ts.map +1 -1
  56. package/dist/core/tools/truncate.js +11 -1
  57. package/dist/core/tools/truncate.js.map +1 -1
  58. package/dist/index.d.ts +1 -0
  59. package/dist/index.d.ts.map +1 -1
  60. package/dist/index.js.map +1 -1
  61. package/dist/main.d.ts.map +1 -1
  62. package/dist/main.js +23 -12
  63. package/dist/main.js.map +1 -1
  64. package/dist/modes/interactive/components/bash-execution.d.ts +1 -0
  65. package/dist/modes/interactive/components/bash-execution.d.ts.map +1 -1
  66. package/dist/modes/interactive/components/bash-execution.js +17 -6
  67. package/dist/modes/interactive/components/bash-execution.js.map +1 -1
  68. package/dist/modes/interactive/components/hook-input.d.ts +12 -0
  69. package/dist/modes/interactive/components/hook-input.d.ts.map +1 -0
  70. package/dist/modes/interactive/components/hook-input.js +46 -0
  71. package/dist/modes/interactive/components/hook-input.js.map +1 -0
  72. package/dist/modes/interactive/components/hook-selector.d.ts +16 -0
  73. package/dist/modes/interactive/components/hook-selector.d.ts.map +1 -0
  74. package/dist/modes/interactive/components/hook-selector.js +76 -0
  75. package/dist/modes/interactive/components/hook-selector.js.map +1 -0
  76. package/dist/modes/interactive/components/tool-execution.d.ts.map +1 -1
  77. package/dist/modes/interactive/components/tool-execution.js +12 -7
  78. package/dist/modes/interactive/components/tool-execution.js.map +1 -1
  79. package/dist/modes/interactive/interactive-mode.d.ts +37 -0
  80. package/dist/modes/interactive/interactive-mode.d.ts.map +1 -1
  81. package/dist/modes/interactive/interactive-mode.js +190 -7
  82. package/dist/modes/interactive/interactive-mode.js.map +1 -1
  83. package/dist/modes/print-mode.d.ts.map +1 -1
  84. package/dist/modes/print-mode.js +15 -0
  85. package/dist/modes/print-mode.js.map +1 -1
  86. package/dist/modes/rpc/rpc-mode.d.ts +2 -1
  87. package/dist/modes/rpc/rpc-mode.d.ts.map +1 -1
  88. package/dist/modes/rpc/rpc-mode.js +118 -3
  89. package/dist/modes/rpc/rpc-mode.js.map +1 -1
  90. package/dist/modes/rpc/rpc-types.d.ts +41 -0
  91. package/dist/modes/rpc/rpc-types.d.ts.map +1 -1
  92. package/dist/modes/rpc/rpc-types.js.map +1 -1
  93. package/docs/compaction.md +519 -0
  94. package/docs/hooks.md +609 -0
  95. package/docs/rpc.md +870 -0
  96. package/docs/session.md +89 -0
  97. package/docs/theme.md +586 -0
  98. package/docs/truncation.md +235 -0
  99. package/docs/undercompaction.md +313 -0
  100. package/package.json +18 -6
package/docs/rpc.md ADDED
@@ -0,0 +1,870 @@
1
+ # RPC Mode
2
+
3
+ RPC mode enables headless operation of the coding agent via a JSON protocol over stdin/stdout. This is useful for embedding the agent in other applications, IDEs, or custom UIs.
4
+
5
+ **Note for Node.js/TypeScript users**: If you're building a Node.js application, consider using `AgentSession` directly from `@mariozechner/pi-coding-agent` instead of spawning a subprocess. See [`src/core/agent-session.ts`](../src/core/agent-session.ts) for the API. For a subprocess-based TypeScript client, see [`src/modes/rpc/rpc-client.ts`](../src/modes/rpc/rpc-client.ts).
6
+
7
+ ## Starting RPC Mode
8
+
9
+ ```bash
10
+ pi --mode rpc [options]
11
+ ```
12
+
13
+ Common options:
14
+ - `--provider <name>`: Set the LLM provider (anthropic, openai, google, etc.)
15
+ - `--model <id>`: Set the model ID
16
+ - `--no-session`: Disable session persistence
17
+ - `--session-dir <path>`: Custom session storage directory
18
+
19
+ ## Protocol Overview
20
+
21
+ - **Commands**: JSON objects sent to stdin, one per line
22
+ - **Responses**: JSON objects with `type: "response"` indicating command success/failure
23
+ - **Events**: Agent events streamed to stdout as JSON lines
24
+
25
+ All commands support an optional `id` field for request/response correlation. If provided, the corresponding response will include the same `id`.
26
+
27
+ ## Commands
28
+
29
+ ### Prompting
30
+
31
+ #### prompt
32
+
33
+ Send a user prompt to the agent. Returns immediately; events stream asynchronously.
34
+
35
+ ```json
36
+ {"id": "req-1", "type": "prompt", "message": "Hello, world!"}
37
+ ```
38
+
39
+ With attachments:
40
+ ```json
41
+ {"type": "prompt", "message": "What's in this image?", "attachments": [...]}
42
+ ```
43
+
44
+ Response:
45
+ ```json
46
+ {"id": "req-1", "type": "response", "command": "prompt", "success": true}
47
+ ```
48
+
49
+ The `attachments` field is optional. See [Attachments](#attachments) for the schema.
50
+
51
+ #### queue_message
52
+
53
+ Queue a message to be injected at the next agent turn. Queued messages are added to the conversation without triggering a new prompt. Useful for injecting context mid-conversation.
54
+
55
+ ```json
56
+ {"type": "queue_message", "message": "Additional context"}
57
+ ```
58
+
59
+ Response:
60
+ ```json
61
+ {"type": "response", "command": "queue_message", "success": true}
62
+ ```
63
+
64
+ See [set_queue_mode](#set_queue_mode) for controlling how queued messages are processed.
65
+
66
+ #### abort
67
+
68
+ Abort the current agent operation.
69
+
70
+ ```json
71
+ {"type": "abort"}
72
+ ```
73
+
74
+ Response:
75
+ ```json
76
+ {"type": "response", "command": "abort", "success": true}
77
+ ```
78
+
79
+ #### reset
80
+
81
+ Clear context and start a fresh session.
82
+
83
+ ```json
84
+ {"type": "reset"}
85
+ ```
86
+
87
+ Response:
88
+ ```json
89
+ {"type": "response", "command": "reset", "success": true}
90
+ ```
91
+
92
+ ### State
93
+
94
+ #### get_state
95
+
96
+ Get current session state.
97
+
98
+ ```json
99
+ {"type": "get_state"}
100
+ ```
101
+
102
+ Response:
103
+ ```json
104
+ {
105
+ "type": "response",
106
+ "command": "get_state",
107
+ "success": true,
108
+ "data": {
109
+ "model": {...},
110
+ "thinkingLevel": "medium",
111
+ "isStreaming": false,
112
+ "isCompacting": false,
113
+ "queueMode": "all",
114
+ "sessionFile": "/path/to/session.jsonl",
115
+ "sessionId": "abc123",
116
+ "autoCompactionEnabled": true,
117
+ "messageCount": 5,
118
+ "queuedMessageCount": 0
119
+ }
120
+ }
121
+ ```
122
+
123
+ The `model` field is a full [Model](#model) object or `null`.
124
+
125
+ #### get_messages
126
+
127
+ Get all messages in the conversation.
128
+
129
+ ```json
130
+ {"type": "get_messages"}
131
+ ```
132
+
133
+ Response:
134
+ ```json
135
+ {
136
+ "type": "response",
137
+ "command": "get_messages",
138
+ "success": true,
139
+ "data": {"messages": [...]}
140
+ }
141
+ ```
142
+
143
+ Messages are `AppMessage` objects (see [Message Types](#message-types)).
144
+
145
+ ### Model
146
+
147
+ #### set_model
148
+
149
+ Switch to a specific model.
150
+
151
+ ```json
152
+ {"type": "set_model", "provider": "anthropic", "modelId": "claude-sonnet-4-20250514"}
153
+ ```
154
+
155
+ Response contains the full [Model](#model) object:
156
+ ```json
157
+ {
158
+ "type": "response",
159
+ "command": "set_model",
160
+ "success": true,
161
+ "data": {...}
162
+ }
163
+ ```
164
+
165
+ #### cycle_model
166
+
167
+ Cycle to the next available model. Returns `null` data if only one model is available.
168
+
169
+ ```json
170
+ {"type": "cycle_model"}
171
+ ```
172
+
173
+ Response:
174
+ ```json
175
+ {
176
+ "type": "response",
177
+ "command": "cycle_model",
178
+ "success": true,
179
+ "data": {
180
+ "model": {...},
181
+ "thinkingLevel": "medium",
182
+ "isScoped": false
183
+ }
184
+ }
185
+ ```
186
+
187
+ The `model` field is a full [Model](#model) object.
188
+
189
+ #### get_available_models
190
+
191
+ List all configured models.
192
+
193
+ ```json
194
+ {"type": "get_available_models"}
195
+ ```
196
+
197
+ Response contains an array of full [Model](#model) objects:
198
+ ```json
199
+ {
200
+ "type": "response",
201
+ "command": "get_available_models",
202
+ "success": true,
203
+ "data": {
204
+ "models": [...]
205
+ }
206
+ }
207
+ ```
208
+
209
+ ### Thinking
210
+
211
+ #### set_thinking_level
212
+
213
+ Set the reasoning/thinking level for models that support it.
214
+
215
+ ```json
216
+ {"type": "set_thinking_level", "level": "high"}
217
+ ```
218
+
219
+ Levels: `"off"`, `"minimal"`, `"low"`, `"medium"`, `"high"`, `"xhigh"`
220
+
221
+ Note: `"xhigh"` is only supported by OpenAI codex-max models.
222
+
223
+ Response:
224
+ ```json
225
+ {"type": "response", "command": "set_thinking_level", "success": true}
226
+ ```
227
+
228
+ #### cycle_thinking_level
229
+
230
+ Cycle through available thinking levels. Returns `null` data if the model doesn't support thinking.
231
+
232
+ ```json
233
+ {"type": "cycle_thinking_level"}
234
+ ```
235
+
236
+ Response:
237
+ ```json
238
+ {
239
+ "type": "response",
240
+ "command": "cycle_thinking_level",
241
+ "success": true,
242
+ "data": {"level": "high"}
243
+ }
244
+ ```
245
+
246
+ ### Queue Mode
247
+
248
+ #### set_queue_mode
249
+
250
+ Control how queued messages (from `queue_message`) are injected into the conversation.
251
+
252
+ ```json
253
+ {"type": "set_queue_mode", "mode": "one-at-a-time"}
254
+ ```
255
+
256
+ Modes:
257
+ - `"all"`: Inject all queued messages at the next turn
258
+ - `"one-at-a-time"`: Inject one queued message per turn (default)
259
+
260
+ Response:
261
+ ```json
262
+ {"type": "response", "command": "set_queue_mode", "success": true}
263
+ ```
264
+
265
+ ### Compaction
266
+
267
+ #### compact
268
+
269
+ Manually compact conversation context to reduce token usage.
270
+
271
+ ```json
272
+ {"type": "compact"}
273
+ ```
274
+
275
+ With custom instructions:
276
+ ```json
277
+ {"type": "compact", "customInstructions": "Focus on code changes"}
278
+ ```
279
+
280
+ Response:
281
+ ```json
282
+ {
283
+ "type": "response",
284
+ "command": "compact",
285
+ "success": true,
286
+ "data": {
287
+ "tokensBefore": 150000,
288
+ "summary": "Summary of conversation..."
289
+ }
290
+ }
291
+ ```
292
+
293
+ #### set_auto_compaction
294
+
295
+ Enable or disable automatic compaction when context is nearly full.
296
+
297
+ ```json
298
+ {"type": "set_auto_compaction", "enabled": true}
299
+ ```
300
+
301
+ Response:
302
+ ```json
303
+ {"type": "response", "command": "set_auto_compaction", "success": true}
304
+ ```
305
+
306
+ ### Bash
307
+
308
+ #### bash
309
+
310
+ Execute a shell command and add output to conversation context.
311
+
312
+ ```json
313
+ {"type": "bash", "command": "ls -la"}
314
+ ```
315
+
316
+ Response:
317
+ ```json
318
+ {
319
+ "type": "response",
320
+ "command": "bash",
321
+ "success": true,
322
+ "data": {
323
+ "output": "total 48\ndrwxr-xr-x ...",
324
+ "exitCode": 0,
325
+ "cancelled": false,
326
+ "truncated": false
327
+ }
328
+ }
329
+ ```
330
+
331
+ If output was truncated, includes `fullOutputPath`:
332
+ ```json
333
+ {
334
+ "type": "response",
335
+ "command": "bash",
336
+ "success": true,
337
+ "data": {
338
+ "output": "truncated output...",
339
+ "exitCode": 0,
340
+ "cancelled": false,
341
+ "truncated": true,
342
+ "fullOutputPath": "/tmp/pi-bash-abc123.log"
343
+ }
344
+ }
345
+ ```
346
+
347
+ **How bash results reach the LLM:**
348
+
349
+ The `bash` command executes immediately and returns a `BashResult`. Internally, a `BashExecutionMessage` is created and stored in the agent's message state. This message does NOT emit an event.
350
+
351
+ When the next `prompt` command is sent, all messages (including `BashExecutionMessage`) are transformed before being sent to the LLM. The `BashExecutionMessage` is converted to a `UserMessage` with this format:
352
+
353
+ ```
354
+ Ran `ls -la`
355
+ \`\`\`
356
+ total 48
357
+ drwxr-xr-x ...
358
+ \`\`\`
359
+ ```
360
+
361
+ This means:
362
+ 1. Bash output is included in the LLM context on the **next prompt**, not immediately
363
+ 2. Multiple bash commands can be executed before a prompt; all outputs will be included
364
+ 3. No event is emitted for the `BashExecutionMessage` itself
365
+
366
+ #### abort_bash
367
+
368
+ Abort a running bash command.
369
+
370
+ ```json
371
+ {"type": "abort_bash"}
372
+ ```
373
+
374
+ Response:
375
+ ```json
376
+ {"type": "response", "command": "abort_bash", "success": true}
377
+ ```
378
+
379
+ ### Session
380
+
381
+ #### get_session_stats
382
+
383
+ Get token usage and cost statistics.
384
+
385
+ ```json
386
+ {"type": "get_session_stats"}
387
+ ```
388
+
389
+ Response:
390
+ ```json
391
+ {
392
+ "type": "response",
393
+ "command": "get_session_stats",
394
+ "success": true,
395
+ "data": {
396
+ "sessionFile": "/path/to/session.jsonl",
397
+ "sessionId": "abc123",
398
+ "userMessages": 5,
399
+ "assistantMessages": 5,
400
+ "toolCalls": 12,
401
+ "toolResults": 12,
402
+ "totalMessages": 22,
403
+ "tokens": {
404
+ "input": 50000,
405
+ "output": 10000,
406
+ "cacheRead": 40000,
407
+ "cacheWrite": 5000,
408
+ "total": 105000
409
+ },
410
+ "cost": 0.45
411
+ }
412
+ }
413
+ ```
414
+
415
+ #### export_html
416
+
417
+ Export session to an HTML file.
418
+
419
+ ```json
420
+ {"type": "export_html"}
421
+ ```
422
+
423
+ With custom path:
424
+ ```json
425
+ {"type": "export_html", "outputPath": "/tmp/session.html"}
426
+ ```
427
+
428
+ Response:
429
+ ```json
430
+ {
431
+ "type": "response",
432
+ "command": "export_html",
433
+ "success": true,
434
+ "data": {"path": "/tmp/session.html"}
435
+ }
436
+ ```
437
+
438
+ #### switch_session
439
+
440
+ Load a different session file.
441
+
442
+ ```json
443
+ {"type": "switch_session", "sessionPath": "/path/to/session.jsonl"}
444
+ ```
445
+
446
+ Response:
447
+ ```json
448
+ {"type": "response", "command": "switch_session", "success": true}
449
+ ```
450
+
451
+ #### branch
452
+
453
+ Create a new branch from a previous user message. Returns the text of the message being branched from.
454
+
455
+ ```json
456
+ {"type": "branch", "entryIndex": 2}
457
+ ```
458
+
459
+ Response:
460
+ ```json
461
+ {
462
+ "type": "response",
463
+ "command": "branch",
464
+ "success": true,
465
+ "data": {"text": "The original prompt text..."}
466
+ }
467
+ ```
468
+
469
+ #### get_branch_messages
470
+
471
+ Get user messages available for branching.
472
+
473
+ ```json
474
+ {"type": "get_branch_messages"}
475
+ ```
476
+
477
+ Response:
478
+ ```json
479
+ {
480
+ "type": "response",
481
+ "command": "get_branch_messages",
482
+ "success": true,
483
+ "data": {
484
+ "messages": [
485
+ {"entryIndex": 0, "text": "First prompt..."},
486
+ {"entryIndex": 2, "text": "Second prompt..."}
487
+ ]
488
+ }
489
+ }
490
+ ```
491
+
492
+ #### get_last_assistant_text
493
+
494
+ Get the text content of the last assistant message.
495
+
496
+ ```json
497
+ {"type": "get_last_assistant_text"}
498
+ ```
499
+
500
+ Response:
501
+ ```json
502
+ {
503
+ "type": "response",
504
+ "command": "get_last_assistant_text",
505
+ "success": true,
506
+ "data": {"text": "The assistant's response..."}
507
+ }
508
+ ```
509
+
510
+ Returns `{"text": null}` if no assistant messages exist.
511
+
512
+ ## Events
513
+
514
+ Events are streamed to stdout as JSON lines during agent operation. Events do NOT include an `id` field (only responses do).
515
+
516
+ ### Event Types
517
+
518
+ | Event | Description |
519
+ |-------|-------------|
520
+ | `agent_start` | Agent begins processing |
521
+ | `agent_end` | Agent completes (includes all generated messages) |
522
+ | `turn_start` | New turn begins |
523
+ | `turn_end` | Turn completes (includes assistant message and tool results) |
524
+ | `message_start` | Message begins |
525
+ | `message_update` | Streaming update (text/thinking/toolcall deltas) |
526
+ | `message_end` | Message completes |
527
+ | `tool_execution_start` | Tool begins execution |
528
+ | `tool_execution_end` | Tool completes |
529
+ | `auto_compaction_start` | Auto-compaction begins |
530
+ | `auto_compaction_end` | Auto-compaction completes |
531
+
532
+ ### agent_start
533
+
534
+ Emitted when the agent begins processing a prompt.
535
+
536
+ ```json
537
+ {"type": "agent_start"}
538
+ ```
539
+
540
+ ### agent_end
541
+
542
+ Emitted when the agent completes. Contains all messages generated during this run.
543
+
544
+ ```json
545
+ {
546
+ "type": "agent_end",
547
+ "messages": [...]
548
+ }
549
+ ```
550
+
551
+ ### turn_start / turn_end
552
+
553
+ A turn consists of one assistant response plus any resulting tool calls and results.
554
+
555
+ ```json
556
+ {"type": "turn_start"}
557
+ ```
558
+
559
+ ```json
560
+ {
561
+ "type": "turn_end",
562
+ "message": {...},
563
+ "toolResults": [...]
564
+ }
565
+ ```
566
+
567
+ ### message_start / message_end
568
+
569
+ Emitted when a message begins and completes. The `message` field contains an `AppMessage`.
570
+
571
+ ```json
572
+ {"type": "message_start", "message": {...}}
573
+ {"type": "message_end", "message": {...}}
574
+ ```
575
+
576
+ ### message_update (Streaming)
577
+
578
+ Emitted during streaming of assistant messages. Contains both the partial message and a streaming delta event.
579
+
580
+ ```json
581
+ {
582
+ "type": "message_update",
583
+ "message": {...},
584
+ "assistantMessageEvent": {
585
+ "type": "text_delta",
586
+ "contentIndex": 0,
587
+ "delta": "Hello ",
588
+ "partial": {...}
589
+ }
590
+ }
591
+ ```
592
+
593
+ The `assistantMessageEvent` field contains one of these delta types:
594
+
595
+ | Type | Description |
596
+ |------|-------------|
597
+ | `start` | Message generation started |
598
+ | `text_start` | Text content block started |
599
+ | `text_delta` | Text content chunk |
600
+ | `text_end` | Text content block ended |
601
+ | `thinking_start` | Thinking block started |
602
+ | `thinking_delta` | Thinking content chunk |
603
+ | `thinking_end` | Thinking block ended |
604
+ | `toolcall_start` | Tool call started |
605
+ | `toolcall_delta` | Tool call arguments chunk |
606
+ | `toolcall_end` | Tool call ended (includes full `toolCall` object) |
607
+ | `done` | Message complete (reason: `"stop"`, `"length"`, `"toolUse"`) |
608
+ | `error` | Error occurred (reason: `"aborted"`, `"error"`) |
609
+
610
+ Example streaming a text response:
611
+ ```json
612
+ {"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_start","contentIndex":0,"partial":{...}}}
613
+ {"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_delta","contentIndex":0,"delta":"Hello","partial":{...}}}
614
+ {"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_delta","contentIndex":0,"delta":" world","partial":{...}}}
615
+ {"type":"message_update","message":{...},"assistantMessageEvent":{"type":"text_end","contentIndex":0,"content":"Hello world","partial":{...}}}
616
+ ```
617
+
618
+ ### tool_execution_start / tool_execution_end
619
+
620
+ Emitted when a tool begins and completes execution.
621
+
622
+ ```json
623
+ {
624
+ "type": "tool_execution_start",
625
+ "toolCallId": "call_abc123",
626
+ "toolName": "bash",
627
+ "args": {"command": "ls -la"}
628
+ }
629
+ ```
630
+
631
+ ```json
632
+ {
633
+ "type": "tool_execution_end",
634
+ "toolCallId": "call_abc123",
635
+ "toolName": "bash",
636
+ "result": {
637
+ "content": [{"type": "text", "text": "total 48\n..."}],
638
+ "details": {...}
639
+ },
640
+ "isError": false
641
+ }
642
+ ```
643
+
644
+ Use `toolCallId` to correlate `tool_execution_start` with `tool_execution_end`.
645
+
646
+ ### auto_compaction_start / auto_compaction_end
647
+
648
+ Emitted when automatic compaction runs (when context is nearly full).
649
+
650
+ ```json
651
+ {"type": "auto_compaction_start"}
652
+ ```
653
+
654
+ ```json
655
+ {
656
+ "type": "auto_compaction_end",
657
+ "result": {
658
+ "tokensBefore": 150000,
659
+ "summary": "Summary of conversation..."
660
+ },
661
+ "aborted": false
662
+ }
663
+ ```
664
+
665
+ If compaction was aborted, `result` is `null` and `aborted` is `true`.
666
+
667
+ ## Error Handling
668
+
669
+ Failed commands return a response with `success: false`:
670
+
671
+ ```json
672
+ {
673
+ "type": "response",
674
+ "command": "set_model",
675
+ "success": false,
676
+ "error": "Model not found: invalid/model"
677
+ }
678
+ ```
679
+
680
+ Parse errors:
681
+
682
+ ```json
683
+ {
684
+ "type": "response",
685
+ "command": "parse",
686
+ "success": false,
687
+ "error": "Failed to parse command: Unexpected token..."
688
+ }
689
+ ```
690
+
691
+ ## Types
692
+
693
+ Source files:
694
+ - [`packages/ai/src/types.ts`](../../ai/src/types.ts) - `Model`, `UserMessage`, `AssistantMessage`, `ToolResultMessage`
695
+ - [`packages/agent/src/types.ts`](../../agent/src/types.ts) - `AppMessage`, `Attachment`, `AgentEvent`
696
+ - [`src/core/messages.ts`](../src/core/messages.ts) - `BashExecutionMessage`
697
+ - [`src/modes/rpc/rpc-types.ts`](../src/modes/rpc/rpc-types.ts) - RPC command/response types
698
+
699
+ ### Model
700
+
701
+ ```json
702
+ {
703
+ "id": "claude-sonnet-4-20250514",
704
+ "name": "Claude Sonnet 4",
705
+ "api": "anthropic-messages",
706
+ "provider": "anthropic",
707
+ "baseUrl": "https://api.anthropic.com",
708
+ "reasoning": true,
709
+ "input": ["text", "image"],
710
+ "contextWindow": 200000,
711
+ "maxTokens": 16384,
712
+ "cost": {
713
+ "input": 3.0,
714
+ "output": 15.0,
715
+ "cacheRead": 0.3,
716
+ "cacheWrite": 3.75
717
+ }
718
+ }
719
+ ```
720
+
721
+ ### UserMessage
722
+
723
+ ```json
724
+ {
725
+ "role": "user",
726
+ "content": "Hello!",
727
+ "timestamp": 1733234567890,
728
+ "attachments": []
729
+ }
730
+ ```
731
+
732
+ The `content` field can be a string or an array of `TextContent`/`ImageContent` blocks.
733
+
734
+ ### AssistantMessage
735
+
736
+ ```json
737
+ {
738
+ "role": "assistant",
739
+ "content": [
740
+ {"type": "text", "text": "Hello! How can I help?"},
741
+ {"type": "thinking", "thinking": "User is greeting me..."},
742
+ {"type": "toolCall", "id": "call_123", "name": "bash", "arguments": {"command": "ls"}}
743
+ ],
744
+ "api": "anthropic-messages",
745
+ "provider": "anthropic",
746
+ "model": "claude-sonnet-4-20250514",
747
+ "usage": {
748
+ "input": 100,
749
+ "output": 50,
750
+ "cacheRead": 0,
751
+ "cacheWrite": 0,
752
+ "cost": {"input": 0.0003, "output": 0.00075, "cacheRead": 0, "cacheWrite": 0, "total": 0.00105}
753
+ },
754
+ "stopReason": "stop",
755
+ "timestamp": 1733234567890
756
+ }
757
+ ```
758
+
759
+ Stop reasons: `"stop"`, `"length"`, `"toolUse"`, `"error"`, `"aborted"`
760
+
761
+ ### ToolResultMessage
762
+
763
+ ```json
764
+ {
765
+ "role": "toolResult",
766
+ "toolCallId": "call_123",
767
+ "toolName": "bash",
768
+ "content": [{"type": "text", "text": "total 48\ndrwxr-xr-x ..."}],
769
+ "isError": false,
770
+ "timestamp": 1733234567890
771
+ }
772
+ ```
773
+
774
+ ### BashExecutionMessage
775
+
776
+ Created by the `bash` RPC command (not by LLM tool calls):
777
+
778
+ ```json
779
+ {
780
+ "role": "bashExecution",
781
+ "command": "ls -la",
782
+ "output": "total 48\ndrwxr-xr-x ...",
783
+ "exitCode": 0,
784
+ "cancelled": false,
785
+ "truncated": false,
786
+ "fullOutputPath": null,
787
+ "timestamp": 1733234567890
788
+ }
789
+ ```
790
+
791
+ ### Attachment
792
+
793
+ ```json
794
+ {
795
+ "id": "img1",
796
+ "type": "image",
797
+ "fileName": "photo.jpg",
798
+ "mimeType": "image/jpeg",
799
+ "size": 102400,
800
+ "content": "base64-encoded-data...",
801
+ "extractedText": null,
802
+ "preview": null
803
+ }
804
+ ```
805
+
806
+ ## Example: Basic Client (Python)
807
+
808
+ ```python
809
+ import subprocess
810
+ import json
811
+
812
+ proc = subprocess.Popen(
813
+ ["pi", "--mode", "rpc", "--no-session"],
814
+ stdin=subprocess.PIPE,
815
+ stdout=subprocess.PIPE,
816
+ text=True
817
+ )
818
+
819
+ def send(cmd):
820
+ proc.stdin.write(json.dumps(cmd) + "\n")
821
+ proc.stdin.flush()
822
+
823
+ def read_events():
824
+ for line in proc.stdout:
825
+ yield json.loads(line)
826
+
827
+ # Send prompt
828
+ send({"type": "prompt", "message": "Hello!"})
829
+
830
+ # Process events
831
+ for event in read_events():
832
+ if event.get("type") == "message_update":
833
+ delta = event.get("assistantMessageEvent", {})
834
+ if delta.get("type") == "text_delta":
835
+ print(delta["delta"], end="", flush=True)
836
+
837
+ if event.get("type") == "agent_end":
838
+ print()
839
+ break
840
+ ```
841
+
842
+ ## Example: Interactive Client (Node.js)
843
+
844
+ See [`test/rpc-example.ts`](../test/rpc-example.ts) for a complete interactive example, or [`src/modes/rpc/rpc-client.ts`](../src/modes/rpc/rpc-client.ts) for a typed client implementation.
845
+
846
+ ```javascript
847
+ const { spawn } = require("child_process");
848
+ const readline = require("readline");
849
+
850
+ const agent = spawn("pi", ["--mode", "rpc", "--no-session"]);
851
+
852
+ readline.createInterface({ input: agent.stdout }).on("line", (line) => {
853
+ const event = JSON.parse(line);
854
+
855
+ if (event.type === "message_update") {
856
+ const { assistantMessageEvent } = event;
857
+ if (assistantMessageEvent.type === "text_delta") {
858
+ process.stdout.write(assistantMessageEvent.delta);
859
+ }
860
+ }
861
+ });
862
+
863
+ // Send prompt
864
+ agent.stdin.write(JSON.stringify({ type: "prompt", message: "Hello" }) + "\n");
865
+
866
+ // Abort on Ctrl+C
867
+ process.on("SIGINT", () => {
868
+ agent.stdin.write(JSON.stringify({ type: "abort" }) + "\n");
869
+ });
870
+ ```