juno-code 1.0.46 → 1.0.49

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/README.md +44 -8
  2. package/dist/bin/cli.d.mts +17 -0
  3. package/dist/bin/cli.d.ts +17 -0
  4. package/dist/bin/cli.js +5601 -17505
  5. package/dist/bin/cli.js.map +1 -1
  6. package/dist/bin/cli.mjs +5640 -17542
  7. package/dist/bin/cli.mjs.map +1 -1
  8. package/dist/bin/feedback-collector.d.mts +2 -0
  9. package/dist/bin/feedback-collector.d.ts +2 -0
  10. package/dist/bin/feedback-collector.js.map +1 -1
  11. package/dist/bin/feedback-collector.mjs.map +1 -1
  12. package/dist/index.d.mts +2107 -0
  13. package/dist/index.d.ts +2107 -0
  14. package/dist/index.js +3760 -14728
  15. package/dist/index.js.map +1 -1
  16. package/dist/index.mjs +3761 -14536
  17. package/dist/index.mjs.map +1 -1
  18. package/dist/templates/extensions/pi/juno-skill-preprocessor.ts +239 -0
  19. package/dist/templates/scripts/__pycache__/github.cpython-313.pyc +0 -0
  20. package/dist/templates/scripts/__pycache__/parallel_runner.cpython-313.pyc +0 -0
  21. package/dist/templates/scripts/__pycache__/slack_respond.cpython-313.pyc +0 -0
  22. package/dist/templates/scripts/kanban.sh +18 -4
  23. package/dist/templates/scripts/parallel_runner.sh +2242 -0
  24. package/dist/templates/services/README.md +61 -1
  25. package/dist/templates/services/__pycache__/claude.cpython-313.pyc +0 -0
  26. package/dist/templates/services/__pycache__/codex.cpython-313.pyc +0 -0
  27. package/dist/templates/services/__pycache__/pi.cpython-313.pyc +0 -0
  28. package/dist/templates/services/claude.py +132 -33
  29. package/dist/templates/services/codex.py +179 -66
  30. package/dist/templates/services/gemini.py +117 -27
  31. package/dist/templates/services/pi.py +1753 -0
  32. package/dist/templates/skills/claude/plan-kanban-tasks/SKILL.md +14 -7
  33. package/dist/templates/skills/claude/ralph-loop/SKILL.md +18 -22
  34. package/dist/templates/skills/claude/ralph-loop/references/first_check.md +15 -14
  35. package/dist/templates/skills/claude/ralph-loop/references/implement.md +17 -17
  36. package/dist/templates/skills/claude/ralph-loop/scripts/kanban.sh +18 -4
  37. package/dist/templates/skills/claude/understand-project/SKILL.md +14 -7
  38. package/dist/templates/skills/codex/ralph-loop/SKILL.md +18 -22
  39. package/dist/templates/skills/codex/ralph-loop/references/first_check.md +15 -14
  40. package/dist/templates/skills/codex/ralph-loop/references/implement.md +17 -17
  41. package/dist/templates/skills/codex/ralph-loop/scripts/kanban.sh +18 -4
  42. package/dist/templates/skills/pi/.gitkeep +0 -0
  43. package/dist/templates/skills/pi/plan-kanban-tasks/SKILL.md +32 -0
  44. package/dist/templates/skills/pi/ralph-loop/SKILL.md +39 -0
  45. package/dist/templates/skills/pi/ralph-loop/references/first_check.md +21 -0
  46. package/dist/templates/skills/pi/ralph-loop/references/implement.md +99 -0
  47. package/dist/templates/skills/pi/understand-project/SKILL.md +46 -0
  48. package/package.json +20 -42
  49. package/dist/templates/scripts/__pycache__/attachment_downloader.cpython-38.pyc +0 -0
  50. package/dist/templates/scripts/__pycache__/github.cpython-38.pyc +0 -0
  51. package/dist/templates/scripts/__pycache__/slack_fetch.cpython-38.pyc +0 -0
  52. package/dist/templates/scripts/__pycache__/slack_state.cpython-38.pyc +0 -0
  53. package/dist/templates/services/__pycache__/claude.cpython-38.pyc +0 -0
  54. package/dist/templates/services/__pycache__/codex.cpython-38.pyc +0 -0
@@ -0,0 +1,1753 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Pi Agent Service Script for juno-code
4
+ Headless wrapper around the Pi coding agent CLI with JSON streaming and shorthand model support.
5
+ """
6
+
7
+ import argparse
8
+ import json
9
+ import os
10
+ import subprocess
11
+ import sys
12
+ import threading
13
+ import time
14
+ from datetime import datetime
15
+ from pathlib import Path
16
+ from typing import Dict, List, Optional, Tuple
17
+
18
+
19
class PiService:
    """Service wrapper for Pi coding agent headless mode."""

    # Fallback model used when neither -m/--model nor PI_MODEL is given.
    DEFAULT_MODEL = "anthropic/claude-sonnet-4-6"

    # Model shorthands — Pi is multi-provider, so every shorthand expands to a
    # provider/model identifier. All colon-prefixed shorthands are expanded
    # before being handed to the pi CLI.
    MODEL_SHORTHANDS: Dict[str, str] = {
        # Meta shorthands
        ":pi": "anthropic/claude-sonnet-4-6",
        ":default": "anthropic/claude-sonnet-4-6",
        # Anthropic
        ":sonnet": "anthropic/claude-sonnet-4-6",
        ":opus": "anthropic/claude-opus-4-6",
        ":haiku": "anthropic/claude-haiku-4-5-20251001",
        # OpenAI
        ":gpt-5": "openai/gpt-5",
        ":gpt-4o": "openai/gpt-4o",
        ":o3": "openai/o3",
        ":codex": "openai/gpt-5.3-codex",
        # Google
        ":gemini-pro": "google/gemini-2.5-pro",
        ":gemini-flash": "google/gemini-2.5-flash",
        # Groq
        ":groq": "groq/llama-4-scout-17b-16e-instruct",
        # xAI
        ":grok": "xai/grok-3",
    }

    # Pi lifecycle stream events suppressed by default (noisy bookkeeping).
    DEFAULT_HIDDEN_STREAM_TYPES = {
        "auto_compaction_start",
        "auto_compaction_end",
        "auto_retry_start",
        "auto_retry_end",
        "session",
        "message_start",
        "message_end",
        "tool_execution_update",
    }

    # message_update sub-events suppressed as streaming-delta noise;
    # completion events (text_end, thinking_end, toolcall_end) are kept.
    _PI_HIDDEN_MESSAGE_UPDATE_EVENTS = {
        "text_delta",
        "text_start",
        "thinking_delta",
        "thinking_start",
        "toolcall_delta",
        "toolcall_start",
    }

    # Prettifier mode constants
    PRETTIFIER_PI = "pi"
    PRETTIFIER_CLAUDE = "claude"
    PRETTIFIER_CODEX = "codex"
    PRETTIFIER_LIVE = "live"
77
def __init__(self):
    """Initialize per-run state; env vars tune prettifier truncation limits."""
    env = os.environ.get
    # Core run configuration
    self.model_name = self.DEFAULT_MODEL
    self.project_path = os.getcwd()
    self.prompt = ""
    self.verbose = False
    # Streaming/session bookkeeping
    self.last_result_event: Optional[dict] = None
    self.session_id: Optional[str] = None
    self.message_counter = 0
    self.prettifier_mode = self.PRETTIFIER_PI
    # Claude prettifier state
    self.user_message_truncate = int(env("CLAUDE_USER_MESSAGE_PRETTY_TRUNCATE", "4"))
    # Codex prettifier state
    self._item_counter = 0
    self._codex_first_assistant_seen = False
    self._codex_tool_result_max_lines = int(env("PI_TOOL_RESULT_MAX_LINES", "6"))
    # Metadata keys hidden on intermediate assistant messages in Codex mode
    self._codex_metadata_keys = {"api", "provider", "model", "usage", "stopReason", "timestamp"}
95
def expand_model_shorthand(self, model: str) -> str:
    """Expand a colon-prefixed shorthand (e.g. ':sonnet') to its full id.

    Unknown shorthands and non-shorthand names pass through unchanged.
    """
    if not model.startswith(":"):
        return model
    return self.MODEL_SHORTHANDS.get(model, model)
101
def _detect_prettifier_mode(self, model: str) -> str:
    """Choose the output prettifier from the resolved model name.

    Pi CLI emits its own event protocol (message, turn_end, message_update,
    agent_end, ...) for every underlying LLM. The one exception is Codex
    models, which Pi wraps in Codex-format events (agent_reasoning,
    agent_message, exec_command_end). Claude models still use Pi's
    protocol, not Claude CLI events.
    """
    return self.PRETTIFIER_CODEX if "codex" in model.lower() else self.PRETTIFIER_PI
116
def check_pi_installed(self) -> bool:
    """Return True when the ``pi`` CLI is discoverable on PATH.

    The previous implementation shelled out to ``which pi``, which spawns
    a subprocess and fails on platforms without a ``which`` binary
    (e.g. Windows). ``shutil.which`` performs the same PATH lookup
    in-process and portably.
    """
    import shutil  # local import: keeps the module's import block unchanged

    try:
        return shutil.which("pi") is not None
    except Exception:
        # Preserve the original contract: any lookup failure means
        # "not installed" rather than a raised exception.
        return False
129
def parse_arguments(self) -> argparse.Namespace:
    """Parse command line arguments for the Pi service.

    Most defaults come from PI_* environment variables so the service can
    be configured without flags.
    """
    env = os.environ.get
    parser = argparse.ArgumentParser(
        description="Pi Agent Service - Wrapper for Pi coding agent headless mode",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s -p "Review this code" -m :sonnet
  %(prog)s -pp prompt.txt --model openai/gpt-4o
  %(prog)s -p "Refactor module" --thinking high
  %(prog)s -p "Fix bug" --provider anthropic --model claude-sonnet-4-5-20250929
  %(prog)s -p "Audit code" -m :gpt-5 --tools read,bash,edit

Model shorthands:
  :pi, :default -> anthropic/claude-sonnet-4-6
  :sonnet -> anthropic/claude-sonnet-4-6
  :opus -> anthropic/claude-opus-4-6
  :haiku -> anthropic/claude-haiku-4-5-20251001
  :gpt-5 -> openai/gpt-5
  :gpt-4o -> openai/gpt-4o
  :o3 -> openai/o3
  :codex -> openai/gpt-5.3-codex
  :gemini-pro -> google/gemini-2.5-pro
  :gemini-flash -> google/gemini-2.5-flash
  :groq -> groq/llama-4-scout-17b-16e-instruct
  :grok -> xai/grok-3
""",
    )

    # The prompt may come from a flag, a file, or (elsewhere) stdin — at
    # most one of the two flags may be used.
    prompt_group = parser.add_mutually_exclusive_group(required=False)
    prompt_group.add_argument("-p", "--prompt", type=str, help="Prompt text to send to Pi")
    prompt_group.add_argument("-pp", "--prompt-file", type=str, help="Path to file containing the prompt")

    parser.add_argument(
        "--cd",
        type=str,
        default=env("PI_PROJECT_PATH", os.getcwd()),
        help="Project path (absolute). Default: current directory (env: PI_PROJECT_PATH)",
    )
    parser.add_argument(
        "-m",
        "--model",
        type=str,
        default=env("PI_MODEL", self.DEFAULT_MODEL),
        help=(
            "Model name. Supports shorthands (:pi, :sonnet, :opus, :gpt-5, :gemini-pro, etc.) "
            f"or provider/model format. Default: {self.DEFAULT_MODEL} (env: PI_MODEL)"
        ),
    )
    parser.add_argument(
        "--provider",
        type=str,
        default=env("PI_PROVIDER", ""),
        help="LLM provider (anthropic, openai, google, etc.). Overrides provider in model string. (env: PI_PROVIDER)",
    )
    parser.add_argument(
        "--thinking",
        type=str,
        choices=["off", "minimal", "low", "medium", "high", "xhigh"],
        default=env("PI_THINKING", None),
        help="Thinking level (off/minimal/low/medium/high/xhigh). (env: PI_THINKING)",
    )
    parser.add_argument(
        "--tools",
        type=str,
        default=env("PI_TOOLS", None),
        help="Comma-separated tool list (read,bash,edit,write,grep,find,ls). (env: PI_TOOLS)",
    )
    parser.add_argument("--no-tools", action="store_true", help="Disable all built-in Pi tools.")
    parser.add_argument(
        "--system-prompt",
        type=str,
        default=env("PI_SYSTEM_PROMPT", None),
        help="Replace Pi's system prompt with custom text. (env: PI_SYSTEM_PROMPT)",
    )
    parser.add_argument(
        "--append-system-prompt",
        type=str,
        default=env("PI_APPEND_SYSTEM_PROMPT", None),
        help="Append to Pi's default system prompt. (env: PI_APPEND_SYSTEM_PROMPT)",
    )
    parser.add_argument("--no-extensions", action="store_true", help="Disable Pi extensions.")
    parser.add_argument("--no-skills", action="store_true", help="Disable Pi skills.")
    parser.add_argument(
        "--no-session",
        action="store_true",
        default=env("PI_NO_SESSION", "false").lower() == "true",
        help="Disable session persistence (ephemeral mode). (env: PI_NO_SESSION)",
    )
    parser.add_argument(
        "--resume",
        type=str,
        default=None,
        help="Resume a previous session by session ID. Passed to Pi CLI as --session <id>.",
    )
    parser.add_argument(
        "--auto-instruction",
        type=str,
        default=env("PI_AUTO_INSTRUCTION", ""),
        help="Instruction text prepended to the prompt. (env: PI_AUTO_INSTRUCTION)",
    )
    parser.add_argument(
        "--additional-args",
        type=str,
        default="",
        help="Space-separated additional pi CLI arguments to append.",
    )
    parser.add_argument(
        "--pretty",
        type=str,
        default=env("PI_PRETTY", "true"),
        help="Pretty-print JSON output (true/false). Default: true (env: PI_PRETTY)",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        default=env("PI_VERBOSE", "false").lower() == "true",
        help="Verbose mode: print command before execution and enable live stream output with real-time text streaming. (env: PI_VERBOSE)",
    )

    return parser.parse_args()
278
def read_prompt_file(self, file_path: str) -> str:
    """Return the stripped contents of *file_path*; exit(1) on any failure."""
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            content = handle.read()
    except FileNotFoundError:
        print(f"Error: Prompt file not found: {file_path}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading prompt file: {e}", file=sys.stderr)
        sys.exit(1)
    return content.strip()
290
def build_pi_command(self, args: argparse.Namespace) -> Tuple[List[str], Optional[str]]:
    """Construct the Pi CLI command for headless JSON streaming execution.

    Returns ``(cmd, stdin_prompt)``: ``cmd`` is the argument list and
    ``stdin_prompt`` is the prompt to pipe via stdin, or None when the
    prompt rides along as a positional ``-p`` argument. Multiline or large
    prompts go through stdin so Pi reads them without shell-quoting issues.
    """
    cmd = ["pi", "--mode", "json"]

    # Resolve provider/model: an explicit --provider wins; otherwise a
    # "provider/model-id" string is split into its two halves.
    model = self.model_name
    provider = args.provider.strip() if args.provider else ""
    if "/" in model and not provider:
        provider, model = model.split("/", 1)
    if provider:
        cmd += ["--provider", provider]
    cmd += ["--model", model]

    if args.thinking:
        cmd += ["--thinking", args.thinking]

    # Tool control: --no-tools beats an explicit tool list.
    if args.no_tools:
        cmd.append("--no-tools")
    elif args.tools:
        cmd += ["--tools", args.tools]

    # System prompt: full replacement beats appending.
    if args.system_prompt:
        cmd += ["--system-prompt", args.system_prompt]
    elif args.append_system_prompt:
        cmd += ["--append-system-prompt", args.append_system_prompt]

    if args.no_extensions:
        cmd.append("--no-extensions")
    if args.no_skills:
        cmd.append("--no-skills")

    # Session control: resuming an existing session beats ephemeral mode.
    if getattr(args, "resume", None):
        cmd += ["--session", args.resume]
    elif args.no_session:
        cmd.append("--no-session")

    # Optional instruction prefix.
    full_prompt = self.prompt
    if args.auto_instruction:
        full_prompt = f"{args.auto_instruction}\n\n{full_prompt}"

    # Pi auto-enables print mode when stdin carries data, so multiline or
    # oversized prompts are piped; short single-line prompts use "-p".
    stdin_prompt: Optional[str] = None
    if "\n" in full_prompt or len(full_prompt) > 4096:
        stdin_prompt = full_prompt
    else:
        cmd += ["-p", full_prompt]

    # Raw passthrough arguments, whitespace-split.
    if args.additional_args:
        extra = args.additional_args.strip().split()
        if extra:
            cmd += extra

    return cmd, stdin_prompt
369
+ # ── Codex prettifier helpers ──────────────────────────────────────────
370
+
371
def _first_nonempty_str(self, *values) -> str:
    """Return the first argument that is a non-empty str, else ''."""
    return next((v for v in values if isinstance(v, str) and v != ""), "")
378
def _extract_content_text(self, payload: dict) -> str:
    """Join text-like fields from a content array (item.* schema).

    Non-dict entries are skipped; text/message/output_text/input_text are
    tried in that order per entry.
    """
    content = payload.get("content") if isinstance(payload, dict) else None
    if not isinstance(content, list):
        return ""
    texts: List[str] = []
    for entry in content:
        if not isinstance(entry, dict):
            continue
        candidate = (
            entry.get("text")
            or entry.get("message")
            or entry.get("output_text")
            or entry.get("input_text")
        )
        if isinstance(candidate, str) and candidate != "":
            texts.append(candidate)
    return "\n".join(texts)
396
def _extract_command_output_text(self, payload: dict) -> str:
    """Pull aggregated/command output from the various item.* layouts.

    Tries top-level fields, then the nested ``result`` object, and
    finally falls back to joined content text.
    """
    if not isinstance(payload, dict):
        return ""
    nested = payload.get("result")
    nested = nested if isinstance(nested, dict) else None
    fallback_text = self._extract_content_text(payload)
    return self._first_nonempty_str(
        payload.get("aggregated_output"),
        payload.get("output"),
        payload.get("formatted_output"),
        nested.get("aggregated_output") if nested else None,
        nested.get("output") if nested else None,
        nested.get("formatted_output") if nested else None,
        fallback_text,
    )
412
def _extract_reasoning_text(self, payload: dict) -> str:
    """Pull reasoning text from both legacy and item.* schemas."""
    if not isinstance(payload, dict):
        return ""
    reasoning = payload.get("reasoning")
    reasoning = reasoning if isinstance(reasoning, dict) else None
    nested = payload.get("result")
    nested = nested if isinstance(nested, dict) else None
    fallback_text = self._extract_content_text(payload)
    return self._first_nonempty_str(
        payload.get("text"),
        payload.get("reasoning_text"),
        reasoning.get("text") if reasoning else None,
        nested.get("text") if nested else None,
        fallback_text,
    )
427
def _extract_message_text_codex(self, payload: dict) -> str:
    """Pull the final/assistant message text from item.* schemas."""
    if not isinstance(payload, dict):
        return ""
    nested = payload.get("result")
    nested = nested if isinstance(nested, dict) else None
    fallback_text = self._extract_content_text(payload)
    return self._first_nonempty_str(
        payload.get("message"),
        payload.get("text"),
        payload.get("final"),
        nested.get("message") if nested else None,
        nested.get("text") if nested else None,
        fallback_text,
    )
442
def _normalize_codex_event(self, obj_dict: dict):
    """Normalize legacy (msg-based) and new item.* schemas to one tuple.

    Returns (msg_type, payload, outer_type): the legacy ``msg`` object
    wins when present; otherwise the ``item`` object or the outer type.
    """
    raw_msg = obj_dict.get("msg")
    msg = raw_msg if isinstance(raw_msg, dict) else {}
    outer_type = (obj_dict.get("type") or "").strip()
    raw_item = obj_dict.get("item")
    item = raw_item if isinstance(raw_item, dict) else None

    msg_type = (msg.get("type") or "").strip()
    payload = msg
    if not msg_type and item is not None:
        msg_type = (item.get("type") or "").strip() or outer_type
        payload = item
    elif not msg_type:
        msg_type = outer_type

    return msg_type, payload, outer_type
459
def _normalize_item_id(self, payload: dict, outer_type: str) -> Optional[str]:
    """Prefer an existing id on item.* payloads; else synthesize item_{n}.

    Seeing an ``item_{n}`` id bumps the internal counter past n so later
    synthesized ids never collide with observed ones.
    """
    item_id = payload.get("id") if isinstance(payload, dict) else None
    if isinstance(item_id, str) and item_id.strip():
        num = self._parse_item_number(item_id)
        if num is not None and num + 1 > self._item_counter:
            self._item_counter = num + 1
        return item_id.strip()

    if isinstance(outer_type, str) and outer_type.startswith("item."):
        new_id = f"item_{self._item_counter}"
        self._item_counter += 1
        return new_id

    return None
475
def _parse_item_number(self, item_id: str) -> Optional[int]:
    """Return the numeric part of an ``item_{n}`` id, or None if unparseable."""
    if not isinstance(item_id, str):
        return None
    trimmed = item_id.strip()
    if not trimmed.startswith("item_"):
        return None
    try:
        return int(trimmed.split("item_", 1)[1])
    except Exception:
        return None
487
def _strip_thinking_signature(self, content_list: list) -> list:
    """Drop signature/encrypted fields from each dict in a content list.

    Mutates the list's dict entries in place and returns the same list.
    """
    if not isinstance(content_list, list):
        return content_list
    for entry in content_list:
        if isinstance(entry, dict):
            for key in ("thinkingSignature", "textSignature", "encrypted_content"):
                entry.pop(key, None)
    return content_list
498
def _sanitize_codex_event(self, obj: dict, strip_metadata: bool = True) -> dict:
    """Deep-sanitize a Codex event in place and return it.

    Removes thinking/text signatures and encrypted_content from nested
    content arrays, optionally drops metadata keys at each dict level,
    and recurses into Pi wrappers ('partial', 'message',
    'assistantMessageEvent') as well as content-array items (those are
    recursed with strip_metadata=False so their metadata survives).
    """
    if not isinstance(obj, dict):
        return obj

    # Signatures in the top-level content array.
    if isinstance(obj.get("content"), list):
        self._strip_thinking_signature(obj["content"])

    # Encrypted material at this level.
    obj.pop("encrypted_content", None)
    obj.pop("textSignature", None)

    # Metadata keys at this level (skipped when sanitizing content items).
    if strip_metadata:
        for key in self._codex_metadata_keys:
            obj.pop(key, None)

    # Pi-wrapped nested message containers.
    for container in ("partial", "message", "assistantMessageEvent"):
        child = obj.get(container)
        if isinstance(child, dict):
            self._sanitize_codex_event(child, strip_metadata)

    # Individual content items: strip encrypted fields and recurse.
    children = obj.get("content")
    if isinstance(children, list):
        for child in children:
            if isinstance(child, dict):
                child.pop("encrypted_content", None)
                child.pop("thinkingSignature", None)
                self._sanitize_codex_event(child, strip_metadata=False)

    return obj
541
def _truncate_tool_result_text(self, text: str) -> str:
    """Cap tool-result text at the configured line limit.

    JSON-escaped newlines/tabs are unescaped first so the display is
    human-readable; truncated output reports how many characters were
    dropped. Non-string input passes through untouched.
    """
    if not isinstance(text, str):
        return text
    display = text.replace("\\n", "\n").replace("\\t", "\t")
    lines = display.split("\n")
    limit = self._codex_tool_result_max_lines
    if len(lines) <= limit:
        return display
    kept = "\n".join(lines[:limit])
    hidden = "\n".join(lines[limit:])
    return f"{kept}\n[{len(hidden)} characters remaining]"
556
def _is_codex_final_message(self, parsed: dict) -> bool:
    """True when this is the final assistant message.

    Final means stopReason == 'stop' or any content entry of type 'text'.
    """
    if not isinstance(parsed, dict):
        return False
    if parsed.get("stopReason") == "stop":
        return True
    content = parsed.get("content")
    return isinstance(content, list) and any(
        isinstance(entry, dict) and entry.get("type") == "text" for entry in content
    )
569
def _format_pi_codex_message(self, parsed: dict) -> Optional[str]:
    """Format a Pi-wrapped Codex message (role-based with content arrays).

    toolResult messages get their text truncated; assistant messages get
    thinking signatures stripped and metadata hidden unless this is the
    first or final assistant message. Returns None for any other role.
    """
    if not isinstance(parsed, dict):
        return None

    role = parsed.get("role", "")
    stamp = datetime.now().strftime("%I:%M:%S %p")
    # The counter advances for every message seen, handled or not.
    self.message_counter += 1

    # --- toolResult role: truncate text content ---
    if role == "toolResult":
        head: Dict = {
            "type": "toolResult",
            "datetime": stamp,
            "counter": f"#{self.message_counter}",
            "toolName": parsed.get("toolName", ""),
        }
        if parsed.get("isError", False):
            head["isError"] = True
        body = parsed.get("content")
        if isinstance(body, list):
            for chunk in body:
                if isinstance(chunk, dict) and chunk.get("type") == "text":
                    short = self._truncate_tool_result_text(chunk.get("text", ""))
                    # Multiline content is rendered below the JSON header.
                    if "\n" in short:
                        return json.dumps(head, ensure_ascii=False) + "\ncontent:\n" + short
                    head["content"] = short
                    return json.dumps(head, ensure_ascii=False)
        return json.dumps(head, ensure_ascii=False)

    # --- assistant role: strip signatures and manage metadata ---
    if role == "assistant":
        body = parsed.get("content")
        if isinstance(body, list):
            self._strip_thinking_signature(body)

        is_final = self._is_codex_final_message(parsed)
        is_first = not self._codex_first_assistant_seen
        self._codex_first_assistant_seen = True
        # Metadata is visible only on the first and final assistant messages.
        show_metadata = is_first or is_final

        # Filtered copy, used only as the fallback rendering.
        display: Dict = {
            k: v
            for k, v in parsed.items()
            if show_metadata or k not in self._codex_metadata_keys
        }
        display["datetime"] = stamp
        display["counter"] = f"#{self.message_counter}"

        if isinstance(body, list):
            pieces = []
            for chunk in body:
                if not isinstance(chunk, dict):
                    continue
                kind = chunk.get("type")
                if kind == "thinking":
                    note = chunk.get("thinking", "")
                    if note:
                        pieces.append(f"[thinking] {note}")
                elif kind == "toolCall":
                    tool = chunk.get("name", "")
                    call_args = chunk.get("arguments", {})
                    if isinstance(call_args, dict):
                        command = call_args.get("command", "")
                        if command:
                            pieces.append(f"[toolCall] {tool}: {command}")
                        else:
                            rendered = json.dumps(call_args, ensure_ascii=False)
                            if len(rendered) > 200:
                                rendered = rendered[:200] + "..."
                            pieces.append(f"[toolCall] {tool}: {rendered}")
                    else:
                        pieces.append(f"[toolCall] {tool}")
                elif kind == "text":
                    text_val = chunk.get("text", "")
                    if text_val:
                        pieces.append(text_val)

            if pieces:
                joined = "\n".join(pieces)
                head: Dict = {"type": "assistant", "datetime": stamp, "counter": f"#{self.message_counter}"}
                if show_metadata:
                    for mk in ("api", "provider", "model", "stopReason"):
                        if mk in parsed:
                            head[mk] = parsed[mk]
                # usage only accompanies the final message (which always
                # has show_metadata set, so placement here is equivalent).
                if "usage" in parsed and is_final:
                    head["usage"] = parsed["usage"]
                if "\n" in joined:
                    return json.dumps(head, ensure_ascii=False) + "\ncontent:\n" + joined
                head["content"] = joined
                return json.dumps(head, ensure_ascii=False)

        # Fallback: dump the filtered display object.
        return json.dumps(display, ensure_ascii=False)

    return None
679
# message_update sub-events suppressed as streaming-delta noise in Codex mode.
_CODEX_HIDDEN_MESSAGE_UPDATE_SUBTYPES = {
    "text_delta",
    "text_start",
    "thinking_delta",
    "thinking_start",
    "toolcall_delta",
    "toolcall_start",
}
686
+ def _format_pi_codex_event(self, parsed: dict) -> Optional[str]:
687
+ """Format Pi-wrapped events when in Codex prettifier mode.
688
+
689
+ Handles Pi event types (message_update, turn_end, message_start, etc.)
690
+ that wrap Codex-style content. Returns formatted string, empty string
691
+ to suppress, or None if this method doesn't handle the event type.
692
+ """
693
+ event_type = parsed.get("type", "")
694
+ if not event_type:
695
+ return None
696
+
697
+ now = datetime.now().strftime("%I:%M:%S %p")
698
+
699
+ # --- message_update: filter by assistantMessageEvent subtype ---
700
+ if event_type == "message_update":
701
+ ame = parsed.get("assistantMessageEvent", {})
702
+ if isinstance(ame, dict):
703
+ ame_type = ame.get("type", "")
704
+
705
+ # Suppress noisy streaming delta/start events
706
+ if ame_type in self._CODEX_HIDDEN_MESSAGE_UPDATE_SUBTYPES:
707
+ return "" # suppress
708
+
709
+ # text_end: show the complete text content
710
+ if ame_type == "text_end":
711
+ self.message_counter += 1
712
+ content_text = ame.get("content", "")
713
+ header: Dict = {
714
+ "type": "text_end",
715
+ "datetime": now,
716
+ "counter": f"#{self.message_counter}",
717
+ }
718
+ if isinstance(content_text, str) and content_text.strip():
719
+ if "\n" in content_text:
720
+ return json.dumps(header, ensure_ascii=False) + "\ncontent:\n" + content_text
721
+ header["content"] = content_text
722
+ return json.dumps(header, ensure_ascii=False)
723
+
724
+ # thinking_end: show the final thinking summary
725
+ if ame_type == "thinking_end":
726
+ self.message_counter += 1
727
+ thinking_text = ame.get("content", "")
728
+ header = {
729
+ "type": "thinking_end",
730
+ "datetime": now,
731
+ "counter": f"#{self.message_counter}",
732
+ }
733
+ if isinstance(thinking_text, str) and thinking_text.strip():
734
+ header["thinking"] = thinking_text
735
+ return json.dumps(header, ensure_ascii=False)
736
+
737
+ # toolcall_end: show tool name and arguments
738
+ if ame_type == "toolcall_end":
739
+ self.message_counter += 1
740
+ tool_call = ame.get("toolCall", {})
741
+ header = {
742
+ "type": "toolcall_end",
743
+ "datetime": now,
744
+ "counter": f"#{self.message_counter}",
745
+ }
746
+ if isinstance(tool_call, dict):
747
+ header["tool"] = tool_call.get("name", "")
748
+ args = tool_call.get("arguments", {})
749
+ if isinstance(args, dict):
750
+ cmd = args.get("command", "")
751
+ if cmd:
752
+ header["command"] = cmd
753
+ else:
754
+ args_str = json.dumps(args, ensure_ascii=False)
755
+ if len(args_str) > 200:
756
+ args_str = args_str[:200] + "..."
757
+ header["args"] = args_str if isinstance(args_str, str) else args
758
+ return json.dumps(header, ensure_ascii=False)
759
+
760
+ # Other message_update subtypes: suppress by default
761
+ return ""
762
+
763
+ # --- turn_end: metadata only (text already shown by text_end/thinking_end/toolcall_end) ---
764
+ if event_type == "turn_end":
765
+ self.message_counter += 1
766
+ header = {
767
+ "type": "turn_end",
768
+ "datetime": now,
769
+ "counter": f"#{self.message_counter}",
770
+ }
771
+ tool_results = parsed.get("toolResults")
772
+ if isinstance(tool_results, list):
773
+ header["tool_results_count"] = len(tool_results)
774
+ return json.dumps(header, ensure_ascii=False)
775
+
776
+ # --- message_start: minimal header ---
777
+ if event_type == "message_start":
778
+ self.message_counter += 1
779
+ message = parsed.get("message", {})
780
+ header = {
781
+ "type": "message_start",
782
+ "datetime": now,
783
+ "counter": f"#{self.message_counter}",
784
+ }
785
+ if isinstance(message, dict):
786
+ role = message.get("role")
787
+ if role:
788
+ header["role"] = role
789
+ return json.dumps(header, ensure_ascii=False)
790
+
791
+ # --- message_end: metadata only (text already shown by text_end/thinking_end/toolcall_end) ---
792
+ if event_type == "message_end":
793
+ self.message_counter += 1
794
+ header = {
795
+ "type": "message_end",
796
+ "datetime": now,
797
+ "counter": f"#{self.message_counter}",
798
+ }
799
+ return json.dumps(header, ensure_ascii=False)
800
+
801
+ # --- tool_execution_start ---
802
+ if event_type == "tool_execution_start":
803
+ self.message_counter += 1
804
+ header = {
805
+ "type": "tool_execution_start",
806
+ "datetime": now,
807
+ "counter": f"#{self.message_counter}",
808
+ "tool": parsed.get("toolName", ""),
809
+ }
810
+ args_val = parsed.get("args")
811
+ if isinstance(args_val, dict):
812
+ args_str = json.dumps(args_val, ensure_ascii=False)
813
+ if len(args_str) > 200:
814
+ header["args"] = args_str[:200] + "..."
815
+ else:
816
+ header["args"] = args_val
817
+ return json.dumps(header, ensure_ascii=False)
818
+
819
+ # --- tool_execution_end ---
820
+ if event_type == "tool_execution_end":
821
+ self.message_counter += 1
822
+ header = {
823
+ "type": "tool_execution_end",
824
+ "datetime": now,
825
+ "counter": f"#{self.message_counter}",
826
+ "tool": parsed.get("toolName", ""),
827
+ }
828
+ is_error = parsed.get("isError", False)
829
+ if is_error:
830
+ header["isError"] = True
831
+ result_val = parsed.get("result")
832
+ if isinstance(result_val, dict):
833
+ # Extract text content from result
834
+ result_content = result_val.get("content")
835
+ if isinstance(result_content, list):
836
+ for rc_item in result_content:
837
+ if isinstance(rc_item, dict) and rc_item.get("type") == "text":
838
+ text = rc_item.get("text", "")
839
+ truncated = self._truncate_tool_result_text(text)
840
+ if "\n" in truncated:
841
+ return json.dumps(header, ensure_ascii=False) + "\nresult:\n" + truncated
842
+ header["result"] = truncated
843
+ return json.dumps(header, ensure_ascii=False)
844
+ return json.dumps(header, ensure_ascii=False)
845
+
846
+ # --- agent_start, turn_start: simple headers ---
847
+ if event_type in ("agent_start", "turn_start"):
848
+ self.message_counter += 1
849
+ return json.dumps({
850
+ "type": event_type,
851
+ "datetime": now,
852
+ "counter": f"#{self.message_counter}",
853
+ }, ensure_ascii=False)
854
+
855
+ # --- agent_end: capture and show summary ---
856
+ if event_type == "agent_end":
857
+ self.message_counter += 1
858
+ header = {
859
+ "type": "agent_end",
860
+ "datetime": now,
861
+ "counter": f"#{self.message_counter}",
862
+ }
863
+ messages = parsed.get("messages")
864
+ if isinstance(messages, list):
865
+ header["message_count"] = len(messages)
866
+ return json.dumps(header, ensure_ascii=False)
867
+
868
+ # Not a Pi-wrapped event type we handle
869
+ return None
870
+
871
def _format_event_pretty_codex(self, payload: dict) -> Optional[str]:
    """Format a Codex-schema JSON event for human-readable output.

    The event is normalized into ``(msg_type, msg_payload, outer_type)``
    and rendered as a compact one-line JSON header; multiline bodies
    (reasoning text, messages, command output) are appended after the
    header on their own lines so logs stay greppable.

    Returns the formatted string, or None when the event is not a shape
    this formatter renders (the caller falls back to raw JSON). Any
    internal error also yields None rather than raising.
    """
    try:
        msg_type, msg_payload, outer_type = self._normalize_codex_event(payload)
        item_id = self._normalize_item_id(msg_payload, outer_type)

        now = datetime.now().strftime("%I:%M:%S %p")
        # Counter is advanced even though this formatter does not emit it,
        # presumably to keep numbering aligned with the other prettifiers.
        self.message_counter += 1
        header_type = (outer_type or msg_type).strip()
        base_type = header_type or msg_type or "message"

        def make_header(type_value: str) -> Dict:
            # Base header: type + timestamp, plus id/item_type when known.
            hdr: Dict = {"type": type_value, "datetime": now}
            if item_id:
                hdr["id"] = item_id
            if outer_type and msg_type and outer_type != msg_type:
                hdr["item_type"] = msg_type
            return hdr

        header = make_header(base_type)

        if isinstance(msg_payload, dict):
            # Backfill the normalized id onto the payload for downstream use.
            if item_id and "id" not in msg_payload:
                msg_payload["id"] = item_id
            if msg_payload.get("command"):
                header["command"] = msg_payload.get("command")
            if msg_payload.get("status"):
                header["status"] = msg_payload.get("status")
            # "state" is only used as a status fallback when no "status" field exists.
            if msg_payload.get("state") and not header.get("status"):
                header["status"] = msg_payload.get("state")

        # agent_reasoning — NOTE: header is rebuilt here, intentionally
        # dropping any command/status fields collected above.
        if msg_type in {"agent_reasoning", "reasoning"}:
            content = self._extract_reasoning_text(msg_payload)
            header = make_header(header_type or msg_type)
            if "\n" in content:
                return json.dumps(header, ensure_ascii=False) + "\ntext:\n" + content
            header["text"] = content
            return json.dumps(header, ensure_ascii=False)

        # agent_message / assistant — header likewise rebuilt fresh.
        if msg_type in {"agent_message", "message", "assistant_message", "assistant"}:
            content = self._extract_message_text_codex(msg_payload)
            header = make_header(header_type or msg_type)
            if "\n" in content:
                return json.dumps(header, ensure_ascii=False) + "\nmessage:\n" + content
            if content != "":
                header["message"] = content
                return json.dumps(header, ensure_ascii=False)
            # Empty content: only emit a header when we have an outer type;
            # otherwise fall through to the remaining handlers / None.
            if header_type:
                return json.dumps(header, ensure_ascii=False)

        # exec_command_end: emit the command's formatted output.
        if msg_type == "exec_command_end":
            formatted_output = msg_payload.get("formatted_output", "") if isinstance(msg_payload, dict) else ""
            header = {"type": msg_type, "datetime": now}
            if "\n" in formatted_output:
                return json.dumps(header, ensure_ascii=False) + "\nformatted_output:\n" + formatted_output
            header["formatted_output"] = formatted_output
            return json.dumps(header, ensure_ascii=False)

        # command_execution: emit aggregated stdout/stderr (uses the shared
        # header built above, including any command/status fields).
        if msg_type == "command_execution":
            aggregated_output = self._extract_command_output_text(msg_payload)
            if "\n" in aggregated_output:
                return json.dumps(header, ensure_ascii=False) + "\naggregated_output:\n" + aggregated_output
            if aggregated_output:
                header["aggregated_output"] = aggregated_output
                return json.dumps(header, ensure_ascii=False)
            if header_type:
                return json.dumps(header, ensure_ascii=False)

        # Unhandled event shape: let the caller fall back to raw JSON.
        return None
    except Exception:
        # Formatting must never break the stream; signal "unhandled" instead.
        return None
946
+
947
+ # ── Claude prettifier ─────────────────────────────────────────────────
948
+
949
def _format_event_pretty_claude(self, json_line: str) -> Optional[str]:
    """Format a Claude-schema JSON event for human-readable output.

    Handles the event types visible here: "user", "progress",
    "assistant", and a generic fallback (tool_result wrappers and any
    other event). Output is a one-line JSON header; multiline body text
    is appended after the header on its own lines. On any parse or
    formatting error the raw input line is returned unchanged (or None
    when the input was not a string).
    """
    try:
        # Accept either a raw JSON string or an already-parsed object.
        data = json.loads(json_line) if isinstance(json_line, str) else json_line
        self.message_counter += 1
        now = datetime.now().strftime("%I:%M:%S %p")

        if data.get("type") == "user":
            message = data.get("message", {})
            content_list = message.get("content", [])
            text_content = ""
            # First text part wins; other content parts are ignored here.
            for item in content_list:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_content = item.get("text", "")
                    break

            # user_message_truncate == -1 appears to mean "no truncation"
            # (value is set outside this view — confirm at the option's definition).
            if self.user_message_truncate != -1:
                lines = text_content.split('\n')
                if len(lines) > self.user_message_truncate:
                    text_content = '\n'.join(lines[:self.user_message_truncate]) + '\n[Truncated...]'

            metadata: Dict = {"type": "user", "datetime": now, "counter": f"#{self.message_counter}"}
            if '\n' in text_content:
                return json.dumps(metadata, ensure_ascii=False) + "\ncontent:\n" + text_content
            metadata["content"] = text_content
            return json.dumps(metadata, ensure_ascii=False)

        elif data.get("type") == "progress":
            progress_data = data.get("data", {})
            progress_type = progress_data.get("type", "")

            # Hook progress events are pure noise — suppress entirely.
            if progress_type == "hook_progress":
                return None

            if progress_type == "bash_progress":
                output_text = progress_data.get("output", "")
                elapsed_time = progress_data.get("elapsedTimeSeconds", 0)
                total_lines = progress_data.get("totalLines", 0)
                simplified: Dict = {
                    "type": "progress", "progress_type": "bash_progress",
                    "datetime": now, "counter": f"#{self.message_counter}",
                    "elapsed": f"{elapsed_time}s", "lines": total_lines,
                }
                if '\n' in output_text:
                    return json.dumps(simplified, ensure_ascii=False) + "\n[Progress] output:\n" + output_text
                simplified["output"] = output_text
                return f"[Progress] {json.dumps(simplified, ensure_ascii=False)}"

            # Any other progress subtype: echo its data verbatim.
            return json.dumps({
                "type": "progress", "progress_type": progress_type,
                "datetime": now, "counter": f"#{self.message_counter}",
                "data": progress_data,
            }, ensure_ascii=False)

        elif data.get("type") == "assistant":
            message = data.get("message", {})
            content_list = message.get("content", [])
            text_content = ""
            tool_use_data = None

            # First text OR tool_use part wins, whichever appears first.
            for item in content_list:
                if isinstance(item, dict):
                    if item.get("type") == "text":
                        text_content = item.get("text", "")
                        break
                    elif item.get("type") == "tool_use":
                        tool_use_data = {"name": item.get("name", ""), "input": item.get("input", {})}
                        break

            metadata = {"type": "assistant", "datetime": now, "counter": f"#{self.message_counter}"}

            if tool_use_data:
                tool_input = tool_use_data.get("input", {})
                prompt_field = tool_input.get("prompt", "")
                # A multiline "prompt" arg is hoisted out of the inline JSON
                # so it renders as a readable block after the header.
                if isinstance(prompt_field, str) and '\n' in prompt_field:
                    tool_use_copy = {
                        "name": tool_use_data.get("name", ""),
                        "input": {k: v for k, v in tool_input.items() if k != "prompt"},
                    }
                    metadata["tool_use"] = tool_use_copy
                    return json.dumps(metadata, ensure_ascii=False) + "\nprompt:\n" + prompt_field
                metadata["tool_use"] = tool_use_data
                return json.dumps(metadata, ensure_ascii=False)
            else:
                if '\n' in text_content:
                    return json.dumps(metadata, ensure_ascii=False) + "\ncontent:\n" + text_content
                metadata["content"] = text_content
                return json.dumps(metadata, ensure_ascii=False)

        else:
            # Fallback: flatten a leading tool_result wrapper if present.
            message = data.get("message", {})
            content_list = message.get("content", [])
            if content_list and isinstance(content_list, list) and len(content_list) > 0:
                nested_item = content_list[0]
                if isinstance(nested_item, dict) and nested_item.get("type") in ["tool_result"]:
                    flattened: Dict = {"datetime": now, "counter": f"#{self.message_counter}"}
                    if "tool_use_id" in nested_item:
                        flattened["tool_use_id"] = nested_item["tool_use_id"]
                    flattened["type"] = nested_item["type"]
                    nested_content = nested_item.get("content", "")
                    if isinstance(nested_content, str) and '\n' in nested_content:
                        return json.dumps(flattened, ensure_ascii=False) + "\ncontent:\n" + nested_content
                    flattened["content"] = nested_content
                    return json.dumps(flattened, ensure_ascii=False)

            # Last resort: echo the whole event with datetime/counter added,
            # lifting a multiline "result" field out of the inline JSON.
            output: Dict = {"datetime": now, "counter": f"#{self.message_counter}", **data}
            if "result" in output and isinstance(output["result"], str) and '\n' in output["result"]:
                result_value = output.pop("result")
                return json.dumps(output, ensure_ascii=False) + "\nresult:\n" + result_value
            return json.dumps(output, ensure_ascii=False)

    except json.JSONDecodeError:
        # Non-JSON input: pass through untouched rather than losing it.
        return json_line if isinstance(json_line, str) else None
    except Exception:
        # Formatting must never break the stream.
        return json_line if isinstance(json_line, str) else None
1064
+
1065
+ # ── Pi prettifier helpers ─────────────────────────────────────────────
1066
+
1067
+ def _extract_text_from_message(self, message: dict) -> str:
1068
+ """Extract human-readable text from a Pi message object."""
1069
+ if not isinstance(message, dict):
1070
+ return ""
1071
+
1072
+ # Direct text/content fields
1073
+ for field in ("text", "content", "message", "response", "output"):
1074
+ val = message.get(field)
1075
+ if isinstance(val, str) and val.strip():
1076
+ return val
1077
+
1078
+ # content array (Claude-style)
1079
+ content = message.get("content")
1080
+ if isinstance(content, list):
1081
+ parts = []
1082
+ for item in content:
1083
+ if isinstance(item, dict):
1084
+ text = item.get("text") or item.get("content")
1085
+ if isinstance(text, str) and text.strip():
1086
+ parts.append(text)
1087
+ elif isinstance(item, str) and item.strip():
1088
+ parts.append(item)
1089
+ if parts:
1090
+ return "\n".join(parts)
1091
+
1092
+ return ""
1093
+
1094
def _format_event_pretty(self, payload: dict) -> Optional[str]:
    """
    Format a Pi JSON streaming event for human-readable output.
    Returns formatted string or None to skip the event.

    Every rendered event is a one-line JSON header containing type,
    timestamp, and a running counter; multiline payload text (content,
    args, results) is appended after the header on its own lines.
    Unknown event types are echoed raw with the header fields merged in.
    """
    try:
        event_type = payload.get("type", "")
        now = datetime.now().strftime("%I:%M:%S %p")
        self.message_counter += 1

        header: Dict = {
            "type": event_type,
            "datetime": now,
            "counter": f"#{self.message_counter}",
        }

        # --- Session header ---
        if event_type == "session":
            header["version"] = payload.get("version")
            header["id"] = payload.get("id")
            return json.dumps(header, ensure_ascii=False)

        # --- Agent lifecycle events ---
        if event_type in ("agent_start", "turn_start"):
            return json.dumps(header, ensure_ascii=False)

        if event_type == "agent_end":
            messages = payload.get("messages")
            if isinstance(messages, list):
                header["message_count"] = len(messages)
            return json.dumps(header, ensure_ascii=False)

        if event_type == "turn_end":
            tool_results = payload.get("toolResults")
            if isinstance(tool_results, list):
                header["tool_results_count"] = len(tool_results)
            # Skip message text - already displayed by text_end/thinking_end/toolcall_end
            return json.dumps(header, ensure_ascii=False)

        # --- Message events (assistant streaming) ---
        if event_type == "message_start":
            message = payload.get("message", {})
            role = message.get("role") if isinstance(message, dict) else None
            if role:
                header["role"] = role
            return json.dumps(header, ensure_ascii=False)

        if event_type == "message_update":
            # Check for noisy streaming sub-events and suppress them
            ame = payload.get("assistantMessageEvent", {})
            ame_type = ame.get("type", "") if isinstance(ame, dict) else ""
            # Top-level "event" field takes precedence over the nested type.
            event_subtype = payload.get("event", ame_type)
            if event_subtype in self._PI_HIDDEN_MESSAGE_UPDATE_EVENTS:
                return None  # Suppress noisy streaming deltas

            message = payload.get("message", {})
            text = self._extract_text_from_message(message) if isinstance(message, dict) else ""

            # Also check assistantMessageEvent for completion text
            if isinstance(ame, dict):
                if ame_type:
                    header["event"] = ame_type
                # Delta text is only a fallback when the message carried none.
                delta_text = ame.get("text") or ame.get("delta") or ""
                if isinstance(delta_text, str) and delta_text.strip():
                    if not text:
                        text = delta_text

            if text and "\n" in text:
                return json.dumps(header, ensure_ascii=False) + "\ncontent:\n" + text
            elif text:
                header["content"] = text
            return json.dumps(header, ensure_ascii=False)

        if event_type == "message_end":
            # Skip message text - already displayed by text_end/thinking_end/toolcall_end
            return json.dumps(header, ensure_ascii=False)

        # --- Tool execution events ---
        if event_type == "tool_execution_start":
            header["tool"] = payload.get("toolName", "")
            tool_call_id = payload.get("toolCallId")
            if tool_call_id:
                header["id"] = tool_call_id
            args_val = payload.get("args")
            if isinstance(args_val, dict):
                # Show abbreviated args inline
                args_str = json.dumps(args_val, ensure_ascii=False)
                if len(args_str) > 200:
                    # Truncate for readability
                    header["args"] = args_str[:200] + "..."
                else:
                    header["args"] = args_val
            elif isinstance(args_val, str) and args_val.strip():
                if "\n" in args_val:
                    return json.dumps(header, ensure_ascii=False) + "\nargs:\n" + args_val
                header["args"] = args_val
            return json.dumps(header, ensure_ascii=False)

        if event_type == "tool_execution_update":
            header["tool"] = payload.get("toolName", "")
            tool_call_id = payload.get("toolCallId")
            if tool_call_id:
                header["id"] = tool_call_id
            partial = payload.get("partialResult")
            if isinstance(partial, str) and partial.strip():
                if "\n" in partial:
                    return json.dumps(header, ensure_ascii=False) + "\npartialResult:\n" + partial
                header["partialResult"] = partial
            return json.dumps(header, ensure_ascii=False)

        if event_type == "tool_execution_end":
            header["tool"] = payload.get("toolName", "")
            tool_call_id = payload.get("toolCallId")
            if tool_call_id:
                header["id"] = tool_call_id
            is_error = payload.get("isError", False)
            if is_error:
                header["isError"] = True
            result_val = payload.get("result")
            if isinstance(result_val, str) and result_val.strip():
                if "\n" in result_val:
                    return json.dumps(header, ensure_ascii=False) + "\nresult:\n" + result_val
                header["result"] = result_val
            elif isinstance(result_val, (dict, list)):
                # Structured results go after the header when long or multiline.
                result_str = json.dumps(result_val, ensure_ascii=False)
                if "\n" in result_str or len(result_str) > 200:
                    return json.dumps(header, ensure_ascii=False) + "\nresult:\n" + result_str
                header["result"] = result_val
            return json.dumps(header, ensure_ascii=False)

        # --- Retry/compaction events ---
        if event_type == "auto_retry_start":
            header["attempt"] = payload.get("attempt")
            header["maxAttempts"] = payload.get("maxAttempts")
            header["delayMs"] = payload.get("delayMs")
            error_msg = payload.get("errorMessage", "")
            if error_msg:
                header["error"] = error_msg
            return json.dumps(header, ensure_ascii=False)

        if event_type == "auto_retry_end":
            header["success"] = payload.get("success")
            header["attempt"] = payload.get("attempt")
            final_err = payload.get("finalError")
            if final_err:
                header["finalError"] = final_err
            return json.dumps(header, ensure_ascii=False)

        # --- Fallback: emit raw with datetime ---
        header.update({k: v for k, v in payload.items() if k not in ("type",)})
        return json.dumps(header, ensure_ascii=False)

    except Exception:
        # Never break the stream: dump the raw payload on any error.
        return json.dumps(payload, ensure_ascii=False)
1248
+
1249
+ # ── Live stream prettifier ─────────────────────────────────────────────
1250
+
1251
def _format_event_live(self, parsed: dict) -> Optional[str]:
    """Format Pi events for live streaming mode.

    Returns:
        str ending with \\n: a complete line to print
        str NOT ending with \\n: a delta to append (streaming text)
        "": suppress this event
        None: use raw JSON fallback
    """
    event_type = parsed.get("type", "")
    now = datetime.now().strftime("%I:%M:%S %p")

    if event_type == "message_update":
        ame = parsed.get("assistantMessageEvent", {})
        ame_type = ame.get("type", "") if isinstance(ame, dict) else ""

        # Stream text deltas directly (no JSON, no newline)
        if ame_type == "text_delta":
            delta = ame.get("delta", "")
            if isinstance(delta, str) and delta:
                return delta  # raw text, no newline
            return ""

        # Thinking deltas stream the same way as text deltas.
        if ame_type == "thinking_delta":
            delta = ame.get("delta", "")
            if isinstance(delta, str) and delta:
                return delta
            return ""

        # Section start markers
        if ame_type == "text_start":
            return json.dumps({"type": "text_start", "datetime": now}) + "\n"

        if ame_type == "thinking_start":
            return json.dumps({"type": "thinking_start", "datetime": now}) + "\n"

        # Section end markers (text was already streamed)
        # Leading \n closes the unfinished delta line before the marker.
        if ame_type == "text_end":
            return "\n" + json.dumps({"type": "text_end", "datetime": now}) + "\n"

        if ame_type == "thinking_end":
            return "\n" + json.dumps({"type": "thinking_end", "datetime": now}) + "\n"

        # Tool call end: show tool info
        if ame_type == "toolcall_end":
            tc = ame.get("toolCall", {})
            header = {"type": "toolcall_end", "datetime": now}
            if isinstance(tc, dict):
                header["tool"] = tc.get("name", "")
                args = tc.get("arguments", {})
                if isinstance(args, dict):
                    cmd = args.get("command", "")
                    if cmd:
                        header["command"] = cmd
                    else:
                        args_str = json.dumps(args, ensure_ascii=False)
                        # Precedence note: this parses as
                        # (args_str[:200] + "...") if len > 200 else the raw args dict.
                        header["args"] = args_str[:200] + "..." if len(args_str) > 200 else args
            return json.dumps(header, ensure_ascii=False) + "\n"

        # Suppress all other message_update subtypes (toolcall_start, toolcall_delta, etc.)
        return ""

    # Suppress redundant events
    if event_type in ("message_start", "message_end"):
        return ""

    # tool_execution_start
    if event_type == "tool_execution_start":
        header = {
            "type": "tool_execution_start",
            "datetime": now,
            "tool": parsed.get("toolName", ""),
        }
        args_val = parsed.get("args")
        if isinstance(args_val, dict):
            # Inline args, truncated to 200 chars of JSON for readability.
            args_str = json.dumps(args_val, ensure_ascii=False)
            if len(args_str) > 200:
                header["args"] = args_str[:200] + "..."
            else:
                header["args"] = args_val
        return json.dumps(header, ensure_ascii=False) + "\n"

    # tool_execution_end
    if event_type == "tool_execution_end":
        header = {
            "type": "tool_execution_end",
            "datetime": now,
            "tool": parsed.get("toolName", ""),
        }
        is_error = parsed.get("isError", False)
        if is_error:
            header["isError"] = True
        result_val = parsed.get("result")
        if isinstance(result_val, str) and result_val.strip():
            truncated = self._truncate_tool_result_text(result_val)
            if "\n" in truncated:
                return json.dumps(header, ensure_ascii=False) + "\nresult:\n" + truncated + "\n"
            header["result"] = truncated
        elif isinstance(result_val, dict):
            # Dict results: render only the first text content item.
            result_content = result_val.get("content")
            if isinstance(result_content, list):
                for rc_item in result_content:
                    if isinstance(rc_item, dict) and rc_item.get("type") == "text":
                        text = rc_item.get("text", "")
                        truncated = self._truncate_tool_result_text(text)
                        if "\n" in truncated:
                            return json.dumps(header, ensure_ascii=False) + "\nresult:\n" + truncated + "\n"
                        header["result"] = truncated
                        break
        return json.dumps(header, ensure_ascii=False) + "\n"

    # turn_end: metadata only
    if event_type == "turn_end":
        header = {"type": "turn_end", "datetime": now}
        tool_results = parsed.get("toolResults")
        if isinstance(tool_results, list):
            header["tool_results_count"] = len(tool_results)
        return json.dumps(header, ensure_ascii=False) + "\n"

    # agent_start, turn_start
    if event_type in ("agent_start", "turn_start"):
        return json.dumps({"type": event_type, "datetime": now}) + "\n"

    # agent_end
    if event_type == "agent_end":
        header = {"type": "agent_end", "datetime": now}
        messages = parsed.get("messages")
        if isinstance(messages, list):
            header["message_count"] = len(messages)
        return json.dumps(header, ensure_ascii=False) + "\n"

    # Fallback: not handled
    return None
1384
+
1385
+ def _build_hide_types(self) -> set:
1386
+ """Build the set of event types to suppress from output."""
1387
+ hide_types = set(self.DEFAULT_HIDDEN_STREAM_TYPES)
1388
+ for env_name in ("PI_HIDE_STREAM_TYPES", "JUNO_CODE_HIDE_STREAM_TYPES"):
1389
+ env_val = os.environ.get(env_name, "")
1390
+ if env_val:
1391
+ parts = [p.strip() for p in env_val.split(",") if p.strip()]
1392
+ hide_types.update(parts)
1393
+ return hide_types
1394
+
1395
+ @staticmethod
1396
+ def _sanitize_sub_agent_response(event: dict) -> dict:
1397
+ """Strip bulky fields (messages, type) from sub_agent_response to reduce token usage."""
1398
+ return {k: v for k, v in event.items() if k not in ("messages", "type")}
1399
+
1400
+ def _write_capture_file(self, capture_path: Optional[str]) -> None:
1401
+ """Write final result event to capture file for shell backend."""
1402
+ if not capture_path or not self.last_result_event:
1403
+ return
1404
+ try:
1405
+ Path(capture_path).write_text(
1406
+ json.dumps(self.last_result_event, ensure_ascii=False),
1407
+ encoding="utf-8",
1408
+ )
1409
+ except Exception as e:
1410
+ print(f"Warning: Could not write capture file: {e}", file=sys.stderr)
1411
+
1412
+ def run_pi(self, cmd: List[str], args: argparse.Namespace,
1413
+ stdin_prompt: Optional[str] = None) -> int:
1414
+ """Execute the Pi CLI and stream/format its JSON output.
1415
+
1416
+ Args:
1417
+ cmd: Command argument list from build_pi_command.
1418
+ args: Parsed argparse namespace.
1419
+ stdin_prompt: If set, pipe this text via stdin to the Pi CLI
1420
+ (used for multiline/large prompts).
1421
+ """
1422
+ verbose = args.verbose
1423
+ pretty = args.pretty.lower() != "false"
1424
+ capture_path = os.environ.get("JUNO_SUBAGENT_CAPTURE_PATH")
1425
+ hide_types = self._build_hide_types()
1426
+
1427
+ if verbose:
1428
+ # Truncate prompt in display to avoid confusing multi-line output
1429
+ display_cmd = list(cmd)
1430
+ if stdin_prompt:
1431
+ first_line = stdin_prompt.split("\n")[0][:60]
1432
+ display_cmd.append(f'[stdin: "{first_line}..." ({len(stdin_prompt)} chars)]')
1433
+ else:
1434
+ filtered = []
1435
+ skip_next = False
1436
+ for i, part in enumerate(cmd):
1437
+ if skip_next:
1438
+ skip_next = False
1439
+ continue
1440
+ if part == "-p" and i + 1 < len(cmd):
1441
+ prompt_val = cmd[i + 1]
1442
+ if len(prompt_val) > 80 or "\n" in prompt_val:
1443
+ first_line = prompt_val.split("\n")[0][:60]
1444
+ filtered.append(f'-p "{first_line}..." ({len(prompt_val)} chars)')
1445
+ else:
1446
+ filtered.append(f"-p {prompt_val}")
1447
+ skip_next = True
1448
+ else:
1449
+ filtered.append(part)
1450
+ display_cmd = filtered
1451
+ # Only show Executing once: skip when running under juno-code shell backend
1452
+ # (shell backend already logs the command in debug mode)
1453
+ if not capture_path:
1454
+ print(f"Executing: {' '.join(display_cmd)}", file=sys.stderr)
1455
+ print("-" * 80, file=sys.stderr)
1456
+
1457
+ try:
1458
+ process = subprocess.Popen(
1459
+ cmd,
1460
+ stdin=subprocess.PIPE if stdin_prompt else subprocess.DEVNULL,
1461
+ stdout=subprocess.PIPE,
1462
+ stderr=subprocess.PIPE,
1463
+ text=True,
1464
+ bufsize=1,
1465
+ universal_newlines=True,
1466
+ cwd=self.project_path,
1467
+ )
1468
+
1469
+ # Pipe the prompt via stdin if using stdin mode (multiline/large prompts).
1470
+ # Pi CLI reads stdin when isTTY is false and prepends it to messages.
1471
+ if stdin_prompt and process.stdin:
1472
+ try:
1473
+ process.stdin.write(stdin_prompt)
1474
+ process.stdin.close()
1475
+ except BrokenPipeError:
1476
+ pass # Process may have exited early
1477
+
1478
+ # Watchdog thread: handles stdout pipe blocking after process exit.
1479
+ wait_timeout = int(os.environ.get("PI_WAIT_TIMEOUT", "30"))
1480
+ output_done = threading.Event()
1481
+
1482
+ def _stdout_watchdog():
1483
+ """Terminate process and close stdout pipe if it hangs after output."""
1484
+ while not output_done.is_set():
1485
+ if process.poll() is not None:
1486
+ break
1487
+ output_done.wait(timeout=1)
1488
+
1489
+ if output_done.is_set() and process.poll() is None:
1490
+ try:
1491
+ process.wait(timeout=wait_timeout)
1492
+ except subprocess.TimeoutExpired:
1493
+ print(
1494
+ f"Warning: Pi process did not exit within {wait_timeout}s after output. Terminating.",
1495
+ file=sys.stderr,
1496
+ )
1497
+ process.terminate()
1498
+ try:
1499
+ process.wait(timeout=5)
1500
+ except subprocess.TimeoutExpired:
1501
+ print("Warning: Pi process did not respond to SIGTERM. Killing.", file=sys.stderr)
1502
+ process.kill()
1503
+ try:
1504
+ process.wait(timeout=5)
1505
+ except subprocess.TimeoutExpired:
1506
+ pass
1507
+
1508
+ time.sleep(2)
1509
+ try:
1510
+ if process.stdout and not process.stdout.closed:
1511
+ process.stdout.close()
1512
+ except Exception:
1513
+ pass
1514
+
1515
+ watchdog = threading.Thread(target=_stdout_watchdog, daemon=True)
1516
+ watchdog.start()
1517
+
1518
+ # Stream stderr in a separate thread so Pi diagnostic output is visible
1519
+ def _stderr_reader():
1520
+ """Read stderr and forward to our stderr for visibility."""
1521
+ try:
1522
+ if process.stderr:
1523
+ for stderr_line in process.stderr:
1524
+ print(stderr_line, end="", file=sys.stderr, flush=True)
1525
+ except (ValueError, OSError):
1526
+ pass
1527
+
1528
+ stderr_thread = threading.Thread(target=_stderr_reader, daemon=True)
1529
+ stderr_thread.start()
1530
+
1531
+ if process.stdout:
1532
+ try:
1533
+ for raw_line in process.stdout:
1534
+ line = raw_line.rstrip("\n\r")
1535
+ if not line.strip():
1536
+ continue
1537
+
1538
+ # Try to parse as JSON
1539
+ try:
1540
+ parsed = json.loads(line)
1541
+ except json.JSONDecodeError:
1542
+ # Non-JSON output — print as-is
1543
+ print(line, flush=True)
1544
+ continue
1545
+
1546
+ event_type = parsed.get("type", "")
1547
+
1548
+ # Capture session ID from the session event (sent at stream start)
1549
+ if event_type == "session":
1550
+ self.session_id = parsed.get("id")
1551
+
1552
+ # Capture result event for shell backend
1553
+ if event_type == "agent_end":
1554
+ # agent_end has a 'messages' array; extract final assistant text
1555
+ messages = parsed.get("messages", [])
1556
+ text = ""
1557
+ if isinstance(messages, list):
1558
+ # Walk messages in reverse to find last assistant message with text
1559
+ for m in reversed(messages):
1560
+ if isinstance(m, dict) and m.get("role") == "assistant":
1561
+ text = self._extract_text_from_message(m)
1562
+ if text:
1563
+ break
1564
+ if text:
1565
+ self.last_result_event = {
1566
+ "type": "result",
1567
+ "subtype": "success",
1568
+ "is_error": False,
1569
+ "result": text,
1570
+ "session_id": self.session_id,
1571
+ "sub_agent_response": self._sanitize_sub_agent_response(parsed),
1572
+ }
1573
+ else:
1574
+ self.last_result_event = parsed
1575
+ elif event_type == "message":
1576
+ # OpenAI-compatible format: capture last assistant message
1577
+ msg = parsed.get("message", {})
1578
+ if isinstance(msg, dict) and msg.get("role") == "assistant":
1579
+ text = self._extract_text_from_message(msg)
1580
+ if text:
1581
+ self.last_result_event = {
1582
+ "type": "result",
1583
+ "subtype": "success",
1584
+ "is_error": False,
1585
+ "result": text,
1586
+ "session_id": self.session_id,
1587
+ "sub_agent_response": self._sanitize_sub_agent_response(parsed),
1588
+ }
1589
+ elif event_type == "turn_end":
1590
+ # turn_end may contain the final assistant message
1591
+ msg = parsed.get("message", {})
1592
+ if isinstance(msg, dict):
1593
+ text = self._extract_text_from_message(msg)
1594
+ if text:
1595
+ self.last_result_event = {
1596
+ "type": "result",
1597
+ "subtype": "success",
1598
+ "is_error": False,
1599
+ "result": text,
1600
+ "session_id": self.session_id,
1601
+ "sub_agent_response": self._sanitize_sub_agent_response(parsed),
1602
+ }
1603
+
1604
+ # Filter hidden stream types (live mode handles its own filtering)
1605
+ if event_type in hide_types and self.prettifier_mode != self.PRETTIFIER_LIVE:
1606
+ continue
1607
+
1608
+ # Live stream mode: stream deltas in real-time
1609
+ if self.prettifier_mode == self.PRETTIFIER_LIVE:
1610
+ if event_type in hide_types:
1611
+ # In live mode, still suppress session/compaction/retry events
1612
+ # but NOT message_start/message_end (handled by _format_event_live)
1613
+ if event_type not in ("message_start", "message_end"):
1614
+ continue
1615
+ formatted = self._format_event_live(parsed)
1616
+ if formatted is not None:
1617
+ if formatted == "":
1618
+ continue
1619
+ sys.stdout.write(formatted)
1620
+ sys.stdout.flush()
1621
+ else:
1622
+ # Fallback: print raw JSON for unhandled event types
1623
+ print(json.dumps(parsed, ensure_ascii=False), flush=True)
1624
+ continue
1625
+
1626
+ # Format and print using model-appropriate prettifier
1627
+ if pretty:
1628
+ if self.prettifier_mode == self.PRETTIFIER_CODEX:
1629
+ # Try Pi-wrapped Codex format first (role-based messages)
1630
+ if "role" in parsed:
1631
+ formatted = self._format_pi_codex_message(parsed)
1632
+ else:
1633
+ # Try Pi event handler (message_update, turn_end, etc.)
1634
+ formatted = self._format_pi_codex_event(parsed)
1635
+ if formatted is not None:
1636
+ # Empty string means "suppress this event"
1637
+ if formatted == "":
1638
+ continue
1639
+ else:
1640
+ # Try native Codex event handler
1641
+ formatted = self._format_event_pretty_codex(parsed)
1642
+ if formatted is None:
1643
+ # Sanitize before raw JSON fallback: strip thinkingSignature,
1644
+ # encrypted_content, and metadata from nested Codex events.
1645
+ self._sanitize_codex_event(parsed, strip_metadata=True)
1646
+ formatted = json.dumps(parsed, ensure_ascii=False)
1647
+ elif self.prettifier_mode == self.PRETTIFIER_CLAUDE:
1648
+ formatted = self._format_event_pretty_claude(parsed)
1649
+ else:
1650
+ formatted = self._format_event_pretty(parsed)
1651
+ if formatted is not None:
1652
+ print(formatted, flush=True)
1653
+ else:
1654
+ print(line, flush=True)
1655
+
1656
+ except ValueError:
1657
+ # Watchdog closed stdout — expected when process exits but pipe stays open.
1658
+ pass
1659
+
1660
+ # Signal watchdog that output loop is done
1661
+ output_done.set()
1662
+
1663
+ # Write capture file for shell backend
1664
+ self._write_capture_file(capture_path)
1665
+
1666
+ # Wait for process cleanup
1667
+ try:
1668
+ process.wait(timeout=5)
1669
+ except subprocess.TimeoutExpired:
1670
+ pass
1671
+
1672
+ # Wait for stderr thread to finish
1673
+ stderr_thread.join(timeout=3)
1674
+
1675
+ return process.returncode or 0
1676
+
1677
+ except KeyboardInterrupt:
1678
+ print("\nInterrupted by user", file=sys.stderr)
1679
+ try:
1680
+ process.terminate()
1681
+ try:
1682
+ process.wait(timeout=5)
1683
+ except subprocess.TimeoutExpired:
1684
+ process.kill()
1685
+ process.wait(timeout=5)
1686
+ except Exception:
1687
+ pass
1688
+ self._write_capture_file(capture_path)
1689
+ return 130
1690
+
1691
+ except Exception as e:
1692
+ print(f"Error executing pi: {e}", file=sys.stderr)
1693
+ try:
1694
+ if process.poll() is None:
1695
+ process.terminate()
1696
+ process.wait(timeout=5)
1697
+ except Exception:
1698
+ pass
1699
+ self._write_capture_file(capture_path)
1700
+ return 1
1701
+
1702
def run(self) -> int:
    """Top-level execution flow: validate inputs, configure state, launch pi.

    Returns:
        The pi subprocess exit code on success, or 1 for any
        configuration/validation error detected before launch.
    """
    args = self.parse_arguments()

    # The instruction may come from -p/--prompt, the JUNO_INSTRUCTION
    # environment variable, or (below) a prompt file.
    instruction = args.prompt or os.environ.get("JUNO_INSTRUCTION")
    if not (instruction or args.prompt_file):
        print("Error: Either -p/--prompt or -pp/--prompt-file is required.", file=sys.stderr)
        print("\nRun 'pi.py --help' for usage information.", file=sys.stderr)
        return 1

    if not self.check_pi_installed():
        install_help = (
            "Error: Pi CLI is not available. Please install it:\n"
            " npm install -g @mariozechner/pi-coding-agent\n"
            "See: https://pi.dev/"
        )
        print(install_help, file=sys.stderr)
        return 1

    # Resolve and validate the working directory before anything else runs.
    self.project_path = os.path.abspath(args.cd)
    if not os.path.isdir(self.project_path):
        print(f"Error: Project path does not exist: {self.project_path}", file=sys.stderr)
        return 1

    self.model_name = self.expand_model_shorthand(args.model)
    self.prettifier_mode = self._detect_prettifier_mode(self.model_name)
    self.verbose = args.verbose

    if self.verbose:
        # Verbose mode overrides the detected prettifier with the live
        # streamer so deltas are shown in real time, and reports the choice.
        self.prettifier_mode = self.PRETTIFIER_LIVE
        print(f"Prettifier mode: {self.prettifier_mode} (model: {self.model_name})", file=sys.stderr)

    # A prompt file, when supplied, takes precedence over the inline prompt.
    self.prompt = self.read_prompt_file(args.prompt_file) if args.prompt_file else instruction

    cmd, stdin_prompt = self.build_pi_command(args)
    return self.run_pi(cmd, args, stdin_prompt=stdin_prompt)
1745
+
1746
+
1747
def main():
    """Script entry point: run a PiService and exit with its status code."""
    sys.exit(PiService().run())
1750
+
1751
+
1752
+ if __name__ == "__main__":
1753
+ main()