juno-code 1.0.47 → 1.0.50
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +455 -205
- package/dist/bin/cli.d.mts +17 -0
- package/dist/bin/cli.d.ts +17 -0
- package/dist/bin/cli.js +6456 -17604
- package/dist/bin/cli.js.map +1 -1
- package/dist/bin/cli.mjs +6443 -17589
- package/dist/bin/cli.mjs.map +1 -1
- package/dist/bin/feedback-collector.d.mts +2 -0
- package/dist/bin/feedback-collector.d.ts +2 -0
- package/dist/bin/feedback-collector.js.map +1 -1
- package/dist/bin/feedback-collector.mjs.map +1 -1
- package/dist/index.d.mts +2133 -0
- package/dist/index.d.ts +2133 -0
- package/dist/index.js +3916 -14711
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +3914 -14516
- package/dist/index.mjs.map +1 -1
- package/dist/templates/extensions/pi/juno-skill-preprocessor.ts +239 -0
- package/dist/templates/scripts/__pycache__/github.cpython-313.pyc +0 -0
- package/dist/templates/scripts/__pycache__/parallel_runner.cpython-313.pyc +0 -0
- package/dist/templates/scripts/__pycache__/slack_respond.cpython-313.pyc +0 -0
- package/dist/templates/scripts/install_requirements.sh +41 -3
- package/dist/templates/scripts/kanban.sh +22 -4
- package/dist/templates/scripts/parallel_runner.sh +2242 -0
- package/dist/templates/services/README.md +61 -1
- package/dist/templates/services/__pycache__/claude.cpython-313.pyc +0 -0
- package/dist/templates/services/__pycache__/codex.cpython-313.pyc +0 -0
- package/dist/templates/services/__pycache__/pi.cpython-313.pyc +0 -0
- package/dist/templates/services/claude.py +132 -33
- package/dist/templates/services/codex.py +179 -66
- package/dist/templates/services/gemini.py +117 -27
- package/dist/templates/services/pi.py +2796 -0
- package/dist/templates/skills/claude/kanban-workflow/SKILL.md +138 -0
- package/dist/templates/skills/claude/plan-kanban-tasks/SKILL.md +15 -8
- package/dist/templates/skills/claude/ralph-loop/SKILL.md +18 -22
- package/dist/templates/skills/claude/ralph-loop/references/first_check.md +15 -14
- package/dist/templates/skills/claude/ralph-loop/references/implement.md +17 -17
- package/dist/templates/skills/claude/ralph-loop/scripts/kanban.sh +22 -4
- package/dist/templates/skills/claude/understand-project/SKILL.md +15 -8
- package/dist/templates/skills/codex/kanban-workflow/SKILL.md +139 -0
- package/dist/templates/skills/codex/plan-kanban-tasks/SKILL.md +32 -0
- package/dist/templates/skills/codex/ralph-loop/SKILL.md +18 -22
- package/dist/templates/skills/codex/ralph-loop/references/first_check.md +15 -14
- package/dist/templates/skills/codex/ralph-loop/references/implement.md +17 -17
- package/dist/templates/skills/codex/ralph-loop/scripts/kanban.sh +22 -4
- package/dist/templates/skills/codex/understand-project/SKILL.md +46 -0
- package/dist/templates/skills/pi/.gitkeep +0 -0
- package/dist/templates/skills/pi/kanban-workflow/SKILL.md +139 -0
- package/dist/templates/skills/pi/plan-kanban-tasks/SKILL.md +32 -0
- package/dist/templates/skills/pi/ralph-loop/SKILL.md +43 -0
- package/dist/templates/skills/pi/ralph-loop/references/first_check.md +21 -0
- package/dist/templates/skills/pi/ralph-loop/references/implement.md +99 -0
- package/dist/templates/skills/pi/understand-project/SKILL.md +46 -0
- package/package.json +26 -46
- package/dist/templates/scripts/__pycache__/attachment_downloader.cpython-38.pyc +0 -0
- package/dist/templates/scripts/__pycache__/github.cpython-38.pyc +0 -0
- package/dist/templates/scripts/__pycache__/slack_fetch.cpython-38.pyc +0 -0
- package/dist/templates/scripts/__pycache__/slack_state.cpython-38.pyc +0 -0
- package/dist/templates/services/__pycache__/claude.cpython-38.pyc +0 -0
- package/dist/templates/services/__pycache__/codex.cpython-38.pyc +0 -0
|
@@ -0,0 +1,2796 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Pi Agent Service Script for juno-code
|
|
4
|
+
Headless wrapper around the Pi coding agent CLI with JSON streaming and shorthand model support.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import argparse
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import re
|
|
11
|
+
import subprocess
|
|
12
|
+
import sys
|
|
13
|
+
import threading
|
|
14
|
+
import time
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Dict, List, Optional, Set, Tuple
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class PiService:
    """Service wrapper for Pi coding agent headless mode."""

    # Fallback model used when neither -m/--model nor PI_MODEL is provided.
    DEFAULT_MODEL = "anthropic/claude-sonnet-4-6"

    # Model shorthands — Pi is multi-provider so shorthands include provider/model format.
    # All colon-prefixed shorthands are expanded before being passed to pi CLI.
    MODEL_SHORTHANDS: Dict[str, str] = {
        # Meta shorthand
        ":pi": "anthropic/claude-sonnet-4-6",
        ":default": "anthropic/claude-sonnet-4-6",
        # Anthropic
        ":sonnet": "anthropic/claude-sonnet-4-6",
        ":opus": "anthropic/claude-opus-4-6",
        ":haiku": "anthropic/claude-haiku-4-5-20251001",
        # OpenAI
        ":gpt-5": "openai/gpt-5",
        ":gpt-4o": "openai/gpt-4o",
        ":o3": "openai/o3",
        ":codex": "openai-codex/gpt-5.3-codex",
        ":api-codex": "openai/gpt-5.3-codex",
        # Google
        ":gemini-pro": "google/gemini-2.5-pro",
        ":gemini-flash": "google/gemini-2.5-flash",
        # Groq
        ":groq": "groq/llama-4-scout-17b-16e-instruct",
        # xAI
        ":grok": "xai/grok-3",
    }

    # Default stream types to suppress (Pi outputs lifecycle events that are noisy)
    DEFAULT_HIDDEN_STREAM_TYPES = {
        "auto_compaction_start",
        "auto_compaction_end",
        "auto_retry_start",
        "auto_retry_end",
        "session",
        "message_start",
        "message_end",
        "tool_execution_update",
    }

    # message_update sub-events to suppress (streaming deltas are noisy;
    # completion events like text_end, thinking_end, toolcall_end are kept)
    _PI_HIDDEN_MESSAGE_UPDATE_EVENTS = {
        "text_delta",
        "text_start",
        "thinking_delta",
        "thinking_start",
        "toolcall_delta",
        "toolcall_start",
    }

    # Prettifier mode constants (selected by _detect_prettifier_mode)
    PRETTIFIER_PI = "pi"
    PRETTIFIER_CLAUDE = "claude"
    PRETTIFIER_CODEX = "codex"
    PRETTIFIER_LIVE = "live"

    # ANSI colors for tool prettifier output.
    # - command/args blocks are green for readability
    # - error results are red
    ANSI_GREEN = "\x1b[38;5;40m"
    ANSI_RED = "\x1b[38;5;203m"
    ANSI_RESET = "\x1b[0m"

    # Keep tool args readable while preventing giant inline payloads.
    TOOL_ARG_STRING_MAX_CHARS = 400
    # Matches CSI escape sequences (used by _strip_ansi_sequences).
    _ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-?]*[ -/]*[@-~]")
|
|
89
|
+
|
|
90
|
+
def __init__(self):
    """Initialize per-run state: model/prompt config, streaming buffers, prettifier settings."""
    # Resolved model id; may be replaced after shorthand expansion.
    self.model_name = self.DEFAULT_MODEL
    self.project_path = os.getcwd()
    self.prompt = ""
    self.verbose = False
    self.last_result_event: Optional[dict] = None
    self.session_id: Optional[str] = None
    self.message_counter = 0
    self.prettifier_mode = self.PRETTIFIER_PI
    # Tool call grouping: buffer toolcall_end until tool_execution_end arrives
    self._pending_tool_calls: Dict[str, dict] = {}  # toolCallId -> {tool, args/command}
    # Buffer tool_execution_start data for fallback + timing (when toolcall_end arrives late)
    self._pending_exec_starts: Dict[str, dict] = {}  # toolCallId -> {tool, args/command, started_at}
    # Track whether we're inside a tool execution
    self._in_tool_execution: bool = False
    # Buffer raw non-JSON tool stdout so it doesn't interleave with structured events
    self._buffered_tool_stdout_lines: List[str] = []
    # Per-run usage/cost accumulation (used for result + agent_end total cost visibility)
    self._run_usage_totals: Optional[dict] = None
    self._run_total_cost_usd: Optional[float] = None
    self._run_seen_usage_keys: Set[str] = set()
    # Claude prettifier state
    self.user_message_truncate = int(os.environ.get("CLAUDE_USER_MESSAGE_PRETTY_TRUNCATE", "4"))
    # Codex prettifier state
    self._item_counter = 0
    self._codex_first_assistant_seen = False
    self._codex_tool_result_max_lines = int(os.environ.get("PI_TOOL_RESULT_MAX_LINES", "6"))
    # Keys to hide from intermediate assistant messages in Codex mode
    self._codex_metadata_keys = {"api", "provider", "model", "usage", "stopReason", "timestamp"}
|
|
119
|
+
|
|
120
|
+
def _color_enabled(self) -> bool:
|
|
121
|
+
"""Check if ANSI color output is appropriate (TTY + NO_COLOR not set)."""
|
|
122
|
+
if os.environ.get("NO_COLOR") is not None:
|
|
123
|
+
return False
|
|
124
|
+
return hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
|
|
125
|
+
|
|
126
|
+
def _colorize_lines(self, text: str, color_code: str) -> str:
|
|
127
|
+
"""Apply ANSI coloring per line so line-based renderers keep colors stable."""
|
|
128
|
+
if "\n" not in text:
|
|
129
|
+
return f"{color_code}{text}{self.ANSI_RESET}"
|
|
130
|
+
return "\n".join(f"{color_code}{line}{self.ANSI_RESET}" for line in text.split("\n"))
|
|
131
|
+
|
|
132
|
+
def _colorize_result(self, text: str, is_error: bool = False) -> str:
|
|
133
|
+
"""Colorize tool output only for errors; success stays terminal-default."""
|
|
134
|
+
if not self._color_enabled():
|
|
135
|
+
return text
|
|
136
|
+
if not is_error:
|
|
137
|
+
return text
|
|
138
|
+
return self._colorize_lines(text, self.ANSI_RED)
|
|
139
|
+
|
|
140
|
+
def _colorize_command(self, text: str) -> str:
|
|
141
|
+
"""Colorize tool command/args blocks in green when ANSI color is enabled."""
|
|
142
|
+
if not self._color_enabled():
|
|
143
|
+
return text
|
|
144
|
+
return self._colorize_lines(text, self.ANSI_GREEN)
|
|
145
|
+
|
|
146
|
+
def _normalize_multiline_tool_text(self, text: str) -> str:
|
|
147
|
+
"""Render escaped newline sequences as real newlines for tool command/args blocks."""
|
|
148
|
+
if "\n" in text:
|
|
149
|
+
return text
|
|
150
|
+
if "\\n" in text:
|
|
151
|
+
return text.replace("\\n", "\n")
|
|
152
|
+
return text
|
|
153
|
+
|
|
154
|
+
def _format_tool_invocation_header(self, header: Dict) -> str:
|
|
155
|
+
"""Serialize a tool header and render multiline command/args as separate readable blocks."""
|
|
156
|
+
metadata = dict(header)
|
|
157
|
+
block_label: Optional[str] = None
|
|
158
|
+
block_text: Optional[str] = None
|
|
159
|
+
|
|
160
|
+
command_val = metadata.get("command")
|
|
161
|
+
if isinstance(command_val, str) and command_val.strip():
|
|
162
|
+
command_text = self._normalize_multiline_tool_text(command_val)
|
|
163
|
+
if "\n" in command_text:
|
|
164
|
+
metadata.pop("command", None)
|
|
165
|
+
block_label = "command:"
|
|
166
|
+
block_text = self._colorize_command(command_text)
|
|
167
|
+
|
|
168
|
+
if block_text is None:
|
|
169
|
+
args_val = metadata.get("args")
|
|
170
|
+
if isinstance(args_val, str) and args_val.strip():
|
|
171
|
+
args_text = self._normalize_multiline_tool_text(args_val)
|
|
172
|
+
if "\n" in args_text:
|
|
173
|
+
metadata.pop("args", None)
|
|
174
|
+
block_label = "args:"
|
|
175
|
+
block_text = self._colorize_command(args_text)
|
|
176
|
+
|
|
177
|
+
output = json.dumps(metadata, ensure_ascii=False)
|
|
178
|
+
if block_text is None:
|
|
179
|
+
return output
|
|
180
|
+
return output + "\n" + block_label + "\n" + block_text
|
|
181
|
+
|
|
182
|
+
def _strip_ansi_sequences(self, text: str) -> str:
|
|
183
|
+
"""Remove ANSI escape sequences to prevent color bleed in prettified output."""
|
|
184
|
+
if not isinstance(text, str) or "\x1b" not in text:
|
|
185
|
+
return text
|
|
186
|
+
return self._ANSI_ESCAPE_RE.sub("", text)
|
|
187
|
+
|
|
188
|
+
def _sanitize_tool_argument_value(self, value):
|
|
189
|
+
"""Recursively sanitize tool args while preserving JSON structure."""
|
|
190
|
+
if isinstance(value, str):
|
|
191
|
+
clean = self._strip_ansi_sequences(value)
|
|
192
|
+
if len(clean) > self.TOOL_ARG_STRING_MAX_CHARS:
|
|
193
|
+
return clean[:self.TOOL_ARG_STRING_MAX_CHARS] + "..."
|
|
194
|
+
return clean
|
|
195
|
+
if isinstance(value, dict):
|
|
196
|
+
return {k: self._sanitize_tool_argument_value(v) for k, v in value.items()}
|
|
197
|
+
if isinstance(value, list):
|
|
198
|
+
return [self._sanitize_tool_argument_value(v) for v in value]
|
|
199
|
+
return value
|
|
200
|
+
|
|
201
|
+
def _format_execution_time(self, payload: dict, pending: Optional[dict] = None) -> Optional[str]:
|
|
202
|
+
"""Return execution time string (e.g. 0.12s) from payload or measured start time."""
|
|
203
|
+
seconds: Optional[float] = None
|
|
204
|
+
|
|
205
|
+
# Prefer explicit durations if Pi adds them in future versions.
|
|
206
|
+
for key in ("executionTimeSeconds", "durationSeconds", "elapsedSeconds"):
|
|
207
|
+
value = payload.get(key)
|
|
208
|
+
if isinstance(value, (int, float)):
|
|
209
|
+
seconds = float(value)
|
|
210
|
+
break
|
|
211
|
+
|
|
212
|
+
if seconds is None:
|
|
213
|
+
for key in ("executionTimeMs", "durationMs", "elapsedMs"):
|
|
214
|
+
value = payload.get(key)
|
|
215
|
+
if isinstance(value, (int, float)):
|
|
216
|
+
seconds = float(value) / 1000.0
|
|
217
|
+
break
|
|
218
|
+
|
|
219
|
+
if seconds is None and isinstance(pending, dict):
|
|
220
|
+
started_at = pending.get("started_at")
|
|
221
|
+
if isinstance(started_at, (int, float)):
|
|
222
|
+
seconds = max(0.0, time.perf_counter() - started_at)
|
|
223
|
+
|
|
224
|
+
if seconds is None:
|
|
225
|
+
return None
|
|
226
|
+
return f"{seconds:.2f}s"
|
|
227
|
+
|
|
228
|
+
def expand_model_shorthand(self, model: str) -> str:
|
|
229
|
+
"""Expand shorthand model names (colon-prefixed) to full identifiers."""
|
|
230
|
+
if model.startswith(":"):
|
|
231
|
+
return self.MODEL_SHORTHANDS.get(model, model)
|
|
232
|
+
return model
|
|
233
|
+
|
|
234
|
+
def _detect_prettifier_mode(self, model: str) -> str:
|
|
235
|
+
"""Detect which prettifier to use based on the resolved model name.
|
|
236
|
+
|
|
237
|
+
Pi CLI always uses its own event protocol (message, turn_end,
|
|
238
|
+
message_update, agent_end, etc.) regardless of the underlying LLM.
|
|
239
|
+
Codex models also use Pi's event protocol but may additionally emit
|
|
240
|
+
native Codex events (agent_reasoning, agent_message, exec_command_end).
|
|
241
|
+
The LIVE prettifier handles both Pi-native and Codex-native events,
|
|
242
|
+
giving real-time streaming output for all model types.
|
|
243
|
+
Claude models still use Pi's event protocol, NOT Claude CLI events.
|
|
244
|
+
"""
|
|
245
|
+
model_lower = model.lower()
|
|
246
|
+
if "codex" in model_lower:
|
|
247
|
+
return self.PRETTIFIER_LIVE
|
|
248
|
+
# All non-Codex models (including Claude) use Pi's native event protocol
|
|
249
|
+
return self.PRETTIFIER_PI
|
|
250
|
+
|
|
251
|
+
def check_pi_installed(self) -> bool:
    """Return True if the `pi` CLI is discoverable on PATH.

    Uses shutil.which instead of spawning the `which` binary: that keeps
    the check portable (Windows has no `which`) and avoids an unnecessary
    subprocess. Any lookup failure is treated as "not installed" rather
    than raised, matching the original best-effort contract.
    """
    import shutil  # local import keeps this method self-contained

    try:
        return shutil.which("pi") is not None
    except Exception:
        return False
|
|
263
|
+
|
|
264
|
+
def parse_arguments(self) -> argparse.Namespace:
    """Parse command line arguments for the Pi service.

    Defaults for most options can be supplied through PI_* environment
    variables (noted in each help string); CLI flags take precedence.
    """
    parser = argparse.ArgumentParser(
        description="Pi Agent Service - Wrapper for Pi coding agent headless mode",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s -p "Review this code" -m :sonnet
%(prog)s -pp prompt.txt --model openai/gpt-4o
%(prog)s -p "Refactor module" --thinking high
%(prog)s -p "Fix bug" --provider anthropic --model claude-sonnet-4-5-20250929
%(prog)s -p "Audit code" -m :gpt-5 --tools read,bash,edit

Model shorthands:
:pi, :default -> anthropic/claude-sonnet-4-6
:sonnet -> anthropic/claude-sonnet-4-6
:opus -> anthropic/claude-opus-4-6
:haiku -> anthropic/claude-haiku-4-5-20251001
:gpt-5 -> openai/gpt-5
:gpt-4o -> openai/gpt-4o
:o3 -> openai/o3
:codex -> openai-codex/gpt-5.3-codex
:api-codex -> openai/gpt-5.3-codex
:gemini-pro -> google/gemini-2.5-pro
:gemini-flash -> google/gemini-2.5-flash
:groq -> groq/llama-4-scout-17b-16e-instruct
:grok -> xai/grok-3
""",
    )

    # Prompt source: inline text or file, mutually exclusive (neither is required
    # here; the caller is expected to validate that a prompt was provided).
    prompt_group = parser.add_mutually_exclusive_group(required=False)
    prompt_group.add_argument("-p", "--prompt", type=str, help="Prompt text to send to Pi")
    prompt_group.add_argument("-pp", "--prompt-file", type=str, help="Path to file containing the prompt")

    # Working directory for the Pi run.
    parser.add_argument(
        "--cd",
        type=str,
        default=os.environ.get("PI_PROJECT_PATH", os.getcwd()),
        help="Project path (absolute). Default: current directory (env: PI_PROJECT_PATH)",
    )

    # Model selection: shorthand (":sonnet") or explicit provider/model.
    parser.add_argument(
        "-m",
        "--model",
        type=str,
        default=os.environ.get("PI_MODEL", self.DEFAULT_MODEL),
        help=(
            "Model name. Supports shorthands (:pi, :sonnet, :opus, :gpt-5, :gemini-pro, etc.) "
            f"or provider/model format. Default: {self.DEFAULT_MODEL} (env: PI_MODEL)"
        ),
    )

    # Explicit provider overrides any provider embedded in the model string.
    parser.add_argument(
        "--provider",
        type=str,
        default=os.environ.get("PI_PROVIDER", ""),
        help="LLM provider (anthropic, openai, google, etc.). Overrides provider in model string. (env: PI_PROVIDER)",
    )

    parser.add_argument(
        "--thinking",
        type=str,
        choices=["off", "minimal", "low", "medium", "high", "xhigh"],
        default=os.environ.get("PI_THINKING", None),
        help="Thinking level (off/minimal/low/medium/high/xhigh). (env: PI_THINKING)",
    )

    # Tool availability: --no-tools takes precedence over --tools in
    # build_pi_command.
    parser.add_argument(
        "--tools",
        type=str,
        default=os.environ.get("PI_TOOLS", None),
        help="Comma-separated tool list (read,bash,edit,write,grep,find,ls). (env: PI_TOOLS)",
    )

    parser.add_argument(
        "--no-tools",
        action="store_true",
        help="Disable all built-in Pi tools.",
    )

    # System prompt: replace entirely, or append to the default.
    parser.add_argument(
        "--system-prompt",
        type=str,
        default=os.environ.get("PI_SYSTEM_PROMPT", None),
        help="Replace Pi's system prompt with custom text. (env: PI_SYSTEM_PROMPT)",
    )

    parser.add_argument(
        "--append-system-prompt",
        type=str,
        default=os.environ.get("PI_APPEND_SYSTEM_PROMPT", None),
        help="Append to Pi's default system prompt. (env: PI_APPEND_SYSTEM_PROMPT)",
    )

    parser.add_argument(
        "--no-extensions",
        action="store_true",
        help="Disable Pi extensions.",
    )

    parser.add_argument(
        "--no-skills",
        action="store_true",
        help="Disable Pi skills.",
    )

    # Session persistence: ephemeral mode, or resume an existing session.
    parser.add_argument(
        "--no-session",
        action="store_true",
        default=os.environ.get("PI_NO_SESSION", "false").lower() == "true",
        help="Disable session persistence (ephemeral mode). (env: PI_NO_SESSION)",
    )

    parser.add_argument(
        "--resume",
        type=str,
        default=None,
        help="Resume a previous session by session ID. Passed to Pi CLI as --session <id>.",
    )

    # Text prepended ahead of the user prompt (see build_pi_command).
    parser.add_argument(
        "--auto-instruction",
        type=str,
        default=os.environ.get("PI_AUTO_INSTRUCTION", ""),
        help="Instruction text prepended to the prompt. (env: PI_AUTO_INSTRUCTION)",
    )

    # Escape hatch for flags this wrapper does not model explicitly.
    parser.add_argument(
        "--additional-args",
        type=str,
        default="",
        help="Space-separated additional pi CLI arguments to append.",
    )

    parser.add_argument(
        "--pretty",
        type=str,
        default=os.environ.get("PI_PRETTY", "true"),
        help="Pretty-print JSON output (true/false). Default: true (env: PI_PRETTY)",
    )

    parser.add_argument(
        "--verbose",
        action="store_true",
        default=os.environ.get("PI_VERBOSE", "false").lower() == "true",
        help="Verbose mode: print command before execution and enable live stream output with real-time text streaming. (env: PI_VERBOSE)",
    )

    return parser.parse_args()
|
|
413
|
+
|
|
414
|
+
def read_prompt_file(self, file_path: str) -> str:
    """Load prompt text from *file_path*, stripped; print an error and exit(1) on failure."""
    try:
        content = Path(file_path).read_text(encoding="utf-8")
    except FileNotFoundError:
        print(f"Error: Prompt file not found: {file_path}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading prompt file: {e}", file=sys.stderr)
        sys.exit(1)
    return content.strip()
|
|
425
|
+
|
|
426
|
+
def build_pi_command(self, args: argparse.Namespace) -> Tuple[List[str], Optional[str]]:
    """Construct the Pi CLI command for headless JSON streaming execution.

    Returns (cmd, stdin_prompt): cmd is the argument list, stdin_prompt is
    the prompt text to pipe via stdin (or None to pass as positional arg).
    For multiline or large prompts we pipe via stdin so Pi reads it
    naturally without command-line quoting issues.
    """
    cmd = ["pi", "--mode", "json"]

    # Model: if provider/model format, split and pass separately
    model = self.model_name
    provider = args.provider.strip() if args.provider else ""

    # An explicit --provider wins; otherwise derive it from "provider/model-id".
    if "/" in model and not provider:
        # Split provider/model-id format
        parts = model.split("/", 1)
        provider = parts[0]
        model = parts[1]

    if provider:
        cmd.extend(["--provider", provider])

    cmd.extend(["--model", model])

    # Thinking level
    if args.thinking:
        cmd.extend(["--thinking", args.thinking])

    # Tool control (--no-tools takes precedence over a --tools list)
    if args.no_tools:
        cmd.append("--no-tools")
    elif args.tools:
        cmd.extend(["--tools", args.tools])

    # System prompt (full replacement takes precedence over append)
    if args.system_prompt:
        cmd.extend(["--system-prompt", args.system_prompt])
    elif args.append_system_prompt:
        cmd.extend(["--append-system-prompt", args.append_system_prompt])

    # Extension/skill control
    if args.no_extensions:
        cmd.append("--no-extensions")
    if args.no_skills:
        cmd.append("--no-skills")

    # Session control (--resume takes precedence over --no-session)
    if getattr(args, "resume", None):
        cmd.extend(["--session", args.resume])
    elif args.no_session:
        cmd.append("--no-session")

    # Build prompt with optional auto-instruction
    full_prompt = self.prompt
    if args.auto_instruction:
        full_prompt = f"{args.auto_instruction}\n\n{full_prompt}"

    # For multiline or large prompts, pipe via stdin to avoid command-line
    # argument issues. Pi CLI reads stdin when isTTY is false and
    # automatically prepends it to messages in print mode.
    # For simple single-line prompts, pass as positional arg + -p flag.
    stdin_prompt: Optional[str] = None
    if "\n" in full_prompt or len(full_prompt) > 4096:
        # Pipe via stdin — Pi auto-enables print mode when stdin has data
        stdin_prompt = full_prompt
    else:
        # Print mode for non-interactive execution + positional arg
        cmd.append("-p")
        cmd.append(full_prompt)

    # Additional raw arguments
    # NOTE(review): naive whitespace split — quoted arguments with embedded
    # spaces are not supported here (matches the documented
    # "space-separated" contract of --additional-args).
    if args.additional_args:
        extra = args.additional_args.strip().split()
        if extra:
            cmd.extend(extra)

    return cmd, stdin_prompt
|
|
504
|
+
|
|
505
|
+
# ── Codex prettifier helpers ──────────────────────────────────────────
|
|
506
|
+
|
|
507
|
+
def _first_nonempty_str(self, *values) -> str:
|
|
508
|
+
"""Return the first non-empty string value."""
|
|
509
|
+
for val in values:
|
|
510
|
+
if isinstance(val, str) and val != "":
|
|
511
|
+
return val
|
|
512
|
+
return ""
|
|
513
|
+
|
|
514
|
+
def _extract_content_text(self, payload: dict) -> str:
|
|
515
|
+
"""Join text-like fields from content arrays (item.* schema)."""
|
|
516
|
+
content = payload.get("content") if isinstance(payload, dict) else None
|
|
517
|
+
parts: List[str] = []
|
|
518
|
+
if isinstance(content, list):
|
|
519
|
+
for entry in content:
|
|
520
|
+
if not isinstance(entry, dict):
|
|
521
|
+
continue
|
|
522
|
+
text_val = (
|
|
523
|
+
entry.get("text")
|
|
524
|
+
or entry.get("message")
|
|
525
|
+
or entry.get("output_text")
|
|
526
|
+
or entry.get("input_text")
|
|
527
|
+
)
|
|
528
|
+
if isinstance(text_val, str) and text_val != "":
|
|
529
|
+
parts.append(text_val)
|
|
530
|
+
return "\n".join(parts) if parts else ""
|
|
531
|
+
|
|
532
|
+
def _extract_command_output_text(self, payload: dict) -> str:
|
|
533
|
+
"""Extract aggregated/command output from various item.* layouts."""
|
|
534
|
+
if not isinstance(payload, dict):
|
|
535
|
+
return ""
|
|
536
|
+
result = payload.get("result") if isinstance(payload.get("result"), dict) else None
|
|
537
|
+
content_text = self._extract_content_text(payload)
|
|
538
|
+
return self._first_nonempty_str(
|
|
539
|
+
payload.get("aggregated_output"),
|
|
540
|
+
payload.get("output"),
|
|
541
|
+
payload.get("formatted_output"),
|
|
542
|
+
result.get("aggregated_output") if result else None,
|
|
543
|
+
result.get("output") if result else None,
|
|
544
|
+
result.get("formatted_output") if result else None,
|
|
545
|
+
content_text,
|
|
546
|
+
)
|
|
547
|
+
|
|
548
|
+
def _extract_reasoning_text(self, payload: dict) -> str:
|
|
549
|
+
"""Extract reasoning text from legacy and item.* schemas."""
|
|
550
|
+
if not isinstance(payload, dict):
|
|
551
|
+
return ""
|
|
552
|
+
reasoning_obj = payload.get("reasoning") if isinstance(payload.get("reasoning"), dict) else None
|
|
553
|
+
result_obj = payload.get("result") if isinstance(payload.get("result"), dict) else None
|
|
554
|
+
content_text = self._extract_content_text(payload)
|
|
555
|
+
return self._first_nonempty_str(
|
|
556
|
+
payload.get("text"),
|
|
557
|
+
payload.get("reasoning_text"),
|
|
558
|
+
reasoning_obj.get("text") if reasoning_obj else None,
|
|
559
|
+
result_obj.get("text") if result_obj else None,
|
|
560
|
+
content_text,
|
|
561
|
+
)
|
|
562
|
+
|
|
563
|
+
def _extract_message_text_codex(self, payload: dict) -> str:
|
|
564
|
+
"""Extract final/assistant message text from item.* schemas."""
|
|
565
|
+
if not isinstance(payload, dict):
|
|
566
|
+
return ""
|
|
567
|
+
result_obj = payload.get("result") if isinstance(payload.get("result"), dict) else None
|
|
568
|
+
content_text = self._extract_content_text(payload)
|
|
569
|
+
return self._first_nonempty_str(
|
|
570
|
+
payload.get("message"),
|
|
571
|
+
payload.get("text"),
|
|
572
|
+
payload.get("final"),
|
|
573
|
+
result_obj.get("message") if result_obj else None,
|
|
574
|
+
result_obj.get("text") if result_obj else None,
|
|
575
|
+
content_text,
|
|
576
|
+
)
|
|
577
|
+
|
|
578
|
+
def _normalize_codex_event(self, obj_dict: dict):
|
|
579
|
+
"""Normalize legacy (msg-based) and new item.* schemas into a common tuple."""
|
|
580
|
+
msg = obj_dict.get("msg") if isinstance(obj_dict.get("msg"), dict) else {}
|
|
581
|
+
outer_type = (obj_dict.get("type") or "").strip()
|
|
582
|
+
item = obj_dict.get("item") if isinstance(obj_dict.get("item"), dict) else None
|
|
583
|
+
|
|
584
|
+
msg_type = (msg.get("type") or "").strip() if isinstance(msg, dict) else ""
|
|
585
|
+
payload = msg if isinstance(msg, dict) else {}
|
|
586
|
+
|
|
587
|
+
if not msg_type and item is not None:
|
|
588
|
+
msg_type = (item.get("type") or "").strip() or outer_type
|
|
589
|
+
payload = item
|
|
590
|
+
elif not msg_type:
|
|
591
|
+
msg_type = outer_type
|
|
592
|
+
|
|
593
|
+
return msg_type, payload, outer_type
|
|
594
|
+
|
|
595
|
+
def _normalize_item_id(self, payload: dict, outer_type: str) -> Optional[str]:
|
|
596
|
+
"""Prefer existing id on item.* payloads; otherwise synthesize sequential item_{n}."""
|
|
597
|
+
item_id = payload.get("id") if isinstance(payload, dict) else None
|
|
598
|
+
if isinstance(item_id, str) and item_id.strip():
|
|
599
|
+
parsed = self._parse_item_number(item_id)
|
|
600
|
+
if parsed is not None and parsed + 1 > self._item_counter:
|
|
601
|
+
self._item_counter = parsed + 1
|
|
602
|
+
return item_id.strip()
|
|
603
|
+
|
|
604
|
+
if isinstance(outer_type, str) and outer_type.startswith("item."):
|
|
605
|
+
generated = f"item_{self._item_counter}"
|
|
606
|
+
self._item_counter += 1
|
|
607
|
+
return generated
|
|
608
|
+
|
|
609
|
+
return None
|
|
610
|
+
|
|
611
|
+
def _parse_item_number(self, item_id: str) -> Optional[int]:
|
|
612
|
+
"""Return numeric component from item_{n} ids or None if unparseable."""
|
|
613
|
+
if not isinstance(item_id, str):
|
|
614
|
+
return None
|
|
615
|
+
item_id = item_id.strip()
|
|
616
|
+
if not item_id.startswith("item_"):
|
|
617
|
+
return None
|
|
618
|
+
try:
|
|
619
|
+
return int(item_id.split("item_", 1)[1])
|
|
620
|
+
except Exception:
|
|
621
|
+
return None
|
|
622
|
+
|
|
623
|
+
def _strip_thinking_signature(self, content_list: list) -> list:
|
|
624
|
+
"""Remove thinkingSignature, textSignature, and encrypted_content from content items."""
|
|
625
|
+
if not isinstance(content_list, list):
|
|
626
|
+
return content_list
|
|
627
|
+
for item in content_list:
|
|
628
|
+
if isinstance(item, dict):
|
|
629
|
+
item.pop("thinkingSignature", None)
|
|
630
|
+
item.pop("textSignature", None)
|
|
631
|
+
item.pop("encrypted_content", None)
|
|
632
|
+
return content_list
|
|
633
|
+
|
|
634
|
+
def _sanitize_codex_event(self, obj: dict, strip_metadata: bool = True) -> dict:
    """Deep-sanitize a Codex event: strip thinkingSignature and encrypted_content
    from any nested content arrays, and optionally remove metadata keys from
    nested message dicts.

    Handles Pi-wrapped events like message_update which nest messages under
    'partial', 'message', 'assistantMessageEvent', etc.

    Mutates *obj* in place and returns it. Metadata stripping only applies
    at the top level of each nested message container; recursion into
    content items passes strip_metadata=False so content entries keep
    their own keys.
    """
    if not isinstance(obj, dict):
        return obj

    # Strip thinkingSignature from top-level content
    if isinstance(obj.get("content"), list):
        self._strip_thinking_signature(obj["content"])

    # Remove encrypted signatures and encrypted_content anywhere
    obj.pop("encrypted_content", None)
    obj.pop("textSignature", None)

    # Remove metadata keys from this level
    if strip_metadata:
        for mk in self._codex_metadata_keys:
            obj.pop(mk, None)

    # Recurse into known nested message containers
    for nested_key in ("partial", "message", "assistantMessageEvent"):
        nested = obj.get(nested_key)
        if isinstance(nested, dict):
            self._sanitize_codex_event(nested, strip_metadata)

    # Recurse into content arrays to strip encrypted_content from items
    content = obj.get("content")
    if isinstance(content, list):
        for item in content:
            if isinstance(item, dict):
                item.pop("encrypted_content", None)
                item.pop("thinkingSignature", None)
                # If thinkingSignature was a string containing encrypted_content, it's already removed
                # Also recurse deeper if needed
                self._sanitize_codex_event(item, strip_metadata=False)

    return obj
|
|
676
|
+
|
|
677
|
+
def _truncate_tool_result_text(self, text: str) -> str:
    """Cap tool-result text at the configured line budget for display.

    JSON-escaped newlines/tabs are unescaped, ANSI escape sequences are
    removed, and any lines beyond the limit are replaced by a trailing
    "[N characters remaining]" marker. Non-string input passes through.
    """
    if not isinstance(text, str):
        return text

    # Render literal "\n"/"\t" escapes so the output is human readable,
    # then strip terminal color codes.
    rendered = self._strip_ansi_sequences(
        text.replace("\\n", "\n").replace("\\t", "\t")
    )

    limit = self._codex_tool_result_max_lines
    rows = rendered.split("\n")
    if len(rows) <= limit:
        return rendered

    visible = "\n".join(rows[:limit])
    hidden = "\n".join(rows[limit:])
    return f"{visible}\n[{len(hidden)} characters remaining]"
|
|
692
|
+
|
|
693
|
+
def _is_codex_final_message(self, parsed: dict) -> bool:
|
|
694
|
+
"""Detect if this is the final assistant message (contains type=text content or stopReason=stop)."""
|
|
695
|
+
if not isinstance(parsed, dict):
|
|
696
|
+
return False
|
|
697
|
+
if parsed.get("stopReason") == "stop":
|
|
698
|
+
return True
|
|
699
|
+
content = parsed.get("content")
|
|
700
|
+
if isinstance(content, list):
|
|
701
|
+
for item in content:
|
|
702
|
+
if isinstance(item, dict) and item.get("type") == "text":
|
|
703
|
+
return True
|
|
704
|
+
return False
|
|
705
|
+
|
|
706
|
+
def _format_pi_codex_message(self, parsed: dict) -> Optional[str]:
    """Format a Pi-wrapped Codex message (role-based with content arrays).

    Handles:
    - Stripping thinkingSignature from thinking content
    - Truncating toolResult text to configured max lines
    - Hiding metadata keys from intermediate assistant messages

    Returns a JSON header line (optionally followed by a multi-line
    content section), or None for roles this method does not render.
    Side effects: increments self.message_counter for every call that
    reaches the counter line, and latches self._codex_first_assistant_seen.
    """
    if not isinstance(parsed, dict):
        return None

    role = parsed.get("role", "")
    now = datetime.now().strftime("%I:%M:%S %p")
    self.message_counter += 1

    # --- toolResult role: truncate text content ---
    if role == "toolResult":
        header: Dict = {
            "type": "toolResult",
            "datetime": now,
            "counter": f"#{self.message_counter}",
            "toolName": parsed.get("toolName", ""),
        }
        is_error = parsed.get("isError", False)
        if is_error:
            header["isError"] = True

        content = parsed.get("content")
        if isinstance(content, list):
            # First text entry wins; multi-line text goes below the header.
            for item in content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_val = item.get("text", "")
                    truncated = self._truncate_tool_result_text(text_val)
                    if "\n" in truncated:
                        return json.dumps(header, ensure_ascii=False) + "\ncontent:\n" + truncated
                    header["content"] = truncated
                    return json.dumps(header, ensure_ascii=False)

        # No text entry found — emit the bare header.
        return json.dumps(header, ensure_ascii=False)

    # --- assistant role: strip thinkingSignature and manage metadata ---
    if role == "assistant":
        content = parsed.get("content")
        if isinstance(content, list):
            self._strip_thinking_signature(content)

        is_final = self._is_codex_final_message(parsed)
        is_first = not self._codex_first_assistant_seen
        self._codex_first_assistant_seen = True

        # Metadata is shown only on the first and final assistant messages.
        show_metadata = is_first or is_final

        # Build display object (fallback rendering if no parts are extracted)
        display: Dict = {}
        for key, value in parsed.items():
            if not show_metadata and key in self._codex_metadata_keys:
                continue
            display[key] = value

        # Add datetime and counter
        display["datetime"] = now
        display["counter"] = f"#{self.message_counter}"

        # Extract main content for pretty display
        if isinstance(content, list):
            parts = []
            for item in content:
                if isinstance(item, dict):
                    if item.get("type") == "thinking":
                        thinking_text = item.get("thinking", "")
                        if thinking_text:
                            parts.append(f"[thinking] {thinking_text}")
                    elif item.get("type") == "toolCall":
                        name = item.get("name", "")
                        args = item.get("arguments", {})
                        if isinstance(args, dict):
                            # Shell-style calls carry a "command" argument;
                            # everything else is dumped as sanitized JSON.
                            cmd = args.get("command", "")
                            if isinstance(cmd, str) and cmd:
                                parts.append(f"[toolCall] {name}: {self._sanitize_tool_argument_value(cmd)}")
                            else:
                                args_clean = self._sanitize_tool_argument_value(args)
                                args_str = json.dumps(args_clean, ensure_ascii=False)
                                parts.append(f"[toolCall] {name}: {args_str}")
                        else:
                            parts.append(f"[toolCall] {name}")
                    elif item.get("type") == "text":
                        text_val = item.get("text", "")
                        if text_val:
                            parts.append(text_val)

            if parts:
                combined = "\n".join(parts)
                header_obj: Dict = {"type": "assistant", "datetime": now, "counter": f"#{self.message_counter}"}
                if show_metadata:
                    for mk in ("api", "provider", "model", "stopReason"):
                        if mk in parsed:
                            header_obj[mk] = parsed[mk]
                    # usage is only meaningful on the final message.
                    if "usage" in parsed and is_final:
                        header_obj["usage"] = parsed["usage"]
                if "\n" in combined:
                    return json.dumps(header_obj, ensure_ascii=False) + "\ncontent:\n" + combined
                header_obj["content"] = combined
                return json.dumps(header_obj, ensure_ascii=False)

        # Fallback: dump the filtered display object
        return json.dumps(display, ensure_ascii=False)

    # Other roles (e.g. user) are not handled here.
    return None
|
|
814
|
+
|
|
815
|
+
# Event subtypes to suppress in message_update (streaming deltas are noisy).
# Only the corresponding *_end subtypes are rendered, with the complete
# accumulated content — see _format_pi_codex_event.
_CODEX_HIDDEN_MESSAGE_UPDATE_SUBTYPES = {
    "text_delta", "text_start",
    "thinking_delta", "thinking_start",
    "toolcall_delta", "toolcall_start",
}
|
|
821
|
+
|
|
822
|
+
def _format_pi_codex_event(self, parsed: dict) -> Optional[str]:
    """Format Pi-wrapped events when in Codex prettifier mode.

    Handles Pi event types (message_update, turn_end, message_start, etc.)
    that wrap Codex-style content. Returns formatted string, empty string
    to suppress, or None if this method doesn't handle the event type.

    Side effects: increments self.message_counter on every *_end event it
    renders, toggles self._in_tool_execution, and consumes entries from
    self._pending_tool_calls / self._pending_exec_starts.
    """
    event_type = parsed.get("type", "")
    if not event_type:
        return None

    now = datetime.now().strftime("%I:%M:%S %p")

    # --- message_update: filter by assistantMessageEvent subtype ---
    if event_type == "message_update":
        ame = parsed.get("assistantMessageEvent", {})
        if isinstance(ame, dict):
            ame_type = ame.get("type", "")

            # Suppress noisy streaming delta/start events
            if ame_type in self._CODEX_HIDDEN_MESSAGE_UPDATE_SUBTYPES:
                return ""  # suppress

            # text_end: show the complete text content
            if ame_type == "text_end":
                self.message_counter += 1
                content_text = ame.get("content", "")
                header: Dict = {
                    "type": "text_end",
                    "datetime": now,
                    "counter": f"#{self.message_counter}",
                }
                if isinstance(content_text, str) and content_text.strip():
                    # Multi-line text goes below the JSON header.
                    if "\n" in content_text:
                        return json.dumps(header, ensure_ascii=False) + "\ncontent:\n" + content_text
                    header["content"] = content_text
                return json.dumps(header, ensure_ascii=False)

            # thinking_end: show the final thinking summary
            if ame_type == "thinking_end":
                self.message_counter += 1
                thinking_text = ame.get("content", "")
                header = {
                    "type": "thinking_end",
                    "datetime": now,
                    "counter": f"#{self.message_counter}",
                }
                if isinstance(thinking_text, str) and thinking_text.strip():
                    header["thinking"] = thinking_text
                return json.dumps(header, ensure_ascii=False)

            # toolcall_end: buffer for grouping with tool_execution_end
            if ame_type == "toolcall_end":
                tool_call = ame.get("toolCall", {})
                if self._buffer_tool_call_end(tool_call, now):
                    return ""  # suppress — will emit combined event on tool_execution_end
                # No toolCallId — fallback to original format
                self.message_counter += 1
                header = {
                    "type": "toolcall_end",
                    "datetime": now,
                    "counter": f"#{self.message_counter}",
                }
                if isinstance(tool_call, dict):
                    header["tool"] = tool_call.get("name", "")
                    args = tool_call.get("arguments", {})
                    if isinstance(args, dict):
                        # Shell-style calls expose "command" directly.
                        cmd = args.get("command", "")
                        if isinstance(cmd, str) and cmd:
                            header["command"] = self._sanitize_tool_argument_value(cmd)
                        else:
                            header["args"] = self._sanitize_tool_argument_value(args)
                    elif isinstance(args, str) and args.strip():
                        header["args"] = self._sanitize_tool_argument_value(args)
                return self._format_tool_invocation_header(header)

        # Other message_update subtypes: suppress by default
        return ""

    # --- turn_end: metadata only (text already shown by text_end/thinking_end/toolcall_end) ---
    if event_type == "turn_end":
        self.message_counter += 1
        header = {
            "type": "turn_end",
            "datetime": now,
            "counter": f"#{self.message_counter}",
        }
        tool_results = parsed.get("toolResults")
        if isinstance(tool_results, list):
            header["tool_results_count"] = len(tool_results)
        return json.dumps(header, ensure_ascii=False)

    # --- message_start: minimal header (no counter — only *_end events get counters) ---
    if event_type == "message_start":
        message = parsed.get("message", {})
        header = {
            "type": "message_start",
            "datetime": now,
        }
        if isinstance(message, dict):
            role = message.get("role")
            if role:
                header["role"] = role
        return json.dumps(header, ensure_ascii=False)

    # --- message_end: metadata only (text already shown by text_end/thinking_end/toolcall_end) ---
    if event_type == "message_end":
        self.message_counter += 1
        header = {
            "type": "message_end",
            "datetime": now,
            "counter": f"#{self.message_counter}",
        }
        return json.dumps(header, ensure_ascii=False)

    # --- tool_execution_start: always suppress, buffer args ---
    if event_type == "tool_execution_start":
        self._buffer_exec_start(parsed)
        self._in_tool_execution = True
        return ""  # suppress

    # --- tool_execution_end: combine with buffered data ---
    if event_type == "tool_execution_end":
        self._in_tool_execution = False
        tool_call_id = parsed.get("toolCallId")

        # Prefer the richer toolcall_end buffer; copy the start timestamp
        # from the exec-start buffer so execution_time can be computed.
        pending_tool = self._pending_tool_calls.pop(tool_call_id, None) if tool_call_id else None
        pending_exec = self._pending_exec_starts.pop(tool_call_id, None) if tool_call_id else None
        if pending_tool and pending_exec and "started_at" in pending_exec:
            pending_tool["started_at"] = pending_exec["started_at"]
        pending = pending_tool or pending_exec

        if pending:
            return self._build_combined_tool_event(pending, parsed, now)

        # No buffered data — minimal fallback
        self.message_counter += 1
        header = {
            "type": "tool",
            "datetime": now,
            "counter": f"#{self.message_counter}",
            "tool": parsed.get("toolName", ""),
        }
        execution_time = self._format_execution_time(parsed)
        if execution_time:
            header["execution_time"] = execution_time

        is_error = parsed.get("isError", False)
        if is_error:
            header["isError"] = True

        result_val = parsed.get("result")
        colorize_error = self._color_enabled() and bool(is_error)

        # result may be a plain string, a dict with a content array, or a
        # list; each shape is rendered with the same multi-line/color rules.
        if isinstance(result_val, str) and result_val.strip():
            truncated = self._truncate_tool_result_text(result_val)
            if "\n" in truncated or colorize_error:
                label = "result:"
                colored = self._colorize_result(truncated, is_error=bool(is_error))
                if colorize_error:
                    label = self._colorize_result(label, is_error=True)
                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
            header["result"] = truncated
            return self._format_tool_invocation_header(header)

        if isinstance(result_val, dict):
            result_content = result_val.get("content")
            if isinstance(result_content, list):
                # First text entry wins.
                for rc_item in result_content:
                    if isinstance(rc_item, dict) and rc_item.get("type") == "text":
                        text = rc_item.get("text", "")
                        truncated = self._truncate_tool_result_text(text)
                        if "\n" in truncated or colorize_error:
                            label = "result:"
                            colored = self._colorize_result(truncated, is_error=bool(is_error))
                            if colorize_error:
                                label = self._colorize_result(label, is_error=True)
                            return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
                        header["result"] = truncated
                        return self._format_tool_invocation_header(header)

            # Dict without a usable text entry: dump as JSON.
            result_json = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
            if "\n" in result_json or colorize_error:
                label = "result:"
                colored = self._colorize_result(result_json, is_error=bool(is_error))
                if colorize_error:
                    label = self._colorize_result(label, is_error=True)
                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
            header["result"] = result_json
            return self._format_tool_invocation_header(header)

        if isinstance(result_val, list):
            result_json = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
            if "\n" in result_json or colorize_error:
                label = "result:"
                colored = self._colorize_result(result_json, is_error=bool(is_error))
                if colorize_error:
                    label = self._colorize_result(label, is_error=True)
                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
            header["result"] = result_json
            return self._format_tool_invocation_header(header)

        # No result at all — header only.
        return self._format_tool_invocation_header(header)

    # --- turn_start: suppress (no user-visible value) ---
    if event_type == "turn_start":
        return ""

    # --- agent_start: simple header (no counter — only *_end events get counters) ---
    if event_type == "agent_start":
        return json.dumps({
            "type": event_type,
            "datetime": now,
        }, ensure_ascii=False)

    # --- agent_end: capture and show summary ---
    if event_type == "agent_end":
        self.message_counter += 1
        header = {
            "type": "agent_end",
            "datetime": now,
            "counter": f"#{self.message_counter}",
        }
        messages = parsed.get("messages")
        if isinstance(messages, list):
            header["message_count"] = len(messages)
        total_cost_usd = self._extract_total_cost_usd(parsed)
        if total_cost_usd is not None:
            header["total_cost_usd"] = total_cost_usd
        return json.dumps(header, ensure_ascii=False)

    # Not a Pi-wrapped event type we handle
    return None
|
|
1055
|
+
|
|
1056
|
+
def _format_event_pretty_codex(self, payload: dict) -> Optional[str]:
    """Format a Codex-schema JSON event for human-readable output.

    Normalizes the event via _normalize_codex_event, assigns/propagates an
    item id, and renders a JSON header (plus an optional multi-line body)
    per message type. Returns None for unhandled types or on any error.
    Side effect: increments self.message_counter once per call.
    """
    try:
        msg_type, msg_payload, outer_type = self._normalize_codex_event(payload)
        item_id = self._normalize_item_id(msg_payload, outer_type)

        now = datetime.now().strftime("%I:%M:%S %p")
        self.message_counter += 1
        header_type = (outer_type or msg_type).strip()
        base_type = header_type or msg_type or "message"

        # Small factory so each branch can rebuild a fresh header with the
        # shared id / item_type decoration.
        def make_header(type_value: str):
            hdr: Dict = {"type": type_value, "datetime": now, "counter": f"#{self.message_counter}"}
            if item_id:
                hdr["id"] = item_id
            if outer_type and msg_type and outer_type != msg_type:
                hdr["item_type"] = msg_type
            return hdr

        header = make_header(base_type)

        if isinstance(msg_payload, dict):
            # Write the synthesized id back so downstream consumers see it.
            if item_id and "id" not in msg_payload:
                msg_payload["id"] = item_id
            if msg_payload.get("command"):
                header["command"] = msg_payload.get("command")
            # "status" wins over "state" when both are present.
            if msg_payload.get("status"):
                header["status"] = msg_payload.get("status")
            if msg_payload.get("state") and not header.get("status"):
                header["status"] = msg_payload.get("state")

        # agent_reasoning
        if msg_type in {"agent_reasoning", "reasoning"}:
            content = self._extract_reasoning_text(msg_payload)
            header = make_header(header_type or msg_type)
            if "\n" in content:
                return json.dumps(header, ensure_ascii=False) + "\ntext:\n" + content
            header["text"] = content
            return json.dumps(header, ensure_ascii=False)

        # agent_message / assistant
        if msg_type in {"agent_message", "message", "assistant_message", "assistant"}:
            content = self._extract_message_text_codex(msg_payload)
            header = make_header(header_type or msg_type)
            if "\n" in content:
                return json.dumps(header, ensure_ascii=False) + "\nmessage:\n" + content
            if content != "":
                header["message"] = content
                return json.dumps(header, ensure_ascii=False)
            # NOTE(review): empty content with no header_type falls through to
            # the branches below — reconstructed from extraction; confirm
            # against upstream source.
            if header_type:
                return json.dumps(header, ensure_ascii=False)

        # exec_command_end
        if msg_type == "exec_command_end":
            formatted_output = msg_payload.get("formatted_output", "") if isinstance(msg_payload, dict) else ""
            # This branch deliberately uses a minimal header (no counter/id).
            header = {"type": msg_type, "datetime": now}
            if "\n" in formatted_output:
                return json.dumps(header, ensure_ascii=False) + "\nformatted_output:\n" + formatted_output
            header["formatted_output"] = formatted_output
            return json.dumps(header, ensure_ascii=False)

        # command_execution
        if msg_type == "command_execution":
            aggregated_output = self._extract_command_output_text(msg_payload)
            if "\n" in aggregated_output:
                return json.dumps(header, ensure_ascii=False) + "\naggregated_output:\n" + aggregated_output
            if aggregated_output:
                header["aggregated_output"] = aggregated_output
                return json.dumps(header, ensure_ascii=False)
            if header_type:
                return json.dumps(header, ensure_ascii=False)

        return None
    except Exception:
        # Prettifying is best-effort; never let formatting break the stream.
        return None
|
|
1131
|
+
|
|
1132
|
+
# ── Claude prettifier ─────────────────────────────────────────────────
|
|
1133
|
+
|
|
1134
|
+
def _format_event_pretty_claude(self, json_line: str) -> Optional[str]:
    """Format a Claude-schema JSON event for human-readable output.

    Accepts either a raw JSON string or an already-parsed dict. Renders a
    compact JSON metadata header, placing any multi-line text below it.
    Returns the input unchanged when it cannot be parsed, and None only
    for suppressed progress subtypes. Side effect: increments
    self.message_counter once per call.
    """
    try:
        data = json.loads(json_line) if isinstance(json_line, str) else json_line
        self.message_counter += 1
        now = datetime.now().strftime("%I:%M:%S %p")

        if data.get("type") == "user":
            message = data.get("message", {})
            content_list = message.get("content", [])
            text_content = ""
            # First text entry wins.
            for item in content_list:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_content = item.get("text", "")
                    break

            # -1 means "no truncation" for user messages.
            if self.user_message_truncate != -1:
                lines = text_content.split('\n')
                if len(lines) > self.user_message_truncate:
                    text_content = '\n'.join(lines[:self.user_message_truncate]) + '\n[Truncated...]'

            metadata: Dict = {"type": "user", "datetime": now, "counter": f"#{self.message_counter}"}
            if '\n' in text_content:
                return json.dumps(metadata, ensure_ascii=False) + "\ncontent:\n" + text_content
            metadata["content"] = text_content
            return json.dumps(metadata, ensure_ascii=False)

        elif data.get("type") == "progress":
            progress_data = data.get("data", {})
            progress_type = progress_data.get("type", "")

            # hook_progress is internal noise — drop entirely.
            if progress_type == "hook_progress":
                return None

            if progress_type == "bash_progress":
                output_text = progress_data.get("output", "")
                elapsed_time = progress_data.get("elapsedTimeSeconds", 0)
                total_lines = progress_data.get("totalLines", 0)
                simplified: Dict = {
                    "type": "progress", "progress_type": "bash_progress",
                    "datetime": now, "counter": f"#{self.message_counter}",
                    "elapsed": f"{elapsed_time}s", "lines": total_lines,
                }
                if '\n' in output_text:
                    return json.dumps(simplified, ensure_ascii=False) + "\n[Progress] output:\n" + output_text
                simplified["output"] = output_text
                return f"[Progress] {json.dumps(simplified, ensure_ascii=False)}"

            # Any other progress subtype: pass the payload through verbatim.
            return json.dumps({
                "type": "progress", "progress_type": progress_type,
                "datetime": now, "counter": f"#{self.message_counter}",
                "data": progress_data,
            }, ensure_ascii=False)

        elif data.get("type") == "assistant":
            message = data.get("message", {})
            content_list = message.get("content", [])
            text_content = ""
            tool_use_data = None

            # Take the first text OR tool_use entry, whichever comes first.
            for item in content_list:
                if isinstance(item, dict):
                    if item.get("type") == "text":
                        text_content = item.get("text", "")
                        break
                    elif item.get("type") == "tool_use":
                        tool_use_data = {"name": item.get("name", ""), "input": item.get("input", {})}
                        break

            metadata = {"type": "assistant", "datetime": now, "counter": f"#{self.message_counter}"}

            if tool_use_data:
                tool_input = tool_use_data.get("input", {})
                prompt_field = tool_input.get("prompt", "")
                # Multi-line prompts are hoisted out of the JSON header so
                # they stay readable.
                if isinstance(prompt_field, str) and '\n' in prompt_field:
                    tool_use_copy = {
                        "name": tool_use_data.get("name", ""),
                        "input": {k: v for k, v in tool_input.items() if k != "prompt"},
                    }
                    metadata["tool_use"] = tool_use_copy
                    return json.dumps(metadata, ensure_ascii=False) + "\nprompt:\n" + prompt_field
                metadata["tool_use"] = tool_use_data
                return json.dumps(metadata, ensure_ascii=False)
            else:
                if '\n' in text_content:
                    return json.dumps(metadata, ensure_ascii=False) + "\ncontent:\n" + text_content
                metadata["content"] = text_content
                return json.dumps(metadata, ensure_ascii=False)

        else:
            # Unknown top-level type: flatten a nested tool_result if present.
            message = data.get("message", {})
            content_list = message.get("content", [])
            if content_list and isinstance(content_list, list) and len(content_list) > 0:
                nested_item = content_list[0]
                if isinstance(nested_item, dict) and nested_item.get("type") in ["tool_result"]:
                    flattened: Dict = {"datetime": now, "counter": f"#{self.message_counter}"}
                    if "tool_use_id" in nested_item:
                        flattened["tool_use_id"] = nested_item["tool_use_id"]
                    flattened["type"] = nested_item["type"]
                    nested_content = nested_item.get("content", "")
                    if isinstance(nested_content, str) and '\n' in nested_content:
                        return json.dumps(flattened, ensure_ascii=False) + "\ncontent:\n" + nested_content
                    flattened["content"] = nested_content
                    return json.dumps(flattened, ensure_ascii=False)

            # Generic fallback: echo the event with datetime/counter prepended,
            # hoisting a multi-line "result" below the header.
            output: Dict = {"datetime": now, "counter": f"#{self.message_counter}", **data}
            if "result" in output and isinstance(output["result"], str) and '\n' in output["result"]:
                result_value = output.pop("result")
                return json.dumps(output, ensure_ascii=False) + "\nresult:\n" + result_value
            return json.dumps(output, ensure_ascii=False)

    except json.JSONDecodeError:
        # Not JSON — pass raw strings through untouched.
        return json_line if isinstance(json_line, str) else None
    except Exception:
        # Best-effort prettifier: never crash the stream.
        return json_line if isinstance(json_line, str) else None
|
|
1249
|
+
|
|
1250
|
+
# ── Pi prettifier helpers ─────────────────────────────────────────────
|
|
1251
|
+
|
|
1252
|
+
def _extract_text_from_message(self, message: dict) -> str:
|
|
1253
|
+
"""Extract human-readable text from a Pi message object."""
|
|
1254
|
+
if not isinstance(message, dict):
|
|
1255
|
+
return ""
|
|
1256
|
+
|
|
1257
|
+
# Direct text/content fields
|
|
1258
|
+
for field in ("text", "content", "message", "response", "output"):
|
|
1259
|
+
val = message.get(field)
|
|
1260
|
+
if isinstance(val, str) and val.strip():
|
|
1261
|
+
return val
|
|
1262
|
+
|
|
1263
|
+
# content array (Claude-style)
|
|
1264
|
+
content = message.get("content")
|
|
1265
|
+
if isinstance(content, list):
|
|
1266
|
+
parts = []
|
|
1267
|
+
for item in content:
|
|
1268
|
+
if isinstance(item, dict):
|
|
1269
|
+
text = item.get("text") or item.get("content")
|
|
1270
|
+
if isinstance(text, str) and text.strip():
|
|
1271
|
+
parts.append(text)
|
|
1272
|
+
elif isinstance(item, str) and item.strip():
|
|
1273
|
+
parts.append(item)
|
|
1274
|
+
if parts:
|
|
1275
|
+
return "\n".join(parts)
|
|
1276
|
+
|
|
1277
|
+
return ""
|
|
1278
|
+
|
|
1279
|
+
def _buffer_tool_call_end(self, tool_call: dict, now: str) -> bool:
    """Stash toolcall_end details keyed by toolCallId for later pairing.

    Returns True when the call was buffered (caller should suppress the
    event) and False when it carries no toolCallId (caller should emit
    it immediately).
    """
    call_id = tool_call.get("toolCallId", "") if isinstance(tool_call, dict) else ""
    if not call_id:
        return False

    buffered: Dict = {"tool": tool_call.get("name", ""), "datetime": now}
    arguments = tool_call.get("arguments", {})

    if isinstance(arguments, dict):
        # Shell-style calls expose the command string directly.
        command = arguments.get("command", "")
        if isinstance(command, str) and command:
            buffered["command"] = self._sanitize_tool_argument_value(command)
        else:
            buffered["args"] = self._sanitize_tool_argument_value(arguments)
    elif isinstance(arguments, str) and arguments.strip():
        buffered["args"] = self._sanitize_tool_argument_value(arguments)

    self._pending_tool_calls[call_id] = buffered
    return True
|
|
1303
|
+
|
|
1304
|
+
def _buffer_exec_start(self, payload: dict) -> None:
    """Record tool_execution_start info (name, args, start stamp) by toolCallId.

    The buffered record serves both as a fallback for tool_execution_end
    when no toolcall_end was seen, and as the timing source for the
    combined tool event. Events without a toolCallId are ignored.
    """
    call_id = payload.get("toolCallId", "")
    if not call_id:
        return

    record: Dict = {
        "tool": payload.get("toolName", ""),
        # Monotonic stamp for later duration computation.
        "started_at": time.perf_counter(),
    }

    raw_args = payload.get("args")
    if isinstance(raw_args, dict):
        command = raw_args.get("command", "")
        if isinstance(command, str) and command:
            record["command"] = self._sanitize_tool_argument_value(command)
        else:
            record["args"] = self._sanitize_tool_argument_value(raw_args)
    elif isinstance(raw_args, str) and raw_args.strip():
        record["args"] = self._sanitize_tool_argument_value(raw_args)

    self._pending_exec_starts[call_id] = record
|
|
1325
|
+
|
|
1326
|
+
def _build_combined_tool_event(self, pending: dict, payload: dict, now: str) -> str:
|
|
1327
|
+
"""Build a combined 'tool' event from buffered toolcall_end + tool_execution_end."""
|
|
1328
|
+
self.message_counter += 1
|
|
1329
|
+
header: Dict = {
|
|
1330
|
+
"type": "tool",
|
|
1331
|
+
"datetime": now,
|
|
1332
|
+
"counter": f"#{self.message_counter}",
|
|
1333
|
+
"tool": pending.get("tool", payload.get("toolName", "")),
|
|
1334
|
+
}
|
|
1335
|
+
|
|
1336
|
+
# Args from buffered toolcall/tool_execution_start
|
|
1337
|
+
if "command" in pending:
|
|
1338
|
+
header["command"] = pending["command"]
|
|
1339
|
+
elif "args" in pending:
|
|
1340
|
+
header["args"] = pending["args"]
|
|
1341
|
+
|
|
1342
|
+
# Execution time (source of truth: tool_execution_start -> tool_execution_end)
|
|
1343
|
+
execution_time = self._format_execution_time(payload, pending)
|
|
1344
|
+
if execution_time:
|
|
1345
|
+
header["execution_time"] = execution_time
|
|
1346
|
+
|
|
1347
|
+
is_error = payload.get("isError", False)
|
|
1348
|
+
if is_error:
|
|
1349
|
+
header["isError"] = True
|
|
1350
|
+
|
|
1351
|
+
# Result extraction (handles string, dict with content array, and list)
|
|
1352
|
+
result_val = payload.get("result")
|
|
1353
|
+
result_text = None
|
|
1354
|
+
if isinstance(result_val, str) and result_val.strip():
|
|
1355
|
+
result_text = self._truncate_tool_result_text(result_val)
|
|
1356
|
+
elif isinstance(result_val, dict):
|
|
1357
|
+
result_content = result_val.get("content")
|
|
1358
|
+
if isinstance(result_content, list):
|
|
1359
|
+
for rc_item in result_content:
|
|
1360
|
+
if isinstance(rc_item, dict) and rc_item.get("type") == "text":
|
|
1361
|
+
result_text = self._truncate_tool_result_text(rc_item.get("text", ""))
|
|
1362
|
+
break
|
|
1363
|
+
if result_text is None:
|
|
1364
|
+
result_text = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
|
|
1365
|
+
elif isinstance(result_val, list):
|
|
1366
|
+
result_text = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
|
|
1367
|
+
|
|
1368
|
+
if result_text:
|
|
1369
|
+
colorize_error = self._color_enabled() and bool(is_error)
|
|
1370
|
+
if "\n" in result_text or colorize_error:
|
|
1371
|
+
label = "result:"
|
|
1372
|
+
colored_text = self._colorize_result(result_text, is_error=bool(is_error))
|
|
1373
|
+
if colorize_error:
|
|
1374
|
+
label = self._colorize_result(label, is_error=True)
|
|
1375
|
+
return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored_text
|
|
1376
|
+
header["result"] = result_text
|
|
1377
|
+
|
|
1378
|
+
return self._format_tool_invocation_header(header)
|
|
1379
|
+
|
|
1380
|
+
def _format_event_pretty(self, payload: dict) -> Optional[str]:
    """
    Format a Pi JSON streaming event for human-readable output.

    Returns formatted string or None to skip the event.

    Side effects: increments self.message_counter for *_end events, toggles
    self._in_tool_execution, and consumes/populates the pending tool-call
    buffers via the helper methods it calls.
    """
    try:
        event_type = payload.get("type", "")
        now = datetime.now().strftime("%I:%M:%S %p")

        # Counter is only added to *_end events (below, per-branch)
        header: Dict = {
            "type": event_type,
            "datetime": now,
        }

        # --- Session header (no counter) ---
        if event_type == "session":
            header["version"] = payload.get("version")
            header["id"] = payload.get("id")
            return json.dumps(header, ensure_ascii=False)

        # --- turn_start: suppress (no user-visible value) ---
        if event_type == "turn_start":
            return None

        # --- agent_start: simple header (no counter) ---
        if event_type == "agent_start":
            return json.dumps(header, ensure_ascii=False)

        if event_type == "agent_end":
            self.message_counter += 1
            header["counter"] = f"#{self.message_counter}"
            messages = payload.get("messages")
            if isinstance(messages, list):
                header["message_count"] = len(messages)
            total_cost_usd = self._extract_total_cost_usd(payload)
            if total_cost_usd is not None:
                header["total_cost_usd"] = total_cost_usd
            return json.dumps(header, ensure_ascii=False)

        if event_type == "turn_end":
            self.message_counter += 1
            header["counter"] = f"#{self.message_counter}"
            tool_results = payload.get("toolResults")
            if isinstance(tool_results, list):
                header["tool_results_count"] = len(tool_results)
            # Skip message text - already displayed by text_end/thinking_end/toolcall_end
            return json.dumps(header, ensure_ascii=False)

        # --- Message events (assistant streaming) ---
        if event_type == "message_start":
            message = payload.get("message", {})
            role = message.get("role") if isinstance(message, dict) else None
            if role:
                header["role"] = role
            return json.dumps(header, ensure_ascii=False)

        if event_type == "message_update":
            # Check for noisy streaming sub-events and suppress them.
            # The subtype may arrive as a flat "event" key or nested under
            # assistantMessageEvent.type; the flat key wins when present.
            ame = payload.get("assistantMessageEvent", {})
            ame_type = ame.get("type", "") if isinstance(ame, dict) else ""
            event_subtype = payload.get("event", ame_type)
            if event_subtype in self._PI_HIDDEN_MESSAGE_UPDATE_EVENTS:
                return None  # Suppress noisy streaming deltas

            # toolcall_end: buffer for grouping with tool_execution_end
            if isinstance(ame, dict) and ame_type == "toolcall_end":
                tool_call = ame.get("toolCall", {})
                if self._buffer_tool_call_end(tool_call, now):
                    return None  # suppress — will emit combined event on tool_execution_end
                # No toolCallId — fallback to original format
                self.message_counter += 1
                header["counter"] = f"#{self.message_counter}"
                header["event"] = ame_type
                if isinstance(tool_call, dict):
                    header["tool"] = tool_call.get("name", "")
                    args = tool_call.get("arguments", {})
                    if isinstance(args, dict):
                        cmd = args.get("command", "")
                        # "command" string preferred over the generic args dict
                        if isinstance(cmd, str) and cmd:
                            header["command"] = self._sanitize_tool_argument_value(cmd)
                        else:
                            header["args"] = self._sanitize_tool_argument_value(args)
                    elif isinstance(args, str) and args.strip():
                        header["args"] = self._sanitize_tool_argument_value(args)
                return self._format_tool_invocation_header(header)

            # thinking_end: show thinking content (*_end → gets counter)
            if isinstance(ame, dict) and ame_type == "thinking_end":
                self.message_counter += 1
                header["counter"] = f"#{self.message_counter}"
                header["event"] = ame_type
                # Thinking text may live under any of these keys; first non-empty wins.
                thinking_text = ame.get("thinking", "") or ame.get("content", "") or ame.get("text", "")
                if isinstance(thinking_text, str) and thinking_text.strip():
                    header["thinking"] = thinking_text
                return json.dumps(header, ensure_ascii=False)

            # Any other *_end subtypes (e.g. text_end) get counter
            if isinstance(ame, dict) and ame_type and ame_type.endswith("_end"):
                self.message_counter += 1
                header["counter"] = f"#{self.message_counter}"

            message = payload.get("message", {})
            text = self._extract_text_from_message(message) if isinstance(message, dict) else ""

            # Also check assistantMessageEvent for completion text
            if isinstance(ame, dict):
                if ame_type:
                    header["event"] = ame_type
                delta_text = ame.get("text") or ame.get("delta") or ""
                if isinstance(delta_text, str) and delta_text.strip():
                    if not text:
                        text = delta_text

            # Multi-line content goes on its own lines; single-line content inline.
            if text and "\n" in text:
                return json.dumps(header, ensure_ascii=False) + "\ncontent:\n" + text
            elif text:
                header["content"] = text
            return json.dumps(header, ensure_ascii=False)

        if event_type == "message_end":
            self.message_counter += 1
            header["counter"] = f"#{self.message_counter}"
            # Skip message text - already displayed by text_end/thinking_end/toolcall_end
            return json.dumps(header, ensure_ascii=False)

        # --- Tool execution events ---
        # Always suppress tool_execution_start: buffer its args for
        # tool_execution_end to use. The user sees nothing until the
        # tool finishes, then gets a single combined "tool" event.
        if event_type == "tool_execution_start":
            self._buffer_exec_start(payload)
            self._in_tool_execution = True
            return None

        if event_type == "tool_execution_update":
            # Suppress updates — result will arrive in tool_execution_end
            return None

        if event_type == "tool_execution_end":
            self._in_tool_execution = False
            tool_call_id = payload.get("toolCallId")

            # Pop (consume) both buffers; exec-start timing wins when both exist.
            pending_tool = self._pending_tool_calls.pop(tool_call_id, None) if tool_call_id else None
            pending_exec = self._pending_exec_starts.pop(tool_call_id, None) if tool_call_id else None
            if pending_tool and pending_exec and "started_at" in pending_exec:
                pending_tool["started_at"] = pending_exec["started_at"]
            pending = pending_tool or pending_exec

            if pending:
                return self._build_combined_tool_event(pending, payload, now)

            # No buffered data at all — minimal fallback
            self.message_counter += 1
            header["type"] = "tool"
            header["counter"] = f"#{self.message_counter}"
            header["tool"] = payload.get("toolName", "")

            execution_time = self._format_execution_time(payload)
            if execution_time:
                header["execution_time"] = execution_time

            is_error = payload.get("isError", False)
            if is_error:
                header["isError"] = True

            result_val = payload.get("result")
            colorize_error = self._color_enabled() and bool(is_error)

            # String result
            if isinstance(result_val, str) and result_val.strip():
                truncated = self._truncate_tool_result_text(result_val)
                if "\n" in truncated or colorize_error:
                    label = "result:"
                    colored = self._colorize_result(truncated, is_error=bool(is_error))
                    if colorize_error:
                        label = self._colorize_result(label, is_error=True)
                    return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
                header["result"] = truncated
                return self._format_tool_invocation_header(header)

            # Dict result: prefer first {"type": "text"} item in its content list
            if isinstance(result_val, dict):
                result_content = result_val.get("content")
                if isinstance(result_content, list):
                    for rc_item in result_content:
                        if isinstance(rc_item, dict) and rc_item.get("type") == "text":
                            text = rc_item.get("text", "")
                            truncated = self._truncate_tool_result_text(text)
                            if "\n" in truncated or colorize_error:
                                label = "result:"
                                colored = self._colorize_result(truncated, is_error=bool(is_error))
                                if colorize_error:
                                    label = self._colorize_result(label, is_error=True)
                                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
                            header["result"] = truncated
                            return self._format_tool_invocation_header(header)

                # No text item — serialize the whole dict
                result_str = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
                if "\n" in result_str or len(result_str) > 200 or colorize_error:
                    label = "result:"
                    colored = self._colorize_result(result_str, is_error=bool(is_error))
                    if colorize_error:
                        label = self._colorize_result(label, is_error=True)
                    return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
                header["result"] = result_str
                return self._format_tool_invocation_header(header)

            # List result: serialize as JSON
            if isinstance(result_val, list):
                result_str = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
                if "\n" in result_str or len(result_str) > 200 or colorize_error:
                    label = "result:"
                    colored = self._colorize_result(result_str, is_error=bool(is_error))
                    if colorize_error:
                        label = self._colorize_result(label, is_error=True)
                    return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored
                header["result"] = result_str
                return self._format_tool_invocation_header(header)

            return self._format_tool_invocation_header(header)

        # --- Retry/compaction events ---
        if event_type == "auto_retry_start":
            header["attempt"] = payload.get("attempt")
            header["maxAttempts"] = payload.get("maxAttempts")
            header["delayMs"] = payload.get("delayMs")
            error_msg = payload.get("errorMessage", "")
            if error_msg:
                header["error"] = error_msg
            return json.dumps(header, ensure_ascii=False)

        if event_type == "auto_retry_end":
            self.message_counter += 1
            header["counter"] = f"#{self.message_counter}"
            header["success"] = payload.get("success")
            header["attempt"] = payload.get("attempt")
            final_err = payload.get("finalError")
            if final_err:
                header["finalError"] = final_err
            return json.dumps(header, ensure_ascii=False)

        # --- Fallback: emit raw with datetime ---
        header.update({k: v for k, v in payload.items() if k not in ("type",)})
        return json.dumps(header, ensure_ascii=False)

    except Exception:
        # Last-resort fallback: never let a malformed event break the stream.
        return json.dumps(payload, ensure_ascii=False)
|
|
1625
|
+
|
|
1626
|
+
# ── Live stream prettifier ─────────────────────────────────────────────
|
|
1627
|
+
|
|
1628
|
+
def _format_event_live(self, parsed: dict) -> Optional[str]:
    """Format Pi events for live streaming mode.

    Returns:
        str ending with \\n: a complete line to print
        str NOT ending with \\n: a delta to append (streaming text)
        "": suppress this event
        None: use raw JSON fallback

    Side effects: increments self.message_counter for *_end events, toggles
    self._in_tool_execution, and consumes/populates the pending tool-call
    buffers via the helper methods it calls.
    """
    event_type = parsed.get("type", "")
    now = datetime.now().strftime("%I:%M:%S %p")

    if event_type == "message_update":
        ame = parsed.get("assistantMessageEvent", {})
        ame_type = ame.get("type", "") if isinstance(ame, dict) else ""

        # Stream text deltas directly (no JSON, no newline)
        if ame_type == "text_delta":
            delta = ame.get("delta", "")
            if isinstance(delta, str) and delta:
                return delta  # raw text, no newline
            return ""

        if ame_type == "thinking_delta":
            delta = ame.get("delta", "")
            if isinstance(delta, str) and delta:
                return delta
            return ""

        # Section start markers (no counter — only *_end events get counters)
        if ame_type == "text_start":
            return json.dumps({"type": "text_start", "datetime": now}) + "\n"

        if ame_type == "thinking_start":
            return json.dumps({"type": "thinking_start", "datetime": now}) + "\n"

        # Section end markers (text was already streamed); leading \n
        # terminates the streamed delta text before the marker line.
        if ame_type == "text_end":
            self.message_counter += 1
            return "\n" + json.dumps({"type": "text_end", "datetime": now, "counter": f"#{self.message_counter}"}) + "\n"

        if ame_type == "thinking_end":
            self.message_counter += 1
            return "\n" + json.dumps({"type": "thinking_end", "datetime": now, "counter": f"#{self.message_counter}"}) + "\n"

        # Tool call end: buffer for grouping with tool_execution_end
        if ame_type == "toolcall_end":
            tc = ame.get("toolCall", {})
            if self._buffer_tool_call_end(tc, now):
                return ""  # suppress — will emit combined event on tool_execution_end
            # No toolCallId — fallback to original format
            self.message_counter += 1
            header = {"type": "toolcall_end", "datetime": now, "counter": f"#{self.message_counter}"}
            if isinstance(tc, dict):
                header["tool"] = tc.get("name", "")
                args = tc.get("arguments", {})
                if isinstance(args, dict):
                    cmd = args.get("command", "")
                    # "command" string preferred over the generic args dict
                    if isinstance(cmd, str) and cmd:
                        header["command"] = self._sanitize_tool_argument_value(cmd)
                    else:
                        header["args"] = self._sanitize_tool_argument_value(args)
                elif isinstance(args, str) and args.strip():
                    header["args"] = self._sanitize_tool_argument_value(args)
            return self._format_tool_invocation_header(header) + "\n"

        # Suppress all other message_update subtypes (toolcall_start, toolcall_delta, etc.)
        return ""

    # Suppress redundant events
    if event_type in ("message_start", "message_end"):
        return ""

    # tool_execution_start: always suppress, buffer args
    if event_type == "tool_execution_start":
        self._buffer_exec_start(parsed)
        self._in_tool_execution = True
        return ""  # suppress

    # tool_execution_end: combine with buffered data
    if event_type == "tool_execution_end":
        self._in_tool_execution = False
        tool_call_id = parsed.get("toolCallId")

        # Pop (consume) both buffers; exec-start timing wins when both exist.
        pending_tool = self._pending_tool_calls.pop(tool_call_id, None) if tool_call_id else None
        pending_exec = self._pending_exec_starts.pop(tool_call_id, None) if tool_call_id else None
        if pending_tool and pending_exec and "started_at" in pending_exec:
            pending_tool["started_at"] = pending_exec["started_at"]
        pending = pending_tool or pending_exec

        if pending:
            return self._build_combined_tool_event(pending, parsed, now) + "\n"

        # No buffered data — minimal fallback
        self.message_counter += 1
        header = {
            "type": "tool",
            "datetime": now,
            "counter": f"#{self.message_counter}",
            "tool": parsed.get("toolName", ""),
        }
        execution_time = self._format_execution_time(parsed)
        if execution_time:
            header["execution_time"] = execution_time

        is_error = parsed.get("isError", False)
        if is_error:
            header["isError"] = True

        result_val = parsed.get("result")
        colorize_error = self._color_enabled() and bool(is_error)

        # String result
        if isinstance(result_val, str) and result_val.strip():
            truncated = self._truncate_tool_result_text(result_val)
            if "\n" in truncated or colorize_error:
                label = "result:"
                colored = self._colorize_result(truncated, is_error=bool(is_error))
                if colorize_error:
                    label = self._colorize_result(label, is_error=True)
                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored + "\n"
            header["result"] = truncated
            return self._format_tool_invocation_header(header) + "\n"

        # Dict result: prefer first {"type": "text"} item in its content list
        if isinstance(result_val, dict):
            result_content = result_val.get("content")
            if isinstance(result_content, list):
                for rc_item in result_content:
                    if isinstance(rc_item, dict) and rc_item.get("type") == "text":
                        text = rc_item.get("text", "")
                        truncated = self._truncate_tool_result_text(text)
                        if "\n" in truncated or colorize_error:
                            label = "result:"
                            colored = self._colorize_result(truncated, is_error=bool(is_error))
                            if colorize_error:
                                label = self._colorize_result(label, is_error=True)
                            return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored + "\n"
                        header["result"] = truncated
                        return self._format_tool_invocation_header(header) + "\n"

            # No text item — serialize the whole dict
            result_json = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
            if "\n" in result_json or colorize_error:
                label = "result:"
                colored = self._colorize_result(result_json, is_error=bool(is_error))
                if colorize_error:
                    label = self._colorize_result(label, is_error=True)
                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored + "\n"
            header["result"] = result_json
            return self._format_tool_invocation_header(header) + "\n"

        # List result: serialize as JSON
        if isinstance(result_val, list):
            result_json = self._strip_ansi_sequences(json.dumps(result_val, ensure_ascii=False))
            if "\n" in result_json or colorize_error:
                label = "result:"
                colored = self._colorize_result(result_json, is_error=bool(is_error))
                if colorize_error:
                    label = self._colorize_result(label, is_error=True)
                return self._format_tool_invocation_header(header) + "\n" + label + "\n" + colored + "\n"
            header["result"] = result_json
            return self._format_tool_invocation_header(header) + "\n"

        return self._format_tool_invocation_header(header) + "\n"

    # turn_end: metadata only
    if event_type == "turn_end":
        self.message_counter += 1
        header = {"type": "turn_end", "datetime": now, "counter": f"#{self.message_counter}"}
        tool_results = parsed.get("toolResults")
        if isinstance(tool_results, list):
            header["tool_results_count"] = len(tool_results)
        return json.dumps(header, ensure_ascii=False) + "\n"

    # turn_start: suppress (no user-visible value)
    if event_type == "turn_start":
        return ""

    # agent_start (no counter — only *_end events get counters)
    if event_type == "agent_start":
        return json.dumps({"type": event_type, "datetime": now}) + "\n"

    # agent_end
    if event_type == "agent_end":
        self.message_counter += 1
        header = {"type": "agent_end", "datetime": now, "counter": f"#{self.message_counter}"}
        messages = parsed.get("messages")
        if isinstance(messages, list):
            header["message_count"] = len(messages)
        total_cost_usd = self._extract_total_cost_usd(parsed)
        if total_cost_usd is not None:
            header["total_cost_usd"] = total_cost_usd
        return json.dumps(header, ensure_ascii=False) + "\n"

    # --- Role-based messages (Pi-wrapped Codex messages) ---
    role = parsed.get("role", "")
    if role == "toolResult":
        self.message_counter += 1
        header = {
            "type": "toolResult",
            "datetime": now,
            "counter": f"#{self.message_counter}",
            "toolName": parsed.get("toolName", ""),
        }
        is_error = parsed.get("isError", False)
        if is_error:
            header["isError"] = True
        content = parsed.get("content")
        if isinstance(content, list):
            # Emit the first text item found; other content types are ignored.
            for item in content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_val = item.get("text", "")
                    truncated = self._truncate_tool_result_text(text_val)
                    use_color = self._color_enabled()
                    if "\n" in truncated or use_color:
                        colored = self._colorize_result(truncated, is_error=bool(is_error))
                        label = self._colorize_result("content:", is_error=bool(is_error))
                        return json.dumps(header, ensure_ascii=False) + "\n" + label + "\n" + colored + "\n"
                    header["content"] = truncated
                    return json.dumps(header, ensure_ascii=False) + "\n"
        return json.dumps(header, ensure_ascii=False) + "\n"

    if role == "assistant":
        self.message_counter += 1
        content = parsed.get("content")
        if isinstance(content, list):
            # Remove opaque thinking signatures before rendering (in-place).
            self._strip_thinking_signature(content)
        header = {"type": "assistant", "datetime": now, "counter": f"#{self.message_counter}"}
        text_parts = []
        if isinstance(content, list):
            for item in content:
                if isinstance(item, dict):
                    if item.get("type") == "text":
                        text_parts.append(item.get("text", ""))
                    elif item.get("type") == "thinking":
                        text_parts.append(f"[thinking] {item.get('thinking', '')}")
                    elif item.get("type") == "toolCall":
                        name = item.get("name", "")
                        args = item.get("arguments", {})
                        cmd = args.get("command", "") if isinstance(args, dict) else ""
                        text_parts.append(f"[toolCall] {name}: {cmd}" if cmd else f"[toolCall] {name}")
        if text_parts:
            combined = "\n".join(text_parts)
            if "\n" in combined:
                return json.dumps(header, ensure_ascii=False) + "\n" + combined + "\n"
            header["content"] = combined
        return json.dumps(header, ensure_ascii=False) + "\n"

    if role:
        # Other roles — minimal JSON header
        self.message_counter += 1
        return json.dumps({"type": role, "datetime": now, "counter": f"#{self.message_counter}"}, ensure_ascii=False) + "\n"

    # --- Native Codex events (agent_reasoning, agent_message, exec_command_end, etc.) ---
    msg_type, payload, outer_type = self._normalize_codex_event(parsed)

    if msg_type in ("agent_reasoning", "reasoning"):
        self.message_counter += 1
        content = self._extract_reasoning_text(payload)
        header = {"type": msg_type, "datetime": now, "counter": f"#{self.message_counter}"}
        if "\n" in content:
            return json.dumps(header, ensure_ascii=False) + "\ntext:\n" + content + "\n"
        if content:
            header["text"] = content
        return json.dumps(header, ensure_ascii=False) + "\n"

    if msg_type in ("agent_message", "assistant_message"):
        self.message_counter += 1
        content = self._extract_message_text_codex(payload)
        header = {"type": msg_type, "datetime": now, "counter": f"#{self.message_counter}"}
        if "\n" in content:
            return json.dumps(header, ensure_ascii=False) + "\nmessage:\n" + content + "\n"
        if content:
            header["message"] = content
        return json.dumps(header, ensure_ascii=False) + "\n"

    if msg_type == "exec_command_end":
        self.message_counter += 1
        formatted_output = payload.get("formatted_output", "") if isinstance(payload, dict) else ""
        header = {"type": msg_type, "datetime": now, "counter": f"#{self.message_counter}"}
        if "\n" in formatted_output:
            return json.dumps(header, ensure_ascii=False) + "\nformatted_output:\n" + formatted_output + "\n"
        if formatted_output:
            header["formatted_output"] = formatted_output
        return json.dumps(header, ensure_ascii=False) + "\n"

    if msg_type == "command_execution":
        self.message_counter += 1
        aggregated_output = self._extract_command_output_text(payload)
        header = {"type": msg_type, "datetime": now, "counter": f"#{self.message_counter}"}
        if "\n" in aggregated_output:
            return json.dumps(header, ensure_ascii=False) + "\naggregated_output:\n" + aggregated_output + "\n"
        if aggregated_output:
            header["aggregated_output"] = aggregated_output
        return json.dumps(header, ensure_ascii=False) + "\n"

    # Fallback: not handled
    return None
|
|
1923
|
+
|
|
1924
|
+
def _build_hide_types(self) -> set:
|
|
1925
|
+
"""Build the set of event types to suppress from output."""
|
|
1926
|
+
hide_types = set(self.DEFAULT_HIDDEN_STREAM_TYPES)
|
|
1927
|
+
for env_name in ("PI_HIDE_STREAM_TYPES", "JUNO_CODE_HIDE_STREAM_TYPES"):
|
|
1928
|
+
env_val = os.environ.get(env_name, "")
|
|
1929
|
+
if env_val:
|
|
1930
|
+
parts = [p.strip() for p in env_val.split(",") if p.strip()]
|
|
1931
|
+
hide_types.update(parts)
|
|
1932
|
+
return hide_types
|
|
1933
|
+
|
|
1934
|
+
@staticmethod
|
|
1935
|
+
def _toolcall_end_delay_seconds() -> float:
|
|
1936
|
+
"""Return delay for fallback toolcall_end visibility (default 3s)."""
|
|
1937
|
+
raw = os.environ.get("PI_TOOLCALL_END_DELAY_SECONDS", "3")
|
|
1938
|
+
try:
|
|
1939
|
+
delay = float(raw)
|
|
1940
|
+
except (TypeError, ValueError):
|
|
1941
|
+
delay = 3.0
|
|
1942
|
+
return max(0.0, delay)
|
|
1943
|
+
|
|
1944
|
+
@staticmethod
|
|
1945
|
+
def _sanitize_sub_agent_response(event: dict) -> dict:
|
|
1946
|
+
"""Strip bulky fields (messages, type) from sub_agent_response to reduce token usage."""
|
|
1947
|
+
return {k: v for k, v in event.items() if k not in ("messages", "type")}
|
|
1948
|
+
|
|
1949
|
+
def _reset_run_cost_tracking(self) -> None:
|
|
1950
|
+
"""Reset per-run usage/cost accumulation state."""
|
|
1951
|
+
self._run_usage_totals = None
|
|
1952
|
+
self._run_total_cost_usd = None
|
|
1953
|
+
self._run_seen_usage_keys.clear()
|
|
1954
|
+
|
|
1955
|
+
@staticmethod
|
|
1956
|
+
def _is_numeric_value(value: object) -> bool:
|
|
1957
|
+
"""True for int/float values (excluding bool)."""
|
|
1958
|
+
return isinstance(value, (int, float)) and not isinstance(value, bool)
|
|
1959
|
+
|
|
1960
|
+
@staticmethod
def _normalize_usage_payload(usage: dict) -> Optional[dict]:
    """Coerce a raw usage payload into an all-numeric totals dict.

    Returns None when *usage* is not a dict or carries no numeric token or
    cost figure at all. Otherwise every field is a float, and the
    totalTokens / cost.total fields are derived by summation when absent.
    """
    if not isinstance(usage, dict):
        return None

    raw_cost = usage.get("cost")
    cost = raw_cost if isinstance(raw_cost, dict) else {}

    def _is_num(value: object) -> bool:
        # Same contract as _is_numeric_value: ints/floats count, bools do not.
        return isinstance(value, (int, float)) and not isinstance(value, bool)

    def _num(value: object) -> float:
        return float(value) if _is_num(value) else 0.0

    fields = ("input", "output", "cacheRead", "cacheWrite")
    tokens = {name: _num(usage.get(name)) for name in fields}
    costs = {name: _num(cost.get(name)) for name in fields}

    # Reject payloads with no numeric signal anywhere (tokens or costs).
    inspected = [usage.get(name) for name in fields]
    inspected.append(usage.get("totalTokens"))
    inspected.extend(cost.get(name) for name in fields)
    inspected.append(cost.get("total"))
    if not any(_is_num(value) for value in inspected):
        return None

    total_tokens_raw = usage.get("totalTokens")
    if _is_num(total_tokens_raw):
        total_tokens = float(total_tokens_raw)
    else:
        total_tokens = sum(tokens.values())

    cost_total_raw = cost.get("total")
    if _is_num(cost_total_raw):
        cost_total = float(cost_total_raw)
    else:
        cost_total = sum(costs.values())

    return {
        "input": tokens["input"],
        "output": tokens["output"],
        "cacheRead": tokens["cacheRead"],
        "cacheWrite": tokens["cacheWrite"],
        "totalTokens": total_tokens,
        "cost": {
            "input": costs["input"],
            "output": costs["output"],
            "cacheRead": costs["cacheRead"],
            "cacheWrite": costs["cacheWrite"],
            "total": cost_total,
        },
    }
|
|
2030
|
+
|
|
2031
|
+
@staticmethod
|
|
2032
|
+
def _merge_usage_payloads(base: Optional[dict], delta: Optional[dict]) -> Optional[dict]:
|
|
2033
|
+
"""Merge normalized usage payloads by summing token/cost fields."""
|
|
2034
|
+
if not isinstance(base, dict):
|
|
2035
|
+
return delta
|
|
2036
|
+
if not isinstance(delta, dict):
|
|
2037
|
+
return base
|
|
2038
|
+
|
|
2039
|
+
base_cost = base.get("cost") if isinstance(base.get("cost"), dict) else {}
|
|
2040
|
+
delta_cost = delta.get("cost") if isinstance(delta.get("cost"), dict) else {}
|
|
2041
|
+
|
|
2042
|
+
return {
|
|
2043
|
+
"input": float(base.get("input", 0.0)) + float(delta.get("input", 0.0)),
|
|
2044
|
+
"output": float(base.get("output", 0.0)) + float(delta.get("output", 0.0)),
|
|
2045
|
+
"cacheRead": float(base.get("cacheRead", 0.0)) + float(delta.get("cacheRead", 0.0)),
|
|
2046
|
+
"cacheWrite": float(base.get("cacheWrite", 0.0)) + float(delta.get("cacheWrite", 0.0)),
|
|
2047
|
+
"totalTokens": float(base.get("totalTokens", 0.0)) + float(delta.get("totalTokens", 0.0)),
|
|
2048
|
+
"cost": {
|
|
2049
|
+
"input": float(base_cost.get("input", 0.0)) + float(delta_cost.get("input", 0.0)),
|
|
2050
|
+
"output": float(base_cost.get("output", 0.0)) + float(delta_cost.get("output", 0.0)),
|
|
2051
|
+
"cacheRead": float(base_cost.get("cacheRead", 0.0)) + float(delta_cost.get("cacheRead", 0.0)),
|
|
2052
|
+
"cacheWrite": float(base_cost.get("cacheWrite", 0.0)) + float(delta_cost.get("cacheWrite", 0.0)),
|
|
2053
|
+
"total": float(base_cost.get("total", 0.0)) + float(delta_cost.get("total", 0.0)),
|
|
2054
|
+
},
|
|
2055
|
+
}
|
|
2056
|
+
|
|
2057
|
+
@staticmethod
def _aggregate_assistant_usages(messages: list) -> Optional[dict]:
    """Collect and sum usage dicts from every assistant message in *messages*.

    Returns None when there is nothing to aggregate. A single usage dict is
    returned as-is (unnormalized); multiple usages are normalized and merged
    into one running total.
    """
    if not isinstance(messages, list):
        return None

    usages = [
        msg.get("usage")
        for msg in messages
        if isinstance(msg, dict)
        and msg.get("role") == "assistant"
        and isinstance(msg.get("usage"), dict)
    ]

    if not usages:
        return None
    if len(usages) == 1:
        return usages[0]

    combined: Optional[dict] = None
    for usage in usages:
        combined = PiService._merge_usage_payloads(
            combined, PiService._normalize_usage_payload(usage)
        )
    return combined
|
|
2081
|
+
|
|
2082
|
+
def _assistant_usage_dedupe_key(self, message: dict, usage: dict) -> Optional[str]:
|
|
2083
|
+
"""Build a stable dedupe key for assistant usage seen across message/turn_end events."""
|
|
2084
|
+
if not isinstance(message, dict) or not isinstance(usage, dict):
|
|
2085
|
+
return None
|
|
2086
|
+
|
|
2087
|
+
for id_key in ("id", "messageId", "message_id"):
|
|
2088
|
+
value = message.get(id_key)
|
|
2089
|
+
if isinstance(value, str) and value.strip():
|
|
2090
|
+
return f"id:{value.strip()}"
|
|
2091
|
+
|
|
2092
|
+
timestamp = message.get("timestamp")
|
|
2093
|
+
if self._is_numeric_value(timestamp):
|
|
2094
|
+
return f"ts:{int(float(timestamp))}"
|
|
2095
|
+
if isinstance(timestamp, str) and timestamp.strip():
|
|
2096
|
+
return f"ts:{timestamp.strip()}"
|
|
2097
|
+
|
|
2098
|
+
usage_cost = usage.get("cost") if isinstance(usage.get("cost"), dict) else {}
|
|
2099
|
+
signature: Dict[str, object] = {
|
|
2100
|
+
"stopReason": message.get("stopReason") if isinstance(message.get("stopReason"), str) else "",
|
|
2101
|
+
"input": usage.get("input", 0.0),
|
|
2102
|
+
"output": usage.get("output", 0.0),
|
|
2103
|
+
"cacheRead": usage.get("cacheRead", 0.0),
|
|
2104
|
+
"cacheWrite": usage.get("cacheWrite", 0.0),
|
|
2105
|
+
"totalTokens": usage.get("totalTokens", 0.0),
|
|
2106
|
+
"costTotal": usage_cost.get("total", 0.0),
|
|
2107
|
+
}
|
|
2108
|
+
|
|
2109
|
+
text = self._extract_text_from_message(message)
|
|
2110
|
+
if text:
|
|
2111
|
+
signature["text"] = text[:120]
|
|
2112
|
+
|
|
2113
|
+
return "sig:" + json.dumps(signature, sort_keys=True, ensure_ascii=False)
|
|
2114
|
+
|
|
2115
|
+
def _track_assistant_usage_from_event(self, event: dict) -> None:
|
|
2116
|
+
"""Accumulate per-run assistant usage from stream events."""
|
|
2117
|
+
if not isinstance(event, dict):
|
|
2118
|
+
return
|
|
2119
|
+
|
|
2120
|
+
event_type = event.get("type")
|
|
2121
|
+
if event_type not in ("message", "message_end", "turn_end"):
|
|
2122
|
+
return
|
|
2123
|
+
|
|
2124
|
+
message = event.get("message")
|
|
2125
|
+
if not isinstance(message, dict) or message.get("role") != "assistant":
|
|
2126
|
+
return
|
|
2127
|
+
|
|
2128
|
+
normalized_usage = self._normalize_usage_payload(message.get("usage"))
|
|
2129
|
+
if not isinstance(normalized_usage, dict):
|
|
2130
|
+
return
|
|
2131
|
+
|
|
2132
|
+
usage_key = self._assistant_usage_dedupe_key(message, normalized_usage)
|
|
2133
|
+
if usage_key and usage_key in self._run_seen_usage_keys:
|
|
2134
|
+
return
|
|
2135
|
+
if usage_key:
|
|
2136
|
+
self._run_seen_usage_keys.add(usage_key)
|
|
2137
|
+
|
|
2138
|
+
self._run_usage_totals = self._merge_usage_payloads(self._run_usage_totals, normalized_usage)
|
|
2139
|
+
self._run_total_cost_usd = self._extract_total_cost_usd(
|
|
2140
|
+
{"usage": self._run_usage_totals},
|
|
2141
|
+
self._run_usage_totals,
|
|
2142
|
+
)
|
|
2143
|
+
|
|
2144
|
+
def _get_accumulated_total_cost_usd(self) -> Optional[float]:
|
|
2145
|
+
"""Return accumulated per-run total cost when available."""
|
|
2146
|
+
if self._is_numeric_value(self._run_total_cost_usd):
|
|
2147
|
+
return float(self._run_total_cost_usd)
|
|
2148
|
+
if isinstance(self._run_usage_totals, dict):
|
|
2149
|
+
return self._extract_total_cost_usd({"usage": self._run_usage_totals}, self._run_usage_totals)
|
|
2150
|
+
return None
|
|
2151
|
+
|
|
2152
|
+
@staticmethod
def _extract_usage_from_event(event: dict) -> Optional[dict]:
    """Pull a usage dict out of any of the Pi event shapes.

    Lookup order: aggregated assistant usage for agent_end events, then a
    top-level 'usage' field, then the single message's usage, and finally
    an aggregate over a 'messages' array.
    """
    if not isinstance(event, dict):
        return None

    messages = event.get("messages")

    # agent_end events summarize a whole run: prefer aggregation first.
    if event.get("type") == "agent_end" and isinstance(messages, list):
        combined = PiService._aggregate_assistant_usages(messages)
        if isinstance(combined, dict):
            return combined

    direct = event.get("usage")
    if isinstance(direct, dict):
        return direct

    holder = event.get("message")
    if isinstance(holder, dict):
        nested = holder.get("usage")
        if isinstance(nested, dict):
            return nested

    if isinstance(messages, list):
        combined = PiService._aggregate_assistant_usages(messages)
        if isinstance(combined, dict):
            return combined

    return None
|
|
2180
|
+
|
|
2181
|
+
@staticmethod
def _extract_total_cost_usd(event: dict, usage: Optional[dict] = None) -> Optional[float]:
    """Find a total USD cost on an event, falling back to usage.cost.total."""
    if not isinstance(event, dict):
        return None

    # Explicit top-level cost fields win over everything else.
    for field in ("total_cost_usd", "totalCostUsd", "totalCostUSD"):
        candidate = event.get(field)
        if PiService._is_numeric_value(candidate):
            return float(candidate)

    # A bare numeric 'cost', or a cost dict carrying a numeric 'total'.
    cost_field = event.get("cost")
    if PiService._is_numeric_value(cost_field):
        return float(cost_field)
    if isinstance(cost_field, dict):
        total = cost_field.get("total")
        if PiService._is_numeric_value(total):
            return float(total)

    # Fall back to the usage payload (supplied or extracted from the event).
    payload = usage if isinstance(usage, dict) else PiService._extract_usage_from_event(event)
    if isinstance(payload, dict):
        nested = payload.get("cost")
        if isinstance(nested, dict):
            total = nested.get("total")
            if PiService._is_numeric_value(total):
                return float(total)

    return None
|
|
2212
|
+
|
|
2213
|
+
def _build_success_result_event(self, text: str, event: dict) -> dict:
|
|
2214
|
+
"""Build standardized success envelope for shell-backend capture."""
|
|
2215
|
+
usage = self._extract_usage_from_event(event)
|
|
2216
|
+
if isinstance(self._run_usage_totals, dict):
|
|
2217
|
+
usage = self._run_usage_totals
|
|
2218
|
+
|
|
2219
|
+
total_cost_usd = self._extract_total_cost_usd(event, usage)
|
|
2220
|
+
accumulated_total_cost = self._get_accumulated_total_cost_usd()
|
|
2221
|
+
if accumulated_total_cost is not None:
|
|
2222
|
+
total_cost_usd = accumulated_total_cost
|
|
2223
|
+
|
|
2224
|
+
result_event: Dict = {
|
|
2225
|
+
"type": "result",
|
|
2226
|
+
"subtype": "success",
|
|
2227
|
+
"is_error": False,
|
|
2228
|
+
"result": text,
|
|
2229
|
+
"session_id": self.session_id,
|
|
2230
|
+
"sub_agent_response": self._sanitize_sub_agent_response(event),
|
|
2231
|
+
}
|
|
2232
|
+
|
|
2233
|
+
if isinstance(usage, dict):
|
|
2234
|
+
result_event["usage"] = usage
|
|
2235
|
+
if total_cost_usd is not None:
|
|
2236
|
+
result_event["total_cost_usd"] = total_cost_usd
|
|
2237
|
+
|
|
2238
|
+
return result_event
|
|
2239
|
+
|
|
2240
|
+
def _write_capture_file(self, capture_path: Optional[str]) -> None:
|
|
2241
|
+
"""Write final result event to capture file for shell backend."""
|
|
2242
|
+
if not capture_path or not self.last_result_event:
|
|
2243
|
+
return
|
|
2244
|
+
try:
|
|
2245
|
+
Path(capture_path).write_text(
|
|
2246
|
+
json.dumps(self.last_result_event, ensure_ascii=False),
|
|
2247
|
+
encoding="utf-8",
|
|
2248
|
+
)
|
|
2249
|
+
except Exception as e:
|
|
2250
|
+
print(f"Warning: Could not write capture file: {e}", file=sys.stderr)
|
|
2251
|
+
|
|
2252
|
+
def run_pi(self, cmd: List[str], args: argparse.Namespace,
|
|
2253
|
+
stdin_prompt: Optional[str] = None) -> int:
|
|
2254
|
+
"""Execute the Pi CLI and stream/format its JSON output.
|
|
2255
|
+
|
|
2256
|
+
Args:
|
|
2257
|
+
cmd: Command argument list from build_pi_command.
|
|
2258
|
+
args: Parsed argparse namespace.
|
|
2259
|
+
stdin_prompt: If set, pipe this text via stdin to the Pi CLI
|
|
2260
|
+
(used for multiline/large prompts).
|
|
2261
|
+
"""
|
|
2262
|
+
verbose = args.verbose
|
|
2263
|
+
pretty = args.pretty.lower() != "false"
|
|
2264
|
+
capture_path = os.environ.get("JUNO_SUBAGENT_CAPTURE_PATH")
|
|
2265
|
+
hide_types = self._build_hide_types()
|
|
2266
|
+
self._buffered_tool_stdout_lines.clear()
|
|
2267
|
+
self._reset_run_cost_tracking()
|
|
2268
|
+
cancel_delayed_toolcalls = lambda: None
|
|
2269
|
+
|
|
2270
|
+
if verbose:
|
|
2271
|
+
# Truncate prompt in display to avoid confusing multi-line output
|
|
2272
|
+
display_cmd = list(cmd)
|
|
2273
|
+
if stdin_prompt:
|
|
2274
|
+
first_line = stdin_prompt.split("\n")[0][:60]
|
|
2275
|
+
display_cmd.append(f'[stdin: "{first_line}..." ({len(stdin_prompt)} chars)]')
|
|
2276
|
+
else:
|
|
2277
|
+
filtered = []
|
|
2278
|
+
skip_next = False
|
|
2279
|
+
for i, part in enumerate(cmd):
|
|
2280
|
+
if skip_next:
|
|
2281
|
+
skip_next = False
|
|
2282
|
+
continue
|
|
2283
|
+
if part == "-p" and i + 1 < len(cmd):
|
|
2284
|
+
prompt_val = cmd[i + 1]
|
|
2285
|
+
if len(prompt_val) > 80 or "\n" in prompt_val:
|
|
2286
|
+
first_line = prompt_val.split("\n")[0][:60]
|
|
2287
|
+
filtered.append(f'-p "{first_line}..." ({len(prompt_val)} chars)')
|
|
2288
|
+
else:
|
|
2289
|
+
filtered.append(f"-p {prompt_val}")
|
|
2290
|
+
skip_next = True
|
|
2291
|
+
else:
|
|
2292
|
+
filtered.append(part)
|
|
2293
|
+
display_cmd = filtered
|
|
2294
|
+
# Only show Executing once: skip when running under juno-code shell backend
|
|
2295
|
+
# (shell backend already logs the command in debug mode)
|
|
2296
|
+
if not capture_path:
|
|
2297
|
+
print(f"Executing: {' '.join(display_cmd)}", file=sys.stderr)
|
|
2298
|
+
print("-" * 80, file=sys.stderr)
|
|
2299
|
+
|
|
2300
|
+
try:
|
|
2301
|
+
process = subprocess.Popen(
|
|
2302
|
+
cmd,
|
|
2303
|
+
stdin=subprocess.PIPE if stdin_prompt else subprocess.DEVNULL,
|
|
2304
|
+
stdout=subprocess.PIPE,
|
|
2305
|
+
stderr=subprocess.PIPE,
|
|
2306
|
+
text=True,
|
|
2307
|
+
bufsize=1,
|
|
2308
|
+
universal_newlines=True,
|
|
2309
|
+
cwd=self.project_path,
|
|
2310
|
+
)
|
|
2311
|
+
|
|
2312
|
+
# Pipe the prompt via stdin if using stdin mode (multiline/large prompts).
|
|
2313
|
+
# Pi CLI reads stdin when isTTY is false and prepends it to messages.
|
|
2314
|
+
if stdin_prompt and process.stdin:
|
|
2315
|
+
try:
|
|
2316
|
+
process.stdin.write(stdin_prompt)
|
|
2317
|
+
process.stdin.close()
|
|
2318
|
+
except BrokenPipeError:
|
|
2319
|
+
pass # Process may have exited early
|
|
2320
|
+
|
|
2321
|
+
# Watchdog thread: handles stdout pipe blocking after process exit.
|
|
2322
|
+
wait_timeout = int(os.environ.get("PI_WAIT_TIMEOUT", "30"))
|
|
2323
|
+
output_done = threading.Event()
|
|
2324
|
+
|
|
2325
|
+
def _stdout_watchdog():
|
|
2326
|
+
"""Terminate process and close stdout pipe if it hangs after output."""
|
|
2327
|
+
while not output_done.is_set():
|
|
2328
|
+
if process.poll() is not None:
|
|
2329
|
+
break
|
|
2330
|
+
output_done.wait(timeout=1)
|
|
2331
|
+
|
|
2332
|
+
if output_done.is_set() and process.poll() is None:
|
|
2333
|
+
try:
|
|
2334
|
+
process.wait(timeout=wait_timeout)
|
|
2335
|
+
except subprocess.TimeoutExpired:
|
|
2336
|
+
print(
|
|
2337
|
+
f"Warning: Pi process did not exit within {wait_timeout}s after output. Terminating.",
|
|
2338
|
+
file=sys.stderr,
|
|
2339
|
+
)
|
|
2340
|
+
process.terminate()
|
|
2341
|
+
try:
|
|
2342
|
+
process.wait(timeout=5)
|
|
2343
|
+
except subprocess.TimeoutExpired:
|
|
2344
|
+
print("Warning: Pi process did not respond to SIGTERM. Killing.", file=sys.stderr)
|
|
2345
|
+
process.kill()
|
|
2346
|
+
try:
|
|
2347
|
+
process.wait(timeout=5)
|
|
2348
|
+
except subprocess.TimeoutExpired:
|
|
2349
|
+
pass
|
|
2350
|
+
|
|
2351
|
+
time.sleep(2)
|
|
2352
|
+
try:
|
|
2353
|
+
if process.stdout and not process.stdout.closed:
|
|
2354
|
+
process.stdout.close()
|
|
2355
|
+
except Exception:
|
|
2356
|
+
pass
|
|
2357
|
+
|
|
2358
|
+
watchdog = threading.Thread(target=_stdout_watchdog, daemon=True)
|
|
2359
|
+
watchdog.start()
|
|
2360
|
+
|
|
2361
|
+
# Stream stderr in a separate thread so Pi diagnostic output is visible
|
|
2362
|
+
def _stderr_reader():
|
|
2363
|
+
"""Read stderr and forward to our stderr for visibility."""
|
|
2364
|
+
try:
|
|
2365
|
+
if process.stderr:
|
|
2366
|
+
for stderr_line in process.stderr:
|
|
2367
|
+
print(stderr_line, end="", file=sys.stderr, flush=True)
|
|
2368
|
+
except (ValueError, OSError):
|
|
2369
|
+
pass
|
|
2370
|
+
|
|
2371
|
+
stderr_thread = threading.Thread(target=_stderr_reader, daemon=True)
|
|
2372
|
+
stderr_thread.start()
|
|
2373
|
+
|
|
2374
|
+
cancel_delayed_toolcalls = lambda: None
|
|
2375
|
+
|
|
2376
|
+
if process.stdout:
|
|
2377
|
+
pending_tool_execution_end: Optional[dict] = None
|
|
2378
|
+
pending_turn_end_after_tool: Optional[dict] = None
|
|
2379
|
+
toolcall_end_delay_seconds = self._toolcall_end_delay_seconds()
|
|
2380
|
+
pending_delayed_toolcalls: Dict[int, dict] = {}
|
|
2381
|
+
delayed_toolcalls_lock = threading.Lock()
|
|
2382
|
+
delayed_toolcall_seq = 0
|
|
2383
|
+
|
|
2384
|
+
def _extract_fallback_toolcall_name(parsed_event: dict) -> Optional[str]:
|
|
2385
|
+
if parsed_event.get("type") != "message_update":
|
|
2386
|
+
return None
|
|
2387
|
+
assistant_event = parsed_event.get("assistantMessageEvent")
|
|
2388
|
+
if not isinstance(assistant_event, dict) or assistant_event.get("type") != "toolcall_end":
|
|
2389
|
+
return None
|
|
2390
|
+
tool_call = assistant_event.get("toolCall")
|
|
2391
|
+
if not isinstance(tool_call, dict):
|
|
2392
|
+
return None
|
|
2393
|
+
tool_call_id = tool_call.get("toolCallId")
|
|
2394
|
+
if isinstance(tool_call_id, str) and tool_call_id.strip():
|
|
2395
|
+
return None
|
|
2396
|
+
name = tool_call.get("name", "")
|
|
2397
|
+
return name if isinstance(name, str) else ""
|
|
2398
|
+
|
|
2399
|
+
def _format_deferred_toolcall(parsed_event: dict, mode: str) -> Optional[str]:
|
|
2400
|
+
if mode == self.PRETTIFIER_LIVE:
|
|
2401
|
+
return self._format_event_live(parsed_event)
|
|
2402
|
+
if mode == self.PRETTIFIER_CODEX:
|
|
2403
|
+
return self._format_pi_codex_event(parsed_event)
|
|
2404
|
+
if mode == self.PRETTIFIER_CLAUDE:
|
|
2405
|
+
return self._format_event_pretty_claude(parsed_event)
|
|
2406
|
+
return self._format_event_pretty(parsed_event)
|
|
2407
|
+
|
|
2408
|
+
def _emit_stdout(formatted: str, raw: bool = False) -> None:
|
|
2409
|
+
if raw:
|
|
2410
|
+
sys.stdout.write(formatted)
|
|
2411
|
+
sys.stdout.flush()
|
|
2412
|
+
return
|
|
2413
|
+
print(formatted, flush=True)
|
|
2414
|
+
|
|
2415
|
+
def _schedule_delayed_toolcall(parsed_event: dict, tool_name: str, mode: str) -> None:
|
|
2416
|
+
nonlocal delayed_toolcall_seq
|
|
2417
|
+
|
|
2418
|
+
def _emit_delayed_toolcall(event_payload: dict, event_mode: str) -> None:
|
|
2419
|
+
formatted = _format_deferred_toolcall(event_payload, event_mode)
|
|
2420
|
+
if not formatted:
|
|
2421
|
+
return
|
|
2422
|
+
_emit_stdout(formatted, raw=event_mode == self.PRETTIFIER_LIVE)
|
|
2423
|
+
|
|
2424
|
+
if toolcall_end_delay_seconds <= 0:
|
|
2425
|
+
_emit_delayed_toolcall(parsed_event, mode)
|
|
2426
|
+
return
|
|
2427
|
+
|
|
2428
|
+
delayed_toolcall_seq += 1
|
|
2429
|
+
entry_id = delayed_toolcall_seq
|
|
2430
|
+
entry: Dict = {
|
|
2431
|
+
"id": entry_id,
|
|
2432
|
+
"tool": tool_name,
|
|
2433
|
+
"event": parsed_event,
|
|
2434
|
+
"mode": mode,
|
|
2435
|
+
}
|
|
2436
|
+
|
|
2437
|
+
def _timer_emit() -> None:
|
|
2438
|
+
with delayed_toolcalls_lock:
|
|
2439
|
+
pending = pending_delayed_toolcalls.pop(entry_id, None)
|
|
2440
|
+
if not pending:
|
|
2441
|
+
return
|
|
2442
|
+
_emit_delayed_toolcall(pending["event"], pending["mode"])
|
|
2443
|
+
|
|
2444
|
+
timer = threading.Timer(toolcall_end_delay_seconds, _timer_emit)
|
|
2445
|
+
timer.daemon = True
|
|
2446
|
+
entry["timer"] = timer
|
|
2447
|
+
with delayed_toolcalls_lock:
|
|
2448
|
+
pending_delayed_toolcalls[entry_id] = entry
|
|
2449
|
+
timer.start()
|
|
2450
|
+
|
|
2451
|
+
def _cancel_delayed_toolcall(tool_name: str) -> None:
|
|
2452
|
+
with delayed_toolcalls_lock:
|
|
2453
|
+
if not pending_delayed_toolcalls:
|
|
2454
|
+
return
|
|
2455
|
+
|
|
2456
|
+
selected_id: Optional[int] = None
|
|
2457
|
+
if tool_name:
|
|
2458
|
+
for entry_id, entry in pending_delayed_toolcalls.items():
|
|
2459
|
+
if entry.get("tool") == tool_name:
|
|
2460
|
+
selected_id = entry_id
|
|
2461
|
+
break
|
|
2462
|
+
|
|
2463
|
+
if selected_id is None:
|
|
2464
|
+
selected_id = min(pending_delayed_toolcalls.keys())
|
|
2465
|
+
|
|
2466
|
+
pending = pending_delayed_toolcalls.pop(selected_id, None)
|
|
2467
|
+
|
|
2468
|
+
if pending:
|
|
2469
|
+
timer = pending.get("timer")
|
|
2470
|
+
if timer:
|
|
2471
|
+
timer.cancel()
|
|
2472
|
+
|
|
2473
|
+
def _cancel_all_delayed_toolcalls() -> None:
|
|
2474
|
+
with delayed_toolcalls_lock:
|
|
2475
|
+
pending = list(pending_delayed_toolcalls.values())
|
|
2476
|
+
pending_delayed_toolcalls.clear()
|
|
2477
|
+
for entry in pending:
|
|
2478
|
+
timer = entry.get("timer")
|
|
2479
|
+
if timer:
|
|
2480
|
+
timer.cancel()
|
|
2481
|
+
|
|
2482
|
+
cancel_delayed_toolcalls = _cancel_all_delayed_toolcalls
|
|
2483
|
+
|
|
2484
|
+
def _emit_parsed_event(parsed_event: dict, raw_json_line: Optional[str] = None) -> None:
|
|
2485
|
+
event_type = parsed_event.get("type", "")
|
|
2486
|
+
|
|
2487
|
+
# Capture session ID from the session event (sent at stream start)
|
|
2488
|
+
if event_type == "session":
|
|
2489
|
+
self.session_id = parsed_event.get("id")
|
|
2490
|
+
|
|
2491
|
+
# Track per-run assistant usage from stream events.
|
|
2492
|
+
self._track_assistant_usage_from_event(parsed_event)
|
|
2493
|
+
|
|
2494
|
+
# Ensure agent_end reflects cumulative per-run totals when available.
|
|
2495
|
+
if event_type == "agent_end":
|
|
2496
|
+
accumulated_total_cost = self._get_accumulated_total_cost_usd()
|
|
2497
|
+
if accumulated_total_cost is not None:
|
|
2498
|
+
parsed_event["total_cost_usd"] = accumulated_total_cost
|
|
2499
|
+
if isinstance(self._run_usage_totals, dict):
|
|
2500
|
+
parsed_event["usage"] = self._run_usage_totals
|
|
2501
|
+
|
|
2502
|
+
# Capture result event for shell backend
|
|
2503
|
+
if event_type == "agent_end":
|
|
2504
|
+
# agent_end has a 'messages' array; extract final assistant text
|
|
2505
|
+
messages = parsed_event.get("messages", [])
|
|
2506
|
+
text = ""
|
|
2507
|
+
if isinstance(messages, list):
|
|
2508
|
+
# Walk messages in reverse to find last assistant message with text
|
|
2509
|
+
for m in reversed(messages):
|
|
2510
|
+
if isinstance(m, dict) and m.get("role") == "assistant":
|
|
2511
|
+
text = self._extract_text_from_message(m)
|
|
2512
|
+
if text:
|
|
2513
|
+
break
|
|
2514
|
+
if text:
|
|
2515
|
+
self.last_result_event = self._build_success_result_event(text, parsed_event)
|
|
2516
|
+
else:
|
|
2517
|
+
self.last_result_event = parsed_event
|
|
2518
|
+
elif event_type == "message":
|
|
2519
|
+
# OpenAI-compatible format: capture last assistant message
|
|
2520
|
+
msg = parsed_event.get("message", {})
|
|
2521
|
+
if isinstance(msg, dict) and msg.get("role") == "assistant":
|
|
2522
|
+
text = self._extract_text_from_message(msg)
|
|
2523
|
+
if text:
|
|
2524
|
+
self.last_result_event = self._build_success_result_event(text, parsed_event)
|
|
2525
|
+
elif event_type == "turn_end":
|
|
2526
|
+
# turn_end may contain the final assistant message
|
|
2527
|
+
msg = parsed_event.get("message", {})
|
|
2528
|
+
if isinstance(msg, dict):
|
|
2529
|
+
text = self._extract_text_from_message(msg)
|
|
2530
|
+
if text:
|
|
2531
|
+
self.last_result_event = self._build_success_result_event(text, parsed_event)
|
|
2532
|
+
|
|
2533
|
+
# Filter hidden stream types (live mode handles its own filtering)
|
|
2534
|
+
if event_type in hide_types and self.prettifier_mode != self.PRETTIFIER_LIVE:
|
|
2535
|
+
return
|
|
2536
|
+
|
|
2537
|
+
# Fallback toolcall_end events (without toolCallId) are delayed so
|
|
2538
|
+
# short tool executions only show the final combined tool event.
|
|
2539
|
+
if pretty:
|
|
2540
|
+
fallback_tool_name = _extract_fallback_toolcall_name(parsed_event)
|
|
2541
|
+
if fallback_tool_name is not None:
|
|
2542
|
+
_schedule_delayed_toolcall(parsed_event, fallback_tool_name, self.prettifier_mode)
|
|
2543
|
+
return
|
|
2544
|
+
|
|
2545
|
+
# Live stream mode: stream deltas in real-time
|
|
2546
|
+
if self.prettifier_mode == self.PRETTIFIER_LIVE:
|
|
2547
|
+
if event_type in hide_types:
|
|
2548
|
+
# In live mode, still suppress session/compaction/retry events
|
|
2549
|
+
# but NOT message_start/message_end (handled by _format_event_live)
|
|
2550
|
+
if event_type not in ("message_start", "message_end"):
|
|
2551
|
+
return
|
|
2552
|
+
formatted_live = self._format_event_live(parsed_event)
|
|
2553
|
+
if formatted_live is not None:
|
|
2554
|
+
if formatted_live == "":
|
|
2555
|
+
return
|
|
2556
|
+
sys.stdout.write(formatted_live)
|
|
2557
|
+
sys.stdout.flush()
|
|
2558
|
+
else:
|
|
2559
|
+
# Fallback: print raw JSON for unhandled event types
|
|
2560
|
+
print(json.dumps(parsed_event, ensure_ascii=False), flush=True)
|
|
2561
|
+
return
|
|
2562
|
+
|
|
2563
|
+
# Format and print using model-appropriate prettifier
|
|
2564
|
+
if pretty:
|
|
2565
|
+
if self.prettifier_mode == self.PRETTIFIER_CODEX:
|
|
2566
|
+
# Try Pi-wrapped Codex format first (role-based messages)
|
|
2567
|
+
if "role" in parsed_event:
|
|
2568
|
+
formatted = self._format_pi_codex_message(parsed_event)
|
|
2569
|
+
else:
|
|
2570
|
+
# Try Pi event handler (message_update, turn_end, etc.)
|
|
2571
|
+
formatted = self._format_pi_codex_event(parsed_event)
|
|
2572
|
+
if formatted is None:
|
|
2573
|
+
# Try native Codex event handler
|
|
2574
|
+
formatted = self._format_event_pretty_codex(parsed_event)
|
|
2575
|
+
if formatted is None:
|
|
2576
|
+
# Sanitize before raw JSON fallback: strip thinkingSignature,
|
|
2577
|
+
# encrypted_content, and metadata from nested Codex events.
|
|
2578
|
+
self._sanitize_codex_event(parsed_event, strip_metadata=True)
|
|
2579
|
+
formatted = json.dumps(parsed_event, ensure_ascii=False)
|
|
2580
|
+
elif formatted == "":
|
|
2581
|
+
return
|
|
2582
|
+
elif self.prettifier_mode == self.PRETTIFIER_CLAUDE:
|
|
2583
|
+
formatted = self._format_event_pretty_claude(parsed_event)
|
|
2584
|
+
else:
|
|
2585
|
+
formatted = self._format_event_pretty(parsed_event)
|
|
2586
|
+
if formatted is not None:
|
|
2587
|
+
print(formatted, flush=True)
|
|
2588
|
+
else:
|
|
2589
|
+
if raw_json_line is not None:
|
|
2590
|
+
print(raw_json_line, flush=True)
|
|
2591
|
+
else:
|
|
2592
|
+
print(json.dumps(parsed_event, ensure_ascii=False), flush=True)
|
|
2593
|
+
|
|
2594
|
+
def _merge_buffered_tool_stdout_into(event_payload: dict) -> None:
|
|
2595
|
+
buffered_text = "\n".join(self._buffered_tool_stdout_lines).strip()
|
|
2596
|
+
if not buffered_text:
|
|
2597
|
+
self._buffered_tool_stdout_lines.clear()
|
|
2598
|
+
return
|
|
2599
|
+
|
|
2600
|
+
result_val = event_payload.get("result")
|
|
2601
|
+
if result_val in (None, "", [], {}):
|
|
2602
|
+
event_payload["result"] = buffered_text
|
|
2603
|
+
elif isinstance(result_val, str):
|
|
2604
|
+
existing = self._strip_ansi_sequences(result_val)
|
|
2605
|
+
if existing:
|
|
2606
|
+
if not existing.endswith("\n"):
|
|
2607
|
+
existing += "\n"
|
|
2608
|
+
event_payload["result"] = existing + buffered_text
|
|
2609
|
+
else:
|
|
2610
|
+
event_payload["result"] = buffered_text
|
|
2611
|
+
else:
|
|
2612
|
+
# Keep complex result structures untouched; print trailing raw lines
|
|
2613
|
+
# before the next structured event for stable transcript ordering.
|
|
2614
|
+
print(buffered_text, flush=True)
|
|
2615
|
+
|
|
2616
|
+
self._buffered_tool_stdout_lines.clear()
|
|
2617
|
+
|
|
2618
|
+
def _flush_pending_tool_events() -> None:
|
|
2619
|
+
nonlocal pending_tool_execution_end, pending_turn_end_after_tool
|
|
2620
|
+
if pending_tool_execution_end is not None:
|
|
2621
|
+
_merge_buffered_tool_stdout_into(pending_tool_execution_end)
|
|
2622
|
+
_emit_parsed_event(pending_tool_execution_end)
|
|
2623
|
+
pending_tool_execution_end = None
|
|
2624
|
+
|
|
2625
|
+
if pending_turn_end_after_tool is not None:
|
|
2626
|
+
if self._buffered_tool_stdout_lines:
|
|
2627
|
+
print("\n".join(self._buffered_tool_stdout_lines), flush=True)
|
|
2628
|
+
self._buffered_tool_stdout_lines.clear()
|
|
2629
|
+
_emit_parsed_event(pending_turn_end_after_tool)
|
|
2630
|
+
pending_turn_end_after_tool = None
|
|
2631
|
+
|
|
2632
|
+
# Stream pi's stdout line by line. JSON lines become structured events
# (possibly deferred so trailing raw tool stdout can be grouped with them);
# non-JSON lines are either buffered (pretty mode, tool running) or echoed.
try:
    for raw_line in process.stdout:
        line = raw_line.rstrip("\n\r")
        if not line.strip():
            continue

        # Try to parse as JSON
        try:
            parsed = json.loads(line)
        except json.JSONDecodeError:
            # Non-JSON output (raw tool stdout). In pretty mode, buffer raw
            # lines while tool execution events are pending to avoid
            # interleaving with structured events (e.g. turn_end).
            if pretty and (
                self._in_tool_execution
                or pending_tool_execution_end is not None
                or pending_turn_end_after_tool is not None
            ):
                self._buffered_tool_stdout_lines.append(self._strip_ansi_sequences(line))
                continue
            print(line, flush=True)
            continue

        event_type = parsed.get("type", "")

        if pretty and event_type == "tool_execution_start":
            # Reset raw tool stdout buffer per tool execution.
            self._buffered_tool_stdout_lines.clear()

        if pretty and event_type == "tool_execution_end":
            # Tool finished before the delayed fallback timer fired — suppress
            # the pending fallback toolcall_end preview.
            tool_name = parsed.get("toolName", "")
            _cancel_delayed_toolcall(tool_name if isinstance(tool_name, str) else "")

            # Defer emission so any trailing raw stdout can be grouped before
            # downstream structured metadata like turn_end.
            pending_tool_execution_end = parsed
            continue

        if pretty and event_type == "turn_end" and pending_tool_execution_end is not None:
            # Hold turn_end until buffered trailing raw stdout is flushed with
            # the pending tool event.
            pending_turn_end_after_tool = parsed
            continue

        # Any other structured event: flush deferred tool/turn events first so
        # transcript ordering stays stable, then emit this event.
        if pretty and (
            pending_tool_execution_end is not None or pending_turn_end_after_tool is not None
        ):
            _flush_pending_tool_events()

        _emit_parsed_event(parsed, raw_json_line=line)

    # Flush any deferred tool/turn events at end-of-stream.
    if pretty and (
        pending_tool_execution_end is not None or pending_turn_end_after_tool is not None
    ):
        _flush_pending_tool_events()
    elif self._buffered_tool_stdout_lines:
        # No deferred events: dump whatever raw stdout is still buffered.
        print("\n".join(self._buffered_tool_stdout_lines), flush=True)
        self._buffered_tool_stdout_lines.clear()

except ValueError:
    # Watchdog closed stdout — expected when process exits but pipe stays open.
    pass

# Signal watchdog that output loop is done
output_done.set()
cancel_delayed_toolcalls()

# Write capture file for shell backend
self._write_capture_file(capture_path)

# Wait for process cleanup
try:
    process.wait(timeout=5)
except subprocess.TimeoutExpired:
    # Best-effort: fall through and report whatever returncode we have.
    pass

# Wait for stderr thread to finish
stderr_thread.join(timeout=3)

# NOTE(review): `returncode or 0` maps a still-None returncode (process not
# yet reaped) to success — presumably intentional; confirm with caller.
return process.returncode or 0
|
|
2715
|
+
|
|
2716
|
+
except KeyboardInterrupt:
    # Ctrl-C: shut the pi child process down gracefully, escalating to kill
    # if it ignores terminate, then persist the capture file.
    print("\nInterrupted by user", file=sys.stderr)
    cancel_delayed_toolcalls()
    try:
        process.terminate()
        try:
            process.wait(timeout=5)
        except subprocess.TimeoutExpired:
            # Terminate was ignored — force kill and reap.
            process.kill()
            process.wait(timeout=5)
    except Exception:
        # Best-effort teardown; the process may already be gone.
        pass
    self._write_capture_file(capture_path)
    # 130 is the conventional exit status for SIGINT (128 + signal 2).
    return 130
|
|
2730
|
+
|
|
2731
|
+
except Exception as e:
    # Unexpected failure: report it, best-effort terminate the child process
    # if still running, persist the capture file, and exit with failure.
    print(f"Error executing pi: {e}", file=sys.stderr)
    cancel_delayed_toolcalls()
    try:
        if process.poll() is None:
            process.terminate()
            process.wait(timeout=5)
    except Exception:
        # Teardown is best-effort; ignore secondary failures.
        pass
    self._write_capture_file(capture_path)
    return 1
|
|
2742
|
+
|
|
2743
|
+
def run(self) -> int:
    """Top-level flow: validate inputs, configure state, then launch pi.

    Returns the pi process exit code, or 1 on a validation/setup error.
    """
    args = self.parse_arguments()

    # Resolve the instruction text: CLI flag first, then environment.
    instruction = args.prompt or os.environ.get("JUNO_INSTRUCTION")
    if not (instruction or args.prompt_file):
        print("Error: Either -p/--prompt or -pp/--prompt-file is required.", file=sys.stderr)
        print("\nRun 'pi.py --help' for usage information.", file=sys.stderr)
        return 1

    if not self.check_pi_installed():
        install_hint = (
            "Error: Pi CLI is not available. Please install it:\n"
            " npm install -g @mariozechner/pi-coding-agent\n"
            "See: https://pi.dev/"
        )
        print(install_hint, file=sys.stderr)
        return 1

    self.project_path = os.path.abspath(args.cd)
    if not os.path.isdir(self.project_path):
        print(f"Error: Project path does not exist: {self.project_path}", file=sys.stderr)
        return 1

    self.model_name = self.expand_model_shorthand(args.model)
    self.prettifier_mode = self._detect_prettifier_mode(self.model_name)
    self.verbose = args.verbose

    # Verbose mode forces the live stream prettifier so every model gets
    # real-time output (Codex models already default to LIVE), and reports
    # the chosen mode on stderr.
    if args.verbose:
        self.prettifier_mode = self.PRETTIFIER_LIVE
        print(f"Prettifier mode: {self.prettifier_mode} (model: {self.model_name})", file=sys.stderr)

    # A prompt file takes precedence over the inline/env instruction.
    self.prompt = (
        self.read_prompt_file(args.prompt_file) if args.prompt_file else instruction
    )

    cmd, stdin_prompt = self.build_pi_command(args)
    return self.run_pi(cmd, args, stdin_prompt=stdin_prompt)
|
|
2788
|
+
|
|
2789
|
+
|
|
2790
|
+
def main():
    """Script entry point: run the Pi service and exit with its status code."""
    raise SystemExit(PiService().run())
|
|
2793
|
+
|
|
2794
|
+
|
|
2795
|
+
# Allow direct execution as a script.
if __name__ == "__main__":
    main()
|