juno-code 1.0.32 → 1.0.34
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registry.
- package/dist/bin/cli.js +339 -213
- package/dist/bin/cli.js.map +1 -1
- package/dist/bin/cli.mjs +301 -175
- package/dist/bin/cli.mjs.map +1 -1
- package/dist/index.js +3 -7
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +3 -7
- package/dist/index.mjs.map +1 -1
- package/dist/templates/services/__pycache__/codex.cpython-38.pyc +0 -0
- package/dist/templates/services/claude.py +31 -1
- package/dist/templates/services/codex.py +388 -16
- package/package.json +1 -1
package/dist/templates/services/__pycache__/codex.cpython-38.pyc: Binary file (diff not shown)

package/dist/templates/services/claude.py
@@ -51,6 +51,7 @@ class ClaudeService:
         self.verbose = False
         # User message truncation: -1 = no truncation, N = truncate to N lines
         self.user_message_truncate = int(os.environ.get("CLAUDE_USER_MESSAGE_PRETTY_TRUNCATE", "4"))
+        self.last_result_event: Optional[Dict[str, Any]] = None

     def expand_model_shorthand(self, model: str) -> str:
         """
@@ -529,6 +530,20 @@ Environment Variables:
             print(f"Executing: {' '.join(cmd)}", file=sys.stderr)
             print("-" * 80, file=sys.stderr)

+        capture_path = os.environ.get("JUNO_SUBAGENT_CAPTURE_PATH")
+
+        def write_capture_file():
+            """Persist the final result event for programmatic capture without affecting screen output."""
+            if not capture_path or not self.last_result_event:
+                return
+            try:
+                Path(capture_path).write_text(
+                    json.dumps(self.last_result_event, ensure_ascii=False),
+                    encoding="utf-8"
+                )
+            except Exception as e:
+                print(f"Warning: Failed to write capture file: {e}", file=sys.stderr)
+
         try:
             # Change to project directory before running
             original_cwd = os.getcwd()
@@ -549,9 +564,19 @@ Environment Variables:
             # This allows users to pipe to jq and see output as it streams
             if process.stdout:
                 for line in process.stdout:
+                    raw_line = line.strip()
+                    # Capture the raw final result event for programmatic consumption
+                    try:
+                        parsed_raw = json.loads(raw_line)
+                        if isinstance(parsed_raw, dict) and parsed_raw.get("type") == "result":
+                            self.last_result_event = parsed_raw
+                    except json.JSONDecodeError:
+                        # Ignore non-JSON lines here; pretty formatter will handle them
+                        pass
+
                     # Apply pretty formatting if enabled
                     if pretty:
-                        formatted_line = self.pretty_format_json(
+                        formatted_line = self.pretty_format_json(raw_line)
                         if formatted_line:
                             print(formatted_line, flush=True)
                         else:
@@ -567,6 +592,9 @@ Environment Variables:
             if stderr_output:
                 print(stderr_output, file=sys.stderr)

+            # Persist the raw final result event for programmatic capture
+            write_capture_file()
+
             # Restore original working directory
             os.chdir(original_cwd)

@@ -577,12 +605,14 @@ Environment Variables:
             if process:
                 process.terminate()
                 process.wait()
+            write_capture_file()
             # Restore original working directory
             if 'original_cwd' in locals():
                 os.chdir(original_cwd)
             return 130
         except Exception as e:
             print(f"Error executing claude: {e}", file=sys.stderr)
+            write_capture_file()
             # Restore original working directory
             if 'original_cwd' in locals():
                 os.chdir(original_cwd)
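Note on the claude.py change: the wrapper now remembers the final {"type": "result", ...} stream event and, when JUNO_SUBAGENT_CAPTURE_PATH is set, writes it to that path on normal exit, interrupt, and error, without changing what is printed to the screen. A minimal sketch of how a caller might use this, assuming the template is run as a subprocess (the script path and prompt below are placeholders, not part of the package):

    # Sketch: read the result event captured by the claude.py wrapper.
    # Assumes JUNO_SUBAGENT_CAPTURE_PATH behaves as shown in the diff above;
    # "claude.py" and the prompt are illustrative placeholders.
    import json
    import os
    import subprocess
    import tempfile

    capture_file = os.path.join(tempfile.mkdtemp(), "result.json")
    env = dict(os.environ, JUNO_SUBAGENT_CAPTURE_PATH=capture_file)

    subprocess.run(["python", "claude.py", "-p", "Summarize this repo"], env=env)

    if os.path.exists(capture_file):
        with open(capture_file, encoding="utf-8") as fh:
            result_event = json.load(fh)
        print(result_event.get("type"))  # "result"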
package/dist/templates/services/codex.py

@@ -8,16 +8,25 @@ import argparse
 import os
 import subprocess
 import sys
-
+import json
+from datetime import datetime
+from typing import List, Optional


 class CodexService:
     """Service wrapper for OpenAI Codex CLI"""

     # Default configuration
-    DEFAULT_MODEL = "
+    DEFAULT_MODEL = "codex-5.1-max"
     DEFAULT_AUTO_INSTRUCTION = """You are an AI coding assistant. Follow the instructions provided and generate high-quality code."""

+    # Model shorthand mappings (colon-prefixed names expand to full model IDs)
+    MODEL_SHORTHANDS = {
+        ":codex": "codex-5.1-codex-max",
+        ":gpt-5": "gpt-5",
+        ":mini": "gpt-5-codex-mini",
+    }
+
     def __init__(self):
         self.model_name = self.DEFAULT_MODEL
         self.auto_instruction = self.DEFAULT_AUTO_INSTRUCTION
@@ -26,6 +35,17 @@ class CodexService:
         self.additional_args: List[str] = []
         self.verbose = False

+    def expand_model_shorthand(self, model: str) -> str:
+        """
+        Expand model shorthand names to full model IDs.
+
+        If the model starts with ':', look it up in MODEL_SHORTHANDS.
+        Otherwise, return the model name as-is.
+        """
+        if model.startswith(":"):
+            return self.MODEL_SHORTHANDS.get(model, model)
+        return model
+
     def check_codex_installed(self) -> bool:
         """Check if codex CLI is installed and available"""
         try:
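The shorthand table only applies to colon-prefixed names; full model IDs pass through untouched, and an unknown shorthand falls back to itself. A standalone sketch of the same lookup (values copied from the diff, logic mirrored rather than imported):

    MODEL_SHORTHANDS = {
        ":codex": "codex-5.1-codex-max",
        ":gpt-5": "gpt-5",
        ":mini": "gpt-5-codex-mini",
    }

    def expand_model_shorthand(model: str) -> str:
        # Only colon-prefixed names are treated as shorthands.
        if model.startswith(":"):
            return MODEL_SHORTHANDS.get(model, model)
        return model

    assert expand_model_shorthand(":mini") == "gpt-5-codex-mini"
    assert expand_model_shorthand("gpt-4") == "gpt-4"        # full IDs pass through
    assert expand_model_shorthand(":unknown") == ":unknown"  # unknown shorthand falls back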
@@ -49,6 +69,13 @@ Examples:
   %(prog)s -p "Write a hello world function"
   %(prog)s -pp prompt.txt --cd /path/to/project
   %(prog)s -p "Add tests" -m gpt-4 -c custom_arg=value
+  %(prog)s -p "Optimize code" -m :codex  # uses codex-5.1-codex-max
+
+Environment Variables:
+  CODEX_MODEL                  Model name (supports shorthand, default: codex-5.1-max)
+  CODEX_HIDE_STREAM_TYPES      Comma-separated list of streaming msg types to hide
+                               Default: turn_diff,token_count,exec_command_output_delta
+  JUNO_CODE_HIDE_STREAM_TYPES  Same as CODEX_HIDE_STREAM_TYPES (alias)
 """
         )

@@ -75,8 +102,8 @@ Examples:
         parser.add_argument(
             "-m", "--model",
             type=str,
-            default=self.DEFAULT_MODEL,
-            help=f"Model name. Default: {self.DEFAULT_MODEL}"
+            default=os.environ.get("CODEX_MODEL", self.DEFAULT_MODEL),
+            help=f"Model name. Supports shorthand (e.g., ':codex', ':gpt-5', ':mini') or full model ID. Default: {self.DEFAULT_MODEL} (env: CODEX_MODEL)"
         )

         parser.add_argument(
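Because the argparse default is read from the environment at parse time, the effective precedence is: an explicit -m/--model flag, then CODEX_MODEL, then the hardcoded default. A small sketch of that pattern (flag names mirror the diff; everything else is illustrative):

    import argparse
    import os

    DEFAULT_MODEL = "codex-5.1-max"

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--model",
        type=str,
        default=os.environ.get("CODEX_MODEL", DEFAULT_MODEL),
    )

    # With CODEX_MODEL=":mini" exported and no -m flag, args.model == ":mini"
    # (later expanded by expand_model_shorthand); an explicit -m always wins.
    args = parser.parse_args([])
    print(args.model)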
@@ -101,6 +128,77 @@ Examples:

         return parser.parse_args()

+    def _first_nonempty_str(self, *values: Optional[str]) -> str:
+        """Return the first non-empty string value."""
+        for val in values:
+            if isinstance(val, str) and val != "":
+                return val
+        return ""
+
+    def _extract_content_text(self, payload: dict) -> str:
+        """Join text-like fields from content arrays (item.* schema)."""
+        content = payload.get("content") if isinstance(payload, dict) else None
+        parts: List[str] = []
+        if isinstance(content, list):
+            for entry in content:
+                if not isinstance(entry, dict):
+                    continue
+                text_val = (
+                    entry.get("text")
+                    or entry.get("message")
+                    or entry.get("output_text")
+                    or entry.get("input_text")
+                )
+                if isinstance(text_val, str) and text_val != "":
+                    parts.append(text_val)
+        return "\n".join(parts) if parts else ""
+
+    def _extract_command_output_text(self, payload: dict) -> str:
+        """Extract aggregated/command output from various item.* layouts."""
+        if not isinstance(payload, dict):
+            return ""
+        result = payload.get("result") if isinstance(payload.get("result"), dict) else None
+        content_text = self._extract_content_text(payload)
+        return self._first_nonempty_str(
+            payload.get("aggregated_output"),
+            payload.get("output"),
+            payload.get("formatted_output"),
+            result.get("aggregated_output") if result else None,
+            result.get("output") if result else None,
+            result.get("formatted_output") if result else None,
+            content_text,
+        )
+
+    def _extract_reasoning_text(self, payload: dict) -> str:
+        """Extract reasoning text from legacy and item.* schemas."""
+        if not isinstance(payload, dict):
+            return ""
+        reasoning_obj = payload.get("reasoning") if isinstance(payload.get("reasoning"), dict) else None
+        result_obj = payload.get("result") if isinstance(payload.get("result"), dict) else None
+        content_text = self._extract_content_text(payload)
+        return self._first_nonempty_str(
+            payload.get("text"),
+            payload.get("reasoning_text"),
+            reasoning_obj.get("text") if reasoning_obj else None,
+            result_obj.get("text") if result_obj else None,
+            content_text,
+        )
+
+    def _extract_message_text(self, payload: dict) -> str:
+        """Extract final/assistant message text from item.* schemas."""
+        if not isinstance(payload, dict):
+            return ""
+        result_obj = payload.get("result") if isinstance(payload.get("result"), dict) else None
+        content_text = self._extract_content_text(payload)
+        return self._first_nonempty_str(
+            payload.get("message"),
+            payload.get("text"),
+            payload.get("final"),
+            result_obj.get("message") if result_obj else None,
+            result_obj.get("text") if result_obj else None,
+            content_text,
+        )
+
     def read_prompt_file(self, file_path: str) -> str:
         """Read prompt from a file"""
         try:
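These helpers all funnel through _first_nonempty_str, so the first populated field wins and empty strings are skipped; the joined content[] text is only a last resort. A rough, self-contained illustration of that precedence (the payload shape is hypothetical, chosen only to exercise fields the helpers probe):

    from typing import Optional

    def first_nonempty_str(*values: Optional[str]) -> str:
        # Mirrors _first_nonempty_str: skip None and empty strings.
        for val in values:
            if isinstance(val, str) and val != "":
                return val
        return ""

    payload = {
        "aggregated_output": "",                   # empty: skipped
        "result": {"output": "README.md\nsrc/"},   # first non-empty candidate
        "content": [{"text": "fallback text"}],    # content[] text is the last resort
    }

    result = payload.get("result") or {}
    print(first_nonempty_str(
        payload.get("aggregated_output"),
        payload.get("output"),
        result.get("output"),
    ))  # -> "README.md\nsrc/"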
@@ -163,34 +261,305 @@ Examples:

         return cmd

+    def _format_msg_pretty(
+        self,
+        msg_type: str,
+        payload: dict,
+        outer_type: str = "",
+    ) -> Optional[str]:
+        """
+        Pretty format for specific msg types to be human readable while
+        preserving a compact JSON header line that includes the msg.type.
+
+        - agent_message/message/assistant: render message text as multi-line block
+        - agent_reasoning: render 'text' field as multi-line text
+        - exec_command_end: only output 'formatted_output' (suppress other fields)
+        - token_count: fully suppressed (no final summary emission)
+
+        Returns a string to print, or None to fall back to raw printing.
+        """
+        try:
+            now = datetime.now().strftime("%I:%M:%S %p")
+            msg_type = (msg_type or "").strip()
+            header_type = (outer_type or msg_type).strip()
+            header = {"type": header_type or msg_type or "message", "datetime": now}
+
+            if outer_type and msg_type and outer_type != msg_type:
+                header["item_type"] = msg_type
+
+            if isinstance(payload, dict):
+                if payload.get("command"):
+                    header["command"] = payload.get("command")
+                if payload.get("status"):
+                    header["status"] = payload.get("status")
+                if payload.get("state") and not header.get("status"):
+                    header["status"] = payload.get("state")
+
+            # agent_reasoning → show 'text' human-readable
+            if msg_type in {"agent_reasoning", "reasoning"}:
+                content = self._extract_reasoning_text(payload)
+                header = {"type": header_type or msg_type, "datetime": now}
+                if outer_type and msg_type and outer_type != msg_type:
+                    header["item_type"] = msg_type
+                if "\n" in content:
+                    return json.dumps(header, ensure_ascii=False) + "\ntext:\n" + content
+                header["text"] = content
+                return json.dumps(header, ensure_ascii=False)
+
+            if msg_type in {"agent_message", "message", "assistant_message", "assistant"}:
+                content = self._extract_message_text(payload)
+                header = {"type": header_type or msg_type, "datetime": now}
+                if outer_type and msg_type and outer_type != msg_type:
+                    header["item_type"] = msg_type
+                if "\n" in content:
+                    return json.dumps(header, ensure_ascii=False) + "\nmessage:\n" + content
+                if content != "":
+                    header["message"] = content
+                    return json.dumps(header, ensure_ascii=False)
+                if header_type:
+                    return json.dumps(header, ensure_ascii=False)
+
+            # exec_command_end → only show 'formatted_output'
+            if msg_type == "exec_command_end":
+                formatted_output = payload.get("formatted_output", "") if isinstance(payload, dict) else ""
+                header = {"type": msg_type, "datetime": now}
+                if "\n" in formatted_output:
+                    return json.dumps(header, ensure_ascii=False) + "\nformatted_output:\n" + formatted_output
+                header["formatted_output"] = formatted_output
+                return json.dumps(header, ensure_ascii=False)
+
+            # item.* schema → command_execution blocks
+            if msg_type == "command_execution":
+                aggregated_output = self._extract_command_output_text(payload)
+                if "\n" in aggregated_output:
+                    return json.dumps(header, ensure_ascii=False) + "\naggregated_output:\n" + aggregated_output
+                if aggregated_output:
+                    header["aggregated_output"] = aggregated_output
+                    return json.dumps(header, ensure_ascii=False)
+                # No output (likely item.started) – still show header if it carries context
+                if header_type:
+                    return json.dumps(header, ensure_ascii=False)
+
+            return None
+        except Exception:
+            return None
+
+    def _normalize_event(self, obj_dict: dict):
+        """
+        Normalize legacy (msg-based) and new item.* schemas into a common tuple.
+        Returns (msg_type, payload_dict, outer_type).
+        """
+        msg = obj_dict.get("msg") if isinstance(obj_dict.get("msg"), dict) else {}
+        outer_type = (obj_dict.get("type") or "").strip()
+        item = obj_dict.get("item") if isinstance(obj_dict.get("item"), dict) else None
+
+        msg_type = (msg.get("type") or "").strip() if isinstance(msg, dict) else ""
+        payload = msg if isinstance(msg, dict) else {}
+
+        if not msg_type and item is not None:
+            msg_type = (item.get("type") or "").strip() or outer_type
+            payload = item
+        elif not msg_type:
+            msg_type = outer_type
+
+        return msg_type, payload, outer_type
+
     def run_codex(self, cmd: List[str], verbose: bool = False) -> int:
-        """Execute the codex command and stream output
+        """Execute the codex command and stream output with filtering and pretty-printing
+
+        Robustness improvements:
+        - Attempts to parse JSON even if the line has extra prefix/suffix noise
+        - Falls back to string suppression for known noisy types if JSON parsing fails
+        - Never emits token_count or exec_command_output_delta even on malformed lines
+        """
         if verbose:
             print(f"Executing: {' '.join(cmd)}", file=sys.stderr)
             print("-" * 80, file=sys.stderr)

+        # Resolve hidden stream types (ENV configurable)
+        default_hidden = {"turn_diff", "token_count", "exec_command_output_delta"}
+        env_hide_1 = os.environ.get("CODEX_HIDE_STREAM_TYPES", "")
+        env_hide_2 = os.environ.get("JUNO_CODE_HIDE_STREAM_TYPES", "")
+        hide_types = set(default_hidden)
+        for env_val in (env_hide_1, env_hide_2):
+            if env_val:
+                parts = [p.strip() for p in env_val.split(",") if p.strip()]
+                hide_types.update(parts)
+
+        # We fully suppress all token_count events (do not emit even at end)
+        last_token_count = None
+
         try:
             # Run the command and stream output
-            # Use line buffering (bufsize=1) to ensure each JSON line is output immediately
             process = subprocess.Popen(
                 cmd,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
                 text=True,
-                bufsize=1,
+                bufsize=1,
                 universal_newlines=True
             )

-
-
-
-
-
-
+            def split_json_stream(text: str):
+                objs = []
+                buf: List[str] = []
+                depth = 0
+                in_str = False
+                esc = False
+                started = False
+                for ch in text:
+                    if in_str:
+                        buf.append(ch)
+                        if esc:
+                            esc = False
+                        elif ch == '\\':
+                            esc = True
+                        elif ch == '"':
+                            in_str = False
+                        continue
+                    if ch == '"':
+                        in_str = True
+                        buf.append(ch)
+                        continue
+                    if ch == '{':
+                        depth += 1
+                        started = True
+                        buf.append(ch)
+                        continue
+                    if ch == '}':
+                        depth -= 1
+                        buf.append(ch)
+                        if started and depth == 0:
+                            candidate = ''.join(buf).strip().strip("'\"")
+                            if candidate:
+                                objs.append(candidate)
+                            buf = []
+                            started = False
+                        continue
+                    if started:
+                        buf.append(ch)
+                remainder = ''.join(buf) if buf else ""
+                return objs, remainder
+
+            def handle_obj(obj_dict: dict):
+                nonlocal last_token_count
+                msg_type_inner, payload_inner, outer_type_inner = self._normalize_event(obj_dict)
+
+                if msg_type_inner == "token_count":
+                    last_token_count = obj_dict
+                    return  # suppress
+
+                if msg_type_inner and msg_type_inner in hide_types:
+                    return  # suppress
+
+                pretty_line_inner = self._format_msg_pretty(msg_type_inner, payload_inner, outer_type_inner)
+                if pretty_line_inner is not None:
+                    print(pretty_line_inner, flush=True)
+                else:
+                    # print normalized JSON
+                    print(json.dumps(obj_dict, ensure_ascii=False), flush=True)
+
+            pending = ""

-
+            if process.stdout:
+                for raw_line in process.stdout:
+                    combined = pending + raw_line
+                    if not combined.strip():
+                        pending = ""
+                        continue
+
+                    # If no braces present at all, treat as plain text (with suppression)
+                    if "{" not in combined and "}" not in combined:
+                        lower = combined.lower()
+                        if (
+                            '"token_count"' in lower
+                            or '"exec_command_output_delta"' in lower
+                            or '"turn_diff"' in lower
+                        ):
+                            pending = ""
+                            continue
+                        print(combined, end="" if combined.endswith("\n") else "\n", flush=True)
+                        pending = ""
+                        continue
+
+                    # Preserve and emit any prefix before the first brace
+                    first_brace = combined.find("{")
+                    if first_brace > 0:
+                        prefix = combined[:first_brace]
+                        lower_prefix = prefix.lower()
+                        if (
+                            '"token_count"' not in lower_prefix
+                            and '"exec_command_output_delta"' not in lower_prefix
+                            and '"turn_diff"' not in lower_prefix
+                            and prefix.strip()
+                        ):
+                            print(prefix, end="" if prefix.endswith("\n") else "\n", flush=True)
+                        combined = combined[first_brace:]
+
+                    parts, pending = split_json_stream(combined)
+
+                    if parts:
+                        for part in parts:
+                            try:
+                                sub = json.loads(part)
+                                if isinstance(sub, dict):
+                                    handle_obj(sub)
+                                else:
+                                    low = part.lower()
+                                    if (
+                                        '"token_count"' in low
+                                        or '"exec_command_output_delta"' in low
+                                        or '"turn_diff"' in low
+                                    ):
+                                        continue
+                                    print(part, flush=True)
+                            except Exception:
+                                low = part.lower()
+                                if (
+                                    '"token_count"' in low
+                                    or '"exec_command_output_delta"' in low
+                                    or '"turn_diff"' in low
+                                ):
+                                    continue
+                                print(part, flush=True)
+                        continue
+
+                    # No complete object found yet; keep buffering if likely in the middle of one
+                    if pending:
+                        continue
+
+                    # Fallback for malformed/non-JSON lines that still contain braces
+                    lower = combined.lower()
+                    if (
+                        '"token_count"' in lower
+                        or '"exec_command_output_delta"' in lower
+                        or '"turn_diff"' in lower
+                    ):
+                        continue
+                    print(combined, end="" if combined.endswith("\n") else "\n", flush=True)
+
+            # Flush any pending buffered content after the stream ends
+            if pending.strip():
+                try:
+                    tail_obj = json.loads(pending)
+                    if isinstance(tail_obj, dict):
+                        handle_obj(tail_obj)
+                    else:
+                        print(pending, flush=True)
+                except Exception:
+                    low_tail = pending.lower()
+                    if (
+                        '"token_count"' not in low_tail
+                        and '"exec_command_output_delta"' not in low_tail
+                        and '"turn_diff"' not in low_tail
+                    ):
+                        print(pending, flush=True)
+
+            # Wait for process completion
             process.wait()

+            # Do not emit token_count summary; fully suppressed per user feedback
+
             # Print stderr if there were errors
             if process.stderr and process.returncode != 0:
                 stderr_output = process.stderr.read()
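The new streaming loop buffers partial JSON across reads and splits concatenated objects by tracking brace depth outside string literals; whatever has not closed yet is carried over in pending. The same idea can be sketched with the standard library's json.JSONDecoder.raw_decode instead of the manual scanner used in the diff (sample input is made up):

    import json

    def split_json_stream(text: str):
        # Return (decoded objects, unconsumed remainder), like the diff's helper,
        # but using raw_decode rather than manual brace counting.
        decoder = json.JSONDecoder()
        objs, idx = [], 0
        while True:
            start = text.find("{", idx)
            if start == -1:
                return objs, text[idx:]
            try:
                obj, end = decoder.raw_decode(text, start)
            except json.JSONDecodeError:
                # Incomplete object: hand it back as the pending buffer.
                return objs, text[start:]
            objs.append(obj)
            idx = end

    chunk = '{"type":"token_count","n":1}{"type":"agent_message","message":"hi"}{"type":"exec'
    objs, pending = split_json_stream(chunk)
    print([o["type"] for o in objs])  # ['token_count', 'agent_message']
    print(pending)                    # '{"type":"exec' (buffered until the next read)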
@@ -201,9 +570,11 @@ Examples:

         except KeyboardInterrupt:
             print("\nInterrupted by user", file=sys.stderr)
-
+            try:
                 process.terminate()
                 process.wait()
+            except Exception:
+                pass
             return 130
         except Exception as e:
             print(f"Error executing codex: {e}", file=sys.stderr)
@@ -237,7 +608,8 @@ Examples:

         # Set configuration from arguments
         self.project_path = os.path.abspath(args.cd)
-
+        # Expand model shorthand
+        self.model_name = self.expand_model_shorthand(args.model)
         self.auto_instruction = args.auto_instruction

         # Get prompt from file or argument