klaude-code 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/auth/__init__.py +24 -0
- klaude_code/auth/codex/__init__.py +20 -0
- klaude_code/auth/codex/exceptions.py +17 -0
- klaude_code/auth/codex/jwt_utils.py +45 -0
- klaude_code/auth/codex/oauth.py +229 -0
- klaude_code/auth/codex/token_manager.py +84 -0
- klaude_code/cli/main.py +63 -0
- klaude_code/command/status_cmd.py +13 -5
- klaude_code/config/list_model.py +53 -0
- klaude_code/core/prompt.py +10 -14
- klaude_code/core/prompts/prompt-codex-gpt-5-1-codex-max.md +117 -0
- klaude_code/core/prompts/prompt-subagent-explore.md +3 -1
- klaude_code/core/reminders.py +14 -5
- klaude_code/core/task.py +1 -4
- klaude_code/core/tool/truncation.py +4 -0
- klaude_code/core/turn.py +66 -41
- klaude_code/llm/__init__.py +2 -0
- klaude_code/llm/anthropic/client.py +2 -2
- klaude_code/llm/anthropic/input.py +25 -10
- klaude_code/llm/codex/__init__.py +5 -0
- klaude_code/llm/codex/client.py +129 -0
- klaude_code/llm/openai_compatible/client.py +2 -2
- klaude_code/llm/responses/client.py +181 -163
- klaude_code/llm/usage.py +3 -0
- klaude_code/protocol/events.py +1 -0
- klaude_code/protocol/llm_param.py +3 -1
- klaude_code/protocol/model.py +2 -3
- klaude_code/protocol/sub_agent.py +2 -1
- klaude_code/session/export.py +9 -14
- klaude_code/session/session.py +5 -0
- klaude_code/session/templates/export_session.html +5 -0
- klaude_code/ui/modes/repl/completers.py +41 -8
- klaude_code/ui/modes/repl/event_handler.py +15 -23
- klaude_code/ui/modes/repl/renderer.py +2 -0
- klaude_code/ui/renderers/developer.py +9 -8
- klaude_code/ui/renderers/metadata.py +9 -5
- klaude_code/ui/renderers/user_input.py +23 -10
- klaude_code/ui/rich/theme.py +2 -0
- {klaude_code-1.2.6.dist-info → klaude_code-1.2.8.dist-info}/METADATA +1 -1
- {klaude_code-1.2.6.dist-info → klaude_code-1.2.8.dist-info}/RECORD +43 -34
- /klaude_code/core/prompts/{prompt-codex.md → prompt-codex-gpt-5-1.md} +0 -0
- {klaude_code-1.2.6.dist-info → klaude_code-1.2.8.dist-info}/WHEEL +0 -0
- {klaude_code-1.2.6.dist-info → klaude_code-1.2.8.dist-info}/entry_points.txt +0 -0
klaude_code/core/prompts/prompt-codex-gpt-5-1-codex-max.md
ADDED

@@ -0,0 +1,117 @@
+You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
+
+## General
+
+- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
+
+## Editing constraints
+
+- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
+- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
+- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (i.e. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as search and replacing a string across a codebase).
+- You may be in a dirty git worktree.
+    * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
+    * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
+    * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
+    * If the changes are in unrelated files, just ignore them and don't revert them.
+- Do not amend a commit unless explicitly requested to do so.
+- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
+- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.
+
+## Plan tool
+
+When using the planning tool:
+- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
+- Do not make single-step plans.
+- Once you have made a plan, update it after performing one of the sub-tasks that you shared in the plan.
+
+## Codex CLI harness, sandboxing, and approvals
+
+The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.
+
+Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
+- **read-only**: The sandbox only permits reading files.
+- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
+- **danger-full-access**: No filesystem sandboxing - all commands are permitted.
+
+Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
+- **restricted**: Requires approval
+- **enabled**: No approval needed
+
+Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are:
+- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
+- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
+- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
+- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
+
+When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
+- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
+- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
+- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
+- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `with_escalated_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
+- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
+- (for all of these, you should weigh alternative paths that do not require approval)
+
+When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.
+
+You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.
+
+Although approvals introduce friction because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task, unless `approval_policy` is set to "never", in which case never ask for approvals.
+
+When requesting approval to execute a command that will require escalated privileges:
+- Provide the `with_escalated_permissions` parameter with the boolean value true
+- Include a short, 1 sentence explanation for why you need to enable `with_escalated_permissions` in the justification parameter
+
+## Special user requests
+
+- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
+- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
+
+## Frontend tasks
+When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts.
+Aim for interfaces that feel intentional, bold, and a bit surprising.
+- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system).
+- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias.
+- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions.
+- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere.
+- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs.
+- Ensure the page loads properly on both desktop and mobile
+
+Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language.
+
+## Presenting your work and final message
+
+You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
+
+- Default: be very concise; friendly coding teammate tone.
+- Ask only when needed; suggest ideas; mirror the user's style.
+- For substantial work, summarize clearly; follow final-answer formatting.
+- Skip heavy formatting for simple confirmations.
+- Don't dump large files you've written; reference paths only.
+- No "save/copy this file" - the user is on the same machine.
+- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
+- For code changes:
+    * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
+    * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
+    * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
+- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.
+
+### Final answer structure and style guidelines
+
+- Plain text; CLI handles styling. Use structure only when it helps scanability.
+- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
+- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
+- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
+- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
+- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
+- Tone: collaborative, concise, factual; present tense, active voice; self-contained; no "above/below"; parallel wording.
+- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short; wrap/reformat if long; avoid naming formatting styles in answers.
+- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
+- File References: When referencing files in your response, follow the below rules:
+    * Use inline code to make file paths clickable.
+    * Each reference should have a standalone path, even if it's the same file.
+    * Accepted: absolute, workspace-relative, a/ or b/ diff prefixes, or bare filename/suffix.
+    * Optionally include line/column (1-based): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
+    * Do not use URIs like file://, vscode://, or https://.
+    * Do not provide a range of lines.
+    * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5
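The approvals section in this new prompt amounts to a small protocol: an `on-request` escalation is an ordinary shell tool call carrying two extra parameters. A sketch of what such a call's arguments might look like; only `with_escalated_permissions` and `justification` are taken from the prompt text, while the surrounding dict shape and the command are illustrative, not the harness's actual wire format:

```python
# Hypothetical arguments for an escalated shell call under
# approval_policy == "on-request". Only `with_escalated_permissions` and
# `justification` come from the prompt above; everything else is assumed.
escalated_call = {
    "command": ["bash", "-lc", "pip install -e ."],  # needs network access
    "with_escalated_permissions": True,
    "justification": "Package installation requires network access outside the sandbox.",
}
```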
klaude_code/core/prompts/prompt-subagent-explore.md
CHANGED

@@ -1,4 +1,4 @@
-You are a
+You are a powerful code search agent.
 
 CRITICAL: This is a READ-ONLY exploration task. You MUST NOT create, write, or modify any files under any circumstances. Your role is strictly to search and analyze existing code.
 
@@ -14,6 +14,8 @@ Guidelines:
 - Use Bash ONLY for read-only operations (ls, git status, git log, git diff, find, cat, head, tail). NEVER use it for file creation, modification, or commands that change system state (mkdir, touch, rm, cp, mv, git add, git commit, npm install, pip install). NEVER use redirect operators (>, >>, |) or heredocs to create files
 - Adapt your search approach based on the thoroughness level specified by the caller
 - quick = scan obvious targets; medium = cover all related modules; very thorough = exhaustive sweep with validation
+- For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
+- Only your last message is surfaced back to the agent as the final answer.
 - Return file paths as absolute paths in your final response
 - For clear communication, avoid using emojis
 - Do not create any files, or run bash commands that modify the user's system state in any way (This includes temporary files in the /tmp folder. Never create these files, instead communicate your final report directly as a regular message)
klaude_code/core/reminders.py
CHANGED
|
@@ -1,4 +1,6 @@
|
|
|
1
1
|
import json
|
|
2
|
+
import re
|
|
3
|
+
import shlex
|
|
2
4
|
from pathlib import Path
|
|
3
5
|
from typing import Awaitable, Callable
|
|
4
6
|
|
|
@@ -12,6 +14,9 @@ from klaude_code.session import Session
|
|
|
12
14
|
type Reminder = Callable[[Session], Awaitable[model.DeveloperMessageItem | None]]
|
|
13
15
|
|
|
14
16
|
|
|
17
|
+
AT_FILE_PATTERN = re.compile(r'(?<!\S)@("(?P<quoted>[^\"]+)"|(?P<plain>\S+))')
|
|
18
|
+
|
|
19
|
+
|
|
15
20
|
def get_last_new_user_input(session: Session) -> str | None:
|
|
16
21
|
"""Get last user input & developer message (CLAUDE.md) from conversation history. if there's a tool result after user input, return None"""
|
|
17
22
|
result: list[str] = []
|
|
@@ -31,14 +36,17 @@ async def at_file_reader_reminder(
|
|
|
31
36
|
) -> model.DeveloperMessageItem | None:
|
|
32
37
|
"""Parse @foo/bar to read"""
|
|
33
38
|
last_user_input = get_last_new_user_input(session)
|
|
34
|
-
if not last_user_input or "@" not in last_user_input
|
|
39
|
+
if not last_user_input or "@" not in last_user_input:
|
|
35
40
|
return None
|
|
36
41
|
|
|
37
42
|
at_patterns: list[str] = []
|
|
38
43
|
|
|
39
|
-
for
|
|
40
|
-
|
|
41
|
-
|
|
44
|
+
for match in AT_FILE_PATTERN.finditer(last_user_input):
|
|
45
|
+
quoted = match.group("quoted")
|
|
46
|
+
plain = match.group("plain")
|
|
47
|
+
path_str = quoted if quoted is not None else plain
|
|
48
|
+
if path_str:
|
|
49
|
+
at_patterns.append(path_str)
|
|
42
50
|
|
|
43
51
|
if len(at_patterns) == 0:
|
|
44
52
|
return None
|
|
@@ -65,7 +73,8 @@ async def at_file_reader_reminder(
|
|
|
65
73
|
if tool_result.images:
|
|
66
74
|
collected_images.extend(tool_result.images)
|
|
67
75
|
elif path.exists() and path.is_dir():
|
|
68
|
-
|
|
76
|
+
quoted_path = shlex.quote(str(path))
|
|
77
|
+
args = BashTool.BashArguments(command=f"ls {quoted_path}")
|
|
69
78
|
tool_result = await BashTool.call_with_args(args)
|
|
70
79
|
at_files[str(path)] = model.AtPatternParseResult(
|
|
71
80
|
path=str(path) + "/",
|
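Two small details in this hunk are worth seeing in action: `AT_FILE_PATTERN` accepts both bare and double-quoted @-mentions while rejecting a mid-word `@`, and `shlex.quote` keeps the generated `ls` command safe for paths containing spaces. A self-contained check using the regex exactly as it appears above (the sample text is invented):

```python
import re
import shlex

AT_FILE_PATTERN = re.compile(r'(?<!\S)@("(?P<quoted>[^\"]+)"|(?P<plain>\S+))')

text = 'Read @src/app.py and @"My Docs/notes.md", but ignore user@example.com'
for match in AT_FILE_PATTERN.finditer(text):
    # Quoted form wins when present, mirroring the loop in the diff above.
    print(match.group("quoted") or match.group("plain"))
# -> src/app.py
# -> My Docs/notes.md   (user@example.com is rejected by the (?<!\S) lookbehind)

print(shlex.quote("My Docs/notes.md"))  # -> 'My Docs/notes.md' (safe to pass to ls)
```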
klaude_code/core/task.py
CHANGED
@@ -43,6 +43,7 @@ class MetadataAccumulator:
         acc_usage.reasoning_tokens += usage.reasoning_tokens
         acc_usage.output_tokens += usage.output_tokens
         acc_usage.total_tokens += usage.total_tokens
+        acc_usage.currency = usage.currency
 
         if usage.context_usage_percent is not None:
             acc_usage.context_usage_percent = usage.context_usage_percent
@@ -78,10 +79,6 @@ class MetadataAccumulator:
             accumulated.model_name = turn_metadata.model_name
         if turn_metadata.response_id:
             accumulated.response_id = turn_metadata.response_id
-        if turn_metadata.status is not None:
-            accumulated.status = turn_metadata.status
-        if turn_metadata.error_reason is not None:
-            accumulated.error_reason = turn_metadata.error_reason
 
     def finalize(self, task_duration_s: float) -> model.ResponseMetadataItem:
         """Return the final accumulated metadata with computed throughput and duration."""

klaude_code/core/tool/truncation.py
CHANGED

@@ -110,6 +110,10 @@ class SmartTruncationStrategy(TruncationStrategy):
         return None
 
     def truncate(self, output: str, tool_call: model.ToolCallItem | None = None) -> TruncationResult:
+        if tool_call and tool_call.name == tools.READ:
+            # Do not truncate Read tool outputs
+            return TruncationResult(output=output, was_truncated=False, original_length=len(output))
+
         original_length = len(output)
 
         if original_length <= self.max_length:
klaude_code/core/turn.py
CHANGED
@@ -38,6 +38,16 @@ class TurnExecutionContext:
     todo_context: TodoContext
 
 
+@dataclass
+class TurnResult:
+    """Aggregated state produced while executing a turn."""
+
+    reasoning_items: list[model.ReasoningTextItem | model.ReasoningEncryptedItem]
+    assistant_message: model.AssistantMessageItem | None
+    tool_calls: list[model.ToolCallItem]
+    stream_error: model.StreamErrorItem | None
+
+
 def build_events_from_tool_executor_event(session_id: str, event: ToolExecutorEvent) -> list[events.Event]:
     """Translate internal tool executor events into public protocol events."""
 
@@ -113,12 +123,34 @@ class TurnExecutor:
 
         yield events.TurnStartEvent(session_id=ctx.session_id)
 
-
-
-
-
-
+        turn_result = TurnResult(
+            reasoning_items=[],
+            assistant_message=None,
+            tool_calls=[],
+            stream_error=None,
+        )
+
+        async for event in self._consume_llm_stream(turn_result):
+            yield event
+
+        if turn_result.stream_error is not None:
+            ctx.append_history([turn_result.stream_error])
+            yield events.TurnEndEvent(session_id=ctx.session_id)
+            raise TurnError(turn_result.stream_error.error)
+
+        self._append_success_history(turn_result)
+        self._has_tool_call = bool(turn_result.tool_calls)
+
+        if turn_result.tool_calls:
+            async for ui_event in self._run_tool_executor(turn_result.tool_calls):
+                yield ui_event
+
+        yield events.TurnEndEvent(session_id=ctx.session_id)
 
+    async def _consume_llm_stream(self, turn_result: TurnResult) -> AsyncGenerator[events.Event, None]:
+        """Stream events from LLM and update turn_result in place."""
+
+        ctx = self._context
         async for response_item in ctx.llm_client.call(
             llm_param.LLMCallParameter(
                 input=ctx.get_conversation_history(),
@@ -136,16 +168,16 @@ class TurnExecutor:
         )
             match response_item:
                 case model.StartItem():
-
+                    continue
                 case model.ReasoningTextItem() as item:
-
+                    turn_result.reasoning_items.append(item)
                     yield events.ThinkingEvent(
                         content=item.content,
                         response_id=item.response_id,
                         session_id=ctx.session_id,
                     )
                 case model.ReasoningEncryptedItem() as item:
-
+                    turn_result.reasoning_items.append(item)
                 case model.AssistantMessageDelta() as item:
                     yield events.AssistantMessageDeltaEvent(
                         content=item.content,
@@ -153,7 +185,7 @@ class TurnExecutor:
                         session_id=ctx.session_id,
                     )
                 case model.AssistantMessageItem() as item:
-
+                    turn_result.assistant_message = item
                     yield events.AssistantMessageEvent(
                         content=item.content or "",
                         response_id=item.response_id,
@@ -164,13 +196,8 @@ class TurnExecutor:
                         session_id=ctx.session_id,
                         metadata=item,
                     )
-                    status = item.status
-                    if status is not None and status != "completed":
-                        response_failed = True
-                        error_message = f"Response status: {status}"
                 case model.StreamErrorItem() as item:
-
-                    error_message = item.error
+                    turn_result.stream_error = item
                     log_debug(
                         "[StreamError]",
                         item.error,
@@ -186,35 +213,33 @@ class TurnExecutor:
                         arguments="",
                     )
                 case model.ToolCallItem() as item:
-
+                    turn_result.tool_calls.append(item)
                 case _:
-
+                    continue
 
-
-
-
-
-
-        if
-            ctx.append_history(
-        if
-            ctx.append_history(
-        if turn_tool_calls:
-            ctx.append_history(turn_tool_calls)
-            self._has_tool_call = True
-
-        # Execute tools
-        if turn_tool_calls:
-            with tool_context(ctx.file_tracker, ctx.todo_context):
-                executor = ToolExecutor(
-                    registry=ctx.tool_registry,
-                    append_history=ctx.append_history,
-                )
-                self._tool_executor = executor
+    def _append_success_history(self, turn_result: TurnResult) -> None:
+        """Persist successful turn artifacts to conversation history."""
+        ctx = self._context
+        if turn_result.reasoning_items:
+            ctx.append_history(turn_result.reasoning_items)
+        if turn_result.assistant_message:
+            ctx.append_history([turn_result.assistant_message])
+        if turn_result.tool_calls:
+            ctx.append_history(turn_result.tool_calls)
 
-
+    async def _run_tool_executor(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[events.Event, None]:
+        """Run tools for the turn and translate executor events to UI events."""
+
+        ctx = self._context
+        with tool_context(ctx.file_tracker, ctx.todo_context):
+            executor = ToolExecutor(
+                registry=ctx.tool_registry,
+                append_history=ctx.append_history,
+            )
+            self._tool_executor = executor
+            try:
+                async for exec_event in executor.run_tools(tool_calls):
                     for ui_event in build_events_from_tool_executor_event(ctx.session_id, exec_event):
                         yield ui_event
+            finally:
                 self._tool_executor = None
-
-        yield events.TurnEndEvent(session_id=ctx.session_id)
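Condensed to its skeleton, this refactor is a collect-then-act loop: one pass drains the LLM stream into a `TurnResult`-style record while forwarding UI events, and history writes plus tool execution happen only after the stream ends. A toy, runnable model of that shape (item kinds and types here are simplified stand-ins, not the real `model` items):

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class ToyTurnResult:
    texts: list[str] = field(default_factory=list)
    tool_calls: list[str] = field(default_factory=list)
    stream_error: str | None = None


async def fake_stream():
    # Stand-in for ctx.llm_client.call(...)
    for item in [("text", "hello"), ("tool", "read_file"), ("text", "done")]:
        yield item


async def consume(result: ToyTurnResult):
    # Mirrors _consume_llm_stream: record items, forward events to the caller.
    async for kind, payload in fake_stream():
        if kind == "text":
            result.texts.append(payload)
        elif kind == "tool":
            result.tool_calls.append(payload)
        yield kind, payload


async def main() -> None:
    result = ToyTurnResult()
    async for _event in consume(result):
        pass  # the real code yields UI events here
    # Only after the stream ends: persist history, then run tools.
    print(result)


asyncio.run(main())
```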
klaude_code/llm/__init__.py
CHANGED
@@ -6,6 +6,7 @@ become available via the registry.
 
 from .anthropic import AnthropicClient
 from .client import LLMClientABC
+from .codex import CodexClient
 from .openai_compatible import OpenAICompatibleClient
 from .openrouter import OpenRouterClient
 from .registry import create_llm_client
@@ -17,5 +18,6 @@ __all__ = [
     "OpenAICompatibleClient",
     "OpenRouterClient",
     "AnthropicClient",
+    "CodexClient",
     "create_llm_client",
 ]
klaude_code/llm/anthropic/client.py
CHANGED

@@ -5,7 +5,7 @@ from typing import override
 
 import anthropic
 import httpx
-from anthropic import
+from anthropic import APIError
 from anthropic.types.beta.beta_input_json_delta import BetaInputJSONDelta
 from anthropic.types.beta.beta_raw_content_block_delta_event import BetaRawContentBlockDeltaEvent
 from anthropic.types.beta.beta_raw_content_block_start_event import BetaRawContentBlockStartEvent
@@ -217,5 +217,5 @@ class AnthropicClient(LLMClientABC):
                 )
             case _:
                 pass
-        except
+        except (APIError, httpx.HTTPError) as e:
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {str(e)}")
klaude_code/llm/anthropic/input.py
CHANGED

@@ -73,7 +73,8 @@ def _user_group_to_message(group: UserGroup) -> BetaMessageParam:
     return {"role": "user", "content": blocks}
 
 
-def
+def _tool_group_to_block(group: ToolGroup) -> dict[str, object]:
+    """Convert a single ToolGroup to a tool_result block."""
     tool_content: list[BetaTextBlockParam | BetaImageBlockParam] = []
     merged_text = merge_reminder_text(
         group.tool_result.output or "<system-reminder>Tool ran without output or errors</system-reminder>",
@@ -84,16 +85,19 @@ def _tool_group_to_message(group: ToolGroup) -> BetaMessageParam:
         tool_content.append(_image_part_to_block(image))
     for image in group.reminder_images:
         tool_content.append(_image_part_to_block(image))
+    return {
+        "type": "tool_result",
+        "tool_use_id": group.tool_result.call_id,
+        "is_error": group.tool_result.status == "error",
+        "content": tool_content,
+    }
+
+
+def _tool_groups_to_message(groups: list[ToolGroup]) -> BetaMessageParam:
+    """Convert one or more ToolGroups to a single user message with multiple tool_result blocks."""
     return {
         "role": "user",
-        "content": [
-            {
-                "type": "tool_result",
-                "tool_use_id": group.tool_result.call_id,
-                "is_error": group.tool_result.status == "error",
-                "content": tool_content,
-            }
-        ],
+        "content": [_tool_group_to_block(group) for group in groups],
     }
 
 
@@ -165,15 +169,26 @@ def convert_history_to_input(
         model_name: Model name. Used to verify that signatures are valid for the same model
     """
     messages: list[BetaMessageParam] = []
+    pending_tool_groups: list[ToolGroup] = []
+
+    def flush_tool_groups() -> None:
+        nonlocal pending_tool_groups
+        if pending_tool_groups:
+            messages.append(_tool_groups_to_message(pending_tool_groups))
+            pending_tool_groups = []
+
     for group in parse_message_groups(history):
         match group:
             case UserGroup():
+                flush_tool_groups()
                 messages.append(_user_group_to_message(group))
             case ToolGroup():
-
+                pending_tool_groups.append(group)
             case AssistantGroup():
+                flush_tool_groups()
                 messages.append(_assistant_group_to_message(group, model_name))
 
+    flush_tool_groups()
     _add_cache_control(messages)
     return messages
 
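The practical effect of `flush_tool_groups` is that back-to-back tool results now collapse into a single user message carrying several `tool_result` blocks, which is the shape the Anthropic Messages API expects when returning results for parallel tool calls. A toy rendering of the buffering pattern (the group encoding is a simplified stand-in for the real message groups):

```python
# Consecutive tool results are buffered and emitted as ONE user message;
# any other group kind flushes the buffer first.
groups = ["tool:a", "tool:b", "assistant:hi", "tool:c"]

messages: list[dict] = []
pending: list[str] = []


def flush() -> None:
    if pending:
        messages.append({"role": "user", "tool_results": pending.copy()})
        pending.clear()


for group in groups:
    kind, _, payload = group.partition(":")
    if kind == "tool":
        pending.append(payload)
    else:
        flush()
        messages.append({"role": kind, "content": payload})
flush()

print(messages)
# [{'role': 'user', 'tool_results': ['a', 'b']},
#  {'role': 'assistant', 'content': 'hi'},
#  {'role': 'user', 'tool_results': ['c']}]
```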
klaude_code/llm/codex/client.py
ADDED

@@ -0,0 +1,129 @@
+"""Codex LLM client using ChatGPT subscription via OAuth."""
+
+import time
+from collections.abc import AsyncGenerator
+from typing import override
+
+import httpx
+import openai
+from openai import AsyncOpenAI
+
+from klaude_code.auth.codex.exceptions import CodexNotLoggedInError
+from klaude_code.auth.codex.oauth import CodexOAuth
+from klaude_code.auth.codex.token_manager import CodexTokenManager
+from klaude_code.llm.client import LLMClientABC, call_with_logged_payload
+from klaude_code.llm.input_common import apply_config_defaults
+from klaude_code.llm.registry import register
+from klaude_code.llm.responses.client import parse_responses_stream
+from klaude_code.llm.responses.input import convert_history_to_input, convert_tool_schema
+from klaude_code.protocol import llm_param, model
+
+# Codex API configuration
+CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex"
+CODEX_HEADERS = {
+    "originator": "codex_cli_rs",
+    # Mocked Codex-style user agent string
+    "User-Agent": "codex_cli_rs/0.0.0-klaude",
+}
+
+
+@register(llm_param.LLMClientProtocol.CODEX)
+class CodexClient(LLMClientABC):
+    """LLM client for Codex API using ChatGPT subscription."""
+
+    def __init__(self, config: llm_param.LLMConfigParameter):
+        super().__init__(config)
+        self._token_manager = CodexTokenManager()
+        self._oauth = CodexOAuth(self._token_manager)
+
+        if not self._token_manager.is_logged_in():
+            raise CodexNotLoggedInError("Codex authentication required. Run 'klaude login codex' first.")
+
+        self.client = self._create_client()
+
+    def _create_client(self) -> AsyncOpenAI:
+        """Create OpenAI client with Codex configuration."""
+        state = self._token_manager.get_state()
+        if state is None:
+            raise CodexNotLoggedInError("Not logged in to Codex. Run 'klaude login codex' first.")
+
+        return AsyncOpenAI(
+            api_key=state.access_token,
+            base_url=CODEX_BASE_URL,
+            timeout=httpx.Timeout(300.0, connect=15.0, read=285.0),
+            default_headers={
+                **CODEX_HEADERS,
+                "chatgpt-account-id": state.account_id,
+            },
+        )
+
+    def _ensure_valid_token(self) -> None:
+        """Ensure token is valid, refresh if needed."""
+        state = self._token_manager.get_state()
+        if state is None:
+            raise CodexNotLoggedInError("Not logged in to Codex. Run 'klaude login codex' first.")
+
+        if state.is_expired():
+            self._oauth.refresh()
+            # Recreate client with new token
+            self.client = self._create_client()
+
+    @classmethod
+    @override
+    def create(cls, config: llm_param.LLMConfigParameter) -> "LLMClientABC":
+        return cls(config)
+
+    @override
+    async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem, None]:
+        # Ensure token is valid before API call
+        self._ensure_valid_token()
+
+        param = apply_config_defaults(param, self.get_llm_config())
+
+        # Codex API requires store=False
+        param.store = False
+
+        request_start_time = time.time()
+
+        inputs = convert_history_to_input(param.input, param.model)
+        tools = convert_tool_schema(param.tools)
+
+        session_id = param.session_id or ""
+        # Must send conversation_id/session_id headers to improve ChatGPT backend prompt cache hit rate.
+        extra_headers: dict[str, str] = {}
+        if session_id:
+            extra_headers["conversation_id"] = session_id
+            extra_headers["session_id"] = session_id
+
+        try:
+            stream = await call_with_logged_payload(
+                self.client.responses.create,
+                model=str(param.model),
+                tool_choice="auto",
+                parallel_tool_calls=True,
+                include=[
+                    "reasoning.encrypted_content",
+                ],
+                store=False,  # Always False for Codex
+                stream=True,
+                input=inputs,
+                instructions=param.system,
+                tools=tools,
+                text={
+                    "verbosity": param.verbosity,
+                },
+                prompt_cache_key=session_id,
+                reasoning={
+                    "effort": param.thinking.reasoning_effort,
+                    "summary": param.thinking.reasoning_summary,
+                }
+                if param.thinking and param.thinking.reasoning_effort
+                else None,
+                extra_headers=extra_headers,
+            )
+        except (openai.OpenAIError, httpx.HTTPError) as e:
+            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {str(e)}")
+            return
+
+        async for item in parse_responses_stream(stream, param, self._config.cost, request_start_time):
+            yield item
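`_ensure_valid_token` re-checks expiry before every request and rebuilds the `AsyncOpenAI` client after a refresh, because the access token is baked in as `api_key` at construction time. A minimal sketch of the expiry check such token state might implement; the field names and the safety-skew window are assumptions, not the real `CodexTokenManager` API:

```python
import time
from dataclasses import dataclass


@dataclass
class TokenState:
    """Stand-in for the persisted OAuth state; not the real klaude_code type."""
    access_token: str
    expires_at: float  # unix timestamp; assumed field name

    def is_expired(self, skew_s: float = 60.0) -> bool:
        # Treat tokens expiring within `skew_s` seconds as already expired,
        # so a request started now cannot outlive its token.
        return time.time() >= self.expires_at - skew_s


state = TokenState(access_token="...", expires_at=time.time() + 30)
print(state.is_expired())  # True: within the 60 s safety window
```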
klaude_code/llm/openai_compatible/client.py
CHANGED

@@ -4,7 +4,7 @@ from typing import Literal, override
 
 import httpx
 import openai
-
+
 
 from klaude_code.llm.client import LLMClientABC, call_with_logged_payload
 from klaude_code.llm.input_common import apply_config_defaults
@@ -193,7 +193,7 @@ class OpenAICompatibleClient(LLMClientABC):
                         name=tc.function.name,
                     )
                     accumulated_tool_calls.add(delta.tool_calls)
-        except (
+        except (openai.OpenAIError, httpx.HTTPError) as e:
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {str(e)}")
 
        # Finalize
|