klaude-code 1.2.11__py3-none-any.whl → 1.2.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/command/__init__.py +29 -26
- klaude_code/command/clear_cmd.py +0 -2
- klaude_code/command/diff_cmd.py +0 -2
- klaude_code/command/export_cmd.py +0 -2
- klaude_code/command/help_cmd.py +0 -2
- klaude_code/command/model_cmd.py +0 -2
- klaude_code/command/refresh_cmd.py +0 -2
- klaude_code/command/registry.py +4 -8
- klaude_code/command/release_notes_cmd.py +0 -2
- klaude_code/command/status_cmd.py +2 -4
- klaude_code/command/terminal_setup_cmd.py +0 -2
- klaude_code/command/thinking_cmd.py +227 -0
- klaude_code/config/select_model.py +5 -15
- klaude_code/const/__init__.py +1 -1
- klaude_code/core/agent.py +1 -8
- klaude_code/core/prompt.py +1 -1
- klaude_code/core/task.py +2 -3
- klaude_code/core/turn.py +0 -1
- klaude_code/llm/anthropic/client.py +56 -47
- klaude_code/llm/client.py +1 -19
- klaude_code/llm/codex/client.py +49 -30
- klaude_code/llm/openai_compatible/client.py +52 -34
- klaude_code/llm/openrouter/client.py +63 -41
- klaude_code/llm/responses/client.py +56 -39
- klaude_code/llm/usage.py +1 -49
- klaude_code/protocol/commands.py +1 -0
- klaude_code/protocol/llm_param.py +1 -9
- klaude_code/protocol/model.py +3 -5
- klaude_code/session/export.py +1 -8
- klaude_code/session/selector.py +12 -7
- klaude_code/ui/modes/repl/completers.py +3 -3
- klaude_code/ui/renderers/metadata.py +1 -12
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/METADATA +1 -1
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/RECORD +36 -35
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/WHEEL +0 -0
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/entry_points.txt +0 -0
klaude_code/llm/responses/client.py
CHANGED

@@ -6,20 +6,56 @@ import httpx
 import openai
 from openai import AsyncAzureOpenAI, AsyncOpenAI
 from openai.types import responses
+from openai.types.responses.response_create_params import ResponseCreateParamsStreaming

-from klaude_code.llm.client import LLMClientABC
+from klaude_code.llm.client import LLMClientABC
 from klaude_code.llm.input_common import apply_config_defaults
 from klaude_code.llm.registry import register
 from klaude_code.llm.responses.input import convert_history_to_input, convert_tool_schema
-from klaude_code.llm.usage import MetadataTracker
+from klaude_code.llm.usage import MetadataTracker
 from klaude_code.protocol import llm_param, model
 from klaude_code.trace import DebugType, log_debug

+
 if TYPE_CHECKING:
     from openai import AsyncStream
     from openai.types.responses import ResponseStreamEvent


+def build_payload(param: llm_param.LLMCallParameter) -> ResponseCreateParamsStreaming:
+    """Build OpenAI Responses API request parameters."""
+    inputs = convert_history_to_input(param.input, param.model)
+    tools = convert_tool_schema(param.tools)
+
+    payload: ResponseCreateParamsStreaming = {
+        "model": str(param.model),
+        "tool_choice": "auto",
+        "parallel_tool_calls": True,
+        "include": [
+            "reasoning.encrypted_content",
+        ],
+        "store": False,
+        "stream": True,
+        "temperature": param.temperature,
+        "max_output_tokens": param.max_tokens,
+        "input": inputs,
+        "instructions": param.system,
+        "tools": tools,
+        "prompt_cache_key": param.session_id or "",
+    }
+
+    if param.thinking and param.thinking.reasoning_effort:
+        payload["reasoning"] = {
+            "effort": param.thinking.reasoning_effort,
+            "summary": param.thinking.reasoning_summary,
+        }
+
+    if param.verbosity:
+        payload["text"] = {"verbosity": param.verbosity}
+
+    return payload
+
+
 async def parse_responses_stream(
     stream: "AsyncStream[ResponseStreamEvent]",
     param: llm_param.LLMCallParameter,
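
The build_payload helper introduced above assembles the Responses API request as a plain dict and only attaches the optional "reasoning" and "text" blocks when the corresponding parameters are set. A minimal sketch of that conditional-key pattern, using a hypothetical stand-in instead of a real LLMCallParameter:

    from dataclasses import dataclass

    @dataclass
    class FakeThinking:
        # Hypothetical stand-in for llm_param.Thinking, not the real class.
        reasoning_effort: str | None = None
        reasoning_summary: str | None = None

    def sketch_payload(model: str, thinking: FakeThinking | None, verbosity: str | None) -> dict:
        # Base fields are always present; optional blocks are added only when configured.
        payload: dict = {"model": model, "stream": True, "store": False}
        if thinking and thinking.reasoning_effort:
            payload["reasoning"] = {"effort": thinking.reasoning_effort, "summary": thinking.reasoning_summary}
        if verbosity:
            payload["text"] = {"verbosity": verbosity}
        return payload

    # "reasoning" appears only in the first call, "text" only in the second.
    print(sketch_payload("gpt-5.1", FakeThinking("high", "auto"), None))
    print(sketch_payload("gpt-5.1", None, "low"))
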
@@ -95,16 +131,17 @@ async def parse_responses_stream(
         if event.response.incomplete_details is not None:
             error_reason = event.response.incomplete_details.reason
         if event.response.usage is not None:
-
-
-
-
-
-
-
-
+            metadata_tracker.set_usage(
+                model.Usage(
+                    input_tokens=event.response.usage.input_tokens,
+                    output_tokens=event.response.usage.output_tokens,
+                    cached_tokens=event.response.usage.input_tokens_details.cached_tokens,
+                    reasoning_tokens=event.response.usage.output_tokens_details.reasoning_tokens,
+                    context_size=event.response.usage.total_tokens,
+                    context_limit=param.context_limit,
+                    max_tokens=param.max_tokens,
+                )
             )
-            metadata_tracker.set_usage(usage)
         metadata_tracker.set_model_name(str(param.model))
         metadata_tracker.set_response_id(response_id)
         yield metadata_tracker.finalize()
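
This hunk builds model.Usage directly from the Responses API usage object; the provider-specific conversion helpers are dropped in the klaude_code/llm/usage.py diff further down, and total_tokens now lands in the new context_size field. A rough sketch of the field mapping with made-up numbers (the right-hand field names follow the diff, the values are hypothetical):

    # Hypothetical usage figures as the Responses API might report them.
    api_usage = {
        "input_tokens": 12_000,
        "output_tokens": 800,
        "input_tokens_details": {"cached_tokens": 10_500},
        "output_tokens_details": {"reasoning_tokens": 300},
        "total_tokens": 12_800,
    }

    usage_kwargs = {
        "input_tokens": api_usage["input_tokens"],
        "output_tokens": api_usage["output_tokens"],
        "cached_tokens": api_usage["input_tokens_details"]["cached_tokens"],
        "reasoning_tokens": api_usage["output_tokens_details"]["reasoning_tokens"],
        "context_size": api_usage["total_tokens"],  # peak context usage for this turn
    }
    print(usage_kwargs)
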
@@ -162,36 +199,16 @@ class ResponsesClient(LLMClientABC):

         metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)

-
-        tools = convert_tool_schema(param.tools)
+        payload = build_payload(param)

+        log_debug(
+            json.dumps(payload, ensure_ascii=False, default=str),
+            style="yellow",
+            debug_type=DebugType.LLM_PAYLOAD,
+        )
         try:
-            stream = await
-
-                model=str(param.model),
-                tool_choice="auto",
-                parallel_tool_calls=True,
-                include=[
-                    "reasoning.encrypted_content",
-                ],
-                store=param.store,
-                previous_response_id=param.previous_response_id,
-                stream=True,
-                temperature=param.temperature,
-                max_output_tokens=param.max_tokens,
-                input=inputs,
-                instructions=param.system,
-                tools=tools,
-                text={
-                    "verbosity": param.verbosity,
-                },
-                prompt_cache_key=param.session_id or "",
-                reasoning={
-                    "effort": param.thinking.reasoning_effort,
-                    "summary": param.thinking.reasoning_summary,
-                }
-                if param.thinking and param.thinking.reasoning_effort
-                else None,
+            stream = await self.client.responses.create(
+                **payload,
                 extra_headers={"extra": json.dumps({"session_id": param.session_id}, sort_keys=True)},
             )
         except (openai.OpenAIError, httpx.HTTPError) as e:
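
The request is now built once by build_payload, logged, and unpacked into responses.create. The json.dumps(..., ensure_ascii=False, default=str) call is what keeps the payload log from raising on values that are not JSON-native; a small sketch of that behaviour with a hypothetical payload:

    import json
    from datetime import datetime

    # Hypothetical payload containing a value json cannot serialize natively.
    payload = {"model": "gpt-5.1", "temperature": None, "started_at": datetime(2024, 1, 1)}

    # default=str stringifies anything json does not know, so debug logging never raises.
    print(json.dumps(payload, ensure_ascii=False, default=str))
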
klaude_code/llm/usage.py
CHANGED

@@ -108,55 +108,7 @@ def convert_usage(
         reasoning_tokens=(usage.completion_tokens_details.reasoning_tokens if usage.completion_tokens_details else 0)
         or 0,
         output_tokens=usage.completion_tokens,
-
-        context_limit=context_limit,
-        max_tokens=max_tokens,
-    )
-
-
-def convert_anthropic_usage(
-    input_tokens: int,
-    output_tokens: int,
-    cached_tokens: int,
-    context_limit: int | None = None,
-    max_tokens: int | None = None,
-) -> model.Usage:
-    """Convert Anthropic usage data to internal Usage model.
-
-    context_token is computed from input + cached + output tokens,
-    representing the actual context window usage for this turn.
-    """
-    context_token = input_tokens + cached_tokens + output_tokens
-    return model.Usage(
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-        cached_tokens=cached_tokens,
-        context_token=context_token,
-        context_limit=context_limit,
-        max_tokens=max_tokens,
-    )
-
-
-def convert_responses_usage(
-    input_tokens: int,
-    output_tokens: int,
-    cached_tokens: int,
-    reasoning_tokens: int,
-    total_tokens: int,
-    context_limit: int | None = None,
-    max_tokens: int | None = None,
-) -> model.Usage:
-    """Convert OpenAI Responses API usage data to internal Usage model.
-
-    context_token is set to total_tokens from the API response,
-    representing the actual context window usage for this turn.
-    """
-    return model.Usage(
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-        cached_tokens=cached_tokens,
-        reasoning_tokens=reasoning_tokens,
-        context_token=total_tokens,
+        context_size=usage.total_tokens,
         context_limit=context_limit,
         max_tokens=max_tokens,
     )
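
With convert_anthropic_usage and convert_responses_usage removed, convert_usage now records the provider-reported total_tokens in the new context_size field instead of deriving a separate context_token figure; the Responses client builds model.Usage inline (as its hunk above shows), and the other clients are presumably updated along the same lines per the summary at the top. A worked sketch with hypothetical chat-completions numbers:

    # Hypothetical chat-completions usage.
    prompt_tokens = 9_000
    completion_tokens = 500
    total_tokens = prompt_tokens + completion_tokens

    # New behaviour: context_size is taken straight from the reported total.
    context_size = total_tokens
    print(context_size)  # 9500
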
klaude_code/protocol/commands.py
CHANGED

@@ -13,6 +13,7 @@ class CommandName(str, Enum):
     EXPORT = "export"
     STATUS = "status"
     RELEASE_NOTES = "release-notes"
+    THINKING = "thinking"
     # PLAN and DOC are dynamically registered now, but kept here if needed for reference
     # or we can remove them if no code explicitly imports them.
     # PLAN = "plan"
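
Because CommandName subclasses str, the new THINKING member behaves like its literal value wherever command names are compared or serialized. A minimal sketch (the enum here is a trimmed stand-in, not the full class):

    from enum import Enum

    class CommandName(str, Enum):
        # Trimmed stand-in for klaude_code.protocol.commands.CommandName.
        STATUS = "status"
        THINKING = "thinking"

    print(CommandName.THINKING == "thinking")               # True: str-backed members compare to plain strings
    print(CommandName("thinking") is CommandName.THINKING)  # lookup by value also works
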
klaude_code/protocol/llm_param.py
CHANGED

@@ -28,7 +28,7 @@ class Thinking(BaseModel):
     """

     # OpenAI Reasoning Style
-    reasoning_effort: Literal["high", "medium", "low", "minimal", "none"] | None = None
+    reasoning_effort: Literal["high", "medium", "low", "minimal", "none", "xhigh"] | None = None
     reasoning_summary: Literal["auto", "concise", "detailed"] | None = None

     # Claude/Gemini Thinking Style

@@ -138,12 +138,4 @@ class LLMCallParameter(LLMConfigModelParameter):
     input: list[ConversationItem]
     system: str | None = None
     tools: list[ToolSchema] | None = None
-
-    stream: Literal[True] = True # Always True
-
-    # OpenAI Responses
-    include: list[str] | None = None
-    store: bool = True
-    previous_response_id: str | None = None
-
     session_id: str | None = None
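
With "xhigh" added to the Literal, pydantic accepts it as a reasoning_effort value while anything outside the list is still rejected at validation time; the Responses-specific fields (stream, include, store, previous_response_id) leave LLMCallParameter because build_payload now fixes them in the client. A small validation sketch using a trimmed stand-in model (assumes pydantic v2):

    from typing import Literal

    from pydantic import BaseModel, ValidationError

    class Thinking(BaseModel):
        # Trimmed stand-in for klaude_code.protocol.llm_param.Thinking.
        reasoning_effort: Literal["high", "medium", "low", "minimal", "none", "xhigh"] | None = None
        reasoning_summary: Literal["auto", "concise", "detailed"] | None = None

    print(Thinking(reasoning_effort="xhigh"))  # accepted after this change

    try:
        Thinking(reasoning_effort="ultra")     # not in the Literal, so rejected
    except ValidationError as exc:
        print(exc.error_count(), "validation error")
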
klaude_code/protocol/model.py
CHANGED

@@ -20,9 +20,7 @@ class Usage(BaseModel):
     output_tokens: int = 0

     # Context window tracking
-
-    context_delta: int | None = None # Context growth since last task (for cache ratio calculation)
-    last_turn_output_token: int | None = None # Context growth since last task (for cache ratio calculation)
+    context_size: int | None = None # Peak total_tokens seen (for context usage display)
     context_limit: int | None = None # Model's context limit
     max_tokens: int | None = None # Max output tokens for this request


@@ -55,12 +53,12 @@ class Usage(BaseModel):
         """Context usage percentage computed from context_token / (context_limit - max_tokens)."""
         if self.context_limit is None or self.context_limit <= 0:
             return None
-        if self.
+        if self.context_size is None:
             return None
         effective_limit = self.context_limit - (self.max_tokens or const.DEFAULT_MAX_TOKENS)
         if effective_limit <= 0:
             return None
-        return (self.
+        return (self.context_size / effective_limit) * 100


 class TodoItem(BaseModel):
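
The context_usage_percent property now reads the single context_size field: the percentage is context_size divided by the context window left over after reserving max_tokens for output. A worked example with hypothetical numbers:

    # Hypothetical values: 200k context window, 8k reserved for output, 96k used so far.
    context_size = 96_000
    context_limit = 200_000
    max_tokens = 8_000

    effective_limit = context_limit - max_tokens          # 192_000
    usage_percent = context_size / effective_limit * 100  # 50.0
    print(f"{usage_percent:.1f}%")
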
klaude_code/session/export.py
CHANGED

@@ -194,18 +194,11 @@ def _render_single_metadata(
         input_stat += f"({_format_cost(u.input_cost, u.currency)})"
     parts.append(f'<span class="metadata-stat">{input_stat}</span>')

-    # Cached with cost
+    # Cached with cost
     if u.cached_tokens > 0:
         cached_stat = f"cached: {_format_token_count(u.cached_tokens)}"
         if u.cache_read_cost is not None:
             cached_stat += f"({_format_cost(u.cache_read_cost, u.currency)})"
-        # Cache ratio: (cached + context_delta - last_turn_output) / input tokens
-        # Shows how much of the input was cached (not new context growth)
-        if u.input_tokens > 0:
-            context_delta = u.context_delta or 0
-            last_turn_output_token = u.last_turn_output_token or 0
-            cache_ratio = (u.cached_tokens + context_delta - last_turn_output_token) / u.input_tokens * 100
-            cached_stat += f"[{cache_ratio:.0f}%]"
         parts.append(f'<span class="metadata-stat">{cached_stat}</span>')

     # Output with cost
klaude_code/session/selector.py
CHANGED

@@ -10,6 +10,11 @@ from .session import Session


 def resume_select_session() -> str | None:
+    # Column widths
+    UPDATED_AT_WIDTH = 16
+    MSG_COUNT_WIDTH = 3
+    MODEL_WIDTH = 25
+    FIRST_MESSAGE_WIDTH = 50
     sessions = Session.list_sessions()
     if not sessions:
         log("No sessions found for this project.")

@@ -31,20 +36,20 @@ def resume_select_session() -> str | None:
         model_display = s.model_name or "N/A"

         title = [
-            ("class:d", f"{_fmt(s.updated_at):<
-            ("class:b", f"{msg_count_display:>
+            ("class:d", f"{_fmt(s.updated_at):<{UPDATED_AT_WIDTH}} "),
+            ("class:b", f"{msg_count_display:>{MSG_COUNT_WIDTH}} "),
             (
                 "class:t",
-                f"{model_display[:
+                f"{model_display[:MODEL_WIDTH - 1] + '…' if len(model_display) > MODEL_WIDTH else model_display:<{MODEL_WIDTH}} ",
             ),
             (
                 "class:t",
-                f"{first_user_message.strip().replace('\n', ' ↩ '):<
+                f"{first_user_message.strip().replace('\n', ' ↩ '):<{FIRST_MESSAGE_WIDTH}}",
             ),
         ]
         choices.append(questionary.Choice(title=title, value=s.id))
     return questionary.select(
-        message=f"{' Updated at':<
+        message=f"{' Updated at':<{UPDATED_AT_WIDTH + 1}} {'Msg':>{MSG_COUNT_WIDTH}} {'Model':<{MODEL_WIDTH}} {'First message':<{FIRST_MESSAGE_WIDTH}}",
         choices=choices,
         pointer="→",
         instruction="↑↓ to move",

@@ -63,8 +68,8 @@ def resume_select_session() -> str | None:
         msg_count_display = "N/A" if s.messages_count == -1 else str(s.messages_count)
         model_display = s.model_name or "N/A"
         print(
-            f"{i}. {_fmt(s.updated_at)} {msg_count_display:>
-            f"{model_display[:
+            f"{i}. {_fmt(s.updated_at)} {msg_count_display:>{MSG_COUNT_WIDTH}} "
+            f"{model_display[:MODEL_WIDTH - 1] + '…' if len(model_display) > MODEL_WIDTH else model_display:<{MODEL_WIDTH}} {s.id} {s.work_dir}"
         )
         try:
             raw = input("Select a session number: ").strip()
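
The column widths are now named constants spliced into the format specs via nested f-string interpolation, so the header row and every choice row stay aligned from a single definition. A small sketch of that formatting pattern with hypothetical rows:

    UPDATED_AT_WIDTH = 16
    MSG_COUNT_WIDTH = 3
    MODEL_WIDTH = 25

    rows = [("2024-01-01 10:00", 12, "some-model-name"), ("2024-01-02 09:30", 7, "another-model")]

    # The width inside {...} is itself an expression, so one constant drives every row.
    print(f"{'Updated at':<{UPDATED_AT_WIDTH}} {'Msg':>{MSG_COUNT_WIDTH}} {'Model':<{MODEL_WIDTH}}")
    for updated_at, msg_count, model in rows:
        print(f"{updated_at:<{UPDATED_AT_WIDTH}} {msg_count:>{MSG_COUNT_WIDTH}} {model:<{MODEL_WIDTH}}")
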
klaude_code/ui/modes/repl/completers.py
CHANGED

@@ -81,9 +81,9 @@ class _SlashCommandCompleter(Completer):
         # Get available commands
         commands = get_commands()

-        # Filter commands that match the fragment
+        # Filter commands that match the fragment (preserve registration order)
         matched: list[tuple[str, object, str]] = []
-        for cmd_name, cmd_obj in
+        for cmd_name, cmd_obj in commands.items():
             if cmd_name.startswith(frag):
                 hint = " [args]" if cmd_obj.support_addition_params else ""
                 matched.append((cmd_name, cmd_obj, hint))

@@ -103,7 +103,7 @@ class _SlashCommandCompleter(Completer):

             # Using HTML for formatting: bold command name, normal hint, gray summary
             display_text = HTML(
-                f"<b>{cmd_name}</b>{hint}{padding}<style color='ansibrightblack'
+                f"<b>{cmd_name}</b>{hint}{padding}<style color='ansibrightblack'>{cmd_obj.summary}</style>" # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
             )
             completion_text = f"/{cmd_name} "
             yield Completion(
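
The completer now iterates commands.items() directly; since dicts preserve insertion order, completions follow registration order, matching the updated comment. A tiny sketch of that guarantee with a hypothetical registry dict:

    # Hypothetical registry: insertion order is the registration order.
    commands = {"status": "Show status", "thinking": "Configure reasoning", "clear": "Clear the session"}

    frag = "t"
    matched = [(name, summary) for name, summary in commands.items() if name.startswith(frag)]
    print(matched)  # [('thinking', 'Configure reasoning')], in registration order
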
klaude_code/ui/renderers/metadata.py
CHANGED

@@ -77,17 +77,6 @@ def _render_task_metadata_block(
         ]
         if metadata.usage.cache_read_cost is not None:
             cached_parts.append((f"({currency_symbol}{metadata.usage.cache_read_cost:.4f})", ThemeKey.METADATA_DIM))
-        # Cache ratio: (content + cached - last turn output) / input tokens, this might caclulate over 100% if system prompt is cached in first turn
-        # Shows how much of the input was cached (not new context growth)
-        if show_context_and_time and metadata.usage.input_tokens > 0:
-            context_delta = metadata.usage.context_delta or 0
-            last_turn_output_token = metadata.usage.last_turn_output_token or 0
-            cache_ratio = (
-                (metadata.usage.cached_tokens + context_delta - last_turn_output_token)
-                / metadata.usage.input_tokens
-                * 100
-            )
-            cached_parts.append((f"[{cache_ratio:.0f}%]", ThemeKey.METADATA_DIM))
         parts2.append(Text.assemble(*cached_parts))

         # Output

@@ -129,7 +118,7 @@ def _render_task_metadata_block(
     if metadata.usage is not None:
         # Context (only for main agent)
         if show_context_and_time and metadata.usage.context_usage_percent is not None:
-            context_size = format_number(metadata.usage.
+            context_size = format_number(metadata.usage.context_size or 0)
             parts3.append(
                 Text.assemble(
                     ("context", ThemeKey.METADATA_DIM),
{klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/RECORD
CHANGED

@@ -9,37 +9,38 @@ klaude_code/cli/__init__.py,sha256=YzlAoWAr5rx5oe6B_4zPxRFS4QaZauuy1AFwampP5fg,4
 klaude_code/cli/main.py,sha256=_Srm_OiwJKcM9SUXPGI3jhr6XNs7bzpCpPQvBmn3QLc,12082
 klaude_code/cli/runtime.py,sha256=qrw3SLaiI65v_rD78SGcJ1UXoZxBIK2mitGhDobvM_M,12582
 klaude_code/cli/session_cmd.py,sha256=cIBm3uUurke-TfBvQHz9mGW29LOAh22FIpXVyypnwDo,2549
-klaude_code/command/__init__.py,sha256=
-klaude_code/command/clear_cmd.py,sha256=
+klaude_code/command/__init__.py,sha256=VSbJaTufstDILBqTPHPKLWFt2g5C7fjxb37sonreC4A,3041
+klaude_code/command/clear_cmd.py,sha256=diIe1pscX1ko7bRN4FGylsTvDSAF6HHPwnsbXqTtHP0,657
 klaude_code/command/command_abc.py,sha256=1Wwp94Q3W08GNCraYYEGcjjNC7JLIei6E953zSZ2lZ4,2605
-klaude_code/command/diff_cmd.py,sha256=
-klaude_code/command/export_cmd.py,sha256=
-klaude_code/command/help_cmd.py,sha256=
-klaude_code/command/model_cmd.py,sha256=
+klaude_code/command/diff_cmd.py,sha256=mQu-FedUsZabE3-KwZV2JmOfm67-A41C2gz7rr6N9W8,5251
+klaude_code/command/export_cmd.py,sha256=MhcyWcT7NqsqJEHZogiXjxQPKXqNbNdJQYRQn_4O5tQ,3484
+klaude_code/command/help_cmd.py,sha256=N9X9q2hw7AXrmvBszmzL6tYz3GNZR768wMQqmC0Vp1Q,1692
+klaude_code/command/model_cmd.py,sha256=Zy5oQV1bgnd7l1cn7quCZx4qZS2gFcWnYKZWM08QFKE,1492
 klaude_code/command/prompt-deslop.md,sha256=YGaAXqem39zd0UWCFjWUj83Cf7cvUJq1768aJExFqeg,1346
 klaude_code/command/prompt-dev-docs-update.md,sha256=g1IWIWIa-3qlNOw5mBA4N9H1_nvYcw8AKo7XoQw_AZQ,1855
 klaude_code/command/prompt-dev-docs.md,sha256=PU9iT6XdUEH6grfSjHVma7xKOQcA__ZTKlEDkbbO0hA,1783
 klaude_code/command/prompt-init.md,sha256=a4_FQ3gKizqs2vl9oEY5jtG6HNhv3f-1b5RSCFq0A18,1873
 klaude_code/command/prompt_command.py,sha256=8jBUcfSmC9tXAYkLAB-u81KFqSKtCAHfHMnTQDzpgcg,2607
-klaude_code/command/refresh_cmd.py,sha256=
-klaude_code/command/registry.py,sha256=
-klaude_code/command/release_notes_cmd.py,sha256=
-klaude_code/command/status_cmd.py,sha256=
-klaude_code/command/terminal_setup_cmd.py,sha256=
+klaude_code/command/refresh_cmd.py,sha256=8TB1ibGn7w0xFemYTzIuoB0VXWU9Klem3wu-HfFfGlk,1271
+klaude_code/command/registry.py,sha256=KzuAKFLm37bVrN1Got8QUopd_Fz0vqIEoUrpoV21C0Y,3830
+klaude_code/command/release_notes_cmd.py,sha256=lDeAjuMDOSUISM0yYKZKbkjrYvFmvA5_fylkalTPaBU,2707
+klaude_code/command/status_cmd.py,sha256=F7XgfivBm80kJEsCgRHGXWOALAT_Y2QyLQ38ooc_ZSE,5393
+klaude_code/command/terminal_setup_cmd.py,sha256=2B12yUEUx0I04bJHNUAqfGrwD9kjJ8Iog5JFsb-E1dg,10943
+klaude_code/command/thinking_cmd.py,sha256=hb7N9prhojd9mZC6IdZQlU4_R3K1df0nTLS9YUb0qBY,7975
 klaude_code/config/__init__.py,sha256=9XVCYYqzJtCi46I94hbUmJ2yTFuZ-UlH-QTx7OpLAkQ,292
 klaude_code/config/config.py,sha256=Vc9u7-40T81Rbx1OdMqSWZLh3vf9aj4wmBUnIOH7jAw,6526
 klaude_code/config/list_model.py,sha256=08vLxar7YAcUNzGTN6bUbPtAoXXyfO5y6LjaaXMbsyQ,8019
-klaude_code/config/select_model.py,sha256=
-klaude_code/const/__init__.py,sha256=
+klaude_code/config/select_model.py,sha256=aOizajRXcc_IOy0bSzK_KOZhbMQSx4g6IeNkgLsyV1c,2168
+klaude_code/const/__init__.py,sha256=msApH-AQh_ttCgtYVTTW-e4AhCf6nurOcpTEkR0r1M4,3980
 klaude_code/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-klaude_code/core/agent.py,sha256=
+klaude_code/core/agent.py,sha256=4nGp60QGwYv3l-PP5Mzd4CnJ9TqUbftSAlI3O1j3fUo,7621
 klaude_code/core/executor.py,sha256=OkebxW6_64hecQBjHPgDQlw8JgOa_k5meBpNsMBB4qg,18512
 klaude_code/core/manager/__init__.py,sha256=6CswltCHXBUcezlW7xui2S1swDp8JTkS1YiEHmq4-no,658
 klaude_code/core/manager/agent_manager.py,sha256=IG07QD3wJWroJrTLdTZKwiCJrQjQikz8MoTyI7KCK0A,5267
 klaude_code/core/manager/llm_clients.py,sha256=sIFCweup7SJL5o9LbPgRLYE3fDuhYA4ZdhtSF2M0FAQ,2225
 klaude_code/core/manager/llm_clients_builder.py,sha256=SjSEBJWS77FY5xctu3uxT0JaiQKGtRJnjTZYandJwU0,1772
 klaude_code/core/manager/sub_agent_manager.py,sha256=SnDFu8ovcQjBkQSJrSD8e0Yf63NhIjYAt8WyQhBcpy0,3760
-klaude_code/core/prompt.py,sha256=
+klaude_code/core/prompt.py,sha256=m6xqUywaSY8My81UFseiGxJq7OA9P0KyWr6OxZU6LkM,3304
 klaude_code/core/prompts/prompt-claude-code.md,sha256=c7kNgwjJqnbwQuKWGJoMx-AMbf1gxAFC3ZFDhngBe74,8293
 klaude_code/core/prompts/prompt-codex-gpt-5-1-codex-max.md,sha256=SW-y8AmR99JL_9j26k9YVAOQuZ18vR12aT5CWHkZDc4,11741
 klaude_code/core/prompts/prompt-codex-gpt-5-1.md,sha256=jNi593_4L3EoMvjS0TwltF2b684gtDBsYHa9npxO34A,24239

@@ -50,7 +51,7 @@ klaude_code/core/prompts/prompt-subagent-oracle.md,sha256=hGtyDm_6UhJZUJwfXt5A-1
 klaude_code/core/prompts/prompt-subagent-webfetch.md,sha256=kHtJINbCRiRDrip_q6idHHU3CwbDfrVlpgtSZvugOWI,2304
 klaude_code/core/prompts/prompt-subagent.md,sha256=dmmdsOenbAOfqG6FmdR88spOLZkXmntDBs-cmZ9DN_g,897
 klaude_code/core/reminders.py,sha256=S5ZbYYrIoQKPGtLqtqg8yPCwZRD5Vdlkzf1wu86bw8g,18123
-klaude_code/core/task.py,sha256=
+klaude_code/core/task.py,sha256=2j7FA_j6oE75oxQCUQ9nON-Ch2d6cOwBasaLBdzensE,10561
 klaude_code/core/tool/__init__.py,sha256=-pxK4iCkvcdLpav74foMNdeIjsC6PMkVaw_q5kajivg,2170
 klaude_code/core/tool/file/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/core/tool/file/_utils.py,sha256=UjXO9Bwyr7YtDeSALiA8cp4KQlQskibZlbk6A33F0q4,779

@@ -92,41 +93,41 @@ klaude_code/core/tool/web/mermaid_tool.md,sha256=Ketpxpr7lz8238p5Q7ZzcyWchWd4dU6
 klaude_code/core/tool/web/mermaid_tool.py,sha256=Ok0A27oHLnV1c__74bheUuy3wpqDJ1zaXUSxuuqsNPI,2630
 klaude_code/core/tool/web/web_fetch_tool.md,sha256=_5U-LSoI86rD26nPb0D5BQCr6hj8eyF0UELSiyLznCA,347
 klaude_code/core/tool/web/web_fetch_tool.py,sha256=iu6kM_-90K8mqHbK9Loui96vICV7d8rmtss68rcFqw0,4958
-klaude_code/core/turn.py,sha256=
+klaude_code/core/turn.py,sha256=26Gz-cJ1bNdu3QSr6YHcz4mZovdfuzV9QXsqcLWpvrw,9488
 klaude_code/llm/__init__.py,sha256=b4AsqnrMIs0a5qR_ti6rZcHwFzAReTwOW96EqozEoSo,287
 klaude_code/llm/anthropic/__init__.py,sha256=PWETvaeNAAX3ue0ww1uRUIxTJG0RpWiutkn7MlwKxBs,67
-klaude_code/llm/anthropic/client.py,sha256=
+klaude_code/llm/anthropic/client.py,sha256=g5mEPmVL0ckTYYxW1_aE-jWfqTgeMdZC4ivIjo-GAIE,9532
 klaude_code/llm/anthropic/input.py,sha256=qPo4nmhnhSfLqef4UUVoIz8EjoXTxvlsrfsc_6qqM_s,8039
-klaude_code/llm/client.py,sha256=
+klaude_code/llm/client.py,sha256=1yMT5GE_Cx5SZTxmqBwc_FSF5WCPFSKqDThW_upPn68,859
 klaude_code/llm/codex/__init__.py,sha256=8vN2j2ezWB_UVpfqQ8ooStsBeLL5SY4SUMXOXdWiMaI,132
-klaude_code/llm/codex/client.py,sha256=
+klaude_code/llm/codex/client.py,sha256=fdNSFa2za2EDQR9sCqmooMtBHAg8QfYIW3uwaHgbmjA,5338
 klaude_code/llm/input_common.py,sha256=purxHHMo_yahNvv0Y1pH7WmfTfZgZqUqyo2JvdFiVO0,8526
 klaude_code/llm/openai_compatible/__init__.py,sha256=ACGpnki7k53mMcCl591aw99pm9jZOZk0ghr7atOfNps,81
-klaude_code/llm/openai_compatible/client.py,sha256=
+klaude_code/llm/openai_compatible/client.py,sha256=TOC98acMdx6t7J7ME4UKM4Ux5oWHdbgJmcrLlOLtFVk,7761
 klaude_code/llm/openai_compatible/input.py,sha256=rtWVjpwb9tLrinucezmncQXet8MerUxE5Gxc32sfDr4,3750
 klaude_code/llm/openai_compatible/stream_processor.py,sha256=ckOBWZ_iUqgcESD5pnvjqJxvPI7YA4k9DYZkZ37KbmE,3388
 klaude_code/llm/openai_compatible/tool_call_accumulator.py,sha256=kuw3ceDgenQz2Ccc9KYqBkDo6F1sDb5Aga6m41AIECA,4071
 klaude_code/llm/openrouter/__init__.py,sha256=_As8lHjwj6vapQhLorZttTpukk5ZiCdhFdGT38_ASPo,69
-klaude_code/llm/openrouter/client.py,sha256=
+klaude_code/llm/openrouter/client.py,sha256=782c7LEpP03zLv6MDXZc5jnNR0JZ3CohcPgdeflfJtM,8141
 klaude_code/llm/openrouter/input.py,sha256=2GuDVHd_74ZtHQyFyTGhZqtWjM7m5GYqFtKf--AvmlI,5059
 klaude_code/llm/openrouter/reasoning_handler.py,sha256=TYIHdwMopi8DVqOpeN3vpyp-GcWOZgTeRnT5QvlK70U,8100
 klaude_code/llm/registry.py,sha256=bbxZ_Mb7C2xCk_OVUQoZnsyPgmO3tbfJ292qaCtr1Ts,1763
 klaude_code/llm/responses/__init__.py,sha256=WsiyvnNiIytaYcaAqNiB8GI-5zcpjjeODPbMlteeFjA,67
-klaude_code/llm/responses/client.py,sha256=
+klaude_code/llm/responses/client.py,sha256=I0gScfKSFeqynyqKrcqa1NVynvywEhtgeW6U8AKMEHE,9625
 klaude_code/llm/responses/input.py,sha256=noNmalXvxw6UXo6ngkhFBroECxK6igmgEQ49YVhY0xg,6049
-klaude_code/llm/usage.py,sha256=
+klaude_code/llm/usage.py,sha256=cq6yZNSKBhRVVjFqBYJQrK3mw9ZSLXaTpbDeal-BjBQ,4205
 klaude_code/protocol/__init__.py,sha256=aGUgzhYqvhuT3Mk2vj7lrHGriH4h9TSbqV1RsRFAZjQ,194
-klaude_code/protocol/commands.py,sha256=
+klaude_code/protocol/commands.py,sha256=WX7EW3DbZs7oV7zhnKXHQhDZdIZTN35MTBJ4hWMAHjM,606
 klaude_code/protocol/events.py,sha256=exOriAIxdIzS7WDKCw3am-uw1egx_8tVbvcgO6s3nMI,3562
-klaude_code/protocol/llm_param.py,sha256=
-klaude_code/protocol/model.py,sha256=
+klaude_code/protocol/llm_param.py,sha256=cb4ubLq21PIsMOC8WJb0aid12z_sT1b7FsbNJMr-jLg,4255
+klaude_code/protocol/model.py,sha256=q6OQKv9yiNOSIVEXimR8m32H_aRpyMpCOo0kYXfiEZE,12137
 klaude_code/protocol/op.py,sha256=hdQTzD6zAsRMJJFaLOPvDX9gokhtIBSYNQuZ20TusI4,2824
 klaude_code/protocol/op_handler.py,sha256=_lnv3-RxKkrTfGTNBlQ23gbHJBEtMLC8O48SYWDtPjE,843
 klaude_code/protocol/sub_agent.py,sha256=NZib4kubDY8Js7toE6G2eKDNH4sWrmVYnH9FSTqKkZI,13666
 klaude_code/protocol/tools.py,sha256=hkjVirnQqGTJS46IWvVKXWR4usPPUgDZDnm34LzAVSc,348
 klaude_code/session/__init__.py,sha256=oXcDA5w-gJCbzmlF8yuWy3ezIW9DgFBNUs-gJHUJ-Rc,121
-klaude_code/session/export.py,sha256=
-klaude_code/session/selector.py,sha256=
+klaude_code/session/export.py,sha256=3xyY0F39SbIAf1-h2IO_anNMfjBasncNJZXHh2STGow,24643
+klaude_code/session/selector.py,sha256=F5L1zV1HZGrx-1VLydzqQTAz4cThKxyG2Sc9MUh34DU,2886
 klaude_code/session/session.py,sha256=vm7I-IODBQr7JNkdx2jWfvDZgiLzMRrb0HBebfz7wnU,20051
 klaude_code/session/templates/export_session.html,sha256=jvyVM_ZrRoQIqWslfmo6ASprVYLhbOtT_QTzgkrdXHs,46389
 klaude_code/trace/__init__.py,sha256=B-S4qdCj8W88AaC_gVmhTaejH6eLYClBVh2Q6aGAVBk,184

@@ -143,7 +144,7 @@ klaude_code/ui/modes/exec/__init__.py,sha256=RsYa-DmDJj6g7iXb4H9mm2_Cu-KDQOD10RJ
 klaude_code/ui/modes/exec/display.py,sha256=m2kkgaUoGD9rEVUmcm7Vs_PyAI2iruKCJYRhANjSsKo,1965
 klaude_code/ui/modes/repl/__init__.py,sha256=JursXYxevw0hrezE-urGo25962InIXVPj_uYnPafN-U,1528
 klaude_code/ui/modes/repl/clipboard.py,sha256=ZCpk7kRSXGhh0Q_BWtUUuSYT7ZOqRjAoRcg9T9n48Wo,5137
-klaude_code/ui/modes/repl/completers.py,sha256=
+klaude_code/ui/modes/repl/completers.py,sha256=EIKlkwBtxkjGJALc8onQLhJgPMAFzg46l2hlbzw43G8,18271
 klaude_code/ui/modes/repl/display.py,sha256=v-Jxe7MWpOCEsx9FFEzqKaIg0jLS7ZU9bevooBjxxEQ,2242
 klaude_code/ui/modes/repl/event_handler.py,sha256=oReGpdmqgrNmwOMehykscZS2t_XzTWcCvyxZq28MxCc,18604
 klaude_code/ui/modes/repl/input_prompt_toolkit.py,sha256=EAIAtcL9EHVPmVK6oOHg0xCeZ0IOnG5S5KsaL85OHOk,6368

@@ -155,7 +156,7 @@ klaude_code/ui/renderers/common.py,sha256=TPH7LCbeJGqB8ArTsVitqJHEyOxHU6nwnRtvF0
 klaude_code/ui/renderers/developer.py,sha256=fE-9LRzVLiKnK3ctFcuDDP_eehohhsgPCH_tYaOp-xs,6378
 klaude_code/ui/renderers/diffs.py,sha256=P--aLjvZy4z77FDx6uM9LlIYVjYlyZwj0MncdJTO2AA,7691
 klaude_code/ui/renderers/errors.py,sha256=c_fbnoNOnvuI3Bb24IujwV8Mpes-qWS_xCWfAcBvg6A,517
-klaude_code/ui/renderers/metadata.py,sha256=
+klaude_code/ui/renderers/metadata.py,sha256=QQev-3S3AS7GuWUhekjbnFJj3cFcbNxNtEYxzAl1Xm8,9121
 klaude_code/ui/renderers/sub_agent.py,sha256=3cyn95pu4IniOJyWW4vfQ-X72iLufQ3LT9CkAQMuF4k,2686
 klaude_code/ui/renderers/thinking.py,sha256=jzDfvYuwpafndmBMMb6UumGxur9iFi_X0LYIo08eDlw,1179
 klaude_code/ui/renderers/tools.py,sha256=iVgTo10gzS981LmbLU36Zt17JMW3YTPYRqA3mUJ_d8c,19975

@@ -176,7 +177,7 @@ klaude_code/ui/utils/__init__.py,sha256=YEsCLjbCPaPza-UXTPUMTJTrc9BmNBUP5CbFWlsh
 klaude_code/ui/utils/common.py,sha256=xzw-Mgj0agxrf22QxpH7YzVIpkMXIRY6SgXWtLYF0yU,2881
 klaude_code/ui/utils/debouncer.py,sha256=TFF1z7B7-FxONEigkYohhShDlqo4cOcqydE9zz7JBHc,1270
 klaude_code/version.py,sha256=x2OeiACPdzS87EWtaSi_UP13htm81Uq7mlV3kFy5jko,4815
-klaude_code-1.2.
-klaude_code-1.2.
-klaude_code-1.2.
-klaude_code-1.2.
+klaude_code-1.2.12.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
+klaude_code-1.2.12.dist-info/entry_points.txt,sha256=7CWKjolvs6dZiYHpelhA_FRJ-sVDh43eu3iWuOhKc_w,53
+klaude_code-1.2.12.dist-info/METADATA,sha256=G1F2CoxKcSkdlJBBzLqJ90PWVj9OHEOvJYKpEGVCbF8,5067
+klaude_code-1.2.12.dist-info/RECORD,,
{klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/WHEEL
File without changes

{klaude_code-1.2.11.dist-info → klaude_code-1.2.12.dist-info}/entry_points.txt
File without changes