klaude-code 1.2.26__py3-none-any.whl → 1.2.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/cli/config_cmd.py +1 -5
- klaude_code/cli/debug.py +9 -1
- klaude_code/cli/list_model.py +170 -129
- klaude_code/cli/main.py +76 -19
- klaude_code/cli/runtime.py +15 -11
- klaude_code/cli/self_update.py +2 -1
- klaude_code/cli/session_cmd.py +1 -1
- klaude_code/command/__init__.py +3 -0
- klaude_code/command/export_online_cmd.py +15 -12
- klaude_code/command/fork_session_cmd.py +42 -0
- klaude_code/config/__init__.py +3 -1
- klaude_code/config/assets/__init__.py +1 -0
- klaude_code/config/assets/builtin_config.yaml +233 -0
- klaude_code/config/builtin_config.py +37 -0
- klaude_code/config/config.py +332 -112
- klaude_code/config/select_model.py +46 -8
- klaude_code/core/executor.py +6 -3
- klaude_code/core/manager/llm_clients_builder.py +4 -1
- klaude_code/core/reminders.py +52 -16
- klaude_code/core/tool/file/edit_tool.py +4 -4
- klaude_code/core/tool/file/write_tool.py +4 -4
- klaude_code/core/tool/shell/bash_tool.py +2 -2
- klaude_code/core/tool/web/mermaid_tool.md +17 -0
- klaude_code/core/tool/web/mermaid_tool.py +2 -2
- klaude_code/llm/openai_compatible/stream.py +2 -1
- klaude_code/llm/openai_compatible/tool_call_accumulator.py +17 -1
- klaude_code/protocol/commands.py +1 -0
- klaude_code/protocol/model.py +1 -0
- klaude_code/session/export.py +52 -7
- klaude_code/session/selector.py +2 -2
- klaude_code/session/session.py +26 -4
- klaude_code/trace/log.py +7 -1
- klaude_code/ui/modes/repl/__init__.py +3 -44
- klaude_code/ui/modes/repl/completers.py +39 -7
- klaude_code/ui/modes/repl/event_handler.py +8 -6
- klaude_code/ui/modes/repl/input_prompt_toolkit.py +33 -66
- klaude_code/ui/modes/repl/key_bindings.py +4 -4
- klaude_code/ui/modes/repl/renderer.py +1 -6
- klaude_code/ui/renderers/common.py +11 -4
- klaude_code/ui/renderers/developer.py +17 -0
- klaude_code/ui/renderers/diffs.py +1 -1
- klaude_code/ui/renderers/errors.py +10 -5
- klaude_code/ui/renderers/metadata.py +2 -2
- klaude_code/ui/renderers/tools.py +8 -4
- klaude_code/ui/rich/markdown.py +5 -5
- klaude_code/ui/rich/theme.py +7 -3
- klaude_code/ui/terminal/color.py +1 -1
- klaude_code/ui/terminal/control.py +4 -4
- {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/METADATA +121 -127
- {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/RECORD +52 -48
- {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/entry_points.txt +1 -0
- {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/WHEEL +0 -0
klaude_code/command/__init__.py
CHANGED
@@ -31,6 +31,7 @@ def ensure_commands_loaded() -> None:
     from .debug_cmd import DebugCommand
     from .export_cmd import ExportCommand
     from .export_online_cmd import ExportOnlineCommand
+    from .fork_session_cmd import ForkSessionCommand
     from .help_cmd import HelpCommand
     from .model_cmd import ModelCommand
     from .refresh_cmd import RefreshTerminalCommand
@@ -45,6 +46,7 @@ def ensure_commands_loaded() -> None:
     register(RefreshTerminalCommand())
     register(ThinkingCommand())
     register(ModelCommand())
+    register(ForkSessionCommand())
     load_prompt_commands()
     register(StatusCommand())
     register(HelpCommand())
@@ -63,6 +65,7 @@ def __getattr__(name: str) -> object:
         "DebugCommand": "debug_cmd",
         "ExportCommand": "export_cmd",
         "ExportOnlineCommand": "export_online_cmd",
+        "ForkSessionCommand": "fork_session_cmd",
         "HelpCommand": "help_cmd",
         "ModelCommand": "model_cmd",
         "RefreshTerminalCommand": "refresh_cmd",
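The last hunk above extends the module's lazy-export table: `__getattr__` maps a command class name to the submodule that defines it, so the class is only imported on first attribute access (PEP 562). A generic sketch of that pattern follows; the `_LAZY_EXPORTS` name and the error message are illustrative, not klaude_code's exact implementation.

# Generic PEP 562 lazy-export pattern; names here are illustrative only.
from importlib import import_module

_LAZY_EXPORTS = {
    "ForkSessionCommand": "fork_session_cmd",  # class name -> defining submodule
}

def __getattr__(name: str) -> object:
    module_name = _LAZY_EXPORTS.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = import_module(f".{module_name}", __name__)
    return getattr(module, name)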
klaude_code/command/export_online_cmd.py
CHANGED
@@ -47,20 +47,23 @@ class ExportOnlineCommand(CommandABC):
             )
             return CommandResult(events=[event])
 
-        # Check if user is logged in to surge
-        if not self._is_surge_logged_in(surge_cmd):
-            login_cmd = " ".join([*surge_cmd, "login"])
-            event = events.DeveloperMessageEvent(
-                session_id=agent.session.id,
-                item=model.DeveloperMessageItem(
-                    content=f"Not logged in to surge.sh. Please run: {login_cmd}",
-                    command_output=model.CommandOutput(command_name=self.name, is_error=True),
-                ),
-            )
-            return CommandResult(events=[event])
-
         try:
             console = Console()
+            # Check login status inside status context since npx surge whoami can be slow
+            with console.status(Text("Checking surge.sh login status...", style="dim"), spinner_style="dim"):
+                logged_in = self._is_surge_logged_in(surge_cmd)
+
+            if not logged_in:
+                login_cmd = " ".join([*surge_cmd, "login"])
+                event = events.DeveloperMessageEvent(
+                    session_id=agent.session.id,
+                    item=model.DeveloperMessageItem(
+                        content=f"Not logged in to surge.sh. Please run: {login_cmd}",
+                        command_output=model.CommandOutput(command_name=self.name, is_error=True),
+                    ),
+                )
+                return CommandResult(events=[event])
+
             with console.status(Text("Deploying to surge.sh...", style="dim"), spinner_style="dim"):
                 html_doc = self._build_html(agent)
                 domain = self._generate_domain()
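In this change the login check moves inside the try block and runs under a rich status spinner, because `npx surge whoami` can be slow. A minimal sketch of the same pattern is below; `check_login()` is a hypothetical stand-in for the command's `_is_surge_logged_in()` helper, and the assumption that `surge whoami` prints nothing useful when logged out is mine, not taken from this diff.

# Illustrative only: wrap a slow external check in a rich status spinner.
import subprocess

from rich.console import Console
from rich.text import Text

def check_login(surge_cmd: list[str]) -> bool:
    # Assumption: `surge whoami` exits non-zero or prints nothing when logged out.
    result = subprocess.run([*surge_cmd, "whoami"], capture_output=True, text=True)
    return result.returncode == 0 and bool(result.stdout.strip())

console = Console()
with console.status(Text("Checking surge.sh login status...", style="dim"), spinner_style="dim"):
    logged_in = check_login(["npx", "surge"])

print("logged in" if logged_in else "not logged in; run: npx surge login")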
klaude_code/command/fork_session_cmd.py
ADDED
@@ -0,0 +1,42 @@
+from klaude_code.command.command_abc import Agent, CommandABC, CommandResult
+from klaude_code.protocol import commands, events, model
+
+
+class ForkSessionCommand(CommandABC):
+    """Fork current session to a new session id and show a resume command."""
+
+    @property
+    def name(self) -> commands.CommandName:
+        return commands.CommandName.FORK_SESSION
+
+    @property
+    def summary(self) -> str:
+        return "Fork the current session and show a resume-by-id command"
+
+    async def run(self, agent: Agent, user_input: model.UserInputPayload) -> CommandResult:
+        del user_input  # unused
+
+        if agent.session.messages_count == 0:
+            event = events.DeveloperMessageEvent(
+                session_id=agent.session.id,
+                item=model.DeveloperMessageItem(
+                    content="(no messages to fork)",
+                    command_output=model.CommandOutput(command_name=self.name),
+                ),
+            )
+            return CommandResult(events=[event])
+
+        new_session = agent.session.fork()
+        await new_session.wait_for_flush()
+
+        event = events.DeveloperMessageEvent(
+            session_id=agent.session.id,
+            item=model.DeveloperMessageItem(
+                content=f"Session forked successfully. New session id: {new_session.id}",
+                command_output=model.CommandOutput(
+                    command_name=self.name,
+                    ui_extra=model.SessionIdUIExtra(session_id=new_session.id),
+                ),
+            ),
+        )
+        return CommandResult(events=[event])
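The new command forks the session, waits for the forked session to flush to disk, and reports the new id through `SessionIdUIExtra` so the UI can render a resume command. A rough driver for the command using stub objects is sketched below; the stub `Session`/`Agent` classes and the assumption that passing `user_input=None` is acceptable at runtime (the argument is deleted immediately) are mine, not part of the package.

# Hypothetical smoke-test driver; StubSession/StubAgent are illustrative stand-ins.
import asyncio

from klaude_code.command.fork_session_cmd import ForkSessionCommand


class StubSession:
    def __init__(self, session_id: str, messages_count: int) -> None:
        self.id = session_id
        self.messages_count = messages_count

    def fork(self) -> "StubSession":
        # Assumption: fork() returns a copy of the session under a fresh id.
        return StubSession("session-b", self.messages_count)

    async def wait_for_flush(self) -> None:
        pass  # persistence flush is a no-op for the stub


class StubAgent:
    def __init__(self) -> None:
        self.session = StubSession("session-a", messages_count=2)


async def main() -> None:
    result = await ForkSessionCommand().run(StubAgent(), user_input=None)
    print(result.events[0].item.content)  # e.g. "Session forked successfully. New session id: session-b"


asyncio.run(main())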
klaude_code/config/__init__.py
CHANGED
@@ -1,7 +1,9 @@
-from .config import Config, config_path, load_config
+from .config import Config, UserConfig, config_path, load_config, print_no_available_models_hint
 
 __all__ = [
     "Config",
+    "UserConfig",
     "config_path",
     "load_config",
+    "print_no_available_models_hint",
 ]
klaude_code/config/assets/__init__.py
ADDED
@@ -0,0 +1 @@
+# Asset files for config module
klaude_code/config/assets/builtin_config.yaml
ADDED
@@ -0,0 +1,233 @@
+# Built-in provider and model configurations
+# Users can start using klaude by simply setting environment variables
+# (ANTHROPIC_API_KEY, OPENAI_API_KEY, etc.) without manual configuration.
+
+provider_list:
+  - provider_name: anthropic
+    protocol: anthropic
+    api_key: ${ANTHROPIC_API_KEY}
+    model_list:
+      - model_name: sonnet
+        model_params:
+          model: claude-sonnet-4-5-20250929
+          context_limit: 200000
+          provider_routing:
+            sort: throughput
+        cost:
+          input: 3.0
+          output: 15.0
+          cache_read: 0.3
+          cache_write: 3.75
+      - model_name: opus
+        model_params:
+          model: claude-opus-4-5-20251101
+          context_limit: 200000
+          verbosity: high
+          thinking:
+            type: enabled
+            budget_tokens: 2048
+        cost:
+          input: 5.0
+          output: 25.0
+          cache_read: 0.5
+          cache_write: 6.25
+
+  - provider_name: openai
+    protocol: responses
+    api_key: ${OPENAI_API_KEY}
+    model_list:
+      - model_name: gpt-5.2
+        model_params:
+          model: gpt-5.2
+          max_tokens: 128000
+          context_limit: 400000
+          verbosity: high
+          thinking:
+            reasoning_effort: high
+        cost:
+          input: 1.75
+          output: 14.0
+          cache_read: 0.17
+
+  - provider_name: openrouter
+    protocol: openrouter
+    api_key: ${OPENROUTER_API_KEY}
+    model_list:
+      - model_name: gpt-5.1-codex-max
+        model_params:
+          model: openai/gpt-5.1-codex-max
+          max_tokens: 128000
+          context_limit: 400000
+          thinking:
+            reasoning_effort: medium
+        cost:
+          input: 1.25
+          output: 10.0
+          cache_read: 0.13
+      - model_name: gpt-5.2
+        model_params:
+          model: openai/gpt-5.2
+          max_tokens: 128000
+          context_limit: 400000
+          verbosity: high
+          thinking:
+            reasoning_effort: high
+        cost:
+          input: 1.75
+          output: 14.0
+          cache_read: 0.17
+      - model_name: gpt-5.2-fast
+        model_params:
+          model: openai/gpt-5.2
+          max_tokens: 128000
+          context_limit: 400000
+          verbosity: low
+          thinking:
+            reasoning_effort: none
+        cost:
+          input: 1.75
+          output: 14.0
+          cache_read: 0.17
+      - model_name: kimi
+        model_params:
+          model: moonshotai/kimi-k2-thinking
+          context_limit: 262144
+          provider_routing:
+            only:
+              - moonshotai/turbo
+        cost:
+          input: 0.6
+          output: 2.5
+          cache_read: 0.15
+      - model_name: haiku
+        model_params:
+          model: anthropic/claude-haiku-4.5
+          context_limit: 200000
+        cost:
+          input: 1.0
+          output: 5.0
+          cache_read: 0.1
+          cache_write: 1.25
+      - model_name: sonnet
+        model_params:
+          model: anthropic/claude-4.5-sonnet
+          context_limit: 200000
+          provider_routing:
+            sort: throughput
+        cost:
+          input: 3.0
+          output: 15.0
+          cache_read: 0.3
+          cache_write: 3.75
+      - model_name: opus
+        model_params:
+          model: anthropic/claude-4.5-opus
+          context_limit: 200000
+          verbosity: high
+          thinking:
+            type: enabled
+            budget_tokens: 2048
+        cost:
+          input: 5.0
+          output: 25.0
+          cache_read: 0.5
+          cache_write: 6.25
+      - model_name: gemini-pro
+        model_params:
+          model: google/gemini-3-pro-preview
+          context_limit: 1048576
+          thinking:
+            reasoning_effort: high
+        cost:
+          input: 2.0
+          output: 12.0
+          cache_read: 0.2
+      - model_name: gemini-flash
+        model_params:
+          model: google/gemini-3-flash-preview
+          context_limit: 1048576
+          thinking:
+            reasoning_effort: medium
+        cost:
+          input: 0.5
+          output: 3.0
+          cache_read: 0.05
+      - model_name: grok
+        model_params:
+          model: x-ai/grok-4.1-fast
+          context_limit: 2000000
+          thinking:
+            type: enabled
+            budget_tokens: 2048
+        cost:
+          input: 0.2
+          output: 0.5
+          cache_read: 0.05
+      - model_name: minimax
+        model_params:
+          model: minimax/minimax-m2.1
+          context_limit: 204800
+        cost:
+          input: 0.3
+          output: 1.2
+          cache_read: 0.03
+      - model_name: glm
+        model_params:
+          model: z-ai/glm-4.7
+          context_limit: 200000
+          provider_routing:
+            only:
+              - z-ai
+        cost:
+          input: 0.44
+          output: 1.74
+          cache_read: 0.04
+
+  - provider_name: deepseek
+    protocol: anthropic
+    api_key: ${DEEPSEEK_API_KEY}
+    base_url: https://api.deepseek.com/anthropic
+    model_list:
+      - model_name: deepseek
+        provider: deepseek
+        model_params:
+          model: deepseek-reasoner
+          context_limit: 128000
+          thinking:
+            type: enabled
+            budget_tokens: 2048
+        cost:
+          input: 2
+          output: 3
+          cache_read: 0.2
+          currency: CNY
+
+  - provider_name: moonshot
+    protocol: anthropic
+    api_key: ${MOONSHOT_API_KEY}
+    base_url: https://api.moonshot.cn/anthropic
+    model_list:
+      - model_name: kimi@moonshot
+        model_params:
+          model: kimi-k2-thinking
+          context_limit: 262144
+          thinking:
+            type: enabled
+            budget_tokens: 8192
+        cost:
+          input: 4.0
+          output: 16.0
+          cache_read: 1.0
+          currency: CNY
+
+  - provider_name: codex
+    protocol: codex
+    model_list:
+      - model_name: gpt-5.2-codex
+        provider: codex
+        model_params:
+          model: gpt-5.2-codex
+          thinking:
+            reasoning_effort: medium
+          context_limit: 400000
+          max_tokens: 128000
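The `api_key: ${ANTHROPIC_API_KEY}`-style entries depend on environment-variable substitution when the configuration is loaded; the actual expansion lives in config.py, whose changes this diff only summarizes. A rough illustration of that kind of placeholder resolution, under that assumption:

# Sketch of ${VAR}-style placeholder resolution; not klaude_code's actual loader code.
import os
import re

_ENV_PLACEHOLDER = re.compile(r"^\$\{(?P<name>[A-Za-z_][A-Za-z0-9_]*)\}$")

def resolve_placeholder(value: str) -> str | None:
    """Return the env value for "${NAME}" strings, None if unset, else the value unchanged."""
    match = _ENV_PLACEHOLDER.match(value)
    if match is None:
        return value
    return os.environ.get(match.group("name"))

# A provider whose key resolves to None (variable not exported) can simply be skipped.
print(resolve_placeholder("${ANTHROPIC_API_KEY}"))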
klaude_code/config/builtin_config.py
ADDED
@@ -0,0 +1,37 @@
+"""Built-in provider and model configurations.
+
+These configurations allow users to start using klaude by simply setting
+environment variables (ANTHROPIC_API_KEY, OPENAI_API_KEY, etc.) without
+manually configuring providers.
+"""
+
+from functools import lru_cache
+from importlib import resources
+from typing import TYPE_CHECKING
+
+import yaml
+
+if TYPE_CHECKING:
+    from klaude_code.config.config import ProviderConfig
+
+# All supported API key environment variables
+SUPPORTED_API_KEY_ENVS = [
+    "ANTHROPIC_API_KEY",
+    "OPENAI_API_KEY",
+    "OPENROUTER_API_KEY",
+    "DEEPSEEK_API_KEY",
+    "MOONSHOT_API_KEY",
+]
+
+
+@lru_cache(maxsize=1)
+def get_builtin_provider_configs() -> list["ProviderConfig"]:
+    """Load built-in provider configurations from YAML asset."""
+    # Import here to avoid circular import
+    from klaude_code.config.config import ProviderConfig
+
+    assets = resources.files("klaude_code.config.assets")
+    yaml_content = (assets / "builtin_config.yaml").read_text()
+    data = yaml.safe_load(yaml_content)
+
+    return [ProviderConfig.model_validate(p) for p in data.get("provider_list", [])]
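Putting the two new pieces together, one plausible way to use them (a sketch only; how config.py actually merges built-ins with user config is not shown in this excerpt) is to load the cached provider list and keep the providers whose API key environment variable is exported:

# Illustrative filtering of built-in providers by exported API keys.
# Assumes ProviderConfig exposes provider_name and an optional api_key, as in the YAML above.
import os

from klaude_code.config.builtin_config import (
    SUPPORTED_API_KEY_ENVS,
    get_builtin_provider_configs,
)

exported = {name for name in SUPPORTED_API_KEY_ENVS if os.environ.get(name)}

usable = []
for provider in get_builtin_provider_configs():
    key = getattr(provider, "api_key", None)  # e.g. "${OPENAI_API_KEY}" before expansion
    if key is None:
        usable.append(provider)  # the codex provider declares no api_key
    elif key.strip("${}") in exported:
        usable.append(provider)

print([p.provider_name for p in usable])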