klaude-code 2.3.0__py3-none-any.whl → 2.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/cli/list_model.py +3 -3
- klaude_code/cli/main.py +2 -2
- klaude_code/config/assets/builtin_config.yaml +165 -307
- klaude_code/config/config.py +17 -17
- klaude_code/config/{select_model.py → model_matcher.py} +7 -7
- klaude_code/config/sub_agent_model_helper.py +1 -10
- klaude_code/config/thinking.py +2 -2
- klaude_code/core/agent_profile.py +9 -23
- klaude_code/core/executor.py +72 -70
- klaude_code/core/tool/file/diff_builder.py +25 -18
- klaude_code/llm/anthropic/client.py +5 -5
- klaude_code/llm/client.py +1 -1
- klaude_code/llm/codex/client.py +2 -2
- klaude_code/llm/google/client.py +6 -6
- klaude_code/llm/input_common.py +2 -2
- klaude_code/llm/openai_compatible/client.py +3 -3
- klaude_code/llm/openai_compatible/stream.py +1 -1
- klaude_code/llm/openrouter/client.py +4 -4
- klaude_code/llm/openrouter/input.py +1 -3
- klaude_code/llm/responses/client.py +5 -5
- klaude_code/protocol/events/__init__.py +7 -1
- klaude_code/protocol/events/chat.py +10 -0
- klaude_code/protocol/llm_param.py +1 -1
- klaude_code/protocol/model.py +0 -26
- klaude_code/protocol/op.py +0 -5
- klaude_code/session/session.py +4 -2
- klaude_code/tui/command/clear_cmd.py +0 -1
- klaude_code/tui/command/command_abc.py +6 -4
- klaude_code/tui/command/copy_cmd.py +10 -10
- klaude_code/tui/command/debug_cmd.py +11 -10
- klaude_code/tui/command/export_online_cmd.py +18 -23
- klaude_code/tui/command/fork_session_cmd.py +39 -43
- klaude_code/tui/command/model_cmd.py +5 -7
- klaude_code/tui/command/{model_select.py → model_picker.py} +3 -5
- klaude_code/tui/command/refresh_cmd.py +0 -1
- klaude_code/tui/command/registry.py +15 -21
- klaude_code/tui/command/resume_cmd.py +10 -16
- klaude_code/tui/command/status_cmd.py +8 -12
- klaude_code/tui/command/sub_agent_model_cmd.py +11 -16
- klaude_code/tui/command/terminal_setup_cmd.py +8 -11
- klaude_code/tui/command/thinking_cmd.py +4 -6
- klaude_code/tui/commands.py +5 -0
- klaude_code/tui/components/command_output.py +96 -0
- klaude_code/tui/components/developer.py +3 -110
- klaude_code/tui/components/welcome.py +2 -2
- klaude_code/tui/input/prompt_toolkit.py +6 -8
- klaude_code/tui/machine.py +5 -0
- klaude_code/tui/renderer.py +5 -5
- klaude_code/tui/runner.py +0 -6
- klaude_code/tui/terminal/selector.py +7 -8
- {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/METADATA +21 -74
- {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/RECORD +54 -53
- {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/WHEEL +0 -0
- {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/entry_points.txt +0 -0
klaude_code/cli/list_model.py CHANGED

@@ -181,7 +181,7 @@ def format_env_var_display(value: str | None) -> Text:
 
 def _get_model_params_display(model: ModelConfig) -> list[Text]:
     """Get display elements for model parameters."""
-    param_strings = format_model_params(model
+    param_strings = format_model_params(model)
     if param_strings:
         return [Text(s) for s in param_strings]
     return [Text("")]

@@ -286,7 +286,7 @@ def _build_models_table(
 
         if not provider_available:
             name = Text.assemble((prefix, ThemeKey.LINES), (model.model_name, "dim"))
-            model_id = Text(model.
+            model_id = Text(model.model_id or "", style="dim")
             params = Text("(unavailable)", style="dim")
         else:
             # Build role tags for this model

@@ -305,7 +305,7 @@ def _build_models_table(
             )
         else:
             name = Text.assemble((prefix, ThemeKey.LINES), (model.model_name, ThemeKey.CONFIG_ITEM_NAME))
-            model_id = Text(model.
+            model_id = Text(model.model_id or "")
             params = Text(" · ").join(_get_model_params_display(model))
 
         models_table.add_row(name, model_id, params)
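Both list_model.py hunks swap the old nested parameter access for the flattened model_id field, falling back to an empty string when it is unset. A minimal sketch of that row-rendering pattern using Rich; plain style strings stand in for klaude-code's ThemeKey values, and the params column content is invented for illustration:

# Sketch of the dim/available row rendering above (not the real function).
from rich.console import Console
from rich.table import Table
from rich.text import Text

def build_row(prefix: str, model_name: str, model_id: str | None,
              provider_available: bool) -> tuple[Text, Text, Text]:
    if not provider_available:
        name = Text.assemble((prefix, "grey50"), (model_name, "dim"))
        # model_id is optional on the flattened entry, hence the "" fallback
        return name, Text(model_id or "", style="dim"), Text("(unavailable)", style="dim")
    name = Text.assemble((prefix, "grey50"), (model_name, "bold"))
    # Illustrative params joined with the same " · " separator as the diff
    params = Text(" · ").join([Text("ctx 200k"), Text("max 128k")])
    return name, Text(model_id or ""), params

table = Table("Name", "Model ID", "Params")
table.add_row(*build_row("├─ ", "sonnet", "claude-sonnet-4-5-20250929", True))
table.add_row(*build_row("└─ ", "opus", None, False))
Console().print(table)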
klaude_code/cli/main.py CHANGED

@@ -124,7 +124,7 @@ def main_callback(
         raise typer.Exit(2)
 
     from klaude_code.app.runtime import AppInitConfig
-    from klaude_code.tui.command.
+    from klaude_code.tui.command.model_picker import ModelSelectStatus, select_model_interactive
     from klaude_code.tui.runner import run_interactive
 
     update_terminal_title()

@@ -193,7 +193,7 @@ def main_callback(
         matches = [
             m.selector
             for m in cfg.iter_model_entries()
-            if (m.
+            if (m.model_id or "").strip().lower() == raw_model.lower()
         ]
         if len(matches) == 1:
             chosen_model = matches[0]
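The second main.py hunk resolves a model argument by comparing it case-insensitively against each entry's flattened model_id. A self-contained sketch of that lookup; ModelEntry here is a stand-in dataclass, and the selector format is an assumption (the real class lives in klaude_code/config/config.py):

# Sketch of the case-insensitive model_id resolution shown above.
from dataclasses import dataclass

@dataclass
class ModelEntry:
    model_name: str
    provider: str
    model_id: str | None = None

    @property
    def selector(self) -> str:
        # Assumed "provider/model_name" form, for illustration only.
        return f"{self.provider}/{self.model_name}"

def resolve_model(entries: list[ModelEntry], raw_model: str) -> str | None:
    # Only an unambiguous single match selects a model, as in the hunk above.
    matches = [
        m.selector
        for m in entries
        if (m.model_id or "").strip().lower() == raw_model.lower()
    ]
    return matches[0] if len(matches) == 1 else None

entries = [ModelEntry("sonnet", "anthropic", "claude-sonnet-4-5-20250929")]
assert resolve_model(entries, "CLAUDE-SONNET-4-5-20250929") == "anthropic/sonnet"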
klaude_code/config/assets/builtin_config.yaml CHANGED

@@ -1,280 +1,176 @@
+---
 # Built-in provider and model configurations
 # Users can start using klaude by simply setting environment variables
 # (ANTHROPIC_API_KEY, OPENAI_API_KEY, etc.) without manual configuration.
-
 provider_list:
   - provider_name: anthropic
     protocol: anthropic
     api_key: ${ANTHROPIC_API_KEY}
     model_list:
       - model_name: sonnet
-
-
-
-
-
-          cost:
-            input: 3.0
-            output: 15.0
-            cache_read: 0.3
-            cache_write: 3.75
+        model_id: claude-sonnet-4-5-20250929
+        context_limit: 200000
+        provider_routing:
+          sort: throughput
+        cost: {input: 3, output: 15, cache_read: 0.3, cache_write: 3.75}
       - model_name: opus
-
-
-
-
-
-
-
-          cost:
-            input: 5.0
-            output: 25.0
-            cache_read: 0.5
-            cache_write: 6.25
-
+        model_id: claude-opus-4-5-20251101
+        context_limit: 200000
+        verbosity: high
+        thinking:
+          type: enabled
+          budget_tokens: 2048
+        cost: {input: 5, output: 25, cache_read: 0.5, cache_write: 6.25}
   - provider_name: openai
     protocol: responses
     api_key: ${OPENAI_API_KEY}
     model_list:
       - model_name: gpt-5.2
-
-
-
-
-
-
-
-          cost:
-            input: 1.75
-            output: 14.0
-            cache_read: 0.17
-
+        model_id: gpt-5.2
+        max_tokens: 128000
+        context_limit: 400000
+        verbosity: high
+        thinking:
+          reasoning_effort: high
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
   - provider_name: openrouter
     protocol: openrouter
     api_key: ${OPENROUTER_API_KEY}
     model_list:
-      - model_name: gpt-5-mini
-        model_params:
-          model: openai/gpt-5-mini
-          max_tokens: 128000
-          context_limit: 400000
-          thinking:
-            reasoning_effort: high
-          cost:
-            input: 0.25
-            output: 2.0
-            cache_read: 0.03
       - model_name: gpt-5.1-codex-max
-
-
-
-
-
-
-          cost:
-            input: 1.25
-            output: 10.0
-            cache_read: 0.13
+        model_id: openai/gpt-5.1-codex-max
+        max_tokens: 128000
+        context_limit: 400000
+        thinking:
+          reasoning_effort: medium
+        cost: {input: 1.25, output: 10, cache_read: 0.13}
       - model_name: gpt-5.2
-
-
-
-
-
-
-
-          cost:
-            input: 1.75
-            output: 14.0
-            cache_read: 0.17
+        model_id: openai/gpt-5.2
+        max_tokens: 128000
+        context_limit: 400000
+        verbosity: high
+        thinking:
+          reasoning_effort: high
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
       - model_name: gpt-5.2-medium
-
-
-
-
-
-
-
-          cost:
-            input: 1.75
-            output: 14.0
-            cache_read: 0.17
+        model_id: openai/gpt-5.2
+        max_tokens: 128000
+        context_limit: 400000
+        verbosity: high
+        thinking:
+          reasoning_effort: medium
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
       - model_name: gpt-5.2-low
-
-
-
-
-
-
-
-          cost:
-            input: 1.75
-            output: 14.0
-            cache_read: 0.17
+        model_id: openai/gpt-5.2
+        max_tokens: 128000
+        context_limit: 400000
+        verbosity: low
+        thinking:
+          reasoning_effort: low
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
       - model_name: gpt-5.2-fast
-
-
-
-
-
-
-
-          cost:
-            input: 1.75
-            output: 14.0
-            cache_read: 0.17
+        model_id: openai/gpt-5.2
+        max_tokens: 128000
+        context_limit: 400000
+        verbosity: low
+        thinking:
+          reasoning_effort: none
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
       - model_name: kimi
-
-
-
-
-
-
-          cost:
-            input: 0.6
-            output: 2.5
-            cache_read: 0.15
+        model_id: moonshotai/kimi-k2-thinking
+        context_limit: 262144
+        provider_routing:
+          only:
+            - moonshotai/turbo
+        cost: {input: 0.6, output: 2.5, cache_read: 0.15}
       - model_name: haiku
-
-
-
-          cost:
-            input: 1.0
-            output: 5.0
-            cache_read: 0.1
-            cache_write: 1.25
+        model_id: anthropic/claude-haiku-4.5
+        context_limit: 200000
+        cost: {input: 1, output: 5, cache_read: 0.1, cache_write: 1.25}
       - model_name: sonnet
-
-
-
-
-
-          cost:
-            input: 3.0
-            output: 15.0
-            cache_read: 0.3
-            cache_write: 3.75
+        model_id: anthropic/claude-4.5-sonnet
+        context_limit: 200000
+        provider_routing:
+          sort: throughput
+        cost: {input: 3, output: 15, cache_read: 0.3, cache_write: 3.75}
       - model_name: opus
-
-
-
-
-
-
-
-          cost:
-            input: 5.0
-            output: 25.0
-            cache_read: 0.5
-            cache_write: 6.25
+        model_id: anthropic/claude-4.5-opus
+        context_limit: 200000
+        verbosity: high
+        thinking:
+          type: enabled
+          budget_tokens: 2048
+        cost: {input: 5, output: 25, cache_read: 0.5, cache_write: 6.25}
       - model_name: gemini-pro
-
-
-
-
-
-          cost:
-            input: 2.0
-            output: 12.0
-            cache_read: 0.2
+        model_id: google/gemini-3-pro-preview
+        context_limit: 1048576
+        thinking:
+          reasoning_effort: high
+        cost: {input: 2, output: 12, cache_read: 0.2}
       - model_name: gemini-flash
-
-
-
-
-
-          cost:
-            input: 0.5
-            output: 3.0
-            cache_read: 0.05
+        model_id: google/gemini-3-flash-preview
+        context_limit: 1048576
+        thinking:
+          reasoning_effort: medium
+        cost: {input: 0.5, output: 3, cache_read: 0.05}
       - model_name: nano-banana-pro
-
-
-
-
-
-
-          cost:
-            input: 2
-            output: 12
-            cache_read: 0.2
-            image: 120
+        model_id: google/gemini-3-pro-image-preview
+        context_limit: 66000
+        modalities:
+          - image
+          - text
+        cost: {input: 2, output: 12, cache_read: 0.2, image: 120}
       - model_name: nano-banana
-
-
-
-
-
-
-          cost:
-            input: 0.3
-            output: 2.5
-            cache_read: 0.03
-            image: 30
+        model_id: google/gemini-2.5-flash-image
+        context_limit: 33000
+        modalities:
+          - image
+          - text
+        cost: {input: 0.3, output: 2.5, cache_read: 0.03, image: 30}
       - model_name: grok
-
-
-
-
-
-
-          cost:
-            input: 0.2
-            output: 0.5
-            cache_read: 0.05
+        model_id: x-ai/grok-4.1-fast
+        context_limit: 2000000
+        thinking:
+          type: enabled
+          budget_tokens: 2048
+        cost: {input: 0.2, output: 0.5, cache_read: 0.05}
       - model_name: minimax
-
-
-
-          cost:
-            input: 0.3
-            output: 1.2
-            cache_read: 0.03
+        model_id: minimax/minimax-m2.1
+        context_limit: 204800
+        cost: {input: 0.3, output: 1.2, cache_read: 0.03}
       - model_name: glm
-
-
-
-
-
-
-
-
-
-
-
+        model_id: z-ai/glm-4.7
+        context_limit: 200000
+        provider_routing:
+          only:
+            - z-ai
+        cost: {input: 0.44, output: 1.74, cache_read: 0.04}
+      - model_name: seedream
+        model_id: bytedance-seed/seedream-4.5
+        context_limit: 4000
+        cost: {input: 0, output: 9.581, image: 9.581}
+        modalities:
+          - image
+          - text
   - provider_name: google
     protocol: google
     api_key: ${GOOGLE_API_KEY}
     model_list:
       - model_name: gemini-pro
-
-
-
-          cost:
-            input: 2.0
-            output: 12.0
-            cache_read: 0.2
+        model_id: gemini-3-pro-preview
+        context_limit: 1048576
+        cost: {input: 2, output: 12, cache_read: 0.2}
       - model_name: gemini-flash
-
-
-
-          cost:
-            input: 0.5
-            output: 3.0
-            cache_read: 0.05
+        model_id: gemini-3-flash-preview
+        context_limit: 1048576
+        cost: {input: 0.5, output: 3, cache_read: 0.05}
       - model_name: nano-banana-pro
-
-
-
-
-
-
-          cost:
-            input: 2
-            output: 12
-            cache_read: 0.2
-            image: 120
-
+        model_id: gemini-3-pro-image-preview
+        context_limit: 66000
+        modalities:
+          - image
+          - text
+        cost: {input: 2, output: 12, cache_read: 0.2, image: 120}
   - provider_name: bedrock
     protocol: bedrock
     aws_access_key: ${AWS_ACCESS_KEY_ID}

@@ -282,97 +178,59 @@ provider_list:
     aws_region: ${AWS_REGION}
     model_list:
       - model_name: sonnet
-
-
-
-          cost:
-            input: 3.0
-            output: 15.0
-            cache_read: 0.3
-            cache_write: 3.75
-
+        model_id: us.anthropic.claude-sonnet-4-5-20250929-v1:0
+        context_limit: 200000
+        cost: {input: 3, output: 15, cache_read: 0.3, cache_write: 3.75}
   - provider_name: deepseek
     protocol: anthropic
     api_key: ${DEEPSEEK_API_KEY}
     base_url: https://api.deepseek.com/anthropic
     model_list:
       - model_name: deepseek
-
-
-
-
-
-
-          cost:
-            input: 2
-            output: 3
-            cache_read: 0.2
-            currency: CNY
-
+        model_id: deepseek-reasoner
+        context_limit: 128000
+        thinking:
+          type: enabled
+          budget_tokens: 2048
+        cost: {input: 2, output: 3, cache_read: 0.2, currency: CNY}
   - provider_name: moonshot
     protocol: anthropic
     api_key: ${MOONSHOT_API_KEY}
     base_url: https://api.moonshot.cn/anthropic
     model_list:
       - model_name: kimi
-
-
-
-
-
-
-          cost:
-            input: 4.0
-            output: 16.0
-            cache_read: 1.0
-            currency: CNY
-
+        model_id: kimi-k2-thinking
+        context_limit: 262144
+        thinking:
+          type: enabled
+          budget_tokens: 8192
+        cost: {input: 4, output: 16, cache_read: 1, currency: CNY}
   - provider_name: claude-max
     protocol: claude_oauth
     model_list:
       - model_name: sonnet
-
-
-
-          cost:
-            input: 3.0
-            output: 15.0
-            cache_read: 0.3
-            cache_write: 3.75
+        model_id: claude-sonnet-4-5-20250929
+        context_limit: 200000
+        cost: {input: 3, output: 15, cache_read: 0.3, cache_write: 3.75}
       - model_name: opus
-
-
-
-
-
-
-
-          cost:
-            input: 5.0
-            output: 25.0
-            cache_read: 0.5
-            cache_write: 6.25
+        model_id: claude-opus-4-5-20251101
+        context_limit: 200000
+        verbosity: high
+        thinking:
+          type: enabled
+          budget_tokens: 2048
+        cost: {input: 5, output: 25, cache_read: 0.5, cache_write: 6.25}
       - model_name: haiku
-
-
-
-          cost:
-            input: 1.0
-            output: 5.0
-            cache_read: 0.1
-            cache_write: 1.25
-
+        model_id: claude-haiku-4-5-20251001
+        context_limit: 200000
+        cost: {input: 1, output: 5, cache_read: 0.1, cache_write: 1.25}
   - provider_name: codex
     protocol: codex_oauth
    model_list:
       - model_name: gpt-5.2-codex
-
-
-
-
-
-
-          cost:
-            input: 1.75
-            output: 14.0
-            cache_read: 0.17
+        model_id: gpt-5.2-codex
+        thinking:
+          reasoning_effort: medium
+        context_limit: 400000
+        max_tokens: 128000
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
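The YAML rewrite above is mechanical: each entry's nested model_params mapping is hoisted into the entry itself, the model key is renamed to model_id, and cost collapses to flow style. A hedged sketch of that migration for old-style entries; flatten_model_entry is a hypothetical helper, not part of klaude-code:

# Hypothetical migration helper mirroring the builtin_config.yaml rewrite:
# hoist `model_params` into the entry and rename `model` -> `model_id`.
def flatten_model_entry(entry: dict) -> dict:
    entry = dict(entry)  # do not mutate the caller's mapping
    params = dict(entry.pop("model_params", {}) or {})
    if "model" in params:
        params["model_id"] = params.pop("model")
    # Entry-level keys such as model_name win over hoisted parameter keys.
    return {**params, **entry}

# Old-style data taken from the deleted gpt-5-mini block above.
old = {
    "model_name": "gpt-5-mini",
    "model_params": {
        "model": "openai/gpt-5-mini",
        "max_tokens": 128000,
        "context_limit": 400000,
        "thinking": {"reasoning_effort": "high"},
        "cost": {"input": 0.25, "output": 2.0, "cache_read": 0.03},
    },
}
assert flatten_model_entry(old) == {
    "model_name": "gpt-5-mini",
    "model_id": "openai/gpt-5-mini",
    "max_tokens": 128000,
    "context_limit": 400000,
    "thinking": {"reasoning_effort": "high"},
    "cost": {"input": 0.25, "output": 2.0, "cache_read": 0.03},
}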
klaude_code/config/config.py CHANGED

@@ -58,9 +58,10 @@ config_path = Path.home() / ".klaude" / "klaude-config.yaml"
 example_config_path = Path.home() / ".klaude" / "klaude-config.example.yaml"
 
 
-class ModelConfig(
+class ModelConfig(llm_param.LLMConfigModelParameter):
+    """Model configuration that flattens LLMConfigModelParameter fields."""
+
     model_name: str
-    model_params: llm_param.LLMConfigModelParameter
 
 
 class ProviderConfig(llm_param.LLMConfigProviderParameter):

@@ -135,10 +136,11 @@ class UserProviderConfig(BaseModel):
     model_list: list[ModelConfig] = Field(default_factory=lambda: [])
 
 
-class ModelEntry(
+class ModelEntry(llm_param.LLMConfigModelParameter):
+    """Model entry with provider info, flattens LLMConfigModelParameter fields."""
+
     model_name: str
     provider: str
-    model_params: llm_param.LLMConfigModelParameter
 
     @property
     def selector(self) -> str:

@@ -325,7 +327,7 @@ class Config(BaseModel):
             provider_dump["api_key"] = api_key
             return llm_param.LLMConfigParameter(
                 **provider_dump,
-                **model.
+                **model.model_dump(exclude={"model_name"}),
             )
 
         raise ValueError(f"Unknown model: {model_name}")

@@ -340,7 +342,7 @@ class Config(BaseModel):
             ModelEntry(
                 model_name=model.model_name,
                 provider=provider.provider_name,
-
+                **model.model_dump(exclude={"model_name"}),
             )
             for provider in self.provider_list
             if not only_available or not provider.is_api_key_missing()

@@ -350,7 +352,7 @@ class Config(BaseModel):
     def has_available_image_model(self) -> bool:
         """Check if any image generation model is available."""
         for entry in self.iter_model_entries(only_available=True):
-            if entry.
+            if entry.modalities and "image" in entry.modalities:
                 return True
         return False
 

@@ -364,7 +366,7 @@ class Config(BaseModel):
     def get_first_available_image_model(self) -> str | None:
         """Get the first available image generation model, or None."""
         for entry in self.iter_model_entries(only_available=True):
-            if entry.
+            if entry.modalities and "image" in entry.modalities:
                 return entry.model_name
         return None
 

@@ -409,15 +411,13 @@ def get_example_config() -> UserConfig:
         model_list=[
             ModelConfig(
                 model_name="my-model",
-
-
-
-
-
-
-
-                        cache_read=0.1,
-                    ),
+                model_id="model-id-from-provider",
+                max_tokens=16000,
+                context_limit=200000,
+                cost=llm_param.Cost(
+                    input=1,
+                    output=10,
+                    cache_read=0.1,
                 ),
             ),
         ],
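The config.py hunks implement the same flattening on the pydantic side: ModelConfig and ModelEntry now inherit LLMConfigModelParameter instead of wrapping it, so a model's parameters can be spread straight into an LLMConfigParameter via model_dump. A simplified sketch of the pattern, assuming pydantic v2 and modeling only a few of the real fields:

# Simplified sketch of the flattened-inheritance pattern above; the field
# set here is a small subset of the real LLMConfigModelParameter.
from pydantic import BaseModel, ConfigDict

class LLMConfigModelParameter(BaseModel):
    # Allow field names starting with "model_" (pydantic v2 reserves that
    # namespace by default).
    model_config = ConfigDict(protected_namespaces=())

    model_id: str | None = None
    max_tokens: int | None = None
    context_limit: int | None = None
    modalities: list[str] | None = None

class ModelConfig(LLMConfigModelParameter):
    """Model configuration that flattens LLMConfigModelParameter fields."""
    model_name: str

cfg = ModelConfig(
    model_name="sonnet",
    model_id="claude-sonnet-4-5-20250929",
    context_limit=200000,
)

# As in Config.get_llm_config above: spread everything but the local name.
params = cfg.model_dump(exclude={"model_name"})
assert params["model_id"] == "claude-sonnet-4-5-20250929"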