klaude-code 2.2.0__py3-none-any.whl → 2.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/app/runtime.py +2 -15
- klaude_code/cli/list_model.py +30 -13
- klaude_code/cli/main.py +26 -10
- klaude_code/config/assets/builtin_config.yaml +177 -310
- klaude_code/config/config.py +158 -21
- klaude_code/config/{select_model.py → model_matcher.py} +41 -16
- klaude_code/config/sub_agent_model_helper.py +217 -0
- klaude_code/config/thinking.py +2 -2
- klaude_code/const.py +1 -1
- klaude_code/core/agent_profile.py +43 -5
- klaude_code/core/executor.py +129 -47
- klaude_code/core/manager/llm_clients_builder.py +17 -11
- klaude_code/core/prompts/prompt-nano-banana.md +1 -1
- klaude_code/core/tool/file/diff_builder.py +25 -18
- klaude_code/core/tool/sub_agent_tool.py +2 -1
- klaude_code/llm/anthropic/client.py +12 -9
- klaude_code/llm/anthropic/input.py +54 -29
- klaude_code/llm/client.py +1 -1
- klaude_code/llm/codex/client.py +2 -2
- klaude_code/llm/google/client.py +7 -7
- klaude_code/llm/google/input.py +23 -2
- klaude_code/llm/input_common.py +2 -2
- klaude_code/llm/openai_compatible/client.py +3 -3
- klaude_code/llm/openai_compatible/input.py +22 -13
- klaude_code/llm/openai_compatible/stream.py +1 -1
- klaude_code/llm/openrouter/client.py +4 -4
- klaude_code/llm/openrouter/input.py +35 -25
- klaude_code/llm/responses/client.py +5 -5
- klaude_code/llm/responses/input.py +96 -57
- klaude_code/protocol/commands.py +1 -2
- klaude_code/protocol/events/__init__.py +7 -1
- klaude_code/protocol/events/chat.py +10 -0
- klaude_code/protocol/events/system.py +4 -0
- klaude_code/protocol/llm_param.py +1 -1
- klaude_code/protocol/model.py +0 -26
- klaude_code/protocol/op.py +17 -5
- klaude_code/protocol/op_handler.py +5 -0
- klaude_code/protocol/sub_agent/AGENTS.md +28 -0
- klaude_code/protocol/sub_agent/__init__.py +10 -14
- klaude_code/protocol/sub_agent/image_gen.py +2 -1
- klaude_code/session/codec.py +2 -6
- klaude_code/session/session.py +13 -3
- klaude_code/skill/assets/create-plan/SKILL.md +3 -5
- klaude_code/tui/command/__init__.py +3 -6
- klaude_code/tui/command/clear_cmd.py +0 -1
- klaude_code/tui/command/command_abc.py +6 -4
- klaude_code/tui/command/copy_cmd.py +10 -10
- klaude_code/tui/command/debug_cmd.py +11 -10
- klaude_code/tui/command/export_online_cmd.py +18 -23
- klaude_code/tui/command/fork_session_cmd.py +39 -43
- klaude_code/tui/command/model_cmd.py +10 -49
- klaude_code/tui/command/model_picker.py +142 -0
- klaude_code/tui/command/refresh_cmd.py +0 -1
- klaude_code/tui/command/registry.py +15 -21
- klaude_code/tui/command/resume_cmd.py +10 -16
- klaude_code/tui/command/status_cmd.py +8 -12
- klaude_code/tui/command/sub_agent_model_cmd.py +185 -0
- klaude_code/tui/command/terminal_setup_cmd.py +8 -11
- klaude_code/tui/command/thinking_cmd.py +4 -6
- klaude_code/tui/commands.py +5 -0
- klaude_code/tui/components/bash_syntax.py +1 -1
- klaude_code/tui/components/command_output.py +96 -0
- klaude_code/tui/components/common.py +1 -1
- klaude_code/tui/components/developer.py +3 -115
- klaude_code/tui/components/metadata.py +1 -63
- klaude_code/tui/components/rich/cjk_wrap.py +3 -2
- klaude_code/tui/components/rich/status.py +49 -3
- klaude_code/tui/components/rich/theme.py +2 -0
- klaude_code/tui/components/sub_agent.py +25 -46
- klaude_code/tui/components/welcome.py +99 -0
- klaude_code/tui/input/prompt_toolkit.py +19 -8
- klaude_code/tui/machine.py +5 -0
- klaude_code/tui/renderer.py +7 -8
- klaude_code/tui/runner.py +0 -6
- klaude_code/tui/terminal/selector.py +8 -6
- {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/METADATA +21 -74
- {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/RECORD +79 -76
- klaude_code/tui/command/help_cmd.py +0 -51
- klaude_code/tui/command/model_select.py +0 -84
- klaude_code/tui/command/release_notes_cmd.py +0 -85
- {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/WHEEL +0 -0
- {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/entry_points.txt +0 -0
klaude_code/config/config.py
CHANGED
@@ -58,9 +58,10 @@ config_path = Path.home() / ".klaude" / "klaude-config.yaml"
 example_config_path = Path.home() / ".klaude" / "klaude-config.example.yaml"


-class ModelConfig(
+class ModelConfig(llm_param.LLMConfigModelParameter):
+    """Model configuration that flattens LLMConfigModelParameter fields."""
+
     model_name: str
-    model_params: llm_param.LLMConfigModelParameter


 class ProviderConfig(llm_param.LLMConfigProviderParameter):
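In 2.2.0 a model's tuning parameters sat in a nested `model_params` object; from 2.4.0, `ModelConfig` inherits `LLMConfigModelParameter`, so those fields live directly on the entry. A minimal sketch of the shape change, assuming an illustrative subset of fields (`model_id`, `max_tokens`, `context_limit` appear in other hunks of this diff; the real base class is defined in `klaude_code/protocol/llm_param.py` and is not reproduced here):

# Shape-only sketch using dataclasses as stand-ins for the real pydantic models.
from dataclasses import dataclass


@dataclass
class LLMConfigModelParameter:          # assumed subset of the real parameter fields
    model_id: str | None = None
    max_tokens: int | None = None
    context_limit: int | None = None


@dataclass
class OldModelConfig:                   # 2.2.0 shape: parameters nested under model_params
    model_name: str
    model_params: LLMConfigModelParameter


@dataclass
class NewModelConfig(LLMConfigModelParameter):  # 2.4.0 shape: parameters flattened onto the entry
    model_name: str = ""


new_entry = NewModelConfig(model_name="my-model", model_id="model-id-from-provider", max_tokens=16000)
assert new_entry.max_tokens == 16000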
@@ -135,10 +136,21 @@ class UserProviderConfig(BaseModel):
     model_list: list[ModelConfig] = Field(default_factory=lambda: [])


-class ModelEntry(
+class ModelEntry(llm_param.LLMConfigModelParameter):
+    """Model entry with provider info, flattens LLMConfigModelParameter fields."""
+
     model_name: str
     provider: str
-
+
+    @property
+    def selector(self) -> str:
+        """Return a provider-qualified model selector.
+
+        This selector can be persisted in user config (e.g. ``sonnet@openrouter``)
+        and later resolved via :meth:`Config.get_model_config`.
+        """
+
+        return f"{self.model_name}@{self.provider}"


 class UserConfig(BaseModel):
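The new `selector` property gives every flattened entry a stable, provider-qualified identifier. A small stand-alone sketch of how it behaves (the `ModelEntry` below is a simplified stand-in for the class above, not an import from klaude_code):

from dataclasses import dataclass


@dataclass
class ModelEntry:
    model_name: str
    provider: str

    @property
    def selector(self) -> str:
        # Same format as the property added above: "<model_name>@<provider>".
        return f"{self.model_name}@{self.provider}"


entry = ModelEntry(model_name="sonnet", provider="openrouter")
assert entry.selector == "sonnet@openrouter"  # value that can be persisted in user config

Persisting the selector rather than the bare model name is what lets the same base name (e.g. `sonnet`) coexist under several configured providers.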
@@ -191,8 +203,103 @@ class Config(BaseModel):
         """Set the user config reference for saving."""
         object.__setattr__(self, "_user_config", user_config)

+    @classmethod
+    def _split_model_selector(cls, model_selector: str) -> tuple[str, str | None]:
+        """Split a model selector into (model_name, provider_name).
+
+        Supported forms:
+        - ``sonnet``: unqualified; caller should pick the first matching provider.
+        - ``sonnet@openrouter``: provider-qualified.
+
+        Note: the provider segment is normalized for backwards compatibility.
+        """
+
+        trimmed = model_selector.strip()
+        if "@" not in trimmed:
+            return trimmed, None
+
+        base, provider = trimmed.rsplit("@", 1)
+        base = base.strip()
+        provider = provider.strip()
+        if not base or not provider:
+            raise ValueError(f"Invalid model selector: {model_selector!r}")
+        return base, provider
+
+    def has_model_config_name(self, model_selector: str) -> bool:
+        """Return True if the selector points to a configured model.
+
+        This check is configuration-only: it does not require a valid API key or
+        OAuth login.
+        """
+
+        model_name, provider_name = self._split_model_selector(model_selector)
+        if provider_name is not None:
+            for provider in self.provider_list:
+                if provider.provider_name.casefold() != provider_name.casefold():
+                    continue
+                return any(m.model_name == model_name for m in provider.model_list)
+            return False
+
+        return any(any(m.model_name == model_name for m in provider.model_list) for provider in self.provider_list)
+
+    def resolve_model_location(self, model_selector: str) -> tuple[str, str] | None:
+        """Resolve a selector to (model_name, provider_name), without auth checks.
+
+        - If the selector is provider-qualified, returns that provider.
+        - If unqualified, returns the first provider that defines the model.
+        """
+
+        model_name, provider_name = self._split_model_selector(model_selector)
+        if provider_name is not None:
+            for provider in self.provider_list:
+                if provider.provider_name.casefold() != provider_name.casefold():
+                    continue
+                if any(m.model_name == model_name for m in provider.model_list):
+                    return model_name, provider.provider_name
+            return None
+
+        for provider in self.provider_list:
+            if any(m.model_name == model_name for m in provider.model_list):
+                return model_name, provider.provider_name
+        return None
+
+    def resolve_model_location_prefer_available(self, model_selector: str) -> tuple[str, str] | None:
+        """Resolve a selector to (model_name, provider_name), preferring usable providers.
+
+        This uses the same availability logic as :meth:`get_model_config` (API-key
+        presence for non-OAuth protocols).
+        """
+
+        requested_model, requested_provider = self._split_model_selector(model_selector)
+
+        for provider in self.provider_list:
+            if requested_provider is not None and provider.provider_name.casefold() != requested_provider.casefold():
+                continue
+
+            api_key = provider.get_resolved_api_key()
+            if (
+                provider.protocol
+                not in {
+                    llm_param.LLMClientProtocol.CODEX_OAUTH,
+                    llm_param.LLMClientProtocol.CLAUDE_OAUTH,
+                    llm_param.LLMClientProtocol.BEDROCK,
+                }
+                and not api_key
+            ):
+                continue
+
+            if any(m.model_name == requested_model for m in provider.model_list):
+                return requested_model, provider.provider_name
+
+        return None
+
     def get_model_config(self, model_name: str) -> llm_param.LLMConfigParameter:
+        requested_model, requested_provider = self._split_model_selector(model_name)
+
         for provider in self.provider_list:
+            if requested_provider is not None and provider.provider_name.casefold() != requested_provider.casefold():
+                continue
+
             # Resolve ${ENV_VAR} syntax for api_key
             api_key = provider.get_resolved_api_key()

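A stand-alone sketch of the selector-splitting rules that `_split_model_selector` introduces, reproduced outside the `Config` class so it can be run in isolation (same behavior, hypothetical function name):

def split_model_selector(model_selector: str) -> tuple[str, str | None]:
    # Mirrors the rules above: no "@" means "unqualified"; otherwise the text
    # after the last "@" names the provider, and empty halves are rejected.
    trimmed = model_selector.strip()
    if "@" not in trimmed:
        return trimmed, None
    base, provider = trimmed.rsplit("@", 1)
    base, provider = base.strip(), provider.strip()
    if not base or not provider:
        raise ValueError(f"Invalid model selector: {model_selector!r}")
    return base, provider


assert split_model_selector("sonnet") == ("sonnet", None)
assert split_model_selector("sonnet@openrouter") == ("sonnet", "openrouter")
assert split_model_selector(" sonnet @ openrouter ") == ("sonnet", "openrouter")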
@@ -206,15 +313,22 @@ class Config(BaseModel):
                 }
                 and not api_key
             ):
+                # When provider is explicitly requested, fail fast with a clearer error.
+                if requested_provider is not None:
+                    raise ValueError(
+                        f"Provider '{provider.provider_name}' is not available (missing API key) for: {model_name}"
+                    )
                 continue
+
             for model in provider.model_list:
-                if model.model_name
-
-
-
-
-
-                )
+                if model.model_name != requested_model:
+                    continue
+                provider_dump = provider.model_dump(exclude={"model_list"})
+                provider_dump["api_key"] = api_key
+                return llm_param.LLMConfigParameter(
+                    **provider_dump,
+                    **model.model_dump(exclude={"model_name"}),
+                )

         raise ValueError(f"Unknown model: {model_name}")

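The rewritten return path composes the final `LLMConfigParameter` by dumping the provider (minus its `model_list`), overriding `api_key` with the resolved value, and then layering the matched model's fields (minus `model_name`) on top. A dict-level sketch of that merge; the real code does the same thing with pydantic's `model_dump`, and the field names here are illustrative only:

# Hypothetical dumps standing in for provider.model_dump(...) and model.model_dump(...).
provider_dump = {"provider_name": "openrouter", "api_key": None}
provider_dump["api_key"] = "resolved-from-env"  # api_key override, as in the hunk above
model_dump = {"model_name": "sonnet", "max_tokens": 16000}

combined = {**provider_dump, **{k: v for k, v in model_dump.items() if k != "model_name"}}
assert combined == {"provider_name": "openrouter", "api_key": "resolved-from-env", "max_tokens": 16000}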
@@ -228,13 +342,34 @@ class Config(BaseModel):
                 ModelEntry(
                     model_name=model.model_name,
                     provider=provider.provider_name,
-
+                    **model.model_dump(exclude={"model_name"}),
                 )
                 for provider in self.provider_list
                 if not only_available or not provider.is_api_key_missing()
                 for model in provider.model_list
             ]

+    def has_available_image_model(self) -> bool:
+        """Check if any image generation model is available."""
+        for entry in self.iter_model_entries(only_available=True):
+            if entry.modalities and "image" in entry.modalities:
+                return True
+        return False
+
+    def get_first_available_nano_banana_model(self) -> str | None:
+        """Get the first available nano-banana model, or None."""
+        for entry in self.iter_model_entries(only_available=True):
+            if "nano-banana" in entry.model_name:
+                return entry.model_name
+        return None
+
+    def get_first_available_image_model(self) -> str | None:
+        """Get the first available image generation model, or None."""
+        for entry in self.iter_model_entries(only_available=True):
+            if entry.modalities and "image" in entry.modalities:
+                return entry.model_name
+        return None
+
     async def save(self) -> None:
         """Save user config to file (excludes builtin providers).

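These helpers feed sub-agent availability (see the new `sub_agent_model_helper.py` later in this diff): an entry counts as an image model when its `modalities` list contains `"image"`. A tiny, self-contained sketch of that check using hypothetical entries:

from dataclasses import dataclass


@dataclass
class FakeEntry:                      # hypothetical stand-in for ModelEntry
    model_name: str
    modalities: list[str] | None = None


entries = [FakeEntry("sonnet"), FakeEntry("nano-banana", modalities=["image"])]

first_image_model = next(
    (e.model_name for e in entries if e.modalities and "image" in e.modalities),
    None,
)
assert first_image_model == "nano-banana"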
@@ -276,15 +411,13 @@ def get_example_config() -> UserConfig:
         model_list=[
             ModelConfig(
                 model_name="my-model",
-
-
-
-
-
-
-
-                cache_read=0.1,
-            ),
+                model_id="model-id-from-provider",
+                max_tokens=16000,
+                context_limit=200000,
+                cost=llm_param.Cost(
+                    input=1,
+                    output=10,
+                    cache_read=0.1,
                 ),
             ),
         ],
@@ -418,6 +551,10 @@ def create_example_config() -> bool:
     header = "# Example configuration for klaude-code\n"
     header += "# Copy this file to klaude-config.yaml and modify as needed.\n"
     header += "# Run `klaude list` to see available models.\n"
+    header += "# Tip: you can pick a provider explicitly with `model@provider` (e.g. `sonnet@openrouter`).\n"
+    header += (
+        "# If you omit `@provider` (e.g. `sonnet`), klaude picks the first configured provider with credentials.\n"
+    )
     header += "#\n"
     header += "# Built-in providers (anthropic, openai, openrouter, deepseek) are available automatically.\n"
     header += "# Just set the corresponding API key environment variable to use them.\n\n"
klaude_code/config/{select_model.py → model_matcher.py}
RENAMED
@@ -50,7 +50,8 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:

     # Only show models from providers with valid API keys
     models: list[ModelEntry] = sorted(
-        config.iter_model_entries(only_available=True),
+        config.iter_model_entries(only_available=True),
+        key=lambda m: (m.provider.lower(), m.model_name.lower()),
     )

     if not models:
@@ -62,26 +63,42 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
             error_message="No models available",
         )

-
+    selectors: list[str] = [m.selector for m in models]

     # Try to match preferred model name
     filter_hint = preferred
     if preferred and preferred.strip():
         preferred = preferred.strip()
-
-
+
+        # Exact match on selector (e.g. sonnet@openrouter)
+        if preferred in selectors:
             return ModelMatchResult(matched_model=preferred, filtered_models=models, filter_hint=None)

+        # Exact match on base model name (e.g. sonnet)
+        exact_base_matches = [m for m in models if m.model_name == preferred]
+        if len(exact_base_matches) == 1:
+            return ModelMatchResult(
+                matched_model=exact_base_matches[0].selector,
+                filtered_models=models,
+                filter_hint=None,
+            )
+        if len(exact_base_matches) > 1:
+            return ModelMatchResult(matched_model=None, filtered_models=exact_base_matches, filter_hint=filter_hint)
+
         preferred_lower = preferred.lower()
-        # Case-insensitive exact match (model_name
+        # Case-insensitive exact match (selector/model_name/model_id)
         exact_ci_matches = [
             m
             for m in models
-            if preferred_lower == m.
+            if preferred_lower == m.selector.lower()
+            or preferred_lower == m.model_name.lower()
+            or preferred_lower == (m.model_id or "").lower()
         ]
         if len(exact_ci_matches) == 1:
             return ModelMatchResult(
-                matched_model=exact_ci_matches[0].
+                matched_model=exact_ci_matches[0].selector,
+                filtered_models=models,
+                filter_hint=None,
             )

         # Normalized matching (e.g. gpt52 == gpt-5.2, gpt52 in gpt-5.2-2025-...)
@@ -91,35 +108,43 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
         normalized_matches = [
             m
             for m in models
-            if preferred_norm == _normalize_model_key(m.
-            or preferred_norm == _normalize_model_key(m.
+            if preferred_norm == _normalize_model_key(m.selector)
+            or preferred_norm == _normalize_model_key(m.model_name)
+            or preferred_norm == _normalize_model_key(m.model_id or "")
         ]
         if len(normalized_matches) == 1:
             return ModelMatchResult(
-                matched_model=normalized_matches[0].
+                matched_model=normalized_matches[0].selector,
+                filtered_models=models,
+                filter_hint=None,
             )

         if not normalized_matches and len(preferred_norm) >= 4:
             normalized_matches = [
                 m
                 for m in models
-                if preferred_norm in _normalize_model_key(m.
-                or preferred_norm in _normalize_model_key(m.
+                if preferred_norm in _normalize_model_key(m.selector)
+                or preferred_norm in _normalize_model_key(m.model_name)
+                or preferred_norm in _normalize_model_key(m.model_id or "")
             ]
             if len(normalized_matches) == 1:
                 return ModelMatchResult(
-                    matched_model=normalized_matches[0].
+                    matched_model=normalized_matches[0].selector,
+                    filtered_models=models,
+                    filter_hint=None,
                 )

-        # Partial match (case-insensitive) on model_name or
+        # Partial match (case-insensitive) on model_name or model_id.
         # If normalized matching found candidates (even if multiple), prefer those as the filter set.
         matches = normalized_matches or [
             m
             for m in models
-            if preferred_lower in m.
+            if preferred_lower in m.selector.lower()
+            or preferred_lower in m.model_name.lower()
+            or preferred_lower in (m.model_id or "").lower()
        ]
         if len(matches) == 1:
-            return ModelMatchResult(matched_model=matches[0].
+            return ModelMatchResult(matched_model=matches[0].selector, filtered_models=models, filter_hint=None)
         if matches:
             # Multiple matches: filter the list for interactive selection
             return ModelMatchResult(matched_model=None, filtered_models=matches, filter_hint=filter_hint)
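With the two hunks above, `match_model_from_config` resolves a model hint through a ladder: exact selector, unique exact base name, case-insensitive exact match, normalized match, then substring match, falling back to an interactive filter when several candidates remain. `_normalize_model_key` itself is not part of this diff, so the sketch below only illustrates the kind of normalization the `gpt52 == gpt-5.2` comment implies; it is an assumption, not the shipped implementation:

import re


def normalize_model_key(value: str) -> str:
    # Assumed behavior: lowercase and keep only letters and digits,
    # so "gpt-5.2" and "GPT52" collapse to the same key.
    return re.sub(r"[^a-z0-9]", "", value.lower())


assert normalize_model_key("gpt-5.2") == normalize_model_key("gpt52")
assert normalize_model_key("gpt52") in normalize_model_key("gpt-5.2-2025-01-01")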
klaude_code/config/sub_agent_model_helper.py
ADDED
@@ -0,0 +1,217 @@
+"""Helper for sub-agent model availability and selection logic."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+from klaude_code.protocol.sub_agent import (
+    AVAILABILITY_IMAGE_MODEL,
+    SubAgentProfile,
+    get_sub_agent_profile,
+    get_sub_agent_profile_by_tool,
+    iter_sub_agent_profiles,
+    sub_agent_tool_names,
+)
+from klaude_code.protocol.tools import SubAgentType
+
+if TYPE_CHECKING:
+    from klaude_code.config.config import Config, ModelEntry
+
+
+@dataclass
+class SubAgentModelInfo:
+    """Sub-agent and its current model configuration."""
+
+    profile: SubAgentProfile
+    # Explicitly configured model selector (from config), if any.
+    configured_model: str | None
+
+    # Effective model name used by this sub-agent.
+    # - When configured_model is set: equals configured_model.
+    # - When requirement-based default applies (e.g. ImageGen): resolved model.
+    # - When inheriting from main agent: None.
+    effective_model: str | None
+
+
+@dataclass(frozen=True, slots=True)
+class EmptySubAgentModelBehavior:
+    """Human-facing description for an unset (empty) sub-agent model config."""
+
+    # Summary text for UI (kept UI-framework agnostic).
+    description: str
+
+    # Best-effort resolved model name (if any). For ImageGen this is usually the
+    # first available image model; for other sub-agents it's the main model.
+    resolved_model_name: str | None
+
+
+class SubAgentModelHelper:
+    """Centralized logic for sub-agent availability and model selection."""
+
+    def __init__(self, config: Config) -> None:
+        self._config = config
+
+    def check_availability_requirement(self, requirement: str | None) -> bool:
+        """Check if a sub-agent's availability requirement is met.
+
+        Args:
+            requirement: The availability requirement constant (e.g., AVAILABILITY_IMAGE_MODEL).
+
+        Returns:
+            True if the requirement is met or if there's no requirement.
+        """
+        if requirement is None:
+            return True
+
+        if requirement == AVAILABILITY_IMAGE_MODEL:
+            return self._config.has_available_image_model()
+
+        return True
+
+    def resolve_model_for_requirement(self, requirement: str | None) -> str | None:
+        """Resolve the model name for a given availability requirement.
+
+        Args:
+            requirement: The availability requirement constant.
+
+        Returns:
+            The model name if found, None otherwise.
+        """
+        if requirement == AVAILABILITY_IMAGE_MODEL:
+            return self._config.get_first_available_image_model()
+        return None
+
+    def resolve_default_model_override(self, sub_agent_type: str) -> str | None:
+        """Resolve the default model override for a sub-agent when unset.
+
+        Returns:
+            - None for sub-agents that default to inheriting the main agent.
+            - A model name for sub-agents that require a dedicated model (e.g. ImageGen).
+
+        Note: This intentionally ignores any explicit user config; callers use this
+        when they want the *unset* behavior.
+        """
+
+        profile = get_sub_agent_profile(sub_agent_type)
+        if profile.availability_requirement is None:
+            return None
+        return self.resolve_model_for_requirement(profile.availability_requirement)
+
+    def describe_empty_model_config_behavior(
+        self,
+        sub_agent_type: str,
+        *,
+        main_model_name: str,
+    ) -> EmptySubAgentModelBehavior:
+        """Describe what happens when a sub-agent model is not configured.
+
+        Most sub-agents default to inheriting the main model.
+
+        Sub-agents with an availability requirement (e.g. ImageGen) do NOT
+        inherit from the main model; instead they auto-resolve a suitable model
+        (currently: the first available image model).
+        """
+
+        profile = get_sub_agent_profile(sub_agent_type)
+
+        requirement = profile.availability_requirement
+        if requirement is None:
+            return EmptySubAgentModelBehavior(
+                description=f"inherit from main agent: {main_model_name}",
+                resolved_model_name=main_model_name,
+            )
+
+        resolved = self.resolve_model_for_requirement(requirement)
+        if requirement == AVAILABILITY_IMAGE_MODEL:
+            if resolved:
+                return EmptySubAgentModelBehavior(
+                    description=f"auto-select first available image model: {resolved}",
+                    resolved_model_name=resolved,
+                )
+            return EmptySubAgentModelBehavior(
+                description="auto-select first available image model",
+                resolved_model_name=None,
+            )
+
+        if resolved:
+            return EmptySubAgentModelBehavior(
+                description=f"auto-select model for requirement '{requirement}': {resolved}",
+                resolved_model_name=resolved,
+            )
+        return EmptySubAgentModelBehavior(
+            description=f"auto-select model for requirement '{requirement}'",
+            resolved_model_name=None,
+        )
+
+    def get_available_sub_agents(self) -> list[SubAgentModelInfo]:
+        """Return all available sub-agents with their current model config.
+
+        Only returns sub-agents that:
+        1. Are enabled by default
+        2. Have their availability requirements met
+
+        For sub-agents without explicit config, resolves model based on availability_requirement.
+        """
+        result: list[SubAgentModelInfo] = []
+        for profile in iter_sub_agent_profiles(enabled_only=True):
+            if not self.check_availability_requirement(profile.availability_requirement):
+                continue
+            configured_model = self._config.sub_agent_models.get(profile.name)
+            effective_model = configured_model
+            if not effective_model and profile.availability_requirement:
+                effective_model = self.resolve_model_for_requirement(profile.availability_requirement)
+            result.append(
+                SubAgentModelInfo(
+                    profile=profile,
+                    configured_model=configured_model,
+                    effective_model=effective_model,
+                )
+            )
+        return result
+
+    def get_selectable_models(self, sub_agent_type: str) -> list[ModelEntry]:
+        """Return selectable models for a specific sub-agent type.
+
+        For sub-agents with availability_requirement (e.g., ImageGen):
+        - Only returns models matching the requirement (e.g., image models)
+
+        For other sub-agents:
+        - Returns all available models
+        """
+        profile = get_sub_agent_profile(sub_agent_type)
+        all_models = self._config.iter_model_entries(only_available=True)
+
+        if profile.availability_requirement == AVAILABILITY_IMAGE_MODEL:
+            return [m for m in all_models if m.modalities and "image" in m.modalities]
+
+        return all_models
+
+    def get_enabled_sub_agent_tool_names(self) -> list[str]:
+        """Return sub-agent tool names that should be added to main agent's tool list."""
+        result: list[str] = []
+        for name in sub_agent_tool_names(enabled_only=True):
+            profile = get_sub_agent_profile_by_tool(name)
+            if profile is not None and self.check_availability_requirement(profile.availability_requirement):
+                result.append(name)
+        return result
+
+    def get_enabled_sub_agent_types(self) -> set[SubAgentType]:
+        """Return set of sub-agent types that are enabled and available."""
+        enabled: set[SubAgentType] = set()
+        for name in sub_agent_tool_names(enabled_only=True):
+            profile = get_sub_agent_profile_by_tool(name)
+            if profile is not None and self.check_availability_requirement(profile.availability_requirement):
+                enabled.add(profile.name)
+        return enabled
+
+    def build_sub_agent_client_configs(self) -> dict[SubAgentType, str]:
+        """Return model names for each sub-agent that needs a dedicated client."""
+        result: dict[SubAgentType, str] = {}
+        for profile in iter_sub_agent_profiles():
+            model_name = self._config.sub_agent_models.get(profile.name)
+            if not model_name and profile.availability_requirement:
+                model_name = self.resolve_model_for_requirement(profile.availability_requirement)
+            if model_name:
+                result[profile.name] = model_name
+        return result
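A hedged usage sketch of the new helper, assuming an already-loaded klaude_code `Config` instance is passed in (the wrapper function and its name are illustrative, not part of the package):

from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper


def summarize_sub_agent_models(config) -> list[str]:
    """List each available sub-agent with the model it would actually use."""
    helper = SubAgentModelHelper(config)
    lines: list[str] = []
    for info in helper.get_available_sub_agents():
        model = info.effective_model or "(inherits the main agent model)"
        lines.append(f"{info.profile.name}: {model}")
    return lines

Centralizing this logic in one helper is what lets the new `/sub-agent-model` command, the client builder, and the executor (all touched in this release) agree on when a sub-agent is available and which model it falls back to.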
klaude_code/config/thinking.py
CHANGED
@@ -104,7 +104,7 @@ def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
         return "not set"

     if protocol == llm_param.LLMClientProtocol.OPENROUTER:
-        if is_openrouter_model_with_reasoning_effort(config.
+        if is_openrouter_model_with_reasoning_effort(config.model_id):
             if thinking.reasoning_effort:
                 return f"reasoning_effort={thinking.reasoning_effort}"
             else:
@@ -198,7 +198,7 @@ def get_thinking_picker_data(config: llm_param.LLMConfigParameter) -> ThinkingPi
         ThinkingPickerData with options and current value, or None if protocol doesn't support thinking.
     """
     protocol = config.protocol
-    model_name = config.
+    model_name = config.model_id
     thinking = config.thinking

     if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX_OAUTH):
klaude_code/const.py
CHANGED
@@ -123,7 +123,7 @@ TAB_EXPAND_WIDTH = 8 # Tab expansion width for text rendering
 DIFF_PREFIX_WIDTH = 4 # Width of line number prefix in diff display
 MAX_DIFF_LINES = 500 # Maximum lines to show in diff output
 INVALID_TOOL_CALL_MAX_LENGTH = 200 # Maximum length for invalid tool call display
-TRUNCATE_DISPLAY_MAX_LINE_LENGTH =
+TRUNCATE_DISPLAY_MAX_LINE_LENGTH = 500 # Maximum line length for truncated display
 TRUNCATE_DISPLAY_MAX_LINES = 4 # Maximum lines for truncated display
 MIN_HIDDEN_LINES_FOR_INDICATOR = 5 # Minimum hidden lines before showing truncation indicator
 SUB_AGENT_RESULT_MAX_LINES = 10 # Maximum lines for sub-agent result display