klaude-code 1.4.3__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,171 +1,37 @@
  import asyncio
- from typing import Literal, cast

  from prompt_toolkit.styles import Style

  from klaude_code.command.command_abc import Agent, CommandABC, CommandResult
- from klaude_code.protocol import commands, events, llm_param, model
+ from klaude_code.config.thinking import get_thinking_picker_data, parse_thinking_value
+ from klaude_code.protocol import commands, events, llm_param, model, op
  from klaude_code.ui.terminal.selector import SelectItem, select_one

- ReasoningEffort = Literal["high", "medium", "low", "minimal", "none", "xhigh"]
-
- # Thinking level options for different protocols
- RESPONSES_LEVELS = ["low", "medium", "high"]
- RESPONSES_GPT51_LEVELS = ["none", "low", "medium", "high"]
- RESPONSES_GPT52_LEVELS = ["none", "low", "medium", "high", "xhigh"]
- RESPONSES_CODEX_MAX_LEVELS = ["medium", "high", "xhigh"]
- RESPONSES_GEMINI_FLASH_LEVELS = ["minimal", "low", "medium", "high"]
-
- ANTHROPIC_LEVELS: list[tuple[str, int | None]] = [
-     ("off", 0),
-     ("low (2048 tokens)", 2048),
-     ("medium (8192 tokens)", 8192),
-     ("high (31999 tokens)", 31999),
- ]
-
-
- def _is_openrouter_model_with_reasoning_effort(model_name: str | None) -> bool:
-     """Check if the model is GPT series, Grok or Gemini 3."""
-     if not model_name:
-         return False
-     model_lower = model_name.lower()
-     return model_lower.startswith(("openai/gpt-", "x-ai/grok-", "google/gemini-3"))
-
-
- def _is_gpt51_model(model_name: str | None) -> bool:
-     """Check if the model is GPT-5.1."""
-     if not model_name:
-         return False
-     return model_name.lower() in ["gpt-5.1", "openai/gpt-5.1", "gpt-5.1-codex-2025-11-13"]
-
-
- def _is_gpt52_model(model_name: str | None) -> bool:
-     """Check if the model is GPT-5.2."""
-     if not model_name:
-         return False
-     return model_name.lower() in ["gpt-5.2", "openai/gpt-5.2"]
-
-
- def _is_codex_max_model(model_name: str | None) -> bool:
-     """Check if the model is GPT-5.1-codex-max."""
-     if not model_name:
-         return False
-     return "codex-max" in model_name.lower()
-
-
- def _is_gemini_flash_model(model_name: str | None) -> bool:
-     """Check if the model is Gemini 3 Flash."""
-     if not model_name:
-         return False
-     return "gemini-3-flash" in model_name.lower()
-
-
- def should_auto_trigger_thinking(model_name: str | None) -> bool:
-     """Check if model should auto-trigger thinking selection on switch."""
-     if not model_name:
-         return False
-     model_lower = model_name.lower()
-     return "gpt-5" in model_lower or "gemini-3" in model_lower or "opus" in model_lower
-
-
- def _get_levels_for_responses(model_name: str | None) -> list[str]:
-     """Get thinking levels for responses protocol."""
-     if _is_codex_max_model(model_name):
-         return RESPONSES_CODEX_MAX_LEVELS
-     if _is_gpt52_model(model_name):
-         return RESPONSES_GPT52_LEVELS
-     if _is_gpt51_model(model_name):
-         return RESPONSES_GPT51_LEVELS
-     if _is_gemini_flash_model(model_name):
-         return RESPONSES_GEMINI_FLASH_LEVELS
-     return RESPONSES_LEVELS
-
-
- def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
-     """Format the current thinking configuration for display."""
-     thinking = config.thinking
-     if not thinking:
-         return "not configured"
-
-     protocol = config.protocol
-
-     if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
-         if thinking.reasoning_effort:
-             return f"reasoning_effort={thinking.reasoning_effort}"
-         return "not set"
-
-     if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
-         if thinking.type == "disabled":
-             return "off"
-         if thinking.type == "enabled":
-             return f"enabled (budget_tokens={thinking.budget_tokens})"
-         return "not set"
-
-     if protocol == llm_param.LLMClientProtocol.OPENROUTER:
-         if _is_openrouter_model_with_reasoning_effort(config.model):
-             if thinking.reasoning_effort:
-                 return f"reasoning_effort={thinking.reasoning_effort}"
-         else:
-             if thinking.type == "disabled":
-                 return "off"
-             if thinking.type == "enabled":
-                 return f"enabled (budget_tokens={thinking.budget_tokens})"
-         return "not set"
-
-     if protocol == llm_param.LLMClientProtocol.OPENAI:
-         if thinking.type == "disabled":
-             return "off"
-         if thinking.type == "enabled":
-             return f"enabled (budget_tokens={thinking.budget_tokens})"
-         return "not set"
-
-     return "unknown protocol"
-
-
  SELECT_STYLE = Style(
      [
          ("instruction", "ansibrightblack"),
          ("pointer", "ansigreen"),
          ("highlighted", "ansigreen"),
          ("text", "ansibrightblack"),
-         ("question", ""),
+         ("question", "bold"),
      ]
  )


- def _select_responses_thinking_sync(model_name: str | None) -> llm_param.Thinking | None:
-     """Select thinking level for responses/codex protocol (sync version)."""
-     levels = _get_levels_for_responses(model_name)
-     items: list[SelectItem[str]] = [
-         SelectItem(title=[("class:text", level + "\n")], value=level, search_text=level) for level in levels
-     ]
-
-     try:
-         result = select_one(
-             message="Select reasoning effort:",
-             items=items,
-             pointer="→",
-             style=SELECT_STYLE,
-             use_search_filter=False,
-         )
-
-         if result is None:
-             return None
-         return llm_param.Thinking(reasoning_effort=cast(ReasoningEffort, result))
-     except KeyboardInterrupt:
+ def _select_thinking_sync(config: llm_param.LLMConfigParameter) -> llm_param.Thinking | None:
+     """Select thinking level (sync version)."""
+     data = get_thinking_picker_data(config)
+     if data is None:
          return None

-
- def _select_anthropic_thinking_sync() -> llm_param.Thinking | None:
-     """Select thinking level for anthropic/openai_compatible protocol (sync version)."""
-     items: list[SelectItem[int]] = [
-         SelectItem(title=[("class:text", label + "\n")], value=tokens or 0, search_text=label)
-         for label, tokens in ANTHROPIC_LEVELS
+     items: list[SelectItem[str]] = [
+         SelectItem(title=[("class:text", opt.label + "\n")], value=opt.value, search_text=opt.label)
+         for opt in data.options
      ]

      try:
          result = select_one(
-             message="Select thinking level:",
+             message=data.message,
              items=items,
              pointer="→",
              style=SELECT_STYLE,
@@ -173,9 +39,7 @@ def _select_anthropic_thinking_sync() -> llm_param.Thinking | None:
          )
          if result is None:
              return None
-         if result == 0:
-             return llm_param.Thinking(type="disabled", budget_tokens=0)
-         return llm_param.Thinking(type="enabled", budget_tokens=result)
+         return parse_thinking_value(result)
      except KeyboardInterrupt:
          return None

@@ -185,24 +49,7 @@ async def select_thinking_for_protocol(config: llm_param.LLMConfigParameter) ->

      Returns the selected Thinking config, or None if user cancelled.
      """
-     protocol = config.protocol
-     model_name = config.model
-
-     if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
-         return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-
-     if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
-         return await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-     if protocol == llm_param.LLMClientProtocol.OPENROUTER:
-         if _is_openrouter_model_with_reasoning_effort(model_name):
-             return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-         return await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-     if protocol == llm_param.LLMClientProtocol.OPENAI:
-         return await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-     return None
+     return await asyncio.to_thread(_select_thinking_sync, config)


  class ThinkingCommand(CommandABC):
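`select_thinking_for_protocol` collapses to a single call because all protocol branching moved into the picker-data helper; the one job left is keeping the blocking prompt_toolkit prompt off the event loop. The pattern in isolation (names here are illustrative only):

```python
import asyncio


def blocking_picker() -> str:
    # Stand-in for a prompt_toolkit prompt, which blocks its calling thread.
    return "high"


async def main() -> None:
    # asyncio.to_thread runs the blocking callable in a worker thread and awaits
    # its result, so the event loop stays responsive while the user decides.
    choice = await asyncio.to_thread(blocking_picker)
    print(choice)


asyncio.run(main())
```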
@@ -222,46 +69,30 @@ class ThinkingCommand(CommandABC):

      async def run(self, agent: Agent, user_input: model.UserInputPayload) -> CommandResult:
          del user_input # unused
-         if not agent.profile:
-             return self._no_change_result(agent, "No profile configured")
+         if agent.profile is None:
+             return CommandResult(events=[])

          config = agent.profile.llm_client.get_llm_config()
-         current = format_current_thinking(config)
-
          new_thinking = await select_thinking_for_protocol(config)
-         if new_thinking is None:
-             return self._no_change_result(agent, "(no change)")

-         # Apply the new thinking configuration
-         config.thinking = new_thinking
-         agent.session.model_thinking = new_thinking
-         new_status = format_current_thinking(config)
-
-         return CommandResult(
-             events=[
-                 events.DeveloperMessageEvent(
-                     session_id=agent.session.id,
-                     item=model.DeveloperMessageItem(
-                         content=f"Thinking changed: {current} -> {new_status}",
-                         command_output=model.CommandOutput(command_name=self.name),
-                     ),
-                 ),
-                 events.WelcomeEvent(
-                     work_dir=str(agent.session.work_dir),
-                     llm_config=config,
-                 ),
-             ]
-         )
+         if new_thinking is None:
+             return CommandResult(
+                 events=[
+                     events.DeveloperMessageEvent(
+                         session_id=agent.session.id,
+                         item=model.DeveloperMessageItem(
+                             content="(no change)",
+                             command_output=model.CommandOutput(command_name=self.name),
+                         ),
+                     )
+                 ]
+             )

-     def _no_change_result(self, agent: "Agent", message: str) -> CommandResult:
          return CommandResult(
-             events=[
-                 events.DeveloperMessageEvent(
+             operations=[
+                 op.ChangeThinkingOperation(
                      session_id=agent.session.id,
-                     item=model.DeveloperMessageItem(
-                         content=message,
-                         command_output=model.CommandOutput(command_name=self.name),
-                     ),
+                     thinking=new_thinking,
                  )
              ]
          )
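Note the behavioral shift in `ThinkingCommand.run`: instead of mutating `config.thinking` and `agent.session.model_thinking` in place and emitting a `WelcomeEvent`, the command now returns an `op.ChangeThinkingOperation` and leaves the state change to whatever consumes `CommandResult.operations`. That consumer is outside this diff; a hypothetical handler could look like the following, where only `ChangeThinkingOperation` and the two assignments (mirroring the removed lines) come from the diff itself:

```python
# Hypothetical operation handler; not part of this diff.
def apply_operation(agent, operation) -> None:
    if isinstance(operation, op.ChangeThinkingOperation):
        # The command only describes intent; the actual state change
        # happens in one central place.
        config = agent.profile.llm_client.get_llm_config()
        config.thinking = operation.thinking
        agent.session.model_thinking = operation.thinking
```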
@@ -1,4 +1,4 @@
- import sys
+ from dataclasses import dataclass

  from klaude_code.config.config import ModelEntry, load_config, print_no_available_models_hint
  from klaude_code.trace import log
@@ -17,15 +17,34 @@ def _normalize_model_key(value: str) -> str:
      return "".join(ch for ch in value.casefold() if ch.isalnum())


- def select_model_from_config(preferred: str | None = None) -> str | None:
+ @dataclass
+ class ModelMatchResult:
+     """Result of model matching.
+
+     Attributes:
+         matched_model: The single matched model name, or None if ambiguous/no match.
+         filtered_models: List of filtered models for interactive selection.
+         filter_hint: The filter hint to show (original preferred value), or None.
+         error_message: Error message if no models available, or None.
      """
-     Interactive single-choice model selector.
-     for `--select-model`
+
+     matched_model: str | None
+     filtered_models: list[ModelEntry]
+     filter_hint: str | None
+     error_message: str | None = None
+
+
+ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
+     """Match model from config without interactive selection.

      If preferred is provided:
-     - Exact match: return immediately
-     - Single partial match (case-insensitive): return immediately
-     - Otherwise: fall through to interactive selection
+     - Exact match: returns matched_model
+     - Single partial match (case-insensitive): returns matched_model
+     - Multiple matches: returns filtered_models for interactive selection
+     - No matches: returns all models with filter_hint=None
+
+     Returns:
+         ModelMatchResult with match state.
      """
      config = load_config()

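The second file's refactor separates pure matching from interactive selection: `match_model_from_config` always returns a `ModelMatchResult`, and the caller decides how to resolve ambiguity. A plausible call-site sketch, assuming a `pick_model_interactively` helper (hypothetical, not in this diff) now owns the prompt:

```python
# Caller sketch; pick_model_interactively is a hypothetical stand-in for the
# interactive selector that used to live inside select_model_from_config.
def resolve_model(preferred: str | None) -> str | None:
    result = match_model_from_config(preferred)
    if result.error_message is not None:
        return None  # e.g. "No models available"
    if result.matched_model is not None:
        return result.matched_model  # unambiguous match, no prompt needed
    # Ambiguous or no filter: offer the candidate list, showing the original
    # query as a filter hint when several models matched it.
    return pick_model_interactively(result.filtered_models, result.filter_hint)
```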
@@ -36,17 +55,22 @@ def select_model_from_config(preferred: str | None = None) -> str | None:

      if not models:
          print_no_available_models_hint()
-         return None
+         return ModelMatchResult(
+             matched_model=None,
+             filtered_models=[],
+             filter_hint=None,
+             error_message="No models available",
+         )

      names: list[str] = [m.model_name for m in models]

      # Try to match preferred model name
-     filtered_models = models
+     filter_hint = preferred
      if preferred and preferred.strip():
          preferred = preferred.strip()
          # Exact match
          if preferred in names:
-             return preferred
+             return ModelMatchResult(matched_model=preferred, filtered_models=models, filter_hint=None)

          preferred_lower = preferred.lower()
          # Case-insensitive exact match (model_name or model_params.model)
@@ -56,7 +80,9 @@ def select_model_from_config(preferred: str | None = None) -> str | None:
              if preferred_lower == m.model_name.lower() or preferred_lower == (m.model_params.model or "").lower()
          ]
          if len(exact_ci_matches) == 1:
-             return exact_ci_matches[0].model_name
+             return ModelMatchResult(
+                 matched_model=exact_ci_matches[0].model_name, filtered_models=models, filter_hint=None
+             )

          # Normalized matching (e.g. gpt52 == gpt-5.2, gpt52 in gpt-5.2-2025-...)
          preferred_norm = _normalize_model_key(preferred)
@@ -69,7 +95,9 @@ def select_model_from_config(preferred: str | None = None) -> str | None:
              or preferred_norm == _normalize_model_key(m.model_params.model or "")
          ]
          if len(normalized_matches) == 1:
-             return normalized_matches[0].model_name
+             return ModelMatchResult(
+                 matched_model=normalized_matches[0].model_name, filtered_models=models, filter_hint=None
+             )

          if not normalized_matches and len(preferred_norm) >= 4:
              normalized_matches = [
@@ -79,7 +107,9 @@ def select_model_from_config(preferred: str | None = None) -> str | None:
                  or preferred_norm in _normalize_model_key(m.model_params.model or "")
              ]
              if len(normalized_matches) == 1:
-                 return normalized_matches[0].model_name
+                 return ModelMatchResult(
+                     matched_model=normalized_matches[0].model_name, filtered_models=models, filter_hint=None
+                 )

          # Partial match (case-insensitive) on model_name or model_params.model.
          # If normalized matching found candidates (even if multiple), prefer those as the filter set.
@@ -89,93 +119,13 @@ def select_model_from_config(preferred: str | None = None) -> str | None:
              if preferred_lower in m.model_name.lower() or preferred_lower in (m.model_params.model or "").lower()
          ]
          if len(matches) == 1:
-             return matches[0].model_name
+             return ModelMatchResult(matched_model=matches[0].model_name, filtered_models=models, filter_hint=None)
          if matches:
              # Multiple matches: filter the list for interactive selection
-             filtered_models = matches
+             return ModelMatchResult(matched_model=None, filtered_models=matches, filter_hint=filter_hint)
          else:
              # No matches: show all models without filter hint
-             preferred = None
              log(("No matching models found. Showing all models.", "yellow"))
+             return ModelMatchResult(matched_model=None, filtered_models=models, filter_hint=None)

-     # Non-interactive environments (CI/pipes) should never enter an interactive prompt.
-     # If we couldn't resolve to a single model deterministically above, fail with a clear hint.
-     if not sys.stdin.isatty() or not sys.stdout.isatty():
-         log(("Error: cannot use interactive model selection without a TTY", "red"))
-         log(("Hint: pass --model <config-name> or set main_model in ~/.klaude/klaude-config.yaml", "yellow"))
-         if preferred:
-             log((f"Hint: '{preferred}' did not resolve to a single configured model", "yellow"))
-         return None
-
-     try:
-         from prompt_toolkit.styles import Style
-
-         from klaude_code.ui.terminal.selector import SelectItem, select_one
-
-         max_model_name_length = max(len(m.model_name) for m in filtered_models)
-
-         def _thinking_info(m: ModelEntry) -> str:
-             thinking = m.model_params.thinking
-             if not thinking:
-                 return ""
-             if thinking.reasoning_effort:
-                 return f"reasoning {thinking.reasoning_effort}"
-             if thinking.budget_tokens:
-                 return f"thinking budget {thinking.budget_tokens}"
-             return "thinking (configured)"
-
-         items: list[SelectItem[str]] = []
-         for m in filtered_models:
-             model_id = m.model_params.model or "N/A"
-             first_line_prefix = f"{m.model_name:<{max_model_name_length}} → "
-             thinking_info = _thinking_info(m)
-             meta_parts: list[str] = [m.provider]
-             if thinking_info:
-                 meta_parts.append(thinking_info)
-             if m.model_params.verbosity:
-                 meta_parts.append(f"verbosity {m.model_params.verbosity}")
-             meta_str = " · ".join(meta_parts)
-             title = [
-                 ("class:msg", first_line_prefix),
-                 ("class:msg bold", model_id),
-                 ("class:meta", f" {meta_str}\n"),
-             ]
-             search_text = f"{m.model_name} {model_id} {m.provider}"
-             items.append(SelectItem(title=title, value=m.model_name, search_text=search_text))
-
-         try:
-             message = f"Select a model (filtered by '{preferred}'):" if preferred else "Select a model:"
-             result = select_one(
-                 message=message,
-                 items=items,
-                 pointer="→",
-                 use_search_filter=True,
-                 initial_value=config.main_model,
-                 style=Style(
-                     [
-                         ("pointer", "ansigreen"),
-                         ("highlighted", "ansigreen"),
-                         ("msg", ""),
-                         ("meta", "fg:ansibrightblack"),
-                         ("text", "ansibrightblack"),
-                         ("question", "bold"),
-                         ("search_prefix", "ansibrightblack"),
-                         # search filter colors at the bottom
-                         ("search_success", "noinherit fg:ansigreen"),
-                         ("search_none", "noinherit fg:ansired"),
-                     ]
-                 ),
-             )
-             if isinstance(result, str) and result in names:
-                 return result
-         except KeyboardInterrupt:
-             return None
-     except Exception as e:
-         log((f"Failed to use prompt_toolkit for model selection: {e}", "yellow"))
-     # Never return an unvalidated model name here.
-     # If we can't interactively select, fall back to a known configured model.
-     if isinstance(preferred, str) and preferred in names:
-         return preferred
-     if config.main_model and config.main_model in names:
-         return config.main_model
-     return None
+     return ModelMatchResult(matched_model=None, filtered_models=models, filter_hint=None)
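For a concrete feel of the matching tiers kept above: `_normalize_model_key` casefolds and strips non-alphanumerics, so a query like `gpt52` hits `GPT-5.2` at the exact normalized tier and a dated variant at the substring tier, which is gated on queries of at least four normalized characters to avoid over-matching. A quick self-check, using the function exactly as it appears in the diff:

```python
def _normalize_model_key(value: str) -> str:
    return "".join(ch for ch in value.casefold() if ch.isalnum())


assert _normalize_model_key("GPT-5.2") == "gpt52"  # exact normalized match for "gpt52"
assert _normalize_model_key("gpt52") in _normalize_model_key("gpt-5.2-2025-11-13")  # substring tier
assert len(_normalize_model_key("gpt52")) >= 4  # passes the substring-tier length gate
```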