klaude-code 1.4.3-py3-none-any.whl → 1.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/cli/main.py +22 -11
- klaude_code/cli/runtime.py +171 -34
- klaude_code/command/__init__.py +4 -0
- klaude_code/command/fork_session_cmd.py +220 -2
- klaude_code/command/help_cmd.py +2 -1
- klaude_code/command/model_cmd.py +3 -5
- klaude_code/command/model_select.py +84 -0
- klaude_code/command/refresh_cmd.py +4 -4
- klaude_code/command/registry.py +23 -0
- klaude_code/command/resume_cmd.py +62 -2
- klaude_code/command/thinking_cmd.py +30 -199
- klaude_code/config/select_model.py +47 -97
- klaude_code/config/thinking.py +255 -0
- klaude_code/core/executor.py +53 -63
- klaude_code/llm/usage.py +1 -1
- klaude_code/protocol/commands.py +11 -0
- klaude_code/protocol/op.py +15 -0
- klaude_code/session/__init__.py +2 -2
- klaude_code/session/selector.py +65 -65
- klaude_code/session/session.py +18 -12
- klaude_code/ui/modes/repl/completers.py +27 -15
- klaude_code/ui/modes/repl/event_handler.py +24 -33
- klaude_code/ui/modes/repl/input_prompt_toolkit.py +393 -57
- klaude_code/ui/modes/repl/key_bindings.py +30 -10
- klaude_code/ui/modes/repl/renderer.py +1 -1
- klaude_code/ui/renderers/developer.py +2 -2
- klaude_code/ui/renderers/metadata.py +11 -6
- klaude_code/ui/renderers/user_input.py +18 -1
- klaude_code/ui/rich/markdown.py +41 -9
- klaude_code/ui/rich/status.py +83 -22
- klaude_code/ui/rich/theme.py +2 -2
- klaude_code/ui/terminal/notifier.py +42 -0
- klaude_code/ui/terminal/selector.py +488 -136
- {klaude_code-1.4.3.dist-info → klaude_code-1.6.0.dist-info}/METADATA +1 -1
- {klaude_code-1.4.3.dist-info → klaude_code-1.6.0.dist-info}/RECORD +37 -35
- {klaude_code-1.4.3.dist-info → klaude_code-1.6.0.dist-info}/WHEEL +0 -0
- {klaude_code-1.4.3.dist-info → klaude_code-1.6.0.dist-info}/entry_points.txt +0 -0
klaude_code/command/model_select.py ADDED
@@ -0,0 +1,84 @@
+"""Interactive model selection for CLI."""
+
+import sys
+
+from klaude_code.config.config import load_config
+from klaude_code.config.select_model import match_model_from_config
+from klaude_code.trace import log
+
+
+def select_model_interactive(preferred: str | None = None) -> str | None:
+    """Interactive single-choice model selector.
+
+    This function combines matching logic with interactive UI selection.
+    For CLI usage.
+
+    If preferred is provided:
+    - Exact match: return immediately
+    - Single partial match (case-insensitive): return immediately
+    - Otherwise: fall through to interactive selection
+    """
+    result = match_model_from_config(preferred)
+
+    if result.error_message:
+        return None
+
+    if result.matched_model:
+        return result.matched_model
+
+    # Non-interactive environments (CI/pipes) should never enter an interactive prompt.
+    # If we couldn't resolve to a single model deterministically above, fail with a clear hint.
+    if not sys.stdin.isatty() or not sys.stdout.isatty():
+        log(("Error: cannot use interactive model selection without a TTY", "red"))
+        log(("Hint: pass --model <config-name> or set main_model in ~/.klaude/klaude-config.yaml", "yellow"))
+        if preferred:
+            log((f"Hint: '{preferred}' did not resolve to a single configured model", "yellow"))
+        return None
+
+    # Interactive selection
+    from prompt_toolkit.styles import Style
+
+    from klaude_code.ui.terminal.selector import build_model_select_items, select_one
+
+    config = load_config()
+    names = [m.model_name for m in result.filtered_models]
+
+    try:
+        items = build_model_select_items(result.filtered_models)
+
+        message = f"Select a model (filtered by '{result.filter_hint}'):" if result.filter_hint else "Select a model:"
+        selected = select_one(
+            message=message,
+            items=items,
+            pointer="->",
+            use_search_filter=True,
+            initial_value=config.main_model,
+            style=Style(
+                [
+                    ("pointer", "ansigreen"),
+                    ("highlighted", "ansigreen"),
+                    ("msg", ""),
+                    ("meta", "fg:ansibrightblack"),
+                    ("text", "ansibrightblack"),
+                    ("question", "bold"),
+                    ("search_prefix", "ansibrightblack"),
+                    # search filter colors at the bottom
+                    ("search_success", "noinherit fg:ansigreen"),
+                    ("search_none", "noinherit fg:ansired"),
+                ]
+            ),
+        )
+        if isinstance(selected, str) and selected in names:
+            return selected
+    except KeyboardInterrupt:
+        return None
+    except Exception as e:
+        log((f"Failed to use prompt_toolkit for model selection: {e}", "yellow"))
+        # Never return an unvalidated model name here.
+        # If we can't interactively select, fall back to a known configured model.
+        if isinstance(preferred, str) and preferred in names:
+            return preferred
+        if config.main_model and config.main_model in names:
+            return config.main_model
+
+    return None
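Note on usage: every return path above is validated against the configured model list. As a minimal sketch of how a CLI entry point might consume this helper (the wiring below is an assumption for illustration; klaude_code/cli/main.py also changed in this release but its body is not shown in this diff):

    from klaude_code.command.model_select import select_model_interactive

    def resolve_model_or_exit(flag_value: str | None) -> str:
        # select_model_interactive() already logs TTY/config hints on failure,
        # so the caller only decides the exit behavior.
        selected = select_model_interactive(preferred=flag_value)
        if selected is None:
            raise SystemExit(1)
        return selected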
klaude_code/command/refresh_cmd.py CHANGED
@@ -23,7 +23,7 @@ class RefreshTerminalCommand(CommandABC):
 
         os.system("cls" if os.name == "nt" else "clear")
 
-        result = CommandResult(
+        return CommandResult(
             events=[
                 events.WelcomeEvent(
                     work_dir=str(agent.session.work_dir),
@@ -35,7 +35,7 @@ class RefreshTerminalCommand(CommandABC):
                     updated_at=agent.session.updated_at,
                     is_load=False,
                 ),
-            ]
+            ],
+            persist_user_input=False,
+            persist_events=False,
         )
-
-        return result
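Net effect of the refresh_cmd.py change: the old assign-then-return pair collapses into a single return, and /refresh now opts out of persistence. A hedged sketch of the resulting CommandResult contract (both field names appear in this diff; the semantics in the comments are assumptions):

    from klaude_code.command.command_abc import CommandResult
    from klaude_code.protocol import events

    def no_persist_result(welcome: events.WelcomeEvent) -> CommandResult:
        return CommandResult(
            events=[welcome],
            persist_user_input=False,  # assumed: the "/refresh" input is not logged
            persist_events=False,      # assumed: the banner is not replayed on resume
        )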
klaude_code/command/registry.py CHANGED
@@ -105,6 +105,29 @@ def get_commands() -> dict[commands.CommandName | str, "CommandABC"]:
     return _COMMANDS.copy()
 
 
+def get_command_info_list() -> list[commands.CommandInfo]:
+    """Get lightweight command metadata for UI purposes.
+
+    Returns CommandInfo list in registration order (display order).
+    """
+    _ensure_commands_loaded()
+    return [
+        commands.CommandInfo(
+            name=_command_key_to_str(cmd.name),
+            summary=cmd.summary,
+            support_addition_params=cmd.support_addition_params,
+            placeholder=cmd.placeholder,
+        )
+        for cmd in _COMMANDS.values()
+    ]
+
+
+def get_command_names() -> frozenset[str]:
+    """Get all registered command names as a frozen set for fast lookup."""
+    _ensure_commands_loaded()
+    return frozenset(_command_key_to_str(key) for key in _COMMANDS)
+
+
 def is_slash_command_name(name: str) -> bool:
     _ensure_commands_loaded()
     return _resolve_command_key(name) is not None
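These two helpers expose command metadata without handing out the command objects themselves, and get_command_names() returns a frozenset so hot-path membership checks are O(1). A minimal consumer sketch (only the two registry functions and the CommandInfo fields are from this diff; the completer-style wiring is an assumption):

    from klaude_code.command.registry import get_command_info_list, get_command_names

    def slash_candidates(prefix: str) -> list[tuple[str, str]]:
        # Registration order doubles as display order, per the docstring above.
        return [
            (info.name, info.summary)
            for info in get_command_info_list()
            if info.name.startswith(prefix)
        ]

    def looks_like_command(token: str) -> bool:
        return token in get_command_names()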
klaude_code/command/resume_cmd.py CHANGED
@@ -1,8 +1,68 @@
 import asyncio
 
+from prompt_toolkit.styles import Style
+
 from klaude_code.command.command_abc import Agent, CommandABC, CommandResult
 from klaude_code.protocol import commands, events, model, op
-from klaude_code.session.selector import
+from klaude_code.session.selector import build_session_select_options, format_user_messages_display
+from klaude_code.trace import log
+from klaude_code.ui.terminal.selector import SelectItem, select_one
+
+SESSION_SELECT_STYLE = Style(
+    [
+        ("msg", "fg:ansibrightblack"),
+        ("meta", ""),
+        ("pointer", "bold fg:ansigreen"),
+        ("highlighted", "fg:ansigreen"),
+        ("search_prefix", "fg:ansibrightblack"),
+        ("search_success", "noinherit fg:ansigreen"),
+        ("search_none", "noinherit fg:ansired"),
+        ("question", "bold"),
+        ("text", ""),
+    ]
+)
+
+
+def select_session_sync() -> str | None:
+    """Interactive session selection (sync version for asyncio.to_thread)."""
+    options = build_session_select_options()
+    if not options:
+        log("No sessions found for this project.")
+        return None
+
+    items: list[SelectItem[str]] = []
+    for idx, opt in enumerate(options, 1):
+        display_msgs = format_user_messages_display(opt.user_messages)
+        title: list[tuple[str, str]] = []
+        title.append(("fg:ansibrightblack", f"{idx:2}. "))
+        title.append(
+            ("class:meta", f"{opt.relative_time} · {opt.messages_count} · {opt.model_name} · {opt.session_id}\n")
+        )
+        for msg in display_msgs:
+            if msg == "⋮":
+                title.append(("class:msg", f" {msg}\n"))
+            else:
+                title.append(("class:msg", f" > {msg}\n"))
+        title.append(("", "\n"))
+
+        search_text = " ".join(opt.user_messages) + f" {opt.model_name} {opt.session_id}"
+        items.append(
+            SelectItem(
+                title=title,
+                value=opt.session_id,
+                search_text=search_text,
+            )
+        )
+
+    try:
+        return select_one(
+            message="Select a session to resume:",
+            items=items,
+            pointer="→",
+            style=SESSION_SELECT_STYLE,
+        )
+    except KeyboardInterrupt:
+        return None
 
 
 class ResumeCommand(CommandABC):
@@ -33,7 +93,7 @@ class ResumeCommand(CommandABC):
             )
             return CommandResult(events=[event], persist_user_input=False, persist_events=False)
 
-        selected_session_id = await asyncio.to_thread(
+        selected_session_id = await asyncio.to_thread(select_session_sync)
         if selected_session_id is None:
             event = events.DeveloperMessageEvent(
                 session_id=agent.session.id,
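select_session_sync() is deliberately synchronous: the prompt_toolkit selector blocks on terminal input, so ResumeCommand pushes it onto a worker thread via asyncio.to_thread (second hunk above). The pattern in isolation, with stand-in names rather than the real command code:

    import asyncio

    def blocking_picker() -> str | None:
        # Stand-in for select_session_sync(); the real function blocks inside
        # prompt_toolkit until the user picks a session or cancels.
        return "20260101-abcdef"  # hypothetical session id

    async def run_command() -> str | None:
        # Offloading keeps the event loop responsive while the terminal UI runs.
        return await asyncio.to_thread(blocking_picker)

    print(asyncio.run(run_command()))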
klaude_code/command/thinking_cmd.py CHANGED
@@ -1,171 +1,37 @@
 import asyncio
-from typing import Literal, cast
 
 from prompt_toolkit.styles import Style
 
 from klaude_code.command.command_abc import Agent, CommandABC, CommandResult
-from klaude_code.
+from klaude_code.config.thinking import get_thinking_picker_data, parse_thinking_value
+from klaude_code.protocol import commands, events, llm_param, model, op
 from klaude_code.ui.terminal.selector import SelectItem, select_one
 
-ReasoningEffort = Literal["high", "medium", "low", "minimal", "none", "xhigh"]
-
-# Thinking level options for different protocols
-RESPONSES_LEVELS = ["low", "medium", "high"]
-RESPONSES_GPT51_LEVELS = ["none", "low", "medium", "high"]
-RESPONSES_GPT52_LEVELS = ["none", "low", "medium", "high", "xhigh"]
-RESPONSES_CODEX_MAX_LEVELS = ["medium", "high", "xhigh"]
-RESPONSES_GEMINI_FLASH_LEVELS = ["minimal", "low", "medium", "high"]
-
-ANTHROPIC_LEVELS: list[tuple[str, int | None]] = [
-    ("off", 0),
-    ("low (2048 tokens)", 2048),
-    ("medium (8192 tokens)", 8192),
-    ("high (31999 tokens)", 31999),
-]
-
-
-def _is_openrouter_model_with_reasoning_effort(model_name: str | None) -> bool:
-    """Check if the model is GPT series, Grok or Gemini 3."""
-    if not model_name:
-        return False
-    model_lower = model_name.lower()
-    return model_lower.startswith(("openai/gpt-", "x-ai/grok-", "google/gemini-3"))
-
-
-def _is_gpt51_model(model_name: str | None) -> bool:
-    """Check if the model is GPT-5.1."""
-    if not model_name:
-        return False
-    return model_name.lower() in ["gpt-5.1", "openai/gpt-5.1", "gpt-5.1-codex-2025-11-13"]
-
-
-def _is_gpt52_model(model_name: str | None) -> bool:
-    """Check if the model is GPT-5.2."""
-    if not model_name:
-        return False
-    return model_name.lower() in ["gpt-5.2", "openai/gpt-5.2"]
-
-
-def _is_codex_max_model(model_name: str | None) -> bool:
-    """Check if the model is GPT-5.1-codex-max."""
-    if not model_name:
-        return False
-    return "codex-max" in model_name.lower()
-
-
-def _is_gemini_flash_model(model_name: str | None) -> bool:
-    """Check if the model is Gemini 3 Flash."""
-    if not model_name:
-        return False
-    return "gemini-3-flash" in model_name.lower()
-
-
-def should_auto_trigger_thinking(model_name: str | None) -> bool:
-    """Check if model should auto-trigger thinking selection on switch."""
-    if not model_name:
-        return False
-    model_lower = model_name.lower()
-    return "gpt-5" in model_lower or "gemini-3" in model_lower or "opus" in model_lower
-
-
-def _get_levels_for_responses(model_name: str | None) -> list[str]:
-    """Get thinking levels for responses protocol."""
-    if _is_codex_max_model(model_name):
-        return RESPONSES_CODEX_MAX_LEVELS
-    if _is_gpt52_model(model_name):
-        return RESPONSES_GPT52_LEVELS
-    if _is_gpt51_model(model_name):
-        return RESPONSES_GPT51_LEVELS
-    if _is_gemini_flash_model(model_name):
-        return RESPONSES_GEMINI_FLASH_LEVELS
-    return RESPONSES_LEVELS
-
-
-def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
-    """Format the current thinking configuration for display."""
-    thinking = config.thinking
-    if not thinking:
-        return "not configured"
-
-    protocol = config.protocol
-
-    if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
-        if thinking.reasoning_effort:
-            return f"reasoning_effort={thinking.reasoning_effort}"
-        return "not set"
-
-    if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
-        if thinking.type == "disabled":
-            return "off"
-        if thinking.type == "enabled":
-            return f"enabled (budget_tokens={thinking.budget_tokens})"
-        return "not set"
-
-    if protocol == llm_param.LLMClientProtocol.OPENROUTER:
-        if _is_openrouter_model_with_reasoning_effort(config.model):
-            if thinking.reasoning_effort:
-                return f"reasoning_effort={thinking.reasoning_effort}"
-        else:
-            if thinking.type == "disabled":
-                return "off"
-            if thinking.type == "enabled":
-                return f"enabled (budget_tokens={thinking.budget_tokens})"
-        return "not set"
-
-    if protocol == llm_param.LLMClientProtocol.OPENAI:
-        if thinking.type == "disabled":
-            return "off"
-        if thinking.type == "enabled":
-            return f"enabled (budget_tokens={thinking.budget_tokens})"
-        return "not set"
-
-    return "unknown protocol"
-
-
 SELECT_STYLE = Style(
     [
         ("instruction", "ansibrightblack"),
         ("pointer", "ansigreen"),
         ("highlighted", "ansigreen"),
         ("text", "ansibrightblack"),
-        ("question", ""),
+        ("question", "bold"),
     ]
 )
 
 
-def
-    """Select thinking level
-
-
-        SelectItem(title=[("class:text", level + "\n")], value=level, search_text=level) for level in levels
-    ]
-
-    try:
-        result = select_one(
-            message="Select reasoning effort:",
-            items=items,
-            pointer="→",
-            style=SELECT_STYLE,
-            use_search_filter=False,
-        )
-
-        if result is None:
-            return None
-        return llm_param.Thinking(reasoning_effort=cast(ReasoningEffort, result))
-    except KeyboardInterrupt:
+def _select_thinking_sync(config: llm_param.LLMConfigParameter) -> llm_param.Thinking | None:
+    """Select thinking level (sync version)."""
+    data = get_thinking_picker_data(config)
+    if data is None:
         return None
 
-
-
-
-    items: list[SelectItem[int]] = [
-        SelectItem(title=[("class:text", label + "\n")], value=tokens or 0, search_text=label)
-        for label, tokens in ANTHROPIC_LEVELS
+    items: list[SelectItem[str]] = [
+        SelectItem(title=[("class:text", opt.label + "\n")], value=opt.value, search_text=opt.label)
+        for opt in data.options
     ]
 
    try:
         result = select_one(
-            message=
+            message=data.message,
             items=items,
             pointer="→",
             style=SELECT_STYLE,
@@ -173,9 +39,7 @@ def _select_anthropic_thinking_sync() -> llm_param.Thinking | None:
         )
         if result is None:
             return None
-
-        return llm_param.Thinking(type="disabled", budget_tokens=0)
-        return llm_param.Thinking(type="enabled", budget_tokens=result)
+        return parse_thinking_value(result)
     except KeyboardInterrupt:
         return None
 
@@ -185,24 +49,7 @@ async def select_thinking_for_protocol(config: llm_param.LLMConfigParameter) ->
 
     Returns the selected Thinking config, or None if user cancelled.
     """
-
-    model_name = config.model
-
-    if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
-        return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-
-    if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
-        return await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-    if protocol == llm_param.LLMClientProtocol.OPENROUTER:
-        if _is_openrouter_model_with_reasoning_effort(model_name):
-            return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-        return await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-    if protocol == llm_param.LLMClientProtocol.OPENAI:
-        return await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-    return None
+    return await asyncio.to_thread(_select_thinking_sync, config)
 
 
 class ThinkingCommand(CommandABC):
@@ -222,46 +69,30 @@ class ThinkingCommand(CommandABC):
 
     async def run(self, agent: Agent, user_input: model.UserInputPayload) -> CommandResult:
         del user_input  # unused
-        if
-            return
+        if agent.profile is None:
+            return CommandResult(events=[])
 
         config = agent.profile.llm_client.get_llm_config()
-        current = format_current_thinking(config)
-
         new_thinking = await select_thinking_for_protocol(config)
-        if new_thinking is None:
-            return self._no_change_result(agent, "(no change)")
 
-
-
-
-
-
-
-
-
-
-
-
-
-                    ),
-                ),
-                events.WelcomeEvent(
-                    work_dir=str(agent.session.work_dir),
-                    llm_config=config,
-                ),
-            ]
-        )
+        if new_thinking is None:
+            return CommandResult(
+                events=[
+                    events.DeveloperMessageEvent(
+                        session_id=agent.session.id,
+                        item=model.DeveloperMessageItem(
+                            content="(no change)",
+                            command_output=model.CommandOutput(command_name=self.name),
+                        ),
+                    )
+                ]
+            )
 
-    def _no_change_result(self, agent: "Agent", message: str) -> CommandResult:
         return CommandResult(
-
-
+            operations=[
+                op.ChangeThinkingOperation(
                     session_id=agent.session.id,
-
-                        content=message,
-                        command_output=model.CommandOutput(command_name=self.name),
-                    ),
+                    thinking=new_thinking,
                 )
             ]
         )