klaude-code 1.2.10__py3-none-any.whl → 1.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. klaude_code/cli/main.py +2 -7
  2. klaude_code/cli/runtime.py +23 -19
  3. klaude_code/command/__init__.py +29 -26
  4. klaude_code/command/clear_cmd.py +0 -2
  5. klaude_code/command/diff_cmd.py +0 -2
  6. klaude_code/command/export_cmd.py +0 -2
  7. klaude_code/command/help_cmd.py +0 -2
  8. klaude_code/command/model_cmd.py +0 -2
  9. klaude_code/command/refresh_cmd.py +0 -2
  10. klaude_code/command/registry.py +4 -8
  11. klaude_code/command/release_notes_cmd.py +0 -2
  12. klaude_code/command/status_cmd.py +2 -4
  13. klaude_code/command/terminal_setup_cmd.py +0 -2
  14. klaude_code/command/thinking_cmd.py +227 -0
  15. klaude_code/config/select_model.py +5 -15
  16. klaude_code/const/__init__.py +1 -1
  17. klaude_code/core/agent.py +1 -1
  18. klaude_code/core/executor.py +1 -4
  19. klaude_code/core/manager/agent_manager.py +15 -9
  20. klaude_code/core/manager/llm_clients_builder.py +4 -7
  21. klaude_code/core/prompt.py +5 -5
  22. klaude_code/core/prompts/prompt-claude-code.md +1 -12
  23. klaude_code/core/prompts/prompt-minimal.md +12 -0
  24. klaude_code/core/task.py +5 -2
  25. klaude_code/core/tool/memory/memory_tool.md +4 -0
  26. klaude_code/core/tool/memory/skill_loader.py +1 -1
  27. klaude_code/core/tool/todo/todo_write_tool.md +0 -157
  28. klaude_code/core/tool/todo/todo_write_tool_raw.md +182 -0
  29. klaude_code/core/tool/tool_registry.py +3 -4
  30. klaude_code/core/turn.py +0 -1
  31. klaude_code/llm/anthropic/client.py +56 -47
  32. klaude_code/llm/client.py +1 -19
  33. klaude_code/llm/codex/client.py +49 -30
  34. klaude_code/llm/openai_compatible/client.py +52 -34
  35. klaude_code/llm/openrouter/client.py +63 -41
  36. klaude_code/llm/responses/client.py +56 -39
  37. klaude_code/llm/usage.py +1 -49
  38. klaude_code/protocol/commands.py +1 -0
  39. klaude_code/protocol/llm_param.py +1 -9
  40. klaude_code/protocol/model.py +4 -3
  41. klaude_code/protocol/op.py +5 -2
  42. klaude_code/protocol/sub_agent.py +1 -0
  43. klaude_code/session/export.py +3 -0
  44. klaude_code/session/selector.py +12 -7
  45. klaude_code/session/session.py +1 -5
  46. klaude_code/session/templates/export_session.html +155 -0
  47. klaude_code/ui/modes/repl/completers.py +3 -3
  48. klaude_code/ui/modes/repl/event_handler.py +1 -5
  49. klaude_code/ui/modes/repl/input_prompt_toolkit.py +3 -34
  50. klaude_code/ui/renderers/metadata.py +11 -1
  51. klaude_code/ui/renderers/tools.py +13 -2
  52. klaude_code/ui/rich/markdown.py +4 -1
  53. klaude_code/ui/terminal/__init__.py +55 -0
  54. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/METADATA +1 -4
  55. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/RECORD +57 -54
  56. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/WHEEL +0 -0
  57. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/entry_points.txt +0 -0
klaude_code/cli/main.py CHANGED
@@ -3,7 +3,6 @@ import datetime
3
3
  import os
4
4
  import subprocess
5
5
  import sys
6
- import uuid
7
6
  from importlib.metadata import PackageNotFoundError
8
7
  from importlib.metadata import version as pkg_version
9
8
 
@@ -340,8 +339,8 @@ def main_callback(
340
339
  return
341
340
 
342
341
  # Resolve session id before entering asyncio loop
342
+ # session_id=None means create a new session
343
343
  session_id: str | None = None
344
- is_new_session = False
345
344
  if resume:
346
345
  session_id = resume_select_session()
347
346
  if session_id is None:
@@ -349,10 +348,7 @@ def main_callback(
349
348
  # If user didn't pick, allow fallback to --continue
350
349
  if session_id is None and continue_:
351
350
  session_id = Session.most_recent_session_id()
352
- # If still no session_id, generate a new one for a new session
353
- if session_id is None:
354
- session_id = uuid.uuid4().hex
355
- is_new_session = True
351
+ # If still no session_id, leave as None to create a new session
356
352
 
357
353
  debug_enabled, debug_filters = resolve_debug_settings(debug, debug_filter)
358
354
 
@@ -367,6 +363,5 @@ def main_callback(
367
363
  run_interactive(
368
364
  init_config=init_config,
369
365
  session_id=session_id,
370
- is_new_session=is_new_session,
371
366
  )
372
367
  )
@@ -1,6 +1,5 @@
1
1
  import asyncio
2
2
  import sys
3
- import uuid
4
3
  from dataclasses import dataclass
5
4
  from typing import Any, Protocol
6
5
 
@@ -15,7 +14,6 @@ from klaude_code.core.executor import Executor
15
14
  from klaude_code.core.manager import build_llm_clients
16
15
  from klaude_code.protocol import events, op
17
16
  from klaude_code.protocol.model import UserInputPayload
18
- from klaude_code.protocol.sub_agent import iter_sub_agent_profiles
19
17
  from klaude_code.trace import DebugType, log, set_debug_logging
20
18
  from klaude_code.ui.modes.repl import build_repl_status_snapshot
21
19
  from klaude_code.ui.modes.repl.input_prompt_toolkit import REPLStatusSnapshot
@@ -97,11 +95,9 @@ async def initialize_app_components(init_config: AppInitConfig) -> AppComponents
97
95
 
98
96
  # Initialize LLM clients
99
97
  try:
100
- enabled_sub_agents = [p.name for p in iter_sub_agent_profiles()]
101
98
  llm_clients = build_llm_clients(
102
99
  config,
103
100
  model_override=init_config.model,
104
- enabled_sub_agents=enabled_sub_agents,
105
101
  )
106
102
  except ValueError as exc:
107
103
  if init_config.model:
@@ -207,12 +203,14 @@ async def run_exec(init_config: AppInitConfig, input_content: str) -> None:
207
203
  components = await initialize_app_components(init_config)
208
204
 
209
205
  try:
210
- # Generate a new session ID for exec mode
211
- session_id = uuid.uuid4().hex
212
-
213
- await components.executor.submit_and_wait(op.InitAgentOperation(session_id=session_id, is_new_session=True))
206
+ # Initialize a new session (session_id=None means create new)
207
+ await components.executor.submit_and_wait(op.InitAgentOperation())
214
208
  await components.event_queue.join()
215
209
 
210
+ # Get the session_id from the newly created agent
211
+ session_ids = components.executor.context.agent_manager.active_session_ids()
212
+ session_id = session_ids[0] if session_ids else None
213
+
216
214
  # Submit the input content directly
217
215
  await components.executor.submit_and_wait(
218
216
  op.UserInputOperation(input=UserInputPayload(text=input_content), session_id=session_id)
@@ -224,11 +222,12 @@ async def run_exec(init_config: AppInitConfig, input_content: str) -> None:
224
222
  await cleanup_app_components(components)
225
223
 
226
224
 
227
- async def run_interactive(
228
- init_config: AppInitConfig, session_id: str | None = None, *, is_new_session: bool = False
229
- ) -> None:
230
- """Run the interactive REPL using the provided configuration."""
225
+ async def run_interactive(init_config: AppInitConfig, session_id: str | None = None) -> None:
226
+ """Run the interactive REPL using the provided configuration.
231
227
 
228
+ If session_id is None, a new session is created with an auto-generated ID.
229
+ If session_id is provided, attempts to resume that session.
230
+ """
232
231
  components = await initialize_app_components(init_config)
233
232
 
234
233
  # No theme persistence from CLI anymore; config.theme controls theme when set.
@@ -236,8 +235,10 @@ async def run_interactive(
236
235
  # Create status provider for bottom toolbar
237
236
  def _status_provider() -> REPLStatusSnapshot:
238
237
  agent: Agent | None = None
239
- if session_id and session_id in components.executor.context.active_agents:
240
- agent = components.executor.context.active_agents[session_id]
238
+ # Get the first active agent (there should only be one in interactive mode)
239
+ active_agents = components.executor.context.active_agents
240
+ if active_agents:
241
+ agent = next(iter(active_agents.values()), None)
241
242
 
242
243
  # Check for updates (returns None if uv not available)
243
244
  update_message = get_update_message()
@@ -279,10 +280,13 @@ async def run_interactive(
279
280
  restore_sigint = install_sigint_double_press_exit(_show_toast_once, _hide_progress)
280
281
 
281
282
  try:
282
- await components.executor.submit_and_wait(
283
- op.InitAgentOperation(session_id=session_id, is_new_session=is_new_session)
284
- )
283
+ await components.executor.submit_and_wait(op.InitAgentOperation(session_id=session_id))
285
284
  await components.event_queue.join()
285
+
286
+ # Get the actual session_id (may have been auto-generated if None was passed)
287
+ active_session_ids = components.executor.context.agent_manager.active_session_ids()
288
+ active_session_id = active_session_ids[0] if active_session_ids else session_id
289
+
286
290
  # Input
287
291
  await input_provider.start()
288
292
  async for user_input in input_provider.iter_inputs():
@@ -293,7 +297,7 @@ async def run_interactive(
293
297
  continue
294
298
  # Submit user input operation - directly use the payload from iter_inputs
295
299
  submission_id = await components.executor.submit(
296
- op.UserInputOperation(input=user_input, session_id=session_id)
300
+ op.UserInputOperation(input=user_input, session_id=active_session_id)
297
301
  )
298
302
  # If it's an interactive command (e.g., /model), avoid starting the ESC monitor
299
303
  # to prevent TTY conflicts with interactive prompts (questionary/prompt_toolkit).
@@ -302,7 +306,7 @@ async def run_interactive(
302
306
  else:
303
307
  # Esc monitor for long-running, interruptible operations
304
308
  async def _on_esc_interrupt() -> None:
305
- await components.executor.submit(op.InterruptOperation(target_session_id=session_id))
309
+ await components.executor.submit(op.InterruptOperation(target_session_id=active_session_id))
306
310
 
307
311
  stop_event, esc_task = start_esc_interrupt_monitor(_on_esc_interrupt)
308
312
  # Wait for this specific task to complete before accepting next input
@@ -5,7 +5,7 @@ from .registry import (
5
5
  has_interactive_command,
6
6
  is_slash_command_name,
7
7
  load_prompt_commands,
8
- register_command,
8
+ register,
9
9
  )
10
10
 
11
11
  # Lazy load commands to avoid heavy imports at module load time
@@ -17,38 +17,41 @@ def ensure_commands_loaded() -> None:
17
17
 
18
18
  This function is called internally by registry functions like get_commands(),
19
19
  dispatch_command(), etc. It can also be called explicitly if early loading is desired.
20
+
21
+ Commands are registered in display order - the order here determines
22
+ the order shown in slash command completion.
20
23
  """
21
24
  global _commands_loaded
22
25
  if _commands_loaded:
23
26
  return
24
27
  _commands_loaded = True
25
28
 
26
- # Import command modules to trigger @register_command decorators
27
- from . import clear_cmd as _clear_cmd # noqa: F401
28
- from . import diff_cmd as _diff_cmd # noqa: F401
29
- from . import export_cmd as _export_cmd # noqa: F401
30
- from . import help_cmd as _help_cmd # noqa: F401
31
- from . import model_cmd as _model_cmd # noqa: F401
32
- from . import refresh_cmd as _refresh_cmd # noqa: F401
33
- from . import release_notes_cmd as _release_notes_cmd # noqa: F401
34
- from . import status_cmd as _status_cmd # noqa: F401
35
- from . import terminal_setup_cmd as _terminal_setup_cmd # noqa: F401
36
-
37
- # Suppress unused variable warnings
38
- _ = (
39
- _clear_cmd,
40
- _diff_cmd,
41
- _export_cmd,
42
- _help_cmd,
43
- _model_cmd,
44
- _refresh_cmd,
45
- _release_notes_cmd,
46
- _status_cmd,
47
- _terminal_setup_cmd,
48
- )
29
+ # Import and register commands in display order
30
+ from .clear_cmd import ClearCommand
31
+ from .model_cmd import ModelCommand
32
+ from .status_cmd import StatusCommand
33
+ from .diff_cmd import DiffCommand
34
+ from .export_cmd import ExportCommand
35
+ from .thinking_cmd import ThinkingCommand
36
+ from .help_cmd import HelpCommand
37
+ from .refresh_cmd import RefreshTerminalCommand
38
+ from .terminal_setup_cmd import TerminalSetupCommand
39
+ from .release_notes_cmd import ReleaseNotesCommand
49
40
 
50
- # Load prompt-based commands
41
+ # Register in desired display order
42
+ register(ExportCommand())
43
+ register(RefreshTerminalCommand())
44
+ register(ThinkingCommand())
45
+ register(ModelCommand())
51
46
  load_prompt_commands()
47
+ register(ClearCommand())
48
+ register(StatusCommand())
49
+ register(DiffCommand())
50
+ register(HelpCommand())
51
+ register(ReleaseNotesCommand())
52
+ register(TerminalSetupCommand())
53
+
54
+ # Load prompt-based commands (appended after built-in commands)
52
55
 
53
56
 
54
57
  # Lazy accessors for command classes
@@ -63,6 +66,7 @@ def __getattr__(name: str) -> object:
63
66
  "ReleaseNotesCommand": "release_notes_cmd",
64
67
  "StatusCommand": "status_cmd",
65
68
  "TerminalSetupCommand": "terminal_setup_cmd",
69
+ "ThinkingCommand": "thinking_cmd",
66
70
  }
67
71
  if name in _commands_map:
68
72
  import importlib
@@ -77,7 +81,6 @@ __all__ = [
77
81
  # "ClearCommand", "DiffCommand", "HelpCommand", "ModelCommand",
78
82
  # "ExportCommand", "RefreshTerminalCommand", "ReleaseNotesCommand",
79
83
  # "StatusCommand", "TerminalSetupCommand",
80
- "register_command",
81
84
  "CommandABC",
82
85
  "CommandResult",
83
86
  "InputAction",
@@ -1,14 +1,12 @@
1
1
  from typing import TYPE_CHECKING
2
2
 
3
3
  from klaude_code.command.command_abc import CommandABC, CommandResult, InputAction
4
- from klaude_code.command.registry import register_command
5
4
  from klaude_code.protocol import commands
6
5
 
7
6
  if TYPE_CHECKING:
8
7
  from klaude_code.core.agent import Agent
9
8
 
10
9
 
11
- @register_command
12
10
  class ClearCommand(CommandABC):
13
11
  """Clear current session and start a new conversation"""
14
12
 
@@ -3,14 +3,12 @@ from pathlib import Path
3
3
  from typing import TYPE_CHECKING
4
4
 
5
5
  from klaude_code.command.command_abc import CommandABC, CommandResult
6
- from klaude_code.command.registry import register_command
7
6
  from klaude_code.protocol import commands, events, model
8
7
 
9
8
  if TYPE_CHECKING:
10
9
  from klaude_code.core.agent import Agent
11
10
 
12
11
 
13
- @register_command
14
12
  class DiffCommand(CommandABC):
15
13
  """Show git diff for the current repository."""
16
14
 
@@ -5,7 +5,6 @@ from pathlib import Path
5
5
  from typing import TYPE_CHECKING
6
6
 
7
7
  from klaude_code.command.command_abc import CommandABC, CommandResult
8
- from klaude_code.command.registry import register_command
9
8
  from klaude_code.protocol import commands, events, model
10
9
  from klaude_code.session.export import build_export_html, get_default_export_path
11
10
 
@@ -13,7 +12,6 @@ if TYPE_CHECKING:
13
12
  from klaude_code.core.agent import Agent
14
13
 
15
14
 
16
- @register_command
17
15
  class ExportCommand(CommandABC):
18
16
  """Export the current session into a standalone HTML transcript."""
19
17
 
@@ -1,14 +1,12 @@
1
1
  from typing import TYPE_CHECKING
2
2
 
3
3
  from klaude_code.command.command_abc import CommandABC, CommandResult
4
- from klaude_code.command.registry import register_command
5
4
  from klaude_code.protocol import commands, events, model
6
5
 
7
6
  if TYPE_CHECKING:
8
7
  from klaude_code.core.agent import Agent
9
8
 
10
9
 
11
- @register_command
12
10
  class HelpCommand(CommandABC):
13
11
  """Display help information for all available slash commands."""
14
12
 
@@ -2,7 +2,6 @@ import asyncio
2
2
  from typing import TYPE_CHECKING
3
3
 
4
4
  from klaude_code.command.command_abc import CommandABC, CommandResult, InputAction
5
- from klaude_code.command.registry import register_command
6
5
  from klaude_code.config import select_model_from_config
7
6
  from klaude_code.protocol import commands, events, model
8
7
 
@@ -10,7 +9,6 @@ if TYPE_CHECKING:
10
9
  from klaude_code.core.agent import Agent
11
10
 
12
11
 
13
- @register_command
14
12
  class ModelCommand(CommandABC):
15
13
  """Display or change the model configuration."""
16
14
 
@@ -1,14 +1,12 @@
1
1
  from typing import TYPE_CHECKING
2
2
 
3
3
  from klaude_code.command.command_abc import CommandABC, CommandResult
4
- from klaude_code.command.registry import register_command
5
4
  from klaude_code.protocol import commands, events
6
5
 
7
6
  if TYPE_CHECKING:
8
7
  from klaude_code.core.agent import Agent
9
8
 
10
9
 
11
- @register_command
12
10
  class RefreshTerminalCommand(CommandABC):
13
11
  """Refresh terminal display"""
14
12
 
@@ -1,5 +1,5 @@
1
1
  from importlib.resources import files
2
- from typing import TYPE_CHECKING, TypeVar
2
+ from typing import TYPE_CHECKING
3
3
 
4
4
  from klaude_code.command.command_abc import CommandResult, InputAction
5
5
  from klaude_code.command.prompt_command import PromptCommand
@@ -13,14 +13,10 @@ if TYPE_CHECKING:
13
13
 
14
14
  _COMMANDS: dict[commands.CommandName | str, "CommandABC"] = {}
15
15
 
16
- T = TypeVar("T", bound="CommandABC")
17
16
 
18
-
19
- def register_command(cls: type[T]) -> type[T]:
20
- """Decorator to register a command class in the global registry."""
21
- instance = cls()
22
- _COMMANDS[instance.name] = instance
23
- return cls
17
+ def register(cmd: "CommandABC") -> None:
18
+ """Register a command instance. Order of registration determines display order."""
19
+ _COMMANDS[cmd.name] = cmd
24
20
 
25
21
 
26
22
  def load_prompt_commands():
@@ -2,7 +2,6 @@ from pathlib import Path
2
2
  from typing import TYPE_CHECKING
3
3
 
4
4
  from klaude_code.command.command_abc import CommandABC, CommandResult
5
- from klaude_code.command.registry import register_command
6
5
  from klaude_code.protocol import commands, events, model
7
6
 
8
7
  if TYPE_CHECKING:
@@ -62,7 +61,6 @@ def _extract_releases(changelog: str, count: int = 1) -> str:
62
61
  return "\n".join("\n".join(release) for release in releases).strip()
63
62
 
64
63
 
65
- @register_command
66
64
  class ReleaseNotesCommand(CommandABC):
67
65
  """Display the latest release notes from CHANGELOG.md."""
68
66
 
@@ -1,7 +1,6 @@
1
1
  from typing import TYPE_CHECKING
2
2
 
3
3
  from klaude_code.command.command_abc import CommandABC, CommandResult
4
- from klaude_code.command.registry import register_command
5
4
  from klaude_code.protocol import commands, events, model
6
5
  from klaude_code.session.session import Session
7
6
 
@@ -60,8 +59,8 @@ def accumulate_session_usage(session: Session) -> AggregatedUsage:
60
59
  total.cache_read_cost = (total.cache_read_cost or 0.0) + usage.cache_read_cost
61
60
 
62
61
  # Track peak context window size (max across all tasks)
63
- if usage.context_token is not None:
64
- total.context_token = usage.context_token
62
+ if usage.context_size is not None:
63
+ total.context_size = usage.context_size
65
64
 
66
65
  # Keep the latest context_limit for computed context_usage_percent
67
66
  if usage.context_limit is not None:
@@ -127,7 +126,6 @@ def format_status_content(aggregated: AggregatedUsage) -> str:
127
126
  return "\n".join(lines)
128
127
 
129
128
 
130
- @register_command
131
129
  class StatusCommand(CommandABC):
132
130
  """Display session usage statistics."""
133
131
 
@@ -4,14 +4,12 @@ from pathlib import Path
4
4
  from typing import TYPE_CHECKING
5
5
 
6
6
  from klaude_code.command.command_abc import CommandABC, CommandResult
7
- from klaude_code.command.registry import register_command
8
7
  from klaude_code.protocol import commands, events, model
9
8
 
10
9
  if TYPE_CHECKING:
11
10
  from klaude_code.core.agent import Agent
12
11
 
13
12
 
14
- @register_command
15
13
  class TerminalSetupCommand(CommandABC):
16
14
  """Setup shift+enter newline functionality in terminal"""
17
15
 
@@ -0,0 +1,227 @@
1
+ import asyncio
2
+ from typing import TYPE_CHECKING
3
+
4
+ import questionary
5
+
6
+ from klaude_code.command.command_abc import CommandABC, CommandResult
7
+ from klaude_code.protocol import commands, events, llm_param, model
8
+
9
+ if TYPE_CHECKING:
10
+ from klaude_code.core.agent import Agent
11
+
12
+
13
# Thinking level options for different protocols.
# Reasoning-effort choices offered for the responses protocol.
RESPONSES_LEVELS = ["minimal", "low", "medium", "high"]
# GPT-5.1 variants additionally accept "none".
RESPONSES_GPT51_LEVELS = ["none", "minimal", "low", "medium", "high"]
# codex-max models are offered "medium" and above, including "xhigh".
RESPONSES_CODEX_MAX_LEVELS = ["medium", "high", "xhigh"]

# (label, budget_tokens) pairs for Anthropic-style thinking selection;
# the "off" entry carries a budget of 0.
ANTHROPIC_LEVELS: list[tuple[str, int | None]] = [
    ("off", 0),
    ("low (2048 tokens)", 2048),
    ("medium (8192 tokens)", 8192),
    ("high (31999 tokens)", 31999),
]
24
+
25
+
26
+ def _is_openrouter_model_with_reasoning_effort(model_name: str | None) -> bool:
27
+ """Check if the model is GPT series, Grok or Gemini 3."""
28
+ if not model_name:
29
+ return False
30
+ model_lower = model_name.lower()
31
+ return model_lower.startswith(("openai/gpt-", "x-ai/grok-", "google/gemini-3"))
32
+
33
+
34
+ def _is_gpt51_model(model_name: str | None) -> bool:
35
+ """Check if the model is GPT-5.1."""
36
+ if not model_name:
37
+ return False
38
+ return model_name.lower() in ["gpt5.1", "openai/gpt-5.1", "gpt-5.1-codex-2025-11-13"]
39
+
40
+
41
+ def _is_codex_max_model(model_name: str | None) -> bool:
42
+ """Check if the model is GPT-5.1-codex-max."""
43
+ if not model_name:
44
+ return False
45
+ return "codex-max" in model_name.lower()
46
+
47
+
48
def _get_levels_for_responses(model_name: str | None) -> list[str]:
    """Pick the reasoning-effort choices appropriate for a responses-protocol model."""
    if _is_codex_max_model(model_name):
        return RESPONSES_CODEX_MAX_LEVELS
    return RESPONSES_GPT51_LEVELS if _is_gpt51_model(model_name) else RESPONSES_LEVELS
55
+
56
+
57
def _format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
    """Render the active thinking setting of *config* as a short display string."""
    thinking = config.thinking
    if not thinking:
        return "not configured"

    def _effort_style() -> str | None:
        # reasoning_effort rendering (responses/codex, effort-capable openrouter models).
        if thinking.reasoning_effort:
            return f"reasoning_effort={thinking.reasoning_effort}"
        return None

    def _budget_style() -> str | None:
        # enabled/disabled + budget_tokens rendering (anthropic-compatible styles).
        if thinking.type == "disabled":
            return "off"
        if thinking.type == "enabled":
            return f"enabled (budget_tokens={thinking.budget_tokens})"
        return None

    protocol = config.protocol
    rendered: str | None

    if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
        rendered = _effort_style()
    elif protocol == llm_param.LLMClientProtocol.ANTHROPIC:
        rendered = _budget_style()
    elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
        # Effort-capable models use the reasoning_effort form; others the budget form.
        if _is_openrouter_model_with_reasoning_effort(config.model):
            rendered = _effort_style()
        else:
            rendered = _budget_style()
    elif protocol == llm_param.LLMClientProtocol.OPENAI:
        rendered = _budget_style()
    else:
        return "unknown protocol"

    return rendered if rendered is not None else "not set"
96
+
97
+
98
# Shared questionary styling for the selection prompts below:
# muted option/instruction text with a cyan pointer and highlight.
SELECT_STYLE = questionary.Style([
    ("instruction", "ansibrightblack"),
    ("pointer", "ansicyan"),
    ("highlighted", "ansicyan"),
    ("text", "ansibrightblack"),
])
104
+
105
+
106
def _select_responses_thinking_sync(model_name: str | None) -> llm_param.Thinking | None:
    """Blocking prompt for a reasoning-effort level (responses/codex protocol).

    Returns None when the user cancels (ESC or Ctrl-C), so the caller keeps
    the current configuration.
    """
    options: list[questionary.Choice] = []
    for level in _get_levels_for_responses(model_name):
        options.append(questionary.Choice(title=level, value=level))

    try:
        picked = questionary.select(
            message="Select reasoning effort:",
            choices=options,
            pointer="→",
            instruction="Use arrow keys to move, Enter to select",
            use_jk_keys=False,
            style=SELECT_STYLE,
        ).ask()
    except KeyboardInterrupt:
        return None

    if picked is None:
        return None
    return llm_param.Thinking(reasoning_effort=picked)
126
+
127
+
128
def _select_anthropic_thinking_sync() -> llm_param.Thinking | None:
    """Blocking prompt for an Anthropic-style thinking budget.

    Returns:
        A Thinking payload for the chosen level. The "off" entry (budget 0)
        maps to type="disabled" — Anthropic rejects enabled thinking with a
        zero budget, and the status formatter renders "disabled" as "off".
        Returns None when the user cancels (ESC or Ctrl-C), so the caller
        keeps the current configuration — consistent with
        _select_responses_thinking_sync.
    """
    choices: list[questionary.Choice] = [
        questionary.Choice(title=label, value=tokens) for label, tokens in ANTHROPIC_LEVELS
    ]

    try:
        result = questionary.select(
            message="Select thinking level:",
            choices=choices,
            pointer="→",
            instruction="Use arrow keys to move, Enter to select",
            use_jk_keys=False,
            style=SELECT_STYLE,
        ).ask()
        if result is None:
            # Cancelled: make no change rather than silently disabling thinking.
            return None
        if not result:
            # "off" entry selected (budget 0): explicitly disable thinking.
            return llm_param.Thinking(type="disabled", budget_tokens=0)
        return llm_param.Thinking(type="enabled", budget_tokens=result)
    except KeyboardInterrupt:
        return None
148
+
149
+
150
class ThinkingCommand(CommandABC):
    """Configure model thinking/reasoning level.

    Prompts the user with a protocol-appropriate selector (reasoning effort
    for responses/codex, budget tokens for anthropic-style protocols) and
    applies the choice to the agent's LLM config in place.
    """

    @property
    def name(self) -> commands.CommandName:
        # Slash-command identifier for this command.
        return commands.CommandName.THINKING

    @property
    def summary(self) -> str:
        # One-line description shown in slash-command completion/help.
        return "Configure model thinking/reasoning level"

    @property
    def is_interactive(self) -> bool:
        # Marks this command as interactive: it drives questionary prompts,
        # so the REPL avoids running its ESC monitor alongside it (see the
        # interactive-command handling in the REPL loop).
        return True

    async def run(self, raw: str, agent: "Agent") -> CommandResult:
        """Prompt for a new thinking level and apply it to the agent's LLM config.

        The selector depends on the client's protocol: responses/codex use
        reasoning-effort levels; anthropic and openai_compatible use budget
        tokens; openrouter picks one of the two based on the model name.
        Prompts run in a worker thread via asyncio.to_thread to avoid
        blocking the event loop.
        """
        if not agent.profile:
            return self._no_change_result(agent, "No profile configured")

        config = agent.profile.llm_client.get_llm_config()
        protocol = config.protocol
        model_name = config.model

        # Snapshot the current setting so the result message can show old -> new.
        current = _format_current_thinking(config)

        # Select new thinking configuration based on protocol
        new_thinking: llm_param.Thinking | None = None

        if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
            new_thinking = await asyncio.to_thread(_select_responses_thinking_sync, model_name)

        elif protocol == llm_param.LLMClientProtocol.ANTHROPIC:
            new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)

        elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
            # GPT/Grok/Gemini-3 models on OpenRouter take reasoning-effort
            # levels; other models use the anthropic-style budget selector.
            if _is_openrouter_model_with_reasoning_effort(model_name):
                new_thinking = await asyncio.to_thread(_select_responses_thinking_sync, model_name)
            else:
                new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)

        elif protocol == llm_param.LLMClientProtocol.OPENAI:
            # openai_compatible uses anthropic style
            new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)

        else:
            return self._no_change_result(agent, f"Unsupported protocol: {protocol}")

        if new_thinking is None:
            # Selector returned None (user cancelled): keep the current setting.
            return self._no_change_result(agent, "(no change)")

        # Apply the new thinking configuration
        config.thinking = new_thinking
        new_status = _format_current_thinking(config)

        return CommandResult(
            events=[
                events.DeveloperMessageEvent(
                    session_id=agent.session.id,
                    item=model.DeveloperMessageItem(
                        content=f"Thinking changed: {current} -> {new_status}",
                        command_output=model.CommandOutput(command_name=self.name),
                    ),
                )
            ]
        )

    def _no_change_result(self, agent: "Agent", message: str) -> CommandResult:
        # Emit a developer message (e.g. "(no change)" or an error note)
        # without touching the LLM config.
        return CommandResult(
            events=[
                events.DeveloperMessageEvent(
                    session_id=agent.session.id,
                    item=model.DeveloperMessageItem(
                        content=message,
                        command_output=model.CommandOutput(command_name=self.name),
                    ),
                )
            ]
        )