klaude-code 1.2.12__py3-none-any.whl → 1.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. klaude_code/auth/codex/oauth.py +3 -3
  2. klaude_code/cli/auth_cmd.py +73 -0
  3. klaude_code/cli/config_cmd.py +88 -0
  4. klaude_code/cli/debug.py +72 -0
  5. klaude_code/cli/main.py +31 -142
  6. klaude_code/cli/runtime.py +19 -58
  7. klaude_code/cli/session_cmd.py +9 -9
  8. klaude_code/command/__init__.py +6 -6
  9. klaude_code/command/export_cmd.py +3 -3
  10. klaude_code/command/model_cmd.py +1 -1
  11. klaude_code/command/registry.py +1 -1
  12. klaude_code/command/terminal_setup_cmd.py +2 -2
  13. klaude_code/command/thinking_cmd.py +8 -6
  14. klaude_code/config/__init__.py +1 -5
  15. klaude_code/config/config.py +31 -4
  16. klaude_code/config/list_model.py +1 -1
  17. klaude_code/const/__init__.py +8 -3
  18. klaude_code/core/agent.py +14 -62
  19. klaude_code/core/executor.py +11 -10
  20. klaude_code/core/manager/agent_manager.py +4 -4
  21. klaude_code/core/manager/llm_clients.py +10 -49
  22. klaude_code/core/manager/llm_clients_builder.py +8 -21
  23. klaude_code/core/manager/sub_agent_manager.py +3 -3
  24. klaude_code/core/prompt.py +12 -7
  25. klaude_code/core/reminders.py +1 -1
  26. klaude_code/core/task.py +2 -2
  27. klaude_code/core/tool/__init__.py +16 -25
  28. klaude_code/core/tool/file/_utils.py +1 -1
  29. klaude_code/core/tool/file/apply_patch.py +17 -25
  30. klaude_code/core/tool/file/apply_patch_tool.py +4 -7
  31. klaude_code/core/tool/file/edit_tool.py +4 -11
  32. klaude_code/core/tool/file/multi_edit_tool.py +2 -3
  33. klaude_code/core/tool/file/read_tool.py +3 -4
  34. klaude_code/core/tool/file/write_tool.py +2 -3
  35. klaude_code/core/tool/memory/memory_tool.py +2 -8
  36. klaude_code/core/tool/memory/skill_loader.py +3 -2
  37. klaude_code/core/tool/shell/command_safety.py +0 -1
  38. klaude_code/core/tool/tool_context.py +1 -3
  39. klaude_code/core/tool/tool_registry.py +2 -1
  40. klaude_code/core/tool/tool_runner.py +1 -1
  41. klaude_code/core/tool/truncation.py +2 -5
  42. klaude_code/core/turn.py +9 -3
  43. klaude_code/llm/anthropic/client.py +6 -2
  44. klaude_code/llm/client.py +5 -1
  45. klaude_code/llm/codex/client.py +2 -2
  46. klaude_code/llm/input_common.py +2 -2
  47. klaude_code/llm/openai_compatible/client.py +11 -8
  48. klaude_code/llm/openai_compatible/stream_processor.py +2 -1
  49. klaude_code/llm/openrouter/client.py +22 -9
  50. klaude_code/llm/openrouter/reasoning_handler.py +19 -132
  51. klaude_code/llm/registry.py +6 -5
  52. klaude_code/llm/responses/client.py +10 -5
  53. klaude_code/protocol/events.py +9 -2
  54. klaude_code/protocol/model.py +7 -1
  55. klaude_code/protocol/sub_agent.py +2 -2
  56. klaude_code/session/export.py +58 -0
  57. klaude_code/session/selector.py +2 -2
  58. klaude_code/session/session.py +37 -7
  59. klaude_code/session/templates/export_session.html +46 -0
  60. klaude_code/trace/__init__.py +2 -2
  61. klaude_code/trace/log.py +144 -5
  62. klaude_code/ui/__init__.py +4 -9
  63. klaude_code/ui/core/stage_manager.py +7 -4
  64. klaude_code/ui/modes/debug/display.py +2 -1
  65. klaude_code/ui/modes/repl/__init__.py +1 -1
  66. klaude_code/ui/modes/repl/completers.py +6 -7
  67. klaude_code/ui/modes/repl/display.py +3 -4
  68. klaude_code/ui/modes/repl/event_handler.py +63 -5
  69. klaude_code/ui/modes/repl/key_bindings.py +2 -3
  70. klaude_code/ui/modes/repl/renderer.py +52 -62
  71. klaude_code/ui/renderers/diffs.py +1 -4
  72. klaude_code/ui/renderers/tools.py +4 -0
  73. klaude_code/ui/rich/markdown.py +3 -3
  74. klaude_code/ui/rich/searchable_text.py +6 -6
  75. klaude_code/ui/rich/status.py +3 -4
  76. klaude_code/ui/rich/theme.py +2 -5
  77. klaude_code/ui/terminal/control.py +7 -16
  78. klaude_code/ui/terminal/notifier.py +2 -4
  79. klaude_code/ui/utils/common.py +1 -1
  80. klaude_code/ui/utils/debouncer.py +2 -2
  81. {klaude_code-1.2.12.dist-info → klaude_code-1.2.14.dist-info}/METADATA +1 -1
  82. {klaude_code-1.2.12.dist-info → klaude_code-1.2.14.dist-info}/RECORD +84 -81
  83. {klaude_code-1.2.12.dist-info → klaude_code-1.2.14.dist-info}/WHEEL +0 -0
  84. {klaude_code-1.2.12.dist-info → klaude_code-1.2.14.dist-info}/entry_points.txt +0 -0
@@ -28,15 +28,15 @@ def ensure_commands_loaded() -> None:
 
     # Import and register commands in display order
     from .clear_cmd import ClearCommand
-    from .model_cmd import ModelCommand
-    from .status_cmd import StatusCommand
     from .diff_cmd import DiffCommand
     from .export_cmd import ExportCommand
-    from .thinking_cmd import ThinkingCommand
     from .help_cmd import HelpCommand
+    from .model_cmd import ModelCommand
     from .refresh_cmd import RefreshTerminalCommand
-    from .terminal_setup_cmd import TerminalSetupCommand
     from .release_notes_cmd import ReleaseNotesCommand
+    from .status_cmd import StatusCommand
+    from .terminal_setup_cmd import TerminalSetupCommand
+    from .thinking_cmd import ThinkingCommand
 
     # Register in desired display order
     register(ExportCommand())
@@ -86,8 +86,8 @@ __all__ = [
     "InputAction",
     "InputActionType",
     "dispatch_command",
+    "ensure_commands_loaded",
     "get_commands",
-    "is_slash_command_name",
     "has_interactive_command",
-    "ensure_commands_loaded",
+    "is_slash_command_name",
 ]
@@ -31,7 +31,7 @@ class ExportCommand(CommandABC):
     def is_interactive(self) -> bool:
         return False
 
-    async def run(self, raw: str, agent: "Agent") -> CommandResult:
+    async def run(self, raw: str, agent: Agent) -> CommandResult:
         try:
             output_path = self._resolve_output_path(raw, agent)
             html_doc = self._build_html(agent)
@@ -58,7 +58,7 @@ class ExportCommand(CommandABC):
         )
         return CommandResult(events=[event])
 
-    def _resolve_output_path(self, raw: str, agent: "Agent") -> Path:
+    def _resolve_output_path(self, raw: str, agent: Agent) -> Path:
         trimmed = raw.strip()
         if trimmed:
             candidate = Path(trimmed).expanduser()
@@ -79,7 +79,7 @@ class ExportCommand(CommandABC):
             msg = f"Failed to open HTML with `open`: {exc}"
             raise RuntimeError(msg) from exc
 
-    def _build_html(self, agent: "Agent") -> str:
+    def _build_html(self, agent: Agent) -> str:
         profile = agent.profile
         system_prompt = (profile.system_prompt if profile else "") or ""
         tools = profile.tools if profile else []
@@ -2,7 +2,7 @@ import asyncio
 from typing import TYPE_CHECKING
 
 from klaude_code.command.command_abc import CommandABC, CommandResult, InputAction
-from klaude_code.config import select_model_from_config
+from klaude_code.config.select_model import select_model_from_config
 from klaude_code.protocol import commands, events, model
 
 if TYPE_CHECKING:
@@ -94,7 +94,7 @@ async def dispatch_command(raw: str, agent: "Agent") -> CommandResult:
                events.DeveloperMessageEvent(
                    session_id=agent.session.id,
                    item=model.DeveloperMessageItem(
-                        content=f"Command {command_identifier} error: [{e.__class__.__name__}] {str(e)}",
+                        content=f"Command {command_identifier} error: [{e.__class__.__name__}] {e!s}",
                        command_output=command_output,
                    ),
                )
@@ -43,7 +43,7 @@ class TerminalSetupCommand(CommandABC):
             return self._create_success_result(agent, message)
 
         except Exception as e:
-            return self._create_error_result(agent, f"Error configuring terminal: {str(e)}")
+            return self._create_error_result(agent, f"Error configuring terminal: {e!s}")
 
     def _setup_ghostty(self) -> str:
         """Configure shift+enter newline for Ghostty terminal"""
@@ -113,7 +113,7 @@ class TerminalSetupCommand(CommandABC):
             )
 
         except Exception as e:
-            raise Exception(f"Error configuring iTerm: {str(e)}")
+            raise Exception(f"Error configuring iTerm: {e!s}") from e
 
     def _setup_vscode_family(self) -> str:
         """Configure shift+enter newline for VS Code family terminals (VS Code, Windsurf, Cursor).
@@ -95,12 +95,14 @@ def _format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
     return "unknown protocol"
 
 
-SELECT_STYLE = questionary.Style([
-    ("instruction", "ansibrightblack"),
-    ("pointer", "ansicyan"),
-    ("highlighted", "ansicyan"),
-    ("text", "ansibrightblack"),
-])
+SELECT_STYLE = questionary.Style(
+    [
+        ("instruction", "ansibrightblack"),
+        ("pointer", "ansicyan"),
+        ("highlighted", "ansicyan"),
+        ("text", "ansibrightblack"),
+    ]
+)
 
 
 def _select_responses_thinking_sync(model_name: str | None) -> llm_param.Thinking | None:
@@ -1,11 +1,7 @@
 from .config import Config, config_path, load_config
-from .list_model import display_models_and_providers
-from .select_model import select_model_from_config
 
 __all__ = [
     "Config",
-    "load_config",
     "config_path",
-    "display_models_and_providers",
-    "select_model_from_config",
+    "load_config",
 ]
@@ -71,7 +71,8 @@ class Config(BaseModel):
 
         def _save_config() -> None:
             config_path.parent.mkdir(parents=True, exist_ok=True)
-            _ = config_path.write_text(yaml.dump(config_dict, default_flow_style=False, sort_keys=False))
+            yaml_content = yaml.dump(config_dict, default_flow_style=False, sort_keys=False)
+            _ = config_path.write_text(str(yaml_content or ""))
 
         await asyncio.to_thread(_save_config)
 
@@ -142,8 +143,7 @@ def get_example_config() -> Config:
     )
 
 
-@lru_cache(maxsize=1)
-def load_config() -> Config | None:
+def _load_config_uncached() -> Config | None:
     if not config_path.exists():
         log(f"Config file not found: {config_path}")
         example_config = get_example_config()
@@ -151,7 +151,7 @@ def load_config() -> Config | None:
         config_dict = example_config.model_dump(mode="json", exclude_none=True)
 
         # Comment out all example config lines
-        yaml_str = yaml.dump(config_dict, default_flow_style=False, sort_keys=False)
+        yaml_str = yaml.dump(config_dict, default_flow_style=False, sort_keys=False) or ""
         commented_yaml = "\n".join(f"# {line}" if line.strip() else "#" for line in yaml_str.splitlines())
         _ = config_path.write_text(commented_yaml)
 
@@ -175,3 +175,30 @@ def load_config() -> Config | None:
         raise ValueError(f"Invalid config file: {config_path}") from e
 
     return config
+
+
+@lru_cache(maxsize=1)
+def _load_config_cached() -> Config | None:
+    return _load_config_uncached()
+
+
+def load_config() -> Config | None:
+    """Load config from disk, caching only successful parses.
+
+    Returns:
+        Config object on success, or None when the config is missing/empty/commented out.
+    """
+
+    try:
+        config = _load_config_cached()
+    except ValueError:
+        _load_config_cached.cache_clear()
+        raise
+
+    if config is None:
+        _load_config_cached.cache_clear()
+    return config
+
+
+# Expose cache control for tests and callers that need to invalidate the cache.
+load_config.cache_clear = _load_config_cached.cache_clear  # type: ignore[attr-defined]
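The new `load_config()` wrapper caches only successful parses: a `ValueError` or a `None` result clears the single-entry `lru_cache` so the next call re-reads the file. A simplified, self-contained sketch of the same pattern (the in-memory `_FAKE_DISK` stand-in is illustrative, not part of klaude-code):

```python
from functools import lru_cache

# Toy stand-in for reading and parsing a config file from disk.
_FAKE_DISK: dict | None = None


def _load_uncached() -> dict | None:
    return _FAKE_DISK


@lru_cache(maxsize=1)
def _load_cached() -> dict | None:
    return _load_uncached()


def load() -> dict | None:
    """Cache only successful loads; a None result or a parse error clears the cache."""
    try:
        result = _load_cached()
    except ValueError:
        _load_cached.cache_clear()  # do not remember failures
        raise
    if result is None:
        _load_cached.cache_clear()  # a missing/empty config is not a permanent answer
    return result


assert load() is None                      # nothing on "disk" yet, so nothing is cached
_FAKE_DISK = {"model": "example-model"}
assert load() == {"model": "example-model"}  # picked up because the None result was not cached
```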
@@ -34,7 +34,7 @@ def _display_codex_status(console: Console) -> None:
            )
        )
    else:
-        expires_dt = datetime.datetime.fromtimestamp(state.expires_at, tz=datetime.timezone.utc)
+        expires_dt = datetime.datetime.fromtimestamp(state.expires_at, tz=datetime.UTC)
        console.print(
            Text.assemble(
                ("Codex Status: ", "bold"),
@@ -4,6 +4,8 @@ This module consolidates all magic numbers and configuration values
 that were previously scattered across the codebase.
 """
 
+from pathlib import Path
+
 # =============================================================================
 # Agent Configuration
 # =============================================================================
@@ -116,15 +118,18 @@ STATUS_SHIMMER_ALPHA_SCALE = 0.7
 # Spinner breathing animation
 # Duration in seconds for one full breathe-in + breathe-out cycle
 # Keep in sync with STATUS_SHIMMER_SWEEP_SECONDS for visual consistency
-SPINNER_BREATH_PERIOD_SECONDS = 2
+SPINNER_BREATH_PERIOD_SECONDS: float = 2.0
 
 
 # =============================================================================
 # Debug / Logging
 # =============================================================================
 
-# Default debug log file path
-DEFAULT_DEBUG_LOG_FILE = "debug.log"
+# Default debug log directory (user cache)
+DEFAULT_DEBUG_LOG_DIR = Path.home() / ".klaude" / "logs"
+
+# Default debug log file path (symlink to latest session)
+DEFAULT_DEBUG_LOG_FILE = DEFAULT_DEBUG_LOG_DIR / "debug.log"
 
 # Maximum log file size before rotation (10MB)
 LOG_MAX_BYTES = 10 * 1024 * 1024
klaude_code/core/agent.py CHANGED
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
-from collections.abc import AsyncGenerator, Callable, Iterable
+from collections.abc import AsyncGenerator, Iterable
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Protocol
+from typing import Protocol
 
 from klaude_code.core.prompt import load_system_prompt
 from klaude_code.core.reminders import Reminder, load_agent_reminders
@@ -14,38 +14,21 @@ from klaude_code.protocol.model import UserInputPayload
 from klaude_code.session import Session
 from klaude_code.trace import DebugType, log_debug
 
-if TYPE_CHECKING:
-    from klaude_code.core.manager.llm_clients import LLMClients
-
 
 @dataclass(frozen=True)
 class AgentProfile:
     """Encapsulates the active LLM client plus prompts/tools/reminders."""
 
-    llm_client_factory: Callable[[], LLMClientABC]
+    llm_client: LLMClientABC
     system_prompt: str | None
     tools: list[llm_param.ToolSchema]
     reminders: list[Reminder]
 
-    _llm_client: LLMClientABC | None = None
-
-    @property
-    def llm_client(self) -> LLMClientABC:
-        if self._llm_client is None:
-            object.__setattr__(self, "_llm_client", self.llm_client_factory())
-        return self._llm_client  # type: ignore[return-value]
-
 
 class ModelProfileProvider(Protocol):
     """Strategy interface for constructing agent profiles."""
 
     def build_profile(
-        self,
-        llm_clients: LLMClients,
-        sub_agent_type: tools.SubAgentType | None = None,
-    ) -> AgentProfile: ...
-
-    def build_profile_eager(
         self,
         llm_client: LLMClientABC,
         sub_agent_type: tools.SubAgentType | None = None,
@@ -56,27 +39,14 @@ class DefaultModelProfileProvider(ModelProfileProvider):
     """Default provider backed by global prompts/tool/reminder registries."""
 
     def build_profile(
-        self,
-        llm_clients: LLMClients,
-        sub_agent_type: tools.SubAgentType | None = None,
-    ) -> AgentProfile:
-        model_name = llm_clients.main_model_name
-        return AgentProfile(
-            llm_client_factory=lambda: llm_clients.main,
-            system_prompt=load_system_prompt(model_name, sub_agent_type),
-            tools=load_agent_tools(model_name, sub_agent_type),
-            reminders=load_agent_reminders(model_name, sub_agent_type),
-        )
-
-    def build_profile_eager(
         self,
         llm_client: LLMClientABC,
         sub_agent_type: tools.SubAgentType | None = None,
     ) -> AgentProfile:
         model_name = llm_client.model_name
         return AgentProfile(
-            llm_client_factory=lambda: llm_client,
-            system_prompt=load_system_prompt(model_name, sub_agent_type),
+            llm_client=llm_client,
+            system_prompt=load_system_prompt(model_name, llm_client.protocol, sub_agent_type),
            tools=load_agent_tools(model_name, sub_agent_type),
            reminders=load_agent_reminders(model_name, sub_agent_type),
        )
@@ -86,26 +56,13 @@ class VanillaModelProfileProvider(ModelProfileProvider):
     """Provider that strips prompts, reminders, and tools for vanilla mode."""
 
     def build_profile(
-        self,
-        llm_clients: LLMClients,
-        sub_agent_type: tools.SubAgentType | None = None,
-    ) -> AgentProfile:
-        model_name = llm_clients.main_model_name
-        return AgentProfile(
-            llm_client_factory=lambda: llm_clients.main,
-            system_prompt=None,
-            tools=load_agent_tools(model_name, vanilla=True),
-            reminders=load_agent_reminders(model_name, vanilla=True),
-        )
-
-    def build_profile_eager(
         self,
         llm_client: LLMClientABC,
         sub_agent_type: tools.SubAgentType | None = None,
     ) -> AgentProfile:
         model_name = llm_client.model_name
         return AgentProfile(
-            llm_client_factory=lambda: llm_client,
+            llm_client=llm_client,
            system_prompt=None,
            tools=load_agent_tools(model_name, vanilla=True),
            reminders=load_agent_reminders(model_name, vanilla=True),
@@ -117,13 +74,12 @@ class Agent:
         self,
         session: Session,
         profile: AgentProfile,
-        model_name: str | None = None,
     ):
         self.session: Session = session
         self.profile: AgentProfile = profile
         self._current_task: TaskExecutor | None = None
-        if not self.session.model_name and model_name:
-            self.session.model_name = model_name
+        if not self.session.model_name:
+            self.session.model_name = profile.llm_client.model_name
 
     def cancel(self) -> Iterable[events.Event]:
         """Handle agent cancellation and persist an interrupt marker and tool cancellations.
@@ -136,8 +92,7 @@ class Agent:
         """
         # First, cancel any running task so it stops emitting events.
         if self._current_task is not None:
-            for ui_event in self._current_task.cancel():
-                yield ui_event
+            yield from self._current_task.cancel()
             self._current_task = None
 
         # Record an interrupt marker in the session history
@@ -148,7 +103,7 @@ class Agent:
            debug_type=DebugType.EXECUTION,
        )
 
-    async def run_task(self, user_input: UserInputPayload) -> AsyncGenerator[events.Event, None]:
+    async def run_task(self, user_input: UserInputPayload) -> AsyncGenerator[events.Event]:
         session_ctx = SessionContext(
            session_id=self.session.id,
            get_conversation_history=lambda: self.session.conversation_history,
@@ -173,7 +128,7 @@
         finally:
            self._current_task = None
 
-    async def replay_history(self) -> AsyncGenerator[events.Event, None]:
+    async def replay_history(self) -> AsyncGenerator[events.Event]:
         """Yield UI events reconstructed from saved conversation history."""
 
         if len(self.session.conversation_history) == 0:
@@ -185,21 +140,18 @@
            session_id=self.session.id,
        )
 
-    async def _process_reminder(self, reminder: Reminder) -> AsyncGenerator[events.DeveloperMessageEvent, None]:
+    async def _process_reminder(self, reminder: Reminder) -> AsyncGenerator[events.DeveloperMessageEvent]:
         """Process a single reminder and yield events if it produces output."""
         item = await reminder(self.session)
         if item is not None:
            self.session.append_history([item])
            yield events.DeveloperMessageEvent(session_id=self.session.id, item=item)
 
-    def set_model_profile(self, profile: AgentProfile, model_name: str | None = None) -> None:
+    def set_model_profile(self, profile: AgentProfile) -> None:
         """Apply a fully constructed profile to the agent."""
 
         self.profile = profile
-        if model_name:
-            self.session.model_name = model_name
-        elif not self.session.model_name:
-            self.session.model_name = profile.llm_client.model_name
+        self.session.model_name = profile.llm_client.model_name
 
     def get_llm_client(self) -> LLMClientABC:
         return self.profile.llm_client
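The net effect of the `core/agent.py` changes is that `AgentProfile` now holds a concrete client instead of a factory plus a cached `_llm_client` slot, so the session's model name can be read straight off the profile. A rough standalone sketch of the simplified shape (`FakeClient`, `Profile`, and `MiniAgent` are illustrative stand-ins, not klaude-code APIs):

```python
from dataclasses import dataclass, field


@dataclass(frozen=True)
class FakeClient:
    model_name: str  # stand-in for LLMClientABC


@dataclass(frozen=True)
class Profile:
    llm_client: FakeClient          # eager client, no factory indirection
    system_prompt: str | None = None
    tools: list[str] = field(default_factory=list)


class MiniAgent:
    def __init__(self, profile: Profile, session_model_name: str | None = None) -> None:
        self.profile = profile
        # Mirrors the new Agent.__init__: the session model name defaults to the client's.
        self.model_name = session_model_name or profile.llm_client.model_name


agent = MiniAgent(Profile(llm_client=FakeClient("example-model")))
assert agent.model_name == "example-model"
```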
@@ -264,14 +264,14 @@ class ExecutorContext:
             import traceback
 
             log_debug(
-                f"Agent task {task_id} failed: {str(e)}",
+                f"Agent task {task_id} failed: {e!s}",
                style="red",
                debug_type=DebugType.EXECUTION,
            )
             log_debug(traceback.format_exc(), style="red", debug_type=DebugType.EXECUTION)
             await self.emit_event(
                events.ErrorEvent(
-                    error_message=f"Agent task failed: [{e.__class__.__name__}] {str(e)}",
+                    error_message=f"Agent task failed: [{e.__class__.__name__}] {e!s}",
                    can_retry=False,
                )
            )
@@ -317,6 +317,7 @@ class Executor:
         self.submission_queue: asyncio.Queue[op.Submission] = asyncio.Queue()
         # Track completion events for all submissions (not just those with ActiveTask)
         self._completion_events: dict[str, asyncio.Event] = {}
+        self._background_tasks: set[asyncio.Task[None]] = set()
 
     async def submit(self, operation: op.Operation) -> str:
         """
@@ -388,12 +389,12 @@
         except Exception as e:
            # Handle unexpected errors
            log_debug(
-                f"Executor error: {str(e)}",
+                f"Executor error: {e!s}",
                style="red",
                debug_type=DebugType.EXECUTION,
            )
            await self.context.emit_event(
-                events.ErrorEvent(error_message=f"Executor error: {str(e)}", can_retry=False)
+                events.ErrorEvent(error_message=f"Executor error: {e!s}", can_retry=False)
            )
 
     async def stop(self) -> None:
@@ -420,7 +421,7 @@
            await self.submission_queue.put(submission)
         except Exception as e:
            log_debug(
-                f"Failed to send EndOperation: {str(e)}",
+                f"Failed to send EndOperation: {e!s}",
                style="red",
                debug_type=DebugType.EXECUTION,
            )
@@ -460,17 +461,17 @@
                event.set()
            else:
                # Run in background so the submission loop can continue (e.g., to handle interrupts)
-                asyncio.create_task(_await_agent_and_complete(task))
+                background_task = asyncio.create_task(_await_agent_and_complete(task))
+                self._background_tasks.add(background_task)
+                background_task.add_done_callback(self._background_tasks.discard)
 
         except Exception as e:
            log_debug(
-                f"Failed to handle submission {submission.id}: {str(e)}",
+                f"Failed to handle submission {submission.id}: {e!s}",
                style="red",
                debug_type=DebugType.EXECUTION,
            )
-            await self.context.emit_event(
-                events.ErrorEvent(error_message=f"Operation failed: {str(e)}", can_retry=False)
-            )
+            await self.context.emit_event(events.ErrorEvent(error_message=f"Operation failed: {e!s}", can_retry=False))
            # Set completion event even on error to prevent wait_for_completion from hanging
            event = self._completion_events.get(submission.id)
            if event is not None:
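The `_background_tasks` set added to `Executor` follows the standard asyncio guidance for fire-and-forget tasks: keep a strong reference so the task cannot be garbage-collected before it finishes, then discard it from the set when done. A minimal illustration of that pattern in isolation (toy coroutine names only):

```python
import asyncio

background_tasks: set[asyncio.Task[None]] = set()


async def _await_and_complete(name: str) -> None:
    # Stand-in for the real _await_agent_and_complete(task) coroutine.
    await asyncio.sleep(0)
    print(f"{name} finished")


async def main() -> None:
    for name in ("a", "b", "c"):
        task = asyncio.create_task(_await_and_complete(name))
        background_tasks.add(task)                         # keep a strong reference
        task.add_done_callback(background_tasks.discard)   # drop it once the task is done
    await asyncio.gather(*list(background_tasks))          # snapshot before callbacks mutate the set


asyncio.run(main())
```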
@@ -51,8 +51,8 @@ class AgentManager:
         if agent is not None:
            return agent
         session = Session.load(session_id)
-        profile = self._model_profile_provider.build_profile(self._llm_clients)
-        agent = Agent(session=session, profile=profile, model_name=self._llm_clients.main_model_name)
+        profile = self._model_profile_provider.build_profile(self._llm_clients.main)
+        agent = Agent(session=session, profile=profile)
 
         async for evt in agent.replay_history():
            await self.emit_event(evt)
@@ -60,7 +60,7 @@
         await self.emit_event(
            events.WelcomeEvent(
                work_dir=str(session.work_dir),
-                llm_config=self._llm_clients.get_llm_config(),
+                llm_config=self._llm_clients.main.get_llm_config(),
            )
        )
 
@@ -81,7 +81,7 @@
 
         llm_config = config.get_model_config(model_name)
         llm_client = create_llm_client(llm_config)
-        agent.set_model_profile(self._model_profile_provider.build_profile_eager(llm_client), model_name=model_name)
+        agent.set_model_profile(self._model_profile_provider.build_profile(llm_client))
 
         developer_item = model.DeveloperMessageItem(
            content=f"switched to model: {model_name}",
@@ -2,66 +2,27 @@
 
 from __future__ import annotations
 
-from collections.abc import Callable
+from dataclasses import dataclass
+from dataclasses import field as dataclass_field
 
 from klaude_code.llm.client import LLMClientABC
-from klaude_code.protocol import llm_param
 from klaude_code.protocol.tools import SubAgentType
 
 
-class LLMClients:
-    """Container for LLM clients used by main agent and sub-agents."""
-
-    def __init__(
-        self,
-        main_factory: Callable[[], LLMClientABC],
-        main_model_name: str,
-        main_llm_config: llm_param.LLMConfigParameter,
-    ) -> None:
-        self._main_factory: Callable[[], LLMClientABC] | None = main_factory
-        self._main_client: LLMClientABC | None = None
-        self._main_model_name: str = main_model_name
-        self._main_llm_config: llm_param.LLMConfigParameter = main_llm_config
-        self._sub_clients: dict[SubAgentType, LLMClientABC] = {}
-        self._sub_factories: dict[SubAgentType, Callable[[], LLMClientABC]] = {}
+def _default_sub_clients() -> dict[SubAgentType, LLMClientABC]:
+    return {}
 
-    @property
-    def main_model_name(self) -> str:
-        return self._main_model_name
 
-    def get_llm_config(self) -> llm_param.LLMConfigParameter:
-        return self._main_llm_config
-
-    @property
-    def main(self) -> LLMClientABC:
-        if self._main_client is None:
-            if self._main_factory is None:
-                raise RuntimeError("Main client factory not set")
-            self._main_client = self._main_factory()
-            self._main_factory = None
-        return self._main_client
+@dataclass
+class LLMClients:
+    """Container for LLM clients used by main agent and sub-agents."""
 
-    def register_sub_client_factory(
-        self,
-        sub_agent_type: SubAgentType,
-        factory: Callable[[], LLMClientABC],
-    ) -> None:
-        self._sub_factories[sub_agent_type] = factory
+    main: LLMClientABC
+    sub_clients: dict[SubAgentType, LLMClientABC] = dataclass_field(default_factory=_default_sub_clients)
 
     def get_client(self, sub_agent_type: SubAgentType | None = None) -> LLMClientABC:
         """Return client for a sub-agent type or the main client."""
 
         if sub_agent_type is None:
            return self.main
-
-        existing = self._sub_clients.get(sub_agent_type)
-        if existing is not None:
-            return existing
-
-        factory = self._sub_factories.get(sub_agent_type)
-        if factory is None:
-            return self.main
-
-        client = factory()
-        self._sub_clients[sub_agent_type] = client
-        return client
+        return self.sub_clients.get(sub_agent_type) or self.main
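`LLMClients` is now a plain dataclass of already-constructed clients rather than a factory registry, and `get_client` falls back to the main client when a sub-agent has no dedicated model. A small self-contained sketch of that lookup behaviour (string keys and the `Client` type stand in for `SubAgentType` and `LLMClientABC`):

```python
from dataclasses import dataclass, field


@dataclass
class Client:
    model_name: str  # stand-in for LLMClientABC


@dataclass
class Clients:
    main: Client
    sub_clients: dict[str, Client] = field(default_factory=dict)

    def get_client(self, sub_agent_type: str | None = None) -> Client:
        if sub_agent_type is None:
            return self.main
        # Fall back to the main client when no dedicated sub-agent client exists.
        return self.sub_clients.get(sub_agent_type) or self.main


clients = Clients(main=Client("main-model"), sub_clients={"explorer": Client("small-model")})
assert clients.get_client("explorer").model_name == "small-model"
assert clients.get_client("unknown-sub-agent").model_name == "main-model"
assert clients.get_client().model_name == "main-model"
```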
@@ -7,6 +7,7 @@ from klaude_code.core.manager.llm_clients import LLMClients
 from klaude_code.llm.client import LLMClientABC
 from klaude_code.llm.registry import create_llm_client
 from klaude_code.protocol.sub_agent import iter_sub_agent_profiles
+from klaude_code.protocol.tools import SubAgentType
 from klaude_code.trace import DebugType, log_debug
 
 
@@ -18,10 +19,7 @@ def build_llm_clients(
     """Create an ``LLMClients`` bundle driven by application config."""
 
     # Resolve main agent LLM config
-    if model_override:
-        llm_config = config.get_model_config(model_override)
-    else:
-        llm_config = config.get_main_model_config()
+    llm_config = config.get_model_config(model_override) if model_override else config.get_main_model_config()
 
     log_debug(
        "Main LLM config",
@@ -30,29 +28,18 @@
        debug_type=DebugType.LLM_CONFIG,
    )
 
-    main_model_name = str(llm_config.model)
-
-    def _main_factory() -> LLMClientABC:
-        return create_llm_client(llm_config)
-
-    clients = LLMClients(
-        main_factory=_main_factory,
-        main_model_name=main_model_name,
-        main_llm_config=llm_config,
-    )
+    main_client = create_llm_client(llm_config)
+    sub_clients: dict[SubAgentType, LLMClientABC] = {}
 
     for profile in iter_sub_agent_profiles():
         model_name = config.subagent_models.get(profile.name)
         if not model_name:
            continue
 
-        if not profile.enabled_for_model(main_model_name):
+        if not profile.enabled_for_model(main_client.model_name):
            continue
 
-        def _factory(model_name_for_factory: str = model_name) -> LLMClientABC:
-            sub_llm_config = config.get_model_config(model_name_for_factory)
-            return create_llm_client(sub_llm_config)
-
-        clients.register_sub_client_factory(profile.name, _factory)
+        sub_llm_config = config.get_model_config(model_name)
+        sub_clients[profile.name] = create_llm_client(sub_llm_config)
 
-    return clients
+    return LLMClients(main=main_client, sub_clients=sub_clients)
@@ -43,7 +43,7 @@ class SubAgentManager:
         child_session = Session(work_dir=parent_session.work_dir)
         child_session.sub_agent_state = state
 
-        child_profile = self._model_profile_provider.build_profile_eager(
+        child_profile = self._model_profile_provider.build_profile(
            self._llm_clients.get_client(state.sub_agent_type),
            state.sub_agent_type,
        )
@@ -79,12 +79,12 @@ class SubAgentManager:
            raise
         except Exception as exc:  # pragma: no cover - defensive logging
            log_debug(
-                f"Subagent task failed: [{exc.__class__.__name__}] {str(exc)}",
+                f"Subagent task failed: [{exc.__class__.__name__}] {exc!s}",
                style="red",
                debug_type=DebugType.EXECUTION,
            )
            return SubAgentResult(
-                task_result=f"Subagent task failed: [{exc.__class__.__name__}] {str(exc)}",
+                task_result=f"Subagent task failed: [{exc.__class__.__name__}] {exc!s}",
                session_id="",
                error=True,
            )