klaude-code 2.2.0__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. klaude_code/app/runtime.py +2 -15
  2. klaude_code/cli/list_model.py +30 -13
  3. klaude_code/cli/main.py +26 -10
  4. klaude_code/config/assets/builtin_config.yaml +177 -310
  5. klaude_code/config/config.py +158 -21
  6. klaude_code/config/{select_model.py → model_matcher.py} +41 -16
  7. klaude_code/config/sub_agent_model_helper.py +217 -0
  8. klaude_code/config/thinking.py +2 -2
  9. klaude_code/const.py +1 -1
  10. klaude_code/core/agent_profile.py +43 -5
  11. klaude_code/core/executor.py +129 -47
  12. klaude_code/core/manager/llm_clients_builder.py +17 -11
  13. klaude_code/core/prompts/prompt-nano-banana.md +1 -1
  14. klaude_code/core/tool/file/diff_builder.py +25 -18
  15. klaude_code/core/tool/sub_agent_tool.py +2 -1
  16. klaude_code/llm/anthropic/client.py +12 -9
  17. klaude_code/llm/anthropic/input.py +54 -29
  18. klaude_code/llm/client.py +1 -1
  19. klaude_code/llm/codex/client.py +2 -2
  20. klaude_code/llm/google/client.py +7 -7
  21. klaude_code/llm/google/input.py +23 -2
  22. klaude_code/llm/input_common.py +2 -2
  23. klaude_code/llm/openai_compatible/client.py +3 -3
  24. klaude_code/llm/openai_compatible/input.py +22 -13
  25. klaude_code/llm/openai_compatible/stream.py +1 -1
  26. klaude_code/llm/openrouter/client.py +4 -4
  27. klaude_code/llm/openrouter/input.py +35 -25
  28. klaude_code/llm/responses/client.py +5 -5
  29. klaude_code/llm/responses/input.py +96 -57
  30. klaude_code/protocol/commands.py +1 -2
  31. klaude_code/protocol/events/__init__.py +7 -1
  32. klaude_code/protocol/events/chat.py +10 -0
  33. klaude_code/protocol/events/system.py +4 -0
  34. klaude_code/protocol/llm_param.py +1 -1
  35. klaude_code/protocol/model.py +0 -26
  36. klaude_code/protocol/op.py +17 -5
  37. klaude_code/protocol/op_handler.py +5 -0
  38. klaude_code/protocol/sub_agent/AGENTS.md +28 -0
  39. klaude_code/protocol/sub_agent/__init__.py +10 -14
  40. klaude_code/protocol/sub_agent/image_gen.py +2 -1
  41. klaude_code/session/codec.py +2 -6
  42. klaude_code/session/session.py +13 -3
  43. klaude_code/skill/assets/create-plan/SKILL.md +3 -5
  44. klaude_code/tui/command/__init__.py +3 -6
  45. klaude_code/tui/command/clear_cmd.py +0 -1
  46. klaude_code/tui/command/command_abc.py +6 -4
  47. klaude_code/tui/command/copy_cmd.py +10 -10
  48. klaude_code/tui/command/debug_cmd.py +11 -10
  49. klaude_code/tui/command/export_online_cmd.py +18 -23
  50. klaude_code/tui/command/fork_session_cmd.py +39 -43
  51. klaude_code/tui/command/model_cmd.py +10 -49
  52. klaude_code/tui/command/model_picker.py +142 -0
  53. klaude_code/tui/command/refresh_cmd.py +0 -1
  54. klaude_code/tui/command/registry.py +15 -21
  55. klaude_code/tui/command/resume_cmd.py +10 -16
  56. klaude_code/tui/command/status_cmd.py +8 -12
  57. klaude_code/tui/command/sub_agent_model_cmd.py +185 -0
  58. klaude_code/tui/command/terminal_setup_cmd.py +8 -11
  59. klaude_code/tui/command/thinking_cmd.py +4 -6
  60. klaude_code/tui/commands.py +5 -0
  61. klaude_code/tui/components/bash_syntax.py +1 -1
  62. klaude_code/tui/components/command_output.py +96 -0
  63. klaude_code/tui/components/common.py +1 -1
  64. klaude_code/tui/components/developer.py +3 -115
  65. klaude_code/tui/components/metadata.py +1 -63
  66. klaude_code/tui/components/rich/cjk_wrap.py +3 -2
  67. klaude_code/tui/components/rich/status.py +49 -3
  68. klaude_code/tui/components/rich/theme.py +2 -0
  69. klaude_code/tui/components/sub_agent.py +25 -46
  70. klaude_code/tui/components/welcome.py +99 -0
  71. klaude_code/tui/input/prompt_toolkit.py +19 -8
  72. klaude_code/tui/machine.py +5 -0
  73. klaude_code/tui/renderer.py +7 -8
  74. klaude_code/tui/runner.py +0 -6
  75. klaude_code/tui/terminal/selector.py +8 -6
  76. {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/METADATA +21 -74
  77. {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/RECORD +79 -76
  78. klaude_code/tui/command/help_cmd.py +0 -51
  79. klaude_code/tui/command/model_select.py +0 -84
  80. klaude_code/tui/command/release_notes_cmd.py +0 -85
  81. {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/WHEEL +0 -0
  82. {klaude_code-2.2.0.dist-info → klaude_code-2.4.0.dist-info}/entry_points.txt +0 -0
@@ -7,8 +7,12 @@ from dataclasses import dataclass
  from functools import cache
  from importlib.resources import files
  from pathlib import Path
- from typing import Any, Protocol
+ from typing import TYPE_CHECKING, Any, Protocol

+ if TYPE_CHECKING:
+     from klaude_code.config.config import Config
+
+ from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper
  from klaude_code.core.reminders import (
      at_file_reader_reminder,
      empty_todo_reminder,
@@ -23,7 +27,7 @@ from klaude_code.core.tool.report_back_tool import ReportBackTool
  from klaude_code.core.tool.tool_registry import get_tool_schemas
  from klaude_code.llm import LLMClientABC
  from klaude_code.protocol import llm_param, message, tools
- from klaude_code.protocol.sub_agent import get_sub_agent_profile, sub_agent_tool_names
+ from klaude_code.protocol.sub_agent import get_sub_agent_profile
  from klaude_code.session import Session

  type Reminder = Callable[[Session], Awaitable[message.DeveloperMessage | None]]
@@ -169,12 +173,14 @@ def load_system_prompt(
  def load_agent_tools(
      model_name: str,
      sub_agent_type: tools.SubAgentType | None = None,
+     config: Config | None = None,
  ) -> list[llm_param.ToolSchema]:
      """Get tools for an agent based on model and agent type.

      Args:
          model_name: The model name.
          sub_agent_type: If None, returns main agent tools. Otherwise returns sub-agent tools.
+         config: Config for checking sub-agent availability (e.g., image model availability).
      """

      if sub_agent_type is not None:
@@ -183,13 +189,20 @@ def load_agent_tools(

      # Main agent tools
      if "gpt-5" in model_name:
-         tool_names = [tools.BASH, tools.READ, tools.APPLY_PATCH, tools.UPDATE_PLAN]
+         tool_names: list[str] = [tools.BASH, tools.READ, tools.APPLY_PATCH, tools.UPDATE_PLAN]
      elif "gemini-3" in model_name:
          tool_names = [tools.BASH, tools.READ, tools.EDIT, tools.WRITE]
      else:
          tool_names = [tools.BASH, tools.READ, tools.EDIT, tools.WRITE, tools.TODO_WRITE]

-     tool_names.extend(sub_agent_tool_names(enabled_only=True, model_name=model_name))
+     if config is not None:
+         helper = SubAgentModelHelper(config)
+         tool_names.extend(helper.get_enabled_sub_agent_tool_names())
+     else:
+         from klaude_code.protocol.sub_agent import sub_agent_tool_names
+
+         tool_names.extend(sub_agent_tool_names(enabled_only=True))
+
      tool_names.extend([tools.MERMAID])
      # tool_names.extend([tools.MEMORY])
      return get_tool_schemas(tool_names)
@@ -249,10 +262,17 @@ class ModelProfileProvider(Protocol):
          output_schema: dict[str, Any] | None = None,
      ) -> AgentProfile: ...

+     def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
+         """Return set of sub-agent types enabled for this provider."""
+         ...
+

  class DefaultModelProfileProvider(ModelProfileProvider):
      """Default provider backed by global prompts/tool/reminder registries."""

+     def __init__(self, config: Config | None = None) -> None:
+         self._config = config
+
      def build_profile(
          self,
          llm_client: LLMClientABC,
@@ -264,13 +284,25 @@ class DefaultModelProfileProvider(ModelProfileProvider):
          profile = AgentProfile(
              llm_client=llm_client,
              system_prompt=load_system_prompt(model_name, llm_client.protocol, sub_agent_type),
-             tools=load_agent_tools(model_name, sub_agent_type),
+             tools=load_agent_tools(model_name, sub_agent_type, config=self._config),
              reminders=load_agent_reminders(model_name, sub_agent_type),
          )
          if output_schema:
              return with_structured_output(profile, output_schema)
          return profile

+     def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
+         if self._config is None:
+             from klaude_code.protocol.sub_agent import get_sub_agent_profile_by_tool, sub_agent_tool_names
+
+             return {
+                 profile.name
+                 for name in sub_agent_tool_names(enabled_only=True)
+                 if (profile := get_sub_agent_profile_by_tool(name)) is not None
+             }
+         helper = SubAgentModelHelper(self._config)
+         return helper.get_enabled_sub_agent_types()
+

  class VanillaModelProfileProvider(ModelProfileProvider):
      """Provider that strips prompts, reminders, and tools for vanilla mode."""
@@ -293,6 +325,9 @@ class VanillaModelProfileProvider(ModelProfileProvider):
              return with_structured_output(profile, output_schema)
          return profile

+     def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
+         return set()
+

  class NanoBananaModelProfileProvider(ModelProfileProvider):
      """Provider for the Nano Banana image generation model.
@@ -317,3 +352,6 @@ class NanoBananaModelProfileProvider(ModelProfileProvider):
          if output_schema:
              return with_structured_output(profile, output_schema)
          return profile
+
+     def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
+         return set()
@@ -15,6 +15,7 @@ from dataclasses import dataclass
  from pathlib import Path

  from klaude_code.config import load_config
+ from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper
  from klaude_code.core.agent import Agent
  from klaude_code.core.agent_profile import DefaultModelProfileProvider, ModelProfileProvider
  from klaude_code.core.manager import LLMClients, SubAgentManager
@@ -109,6 +110,15 @@ class AgentRuntime:
      def current_agent(self) -> Agent | None:
          return self._agent

+     def _get_sub_agent_models(self) -> dict[str, LLMConfigParameter]:
+         """Build a dict of sub-agent type to LLMConfigParameter."""
+         enabled = self._model_profile_provider.enabled_sub_agent_types()
+         return {
+             sub_agent_type: client.get_llm_config()
+             for sub_agent_type, client in self._llm_clients.sub_clients.items()
+             if sub_agent_type in enabled
+         }
+
      async def ensure_agent(self, session_id: str | None = None) -> Agent:
          """Return the active agent, creating or loading a session as needed."""

@@ -135,6 +145,7 @@ class AgentRuntime:
                  session_id=session.id,
                  work_dir=str(session.work_dir),
                  llm_config=self._llm_clients.main.get_llm_config(),
+                 sub_agent_models=self._get_sub_agent_models(),
              )
          )

@@ -151,27 +162,16 @@ class AgentRuntime:

      async def run_agent(self, operation: op.RunAgentOperation) -> None:
          agent = await self.ensure_agent(operation.session_id)
-
-         if operation.emit_user_message_event:
-             await self._emit_event(
-                 events.UserMessageEvent(
-                     content=operation.input.text,
-                     session_id=agent.session.id,
-                     images=operation.input.images,
-                 )
-             )
-
-         if operation.persist_user_input:
-             agent.session.append_history(
-                 [
-                     message.UserMessage(
-                         parts=message.parts_from_text_and_images(
-                             operation.input.text,
-                             operation.input.images,
-                         )
+         agent.session.append_history(
+             [
+                 message.UserMessage(
+                     parts=message.parts_from_text_and_images(
+                         operation.input.text,
+                         operation.input.images,
                      )
-                 ]
-             )
+                 )
+             ]
+         )

          existing_active = self._task_manager.get(operation.id)
          if existing_active is not None and not existing_active.task.done():
@@ -190,16 +190,19 @@ class AgentRuntime:
          new_session.model_thinking = agent.session.model_thinking
          agent.session = new_session

-         developer_item = message.DeveloperMessage(
-             parts=message.text_parts_from_str("started new conversation"),
-             ui_extra=model.build_command_output_extra(commands.CommandName.CLEAR),
+         await self._emit_event(
+             events.CommandOutputEvent(
+                 session_id=agent.session.id,
+                 command_name=commands.CommandName.CLEAR,
+                 content="started new conversation",
+             )
          )
-         await self._emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
          await self._emit_event(
              events.WelcomeEvent(
                  session_id=agent.session.id,
                  work_dir=str(agent.session.work_dir),
                  llm_config=self._llm_clients.main.get_llm_config(),
+                 sub_agent_models=self._get_sub_agent_models(),
              )
          )

@@ -223,6 +226,7 @@ class AgentRuntime:
                  session_id=target_session.id,
                  work_dir=str(target_session.work_dir),
                  llm_config=self._llm_clients.main.get_llm_config(),
+                 sub_agent_models=self._get_sub_agent_models(),
              )
          )

@@ -406,6 +410,15 @@ class ExecutorContext:
          """Emit an event to the UI display system."""
          await self.event_queue.put(event)

+     def _get_sub_agent_models(self) -> dict[str, LLMConfigParameter]:
+         """Build a dict of sub-agent type to LLMConfigParameter."""
+         enabled = self.model_profile_provider.enabled_sub_agent_types()
+         return {
+             sub_agent_type: client.get_llm_config()
+             for sub_agent_type, client in self.llm_clients.sub_clients.items()
+             if sub_agent_type in enabled
+         }
+
      def current_session_id(self) -> str | None:
          """Return the primary active session id, if any.

@@ -438,12 +451,13 @@ class ExecutorContext:

          if operation.emit_switch_message:
              default_note = " (saved as default)" if operation.save_as_default else ""
-             developer_item = message.DeveloperMessage(
-                 parts=message.text_parts_from_str(f"Switched to: {llm_config.model}{default_note}"),
-                 ui_extra=model.build_command_output_extra(commands.CommandName.MODEL),
+             await self.emit_event(
+                 events.CommandOutputEvent(
+                     session_id=agent.session.id,
+                     command_name=commands.CommandName.MODEL,
+                     content=f"Switched to: {llm_config.model_id}{default_note}",
+                 )
              )
-             agent.session.append_history([developer_item])
-             await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))

          if self._on_model_change is not None:
              self._on_model_change(llm_client_name)
@@ -455,6 +469,7 @@ class ExecutorContext:
                      llm_config=llm_config,
                      work_dir=str(agent.session.work_dir),
                      show_klaude_code_info=False,
+                     show_sub_agent_models=False,
                  )
              )

@@ -487,12 +502,13 @@ class ExecutorContext:
          new_status = _format_thinking_for_display(operation.thinking)

          if operation.emit_switch_message:
-             developer_item = message.DeveloperMessage(
-                 parts=message.text_parts_from_str(f"Thinking changed: {current} -> {new_status}"),
-                 ui_extra=model.build_command_output_extra(commands.CommandName.THINKING),
+             await self.emit_event(
+                 events.CommandOutputEvent(
+                     session_id=agent.session.id,
+                     command_name=commands.CommandName.THINKING,
+                     content=f"Thinking changed: {current} -> {new_status}",
+                 )
              )
-             agent.session.append_history([developer_item])
-             await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))

          if operation.emit_welcome_event:
              await self.emit_event(
@@ -501,9 +517,62 @@ class ExecutorContext:
                      work_dir=str(agent.session.work_dir),
                      llm_config=agent.profile.llm_client.get_llm_config(),
                      show_klaude_code_info=False,
+                     show_sub_agent_models=False,
                  )
              )

+     async def handle_change_sub_agent_model(self, operation: op.ChangeSubAgentModelOperation) -> None:
+         """Handle a change sub-agent model operation."""
+         agent = await self._agent_runtime.ensure_agent(operation.session_id)
+         config = load_config()
+
+         helper = SubAgentModelHelper(config)
+
+         sub_agent_type = operation.sub_agent_type
+         model_name = operation.model_name
+
+         if model_name is None:
+             # Clear explicit override and revert to sub-agent default behavior.
+             behavior = helper.describe_empty_model_config_behavior(
+                 sub_agent_type,
+                 main_model_name=self.llm_clients.main.model_name,
+             )
+
+             resolved = helper.resolve_default_model_override(sub_agent_type)
+             if resolved is None:
+                 # Default: inherit from main client.
+                 self.llm_clients.sub_clients.pop(sub_agent_type, None)
+             else:
+                 # Default: use a dedicated model (e.g. first available image model).
+                 llm_config = config.get_model_config(resolved)
+                 new_client = create_llm_client(llm_config)
+                 self.llm_clients.sub_clients[sub_agent_type] = new_client
+
+             display_model = f"({behavior.description})"
+         else:
+             # Create new client for the sub-agent
+             llm_config = config.get_model_config(model_name)
+             new_client = create_llm_client(llm_config)
+             self.llm_clients.sub_clients[sub_agent_type] = new_client
+             display_model = new_client.model_name
+
+         if operation.save_as_default:
+             if model_name is None:
+                 # Remove from config to inherit
+                 config.sub_agent_models.pop(sub_agent_type, None)
+             else:
+                 config.sub_agent_models[sub_agent_type] = model_name
+             await config.save()
+
+         saved_note = " (saved in ~/.klaude/klaude-config.yaml)" if operation.save_as_default else ""
+         await self.emit_event(
+             events.CommandOutputEvent(
+                 session_id=agent.session.id,
+                 command_name=commands.CommandName.SUB_AGENT_MODEL,
+                 content=f"{sub_agent_type} model: {display_model}{saved_note}",
+             )
+         )
+
      async def handle_clear_session(self, operation: op.ClearSessionOperation) -> None:
          await self._agent_runtime.clear_session(operation.session_id)

@@ -518,21 +587,24 @@ class ExecutorContext:
              await asyncio.to_thread(output_path.parent.mkdir, parents=True, exist_ok=True)
              await asyncio.to_thread(output_path.write_text, html_doc, "utf-8")
              await asyncio.to_thread(self._open_file, output_path)
-             developer_item = message.DeveloperMessage(
-                 parts=message.text_parts_from_str(f"Session exported and opened: {output_path}"),
-                 ui_extra=model.build_command_output_extra(commands.CommandName.EXPORT),
+             await self.emit_event(
+                 events.CommandOutputEvent(
+                     session_id=agent.session.id,
+                     command_name=commands.CommandName.EXPORT,
+                     content=f"Session exported and opened: {output_path}",
+                 )
              )
-             agent.session.append_history([developer_item])
-             await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
          except Exception as exc:  # pragma: no cover
              import traceback

-             developer_item = message.DeveloperMessage(
-                 parts=message.text_parts_from_str(f"Failed to export session: {exc}\n{traceback.format_exc()}"),
-                 ui_extra=model.build_command_output_extra(commands.CommandName.EXPORT, is_error=True),
+             await self.emit_event(
+                 events.CommandOutputEvent(
+                     session_id=agent.session.id,
+                     command_name=commands.CommandName.EXPORT,
+                     content=f"Failed to export session: {exc}\n{traceback.format_exc()}",
+                     is_error=True,
+                 )
              )
-             agent.session.append_history([developer_item])
-             await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))

      def _resolve_export_output_path(self, raw: str | None, session: Session) -> Path:
          trimmed = (raw or "").strip()
@@ -626,12 +698,15 @@ class Executor:
              Unique submission ID for tracking
          """

-         submission = op.Submission(id=operation.id, operation=operation)
-         await self.submission_queue.put(submission)
+         if operation.id in self._completion_events:
+             raise RuntimeError(f"Submission already registered: {operation.id}")

-         # Create completion event for tracking
+         # Create completion event before queueing to avoid races.
          self._completion_events[operation.id] = asyncio.Event()

+         submission = op.Submission(id=operation.id, operation=operation)
+         await self.submission_queue.put(submission)
+
          log_debug(
              f"Submitted operation {operation.type} with ID {operation.id}",
              style="blue",
@@ -711,9 +786,16 @@ class Executor:
          if tasks_to_await:
              await asyncio.gather(*tasks_to_await, return_exceptions=True)

+         if self._background_tasks:
+             await asyncio.gather(*self._background_tasks, return_exceptions=True)
+             self._background_tasks.clear()
+
          # Clear the active task manager
          self.context.task_manager.clear()

+         for event in self._completion_events.values():
+             event.set()
+
          # Send EndOperation to wake up the start() loop
          try:
              end_operation = op.EndOperation()
@@ -3,11 +3,11 @@
  from __future__ import annotations

  from klaude_code.config import Config
+ from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper
  from klaude_code.core.manager.llm_clients import LLMClients
  from klaude_code.llm.client import LLMClientABC
  from klaude_code.llm.registry import create_llm_client
  from klaude_code.log import DebugType, log_debug
- from klaude_code.protocol.sub_agent import iter_sub_agent_profiles
  from klaude_code.protocol.tools import SubAgentType


@@ -15,8 +15,15 @@ def build_llm_clients(
      config: Config,
      *,
      model_override: str | None = None,
+     skip_sub_agents: bool = False,
  ) -> LLMClients:
-     """Create an ``LLMClients`` bundle driven by application config."""
+     """Create an ``LLMClients`` bundle driven by application config.
+
+     Args:
+         config: Application configuration.
+         model_override: Override for the main model name.
+         skip_sub_agents: If True, skip initializing sub-agent clients (e.g., for vanilla/banana modes).
+     """

      # Resolve main agent LLM config
      model_name = model_override or config.main_model
@@ -32,17 +39,16 @@ def build_llm_clients(
      )

      main_client = create_llm_client(llm_config)
-     sub_clients: dict[SubAgentType, LLMClientABC] = {}

-     for profile in iter_sub_agent_profiles():
-         model_name = config.sub_agent_models.get(profile.name)
-         if not model_name:
-             continue
+     if skip_sub_agents:
+         return LLMClients(main=main_client)

-         if not profile.enabled_for_model(main_client.model_name):
-             continue
+     helper = SubAgentModelHelper(config)
+     sub_agent_configs = helper.build_sub_agent_client_configs()

-         sub_llm_config = config.get_model_config(model_name)
-         sub_clients[profile.name] = create_llm_client(sub_llm_config)
+     sub_clients: dict[SubAgentType, LLMClientABC] = {}
+     for sub_agent_type, sub_model_name in sub_agent_configs.items():
+         sub_llm_config = config.get_model_config(sub_model_name)
+         sub_clients[sub_agent_type] = create_llm_client(sub_llm_config)

      return LLMClients(main=main_client, sub_clients=sub_clients)
@@ -1 +1 @@
- You're a helpful art assistant
+ You're a helpful assistant with capabilities to generate images and edit images.
@@ -54,24 +54,31 @@ def _build_file_diff(before: str, after: str, *, file_path: str) -> model.DiffFi
          elif tag == "replace":
              old_block = before_lines[i1:i2]
              new_block = after_lines[j1:j2]
-             max_len = max(len(old_block), len(new_block))
-             for idx in range(max_len):
-                 old_line = old_block[idx] if idx < len(old_block) else None
-                 new_line = new_block[idx] if idx < len(new_block) else None
-                 if old_line is not None and new_line is not None:
-                     remove_spans, add_spans = _diff_line_spans(old_line, new_line)
-                     lines.append(_remove_line(remove_spans))
-                     lines.append(_add_line(add_spans, new_line_no))
-                     stats_remove += 1
-                     stats_add += 1
-                     new_line_no += 1
-                 elif old_line is not None:
-                     lines.append(_remove_line([model.DiffSpan(op="equal", text=old_line)]))
-                     stats_remove += 1
-                 elif new_line is not None:
-                     lines.append(_add_line([model.DiffSpan(op="equal", text=new_line)], new_line_no))
-                     stats_add += 1
-                     new_line_no += 1
+
+             # Emit replacement blocks in unified-diff style: all removals first, then all additions.
+             # This matches VSCode's readability (--- then +++), while keeping per-line char spans.
+             remove_block: list[list[model.DiffSpan]] = []
+             add_block: list[list[model.DiffSpan]] = []
+
+             paired_len = min(len(old_block), len(new_block))
+             for idx in range(paired_len):
+                 remove_spans, add_spans = _diff_line_spans(old_block[idx], new_block[idx])
+                 remove_block.append(remove_spans)
+                 add_block.append(add_spans)
+
+             for old_line in old_block[paired_len:]:
+                 remove_block.append([model.DiffSpan(op="equal", text=old_line)])
+             for new_line in new_block[paired_len:]:
+                 add_block.append([model.DiffSpan(op="equal", text=new_line)])
+
+             for spans in remove_block:
+                 lines.append(_remove_line(spans))
+                 stats_remove += 1
+
+             for spans in add_block:
+                 lines.append(_add_line(spans, new_line_no))
+                 stats_add += 1
+                 new_line_no += 1

      return model.DiffFileDiff(
          file_path=file_path,
@@ -31,11 +31,12 @@ class SubAgentTool(ToolABC):
      @classmethod
      def for_profile(cls, profile: SubAgentProfile) -> type[SubAgentTool]:
          """Create a tool class for a specific sub-agent profile."""
-         return type(
+         tool_cls = type(
              f"{profile.name}Tool",
              (SubAgentTool,),
              {"_profile": profile},
          )
+         return cast(type[SubAgentTool], tool_cls)

      @classmethod
      def metadata(cls) -> ToolMetadata:
@@ -16,6 +16,7 @@ from anthropic.types.beta.beta_raw_message_start_event import BetaRawMessageStar
  from anthropic.types.beta.beta_signature_delta import BetaSignatureDelta
  from anthropic.types.beta.beta_text_delta import BetaTextDelta
  from anthropic.types.beta.beta_thinking_delta import BetaThinkingDelta
+ from anthropic.types.beta.beta_tool_choice_auto_param import BetaToolChoiceAutoParam
  from anthropic.types.beta.beta_tool_use_block import BetaToolUseBlock
  from anthropic.types.beta.message_create_params import MessageCreateParamsStreaming

@@ -64,7 +65,7 @@ def build_payload(
          param: LLM call parameters.
          extra_betas: Additional beta flags to prepend to the betas list.
      """
-     messages = convert_history_to_input(param.input, param.model)
+     messages = convert_history_to_input(param.input, param.model_id)
      tools = convert_tool_schema(param.tools)
      system_messages = [msg for msg in param.input if isinstance(msg, message.SystemMessage)]
      system = convert_system_to_input(param.system, system_messages)
@@ -82,12 +83,14 @@ def build_payload(
      # Prepend extra betas, avoiding duplicates
      betas = [b for b in extra_betas if b not in betas] + betas

+     tool_choice: BetaToolChoiceAutoParam = {
+         "type": "auto",
+         "disable_parallel_tool_use": False,
+     }
+
      payload: MessageCreateParamsStreaming = {
-         "model": str(param.model),
-         "tool_choice": {
-             "type": "auto",
-             "disable_parallel_tool_use": False,
-         },
+         "model": str(param.model_id),
+         "tool_choice": tool_choice,
          "stream": True,
          "max_tokens": param.max_tokens or DEFAULT_MAX_TOKENS,
          "temperature": param.temperature or DEFAULT_TEMPERATURE,
@@ -183,12 +186,12 @@ async def parse_anthropic_stream(
          if accumulated_thinking:
              metadata_tracker.record_token()
              full_thinking = "".join(accumulated_thinking)
-             parts.append(message.ThinkingTextPart(text=full_thinking, model_id=str(param.model)))
+             parts.append(message.ThinkingTextPart(text=full_thinking, model_id=str(param.model_id)))
              if pending_signature:
                  parts.append(
                      message.ThinkingSignaturePart(
                          signature=pending_signature,
-                         model_id=str(param.model),
+                         model_id=str(param.model_id),
                          format="anthropic",
                      )
                  )
@@ -221,7 +224,7 @@ async def parse_anthropic_stream(
                  max_tokens=param.max_tokens,
              )
          )
-         metadata_tracker.set_model_name(str(param.model))
+         metadata_tracker.set_model_name(str(param.model_id))
          metadata_tracker.set_response_id(response_id)
          raw_stop_reason = getattr(event, "stop_reason", None)
          if isinstance(raw_stop_reason, str):