klaude-code 2.3.0-py3-none-any.whl → 2.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. klaude_code/cli/list_model.py +3 -3
  2. klaude_code/cli/main.py +2 -2
  3. klaude_code/config/assets/builtin_config.yaml +165 -307
  4. klaude_code/config/config.py +17 -17
  5. klaude_code/config/{select_model.py → model_matcher.py} +7 -7
  6. klaude_code/config/sub_agent_model_helper.py +1 -10
  7. klaude_code/config/thinking.py +2 -2
  8. klaude_code/core/agent_profile.py +9 -23
  9. klaude_code/core/executor.py +72 -70
  10. klaude_code/core/tool/file/diff_builder.py +25 -18
  11. klaude_code/llm/anthropic/client.py +5 -5
  12. klaude_code/llm/client.py +1 -1
  13. klaude_code/llm/codex/client.py +2 -2
  14. klaude_code/llm/google/client.py +6 -6
  15. klaude_code/llm/input_common.py +2 -2
  16. klaude_code/llm/openai_compatible/client.py +3 -3
  17. klaude_code/llm/openai_compatible/stream.py +1 -1
  18. klaude_code/llm/openrouter/client.py +4 -4
  19. klaude_code/llm/openrouter/input.py +1 -3
  20. klaude_code/llm/responses/client.py +5 -5
  21. klaude_code/protocol/events/__init__.py +7 -1
  22. klaude_code/protocol/events/chat.py +10 -0
  23. klaude_code/protocol/llm_param.py +1 -1
  24. klaude_code/protocol/model.py +0 -26
  25. klaude_code/protocol/op.py +0 -5
  26. klaude_code/session/session.py +4 -2
  27. klaude_code/tui/command/clear_cmd.py +0 -1
  28. klaude_code/tui/command/command_abc.py +6 -4
  29. klaude_code/tui/command/copy_cmd.py +10 -10
  30. klaude_code/tui/command/debug_cmd.py +11 -10
  31. klaude_code/tui/command/export_online_cmd.py +18 -23
  32. klaude_code/tui/command/fork_session_cmd.py +39 -43
  33. klaude_code/tui/command/model_cmd.py +5 -7
  34. klaude_code/tui/command/{model_select.py → model_picker.py} +3 -5
  35. klaude_code/tui/command/refresh_cmd.py +0 -1
  36. klaude_code/tui/command/registry.py +15 -21
  37. klaude_code/tui/command/resume_cmd.py +10 -16
  38. klaude_code/tui/command/status_cmd.py +8 -12
  39. klaude_code/tui/command/sub_agent_model_cmd.py +11 -16
  40. klaude_code/tui/command/terminal_setup_cmd.py +8 -11
  41. klaude_code/tui/command/thinking_cmd.py +4 -6
  42. klaude_code/tui/commands.py +5 -0
  43. klaude_code/tui/components/command_output.py +96 -0
  44. klaude_code/tui/components/developer.py +3 -110
  45. klaude_code/tui/components/welcome.py +2 -2
  46. klaude_code/tui/input/prompt_toolkit.py +6 -8
  47. klaude_code/tui/machine.py +5 -0
  48. klaude_code/tui/renderer.py +5 -5
  49. klaude_code/tui/runner.py +0 -6
  50. klaude_code/tui/terminal/selector.py +7 -8
  51. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/METADATA +21 -74
  52. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/RECORD +54 -53
  53. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/WHEEL +0 -0
  54. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/entry_points.txt +0 -0
klaude_code/config/{select_model.py → model_matcher.py} CHANGED
@@ -51,7 +51,7 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
     # Only show models from providers with valid API keys
     models: list[ModelEntry] = sorted(
         config.iter_model_entries(only_available=True),
-        key=lambda m: (m.model_name.lower(), m.provider.lower()),
+        key=lambda m: (m.provider.lower(), m.model_name.lower()),
     )
 
     if not models:
@@ -86,13 +86,13 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
         return ModelMatchResult(matched_model=None, filtered_models=exact_base_matches, filter_hint=filter_hint)
 
     preferred_lower = preferred.lower()
-    # Case-insensitive exact match (selector/model_name/model_params.model)
+    # Case-insensitive exact match (selector/model_name/model_id)
     exact_ci_matches = [
         m
         for m in models
         if preferred_lower == m.selector.lower()
         or preferred_lower == m.model_name.lower()
-        or preferred_lower == (m.model_params.model or "").lower()
+        or preferred_lower == (m.model_id or "").lower()
     ]
     if len(exact_ci_matches) == 1:
         return ModelMatchResult(
@@ -110,7 +110,7 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
         for m in models
         if preferred_norm == _normalize_model_key(m.selector)
         or preferred_norm == _normalize_model_key(m.model_name)
-        or preferred_norm == _normalize_model_key(m.model_params.model or "")
+        or preferred_norm == _normalize_model_key(m.model_id or "")
     ]
     if len(normalized_matches) == 1:
         return ModelMatchResult(
@@ -125,7 +125,7 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
         for m in models
         if preferred_norm in _normalize_model_key(m.selector)
         or preferred_norm in _normalize_model_key(m.model_name)
-        or preferred_norm in _normalize_model_key(m.model_params.model or "")
+        or preferred_norm in _normalize_model_key(m.model_id or "")
     ]
     if len(normalized_matches) == 1:
         return ModelMatchResult(
@@ -134,14 +134,14 @@ def match_model_from_config(preferred: str | None = None) -> ModelMatchResult:
             filter_hint=None,
         )
 
-    # Partial match (case-insensitive) on model_name or model_params.model.
+    # Partial match (case-insensitive) on model_name or model_id.
     # If normalized matching found candidates (even if multiple), prefer those as the filter set.
     matches = normalized_matches or [
         m
         for m in models
         if preferred_lower in m.selector.lower()
         or preferred_lower in m.model_name.lower()
-        or preferred_lower in (m.model_params.model or "").lower()
+        or preferred_lower in (m.model_id or "").lower()
     ]
     if len(matches) == 1:
         return ModelMatchResult(matched_model=matches[0].selector, filtered_models=models, filter_hint=None)
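
Net effect of these hunks: the picker now sorts by provider before model name, and every matching tier reads the raw identifier from the new model_id field instead of model_params.model. The lookup keeps its tiered shape: case-insensitive exact match, then normalized match, then substring match, with the first non-empty tier winning. A standalone sketch of that tier order, using simplified stand-ins (this ModelEntry and _normalize are illustrative, not the package's actual definitions):

import re
from dataclasses import dataclass


@dataclass
class ModelEntry:
    selector: str
    model_name: str
    model_id: str | None
    provider: str

    def keys(self) -> list[str]:
        return [self.selector, self.model_name, self.model_id or ""]


def _normalize(value: str) -> str:
    # Assumed normalization: lowercase with separators dropped ("GPT-4.1" -> "gpt41").
    return re.sub(r"[^a-z0-9]", "", value.lower())


def match(preferred: str, models: list[ModelEntry]) -> list[ModelEntry]:
    """Exact (case-insensitive) -> normalized -> substring; first non-empty tier wins."""
    p = preferred.lower()
    tiers = [
        [m for m in models if any(p == k.lower() for k in m.keys())],
        [m for m in models if any(_normalize(p) == _normalize(k) for k in m.keys())],
        [m for m in models if any(p in k.lower() for k in m.keys())],
    ]
    return next((tier for tier in tiers if tier), [])


models = sorted(
    [
        ModelEntry("sonnet", "claude-sonnet-4", "claude-sonnet-4-20250514", "anthropic"),
        ModelEntry("gpt", "gpt-4.1", "gpt-4.1", "openai"),
    ],
    # 2.4.1 sorts the picker by provider first, then model name.
    key=lambda m: (m.provider.lower(), m.model_name.lower()),
)
print([m.selector for m in match("Claude Sonnet 4", models)])  # -> ['sonnet'] via the normalized tier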
klaude_code/config/sub_agent_model_helper.py CHANGED
@@ -183,7 +183,7 @@ class SubAgentModelHelper:
         all_models = self._config.iter_model_entries(only_available=True)
 
         if profile.availability_requirement == AVAILABILITY_IMAGE_MODEL:
-            return [m for m in all_models if m.model_params.modalities and "image" in m.model_params.modalities]
+            return [m for m in all_models if m.modalities and "image" in m.modalities]
 
         return all_models
 
@@ -196,15 +196,6 @@ class SubAgentModelHelper:
             result.append(name)
         return result
 
-    def get_enabled_sub_agent_types(self) -> set[SubAgentType]:
-        """Return set of sub-agent types that are enabled and available."""
-        enabled: set[SubAgentType] = set()
-        for name in sub_agent_tool_names(enabled_only=True):
-            profile = get_sub_agent_profile_by_tool(name)
-            if profile is not None and self.check_availability_requirement(profile.availability_requirement):
-                enabled.add(profile.name)
-        return enabled
-
     def build_sub_agent_client_configs(self) -> dict[SubAgentType, str]:
         """Return model names for each sub-agent that needs a dedicated client."""
         result: dict[SubAgentType, str] = {}
klaude_code/config/thinking.py CHANGED
@@ -104,7 +104,7 @@ def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
         return "not set"
 
     if protocol == llm_param.LLMClientProtocol.OPENROUTER:
-        if is_openrouter_model_with_reasoning_effort(config.model):
+        if is_openrouter_model_with_reasoning_effort(config.model_id):
             if thinking.reasoning_effort:
                 return f"reasoning_effort={thinking.reasoning_effort}"
             else:
@@ -198,7 +198,7 @@ def get_thinking_picker_data(config: llm_param.LLMConfigParameter) -> ThinkingPi
         ThinkingPickerData with options and current value, or None if protocol doesn't support thinking.
     """
     protocol = config.protocol
-    model_name = config.model
+    model_name = config.model_id
     thinking = config.thinking
 
     if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX_OAUTH):
klaude_code/core/agent_profile.py CHANGED
@@ -262,10 +262,6 @@ class ModelProfileProvider(Protocol):
         output_schema: dict[str, Any] | None = None,
     ) -> AgentProfile: ...
 
-    def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
-        """Return set of sub-agent types enabled for this provider."""
-        ...
-
 
 class DefaultModelProfileProvider(ModelProfileProvider):
     """Default provider backed by global prompts/tool/reminder registries."""
@@ -281,28 +277,24 @@ class DefaultModelProfileProvider(ModelProfileProvider):
         output_schema: dict[str, Any] | None = None,
     ) -> AgentProfile:
         model_name = llm_client.model_name
+        llm_config = llm_client.get_llm_config()
+
+        # Image generation models should not have tools
+        if llm_config.modalities and "image" in llm_config.modalities:
+            agent_tools: list[llm_param.ToolSchema] = []
+        else:
+            agent_tools = load_agent_tools(model_name, sub_agent_type, config=self._config)
+
         profile = AgentProfile(
             llm_client=llm_client,
             system_prompt=load_system_prompt(model_name, llm_client.protocol, sub_agent_type),
-            tools=load_agent_tools(model_name, sub_agent_type, config=self._config),
+            tools=agent_tools,
             reminders=load_agent_reminders(model_name, sub_agent_type),
         )
         if output_schema:
            return with_structured_output(profile, output_schema)
        return profile
 
-    def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
-        if self._config is None:
-            from klaude_code.protocol.sub_agent import get_sub_agent_profile_by_tool, sub_agent_tool_names
-
-            return {
-                profile.name
-                for name in sub_agent_tool_names(enabled_only=True)
-                if (profile := get_sub_agent_profile_by_tool(name)) is not None
-            }
-        helper = SubAgentModelHelper(self._config)
-        return helper.get_enabled_sub_agent_types()
-
 
 class VanillaModelProfileProvider(ModelProfileProvider):
     """Provider that strips prompts, reminders, and tools for vanilla mode."""
@@ -325,9 +317,6 @@ class VanillaModelProfileProvider(ModelProfileProvider):
             return with_structured_output(profile, output_schema)
         return profile
 
-    def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
-        return set()
-
 
 class NanoBananaModelProfileProvider(ModelProfileProvider):
     """Provider for the Nano Banana image generation model.
@@ -352,6 +341,3 @@ class NanoBananaModelProfileProvider(ModelProfileProvider):
         if output_schema:
             return with_structured_output(profile, output_schema)
         return profile
-
-    def enabled_sub_agent_types(self) -> set[tools.SubAgentType]:
-        return set()
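
The build() change above is the behavioral heart of this file: when the configured model lists "image" among its modalities, the profile is built with no tool schemas at all, and the per-provider enabled_sub_agent_types() hook is deleted everywhere (its replacement shows up in executor.py below). A minimal sketch of the modality gate, using simplified stand-in types rather than the actual klaude_code classes:

from dataclasses import dataclass, field


@dataclass
class LLMConfig:
    model_id: str
    modalities: list[str] | None = None  # e.g. ["text", "image"]


@dataclass
class AgentProfile:
    system_prompt: str
    tools: list[dict] = field(default_factory=list)


def load_agent_tools(model_id: str) -> list[dict]:
    # Placeholder for the registry-backed loader.
    return [{"name": "read_file"}, {"name": "bash"}]


def build_profile(config: LLMConfig) -> AgentProfile:
    # Image generation models cannot run a tool-use loop, so sending
    # tool schemas would only pollute the request.
    if config.modalities and "image" in config.modalities:
        tools: list[dict] = []
    else:
        tools = load_agent_tools(config.model_id)
    return AgentProfile(system_prompt="...", tools=tools)


print(len(build_profile(LLMConfig("nano-banana", ["image"])).tools))  # 0
print(len(build_profile(LLMConfig("gpt-4.1")).tools))                 # 2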
klaude_code/core/executor.py CHANGED
@@ -17,14 +17,14 @@ from pathlib import Path
 from klaude_code.config import load_config
 from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper
 from klaude_code.core.agent import Agent
-from klaude_code.core.agent_profile import DefaultModelProfileProvider, ModelProfileProvider
+from klaude_code.core.agent_profile import AgentProfile, DefaultModelProfileProvider, ModelProfileProvider
 from klaude_code.core.manager import LLMClients, SubAgentManager
 from klaude_code.llm.registry import create_llm_client
 from klaude_code.log import DebugType, log_debug
 from klaude_code.protocol import commands, events, message, model, op
 from klaude_code.protocol.llm_param import LLMConfigParameter, Thinking
 from klaude_code.protocol.op_handler import OperationHandler
-from klaude_code.protocol.sub_agent import SubAgentResult
+from klaude_code.protocol.sub_agent import SubAgentResult, get_sub_agent_profile_by_tool
 from klaude_code.session.export import build_export_html, get_default_export_path
 from klaude_code.session.session import Session
@@ -110,13 +110,17 @@ class AgentRuntime:
     def current_agent(self) -> Agent | None:
         return self._agent
 
-    def _get_sub_agent_models(self) -> dict[str, LLMConfigParameter]:
-        """Build a dict of sub-agent type to LLMConfigParameter."""
-        enabled = self._model_profile_provider.enabled_sub_agent_types()
+    def _get_sub_agent_models(self, profile: AgentProfile) -> dict[str, LLMConfigParameter]:
+        """Build a dict of sub-agent type to LLMConfigParameter based on profile tools."""
+        enabled_types: set[str] = set()
+        for tool in profile.tools:
+            sub_profile = get_sub_agent_profile_by_tool(tool.name)
+            if sub_profile is not None:
+                enabled_types.add(sub_profile.name)
         return {
             sub_agent_type: client.get_llm_config()
             for sub_agent_type, client in self._llm_clients.sub_clients.items()
-            if sub_agent_type in enabled
+            if sub_agent_type in enabled_types
         }
 
     async def ensure_agent(self, session_id: str | None = None) -> Agent:
@@ -145,7 +149,7 @@
                 session_id=session.id,
                 work_dir=str(session.work_dir),
                 llm_config=self._llm_clients.main.get_llm_config(),
-                sub_agent_models=self._get_sub_agent_models(),
+                sub_agent_models=self._get_sub_agent_models(profile),
             )
         )
@@ -162,27 +166,16 @@
 
     async def run_agent(self, operation: op.RunAgentOperation) -> None:
         agent = await self.ensure_agent(operation.session_id)
-
-        if operation.emit_user_message_event:
-            await self._emit_event(
-                events.UserMessageEvent(
-                    content=operation.input.text,
-                    session_id=agent.session.id,
-                    images=operation.input.images,
-                )
-            )
-
-        if operation.persist_user_input:
-            agent.session.append_history(
-                [
-                    message.UserMessage(
-                        parts=message.parts_from_text_and_images(
-                            operation.input.text,
-                            operation.input.images,
+        agent.session.append_history(
+            [
+                message.UserMessage(
+                    parts=message.parts_from_text_and_images(
+                        operation.input.text,
+                        operation.input.images,
                     )
-                ]
-            )
+                )
+            ]
+        )
 
         existing_active = self._task_manager.get(operation.id)
         if existing_active is not None and not existing_active.task.done():
@@ -201,17 +194,19 @@
         new_session.model_thinking = agent.session.model_thinking
         agent.session = new_session
 
-        developer_item = message.DeveloperMessage(
-            parts=message.text_parts_from_str("started new conversation"),
-            ui_extra=model.build_command_output_extra(commands.CommandName.CLEAR),
+        await self._emit_event(
+            events.CommandOutputEvent(
+                session_id=agent.session.id,
+                command_name=commands.CommandName.CLEAR,
+                content="started new conversation",
+            )
         )
-        await self._emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
         await self._emit_event(
             events.WelcomeEvent(
                 session_id=agent.session.id,
                 work_dir=str(agent.session.work_dir),
                 llm_config=self._llm_clients.main.get_llm_config(),
-                sub_agent_models=self._get_sub_agent_models(),
+                sub_agent_models=self._get_sub_agent_models(agent.profile),
             )
         )
@@ -235,7 +230,7 @@
                 session_id=target_session.id,
                 work_dir=str(target_session.work_dir),
                 llm_config=self._llm_clients.main.get_llm_config(),
-                sub_agent_models=self._get_sub_agent_models(),
+                sub_agent_models=self._get_sub_agent_models(profile),
             )
         )
@@ -419,15 +414,6 @@ class ExecutorContext:
         """Emit an event to the UI display system."""
         await self.event_queue.put(event)
 
-    def _get_sub_agent_models(self) -> dict[str, LLMConfigParameter]:
-        """Build a dict of sub-agent type to LLMConfigParameter."""
-        enabled = self.model_profile_provider.enabled_sub_agent_types()
-        return {
-            sub_agent_type: client.get_llm_config()
-            for sub_agent_type, client in self.llm_clients.sub_clients.items()
-            if sub_agent_type in enabled
-        }
-
     def current_session_id(self) -> str | None:
         """Return the primary active session id, if any.
@@ -460,12 +446,13 @@
 
         if operation.emit_switch_message:
             default_note = " (saved as default)" if operation.save_as_default else ""
-            developer_item = message.DeveloperMessage(
-                parts=message.text_parts_from_str(f"Switched to: {llm_config.model}{default_note}"),
-                ui_extra=model.build_command_output_extra(commands.CommandName.MODEL),
+            await self.emit_event(
+                events.CommandOutputEvent(
+                    session_id=agent.session.id,
+                    command_name=commands.CommandName.MODEL,
+                    content=f"Switched to: {llm_config.model_id}{default_note}",
+                )
             )
-            agent.session.append_history([developer_item])
-            await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
 
         if self._on_model_change is not None:
             self._on_model_change(llm_client_name)
@@ -510,12 +497,13 @@
         new_status = _format_thinking_for_display(operation.thinking)
 
         if operation.emit_switch_message:
-            developer_item = message.DeveloperMessage(
-                parts=message.text_parts_from_str(f"Thinking changed: {current} -> {new_status}"),
-                ui_extra=model.build_command_output_extra(commands.CommandName.THINKING),
+            await self.emit_event(
+                events.CommandOutputEvent(
+                    session_id=agent.session.id,
+                    command_name=commands.CommandName.THINKING,
+                    content=f"Thinking changed: {current} -> {new_status}",
+                )
             )
-            agent.session.append_history([developer_item])
-            await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
 
         if operation.emit_welcome_event:
             await self.emit_event(
@@ -572,12 +560,13 @@
         await config.save()
 
         saved_note = " (saved in ~/.klaude/klaude-config.yaml)" if operation.save_as_default else ""
-        developer_item = message.DeveloperMessage(
-            parts=message.text_parts_from_str(f"{sub_agent_type} model: {display_model}{saved_note}"),
-            ui_extra=model.build_command_output_extra(commands.CommandName.SUB_AGENT_MODEL),
+        await self.emit_event(
+            events.CommandOutputEvent(
+                session_id=agent.session.id,
+                command_name=commands.CommandName.SUB_AGENT_MODEL,
+                content=f"{sub_agent_type} model: {display_model}{saved_note}",
+            )
         )
-        agent.session.append_history([developer_item])
-        await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
 
     async def handle_clear_session(self, operation: op.ClearSessionOperation) -> None:
         await self._agent_runtime.clear_session(operation.session_id)
@@ -593,21 +582,24 @@
             await asyncio.to_thread(output_path.parent.mkdir, parents=True, exist_ok=True)
             await asyncio.to_thread(output_path.write_text, html_doc, "utf-8")
             await asyncio.to_thread(self._open_file, output_path)
-            developer_item = message.DeveloperMessage(
-                parts=message.text_parts_from_str(f"Session exported and opened: {output_path}"),
-                ui_extra=model.build_command_output_extra(commands.CommandName.EXPORT),
+            await self.emit_event(
+                events.CommandOutputEvent(
+                    session_id=agent.session.id,
+                    command_name=commands.CommandName.EXPORT,
+                    content=f"Session exported and opened: {output_path}",
+                )
             )
-            agent.session.append_history([developer_item])
-            await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
         except Exception as exc:  # pragma: no cover
             import traceback
 
-            developer_item = message.DeveloperMessage(
-                parts=message.text_parts_from_str(f"Failed to export session: {exc}\n{traceback.format_exc()}"),
-                ui_extra=model.build_command_output_extra(commands.CommandName.EXPORT, is_error=True),
+            await self.emit_event(
+                events.CommandOutputEvent(
+                    session_id=agent.session.id,
+                    command_name=commands.CommandName.EXPORT,
+                    content=f"Failed to export session: {exc}\n{traceback.format_exc()}",
+                    is_error=True,
+                )
             )
-            agent.session.append_history([developer_item])
-            await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
 
     def _resolve_export_output_path(self, raw: str | None, session: Session) -> Path:
         trimmed = (raw or "").strip()
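
Every command handler above follows the same migration: instead of building a DeveloperMessage, appending it to session history, and wrapping it in a DeveloperMessageEvent, the handler now emits a single CommandOutputEvent, so command output stays in the UI and out of the model-visible transcript. The event shape below is inferred from these call sites only; the real definition lives in klaude_code/protocol/events/chat.py and may differ:

from typing import Awaitable, Callable

from pydantic import BaseModel


class CommandOutputEvent(BaseModel):
    # Fields inferred from the call sites in this diff.
    session_id: str
    command_name: str
    content: str
    is_error: bool = False


async def announce_model_switch(
    emit: Callable[[CommandOutputEvent], Awaitable[None]],
    session_id: str,
    model_id: str,
) -> None:
    # 2.3.0: DeveloperMessage -> session.append_history -> DeveloperMessageEvent.
    # 2.4.1: one UI-only event; nothing is persisted to history.
    await emit(CommandOutputEvent(
        session_id=session_id,
        command_name="model",
        content=f"Switched to: {model_id}",
    ))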
@@ -701,12 +693,15 @@ class Executor:
             Unique submission ID for tracking
         """
 
-        submission = op.Submission(id=operation.id, operation=operation)
-        await self.submission_queue.put(submission)
+        if operation.id in self._completion_events:
+            raise RuntimeError(f"Submission already registered: {operation.id}")
 
-        # Create completion event for tracking
+        # Create completion event before queueing to avoid races.
         self._completion_events[operation.id] = asyncio.Event()
 
+        submission = op.Submission(id=operation.id, operation=operation)
+        await self.submission_queue.put(submission)
+
         log_debug(
             f"Submitted operation {operation.type} with ID {operation.id}",
             style="blue",
@@ -786,9 +781,16 @@ class Executor:
         if tasks_to_await:
             await asyncio.gather(*tasks_to_await, return_exceptions=True)
 
+        if self._background_tasks:
+            await asyncio.gather(*self._background_tasks, return_exceptions=True)
+            self._background_tasks.clear()
+
         # Clear the active task manager
         self.context.task_manager.clear()
 
+        for event in self._completion_events.values():
+            event.set()
+
         # Send EndOperation to wake up the start() loop
         try:
             end_operation = op.EndOperation()
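
The submit() reordering and the new drain logic close two gaps in completion tracking: the completion event now exists before the operation can possibly finish, and shutdown resolves every outstanding event so no waiter hangs. A condensed, runnable sketch of the pattern (an assumed structure, not the actual Executor):

import asyncio


class MiniExecutor:
    def __init__(self) -> None:
        self.queue: asyncio.Queue = asyncio.Queue()
        self._completion_events: dict[str, asyncio.Event] = {}

    async def submit(self, op_id: str, payload: object) -> None:
        if op_id in self._completion_events:
            raise RuntimeError(f"Submission already registered: {op_id}")
        # Register the event *before* enqueueing: a fast consumer could
        # otherwise finish the operation before wait_for() has an event.
        self._completion_events[op_id] = asyncio.Event()
        await self.queue.put((op_id, payload))

    def mark_done(self, op_id: str) -> None:
        self._completion_events[op_id].set()

    async def wait_for(self, op_id: str) -> None:
        await self._completion_events[op_id].wait()

    def shutdown(self) -> None:
        # Resolve every pending completion so drain cannot strand a waiter.
        for event in self._completion_events.values():
            event.set()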
klaude_code/core/tool/file/diff_builder.py CHANGED
@@ -54,24 +54,31 @@ def _build_file_diff(before: str, after: str, *, file_path: str) -> model.DiffFi
         elif tag == "replace":
             old_block = before_lines[i1:i2]
             new_block = after_lines[j1:j2]
-            max_len = max(len(old_block), len(new_block))
-            for idx in range(max_len):
-                old_line = old_block[idx] if idx < len(old_block) else None
-                new_line = new_block[idx] if idx < len(new_block) else None
-                if old_line is not None and new_line is not None:
-                    remove_spans, add_spans = _diff_line_spans(old_line, new_line)
-                    lines.append(_remove_line(remove_spans))
-                    lines.append(_add_line(add_spans, new_line_no))
-                    stats_remove += 1
-                    stats_add += 1
-                    new_line_no += 1
-                elif old_line is not None:
-                    lines.append(_remove_line([model.DiffSpan(op="equal", text=old_line)]))
-                    stats_remove += 1
-                elif new_line is not None:
-                    lines.append(_add_line([model.DiffSpan(op="equal", text=new_line)], new_line_no))
-                    stats_add += 1
-                    new_line_no += 1
+
+            # Emit replacement blocks in unified-diff style: all removals first, then all additions.
+            # This matches VSCode's readability (--- then +++), while keeping per-line char spans.
+            remove_block: list[list[model.DiffSpan]] = []
+            add_block: list[list[model.DiffSpan]] = []
+
+            paired_len = min(len(old_block), len(new_block))
+            for idx in range(paired_len):
+                remove_spans, add_spans = _diff_line_spans(old_block[idx], new_block[idx])
+                remove_block.append(remove_spans)
+                add_block.append(add_spans)
+
+            for old_line in old_block[paired_len:]:
+                remove_block.append([model.DiffSpan(op="equal", text=old_line)])
+            for new_line in new_block[paired_len:]:
+                add_block.append([model.DiffSpan(op="equal", text=new_line)])
+
+            for spans in remove_block:
+                lines.append(_remove_line(spans))
+                stats_remove += 1
+
+            for spans in add_block:
+                lines.append(_add_line(spans, new_line_no))
+                stats_add += 1
+                new_line_no += 1
 
     return model.DiffFileDiff(
         file_path=file_path,
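
The rewrite changes how a "replace" opcode is rendered: instead of interleaving -/+ pairs line by line, all removals are emitted first and then all additions, while character-level spans are still computed for the pairable prefix. A tiny standalone illustration of the grouping with difflib:

import difflib

before = ["alpha", "beta", "gamma"]
after = ["alpha", "BETA", "GAMMA", "delta"]

for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, before, after).get_opcodes():
    if tag != "replace":
        continue
    old_block, new_block = before[i1:i2], after[j1:j2]
    # All removals first, then all additions -- unified-diff style,
    # rather than alternating -/+ pairs line by line.
    for line in old_block:
        print(f"- {line}")
    for line in new_block:
        print(f"+ {line}")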
klaude_code/llm/anthropic/client.py CHANGED
@@ -65,7 +65,7 @@ def build_payload(
         param: LLM call parameters.
         extra_betas: Additional beta flags to prepend to the betas list.
     """
-    messages = convert_history_to_input(param.input, param.model)
+    messages = convert_history_to_input(param.input, param.model_id)
     tools = convert_tool_schema(param.tools)
     system_messages = [msg for msg in param.input if isinstance(msg, message.SystemMessage)]
     system = convert_system_to_input(param.system, system_messages)
@@ -89,7 +89,7 @@ def build_payload(
     }
 
     payload: MessageCreateParamsStreaming = {
-        "model": str(param.model),
+        "model": str(param.model_id),
         "tool_choice": tool_choice,
         "stream": True,
         "max_tokens": param.max_tokens or DEFAULT_MAX_TOKENS,
@@ -186,12 +186,12 @@ async def parse_anthropic_stream(
     if accumulated_thinking:
         metadata_tracker.record_token()
         full_thinking = "".join(accumulated_thinking)
-        parts.append(message.ThinkingTextPart(text=full_thinking, model_id=str(param.model)))
+        parts.append(message.ThinkingTextPart(text=full_thinking, model_id=str(param.model_id)))
     if pending_signature:
         parts.append(
             message.ThinkingSignaturePart(
                 signature=pending_signature,
-                model_id=str(param.model),
+                model_id=str(param.model_id),
                 format="anthropic",
             )
         )
@@ -224,7 +224,7 @@ async def parse_anthropic_stream(
             max_tokens=param.max_tokens,
         )
     )
-    metadata_tracker.set_model_name(str(param.model))
+    metadata_tracker.set_model_name(str(param.model_id))
     metadata_tracker.set_response_id(response_id)
     raw_stop_reason = getattr(event, "stop_reason", None)
     if isinstance(raw_stop_reason, str):
klaude_code/llm/client.py CHANGED
@@ -25,7 +25,7 @@ class LLMClientABC(ABC):
 
     @property
     def model_name(self) -> str:
-        return self._config.model or ""
+        return self._config.model_id or ""
 
     @property
     def protocol(self) -> llm_param.LLMClientProtocol:
klaude_code/llm/codex/client.py CHANGED
@@ -31,13 +31,13 @@ from klaude_code.protocol import llm_param, message
 
 def build_payload(param: llm_param.LLMCallParameter) -> ResponseCreateParamsStreaming:
     """Build Codex API request parameters."""
-    inputs = convert_history_to_input(param.input, param.model)
+    inputs = convert_history_to_input(param.input, param.model_id)
     tools = convert_tool_schema(param.tools)
 
     session_id = param.session_id or ""
 
     payload: ResponseCreateParamsStreaming = {
-        "model": str(param.model),
+        "model": str(param.model_id),
         "tool_choice": "auto",
         "parallel_tool_calls": True,
         "include": [
klaude_code/llm/google/client.py CHANGED
@@ -163,7 +163,7 @@ async def parse_google_stream(
     assistant_parts.append(
         message.ThinkingTextPart(
             text="".join(accumulated_thoughts),
-            model_id=str(param.model),
+            model_id=str(param.model_id),
         )
     )
     accumulated_thoughts.clear()
@@ -171,7 +171,7 @@ async def parse_google_stream(
     assistant_parts.append(
         message.ThinkingSignaturePart(
             signature=thought_signature,
-            model_id=str(param.model),
+            model_id=str(param.model_id),
             format="google_thought_signature",
         )
     )
@@ -301,7 +301,7 @@ async def parse_google_stream(
     usage = _usage_from_metadata(last_usage_metadata, context_limit=param.context_limit, max_tokens=param.max_tokens)
     if usage is not None:
         metadata_tracker.set_usage(usage)
-    metadata_tracker.set_model_name(str(param.model))
+    metadata_tracker.set_model_name(str(param.model_id))
     metadata_tracker.set_response_id(response_id)
     metadata = metadata_tracker.finalize()
     yield message.AssistantMessage(
@@ -336,13 +336,13 @@ class GoogleClient(LLMClientABC):
         param = apply_config_defaults(param, self.get_llm_config())
         metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
 
-        contents = convert_history_to_contents(param.input, model_name=str(param.model))
+        contents = convert_history_to_contents(param.input, model_name=str(param.model_id))
         config = _build_config(param)
 
         log_debug(
             json.dumps(
                 {
-                    "model": str(param.model),
+                    "model": str(param.model_id),
                     "contents": [c.model_dump(exclude_none=True) for c in contents],
                     "config": config.model_dump(exclude_none=True),
                 },
@@ -354,7 +354,7 @@ class GoogleClient(LLMClientABC):
 
         try:
             stream = await self.client.aio.models.generate_content_stream(
-                model=str(param.model),
+                model=str(param.model_id),
                 contents=cast(Any, contents),
                 config=config,
             )
klaude_code/llm/input_common.py CHANGED
@@ -165,8 +165,8 @@ def split_thinking_parts(
 
 def apply_config_defaults(param: "LLMCallParameter", config: "LLMConfigParameter") -> "LLMCallParameter":
     """Apply config defaults to LLM call parameters."""
-    if param.model is None:
-        param.model = config.model
+    if param.model_id is None:
+        param.model_id = config.model_id
     if param.temperature is None:
         param.temperature = config.temperature
     if param.max_tokens is None:
klaude_code/llm/openai_compatible/client.py CHANGED
@@ -19,7 +19,7 @@ from klaude_code.protocol import llm_param, message
 
 def build_payload(param: llm_param.LLMCallParameter) -> tuple[CompletionCreateParamsStreaming, dict[str, object]]:
     """Build OpenAI API request parameters."""
-    messages = convert_history_to_input(param.input, param.system, param.model)
+    messages = convert_history_to_input(param.input, param.system, param.model_id)
     tools = convert_tool_schema(param.tools)
 
     extra_body: dict[str, object] = {}
@@ -31,7 +31,7 @@ def build_payload(param: llm_param.LLMCallParameter) -> tuple[CompletionCreatePa
     }
 
     payload: CompletionCreateParamsStreaming = {
-        "model": str(param.model),
+        "model": str(param.model_id),
         "tool_choice": "auto",
         "parallel_tool_calls": True,
         "stream": True,
@@ -108,7 +108,7 @@ class OpenAICompatibleClient(LLMClientABC):
             return
 
         reasoning_handler = DefaultReasoningHandler(
-            param_model=str(param.model),
+            param_model=str(param.model_id),
             response_id=None,
         )
klaude_code/llm/openai_compatible/stream.py CHANGED
@@ -179,7 +179,7 @@ async def parse_chat_completions_stream(
     """
 
     state = StreamStateManager(
-        param_model=str(param.model),
+        param_model=str(param.model_id),
         reasoning_flusher=reasoning_handler.flush,
     )