klaude-code 2.3.0__py3-none-any.whl → 2.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. klaude_code/cli/list_model.py +3 -3
  2. klaude_code/cli/main.py +2 -2
  3. klaude_code/config/assets/builtin_config.yaml +165 -307
  4. klaude_code/config/config.py +17 -17
  5. klaude_code/config/{select_model.py → model_matcher.py} +7 -7
  6. klaude_code/config/sub_agent_model_helper.py +1 -10
  7. klaude_code/config/thinking.py +2 -2
  8. klaude_code/core/agent_profile.py +9 -23
  9. klaude_code/core/executor.py +72 -70
  10. klaude_code/core/tool/file/diff_builder.py +25 -18
  11. klaude_code/llm/anthropic/client.py +5 -5
  12. klaude_code/llm/client.py +1 -1
  13. klaude_code/llm/codex/client.py +2 -2
  14. klaude_code/llm/google/client.py +6 -6
  15. klaude_code/llm/input_common.py +2 -2
  16. klaude_code/llm/openai_compatible/client.py +3 -3
  17. klaude_code/llm/openai_compatible/stream.py +1 -1
  18. klaude_code/llm/openrouter/client.py +4 -4
  19. klaude_code/llm/openrouter/input.py +1 -3
  20. klaude_code/llm/responses/client.py +5 -5
  21. klaude_code/protocol/events/__init__.py +7 -1
  22. klaude_code/protocol/events/chat.py +10 -0
  23. klaude_code/protocol/llm_param.py +1 -1
  24. klaude_code/protocol/model.py +0 -26
  25. klaude_code/protocol/op.py +0 -5
  26. klaude_code/session/session.py +4 -2
  27. klaude_code/tui/command/clear_cmd.py +0 -1
  28. klaude_code/tui/command/command_abc.py +6 -4
  29. klaude_code/tui/command/copy_cmd.py +10 -10
  30. klaude_code/tui/command/debug_cmd.py +11 -10
  31. klaude_code/tui/command/export_online_cmd.py +18 -23
  32. klaude_code/tui/command/fork_session_cmd.py +39 -43
  33. klaude_code/tui/command/model_cmd.py +5 -7
  34. klaude_code/tui/command/{model_select.py → model_picker.py} +3 -5
  35. klaude_code/tui/command/refresh_cmd.py +0 -1
  36. klaude_code/tui/command/registry.py +15 -21
  37. klaude_code/tui/command/resume_cmd.py +10 -16
  38. klaude_code/tui/command/status_cmd.py +8 -12
  39. klaude_code/tui/command/sub_agent_model_cmd.py +11 -16
  40. klaude_code/tui/command/terminal_setup_cmd.py +8 -11
  41. klaude_code/tui/command/thinking_cmd.py +4 -6
  42. klaude_code/tui/commands.py +5 -0
  43. klaude_code/tui/components/command_output.py +96 -0
  44. klaude_code/tui/components/developer.py +3 -110
  45. klaude_code/tui/components/welcome.py +2 -2
  46. klaude_code/tui/input/prompt_toolkit.py +6 -8
  47. klaude_code/tui/machine.py +5 -0
  48. klaude_code/tui/renderer.py +5 -5
  49. klaude_code/tui/runner.py +0 -6
  50. klaude_code/tui/terminal/selector.py +7 -8
  51. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/METADATA +21 -74
  52. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/RECORD +54 -53
  53. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/WHEEL +0 -0
  54. {klaude_code-2.3.0.dist-info → klaude_code-2.4.1.dist-info}/entry_points.txt +0 -0

klaude_code/llm/openrouter/client.py
@@ -30,7 +30,7 @@ def build_payload(
     param: llm_param.LLMCallParameter,
 ) -> tuple[CompletionCreateParamsStreaming, dict[str, object], dict[str, str]]:
     """Build OpenRouter API request parameters."""
-    messages = convert_history_to_input(param.input, param.system, param.model)
+    messages = convert_history_to_input(param.input, param.system, param.model_id)
     tools = convert_tool_schema(param.tools)

     extra_body: dict[str, object] = {
@@ -66,13 +66,13 @@ def build_payload(
     if param.provider_routing:
         extra_body["provider"] = param.provider_routing.model_dump(exclude_none=True)

-    if is_claude_model(param.model):
+    if is_claude_model(param.model_id):
         extra_headers["x-anthropic-beta"] = (
             f"{ANTHROPIC_BETA_FINE_GRAINED_TOOL_STREAMING},{ANTHROPIC_BETA_INTERLEAVED_THINKING}"
         )

     payload: CompletionCreateParamsStreaming = {
-        "model": str(param.model),
+        "model": str(param.model_id),
         "tool_choice": "auto",
         "parallel_tool_calls": True,
         "stream": True,
@@ -133,7 +133,7 @@ class OpenRouterClient(LLMClientABC):
             return

         reasoning_handler = ReasoningStreamHandler(
-            param_model=str(param.model),
+            param_model=str(param.model_id),
             response_id=None,
         )


klaude_code/llm/openrouter/input.py
@@ -115,9 +115,7 @@ def convert_history_to_input(
             )
         ]
         if system and use_cache_control
-        else (
-            [cast(chat.ChatCompletionMessageParam, {"role": "system", "content": system})] if system else []
-        )
+        else ([cast(chat.ChatCompletionMessageParam, {"role": "system", "content": system})] if system else [])
     )

     for msg, attachment in attach_developer_messages(history):

klaude_code/llm/responses/client.py
@@ -24,11 +24,11 @@ if TYPE_CHECKING:

 def build_payload(param: llm_param.LLMCallParameter) -> ResponseCreateParamsStreaming:
     """Build OpenAI Responses API request parameters."""
-    inputs = convert_history_to_input(param.input, param.model)
+    inputs = convert_history_to_input(param.input, param.model_id)
     tools = convert_tool_schema(param.tools)

     payload: ResponseCreateParamsStreaming = {
-        "model": str(param.model),
+        "model": str(param.model_id),
         "tool_choice": "auto",
         "parallel_tool_calls": True,
         "include": [
@@ -77,7 +77,7 @@ async def parse_responses_stream(
                 assistant_parts.append(
                     message.ThinkingTextPart(
                         text="".join(accumulated_thinking),
-                        model_id=str(param.model),
+                        model_id=str(param.model_id),
                     )
                 )
                 accumulated_thinking.clear()
@@ -85,7 +85,7 @@ async def parse_responses_stream(
                 assistant_parts.append(
                     message.ThinkingSignaturePart(
                         signature=pending_signature,
-                        model_id=str(param.model),
+                        model_id=str(param.model_id),
                         format="openai_reasoning",
                     )
                 )
@@ -197,7 +197,7 @@ async def parse_responses_stream(
                     max_tokens=param.max_tokens,
                 )
             )
-            metadata_tracker.set_model_name(str(param.model))
+            metadata_tracker.set_model_name(str(param.model_id))
            metadata_tracker.set_response_id(response_id)
            stop_reason = map_stop_reason(event.response.status, error_reason)
            if event.response.status != "completed":
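
Across the OpenRouter and Responses clients above (and, per the file list, the Anthropic, Google, codex and OpenAI-compatible clients as well), the payload builders now read param.model_id instead of param.model. A minimal sketch of the pattern, using a simplified stand-in for LLMCallParameter; only model_id and max_tokens are taken from this diff, everything else is illustrative:

# Simplified stand-in for klaude_code.protocol.llm_param.LLMCallParameter;
# the real class is a pydantic model with many more fields.
from dataclasses import dataclass


@dataclass
class LLMCallParameter:
    model_id: str  # renamed from `model` in 2.4.1
    max_tokens: int = 8192


def build_payload(param: LLMCallParameter) -> dict[str, object]:
    # Every payload builder in this release stringifies model_id rather than model.
    return {"model": str(param.model_id), "max_tokens": param.max_tokens, "stream": True}


print(build_payload(LLMCallParameter(model_id="provider/some-model")))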

klaude_code/protocol/events/__init__.py
@@ -1,7 +1,12 @@
 from __future__ import annotations

 from klaude_code.protocol.events.base import Event, ResponseEvent
-from klaude_code.protocol.events.chat import DeveloperMessageEvent, TodoChangeEvent, UserMessageEvent
+from klaude_code.protocol.events.chat import (
+    CommandOutputEvent,
+    DeveloperMessageEvent,
+    TodoChangeEvent,
+    UserMessageEvent,
+)
 from klaude_code.protocol.events.lifecycle import TaskFinishEvent, TaskStartEvent, TurnEndEvent, TurnStartEvent
 from klaude_code.protocol.events.metadata import TaskMetadataEvent, UsageEvent
 from klaude_code.protocol.events.streaming import (
@@ -30,6 +35,7 @@ __all__ = [
     "AssistantTextDeltaEvent",
     "AssistantTextEndEvent",
     "AssistantTextStartEvent",
+    "CommandOutputEvent",
     "DeveloperMessageEvent",
     "EndEvent",
     "ErrorEvent",

klaude_code/protocol/events/chat.py
@@ -1,6 +1,7 @@
 from __future__ import annotations

 from klaude_code.protocol import message, model
+from klaude_code.protocol.commands import CommandName

 from .base import Event

@@ -18,3 +19,12 @@ class DeveloperMessageEvent(Event):

 class TodoChangeEvent(Event):
     todos: list[model.TodoItem]
+
+
+class CommandOutputEvent(Event):
+    """Event for command output display. Not persisted to session history."""
+
+    command_name: CommandName | str
+    content: str = ""
+    ui_extra: model.ToolResultUIExtra | None = None
+    is_error: bool = False
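
CommandOutputEvent replaces the old DeveloperMessage plus build_command_output_extra plumbing for transient slash-command output (see the copy_cmd.py and debug_cmd.py hunks further down). A sketch of how a command now builds its result events, assuming only the fields shown in this hunk plus the session_id field inherited from Event:

# Sketch only; mirrors the _command_output helpers added later in this diff
# (copy_cmd.py, debug_cmd.py), not the package's exact code.
from klaude_code.protocol import events


def command_output_events(session_id: str, command_name: str, content: str, *, is_error: bool = False):
    # CommandOutputEvent is display-only: the TUI renders it immediately and,
    # per its docstring, it is never persisted to session history.
    return [
        events.CommandOutputEvent(
            session_id=session_id,
            command_name=command_name,
            content=content,
            is_error=is_error,
        )
    ]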

klaude_code/protocol/llm_param.py
@@ -119,7 +119,7 @@ class LLMConfigProviderParameter(BaseModel):


 class LLMConfigModelParameter(BaseModel):
-    model: str | None = None
+    model_id: str | None = None
     temperature: float | None = None
     max_tokens: int | None = None
     context_limit: int | None = None

klaude_code/protocol/model.py
@@ -5,7 +5,6 @@ from typing import Annotated, Any, Literal
 from pydantic import BaseModel, Field, computed_field

 from klaude_code.const import DEFAULT_MAX_TOKENS
-from klaude_code.protocol.commands import CommandName
 from klaude_code.protocol.tools import SubAgentType

 RoleType = Literal["system", "developer", "user", "assistant", "tool"]
@@ -272,12 +271,6 @@ ToolResultUIExtra = Annotated[
 ]


-class CommandOutput(BaseModel):
-    command_name: CommandName
-    ui_extra: ToolResultUIExtra | None = None
-    is_error: bool = False
-
-
 class MemoryFileLoaded(BaseModel):
     path: str
     mentioned_patterns: list[str] = Field(default_factory=list)
@@ -319,11 +312,6 @@ class SkillActivatedUIItem(BaseModel):
     name: str


-class CommandOutputUIItem(BaseModel):
-    type: Literal["command_output"] = "command_output"
-    output: CommandOutput
-
-
 type DeveloperUIItem = (
     MemoryLoadedUIItem
     | ExternalFileChangesUIItem
@@ -331,7 +319,6 @@ type DeveloperUIItem = (
     | AtFileOpsUIItem
     | UserImagesUIItem
     | SkillActivatedUIItem
-    | CommandOutputUIItem
 )


@@ -343,19 +330,6 @@ class DeveloperUIExtra(BaseModel):
     items: list[DeveloperUIItem] = Field(default_factory=_empty_developer_ui_items)


-def build_command_output_extra(
-    command_name: CommandName,
-    *,
-    ui_extra: ToolResultUIExtra | None = None,
-    is_error: bool = False,
-) -> DeveloperUIExtra:
-    return DeveloperUIExtra(
-        items=[
-            CommandOutputUIItem(output=CommandOutput(command_name=command_name, ui_extra=ui_extra, is_error=is_error))
-        ]
-    )
-
-
 class SubAgentState(BaseModel):
     sub_agent_type: SubAgentType
     sub_agent_desc: str

klaude_code/protocol/op.py
@@ -52,11 +52,6 @@ class RunAgentOperation(Operation):
     type: OperationType = OperationType.RUN_AGENT
     session_id: str
     input: UserInputPayload
-    # Frontends may choose to render the user message themselves (e.g. TUI) to support
-    # event-only commands; in that case the core should skip emitting the UserMessageEvent.
-    emit_user_message_event: bool = True
-    # Frontends may choose to run without persisting input (e.g. some interactive commands).
-    persist_user_input: bool = True

     async def execute(self, handler: OperationHandler) -> None:
         await handler.handle_run_agent(self)

klaude_code/session/session.py
@@ -237,7 +237,7 @@ class Session(BaseModel):
         Args:
             new_id: Optional ID for the forked session.
             until_index: If provided, only copy conversation history up to (but not including) this index.
-                If None, copy all history.
+                If -1, copy all history.
         """

         forked = Session.create(id=new_id, work_dir=self.work_dir)
@@ -250,7 +250,9 @@
         forked.todos = [todo.model_copy(deep=True) for todo in self.todos]

         history_to_copy = (
-            self.conversation_history[:until_index] if until_index is not None else self.conversation_history
+            self.conversation_history[:until_index]
+            if (until_index is not None and until_index >= 0)
+            else self.conversation_history
         )
         items = [it.model_copy(deep=True) for it in history_to_copy]
         if items:
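
Session.fork now treats only a non-negative until_index as a cut point; -1 (the new sentinel used by the fork command) and None both copy the full history. A stand-alone illustration of just the slicing rule:

# Stand-alone illustration of the new history slicing in Session.fork;
# `history` stands in for conversation_history and the Session class is omitted.
def history_to_copy(history: list[str], until_index: int | None) -> list[str]:
    if until_index is not None and until_index >= 0:
        return history[:until_index]  # fork up to (but not including) this index
    return history  # -1 or None: fork the entire conversation


h = ["user-1", "assistant-1", "user-2", "assistant-2"]
assert history_to_copy(h, 2) == ["user-1", "assistant-1"]
assert history_to_copy(h, -1) == h
assert history_to_copy(h, None) == h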

klaude_code/tui/command/clear_cmd.py
@@ -22,5 +22,4 @@ class ClearCommand(CommandABC):

         return CommandResult(
             operations=[op.ClearSessionOperation(session_id=agent.session.id)],
-            persist=False,
         )

klaude_code/tui/command/command_abc.py
@@ -37,14 +37,16 @@ class CommandResult(BaseModel):
     """Result of a command execution."""

     events: (
-        list[protocol_events.DeveloperMessageEvent | protocol_events.WelcomeEvent | protocol_events.ReplayHistoryEvent]
+        list[
+            protocol_events.CommandOutputEvent
+            | protocol_events.ErrorEvent
+            | protocol_events.WelcomeEvent
+            | protocol_events.ReplayHistoryEvent
+        ]
         | None
     ) = None  # List of UI events to display immediately
     operations: list[op.Operation] | None = None

-    # Persistence controls: some slash commands are UI/control actions and should not be written to session history.
-    persist: bool = True
-

 class CommandABC(ABC):
     """Abstract base class for slash commands."""

klaude_code/tui/command/copy_cmd.py
@@ -1,4 +1,4 @@
-from klaude_code.protocol import commands, events, message, model
+from klaude_code.protocol import commands, events, message
 from klaude_code.tui.input.clipboard import copy_to_clipboard

 from .command_abc import Agent, CommandABC, CommandResult
@@ -20,10 +20,10 @@ class CopyCommand(CommandABC):

         last = _get_last_assistant_text(agent.session.conversation_history)
         if not last:
-            return _developer_message(agent, "(no assistant message to copy)", self.name)
+            return _command_output(agent, "(no assistant message to copy)", self.name, is_error=True)

         copy_to_clipboard(last)
-        return _developer_message(agent, "Copied last assistant message to clipboard.", self.name)
+        return _command_output(agent, "Copied last assistant message to clipboard.", self.name)


 def _get_last_assistant_text(history: list[message.HistoryEvent]) -> str:
@@ -37,16 +37,16 @@ def _get_last_assistant_text(history: list[message.HistoryEvent]) -> str:
     return ""


-def _developer_message(agent: Agent, content: str, command_name: commands.CommandName) -> CommandResult:
+def _command_output(
+    agent: Agent, content: str, command_name: commands.CommandName, *, is_error: bool = False
+) -> CommandResult:
     return CommandResult(
         events=[
-            events.DeveloperMessageEvent(
+            events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str(content),
-                    ui_extra=model.build_command_output_extra(command_name),
-                ),
+                command_name=command_name,
+                content=content,
+                is_error=is_error,
             )
         ],
-        persist=False,
     )

klaude_code/tui/command/debug_cmd.py
@@ -1,5 +1,5 @@
 from klaude_code.log import DebugType, get_current_log_file, is_debug_enabled, set_debug_logging
-from klaude_code.protocol import commands, events, message, model
+from klaude_code.protocol import commands, events, message

 from .command_abc import Agent, CommandABC, CommandResult

@@ -52,7 +52,7 @@ class DebugCommand(CommandABC):
         # /debug (no args) - enable debug
         if not raw:
             set_debug_logging(True, write_to_file=True)
-            return self._message_result(agent, _format_status())
+            return self._command_output(agent, _format_status())

         # /debug <filters> - enable with filters
         try:
@@ -60,21 +60,22 @@
             if filters:
                 set_debug_logging(True, write_to_file=True, filters=filters)
                 filter_names = ", ".join(sorted(dt.value for dt in filters))
-                return self._message_result(agent, f"Filters: {filter_names}\n{_format_status()}")
+                return self._command_output(agent, f"Filters: {filter_names}\n{_format_status()}")
         except ValueError:
             pass

-        return self._message_result(agent, f"Invalid filter: {raw}\nValid: {', '.join(dt.value for dt in DebugType)}")
+        return self._command_output(
+            agent, f"Invalid filter: {raw}\nValid: {', '.join(dt.value for dt in DebugType)}", is_error=True
+        )

-    def _message_result(self, agent: "Agent", content: str) -> CommandResult:
+    def _command_output(self, agent: "Agent", content: str, *, is_error: bool = False) -> CommandResult:
         return CommandResult(
             events=[
-                events.DeveloperMessageEvent(
+                events.CommandOutputEvent(
                     session_id=agent.session.id,
-                    item=message.DeveloperMessage(
-                        parts=message.text_parts_from_str(content),
-                        ui_extra=model.build_command_output_extra(self.name),
-                    ),
+                    command_name=self.name,
+                    content=content,
+                    is_error=is_error,
                 )
             ]
         )

klaude_code/tui/command/export_online_cmd.py
@@ -9,7 +9,7 @@ from pathlib import Path
 from rich.console import Console
 from rich.text import Text

-from klaude_code.protocol import commands, events, message, model
+from klaude_code.protocol import commands, events, message
 from klaude_code.session.export import build_export_html

 from .command_abc import Agent, CommandABC, CommandResult
@@ -39,54 +39,49 @@ class ExportOnlineCommand(CommandABC):
         # Check if npx or surge is available
         surge_cmd = self._get_surge_command()
         if not surge_cmd:
-            event = events.DeveloperMessageEvent(
+            event = events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str("surge.sh CLI not found. Install with: npm install -g surge"),
-                    ui_extra=model.build_command_output_extra(self.name, is_error=True),
-                ),
+                command_name=self.name,
+                content="surge.sh CLI not found. Install with: npm install -g surge",
+                is_error=True,
             )
             return CommandResult(events=[event])

         try:
             console = Console()
             # Check login status inside status context since npx surge whoami can be slow
-            with console.status(Text("Checking surge.sh login status", style="dim"), spinner_style="dim"):
+            with console.status(Text("Checking surge.sh login status...", style="dim"), spinner_style="dim"):
                 logged_in = self._is_surge_logged_in(surge_cmd)

             if not logged_in:
                 login_cmd = " ".join([*surge_cmd, "login"])
-                event = events.DeveloperMessageEvent(
+                event = events.CommandOutputEvent(
                     session_id=agent.session.id,
-                    item=message.DeveloperMessage(
-                        parts=message.text_parts_from_str(f"Not logged in to surge.sh. Please run: {login_cmd}"),
-                        ui_extra=model.build_command_output_extra(self.name, is_error=True),
-                    ),
+                    command_name=self.name,
+                    content=f"Not logged in to surge.sh. Please run: {login_cmd}",
+                    is_error=True,
                 )
                 return CommandResult(events=[event])

-            with console.status(Text("Deploying to surge.sh", style="dim"), spinner_style="dim"):
+            with console.status(Text("Deploying to surge.sh...", style="dim"), spinner_style="dim"):
                 html_doc = self._build_html(agent)
                 domain = self._generate_domain()
                 url = self._deploy_to_surge(surge_cmd, html_doc, domain)

-            event = events.DeveloperMessageEvent(
+            event = events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str(f"Session deployed to: {url}"),
-                    ui_extra=model.build_command_output_extra(self.name),
-                ),
+                command_name=self.name,
+                content=f"Session deployed to: {url}",
             )
             return CommandResult(events=[event])
         except Exception as exc:
             import traceback

-            event = events.DeveloperMessageEvent(
+            event = events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str(f"Failed to deploy session: {exc}\n{traceback.format_exc()}"),
-                    ui_extra=model.build_command_output_extra(self.name, is_error=True),
-                ),
+                command_name=self.name,
+                content=f"Failed to deploy session: {exc}\n{traceback.format_exc()}",
+                is_error=True,
             )
             return CommandResult(events=[event])


klaude_code/tui/command/fork_session_cmd.py
@@ -31,7 +31,7 @@ FORK_SELECT_STYLE = Style(
 class ForkPoint:
     """A fork point in conversation history."""

-    history_index: int | None  # None means fork entire conversation
+    history_index: int  # -1 means fork entire conversation
     user_message: str
     tool_call_stats: dict[str, int]  # tool_name -> count
     last_assistant_summary: str
@@ -94,7 +94,7 @@ def _build_fork_points(conversation_history: list[message.HistoryEvent]) -> list
     if user_indices:
         fork_points.append(
             ForkPoint(
-                history_index=None,  # None means fork entire conversation
+                history_index=-1,  # None means fork entire conversation
                 user_message="",  # No specific message, this represents the end
                 tool_call_stats={},
                 last_assistant_summary="",
@@ -104,9 +104,9 @@
     return fork_points


-def _build_select_items(fork_points: list[ForkPoint]) -> list[SelectItem[int | None]]:
+def _build_select_items(fork_points: list[ForkPoint]) -> list[SelectItem[int]]:
     """Build SelectItem list from fork points."""
-    items: list[SelectItem[int | None]] = []
+    items: list[SelectItem[int]] = []

     for i, fp in enumerate(fork_points):
         is_first = i == 0
@@ -116,8 +116,8 @@ def _build_select_items(fork_points: list[ForkPoint]) -> list[SelectItem[int | N
         title_parts: list[tuple[str, str]] = []

         # First line: separator (with special markers for first/last fork points)
-        if is_first and not is_last:
-            title_parts.append(("class:separator", "----- fork from here (empty session) -----\n\n"))
+        if is_first:
+            pass
         elif is_last:
             title_parts.append(("class:separator", "----- fork from here (entire session) -----\n\n"))
         else:
@@ -150,17 +150,16 @@ def _build_select_items(fork_points: list[ForkPoint]) -> list[SelectItem[int | N
     return items


-def _select_fork_point_sync(fork_points: list[ForkPoint]) -> int | None | Literal["cancelled"]:
+def _select_fork_point_sync(fork_points: list[ForkPoint]) -> int | Literal["cancelled"]:
     """Interactive fork point selection (sync version for asyncio.to_thread).

     Returns:
-    - int: history index to fork at (exclusive)
-    - None: fork entire conversation
+    - int: history index to fork at (exclusive), -1 means fork entire conversation
     - "cancelled": user cancelled selection
     """
     items = _build_select_items(fork_points)
     if not items:
-        return None
+        return -1

     # Default to the last option (fork entire conversation)
     last_value = items[-1].value
@@ -204,14 +203,13 @@ class ForkSessionCommand(CommandABC):
         del user_input  # unused

         if agent.session.messages_count == 0:
-            event = events.DeveloperMessageEvent(
+            event = events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str("(no messages to fork)"),
-                    ui_extra=model.build_command_output_extra(self.name),
-                ),
+                command_name=self.name,
+                content="(no messages to fork)",
+                is_error=True,
             )
-            return CommandResult(events=[event], persist=False)
+            return CommandResult(events=[event])

         # Build fork points from conversation history
         fork_points = _build_fork_points(agent.session.conversation_history)
@@ -224,51 +222,49 @@ class ForkSessionCommand(CommandABC):
             resume_cmd = f"klaude --resume-by-id {new_session.id}"
             copy_to_clipboard(resume_cmd)

-            event = events.DeveloperMessageEvent(
+            event = events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str(f"Session forked successfully. New session id: {new_session.id}"),
-                    ui_extra=model.build_command_output_extra(
-                        self.name,
-                        ui_extra=model.SessionIdUIExtra(session_id=new_session.id),
-                    ),
-                ),
+                command_name=self.name,
+                content=f"Session forked successfully. New session id: {new_session.id}",
+                ui_extra=model.SessionIdUIExtra(session_id=new_session.id),
             )
-            return CommandResult(events=[event], persist=False)
+            return CommandResult(events=[event])

         # Interactive selection
         selected = await asyncio.to_thread(_select_fork_point_sync, fork_points)

         if selected == "cancelled":
-            event = events.DeveloperMessageEvent(
+            event = events.CommandOutputEvent(
                 session_id=agent.session.id,
-                item=message.DeveloperMessage(
-                    parts=message.text_parts_from_str("(fork cancelled)"),
-                    ui_extra=model.build_command_output_extra(self.name),
-                ),
+                command_name=self.name,
+                content="(fork cancelled)",
             )
-            return CommandResult(events=[event], persist=False)
+            return CommandResult(events=[event])
+
+        # First option (empty session) is just for UI display, not a valid fork point
+        if selected == fork_points[0].history_index:
+            event = events.CommandOutputEvent(
+                session_id=agent.session.id,
+                command_name=self.name,
+                content="(cannot fork to empty session)",
+                is_error=True,
+            )
+            return CommandResult(events=[event])

         # Perform the fork
         new_session = agent.session.fork(until_index=selected)
         await new_session.wait_for_flush()

         # Build result message
-        fork_description = "entire conversation" if selected is None else f"up to message index {selected}"
+        fork_description = "entire conversation" if selected == -1 else f"up to message index {selected}"

         resume_cmd = f"klaude --resume-by-id {new_session.id}"
         copy_to_clipboard(resume_cmd)

-        event = events.DeveloperMessageEvent(
+        event = events.CommandOutputEvent(
             session_id=agent.session.id,
-            item=message.DeveloperMessage(
-                parts=message.text_parts_from_str(
-                    f"Session forked ({fork_description}). New session id: {new_session.id}"
-                ),
-                ui_extra=model.build_command_output_extra(
-                    self.name,
-                    ui_extra=model.SessionIdUIExtra(session_id=new_session.id),
-                ),
-            ),
+            command_name=self.name,
+            content=f"Session forked ({fork_description}). New session id: {new_session.id}",
+            ui_extra=model.SessionIdUIExtra(session_id=new_session.id),
         )
-        return CommandResult(events=[event], persist=False)
+        return CommandResult(events=[event])

klaude_code/tui/command/model_cmd.py
@@ -1,9 +1,9 @@
 import asyncio

-from klaude_code.protocol import commands, events, message, model, op
+from klaude_code.protocol import commands, events, message, op

 from .command_abc import Agent, CommandABC, CommandResult
-from .model_select import ModelSelectStatus, select_model_interactive
+from .model_picker import ModelSelectStatus, select_model_interactive


 class ModelCommand(CommandABC):
@@ -37,12 +37,10 @@ class ModelCommand(CommandABC):
         if selected_model is None or selected_model == current_model:
             return CommandResult(
                 events=[
-                    events.DeveloperMessageEvent(
+                    events.CommandOutputEvent(
                         session_id=agent.session.id,
-                        item=message.DeveloperMessage(
-                            parts=message.text_parts_from_str("(no change)"),
-                            ui_extra=model.build_command_output_extra(self.name),
-                        ),
+                        command_name=self.name,
+                        content="(no change)",
                     )
                 ]
             )

klaude_code/tui/command/{model_select.py → model_picker.py}
@@ -7,7 +7,7 @@ from dataclasses import dataclass
 from enum import Enum

 from klaude_code.config.config import load_config
-from klaude_code.config.select_model import match_model_from_config
+from klaude_code.config.model_matcher import match_model_from_config
 from klaude_code.log import log


@@ -35,7 +35,7 @@ def select_model_interactive(
     This function combines matching logic with interactive UI selection.
     For CLI usage.

-    If keywords is provided, preferred is ignored and the model list is pre-filtered by model_params.model.
+    If keywords is provided, preferred is ignored and the model list is pre-filtered by model_id.

     If preferred is provided:
     - Exact match: return immediately
@@ -54,9 +54,7 @@
     if keywords:
         keywords_lower = [k.lower() for k in keywords]
         filtered_models = [
-            m
-            for m in result.filtered_models
-            if any(kw in (m.model_params.model or "").lower() for kw in keywords_lower)
+            m for m in result.filtered_models if any(kw in (m.model_id or "").lower() for kw in keywords_lower)
         ]
         if not filtered_models:
             return ModelSelectResult(status=ModelSelectStatus.NO_MATCH)
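
Keyword pre-filtering in the renamed model_picker now matches against model_id directly instead of reaching through model_params.model. A small stand-alone sketch of the same comprehension, with a hypothetical ConfiguredModel standing in for the config entries:

# ConfiguredModel is a hypothetical stand-in; only the model_id attribute is taken from the diff.
from dataclasses import dataclass


@dataclass
class ConfiguredModel:
    model_id: str | None


def filter_by_keywords(models: list[ConfiguredModel], keywords: list[str]) -> list[ConfiguredModel]:
    keywords_lower = [k.lower() for k in keywords]
    # Entries with no model_id fall back to "" and therefore never match a keyword.
    return [m for m in models if any(kw in (m.model_id or "").lower() for kw in keywords_lower)]


models = [ConfiguredModel("provider-a/model-x"), ConfiguredModel("provider-b/model-y"), ConfiguredModel(None)]
print([m.model_id for m in filter_by_keywords(models, ["model-x"])])  # only the matching entry survives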

klaude_code/tui/command/refresh_cmd.py
@@ -38,5 +38,4 @@ class RefreshTerminalCommand(CommandABC):
                     is_load=False,
                 ),
             ],
-            persist=False,
         )