klaude-code 2.1.0__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (52)
  1. klaude_code/app/__init__.py +1 -2
  2. klaude_code/app/runtime.py +26 -41
  3. klaude_code/cli/main.py +19 -152
  4. klaude_code/config/assets/builtin_config.yaml +13 -0
  5. klaude_code/const.py +1 -1
  6. klaude_code/core/agent_profile.py +38 -3
  7. klaude_code/core/manager/llm_clients_builder.py +1 -1
  8. klaude_code/core/prompts/prompt-nano-banana.md +1 -0
  9. klaude_code/core/reminders.py +20 -4
  10. klaude_code/core/tool/__init__.py +0 -2
  11. klaude_code/core/tool/shell/command_safety.py +4 -189
  12. klaude_code/core/turn.py +2 -5
  13. klaude_code/llm/anthropic/client.py +1 -1
  14. klaude_code/llm/google/client.py +1 -1
  15. klaude_code/llm/openai_compatible/stream.py +1 -1
  16. klaude_code/llm/responses/client.py +1 -1
  17. klaude_code/protocol/commands.py +1 -0
  18. klaude_code/protocol/events/tools.py +5 -1
  19. klaude_code/protocol/message.py +2 -2
  20. klaude_code/protocol/tools.py +0 -1
  21. klaude_code/session/session.py +0 -2
  22. klaude_code/skill/loader.py +31 -87
  23. klaude_code/skill/manager.py +38 -0
  24. klaude_code/tui/command/__init__.py +6 -3
  25. klaude_code/tui/command/clear_cmd.py +1 -1
  26. klaude_code/tui/command/command_abc.py +1 -2
  27. klaude_code/tui/command/copy_cmd.py +52 -0
  28. klaude_code/tui/command/fork_session_cmd.py +4 -4
  29. klaude_code/tui/command/refresh_cmd.py +1 -2
  30. klaude_code/tui/command/resume_cmd.py +3 -4
  31. klaude_code/tui/command/status_cmd.py +1 -1
  32. klaude_code/tui/components/developer.py +11 -11
  33. klaude_code/tui/components/metadata.py +1 -1
  34. klaude_code/tui/components/rich/theme.py +2 -2
  35. klaude_code/tui/components/tools.py +4 -8
  36. klaude_code/tui/components/user_input.py +9 -21
  37. klaude_code/tui/machine.py +3 -1
  38. klaude_code/tui/renderer.py +1 -1
  39. klaude_code/tui/runner.py +2 -2
  40. klaude_code/tui/terminal/selector.py +3 -15
  41. klaude_code/ui/__init__.py +0 -24
  42. klaude_code/ui/common.py +3 -2
  43. klaude_code/ui/core/display.py +2 -2
  44. {klaude_code-2.1.0.dist-info → klaude_code-2.2.0.dist-info}/METADATA +16 -81
  45. {klaude_code-2.1.0.dist-info → klaude_code-2.2.0.dist-info}/RECORD +47 -50
  46. klaude_code/core/tool/skill/__init__.py +0 -0
  47. klaude_code/core/tool/skill/skill_tool.md +0 -24
  48. klaude_code/core/tool/skill/skill_tool.py +0 -89
  49. klaude_code/tui/command/prompt-commit.md +0 -82
  50. klaude_code/ui/exec_mode.py +0 -60
  51. {klaude_code-2.1.0.dist-info → klaude_code-2.2.0.dist-info}/WHEEL +0 -0
  52. {klaude_code-2.1.0.dist-info → klaude_code-2.2.0.dist-info}/entry_points.txt +0 -0
klaude_code/tui/command/copy_cmd.py ADDED
@@ -0,0 +1,52 @@
+ from klaude_code.protocol import commands, events, message, model
+ from klaude_code.tui.input.clipboard import copy_to_clipboard
+
+ from .command_abc import Agent, CommandABC, CommandResult
+
+
+ class CopyCommand(CommandABC):
+     """Copy the last assistant message to system clipboard."""
+
+     @property
+     def name(self) -> commands.CommandName:
+         return commands.CommandName.COPY
+
+     @property
+     def summary(self) -> str:
+         return "Copy last assistant message to clipboard"
+
+     async def run(self, agent: Agent, user_input: message.UserInputPayload) -> CommandResult:
+         del user_input  # unused
+
+         last = _get_last_assistant_text(agent.session.conversation_history)
+         if not last:
+             return _developer_message(agent, "(no assistant message to copy)", self.name)
+
+         copy_to_clipboard(last)
+         return _developer_message(agent, "Copied last assistant message to clipboard.", self.name)
+
+
+ def _get_last_assistant_text(history: list[message.HistoryEvent]) -> str:
+     for item in reversed(history):
+         if not isinstance(item, message.AssistantMessage):
+             continue
+         content = message.join_text_parts(item.parts)
+         images = [part for part in item.parts if isinstance(part, message.ImageFilePart)]
+         formatted = message.format_saved_images(images, content)
+         return formatted.strip()
+     return ""
+
+
+ def _developer_message(agent: Agent, content: str, command_name: commands.CommandName) -> CommandResult:
+     return CommandResult(
+         events=[
+             events.DeveloperMessageEvent(
+                 session_id=agent.session.id,
+                 item=message.DeveloperMessage(
+                     parts=message.text_parts_from_str(content),
+                     ui_extra=model.build_command_output_extra(command_name),
+                 ),
+             )
+         ],
+         persist=False,
+     )
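The new command delegates the actual clipboard write to `copy_to_clipboard` from `klaude_code.tui.input.clipboard`, which is not included in this diff. A minimal sketch of what such a helper typically looks like, assuming shell clipboard utilities (`pbcopy`, `wl-copy`, `xclip`) rather than the package's real implementation:

```python
# Hypothetical clipboard helper for illustration only; klaude_code's actual
# copy_to_clipboard is not shown in this diff and may work differently.
import shutil
import subprocess
import sys


def copy_to_clipboard(text: str) -> bool:
    """Send text to the system clipboard via a platform utility, if one is available."""
    if sys.platform == "darwin":
        cmd = ["pbcopy"]
    elif shutil.which("wl-copy"):
        cmd = ["wl-copy"]
    elif shutil.which("xclip"):
        cmd = ["xclip", "-selection", "clipboard"]
    else:
        return False  # no known clipboard utility found
    try:
        subprocess.run(cmd, input=text.encode("utf-8"), check=True)
        return True
    except (OSError, subprocess.CalledProcessError):
        return False
```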
klaude_code/tui/command/fork_session_cmd.py CHANGED
@@ -211,7 +211,7 @@ class ForkSessionCommand(CommandABC):
  ui_extra=model.build_command_output_extra(self.name),
  ),
  )
- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)

  # Build fork points from conversation history
  fork_points = _build_fork_points(agent.session.conversation_history)
@@ -234,7 +234,7 @@ class ForkSessionCommand(CommandABC):
  ),
  ),
  )
- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)

  # Interactive selection
  selected = await asyncio.to_thread(_select_fork_point_sync, fork_points)
@@ -247,7 +247,7 @@ class ForkSessionCommand(CommandABC):
  ui_extra=model.build_command_output_extra(self.name),
  ),
  )
- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)

  # Perform the fork
  new_session = agent.session.fork(until_index=selected)
@@ -271,4 +271,4 @@ class ForkSessionCommand(CommandABC):
  ),
  ),
  )
- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)
klaude_code/tui/command/refresh_cmd.py CHANGED
@@ -38,6 +38,5 @@ class RefreshTerminalCommand(CommandABC):
  is_load=False,
  ),
  ],
- persist_user_input=False,
- persist_events=False,
+ persist=False,
  )
klaude_code/tui/command/resume_cmd.py CHANGED
@@ -96,7 +96,7 @@ class ResumeCommand(CommandABC):
  ui_extra=model.build_command_output_extra(self.name, is_error=True),
  ),
  )
- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)

  selected_session_id = await asyncio.to_thread(select_session_sync)
  if selected_session_id is None:
@@ -107,10 +107,9 @@ class ResumeCommand(CommandABC):
  ui_extra=model.build_command_output_extra(self.name),
  ),
  )
- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)

  return CommandResult(
  operations=[op.ResumeSessionOperation(target_session_id=selected_session_id)],
- persist_user_input=False,
- persist_events=False,
+ persist=False,
  )
klaude_code/tui/command/status_cmd.py CHANGED
@@ -153,4 +153,4 @@ class StatusCommand(CommandABC):
  ),
  )

- return CommandResult(events=[event], persist_user_input=False, persist_events=False)
+ return CommandResult(events=[event], persist=False)
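The command modules above (and the runner change further down) replace the separate `persist_user_input` and `persist_events` flags on `CommandResult` with a single `persist` flag. A minimal sketch of the consolidated shape, with field types and defaults assumed rather than taken from `klaude_code.tui.command.command_abc`:

```python
# Hypothetical sketch of the consolidated CommandResult; the real class may differ.
from dataclasses import dataclass, field
from typing import Any


@dataclass
class CommandResult:
    events: list[Any] = field(default_factory=list)      # events for the UI to render
    operations: list[Any] = field(default_factory=list)  # follow-up operations (resume, run agent, ...)
    persist: bool = True  # replaces the former persist_user_input / persist_events pair
```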
klaude_code/tui/components/developer.py CHANGED
@@ -143,7 +143,7 @@ def render_command_output(e: events.DeveloperMessageEvent) -> RenderableType:
  content = message.join_text_parts(e.item.parts)
  match command_output.command_name:
  case commands.CommandName.HELP:
- return Padding.indent(Text.from_markup(content or ""), level=2)
+ return Padding.indent(Text.from_markup(content or "", style=ThemeKey.TOOL_RESULT), level=2)
  case commands.CommandName.STATUS:
  return _render_status_output(command_output)
  case commands.CommandName.RELEASE_NOTES:
@@ -178,14 +178,14 @@ def _format_cost(cost: float | None, currency: str = "USD") -> str:
  def _render_fork_session_output(command_output: model.CommandOutput) -> RenderableType:
  """Render fork session output with usage instructions."""
  if not isinstance(command_output.ui_extra, model.SessionIdUIExtra):
- return Padding.indent(Text("(no session id)", style=ThemeKey.METADATA), level=2)
+ return Padding.indent(Text("(no session id)", style=ThemeKey.TOOL_RESULT), level=2)

  grid = Table.grid(padding=(0, 1))
  session_id = command_output.ui_extra.session_id
- grid.add_column(style=ThemeKey.METADATA, overflow="fold")
+ grid.add_column(style=ThemeKey.TOOL_RESULT, overflow="fold")

- grid.add_row(Text("Session forked. Resume command copied to clipboard:", style=ThemeKey.METADATA))
- grid.add_row(Text(f" klaude --resume-by-id {session_id}", style=ThemeKey.METADATA_BOLD))
+ grid.add_row(Text("Session forked. Resume command copied to clipboard:", style=ThemeKey.TOOL_RESULT))
+ grid.add_row(Text(f" klaude --resume-by-id {session_id}", style=ThemeKey.TOOL_RESULT_BOLD))

  return Padding.indent(grid, level=2)

@@ -193,24 +193,24 @@ def _render_fork_session_output(command_output: model.CommandOutput) -> RenderableType:
  def _render_status_output(command_output: model.CommandOutput) -> RenderableType:
  """Render session status with total cost and per-model breakdown."""
  if not isinstance(command_output.ui_extra, model.SessionStatusUIExtra):
- return Text("(no status data)", style=ThemeKey.METADATA)
+ return Text("(no status data)", style=ThemeKey.TOOL_RESULT)

  status = command_output.ui_extra
  usage = status.usage

  table = Table.grid(padding=(0, 2))
- table.add_column(style=ThemeKey.METADATA, overflow="fold")
- table.add_column(style=ThemeKey.METADATA, overflow="fold")
+ table.add_column(style=ThemeKey.TOOL_RESULT, overflow="fold")
+ table.add_column(style=ThemeKey.TOOL_RESULT, overflow="fold")

  # Total cost line
  table.add_row(
- Text("Total cost:", style=ThemeKey.METADATA_BOLD),
- Text(_format_cost(usage.total_cost, usage.currency), style=ThemeKey.METADATA_BOLD),
+ Text("Total cost:", style=ThemeKey.TOOL_RESULT_BOLD),
+ Text(_format_cost(usage.total_cost, usage.currency), style=ThemeKey.TOOL_RESULT_BOLD),
  )

  # Per-model breakdown
  if status.by_model:
- table.add_row(Text("Usage by model:", style=ThemeKey.METADATA_BOLD), "")
+ table.add_row(Text("Usage by model:", style=ThemeKey.TOOL_RESULT_BOLD), "")
  for meta in status.by_model:
  model_label = meta.model_name
  if meta.provider:
klaude_code/tui/components/metadata.py CHANGED
@@ -188,7 +188,7 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
  ("Σ ", ThemeKey.METADATA_DIM),
  ("total ", ThemeKey.METADATA_DIM),
  (currency_symbol, ThemeKey.METADATA_DIM),
- (f"{total_cost:.4f}", ThemeKey.METADATA_BOLD),
+ (f"{total_cost:.4f}", ThemeKey.METADATA),
  )
  grid = create_grid()
  grid.add_row(Text(" ", style=ThemeKey.METADATA_DIM), total_line)
klaude_code/tui/components/rich/theme.py CHANGED
@@ -250,8 +250,8 @@ def get_theme(theme: str | None = None) -> Themes:
  ThemeKey.USER_INPUT.value: palette.magenta,
  ThemeKey.USER_INPUT_PROMPT.value: "bold " + palette.magenta,
  ThemeKey.USER_INPUT_AT_PATTERN.value: palette.purple,
- ThemeKey.USER_INPUT_SLASH_COMMAND.value: "bold reverse " + palette.blue,
- ThemeKey.USER_INPUT_SKILL.value: "bold reverse " + palette.green,
+ ThemeKey.USER_INPUT_SLASH_COMMAND.value: "bold " + palette.blue,
+ ThemeKey.USER_INPUT_SKILL.value: "bold " + palette.green,
  # ASSISTANT
  ThemeKey.ASSISTANT_MESSAGE_MARK.value: "bold",
  # METADATA
klaude_code/tui/components/tools.py CHANGED
@@ -37,7 +37,6 @@ MARK_MERMAID = "⧉"
  MARK_WEB_FETCH = "→"
  MARK_WEB_SEARCH = "✱"
  MARK_DONE = "✔"
- MARK_SKILL = "✪"

  # Todo status markers
  MARK_TODO_PENDING = "▢"
@@ -491,7 +490,7 @@ def render_mermaid_tool_result(

  link_info = _extract_mermaid_link(tr.ui_extra)
  if link_info is None:
- return render_generic_tool_result(tr.result, is_error=tr.status == "error")
+ return render_generic_tool_result(tr.result, is_error=tr.is_error)

  use_osc8 = supports_osc8_hyperlinks()
  viewer = _render_mermaid_viewer_link(tr, link_info, use_osc8=use_osc8)
@@ -537,7 +536,6 @@ _TOOL_ACTIVE_FORM: dict[str, str] = {
  tools.WRITE: "Writing",
  tools.TODO_WRITE: "Planning",
  tools.UPDATE_PLAN: "Planning",
- tools.SKILL: "Skilling",
  tools.MERMAID: "Diagramming",
  tools.WEB_FETCH: "Fetching Web",
  tools.WEB_SEARCH: "Searching Web",
@@ -589,8 +587,6 @@ def render_tool_call(e: events.ToolCallEvent) -> RenderableType | None:
  return render_update_plan_tool_call(e.arguments)
  case tools.MERMAID:
  return render_mermaid_tool_call(e.arguments)
- case tools.SKILL:
- return render_generic_tool_call(e.tool_name, e.arguments, MARK_SKILL)
  case tools.REPORT_BACK:
  return render_report_back_tool_call()
  case tools.WEB_FETCH:
@@ -649,7 +645,7 @@ def render_tool_result(
  return TreeQuote.for_tool_result(content, is_last=e.is_last_in_turn)

  # Handle error case
- if e.status == "error" and e.ui_extra is None:
+ if e.is_error and e.ui_extra is None:
  return wrap(render_generic_tool_result(e.result, is_error=True))

  # Render multiple ui blocks if present
@@ -666,7 +662,7 @@
  # Show truncation info if output was truncated and saved to file
  truncation_info = get_truncation_info(e)
  if truncation_info:
- result = render_generic_tool_result(e.result, is_error=e.status == "error")
+ result = render_generic_tool_result(e.result, is_error=e.is_error)
  return wrap(Group(render_truncation_info(truncation_info), result))

  diff_ui = _extract_diff(e.ui_extra)
@@ -675,7 +671,7 @@
  def _render_fallback() -> TreeQuote:
  if len(e.result.strip()) == 0:
  return wrap(render_generic_tool_result("(no content)"))
- return wrap(render_generic_tool_result(e.result, is_error=e.status == "error"))
+ return wrap(render_generic_tool_result(e.result, is_error=e.is_error))

  match e.tool_name:
  case tools.READ:
klaude_code/tui/components/user_input.py CHANGED
@@ -4,7 +4,6 @@ from rich.console import Group, RenderableType
  from rich.text import Text

  from klaude_code.skill import get_available_skills
- from klaude_code.tui.command import is_slash_command_name
  from klaude_code.tui.components.common import create_grid
  from klaude_code.tui.components.rich.theme import ThemeKey

@@ -54,35 +53,28 @@ def _is_valid_skill_name(name: str) -> bool:
  def render_user_input(content: str) -> RenderableType:
  """Render a user message as a group of quoted lines with styles.

- - Highlights slash command on the first line if recognized
+ - Highlights slash command token on the first line
  - Highlights $skill pattern on the first line if recognized
  - Highlights @file patterns in all lines
  """
  lines = content.strip().split("\n")
  renderables: list[RenderableType] = []
- has_command = False
- command_style: str | None = None
  for i, line in enumerate(lines):
  line_text = render_at_pattern(line)

  if i == 0 and line.startswith("/"):
  splits = line.split(" ", maxsplit=1)
- if is_slash_command_name(splits[0][1:]):
- has_command = True
- command_style = ThemeKey.USER_INPUT_SLASH_COMMAND
- line_text = Text.assemble(
- (f"{splits[0]}", ThemeKey.USER_INPUT_SLASH_COMMAND),
- " ",
- render_at_pattern(splits[1]) if len(splits) > 1 else Text(""),
- )
- renderables.append(line_text)
- continue
+ line_text = Text.assemble(
+ (splits[0], ThemeKey.USER_INPUT_SLASH_COMMAND),
+ " ",
+ render_at_pattern(splits[1]) if len(splits) > 1 else Text(""),
+ )
+ renderables.append(line_text)
+ continue

  if i == 0 and (line.startswith("$") or line.startswith("¥")):
  m = SKILL_RENDER_PATTERN.match(line)
  if m and _is_valid_skill_name(m.group(1)):
- has_command = True
- command_style = ThemeKey.USER_INPUT_SKILL
  skill_token = m.group(0) # e.g. "$skill-name"
  rest = line[len(skill_token) :]
  line_text = Text.assemble(
@@ -95,11 +87,7 @@ def render_user_input(content: str) -> RenderableType:
  renderables.append(line_text)
  grid = create_grid()
  grid.padding = (0, 0)
- mark = (
- Text(USER_MESSAGE_MARK, style=ThemeKey.USER_INPUT_PROMPT)
- if not has_command
- else Text(" ", style=command_style or ThemeKey.USER_INPUT_SLASH_COMMAND)
- )
+ mark = Text(USER_MESSAGE_MARK, style=ThemeKey.USER_INPUT_PROMPT)
  grid.add_row(mark, Group(*renderables))
  return grid

klaude_code/tui/machine.py CHANGED
@@ -521,13 +521,15 @@ class DisplayStateMachine:
  self._spinner.finish_sub_agent_tool_call(e.tool_call_id, get_tool_active_form(e.tool_name))
  cmds.extend(self._spinner_update_commands())

- if s.is_sub_agent and e.status == "success":
+ if s.is_sub_agent and not e.is_error:
  return cmds

  cmds.append(RenderToolResult(event=e, is_sub_agent_session=s.is_sub_agent))
  return cmds

  case events.TaskMetadataEvent() as e:
+ cmds.append(EndThinkingStream(e.session_id))
+ cmds.append(EndAssistantStream(e.session_id))
  cmds.append(RenderTaskMetadata(e))
  return cmds

klaude_code/tui/renderer.py CHANGED
@@ -396,7 +396,7 @@ class TUICommandRenderer:
  if c_tools.is_sub_agent_tool(e.tool_name):
  return

- if is_sub_agent and e.status == "error":
+ if is_sub_agent and e.is_error:
  error_msg = truncate_head(e.result)
  self.print(c_errors.render_tool_error(error_msg))
  return
klaude_code/tui/runner.py CHANGED
@@ -77,12 +77,12 @@ async def submit_user_input_payload(
  raise ValueError("Multiple RunAgentOperation results are not supported")

  for run_op in run_ops:
- run_op.persist_user_input = cmd_result.persist_user_input
+ run_op.persist_user_input = cmd_result.persist
  run_op.emit_user_message_event = False

  if cmd_result.events:
  for evt in cmd_result.events:
- if cmd_result.persist_events and isinstance(evt, events.DeveloperMessageEvent):
+ if cmd_result.persist and isinstance(evt, events.DeveloperMessageEvent):
  agent.session.append_history([evt.item])
  await executor.context.emit_event(evt)

klaude_code/tui/terminal/selector.py CHANGED
@@ -19,6 +19,8 @@ from prompt_toolkit.layout.containers import Container, ScrollOffsets
  from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
  from prompt_toolkit.styles import Style, merge_styles

+ from klaude_code.ui.common import format_model_params
+

  @dataclass(frozen=True, slots=True)
  class SelectItem[T]:
@@ -49,26 +51,12 @@ def build_model_select_items(models: list[Any]) -> list[SelectItem[str]]:
  max_model_name_length = max(len(m.model_name) for m in models)
  num_width = len(str(len(models)))

- def _thinking_info(m: Any) -> str:
- thinking = m.model_params.thinking
- if not thinking:
- return ""
- if thinking.reasoning_effort:
- return f"reasoning {thinking.reasoning_effort}"
- if thinking.budget_tokens:
- return f"thinking budget {thinking.budget_tokens}"
- return "thinking (configured)"
-
  items: list[SelectItem[str]] = []
  for idx, m in enumerate(models, 1):
  model_id = m.model_params.model or "N/A"
  first_line_prefix = f"{m.model_name:<{max_model_name_length}} → "
- thinking_info = _thinking_info(m)
  meta_parts: list[str] = [m.provider]
- if thinking_info:
- meta_parts.append(thinking_info)
- if m.model_params.verbosity:
- meta_parts.append(f"verbosity {m.model_params.verbosity}")
+ meta_parts.extend(format_model_params(m.model_params))
  meta_str = " · ".join(meta_parts)
  title = [
  ("class:meta", f"{idx:>{num_width}}. "),
klaude_code/ui/__init__.py CHANGED
@@ -10,33 +10,9 @@ Terminal (Rich/prompt-toolkit) UI lives in `klaude_code.tui`.
  from .core.display import DisplayABC
  from .core.input import InputProviderABC
  from .debug_mode import DebugEventDisplay
- from .exec_mode import ExecDisplay, StreamJsonDisplay
-
-
- def create_exec_display(debug: bool = False, stream_json: bool = False) -> DisplayABC:
- """
- Create a display for exec (non-interactive) mode.
-
- Args:
- debug: If True, wrap the display with DebugEventDisplay to log all events.
- stream_json: If True, stream all events as JSON lines instead of normal output.
-
- Returns:
- A DisplayABC implementation that only outputs task results.
- """
- if stream_json:
- return StreamJsonDisplay()
- exec_display = ExecDisplay()
- if debug:
- return DebugEventDisplay(exec_display)
- return exec_display
-

  __all__ = [
  "DebugEventDisplay",
  "DisplayABC",
- "ExecDisplay",
  "InputProviderABC",
- "StreamJsonDisplay",
- "create_exec_display",
  ]
klaude_code/ui/common.py CHANGED
@@ -101,6 +101,7 @@ def format_model_params(model_params: "LLMConfigModelParameter") -> list[str]:
  - "reasoning medium"
  - "thinking budget 10000"
  - "verbosity 2"
+ - "image generation"
  - "provider-routing: {…}"
  """
  parts: list[str] = []
@@ -119,8 +120,8 @@ def format_model_params(model_params: "LLMConfigModelParameter") -> list[str]:
  if model_params.provider_routing:
  parts.append(f"provider routing {_format_provider_routing(model_params.provider_routing)}")

- if model_params.modalities:
- parts.append(f"modalities {','.join(model_params.modalities)}")
+ if model_params.modalities and any(m.casefold() == "image" for m in model_params.modalities):
+ parts.append("image generation")

  if model_params.image_config:
  if model_params.image_config.aspect_ratio:
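The `modalities` branch of `format_model_params` now emits a single human-readable label instead of echoing the raw list. Isolated below for illustration; this standalone function is not part of the package:

```python
# Standalone illustration of the 2.2.0 modalities formatting; not the package's actual code.
def describe_modalities(modalities: list[str] | None) -> list[str]:
    parts: list[str] = []
    # A modalities list that includes "image" is now reported as "image generation".
    if modalities and any(m.casefold() == "image" for m in modalities):
        parts.append("image generation")
    return parts


print(describe_modalities(["text", "image"]))  # ['image generation']
print(describe_modalities(["text"]))           # []
```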
klaude_code/ui/core/display.py CHANGED
@@ -10,8 +10,8 @@ class DisplayABC(ABC):
  Abstract base class for UI display implementations.

  A Display is responsible for rendering events from the executor to the user.
- Implementations can range from simple text output (ExecDisplay) to rich
- interactive terminals (TUIDisplay) or debugging wrappers (DebugEventDisplay).
+ Implementations can range from minimal text output to rich interactive
+ terminals (TUIDisplay) or debugging wrappers (DebugEventDisplay).

  Lifecycle:
  1. start() is called once before any events are consumed.
{klaude_code-2.1.0.dist-info → klaude_code-2.2.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: klaude-code
- Version: 2.1.0
+ Version: 2.2.0
  Summary: Minimal code agent CLI
  Requires-Dist: anthropic>=0.66.0
  Requires-Dist: chardet>=5.2.0
@@ -25,21 +25,19 @@ Description-Content-Type: text/markdown
  Minimal code agent CLI.

  ## Features
- - **Multi-provider**: Anthropic, OpenAI Responses API, OpenRouter
+ - **Multi-provider**: Anthropic Message API, OpenAI Responses API, OpenRouter, Claude Max OAuth and ChatGPT Codex OAuth etc.
  - **Keep reasoning item in context**: Interleaved thinking support
- - **Model-aware tools**: Claude Code tools for Sonnet, `apply_patch` for GPT-5/Codex
- - **Structured sub-agent output**: Define JSON schema, get schema-compliant responses via constrained decoding
+ - **Model-aware tools**: Claude Code tool set for Opus, `apply_patch` for GPT-5/Codex
+ - **Reminders**: Cooldown-based todo tracking, instruction reinforcement and external file change reminder
+ - **Sub-agents**: Task, Explore, Web, ImageGen
+ - **Structured sub-agent output**: Main agent defines JSON schema and get schema-compliant responses via constrained decoding
  - **Recursive `@file` mentions**: Circular dependency protection, relative path resolution
- - **Reminders**: Cooldown-based todo tracking and instruction reinforcement
  - **External file sync**: Monitoring for external edits (linter, manual)
  - **Interrupt handling**: Ctrl+C preserves partial responses and synthesizes tool cancellation results
  - **Output truncation**: Large outputs saved to file system with snapshot links
- - **Skills**: Built-in + user + project Agent Skills (with implicit invocation by Skill tool or explicit invocation by typing `$`)
+ - **Agent Skills**: Built-in + user + project Agent Skills (with implicit invocation by Skill tool or explicit invocation by typing `$`)
  - **Sessions**: Resumable with `--continue`
- - **Cost tracking**: Automatic API cost calculation and display (USD/CNY)
- - **Version update check**: Background PyPI version check with upgrade prompts
- - **Terminal title**: Shows current directory and model name
- - **Mermaid diagrams**: Interactive local HTML viewer with zoom, pan, and SVG export
+ - **Mermaid diagrams**: Terminal image preview and Interactive local HTML viewer with zoom, pan, and SVG export
  - **Extras**: Slash commands, sub-agents, image paste, terminal notifications, auto-theming

  ## Installation
@@ -59,32 +57,23 @@ Or use the built-in alias command:
  ```bash
  klaude update
  klaude upgrade
- ```
-
- To show version:
-
- ```bash
  klaude --version
- klaude -v
- klaude version
+
  ```

  ## Usage

- ### Interactive Mode
-
  ```bash
  klaude [--model <name>] [--select-model]
  ```

  **Options:**
- - `--version`/`-V`/`-v`: Show version and exit.
  - `--model`/`-m`: Preferred model name (exact match picks immediately; otherwise opens the interactive selector filtered by this value).
  - `--select-model`/`-s`: Open the interactive model selector at startup (shows all models unless `--model` is also provided).
  - `--continue`/`-c`: Resume the most recent session.
  - `--resume`/`-r`: Select a session to resume for this project.
  - `--resume-by-id <id>`: Resume a session by its ID directly.
- - `--vanilla`: Minimal mode with only basic tools (Bash, Read, Edit) and no system prompts.
+ - `--vanilla`: Minimal mode with only basic tools (Bash, Read, Edit, Write) and no system prompts.

  **Model selection behavior:**
  - Default: uses `main_model` from config.
@@ -255,48 +244,16 @@ provider_list:
  context_limit: 128000
  ```

- ##### Full Example
-
- ```yaml
- # User configuration - merged with built-in config
- main_model: opus
-
- sub_agent_models:
- explore: sonnet
- task: opus
- webagent: sonnet
-
- provider_list:
- # Add models to built-in openrouter
- - provider_name: openrouter
- model_list:
- - model_name: qwen-coder
- model_params:
- model: qwen/qwen-2.5-coder-32b-instruct
- context_limit: 131072
-
- # Add a completely new provider
- - provider_name: local-ollama
- protocol: openai
- base_url: http://localhost:11434/v1
- api_key: ollama
- model_list:
- - model_name: local-llama
- model_params:
- model: llama3.2
- context_limit: 8192
- ```
-
  ##### Supported Protocols

- - `anthropic` - Anthropic Claude API
+ - `anthropic` - Anthropic Messages API
  - `claude_oauth` - Claude OAuth (for Claude Pro/Max subscribers)
- - `openai` - OpenAI-compatible API
+ - `openai` - OpenAI Chat Completion API
  - `responses` - OpenAI Responses API (for o-series, GPT-5, Codex)
- - `openrouter` - OpenRouter API
- - `google` - Google Gemini API
- - `bedrock` - AWS Bedrock (uses AWS credentials instead of api_key)
  - `codex_oauth` - OpenAI Codex CLI (OAuth-based, for ChatGPT Pro subscribers)
+ - `openrouter` - OpenRouter API (handling `reasoning_details` for interleaved thinking)
+ - `google` - Google Gemini API
+ - `bedrock` - AWS Bedrock for Claude(uses AWS credentials instead of api_key)

  List configured providers and models:

@@ -338,6 +295,7 @@ Inside the interactive session (`klaude`), use these commands to streamline your
  - `/model` - Switch the active LLM during the session.
  - `/thinking` - Configure model thinking/reasoning level.
  - `/clear` - Clear the current conversation context.
+ - `/copy` - Copy last assistant message.
  - `/status` - Show session usage statistics (cost, tokens, model breakdown).
  - `/resume` - Select and resume a previous session.
  - `/fork-session` - Fork current session to a new session ID (supports interactive fork point selection).
@@ -364,29 +322,6 @@ Inside the interactive session (`klaude`), use these commands to streamline your
  | `Backspace` | Delete character or selected text |
  | `c` (with selection) | Copy selected text to clipboard |

- ### Non-Interactive Headless Mode (exec)
-
- Execute a single command without starting the interactive REPL:
-
- ```bash
- # Direct input
- klaude exec "what is 2+2?"
-
- # Pipe input
- echo "hello world" | klaude exec
-
- # With model selection
-
- # Exact model name (non-interactive)
- echo "generate quicksort in python" | klaude exec --model gpt-5.1
-
- # Partial/ambiguous name opens the interactive selector (filtered)
- echo "generate quicksort in python" | klaude exec --model gpt
-
- # Stream all events as JSON lines (for programmatic processing)
- klaude exec "what is 2+2?" --stream-json
- ```
-
  ### Sub-Agents

  The main agent can spawn specialized sub-agents for specific tasks: