klaude-code 1.2.10__py3-none-any.whl → 1.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. klaude_code/cli/main.py +2 -7
  2. klaude_code/cli/runtime.py +23 -19
  3. klaude_code/command/__init__.py +29 -26
  4. klaude_code/command/clear_cmd.py +0 -2
  5. klaude_code/command/diff_cmd.py +0 -2
  6. klaude_code/command/export_cmd.py +0 -2
  7. klaude_code/command/help_cmd.py +0 -2
  8. klaude_code/command/model_cmd.py +0 -2
  9. klaude_code/command/refresh_cmd.py +0 -2
  10. klaude_code/command/registry.py +4 -8
  11. klaude_code/command/release_notes_cmd.py +0 -2
  12. klaude_code/command/status_cmd.py +2 -4
  13. klaude_code/command/terminal_setup_cmd.py +0 -2
  14. klaude_code/command/thinking_cmd.py +227 -0
  15. klaude_code/config/select_model.py +5 -15
  16. klaude_code/const/__init__.py +1 -1
  17. klaude_code/core/agent.py +1 -1
  18. klaude_code/core/executor.py +1 -4
  19. klaude_code/core/manager/agent_manager.py +15 -9
  20. klaude_code/core/manager/llm_clients_builder.py +4 -7
  21. klaude_code/core/prompt.py +5 -5
  22. klaude_code/core/prompts/prompt-claude-code.md +1 -12
  23. klaude_code/core/prompts/prompt-minimal.md +12 -0
  24. klaude_code/core/task.py +5 -2
  25. klaude_code/core/tool/memory/memory_tool.md +4 -0
  26. klaude_code/core/tool/memory/skill_loader.py +1 -1
  27. klaude_code/core/tool/todo/todo_write_tool.md +0 -157
  28. klaude_code/core/tool/todo/todo_write_tool_raw.md +182 -0
  29. klaude_code/core/tool/tool_registry.py +3 -4
  30. klaude_code/core/turn.py +0 -1
  31. klaude_code/llm/anthropic/client.py +56 -47
  32. klaude_code/llm/client.py +1 -19
  33. klaude_code/llm/codex/client.py +49 -30
  34. klaude_code/llm/openai_compatible/client.py +52 -34
  35. klaude_code/llm/openrouter/client.py +63 -41
  36. klaude_code/llm/responses/client.py +56 -39
  37. klaude_code/llm/usage.py +1 -49
  38. klaude_code/protocol/commands.py +1 -0
  39. klaude_code/protocol/llm_param.py +1 -9
  40. klaude_code/protocol/model.py +4 -3
  41. klaude_code/protocol/op.py +5 -2
  42. klaude_code/protocol/sub_agent.py +1 -0
  43. klaude_code/session/export.py +3 -0
  44. klaude_code/session/selector.py +12 -7
  45. klaude_code/session/session.py +1 -5
  46. klaude_code/session/templates/export_session.html +155 -0
  47. klaude_code/ui/modes/repl/completers.py +3 -3
  48. klaude_code/ui/modes/repl/event_handler.py +1 -5
  49. klaude_code/ui/modes/repl/input_prompt_toolkit.py +3 -34
  50. klaude_code/ui/renderers/metadata.py +11 -1
  51. klaude_code/ui/renderers/tools.py +13 -2
  52. klaude_code/ui/rich/markdown.py +4 -1
  53. klaude_code/ui/terminal/__init__.py +55 -0
  54. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/METADATA +1 -4
  55. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/RECORD +57 -54
  56. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/WHEEL +0 -0
  57. {klaude_code-1.2.10.dist-info → klaude_code-1.2.12.dist-info}/entry_points.txt +0 -0

klaude_code/llm/responses/client.py CHANGED
@@ -6,20 +6,56 @@ import httpx
  import openai
  from openai import AsyncAzureOpenAI, AsyncOpenAI
  from openai.types import responses
+ from openai.types.responses.response_create_params import ResponseCreateParamsStreaming

- from klaude_code.llm.client import LLMClientABC, call_with_logged_payload
+ from klaude_code.llm.client import LLMClientABC
  from klaude_code.llm.input_common import apply_config_defaults
  from klaude_code.llm.registry import register
  from klaude_code.llm.responses.input import convert_history_to_input, convert_tool_schema
- from klaude_code.llm.usage import MetadataTracker, convert_responses_usage
+ from klaude_code.llm.usage import MetadataTracker
  from klaude_code.protocol import llm_param, model
  from klaude_code.trace import DebugType, log_debug

+
  if TYPE_CHECKING:
      from openai import AsyncStream
      from openai.types.responses import ResponseStreamEvent


+ def build_payload(param: llm_param.LLMCallParameter) -> ResponseCreateParamsStreaming:
+     """Build OpenAI Responses API request parameters."""
+     inputs = convert_history_to_input(param.input, param.model)
+     tools = convert_tool_schema(param.tools)
+
+     payload: ResponseCreateParamsStreaming = {
+         "model": str(param.model),
+         "tool_choice": "auto",
+         "parallel_tool_calls": True,
+         "include": [
+             "reasoning.encrypted_content",
+         ],
+         "store": False,
+         "stream": True,
+         "temperature": param.temperature,
+         "max_output_tokens": param.max_tokens,
+         "input": inputs,
+         "instructions": param.system,
+         "tools": tools,
+         "prompt_cache_key": param.session_id or "",
+     }
+
+     if param.thinking and param.thinking.reasoning_effort:
+         payload["reasoning"] = {
+             "effort": param.thinking.reasoning_effort,
+             "summary": param.thinking.reasoning_summary,
+         }
+
+     if param.verbosity:
+         payload["text"] = {"verbosity": param.verbosity}
+
+     return payload
+
+
  async def parse_responses_stream(
      stream: "AsyncStream[ResponseStreamEvent]",
      param: llm_param.LLMCallParameter,
@@ -95,16 +131,17 @@ async def parse_responses_stream(
          if event.response.incomplete_details is not None:
              error_reason = event.response.incomplete_details.reason
          if event.response.usage is not None:
-             usage = convert_responses_usage(
-                 input_tokens=event.response.usage.input_tokens,
-                 output_tokens=event.response.usage.output_tokens,
-                 cached_tokens=event.response.usage.input_tokens_details.cached_tokens,
-                 reasoning_tokens=event.response.usage.output_tokens_details.reasoning_tokens,
-                 total_tokens=event.response.usage.total_tokens,
-                 context_limit=param.context_limit,
-                 max_tokens=param.max_tokens,
+             metadata_tracker.set_usage(
+                 model.Usage(
+                     input_tokens=event.response.usage.input_tokens,
+                     output_tokens=event.response.usage.output_tokens,
+                     cached_tokens=event.response.usage.input_tokens_details.cached_tokens,
+                     reasoning_tokens=event.response.usage.output_tokens_details.reasoning_tokens,
+                     context_size=event.response.usage.total_tokens,
+                     context_limit=param.context_limit,
+                     max_tokens=param.max_tokens,
+                 )
              )
-             metadata_tracker.set_usage(usage)
          metadata_tracker.set_model_name(str(param.model))
          metadata_tracker.set_response_id(response_id)
          yield metadata_tracker.finalize()
@@ -162,36 +199,16 @@ class ResponsesClient(LLMClientABC):

          metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)

-         inputs = convert_history_to_input(param.input, param.model)
-         tools = convert_tool_schema(param.tools)
+         payload = build_payload(param)

+         log_debug(
+             json.dumps(payload, ensure_ascii=False, default=str),
+             style="yellow",
+             debug_type=DebugType.LLM_PAYLOAD,
+         )
          try:
-             stream = await call_with_logged_payload(
-                 self.client.responses.create,
-                 model=str(param.model),
-                 tool_choice="auto",
-                 parallel_tool_calls=True,
-                 include=[
-                     "reasoning.encrypted_content",
-                 ],
-                 store=param.store,
-                 previous_response_id=param.previous_response_id,
-                 stream=True,
-                 temperature=param.temperature,
-                 max_output_tokens=param.max_tokens,
-                 input=inputs,
-                 instructions=param.system,
-                 tools=tools,
-                 text={
-                     "verbosity": param.verbosity,
-                 },
-                 prompt_cache_key=param.session_id or "",
-                 reasoning={
-                     "effort": param.thinking.reasoning_effort,
-                     "summary": param.thinking.reasoning_summary,
-                 }
-                 if param.thinking and param.thinking.reasoning_effort
-                 else None,
+             stream = await self.client.responses.create(
+                 **payload,
                  extra_headers={"extra": json.dumps({"session_id": param.session_id}, sort_keys=True)},
              )
          except (openai.OpenAIError, httpx.HTTPError) as e:
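
The net effect of the hunks above is that request construction moves into the new module-level build_payload helper and the payload is logged via log_debug before the API call. A minimal illustrative sketch of the same conditional-payload pattern, using plain dicts and made-up values rather than klaude-code's actual types:

    # Hypothetical stand-in for build_payload: the "reasoning" and "text" blocks
    # are only attached when the corresponding settings are present.
    def sketch_payload(model: str, effort: str | None = None, verbosity: str | None = None) -> dict:
        payload: dict = {
            "model": model,
            "stream": True,
            "store": False,
            "include": ["reasoning.encrypted_content"],
        }
        if effort:
            payload["reasoning"] = {"effort": effort, "summary": "auto"}
        if verbosity:
            payload["text"] = {"verbosity": verbosity}
        return payload

    print(sketch_payload("some-model", effort="high"))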
klaude_code/llm/usage.py CHANGED
@@ -108,55 +108,7 @@ def convert_usage(
          reasoning_tokens=(usage.completion_tokens_details.reasoning_tokens if usage.completion_tokens_details else 0)
          or 0,
          output_tokens=usage.completion_tokens,
-         context_token=usage.total_tokens,
-         context_limit=context_limit,
-         max_tokens=max_tokens,
-     )
-
-
- def convert_anthropic_usage(
-     input_tokens: int,
-     output_tokens: int,
-     cached_tokens: int,
-     context_limit: int | None = None,
-     max_tokens: int | None = None,
- ) -> model.Usage:
-     """Convert Anthropic usage data to internal Usage model.
-
-     context_token is computed from input + cached + output tokens,
-     representing the actual context window usage for this turn.
-     """
-     context_token = input_tokens + cached_tokens + output_tokens
-     return model.Usage(
-         input_tokens=input_tokens,
-         output_tokens=output_tokens,
-         cached_tokens=cached_tokens,
-         context_token=context_token,
-         context_limit=context_limit,
-         max_tokens=max_tokens,
-     )
-
-
- def convert_responses_usage(
-     input_tokens: int,
-     output_tokens: int,
-     cached_tokens: int,
-     reasoning_tokens: int,
-     total_tokens: int,
-     context_limit: int | None = None,
-     max_tokens: int | None = None,
- ) -> model.Usage:
-     """Convert OpenAI Responses API usage data to internal Usage model.
-
-     context_token is set to total_tokens from the API response,
-     representing the actual context window usage for this turn.
-     """
-     return model.Usage(
-         input_tokens=input_tokens,
-         output_tokens=output_tokens,
-         cached_tokens=cached_tokens,
-         reasoning_tokens=reasoning_tokens,
-         context_token=total_tokens,
+         context_size=usage.total_tokens,
          context_limit=context_limit,
          max_tokens=max_tokens,
      )
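
For reference, the removed convert_anthropic_usage helper documented the per-turn context as input + cached + output tokens; a quick worked example with made-up numbers:

    input_tokens, cached_tokens, output_tokens = 1_200, 18_000, 650
    print(input_tokens + cached_tokens + output_tokens)  # 19850 tokens of context this turn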

klaude_code/protocol/commands.py CHANGED
@@ -13,6 +13,7 @@ class CommandName(str, Enum):
      EXPORT = "export"
      STATUS = "status"
      RELEASE_NOTES = "release-notes"
+     THINKING = "thinking"
      # PLAN and DOC are dynamically registered now, but kept here if needed for reference
      # or we can remove them if no code explicitly imports them.
      # PLAN = "plan"

klaude_code/protocol/llm_param.py CHANGED
@@ -28,7 +28,7 @@ class Thinking(BaseModel):
      """

      # OpenAI Reasoning Style
-     reasoning_effort: Literal["high", "medium", "low", "minimal", "none"] | None = None
+     reasoning_effort: Literal["high", "medium", "low", "minimal", "none", "xhigh"] | None = None
      reasoning_summary: Literal["auto", "concise", "detailed"] | None = None

      # Claude/Gemini Thinking Style
@@ -138,12 +138,4 @@ class LLMCallParameter(LLMConfigModelParameter):
      input: list[ConversationItem]
      system: str | None = None
      tools: list[ToolSchema] | None = None
-
-     stream: Literal[True] = True  # Always True
-
-     # OpenAI Responses
-     include: list[str] | None = None
-     store: bool = True
-     previous_response_id: str | None = None
-
      session_id: str | None = None
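
A small sketch of the effect of the widened Literal: "xhigh" now validates while unknown values are still rejected. ThinkingSketch is a hypothetical stand-in, assuming pydantic (which the BaseModel base above suggests), not the real Thinking class:

    from typing import Literal

    from pydantic import BaseModel, ValidationError

    class ThinkingSketch(BaseModel):
        reasoning_effort: Literal["high", "medium", "low", "minimal", "none", "xhigh"] | None = None

    print(ThinkingSketch(reasoning_effort="xhigh"))  # accepted
    try:
        ThinkingSketch(reasoning_effort="ultra")     # not in the Literal
    except ValidationError:
        print("rejected")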

klaude_code/protocol/model.py CHANGED
@@ -20,7 +20,7 @@ class Usage(BaseModel):
      output_tokens: int = 0

      # Context window tracking
-     context_token: int | None = None  # Peak total_tokens seen (for context usage display)
+     context_size: int | None = None  # Peak total_tokens seen (for context usage display)
      context_limit: int | None = None  # Model's context limit
      max_tokens: int | None = None  # Max output tokens for this request

@@ -53,12 +53,12 @@ class Usage(BaseModel):
          """Context usage percentage computed from context_token / (context_limit - max_tokens)."""
          if self.context_limit is None or self.context_limit <= 0:
              return None
-         if self.context_token is None:
+         if self.context_size is None:
              return None
          effective_limit = self.context_limit - (self.max_tokens or const.DEFAULT_MAX_TOKENS)
          if effective_limit <= 0:
              return None
-         return (self.context_token / effective_limit) * 100
+         return (self.context_size / effective_limit) * 100


  class TodoItem(BaseModel):
@@ -319,6 +319,7 @@ class TaskMetadata(BaseModel):
      model_name: str = ""
      provider: str | None = None
      task_duration_s: float | None = None
+     turn_count: int = 0

      @staticmethod
      def aggregate_by_model(metadata_list: list["TaskMetadata"]) -> list["TaskMetadata"]:
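
Worked example of the context_percent formula above, with made-up numbers:

    context_size = 120_000   # peak total_tokens seen this turn
    context_limit = 200_000  # model's context window
    max_tokens = 32_000      # reserved output budget

    effective_limit = context_limit - max_tokens            # 168000
    print(round(context_size / effective_limit * 100, 1))   # 71.4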

klaude_code/protocol/op.py CHANGED
@@ -63,11 +63,14 @@ class InterruptOperation(Operation):


  class InitAgentOperation(Operation):
-     """Operation for initializing an agent and replaying history if any."""
+     """Operation for initializing an agent and replaying history if any.
+
+     If session_id is None, a new session is created with an auto-generated ID.
+     If session_id is provided, attempts to load existing session or creates new one.
+     """

      type: OperationType = OperationType.INIT_AGENT
      session_id: str | None = None
-     is_new_session: bool = False

      async def execute(self, handler: OperationHandler) -> None:
          await handler.handle_init_agent(self)

klaude_code/protocol/sub_agent.py CHANGED
@@ -290,6 +290,7 @@ register_sub_agent(
          tool_set=(tools.BASH, tools.READ),
          prompt_builder=_explore_prompt_builder,
          active_form="Exploring",
+         target_model_filter=lambda model: ("haiku" not in model) and ("kimi" not in model) and ("grok" not in model),
      )
  )
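
Illustrative only: how a predicate of this shape filters a candidate model list (the names below are placeholders, not klaude-code's model registry):

    def target_model_filter(model: str) -> bool:
        return ("haiku" not in model) and ("kimi" not in model) and ("grok" not in model)

    candidates = ["claude-sonnet-4", "claude-haiku-3", "kimi-k2", "grok-3", "some-large-model"]
    print([m for m in candidates if target_model_filter(m)])  # ['claude-sonnet-4', 'some-large-model']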
 

klaude_code/session/export.py CHANGED
@@ -403,6 +403,9 @@ def _get_mermaid_link_html(
      buttons_html.append(
          f'<button type="button" class="copy-mermaid-btn" data-code="{escaped_code}" title="Copy Mermaid Code">Copy Code</button>'
      )
+     buttons_html.append(
+         '<button type="button" class="fullscreen-mermaid-btn" title="View Fullscreen">Fullscreen</button>'
+     )

      link = ui_extra.link if isinstance(ui_extra, model.MermaidLinkUIExtra) else None


klaude_code/session/selector.py CHANGED
@@ -10,6 +10,11 @@ from .session import Session


  def resume_select_session() -> str | None:
+     # Column widths
+     UPDATED_AT_WIDTH = 16
+     MSG_COUNT_WIDTH = 3
+     MODEL_WIDTH = 25
+     FIRST_MESSAGE_WIDTH = 50
      sessions = Session.list_sessions()
      if not sessions:
          log("No sessions found for this project.")
@@ -31,20 +36,20 @@ def resume_select_session() -> str | None:
          model_display = s.model_name or "N/A"

          title = [
-             ("class:d", f"{_fmt(s.updated_at):<16} "),
-             ("class:b", f"{msg_count_display:>3} "),
+             ("class:d", f"{_fmt(s.updated_at):<{UPDATED_AT_WIDTH}} "),
+             ("class:b", f"{msg_count_display:>{MSG_COUNT_WIDTH}} "),
              (
                  "class:t",
-                 f"{model_display[:29] + '…' if len(model_display) > 29 else model_display:<30} ",
+                 f"{model_display[:MODEL_WIDTH - 1] + '…' if len(model_display) > MODEL_WIDTH else model_display:<{MODEL_WIDTH}} ",
              ),
              (
                  "class:t",
-                 f"{first_user_message.strip().replace('\n', ' ↩ '):<50}",
+                 f"{first_user_message.strip().replace('\n', ' ↩ '):<{FIRST_MESSAGE_WIDTH}}",
              ),
          ]
          choices.append(questionary.Choice(title=title, value=s.id))
      return questionary.select(
-         message=f"{' Updated at':<17} {'Msg':>3} {'Model':<30} {'First message':<50}",
+         message=f"{' Updated at':<{UPDATED_AT_WIDTH + 1}} {'Msg':>{MSG_COUNT_WIDTH}} {'Model':<{MODEL_WIDTH}} {'First message':<{FIRST_MESSAGE_WIDTH}}",
          choices=choices,
          pointer="→",
          instruction="↑↓ to move",
@@ -63,8 +68,8 @@ def resume_select_session() -> str | None:
          msg_count_display = "N/A" if s.messages_count == -1 else str(s.messages_count)
          model_display = s.model_name or "N/A"
          print(
-             f"{i}. {_fmt(s.updated_at)} {msg_count_display:>3} "
-             f"{model_display[:29] + '…' if len(model_display) > 29 else model_display:<30} {s.id} {s.work_dir}"
+             f"{i}. {_fmt(s.updated_at)} {msg_count_display:>{MSG_COUNT_WIDTH}} "
+             f"{model_display[:MODEL_WIDTH - 1] + '…' if len(model_display) > MODEL_WIDTH else model_display:<{MODEL_WIDTH}} {s.id} {s.work_dir}"
          )
      try:
          raw = input("Select a session number: ").strip()
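
Quick standalone demonstration of the nested-width f-string pattern this refactor introduces (the model name is made up):

    MODEL_WIDTH = 25
    name = "anthropic/claude-sonnet-4-20250514"
    shown = name[:MODEL_WIDTH - 1] + "…" if len(name) > MODEL_WIDTH else name
    print(f"|{shown:<{MODEL_WIDTH}}|")  # truncated to 24 chars plus '…', formatted to 25 columns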

klaude_code/session/session.py CHANGED
@@ -108,12 +108,8 @@ class Session(BaseModel):
          return Session(id=id or uuid.uuid4().hex, work_dir=Path.cwd())

      @classmethod
-     def load(cls, id: str, *, skip_if_missing: bool = False) -> "Session":
+     def load(cls, id: str) -> "Session":
          """Load an existing session or create a new one if not found."""
-
-         if skip_if_missing:
-             return Session(id=id, work_dir=Path.cwd())
-
          # Load session metadata
          sessions_dir = cls._sessions_dir()
          session_candidates = sorted(

klaude_code/session/templates/export_session.html CHANGED
@@ -338,6 +338,57 @@
          border-color: var(--accent);
      }

+     .mermaid-modal {
+         position: fixed;
+         top: 0;
+         left: 0;
+         width: 100vw;
+         height: 100vh;
+         background: rgba(255, 255, 255, 0.98);
+         z-index: 1000;
+         display: flex;
+         flex-direction: column;
+         align-items: center;
+         justify-content: center;
+         opacity: 0;
+         pointer-events: none;
+         transition: opacity 0.2s;
+     }
+     .mermaid-modal.active {
+         opacity: 1;
+         pointer-events: auto;
+     }
+     .mermaid-modal-content {
+         width: 95%;
+         height: 90%;
+         display: flex;
+         align-items: center;
+         justify-content: center;
+         overflow: auto;
+     }
+     .mermaid-modal-content svg {
+         width: auto !important;
+         height: auto !important;
+         max-width: 100%;
+         max-height: 100%;
+     }
+     .mermaid-modal-close {
+         position: absolute;
+         top: 20px;
+         right: 20px;
+         background: transparent;
+         border: none;
+         font-size: 32px;
+         cursor: pointer;
+         color: var(--text-dim);
+         z-index: 1001;
+         line-height: 1;
+         padding: 8px;
+     }
+     .mermaid-modal-close:hover {
+         color: var(--text);
+     }
+
      .copy-mermaid-btn {
          border: 1px solid var(--border);
          background: transparent;
@@ -356,6 +407,25 @@
          border-color: var(--accent);
      }

+     .fullscreen-mermaid-btn {
+         margin-left: 8px;
+         border: 1px solid var(--border);
+         background: transparent;
+         color: var(--text-dim);
+         font-family: var(--font-mono);
+         font-size: var(--font-size-xs);
+         text-transform: uppercase;
+         padding: 2px 10px;
+         border-radius: 999px;
+         cursor: pointer;
+         transition: color 0.2s, border-color 0.2s, background 0.2s;
+         font-weight: var(--font-weight-bold);
+     }
+     .fullscreen-mermaid-btn:hover {
+         color: var(--text);
+         border-color: var(--accent);
+     }
+
      .assistant-rendered {
          width: 100%;
      }
@@ -1065,6 +1135,13 @@
      </svg>
  </div>

+ <div id="mermaid-modal" class="mermaid-modal">
+     <button class="mermaid-modal-close" id="mermaid-modal-close">
+         &times;
+     </button>
+     <div class="mermaid-modal-content" id="mermaid-modal-content"></div>
+ </div>
+
  <link
      rel="stylesheet"
      href="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.9.0/build/styles/github.min.css"
@@ -1280,6 +1357,84 @@
      });
  });

+ // Mermaid Fullscreen Logic
+ const modal = document.getElementById("mermaid-modal");
+ const modalContent = document.getElementById("mermaid-modal-content");
+ const modalClose = document.getElementById("mermaid-modal-close");
+
+ if (modal && modalContent && modalClose) {
+     const closeModal = () => {
+         modal.classList.remove("active");
+         modalContent.innerHTML = "";
+     };
+
+     modalClose.addEventListener("click", closeModal);
+
+     modal.addEventListener("click", (e) => {
+         if (e.target === modal) {
+             closeModal();
+         }
+     });
+
+     // Handle Escape key
+     document.addEventListener("keydown", (e) => {
+         if (e.key === "Escape" && modal.classList.contains("active")) {
+             closeModal();
+         }
+     });
+
+     document.querySelectorAll(".fullscreen-mermaid-btn").forEach((btn) => {
+         btn.addEventListener("click", (e) => {
+             // The structure is:
+             //   wrapper > mermaid > svg
+             //   wrapper > toolbar > buttons > btn
+
+             // We need to find the mermaid div that is a sibling of the toolbar
+
+             // Traverse up to the wrapper
+             let wrapper = btn.closest("div[style*='background: white']");
+
+             if (!wrapper) {
+                 // Fallback: try to find by traversing up and looking for .mermaid
+                 let p = btn.parentElement;
+                 while (p) {
+                     if (p.querySelector(".mermaid")) {
+                         wrapper = p;
+                         break;
+                     }
+                     p = p.parentElement;
+                     if (p === document.body) break;
+                 }
+             }
+
+             if (wrapper) {
+                 const mermaidDiv = wrapper.querySelector(".mermaid");
+                 if (mermaidDiv) {
+                     const svg = mermaidDiv.querySelector("svg");
+
+                     if (svg) {
+                         // Clone the SVG to put in modal
+                         // We treat the SVG as the source
+                         const clone = svg.cloneNode(true);
+                         // Remove fixed sizes to let it scale in flex container
+                         clone.removeAttribute("height");
+                         clone.removeAttribute("width");
+                         clone.style.maxWidth = "100%";
+                         clone.style.maxHeight = "100%";
+
+                         modalContent.appendChild(clone);
+                         modal.classList.add("active");
+                     } else if (mermaidDiv.textContent.trim()) {
+                         // Fallback if not rendered yet (should not happen on export usually)
+                         modalContent.textContent = "Diagram not rendered yet.";
+                         modal.classList.add("active");
+                     }
+                 }
+             }
+         });
+     });
+ }
+
  // Scroll to bottom button
  const scrollBtn = document.getElementById("scroll-btn");


klaude_code/ui/modes/repl/completers.py CHANGED
@@ -81,9 +81,9 @@ class _SlashCommandCompleter(Completer):
          # Get available commands
          commands = get_commands()

-         # Filter commands that match the fragment
+         # Filter commands that match the fragment (preserve registration order)
          matched: list[tuple[str, object, str]] = []
-         for cmd_name, cmd_obj in sorted(commands.items(), key=lambda x: str(x[1].name)):
+         for cmd_name, cmd_obj in commands.items():
              if cmd_name.startswith(frag):
                  hint = " [args]" if cmd_obj.support_addition_params else ""
                  matched.append((cmd_name, cmd_obj, hint))
@@ -103,7 +103,7 @@ class _SlashCommandCompleter(Completer):

              # Using HTML for formatting: bold command name, normal hint, gray summary
              display_text = HTML(
-                 f"<b>{cmd_name}</b>{hint}{padding}<style color='ansibrightblack'>— {cmd_obj.summary}</style>"  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
+                 f"<b>{cmd_name}</b>{hint}{padding}<style color='ansibrightblack'>{cmd_obj.summary}</style>"  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
              )
              completion_text = f"/{cmd_name} "
              yield Completion(

klaude_code/ui/modes/repl/event_handler.py CHANGED
@@ -453,14 +453,10 @@ class DisplayEventHandler:
          if len(todo.content) > 0:
              status_text = todo.content
          status_text = status_text.replace("\n", "")
-         return self._truncate_status_text(status_text, max_length=30)
+         return self._truncate_status_text(status_text, max_length=100)

      def _truncate_status_text(self, text: str, max_length: int) -> str:
-         """Truncate text to max_length while preserving complete words."""
          if len(text) <= max_length:
              return text
          truncated = text[:max_length]
-         last_space = truncated.rfind(" ")
-         if last_space > 0:
-             return truncated[:last_space] + "..."
          return truncated + "..."
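
The simplified helper now cuts mid-word at max_length instead of backing up to the last space; a standalone check of the new behavior:

    def truncate(text: str, max_length: int) -> str:
        if len(text) <= max_length:
            return text
        return text[:max_length] + "..."

    print(truncate("refactor the session selector column widths", 30))
    # prints: refactor the session selector ...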

klaude_code/ui/modes/repl/input_prompt_toolkit.py CHANGED
@@ -6,9 +6,7 @@ from pathlib import Path
  from typing import NamedTuple, override

  from prompt_toolkit import PromptSession
- from prompt_toolkit.buffer import Buffer
  from prompt_toolkit.completion import ThreadedCompleter
- from prompt_toolkit.filters import Condition
  from prompt_toolkit.formatted_text import FormattedText
  from prompt_toolkit.history import FileHistory
  from prompt_toolkit.patch_stdout import patch_stdout
@@ -45,9 +43,6 @@ class PromptToolkitInput(InputProviderABC):
      ):  # ▌
          self._status_provider = status_provider

-         # Mouse is disabled by default; only enabled when input becomes multi-line.
-         self._mouse_enabled: bool = False
-
          project = str(Path.cwd()).strip("/").replace("/", "-")
          history_path = Path.home() / ".klaude" / "projects" / f"{project}" / "input_history.txt"

@@ -56,8 +51,6 @@ class PromptToolkitInput(InputProviderABC):
          if not history_path.exists():
              history_path.touch()

-         mouse_support_filter = Condition(lambda: self._mouse_enabled)
-
          # Create key bindings with injected dependencies
          kb = create_key_bindings(
              capture_clipboard_tag=capture_clipboard_tag,
@@ -75,7 +68,7 @@ class PromptToolkitInput(InputProviderABC):
              complete_while_typing=True,
              erase_when_done=True,
              bottom_toolbar=self._render_bottom_toolbar,
-             mouse_support=mouse_support_filter,
+             mouse_support=False,
              style=Style.from_dict(
                  {
                      "completion-menu": "bg:default",
@@ -90,12 +83,6 @@ class PromptToolkitInput(InputProviderABC):
              ),
          )

-         try:
-             self._session.default_buffer.on_text_changed += self._on_buffer_text_changed
-         except Exception:
-             # If we can't hook the buffer events for any reason, fall back to static behavior.
-             pass
-
      def _render_bottom_toolbar(self) -> FormattedText:
          """Render bottom toolbar with working directory, git branch on left, model name and context usage on right.

@@ -168,8 +155,6 @@ class PromptToolkitInput(InputProviderABC):
      @override
      async def iter_inputs(self) -> AsyncIterator[UserInputPayload]:
          while True:
-             # For each new prompt, start with mouse disabled so users can select history.
-             self._mouse_enabled = False
              with patch_stdout():
                  line: str = await self._session.prompt_async()

@@ -178,21 +163,5 @@ class PromptToolkitInput(InputProviderABC):

              yield UserInputPayload(text=line, images=images if images else None)

-     def _on_buffer_text_changed(self, buf: Buffer) -> None:
-         """Toggle mouse support based on current buffer content.
-
-         Mouse stays disabled when input is empty. It is enabled only when
-         the user has entered more than one line of text.
-         """
-         try:
-             text = buf.text
-         except Exception:
-             return
-         self._mouse_enabled = self._should_enable_mouse(text)
-
-     def _should_enable_mouse(self, text: str) -> bool:
-         """Return True when mouse support should be enabled for current input."""
-         if not text.strip():
-             return False
-         # Enable mouse only when input spans multiple lines.
-         return "\n" in text
+     # Note: Mouse support is intentionally disabled at the PromptSession
+     # level so that terminals retain their native scrollback behavior.
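
A minimal prompt_toolkit sketch (not klaude-code's actual wiring) of the static setting this diff switches to; with mouse_support=False the terminal keeps its native scrollback and text selection:

    from prompt_toolkit import PromptSession

    session = PromptSession(mouse_support=False)
    # text = session.prompt("> ")  # uncomment to try interactively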