klaude-code 1.2.10__py3-none-any.whl → 1.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. klaude_code/cli/main.py +2 -7
  2. klaude_code/cli/runtime.py +23 -19
  3. klaude_code/core/agent.py +7 -0
  4. klaude_code/core/executor.py +1 -4
  5. klaude_code/core/manager/agent_manager.py +15 -9
  6. klaude_code/core/manager/llm_clients_builder.py +4 -7
  7. klaude_code/core/prompt.py +4 -4
  8. klaude_code/core/prompts/prompt-claude-code.md +1 -12
  9. klaude_code/core/prompts/prompt-minimal.md +12 -0
  10. klaude_code/core/task.py +4 -0
  11. klaude_code/core/tool/memory/memory_tool.md +4 -0
  12. klaude_code/core/tool/memory/skill_loader.py +1 -1
  13. klaude_code/core/tool/todo/todo_write_tool.md +0 -157
  14. klaude_code/core/tool/todo/todo_write_tool_raw.md +182 -0
  15. klaude_code/core/tool/tool_registry.py +3 -4
  16. klaude_code/llm/client.py +1 -1
  17. klaude_code/protocol/model.py +3 -0
  18. klaude_code/protocol/op.py +5 -2
  19. klaude_code/protocol/sub_agent.py +1 -0
  20. klaude_code/session/export.py +11 -1
  21. klaude_code/session/session.py +1 -5
  22. klaude_code/session/templates/export_session.html +155 -0
  23. klaude_code/ui/modes/repl/event_handler.py +1 -5
  24. klaude_code/ui/modes/repl/input_prompt_toolkit.py +3 -34
  25. klaude_code/ui/renderers/metadata.py +21 -0
  26. klaude_code/ui/renderers/tools.py +13 -2
  27. klaude_code/ui/rich/markdown.py +4 -1
  28. klaude_code/ui/terminal/__init__.py +55 -0
  29. {klaude_code-1.2.10.dist-info → klaude_code-1.2.11.dist-info}/METADATA +1 -4
  30. {klaude_code-1.2.10.dist-info → klaude_code-1.2.11.dist-info}/RECORD +32 -30
  31. {klaude_code-1.2.10.dist-info → klaude_code-1.2.11.dist-info}/WHEEL +0 -0
  32. {klaude_code-1.2.10.dist-info → klaude_code-1.2.11.dist-info}/entry_points.txt +0 -0
klaude_code/core/tool/todo/todo_write_tool_raw.md ADDED
@@ -0,0 +1,182 @@
1
+ Use this tool to create and manage a structured task list for your current coding session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.
2
+ It also helps the user understand the progress of the task and overall progress of their requests.
3
+
4
+ #### When to Use This Tool
5
+ Use this tool proactively in these scenarios:
6
+
7
+ 1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions
8
+ 2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations
9
+ 3. User explicitly requests todo list - When the user directly asks you to use the todo list
10
+ 4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)
11
+ 5. After receiving new instructions - Immediately capture user requirements as todos
12
+ 6. When you start working on a task - Mark it as in_progress BEFORE beginning work. Ideally you should only have one todo as in_progress at a time
13
+ 7. After completing a task - Mark it as completed and add any new follow-up tasks discovered during implementation
14
+
15
+ #### When NOT to Use This Tool
16
+
17
+ Skip using this tool when:
18
+ 1. There is only a single, straightforward task
19
+ 2. The task is trivial and tracking it provides no organizational benefit
20
+ 3. The task can be completed in less than 3 trivial steps
21
+ 4. The task is purely conversational or informational
22
+
23
+ NOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly.
24
+
25
+ #### Examples of When to Use the Todo List
26
+
27
+ <example>
28
+ User: I want to add a dark mode toggle to the application settings. Make sure you run the tests and build when you're done!
29
+ Assistant: I'll help add a dark mode toggle to your application settings. Let me create a todo list to track this implementation.
30
+ *Creates todo list with the following items:*
31
+ 1. Creating dark mode toggle component in Settings page
32
+ 2. Adding dark mode state management (context/store)
33
+ 3. Implementing CSS-in-JS styles for dark theme
34
+ 4. Updating existing components to support theme switching
35
+ 5. Running tests and build process, addressing any failures or errors that occur
36
+ *Begins working on the first task*
37
+
38
+ <reasoning>
39
+ The assistant used the todo list because:
40
+ 1. Adding dark mode is a multi-step feature requiring UI, state management, and styling changes
41
+ 2. The user explicitly requested tests and build be run afterward
42
+ 3. The assistant inferred that tests and build need to pass by adding "Ensure tests and build succeed" as the final task
43
+ </reasoning>
44
+ </example>
45
+
46
+ <example>
47
+ User: Help me rename the function getCwd to getCurrentWorkingDirectory across my project
48
+ Assistant: Let me first search through your codebase to find all occurrences of 'getCwd'.
49
+ *Uses grep or search tools to locate all instances of getCwd in the codebase*
50
+ Assistant: I've found 15 instances of 'getCwd' across 8 different files. Let me create a todo list to track these changes.
51
+ *Creates todo list with specific items for each file that needs updating*
52
+
53
+ <reasoning>
54
+ The assistant used the todo list because:
55
+ 1. First, the assistant searched to understand the scope of the task
56
+ 2. Upon finding multiple occurrences across different files, it determined this was a complex task with multiple steps
57
+ 3. The todo list helps ensure every instance is tracked and updated systematically
58
+ 4. This approach prevents missing any occurrences and maintains code consistency
59
+ </reasoning>
60
+ </example>
61
+
62
+
63
+ <example>
64
+ User: I need to implement these features for my e-commerce site: user registration, product catalog, shopping cart, and checkout flow.
65
+ Assistant: I'll help implement these features. First, let's add all the features to the todo list.
66
+ *Creates a todo list breaking down each feature into specific tasks based on the project architecture*
67
+ Assistant: Let's start with user registration. This will involve creating database models, API endpoints, and frontend forms that integrate with your existing authentication system.
68
+
69
+ <reasoning>
70
+ The assistant used the todo list because:
71
+ 1. The user provided multiple complex features to implement in a comma separated list
72
+ 2. The todo list helps organize these large features into manageable tasks
73
+ 3. This approach allows for tracking progress across the entire implementation
74
+ </reasoning>
75
+ </example>
76
+
77
+ <example>
78
+ User: Can you help optimize my React application? It's rendering slowly and has performance issues.
79
+ Assistant: I'll help optimize your React application. First, let me examine your codebase to identify potential performance bottlenecks.
80
+ *Reviews component structure, render patterns, state management, and data fetching*
81
+ Assistant: After analyzing your codebase, I've identified several performance issues. Let me create a todo list to track our optimization efforts.
82
+ *Creates todo list with items like: 1) Implementing memoization for expensive calculations in ProductList, 2) Adding virtualization for long lists in Dashboard, 3) Optimizing image loading in Gallery component, 4) Fixing state update loops in ShoppingCart, 5) Reviewing bundle size and implementing code splitting*
83
+ Let's start by implementing memoization for the expensive calculations in your ProductList component.
84
+
85
+ <reasoning>
86
+ The assistant used the todo list because:
87
+ 1. First, the assistant examined the codebase to identify specific performance issues
88
+ 2. Based on this analysis, it identified multiple optimization opportunities
89
+ 3. Performance optimization is a non-trivial task requiring multiple steps
90
+ 4. The todo list helps methodically track improvements across different components
91
+ 5. This systematic approach ensures all performance bottlenecks are addressed
92
+ </reasoning>
93
+ </example>
94
+
95
+ #### Examples of When NOT to Use the Todo List
96
+
97
+ <example>
98
+ User: How do I print 'Hello World' in Python?
99
+ Assistant: In Python, you can print "Hello World" with this simple code:
100
+
101
+ python
102
+ print("Hello World")
103
+
104
+ This will output the text "Hello World" to the console when executed.
105
+
106
+ <reasoning>
107
+ The assistant did not use the todo list because this is a single, trivial task that can be completed in one step. There's no need to track multiple tasks or steps for such a straightforward request.
108
+ </reasoning>
109
+ </example>
110
+
111
+ <example>
112
+ User: What does the git status command do?
113
+ Assistant: The git status command shows the current state of your working directory and staging area. It displays which changes have been staged, which haven't, and which files aren't being tracked by Git. This is a fundamental command for understanding the current state of your repository before making commits.
114
+
115
+ <reasoning>
116
+ The assistant did not use the todo list because this is an informational request with no actual coding task to complete. The user is simply asking for an explanation, not for the assistant to perform multiple steps or tasks.
117
+ </reasoning>
118
+ </example>
119
+
120
+ <example>
121
+ User: Can you add a comment to the calculateTotal function to explain what it does?
122
+ Assistant: Sure, let me add a comment to the calculateTotal function to explain what it does.
123
+ * Uses the Edit tool to add a comment to the calculateTotal function *
124
+
125
+ <reasoning>
126
+ The assistant did not use the todo list because this is a single, straightforward task confined to one location in the code. Adding a comment doesn't require tracking multiple steps or systematic organization.
127
+ </reasoning>
128
+ </example>
129
+
130
+ <example>
131
+ User: Run npm install for me and tell me what happens.
132
+ Assistant: I'll run the npm install command for you.
133
+
134
+ *Executes: npm install*
135
+
136
+ The command completed successfully. Here's the output:
137
+ [Output of npm install command]
138
+
139
+ All dependencies have been installed according to your package.json file.
140
+
141
+ <reasoning>
142
+ The assistant did not use the todo list because this is a single command execution with immediate results. There are no multiple steps to track or organize, making the todo list unnecessary for this straightforward task.
143
+ </reasoning>
144
+ </example>
145
+
146
+ #### Task States and Management
147
+
148
+ 1. **Task States**: Use these states to track progress:
149
+ - pending: Task not yet started
150
+ - in_progress: Currently working on (limit to ONE task at a time)
151
+ - completed: Task finished successfully
152
+
153
+ **IMPORTANT**: Task descriptions must have two forms:
154
+ - content: The imperative form describing what needs to be done (e.g., "Run tests", "Build the project")
155
+ - activeForm: The present continuous form shown during execution (e.g., "Running tests", "Building the project")
156
+
157
+ 2. **Task Management**:
158
+ - Update task status in real-time as you work
159
+ - Mark tasks complete IMMEDIATELY after finishing (don't batch completions)
160
+ - Exactly ONE task must be in_progress at any time (not less, not more)
161
+ - Complete current tasks before starting new ones
162
+ - Remove tasks that are no longer relevant from the list entirely
163
+
164
+ 3. **Task Completion Requirements**:
165
+ - ONLY mark a task as completed when you have FULLY accomplished it
166
+ - If you encounter errors, blockers, or cannot finish, keep the task as in_progress
167
+ - When blocked, create a new task describing what needs to be resolved
168
+ - Never mark a task as completed if:
169
+ - Tests are failing
170
+ - Implementation is partial
171
+ - You encountered unresolved errors
172
+ - You couldn't find necessary files or dependencies
173
+
174
+ 4. **Task Breakdown**:
175
+ - Create specific, actionable items
176
+ - Break complex tasks into smaller, manageable steps
177
+ - Use clear, descriptive task names
178
+ - Always provide both forms:
179
+ - content: "Fix authentication bug"
180
+ - activeForm: "Fixing authentication bug"
181
+
182
+ When in doubt, use this tool. Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.
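For orientation, here is a minimal sketch of a todo payload that follows the two-form rule described above. The actual TodoWrite schema is not included in this diff, so the key names below (todos, content, activeForm, status) are assumptions drawn from the prose.

```python
# Hypothetical payload sketch; the real TodoWrite schema is not shown in this diff.
todo_write_payload = {
    "todos": [
        {
            "content": "Run tests",           # imperative form
            "activeForm": "Running tests",    # present-continuous form shown while executing
            "status": "in_progress",          # exactly one task in_progress at a time
        },
        {
            "content": "Build the project",
            "activeForm": "Building the project",
            "status": "pending",
        },
    ]
}
```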
klaude_code/core/tool/tool_registry.py CHANGED
@@ -61,18 +61,17 @@ def load_agent_tools(
61
61
 
62
62
  if sub_agent_type is not None:
63
63
  profile = get_sub_agent_profile(sub_agent_type)
64
- if not profile.enabled_for_model(model_name):
65
- return []
66
64
  return get_tool_schemas(list(profile.tool_set))
67
65
 
68
66
  # Main agent tools
69
67
  if "gpt-5" in model_name:
70
- tool_names = [tools.BASH, tools.READ, tools.APPLY_PATCH, tools.UPDATE_PLAN]
68
+ tool_names = [tools.BASH, tools.APPLY_PATCH, tools.UPDATE_PLAN]
71
69
  elif "gemini-3" in model_name:
72
70
  tool_names = [tools.BASH, tools.READ, tools.EDIT, tools.WRITE]
73
71
  else:
74
72
  tool_names = [tools.BASH, tools.READ, tools.EDIT, tools.WRITE, tools.TODO_WRITE]
75
73
 
76
74
  tool_names.extend(sub_agent_tool_names(enabled_only=True, model_name=model_name))
77
- tool_names.extend([tools.SKILL, tools.MERMAID, tools.MEMORY])
75
+ tool_names.extend([tools.SKILL, tools.MERMAID])
76
+ # tool_names.extend([tools.MEMORY])
78
77
  return get_tool_schemas(tool_names)
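To summarize the effect of this hunk, here is a simplified, stand-alone restatement of the main-agent tool selection after the change. Tool names are plain strings rather than the tools constants; this is an illustrative sketch, not the actual load_agent_tools implementation.

```python
def select_main_agent_tools(model_name: str) -> list[str]:
    """Illustrative sketch of the branching above, not the real implementation."""
    if "gpt-5" in model_name:
        # READ is no longer given to gpt-5 models in 1.2.11.
        names = ["bash", "apply_patch", "update_plan"]
    elif "gemini-3" in model_name:
        names = ["bash", "read", "edit", "write"]
    else:
        names = ["bash", "read", "edit", "write", "todo_write"]
    # SKILL and MERMAID are still appended; MEMORY is now commented out.
    names += ["skill", "mermaid"]
    return names


assert "read" not in select_main_agent_tools("gpt-5.2")
assert "memory" not in select_main_agent_tools("claude-sonnet-4-5")
```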
klaude_code/llm/client.py CHANGED
@@ -42,7 +42,7 @@ def call_with_logged_payload(func: Callable[P, R], *args: P.args, **kwargs: P.kw
42
42
 
43
43
  payload = {k: v for k, v in kwargs.items() if v is not None}
44
44
  log_debug(
45
- json.dumps(payload, ensure_ascii=False, default=str, sort_keys=True),
45
+ json.dumps(payload, ensure_ascii=False, default=str),
46
46
  style="yellow",
47
47
  debug_type=DebugType.LLM_PAYLOAD,
48
48
  )
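The only change in this hunk is dropping sort_keys=True, so the debug log now preserves the payload's insertion order instead of alphabetizing keys. A quick illustration with a made-up payload:

```python
import json

payload = {"model": "gpt-5", "messages": [], "max_tokens": 1024}

# 1.2.10 behavior: keys are alphabetized ("max_tokens" and "messages" print before "model").
print(json.dumps(payload, ensure_ascii=False, default=str, sort_keys=True))

# 1.2.11 behavior: keys keep the order they were passed to the client in.
print(json.dumps(payload, ensure_ascii=False, default=str))
```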
klaude_code/protocol/model.py CHANGED
@@ -21,6 +21,8 @@ class Usage(BaseModel):
21
21
 
22
22
  # Context window tracking
23
23
  context_token: int | None = None # Peak total_tokens seen (for context usage display)
24
+ context_delta: int | None = None # Context growth since last task (for cache ratio calculation)
25
+ last_turn_output_token: int | None = None # Output tokens of the previous turn (for cache ratio calculation)
24
26
  context_limit: int | None = None # Model's context limit
25
27
  max_tokens: int | None = None # Max output tokens for this request
26
28
 
@@ -319,6 +321,7 @@ class TaskMetadata(BaseModel):
319
321
  model_name: str = ""
320
322
  provider: str | None = None
321
323
  task_duration_s: float | None = None
324
+ turn_count: int = 0
322
325
 
323
326
  @staticmethod
324
327
  def aggregate_by_model(metadata_list: list["TaskMetadata"]) -> list["TaskMetadata"]:
klaude_code/protocol/op.py CHANGED
@@ -63,11 +63,14 @@ class InterruptOperation(Operation):
63
63
 
64
64
 
65
65
  class InitAgentOperation(Operation):
66
- """Operation for initializing an agent and replaying history if any."""
66
+ """Operation for initializing an agent and replaying history if any.
67
+
68
+ If session_id is None, a new session is created with an auto-generated ID.
69
+ If session_id is provided, attempts to load existing session or creates new one.
70
+ """
67
71
 
68
72
  type: OperationType = OperationType.INIT_AGENT
69
73
  session_id: str | None = None
70
- is_new_session: bool = False
71
74
 
72
75
  async def execute(self, handler: OperationHandler) -> None:
73
76
  await handler.handle_init_agent(self)
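A brief usage sketch of the two initialization paths the expanded docstring describes; it assumes InitAgentOperation has no required fields beyond those visible in this hunk.

```python
from klaude_code.protocol.op import InitAgentOperation

# Assumption: no required fields other than those shown in this hunk.
fresh = InitAgentOperation()                       # session_id=None: new session with an auto-generated ID
resumed = InitAgentOperation(session_id="abc123")  # load "abc123" if it exists, otherwise create it

# The removed is_new_session flag is no longer needed; new-vs-resumed is
# inferred from whether session_id is provided.
```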
klaude_code/protocol/sub_agent.py CHANGED
@@ -290,6 +290,7 @@ register_sub_agent(
290
290
  tool_set=(tools.BASH, tools.READ),
291
291
  prompt_builder=_explore_prompt_builder,
292
292
  active_form="Exploring",
293
+ target_model_filter=lambda model: ("haiku" not in model) and ("kimi" not in model) and ("grok" not in model),
293
294
  )
294
295
  )
295
296
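How the new target_model_filter is consumed is not visible in this diff; the sketch below only restates the filter itself and invents a small wrapper to show the intent (the Explore sub-agent is withheld from haiku, kimi, and grok models).

```python
from typing import Callable

# The filter added above, reproduced as a standalone callable.
target_model_filter: Callable[[str], bool] = lambda model: (
    ("haiku" not in model) and ("kimi" not in model) and ("grok" not in model)
)


def explore_enabled_for(model_name: str) -> bool:
    """Hypothetical consumer: expose the Explore sub-agent only if the filter accepts the model."""
    return target_model_filter(model_name)


assert explore_enabled_for("claude-sonnet-4-5")
assert not explore_enabled_for("claude-haiku-4-5")
assert not explore_enabled_for("grok-4")
```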
 
klaude_code/session/export.py CHANGED
@@ -194,11 +194,18 @@ def _render_single_metadata(
194
194
  input_stat += f"({_format_cost(u.input_cost, u.currency)})"
195
195
  parts.append(f'<span class="metadata-stat">{input_stat}</span>')
196
196
 
197
- # Cached with cost
197
+ # Cached with cost and cache ratio
198
198
  if u.cached_tokens > 0:
199
199
  cached_stat = f"cached: {_format_token_count(u.cached_tokens)}"
200
200
  if u.cache_read_cost is not None:
201
201
  cached_stat += f"({_format_cost(u.cache_read_cost, u.currency)})"
202
+ # Cache ratio: (cached + context_delta - last_turn_output) / input tokens
203
+ # Shows how much of the input was cached (not new context growth)
204
+ if u.input_tokens > 0:
205
+ context_delta = u.context_delta or 0
206
+ last_turn_output_token = u.last_turn_output_token or 0
207
+ cache_ratio = (u.cached_tokens + context_delta - last_turn_output_token) / u.input_tokens * 100
208
+ cached_stat += f"[{cache_ratio:.0f}%]"
202
209
  parts.append(f'<span class="metadata-stat">{cached_stat}</span>')
203
210
 
204
211
  # Output with cost
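A worked example of the cache-ratio formula introduced above, using invented token counts:

```python
# Invented numbers purely to illustrate the formula above.
input_tokens = 12_000          # prompt tokens billed this turn
cached_tokens = 8_000          # tokens served from the provider's prompt cache
context_delta = 3_500          # context growth since the last task
last_turn_output_token = 500   # previous turn's output, which cannot have been cached yet

cache_ratio = (cached_tokens + context_delta - last_turn_output_token) / input_tokens * 100
print(f"[{cache_ratio:.0f}%]")  # -> [92%]
```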
@@ -403,6 +410,9 @@ def _get_mermaid_link_html(
403
410
  buttons_html.append(
404
411
  f'<button type="button" class="copy-mermaid-btn" data-code="{escaped_code}" title="Copy Mermaid Code">Copy Code</button>'
405
412
  )
413
+ buttons_html.append(
414
+ '<button type="button" class="fullscreen-mermaid-btn" title="View Fullscreen">Fullscreen</button>'
415
+ )
406
416
 
407
417
  link = ui_extra.link if isinstance(ui_extra, model.MermaidLinkUIExtra) else None
408
418
 
klaude_code/session/session.py CHANGED
@@ -108,12 +108,8 @@ class Session(BaseModel):
108
108
  return Session(id=id or uuid.uuid4().hex, work_dir=Path.cwd())
109
109
 
110
110
  @classmethod
111
- def load(cls, id: str, *, skip_if_missing: bool = False) -> "Session":
111
+ def load(cls, id: str) -> "Session":
112
112
  """Load an existing session or create a new one if not found."""
113
-
114
- if skip_if_missing:
115
- return Session(id=id, work_dir=Path.cwd())
116
-
117
113
  # Load session metadata
118
114
  sessions_dir = cls._sessions_dir()
119
115
  session_candidates = sorted(
klaude_code/session/templates/export_session.html CHANGED
@@ -338,6 +338,57 @@
338
338
  border-color: var(--accent);
339
339
  }
340
340
 
341
+ .mermaid-modal {
342
+ position: fixed;
343
+ top: 0;
344
+ left: 0;
345
+ width: 100vw;
346
+ height: 100vh;
347
+ background: rgba(255, 255, 255, 0.98);
348
+ z-index: 1000;
349
+ display: flex;
350
+ flex-direction: column;
351
+ align-items: center;
352
+ justify-content: center;
353
+ opacity: 0;
354
+ pointer-events: none;
355
+ transition: opacity 0.2s;
356
+ }
357
+ .mermaid-modal.active {
358
+ opacity: 1;
359
+ pointer-events: auto;
360
+ }
361
+ .mermaid-modal-content {
362
+ width: 95%;
363
+ height: 90%;
364
+ display: flex;
365
+ align-items: center;
366
+ justify-content: center;
367
+ overflow: auto;
368
+ }
369
+ .mermaid-modal-content svg {
370
+ width: auto !important;
371
+ height: auto !important;
372
+ max-width: 100%;
373
+ max-height: 100%;
374
+ }
375
+ .mermaid-modal-close {
376
+ position: absolute;
377
+ top: 20px;
378
+ right: 20px;
379
+ background: transparent;
380
+ border: none;
381
+ font-size: 32px;
382
+ cursor: pointer;
383
+ color: var(--text-dim);
384
+ z-index: 1001;
385
+ line-height: 1;
386
+ padding: 8px;
387
+ }
388
+ .mermaid-modal-close:hover {
389
+ color: var(--text);
390
+ }
391
+
341
392
  .copy-mermaid-btn {
342
393
  border: 1px solid var(--border);
343
394
  background: transparent;
@@ -356,6 +407,25 @@
356
407
  border-color: var(--accent);
357
408
  }
358
409
 
410
+ .fullscreen-mermaid-btn {
411
+ margin-left: 8px;
412
+ border: 1px solid var(--border);
413
+ background: transparent;
414
+ color: var(--text-dim);
415
+ font-family: var(--font-mono);
416
+ font-size: var(--font-size-xs);
417
+ text-transform: uppercase;
418
+ padding: 2px 10px;
419
+ border-radius: 999px;
420
+ cursor: pointer;
421
+ transition: color 0.2s, border-color 0.2s, background 0.2s;
422
+ font-weight: var(--font-weight-bold);
423
+ }
424
+ .fullscreen-mermaid-btn:hover {
425
+ color: var(--text);
426
+ border-color: var(--accent);
427
+ }
428
+
359
429
  .assistant-rendered {
360
430
  width: 100%;
361
431
  }
@@ -1065,6 +1135,13 @@
1065
1135
  </svg>
1066
1136
  </div>
1067
1137
 
1138
+ <div id="mermaid-modal" class="mermaid-modal">
1139
+ <button class="mermaid-modal-close" id="mermaid-modal-close">
1140
+ &times;
1141
+ </button>
1142
+ <div class="mermaid-modal-content" id="mermaid-modal-content"></div>
1143
+ </div>
1144
+
1068
1145
  <link
1069
1146
  rel="stylesheet"
1070
1147
  href="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.9.0/build/styles/github.min.css"
@@ -1280,6 +1357,84 @@
1280
1357
  });
1281
1358
  });
1282
1359
 
1360
+ // Mermaid Fullscreen Logic
1361
+ const modal = document.getElementById("mermaid-modal");
1362
+ const modalContent = document.getElementById("mermaid-modal-content");
1363
+ const modalClose = document.getElementById("mermaid-modal-close");
1364
+
1365
+ if (modal && modalContent && modalClose) {
1366
+ const closeModal = () => {
1367
+ modal.classList.remove("active");
1368
+ modalContent.innerHTML = "";
1369
+ };
1370
+
1371
+ modalClose.addEventListener("click", closeModal);
1372
+
1373
+ modal.addEventListener("click", (e) => {
1374
+ if (e.target === modal) {
1375
+ closeModal();
1376
+ }
1377
+ });
1378
+
1379
+ // Handle Escape key
1380
+ document.addEventListener("keydown", (e) => {
1381
+ if (e.key === "Escape" && modal.classList.contains("active")) {
1382
+ closeModal();
1383
+ }
1384
+ });
1385
+
1386
+ document.querySelectorAll(".fullscreen-mermaid-btn").forEach((btn) => {
1387
+ btn.addEventListener("click", (e) => {
1388
+ // The structure is:
1389
+ // wrapper > mermaid > svg
1390
+ // wrapper > toolbar > buttons > btn
1391
+
1392
+ // We need to find the mermaid div that is a sibling of the toolbar
1393
+
1394
+ // Traverse up to the wrapper
1395
+ let wrapper = btn.closest("div[style*='background: white']");
1396
+
1397
+ if (!wrapper) {
1398
+ // Fallback: try to find by traversing up and looking for .mermaid
1399
+ let p = btn.parentElement;
1400
+ while (p) {
1401
+ if (p.querySelector(".mermaid")) {
1402
+ wrapper = p;
1403
+ break;
1404
+ }
1405
+ p = p.parentElement;
1406
+ if (p === document.body) break;
1407
+ }
1408
+ }
1409
+
1410
+ if (wrapper) {
1411
+ const mermaidDiv = wrapper.querySelector(".mermaid");
1412
+ if (mermaidDiv) {
1413
+ const svg = mermaidDiv.querySelector("svg");
1414
+
1415
+ if (svg) {
1416
+ // Clone the SVG to put in modal
1417
+ // We treat the SVG as the source
1418
+ const clone = svg.cloneNode(true);
1419
+ // Remove fixed sizes to let it scale in flex container
1420
+ clone.removeAttribute("height");
1421
+ clone.removeAttribute("width");
1422
+ clone.style.maxWidth = "100%";
1423
+ clone.style.maxHeight = "100%";
1424
+
1425
+ modalContent.appendChild(clone);
1426
+ modal.classList.add("active");
1427
+ } else if (mermaidDiv.textContent.trim()) {
1428
+ // Fallback if not rendered yet (should not happen on export usually)
1429
+ modalContent.textContent = "Diagram not rendered yet.";
1430
+ modal.classList.add("active");
1431
+ }
1432
+ }
1433
+ }
1434
+ });
1435
+ });
1436
+ }
1437
+
1283
1438
  // Scroll to bottom button
1284
1439
  const scrollBtn = document.getElementById("scroll-btn");
1285
1440
 
klaude_code/ui/modes/repl/event_handler.py CHANGED
@@ -453,14 +453,10 @@ class DisplayEventHandler:
453
453
  if len(todo.content) > 0:
454
454
  status_text = todo.content
455
455
  status_text = status_text.replace("\n", "")
456
- return self._truncate_status_text(status_text, max_length=30)
456
+ return self._truncate_status_text(status_text, max_length=100)
457
457
 
458
458
  def _truncate_status_text(self, text: str, max_length: int) -> str:
459
- """Truncate text to max_length while preserving complete words."""
460
459
  if len(text) <= max_length:
461
460
  return text
462
461
  truncated = text[:max_length]
463
- last_space = truncated.rfind(" ")
464
- if last_space > 0:
465
- return truncated[:last_space] + "..."
466
462
  return truncated + "..."
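The status-line budget grows from 30 to 100 characters, and the truncation helper no longer snaps back to the last word boundary. A before/after illustration with an invented status string:

```python
def truncate_old(text: str, max_length: int = 30) -> str:
    # 1.2.10 behavior: cut at max_length, then back up to the last space.
    if len(text) <= max_length:
        return text
    truncated = text[:max_length]
    last_space = truncated.rfind(" ")
    if last_space > 0:
        return truncated[:last_space] + "..."
    return truncated + "..."


def truncate_new(text: str, max_length: int = 100) -> str:
    # 1.2.11 behavior: plain character cut with a much longer budget.
    if len(text) <= max_length:
        return text
    return text[:max_length] + "..."


status = "Implementing memoization for expensive calculations in ProductList"
print(truncate_old(status))  # "Implementing memoization for..."
print(truncate_new(status))  # full text; it fits within 100 characters
```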
klaude_code/ui/modes/repl/input_prompt_toolkit.py CHANGED
@@ -6,9 +6,7 @@ from pathlib import Path
6
6
  from typing import NamedTuple, override
7
7
 
8
8
  from prompt_toolkit import PromptSession
9
- from prompt_toolkit.buffer import Buffer
10
9
  from prompt_toolkit.completion import ThreadedCompleter
11
- from prompt_toolkit.filters import Condition
12
10
  from prompt_toolkit.formatted_text import FormattedText
13
11
  from prompt_toolkit.history import FileHistory
14
12
  from prompt_toolkit.patch_stdout import patch_stdout
@@ -45,9 +43,6 @@ class PromptToolkitInput(InputProviderABC):
45
43
  ): # ▌
46
44
  self._status_provider = status_provider
47
45
 
48
- # Mouse is disabled by default; only enabled when input becomes multi-line.
49
- self._mouse_enabled: bool = False
50
-
51
46
  project = str(Path.cwd()).strip("/").replace("/", "-")
52
47
  history_path = Path.home() / ".klaude" / "projects" / f"{project}" / "input_history.txt"
53
48
 
@@ -56,8 +51,6 @@ class PromptToolkitInput(InputProviderABC):
56
51
  if not history_path.exists():
57
52
  history_path.touch()
58
53
 
59
- mouse_support_filter = Condition(lambda: self._mouse_enabled)
60
-
61
54
  # Create key bindings with injected dependencies
62
55
  kb = create_key_bindings(
63
56
  capture_clipboard_tag=capture_clipboard_tag,
@@ -75,7 +68,7 @@ class PromptToolkitInput(InputProviderABC):
75
68
  complete_while_typing=True,
76
69
  erase_when_done=True,
77
70
  bottom_toolbar=self._render_bottom_toolbar,
78
- mouse_support=mouse_support_filter,
71
+ mouse_support=False,
79
72
  style=Style.from_dict(
80
73
  {
81
74
  "completion-menu": "bg:default",
@@ -90,12 +83,6 @@ class PromptToolkitInput(InputProviderABC):
90
83
  ),
91
84
  )
92
85
 
93
- try:
94
- self._session.default_buffer.on_text_changed += self._on_buffer_text_changed
95
- except Exception:
96
- # If we can't hook the buffer events for any reason, fall back to static behavior.
97
- pass
98
-
99
86
  def _render_bottom_toolbar(self) -> FormattedText:
100
87
  """Render bottom toolbar with working directory, git branch on left, model name and context usage on right.
101
88
 
@@ -168,8 +155,6 @@ class PromptToolkitInput(InputProviderABC):
168
155
  @override
169
156
  async def iter_inputs(self) -> AsyncIterator[UserInputPayload]:
170
157
  while True:
171
- # For each new prompt, start with mouse disabled so users can select history.
172
- self._mouse_enabled = False
173
158
  with patch_stdout():
174
159
  line: str = await self._session.prompt_async()
175
160
 
@@ -178,21 +163,5 @@ class PromptToolkitInput(InputProviderABC):
178
163
 
179
164
  yield UserInputPayload(text=line, images=images if images else None)
180
165
 
181
- def _on_buffer_text_changed(self, buf: Buffer) -> None:
182
- """Toggle mouse support based on current buffer content.
183
-
184
- Mouse stays disabled when input is empty. It is enabled only when
185
- the user has entered more than one line of text.
186
- """
187
- try:
188
- text = buf.text
189
- except Exception:
190
- return
191
- self._mouse_enabled = self._should_enable_mouse(text)
192
-
193
- def _should_enable_mouse(self, text: str) -> bool:
194
- """Return True when mouse support should be enabled for current input."""
195
- if not text.strip():
196
- return False
197
- # Enable mouse only when input spans multiple lines.
198
- return "\n" in text
166
+ # Note: Mouse support is intentionally disabled at the PromptSession
167
+ # level so that terminals retain their native scrollback behavior.
klaude_code/ui/renderers/metadata.py CHANGED
@@ -77,6 +77,17 @@ def _render_task_metadata_block(
77
77
  ]
78
78
  if metadata.usage.cache_read_cost is not None:
79
79
  cached_parts.append((f"({currency_symbol}{metadata.usage.cache_read_cost:.4f})", ThemeKey.METADATA_DIM))
80
+ # Cache ratio: (cached + context growth - last turn output) / input tokens; this may exceed 100% if the system prompt was cached in the first turn
81
+ # Shows how much of the input was cached (not new context growth)
82
+ if show_context_and_time and metadata.usage.input_tokens > 0:
83
+ context_delta = metadata.usage.context_delta or 0
84
+ last_turn_output_token = metadata.usage.last_turn_output_token or 0
85
+ cache_ratio = (
86
+ (metadata.usage.cached_tokens + context_delta - last_turn_output_token)
87
+ / metadata.usage.input_tokens
88
+ * 100
89
+ )
90
+ cached_parts.append((f"[{cache_ratio:.0f}%]", ThemeKey.METADATA_DIM))
80
91
  parts2.append(Text.assemble(*cached_parts))
81
92
 
82
93
  # Output
@@ -150,6 +161,16 @@ def _render_task_metadata_block(
150
161
  )
151
162
  )
152
163
 
164
+ # Turn count
165
+ if show_context_and_time and metadata.turn_count > 0:
166
+ parts3.append(
167
+ Text.assemble(
168
+ ("turns", ThemeKey.METADATA_DIM),
169
+ (":", ThemeKey.METADATA_DIM),
170
+ (str(metadata.turn_count), ThemeKey.METADATA_DIM),
171
+ )
172
+ )
173
+
153
174
  if parts3:
154
175
  line2 = Text(" / ", style=ThemeKey.METADATA_DIM).join(parts3)
155
176
  renderables.append(Padding(line2, (0, 0, 0, indent + 2)))
klaude_code/ui/renderers/tools.py CHANGED
@@ -339,12 +339,23 @@ def render_mermaid_tool_call(arguments: str) -> RenderableType:
339
339
 
340
340
 
341
341
  def render_mermaid_tool_result(tr: events.ToolResultEvent) -> RenderableType:
342
+ from klaude_code.ui.terminal import supports_osc8_hyperlinks
343
+
342
344
  link_info = _extract_mermaid_link(tr.ui_extra)
343
345
  if link_info is None:
344
346
  return render_generic_tool_result(tr.result, is_error=tr.status == "error")
345
347
 
346
- link_text = Text.from_markup(f"[blue u][link={link_info.link}]Command+click to view[/link][/blue u]")
347
- return Padding.indent(link_text, level=2)
348
+ if supports_osc8_hyperlinks():
349
+ link_text = Text.from_markup(f"[blue u][link={link_info.link}]Command+click to view[/link][/blue u]")
350
+ return Padding.indent(link_text, level=2)
351
+
352
+ # For terminals that don't support OSC 8, show a hint to use /export
353
+ hint_text = Text.assemble(
354
+ ("Use ", ThemeKey.TOOL_RESULT),
355
+ ("/export", ThemeKey.TOOL_RESULT_BOLD),
356
+ (" to view the diagram.", ThemeKey.TOOL_RESULT),
357
+ )
358
+ return Padding.indent(hint_text, level=2)
348
359
 
349
360
 
350
361
  def _extract_truncation(
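The new klaude_code/ui/terminal module (+55 lines) is not included in this diff, so the real detection logic behind supports_osc8_hyperlinks is unknown; a plausible minimal heuristic, offered purely as a sketch, would check well-known terminal environment variables:

```python
import os


def supports_osc8_hyperlinks() -> bool:
    """Hypothetical heuristic; the actual implementation in klaude_code.ui.terminal is not shown in this diff."""
    term_program = os.environ.get("TERM_PROGRAM", "")
    if term_program in {"iTerm.app", "WezTerm", "vscode", "Hyper", "ghostty"}:
        return True
    if os.environ.get("WT_SESSION"):        # Windows Terminal
        return True
    if os.environ.get("KITTY_WINDOW_ID"):   # kitty
        return True
    return False
```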
klaude_code/ui/rich/markdown.py CHANGED
@@ -284,7 +284,10 @@ class MarkdownStream:
284
284
 
285
285
  target_height = min(self._live_window_seen_height, self.live_window)
286
286
  if target_height > 0 and current_height < target_height:
287
- pad_count = target_height - current_height + 1
287
+ # Pad only up to the maximum height we've seen so far.
288
+ # This keeps the Live region height stable without overshooting,
289
+ # which can cause the spinner to jump by a line.
290
+ pad_count = target_height - current_height
288
291
  # Pad after the existing lines so spinner visually stays at the bottom.
289
292
  rest_lines = rest_lines + ["\n"] * pad_count
290
293
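A small numeric check of the off-by-one this hunk fixes, with invented heights:

```python
# Invented heights for illustration.
live_window = 10
live_window_seen_height = 8
current_height = 5

target_height = min(live_window_seen_height, live_window)  # 8

pad_old = target_height - current_height + 1  # 4 -> padded height 9, one line taller than ever seen
pad_new = target_height - current_height      # 3 -> padded height 8, exactly the tallest height seen

assert current_height + pad_new == target_height
```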