klaude-code 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. klaude_code/cli/list_model.py +55 -4
  2. klaude_code/cli/main.py +3 -56
  3. klaude_code/cli/session_cmd.py +3 -2
  4. klaude_code/command/fork_session_cmd.py +220 -2
  5. klaude_code/command/refresh_cmd.py +4 -4
  6. klaude_code/command/resume_cmd.py +21 -11
  7. klaude_code/config/assets/builtin_config.yaml +37 -2
  8. klaude_code/config/builtin_config.py +1 -0
  9. klaude_code/config/config.py +14 -0
  10. klaude_code/config/thinking.py +14 -0
  11. klaude_code/llm/anthropic/client.py +127 -114
  12. klaude_code/llm/bedrock/__init__.py +3 -0
  13. klaude_code/llm/bedrock/client.py +60 -0
  14. klaude_code/llm/google/__init__.py +3 -0
  15. klaude_code/llm/google/client.py +309 -0
  16. klaude_code/llm/google/input.py +215 -0
  17. klaude_code/llm/registry.py +10 -5
  18. klaude_code/llm/usage.py +1 -1
  19. klaude_code/protocol/llm_param.py +9 -0
  20. klaude_code/session/__init__.py +2 -2
  21. klaude_code/session/selector.py +32 -4
  22. klaude_code/session/session.py +20 -12
  23. klaude_code/ui/modes/repl/event_handler.py +22 -32
  24. klaude_code/ui/modes/repl/renderer.py +1 -1
  25. klaude_code/ui/renderers/developer.py +2 -2
  26. klaude_code/ui/renderers/metadata.py +8 -0
  27. klaude_code/ui/rich/markdown.py +41 -9
  28. klaude_code/ui/rich/status.py +83 -22
  29. klaude_code/ui/terminal/selector.py +72 -3
  30. {klaude_code-1.5.0.dist-info → klaude_code-1.7.0.dist-info}/METADATA +33 -5
  31. {klaude_code-1.5.0.dist-info → klaude_code-1.7.0.dist-info}/RECORD +33 -28
  32. {klaude_code-1.5.0.dist-info → klaude_code-1.7.0.dist-info}/WHEEL +0 -0
  33. {klaude_code-1.5.0.dist-info → klaude_code-1.7.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,215 @@
1
+ # pyright: reportReturnType=false
2
+ # pyright: reportArgumentType=false
3
+ # pyright: reportUnknownMemberType=false
4
+ # pyright: reportAttributeAccessIssue=false
5
+
6
+ import json
7
+ from base64 import b64decode
8
+ from binascii import Error as BinasciiError
9
+ from typing import Any
10
+
11
+ from google.genai import types
12
+
13
+ from klaude_code.llm.input_common import AssistantGroup, ToolGroup, UserGroup, merge_reminder_text, parse_message_groups
14
+ from klaude_code.protocol import llm_param, model
15
+
16
+
17
+ def _data_url_to_blob(url: str) -> types.Blob:
18
+ header_and_media = url.split(",", 1)
19
+ if len(header_and_media) != 2:
20
+ raise ValueError("Invalid data URL for image: missing comma separator")
21
+ header, base64_data = header_and_media
22
+ if not header.startswith("data:"):
23
+ raise ValueError("Invalid data URL for image: missing data: prefix")
24
+ if ";base64" not in header:
25
+ raise ValueError("Invalid data URL for image: missing base64 marker")
26
+
27
+ media_type = header[5:].split(";", 1)[0]
28
+ base64_payload = base64_data.strip()
29
+ if base64_payload == "":
30
+ raise ValueError("Inline image data is empty")
31
+
32
+ try:
33
+ decoded = b64decode(base64_payload, validate=True)
34
+ except (BinasciiError, ValueError) as exc:
35
+ raise ValueError("Inline image data is not valid base64") from exc
36
+
37
+ return types.Blob(data=decoded, mime_type=media_type)
38
+
39
+
40
+ def _image_part_to_part(image: model.ImageURLPart) -> types.Part:
41
+ url = image.image_url.url
42
+ if url.startswith("data:"):
43
+ return types.Part(inline_data=_data_url_to_blob(url))
44
+ # Best-effort: Gemini supports file URIs, and may accept public HTTPS URLs.
45
+ return types.Part(file_data=types.FileData(file_uri=url))
46
+
47
+
48
+ def _user_group_to_content(group: UserGroup) -> types.Content:
49
+ parts: list[types.Part] = []
50
+ for text in group.text_parts:
51
+ parts.append(types.Part(text=text + "\n"))
52
+ for image in group.images:
53
+ parts.append(_image_part_to_part(image))
54
+ if not parts:
55
+ parts.append(types.Part(text=""))
56
+ return types.Content(role="user", parts=parts)
57
+
58
+
59
+ def _tool_groups_to_content(groups: list[ToolGroup], model_name: str | None) -> list[types.Content]:
60
+ supports_multimodal_function_response = bool(model_name and "gemini-3" in model_name.lower())
61
+
62
+ response_parts: list[types.Part] = []
63
+ extra_image_contents: list[types.Content] = []
64
+
65
+ for group in groups:
66
+ merged_text = merge_reminder_text(
67
+ group.tool_result.output or "<system-reminder>Tool ran without output or errors</system-reminder>",
68
+ group.reminder_texts,
69
+ )
70
+ has_text = merged_text.strip() != ""
71
+
72
+ images = list(group.tool_result.images or []) + list(group.reminder_images)
73
+ image_parts: list[types.Part] = []
74
+ for image in images:
75
+ try:
76
+ image_parts.append(_image_part_to_part(image))
77
+ except ValueError:
78
+ # Skip invalid data URLs
79
+ continue
80
+
81
+ has_images = len(image_parts) > 0
82
+ response_value = merged_text if has_text else "(see attached image)" if has_images else ""
83
+ response_payload = (
84
+ {"error": response_value} if group.tool_result.status == "error" else {"output": response_value}
85
+ )
86
+
87
+ function_response = types.FunctionResponse(
88
+ id=group.tool_result.call_id,
89
+ name=group.tool_result.tool_name or "",
90
+ response=response_payload,
91
+ parts=image_parts if (has_images and supports_multimodal_function_response) else None,
92
+ )
93
+ response_parts.append(types.Part(function_response=function_response))
94
+
95
+ if has_images and not supports_multimodal_function_response:
96
+ extra_image_contents.append(
97
+ types.Content(role="user", parts=[types.Part(text="Tool result image:"), *image_parts])
98
+ )
99
+
100
+ contents: list[types.Content] = []
101
+ if response_parts:
102
+ contents.append(types.Content(role="user", parts=response_parts))
103
+ contents.extend(extra_image_contents)
104
+ return contents
105
+
106
+
107
+ def _assistant_group_to_content(group: AssistantGroup, model_name: str | None) -> types.Content | None:
108
+ parts: list[types.Part] = []
109
+
110
+ degraded_thinking_texts: list[str] = []
111
+ pending_thought_text: str | None = None
112
+ pending_thought_signature: str | None = None
113
+
114
+ for item in group.reasoning_items:
115
+ match item:
116
+ case model.ReasoningTextItem():
117
+ if not item.content:
118
+ continue
119
+ if model_name is not None and item.model is not None and item.model != model_name:
120
+ degraded_thinking_texts.append(item.content)
121
+ else:
122
+ pending_thought_text = item.content
123
+ case model.ReasoningEncryptedItem():
124
+ if not (
125
+ model_name is not None
126
+ and item.model == model_name
127
+ and item.encrypted_content
128
+ and (item.format or "").startswith("google")
129
+ and pending_thought_text
130
+ ):
131
+ continue
132
+ pending_thought_signature = item.encrypted_content
133
+ parts.append(
134
+ types.Part(
135
+ text=pending_thought_text,
136
+ thought=True,
137
+ thought_signature=pending_thought_signature,
138
+ )
139
+ )
140
+ pending_thought_text = None
141
+ pending_thought_signature = None
142
+
143
+ if pending_thought_text:
144
+ parts.append(
145
+ types.Part(
146
+ text=pending_thought_text,
147
+ thought=True,
148
+ thought_signature=pending_thought_signature,
149
+ )
150
+ )
151
+
152
+ if degraded_thinking_texts:
153
+ parts.insert(0, types.Part(text="<thinking>\n" + "\n".join(degraded_thinking_texts) + "\n</thinking>"))
154
+
155
+ if group.text_content:
156
+ parts.append(types.Part(text=group.text_content))
157
+
158
+ for tc in group.tool_calls:
159
+ args: dict[str, Any]
160
+ if tc.arguments:
161
+ try:
162
+ args = json.loads(tc.arguments)
163
+ except json.JSONDecodeError:
164
+ args = {"_raw": tc.arguments}
165
+ else:
166
+ args = {}
167
+ parts.append(types.Part(function_call=types.FunctionCall(id=tc.call_id, name=tc.name, args=args)))
168
+
169
+ if not parts:
170
+ return None
171
+ return types.Content(role="model", parts=parts)
172
+
173
+
174
+ def convert_history_to_contents(
175
+ history: list[model.ConversationItem],
176
+ model_name: str | None,
177
+ ) -> list[types.Content]:
178
+ contents: list[types.Content] = []
179
+ pending_tool_groups: list[ToolGroup] = []
180
+
181
+ def flush_tool_groups() -> None:
182
+ nonlocal pending_tool_groups
183
+ if pending_tool_groups:
184
+ contents.extend(_tool_groups_to_content(pending_tool_groups, model_name=model_name))
185
+ pending_tool_groups = []
186
+
187
+ for group in parse_message_groups(history):
188
+ match group:
189
+ case UserGroup():
190
+ flush_tool_groups()
191
+ contents.append(_user_group_to_content(group))
192
+ case ToolGroup():
193
+ pending_tool_groups.append(group)
194
+ case AssistantGroup():
195
+ flush_tool_groups()
196
+ content = _assistant_group_to_content(group, model_name=model_name)
197
+ if content is not None:
198
+ contents.append(content)
199
+
200
+ flush_tool_groups()
201
+ return contents
202
+
203
+
204
+ def convert_tool_schema(tools: list[llm_param.ToolSchema] | None) -> list[types.Tool]:
205
+ if tools is None or len(tools) == 0:
206
+ return []
207
+ declarations = [
208
+ types.FunctionDeclaration(
209
+ name=tool.name,
210
+ description=tool.description,
211
+ parameters_json_schema=tool.parameters,
212
+ )
213
+ for tool in tools
214
+ ]
215
+ return [types.Tool(function_declarations=declarations)]
@@ -1,3 +1,4 @@
1
+ import importlib
1
2
  from collections.abc import Callable
2
3
  from typing import TYPE_CHECKING, TypeVar
3
4
 
@@ -21,15 +22,19 @@ def _load_protocol(protocol: llm_param.LLMClientProtocol) -> None:
21
22
 
22
23
  # Import only the needed module to trigger @register decorator
23
24
  if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
24
- from . import anthropic as _
25
+ importlib.import_module("klaude_code.llm.anthropic")
26
+ elif protocol == llm_param.LLMClientProtocol.BEDROCK:
27
+ importlib.import_module("klaude_code.llm.bedrock")
25
28
  elif protocol == llm_param.LLMClientProtocol.CODEX:
26
- from . import codex as _
29
+ importlib.import_module("klaude_code.llm.codex")
27
30
  elif protocol == llm_param.LLMClientProtocol.OPENAI:
28
- from . import openai_compatible as _
31
+ importlib.import_module("klaude_code.llm.openai_compatible")
29
32
  elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
30
- from . import openrouter as _
33
+ importlib.import_module("klaude_code.llm.openrouter")
31
34
  elif protocol == llm_param.LLMClientProtocol.RESPONSES:
32
- from . import responses as _ # noqa: F401
35
+ importlib.import_module("klaude_code.llm.responses")
36
+ elif protocol == llm_param.LLMClientProtocol.GOOGLE:
37
+ importlib.import_module("klaude_code.llm.google")
33
38
 
34
39
 
35
40
  def register(name: llm_param.LLMClientProtocol) -> Callable[[_T], _T]:
klaude_code/llm/usage.py CHANGED
@@ -81,7 +81,7 @@ class MetadataTracker:
81
81
  ) * 1000
82
82
 
83
83
  if self._last_token_time is not None and self._metadata_item.usage.output_tokens > 0:
84
- time_duration = self._last_token_time - self._first_token_time
84
+ time_duration = self._last_token_time - self._request_start_time
85
85
  if time_duration >= 0.15:
86
86
  self._metadata_item.usage.throughput_tps = self._metadata_item.usage.output_tokens / time_duration
87
87
 
@@ -12,7 +12,9 @@ class LLMClientProtocol(Enum):
12
12
  RESPONSES = "responses"
13
13
  OPENROUTER = "openrouter"
14
14
  ANTHROPIC = "anthropic"
15
+ BEDROCK = "bedrock"
15
16
  CODEX = "codex"
17
+ GOOGLE = "google"
16
18
 
17
19
 
18
20
  class ToolSchema(BaseModel):
@@ -91,8 +93,15 @@ class LLMConfigProviderParameter(BaseModel):
91
93
  protocol: LLMClientProtocol
92
94
  base_url: str | None = None
93
95
  api_key: str | None = None
96
+ # Azure OpenAI
94
97
  is_azure: bool = False
95
98
  azure_api_version: str | None = None
99
+ # AWS Bedrock configuration
100
+ aws_access_key: str | None = None
101
+ aws_secret_key: str | None = None
102
+ aws_region: str | None = None
103
+ aws_session_token: str | None = None
104
+ aws_profile: str | None = None
96
105
 
97
106
 
98
107
  class LLMConfigModelParameter(BaseModel):
@@ -1,4 +1,4 @@
1
- from .selector import SessionSelectOption, build_session_select_options
1
+ from .selector import SessionSelectOption, build_session_select_options, format_user_messages_display
2
2
  from .session import Session
3
3
 
4
- __all__ = ["Session", "SessionSelectOption", "build_session_select_options"]
4
+ __all__ = ["Session", "SessionSelectOption", "build_session_select_options", "format_user_messages_display"]
@@ -33,12 +33,39 @@ class SessionSelectOption:
33
33
  """Option data for session selection UI."""
34
34
 
35
35
  session_id: str
36
- first_user_message: str
36
+ user_messages: list[str]
37
37
  messages_count: str
38
38
  relative_time: str
39
39
  model_name: str
40
40
 
41
41
 
42
+ def _format_message(msg: str) -> str:
43
+ """Format a user message for display (strip and collapse newlines)."""
44
+ return msg.strip().replace("\n", " ")
45
+
46
+
47
+ def format_user_messages_display(messages: list[str]) -> list[str]:
48
+ """Format user messages for display in session selection.
49
+
50
+ Shows up to 6 messages. If more than 6, shows first 3 and last 3 with ellipsis.
51
+ Each message is on its own line.
52
+
53
+ Args:
54
+ messages: List of user messages.
55
+
56
+ Returns:
57
+ List of formatted message lines for display.
58
+ """
59
+ if len(messages) <= 6:
60
+ return messages
61
+
62
+ # More than 6: show first 3, ellipsis, last 3
63
+ result = messages[:3]
64
+ result.append("⋮")
65
+ result.extend(messages[-3:])
66
+ return result
67
+
68
+
42
69
  def build_session_select_options() -> list[SessionSelectOption]:
43
70
  """Build session selection options data.
44
71
 
@@ -51,8 +78,9 @@ def build_session_select_options() -> list[SessionSelectOption]:
51
78
 
52
79
  options: list[SessionSelectOption] = []
53
80
  for s in sessions:
54
- first_msg = s.first_user_message or "N/A"
55
- first_msg = first_msg.strip().replace("\n", " ")
81
+ user_messages = [_format_message(m) for m in s.user_messages if m.strip()]
82
+ if not user_messages:
83
+ user_messages = ["N/A"]
56
84
 
57
85
  msg_count = "N/A" if s.messages_count == -1 else f"{s.messages_count} messages"
58
86
  model = s.model_name or "N/A"
@@ -60,7 +88,7 @@ def build_session_select_options() -> list[SessionSelectOption]:
60
88
  options.append(
61
89
  SessionSelectOption(
62
90
  session_id=str(s.id),
63
- first_user_message=first_msg,
91
+ user_messages=user_messages,
64
92
  messages_count=msg_count,
65
93
  relative_time=_relative_time(s.updated_at),
66
94
  model_name=model,
@@ -197,11 +197,16 @@ class Session(BaseModel):
197
197
  )
198
198
  self._store.append_and_flush(session_id=self.id, items=items, meta=meta)
199
199
 
200
- def fork(self, *, new_id: str | None = None) -> Session:
200
+ def fork(self, *, new_id: str | None = None, until_index: int | None = None) -> Session:
201
201
  """Create a new session as a fork of the current session.
202
202
 
203
203
  The forked session copies metadata and conversation history, but does not
204
204
  modify the current session.
205
+
206
+ Args:
207
+ new_id: Optional ID for the forked session.
208
+ until_index: If provided, only copy conversation history up to (but not including) this index.
209
+ If None, copy all history.
205
210
  """
206
211
 
207
212
  forked = Session.create(id=new_id, work_dir=self.work_dir)
@@ -213,7 +218,10 @@ class Session(BaseModel):
213
218
  forked.file_tracker = {k: v.model_copy(deep=True) for k, v in self.file_tracker.items()}
214
219
  forked.todos = [todo.model_copy(deep=True) for todo in self.todos]
215
220
 
216
- items = [it.model_copy(deep=True) for it in self.conversation_history]
221
+ history_to_copy = (
222
+ self.conversation_history[:until_index] if until_index is not None else self.conversation_history
223
+ )
224
+ items = [it.model_copy(deep=True) for it in history_to_copy]
217
225
  if items:
218
226
  forked.append_history(items)
219
227
 
@@ -338,7 +346,7 @@ class Session(BaseModel):
338
346
  updated_at: float
339
347
  work_dir: str
340
348
  path: str
341
- first_user_message: str | None = None
349
+ user_messages: list[str] = []
342
350
  messages_count: int = -1
343
351
  model_name: str | None = None
344
352
 
@@ -346,10 +354,11 @@ class Session(BaseModel):
346
354
  def list_sessions(cls) -> list[SessionMetaBrief]:
347
355
  store = get_default_store()
348
356
 
349
- def _get_first_user_message(session_id: str) -> str | None:
357
+ def _get_user_messages(session_id: str) -> list[str]:
350
358
  events_path = store.paths.events_file(session_id)
351
359
  if not events_path.exists():
352
- return None
360
+ return []
361
+ messages: list[str] = []
353
362
  try:
354
363
  for line in events_path.read_text(encoding="utf-8").splitlines():
355
364
  obj_raw = json.loads(line)
@@ -360,15 +369,14 @@ class Session(BaseModel):
360
369
  continue
361
370
  data_raw = obj.get("data")
362
371
  if not isinstance(data_raw, dict):
363
- return None
372
+ continue
364
373
  data = cast(dict[str, Any], data_raw)
365
374
  content = data.get("content")
366
375
  if isinstance(content, str):
367
- return content
368
- return None
376
+ messages.append(content)
369
377
  except (OSError, json.JSONDecodeError):
370
- return None
371
- return None
378
+ pass
379
+ return messages
372
380
 
373
381
  items: list[Session.SessionMetaBrief] = []
374
382
  for meta_path in store.iter_meta_files():
@@ -382,7 +390,7 @@ class Session(BaseModel):
382
390
  created = float(data.get("created_at", meta_path.stat().st_mtime))
383
391
  updated = float(data.get("updated_at", meta_path.stat().st_mtime))
384
392
  work_dir = str(data.get("work_dir", ""))
385
- first_user_message = _get_first_user_message(sid)
393
+ user_messages = _get_user_messages(sid)
386
394
  messages_count = int(data.get("messages_count", -1))
387
395
  model_name = data.get("model_name") if isinstance(data.get("model_name"), str) else None
388
396
 
@@ -393,7 +401,7 @@ class Session(BaseModel):
393
401
  updated_at=updated,
394
402
  work_dir=work_dir,
395
403
  path=str(meta_path),
396
- first_user_message=first_user_message,
404
+ user_messages=user_messages,
397
405
  messages_count=messages_count,
398
406
  model_name=model_name,
399
407
  )
@@ -2,7 +2,6 @@ from __future__ import annotations
2
2
 
3
3
  from dataclasses import dataclass
4
4
 
5
- from rich.cells import cell_len
6
5
  from rich.rule import Rule
7
6
  from rich.text import Text
8
7
 
@@ -265,11 +264,27 @@ class SpinnerStatusState:
265
264
 
266
265
  return result
267
266
 
268
- def get_context_text(self) -> Text | None:
269
- """Get context usage text for right-aligned display."""
270
- if self._context_percent is None:
267
+ def get_right_text(self) -> r_status.DynamicText | None:
268
+ """Get right-aligned status text (elapsed time and optional context %)."""
269
+
270
+ elapsed_text = r_status.current_elapsed_text()
271
+ has_context = self._context_percent is not None
272
+
273
+ if elapsed_text is None and not has_context:
271
274
  return None
272
- return Text(f"{self._context_percent:.1f}%", style=ThemeKey.METADATA_DIM)
275
+
276
+ def _render() -> Text:
277
+ parts: list[str] = []
278
+ if self._context_percent is not None:
279
+ parts.append(f"{self._context_percent:.1f}%")
280
+ current_elapsed = r_status.current_elapsed_text()
281
+ if current_elapsed is not None:
282
+ if parts:
283
+ parts.append(" · ")
284
+ parts.append(current_elapsed)
285
+ return Text("".join(parts), style=ThemeKey.METADATA_DIM)
286
+
287
+ return r_status.DynamicText(_render)
273
288
 
274
289
 
275
290
  class DisplayEventHandler:
@@ -550,11 +565,10 @@ class DisplayEventHandler:
550
565
  def _update_spinner(self) -> None:
551
566
  """Update spinner text from current status state."""
552
567
  status_text = self.spinner_status.get_status()
553
- context_text = self.spinner_status.get_context_text()
554
- status_text = self._truncate_spinner_status_text(status_text, right_text=context_text)
568
+ right_text = self.spinner_status.get_right_text()
555
569
  self.renderer.spinner_update(
556
570
  status_text,
557
- context_text,
571
+ right_text,
558
572
  )
559
573
 
560
574
  async def _flush_assistant_buffer(self, state: StreamState) -> None:
@@ -612,27 +626,3 @@ class DisplayEventHandler:
612
626
  if len(todo.content) > 0:
613
627
  status_text = todo.content
614
628
  return status_text.replace("\n", " ").strip()
615
-
616
- def _truncate_spinner_status_text(self, status_text: Text, *, right_text: Text | None) -> Text:
617
- """Truncate spinner status to a single line based on terminal width.
618
-
619
- Rich wraps based on terminal cell width (CJK chars count as 2). Use
620
- cell-aware truncation to prevent the status from wrapping into two lines.
621
- """
622
-
623
- terminal_width = self.renderer.console.size.width
624
-
625
- # BreathingSpinner renders as a 2-column Table.grid(padding=1):
626
- # 1 cell for glyph + 1 cell of padding between columns (collapsed).
627
- spinner_prefix_cells = 2
628
-
629
- hint_cells = cell_len(r_status.current_hint_text())
630
- right_cells = cell_len(right_text.plain) if right_text is not None else 0
631
-
632
- max_main_cells = terminal_width - spinner_prefix_cells - hint_cells - right_cells - 1
633
- # rich.text.Text.truncate behaves unexpectedly for 0; clamp to at least 1.
634
- max_main_cells = max(1, max_main_cells)
635
-
636
- truncated = status_text.copy()
637
- truncated.truncate(max_main_cells, overflow="ellipsis", pad=False)
638
- return truncated
@@ -283,7 +283,7 @@ class REPLRenderer:
283
283
  self._spinner_visible = False
284
284
  self._refresh_bottom_live()
285
285
 
286
- def spinner_update(self, status_text: str | Text, right_text: Text | None = None) -> None:
286
+ def spinner_update(self, status_text: str | Text, right_text: RenderableType | None = None) -> None:
287
287
  """Update the spinner status text with optional right-aligned text."""
288
288
  self._status_text = ShimmerStatusText(status_text, right_text)
289
289
  self._status_spinner.update(text=SingleLine(self._status_text), style=ThemeKey.STATUS_SPINNER)
@@ -161,10 +161,10 @@ def _format_cost(cost: float | None, currency: str = "USD") -> str:
161
161
  def _render_fork_session_output(command_output: model.CommandOutput) -> RenderableType:
162
162
  """Render fork session output with usage instructions."""
163
163
  if not isinstance(command_output.ui_extra, model.SessionIdUIExtra):
164
- return Text("(no session id)", style=ThemeKey.METADATA)
164
+ return Padding.indent(Text("(no session id)", style=ThemeKey.METADATA), level=2)
165
165
 
166
- session_id = command_output.ui_extra.session_id
167
166
  grid = Table.grid(padding=(0, 1))
167
+ session_id = command_output.ui_extra.session_id
168
168
  grid.add_column(style=ThemeKey.METADATA, overflow="fold")
169
169
 
170
170
  grid.add_row(Text("Session forked. To continue in a new conversation:", style=ThemeKey.METADATA))
@@ -6,6 +6,7 @@ from rich.padding import Padding
6
6
  from rich.panel import Panel
7
7
  from rich.text import Text
8
8
 
9
+ from klaude_code import const
9
10
  from klaude_code.protocol import events, model
10
11
  from klaude_code.trace import is_debug_enabled
11
12
  from klaude_code.ui.renderers.common import create_grid
@@ -95,10 +96,17 @@ def _render_task_metadata_block(
95
96
  # Context (only for main agent)
96
97
  if show_context_and_time and metadata.usage.context_usage_percent is not None:
97
98
  context_size = format_number(metadata.usage.context_size or 0)
99
+ # Calculate effective limit (same as Usage.context_usage_percent)
100
+ effective_limit = (metadata.usage.context_limit or 0) - (
101
+ metadata.usage.max_tokens or const.DEFAULT_MAX_TOKENS
102
+ )
103
+ effective_limit_str = format_number(effective_limit) if effective_limit > 0 else "?"
98
104
  parts.append(
99
105
  Text.assemble(
100
106
  ("context ", ThemeKey.METADATA_DIM),
101
107
  (context_size, ThemeKey.METADATA),
108
+ ("/", ThemeKey.METADATA_DIM),
109
+ (effective_limit_str, ThemeKey.METADATA),
102
110
  (f" ({metadata.usage.context_usage_percent:.1f}%)", ThemeKey.METADATA_DIM),
103
111
  )
104
112
  )
@@ -254,18 +254,36 @@ class MarkdownStream:
254
254
  live suffix separately may introduce an extra blank line that wouldn't
255
255
  appear when rendering the full document.
256
256
 
257
- This function removes leading blank lines from the live ANSI when the
258
- stable ANSI already ends with a blank line.
257
+ This function removes *overlapping* blank lines from the live ANSI when
258
+ the stable ANSI already ends with one or more blank lines.
259
+
260
+ Important: don't remove *all* leading blank lines from the live suffix.
261
+ In some incomplete-block cases, the live render may begin with multiple
262
+ blank lines while the full-document render would keep one of them.
259
263
  """
260
264
 
261
265
  stable_lines = stable_ansi.splitlines(keepends=True)
262
- stable_ends_blank = bool(stable_lines) and not stable_lines[-1].strip()
263
- if not stable_ends_blank:
266
+ if not stable_lines:
267
+ return live_ansi
268
+
269
+ stable_trailing_blank = 0
270
+ for line in reversed(stable_lines):
271
+ if line.strip():
272
+ break
273
+ stable_trailing_blank += 1
274
+ if stable_trailing_blank <= 0:
264
275
  return live_ansi
265
276
 
266
277
  live_lines = live_ansi.splitlines(keepends=True)
267
- while live_lines and not live_lines[0].strip():
268
- live_lines.pop(0)
278
+ live_leading_blank = 0
279
+ for line in live_lines:
280
+ if line.strip():
281
+ break
282
+ live_leading_blank += 1
283
+
284
+ drop = min(stable_trailing_blank, live_leading_blank)
285
+ if drop > 0:
286
+ live_lines = live_lines[drop:]
269
287
  return "".join(live_lines)
270
288
 
271
289
  def _append_nonfinal_sentinel(self, stable_source: str) -> str:
@@ -400,9 +418,23 @@ class MarkdownStream:
400
418
  apply_mark_live = self._stable_source_line_count == 0
401
419
  live_lines = self._render_markdown_to_lines(live_source, apply_mark=apply_mark_live)
402
420
 
403
- if self._stable_rendered_lines and not self._stable_rendered_lines[-1].strip():
404
- while live_lines and not live_lines[0].strip():
405
- live_lines.pop(0)
421
+ if self._stable_rendered_lines:
422
+ stable_trailing_blank = 0
423
+ for line in reversed(self._stable_rendered_lines):
424
+ if line.strip():
425
+ break
426
+ stable_trailing_blank += 1
427
+
428
+ if stable_trailing_blank > 0:
429
+ live_leading_blank = 0
430
+ for line in live_lines:
431
+ if line.strip():
432
+ break
433
+ live_leading_blank += 1
434
+
435
+ drop = min(stable_trailing_blank, live_leading_blank)
436
+ if drop > 0:
437
+ live_lines = live_lines[drop:]
406
438
 
407
439
  live_text = Text.from_ansi("".join(live_lines))
408
440
  self._live_sink(live_text)