klaude-code 1.2.26__py3-none-any.whl → 1.2.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. klaude_code/cli/config_cmd.py +1 -5
  2. klaude_code/cli/debug.py +9 -1
  3. klaude_code/cli/list_model.py +170 -129
  4. klaude_code/cli/main.py +76 -19
  5. klaude_code/cli/runtime.py +15 -11
  6. klaude_code/cli/self_update.py +2 -1
  7. klaude_code/cli/session_cmd.py +1 -1
  8. klaude_code/command/__init__.py +3 -0
  9. klaude_code/command/export_online_cmd.py +15 -12
  10. klaude_code/command/fork_session_cmd.py +42 -0
  11. klaude_code/config/__init__.py +3 -1
  12. klaude_code/config/assets/__init__.py +1 -0
  13. klaude_code/config/assets/builtin_config.yaml +233 -0
  14. klaude_code/config/builtin_config.py +37 -0
  15. klaude_code/config/config.py +332 -112
  16. klaude_code/config/select_model.py +46 -8
  17. klaude_code/core/executor.py +6 -3
  18. klaude_code/core/manager/llm_clients_builder.py +4 -1
  19. klaude_code/core/reminders.py +52 -16
  20. klaude_code/core/tool/file/edit_tool.py +4 -4
  21. klaude_code/core/tool/file/write_tool.py +4 -4
  22. klaude_code/core/tool/shell/bash_tool.py +2 -2
  23. klaude_code/core/tool/web/mermaid_tool.md +17 -0
  24. klaude_code/core/tool/web/mermaid_tool.py +2 -2
  25. klaude_code/llm/openai_compatible/stream.py +2 -1
  26. klaude_code/llm/openai_compatible/tool_call_accumulator.py +17 -1
  27. klaude_code/protocol/commands.py +1 -0
  28. klaude_code/protocol/model.py +1 -0
  29. klaude_code/session/export.py +52 -7
  30. klaude_code/session/selector.py +2 -2
  31. klaude_code/session/session.py +26 -4
  32. klaude_code/trace/log.py +7 -1
  33. klaude_code/ui/modes/repl/__init__.py +3 -44
  34. klaude_code/ui/modes/repl/completers.py +39 -7
  35. klaude_code/ui/modes/repl/event_handler.py +8 -6
  36. klaude_code/ui/modes/repl/input_prompt_toolkit.py +33 -66
  37. klaude_code/ui/modes/repl/key_bindings.py +4 -4
  38. klaude_code/ui/modes/repl/renderer.py +1 -6
  39. klaude_code/ui/renderers/common.py +11 -4
  40. klaude_code/ui/renderers/developer.py +17 -0
  41. klaude_code/ui/renderers/diffs.py +1 -1
  42. klaude_code/ui/renderers/errors.py +10 -5
  43. klaude_code/ui/renderers/metadata.py +2 -2
  44. klaude_code/ui/renderers/tools.py +8 -4
  45. klaude_code/ui/rich/markdown.py +5 -5
  46. klaude_code/ui/rich/theme.py +7 -3
  47. klaude_code/ui/terminal/color.py +1 -1
  48. klaude_code/ui/terminal/control.py +4 -4
  49. {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/METADATA +121 -127
  50. {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/RECORD +52 -48
  51. {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/entry_points.txt +1 -0
  52. {klaude_code-1.2.26.dist-info → klaude_code-1.2.28.dist-info}/WHEEL +0 -0
klaude_code/core/reminders.py CHANGED
@@ -46,6 +46,17 @@ class AtPatternSource:
     mentioned_in: str | None = None


+def _extract_at_patterns(content: str) -> list[str]:
+    """Extract @ patterns from content."""
+    patterns: list[str] = []
+    if "@" in content:
+        for match in AT_FILE_PATTERN.finditer(content):
+            path_str = match.group("quoted") or match.group("plain")
+            if path_str:
+                patterns.append(path_str)
+    return patterns
+
+
 def get_at_patterns_with_source(session: Session) -> list[AtPatternSource]:
     """Get @ patterns from last user input and developer messages, preserving source info."""
     patterns: list[AtPatternSource] = []
@@ -56,24 +67,14 @@ def get_at_patterns_with_source(session: Session) -> list[AtPatternSource]:

         if isinstance(item, model.UserMessageItem):
             content = item.content or ""
-            if "@" in content:
-                for match in AT_FILE_PATTERN.finditer(content):
-                    path_str = match.group("quoted") or match.group("plain")
-                    if path_str:
-                        patterns.append(AtPatternSource(pattern=path_str, mentioned_in=None))
+            for path_str in _extract_at_patterns(content):
+                patterns.append(AtPatternSource(pattern=path_str, mentioned_in=None))
             break

-        if isinstance(item, model.DeveloperMessageItem):
-            content = item.content or ""
-            if "@" not in content:
-                continue
-            # Use first memory_path as the source if available
-            source = item.memory_paths[0] if item.memory_paths else None
-            for match in AT_FILE_PATTERN.finditer(content):
-                path_str = match.group("quoted") or match.group("plain")
-                if path_str:
-                    patterns.append(AtPatternSource(pattern=path_str, mentioned_in=source))
-
+        if isinstance(item, model.DeveloperMessageItem) and item.memory_mentioned:
+            for memory_path, mentioned_patterns in item.memory_mentioned.items():
+                for pattern in mentioned_patterns:
+                    patterns.append(AtPatternSource(pattern=pattern, mentioned_in=memory_path))
     return patterns


@@ -92,6 +93,23 @@ def get_skill_from_user_input(session: Session) -> str | None:
     return None


+def _is_tracked_file_unchanged(session: Session, path: str) -> bool:
+    status = session.file_tracker.get(path)
+    if status is None or status.content_sha256 is None:
+        return False
+
+    try:
+        current_mtime = Path(path).stat().st_mtime
+    except (OSError, FileNotFoundError):
+        return False
+
+    if current_mtime == status.mtime:
+        return True
+
+    current_sha256 = _compute_file_content_sha256(path)
+    return current_sha256 is not None and current_sha256 == status.content_sha256
+
+
 async def _load_at_file_recursive(
     session: Session,
     pattern: str,
@@ -112,6 +130,8 @@ async def _load_at_file_recursive(
     context_token = set_tool_context_from_session(session)
     try:
         if path.exists() and path.is_file():
+            if _is_tracked_file_unchanged(session, path_str):
+                return
             args = ReadTool.ReadArguments(file_path=path_str)
             tool_result = await ReadTool.call_with_args(args)
             at_files[path_str] = model.AtPatternParseResult(
@@ -458,6 +478,13 @@ async def memory_reminder(session: Session) -> model.DeveloperMessageItem | None
         memories_str = "\n\n".join(
             [f"Contents of {memory.path} ({memory.instruction}):\n\n{memory.content}" for memory in memories]
         )
+        # Build memory_mentioned: extract @ patterns from each memory's content
+        memory_mentioned: dict[str, list[str]] = {}
+        for memory in memories:
+            patterns = _extract_at_patterns(memory.content)
+            if patterns:
+                memory_mentioned[memory.path] = patterns
+
         return model.DeveloperMessageItem(
             content=f"""<system-reminder>As you answer the user's questions, you can use the following context:

@@ -474,6 +501,7 @@ NEVER proactively create documentation files (*.md) or README files. Only create
 IMPORTANT: this context may or may not be relevant to your tasks. You should not respond to this context unless it is highly relevant to your task.
 </system-reminder>""",
             memory_paths=[memory.path for memory in memories],
+            memory_mentioned=memory_mentioned or None,
         )
     return None

@@ -544,10 +572,18 @@ async def last_path_memory_reminder(
     memories_str = "\n\n".join(
         [f"Contents of {memory.path} ({memory.instruction}):\n\n{memory.content}" for memory in memories]
     )
+    # Build memory_mentioned: extract @ patterns from each memory's content
+    memory_mentioned: dict[str, list[str]] = {}
+    for memory in memories:
+        patterns = _extract_at_patterns(memory.content)
+        if patterns:
+            memory_mentioned[memory.path] = patterns
+
     return model.DeveloperMessageItem(
         content=f"""<system-reminder>{memories_str}
 </system-reminder>""",
         memory_paths=[memory.path for memory in memories],
+        memory_mentioned=memory_mentioned or None,
     )

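A minimal sketch (not from the package) of how the new helper and the `memory_mentioned` mapping fit together. The `AT_FILE_PATTERN` regex below is a simplified stand-in that only shares the `quoted`/`plain` group names with the real one:

```python
import re

# Simplified stand-in for reminders.AT_FILE_PATTERN: matches @"quoted path" or @bare/path.
AT_FILE_PATTERN = re.compile(r'@(?:"(?P<quoted>[^"]+)"|(?P<plain>[^\s"]+))')


def extract_at_patterns(content: str) -> list[str]:
    """Mirror of _extract_at_patterns: collect every @-mentioned path in a text blob."""
    patterns: list[str] = []
    if "@" in content:
        for match in AT_FILE_PATTERN.finditer(content):
            path_str = match.group("quoted") or match.group("plain")
            if path_str:
                patterns.append(path_str)
    return patterns


# Build the mapping the reminders now attach to DeveloperMessageItem:
# memory file path -> @ patterns found inside that memory's content.
memories = {
    "CLAUDE.md": 'See @docs/style.md and @"docs/release notes.md" before editing.',
    "~/.claude/CLAUDE.md": "No @ mentions here.",
}
memory_mentioned: dict[str, list[str]] = {}
for path, content in memories.items():
    patterns = extract_at_patterns(content)
    if patterns:
        memory_mentioned[path] = patterns

print(memory_mentioned)
# {'CLAUDE.md': ['docs/style.md', 'docs/release notes.md']}
```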
klaude_code/core/tool/file/edit_tool.py CHANGED
@@ -88,7 +88,7 @@ class EditTool(ToolABC):
     async def call(cls, arguments: str) -> model.ToolResultItem:
         try:
             args = EditTool.EditArguments.model_validate_json(arguments)
-        except Exception as e:  # pragma: no cover - defensive
+        except ValueError as e:  # pragma: no cover - defensive
             return model.ToolResultItem(status="error", output=f"Invalid arguments: {e}")

         file_path = os.path.abspath(args.file_path)
@@ -150,7 +150,7 @@ class EditTool(ToolABC):
             # Backward-compat: old sessions only stored mtime.
             try:
                 current_mtime = Path(file_path).stat().st_mtime
-            except Exception:
+            except OSError:
                 current_mtime = tracked_status.mtime
             if current_mtime != tracked_status.mtime:
                 return model.ToolResultItem(
@@ -188,7 +188,7 @@ class EditTool(ToolABC):
         # Write back
         try:
             await asyncio.to_thread(write_text, file_path, after)
-        except Exception as e:  # pragma: no cover
+        except (OSError, UnicodeError) as e:  # pragma: no cover
             return model.ToolResultItem(status="error", output=f"<tool_use_error>{e}</tool_use_error>")

         # Prepare UI extra: unified diff with 3 context lines
@@ -233,7 +233,7 @@ class EditTool(ToolABC):
                 plus_range = plus.split(" ")[0]
                 start = int(plus_range.split(",")[0]) if "," in plus_range else int(plus_range)
                 after_line_no = start - 1
-            except Exception:
+            except (ValueError, IndexError):
                 after_line_no = 0
             continue
         if line.startswith(" ") or (line.startswith("+") and not line.startswith("+++ ")):
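For the hunk-header parsing above, the narrowed `(ValueError, IndexError)` covers exactly the failure modes of the slicing and `int()` calls. A standalone sketch of that parsing logic (a hypothetical helper for illustration, not the tool's actual function):

```python
def plus_side_start(hunk_header: str) -> int:
    """Return the zero-based starting line of the '+' side of a unified-diff hunk header, or 0 on malformed input."""
    try:
        # "@@ -88,7 +91,7 @@ ..." -> "91,7 ..." -> "91,7" -> 91
        plus = hunk_header.split("+")[1]
        plus_range = plus.split(" ")[0]
        start = int(plus_range.split(",")[0]) if "," in plus_range else int(plus_range)
        return start - 1
    except (ValueError, IndexError):
        return 0


print(plus_side_start("@@ -88,7 +91,7 @@ class EditTool(ToolABC):"))  # 90
print(plus_side_start("not a hunk header"))                           # 0
```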
klaude_code/core/tool/file/write_tool.py CHANGED
@@ -49,7 +49,7 @@ class WriteTool(ToolABC):
     async def call(cls, arguments: str) -> model.ToolResultItem:
         try:
             args = WriteArguments.model_validate_json(arguments)
-        except Exception as e:  # pragma: no cover - defensive
+        except ValueError as e:  # pragma: no cover - defensive
             return model.ToolResultItem(status="error", output=f"Invalid arguments: {e}")

         file_path = os.path.abspath(args.file_path)
@@ -79,7 +79,7 @@ class WriteTool(ToolABC):
         try:
             before = await asyncio.to_thread(read_text, file_path)
             before_read_ok = True
-        except Exception:
+        except OSError:
             before = ""
             before_read_ok = False

@@ -98,7 +98,7 @@
             # Backward-compat: old sessions only stored mtime, or we couldn't hash.
             try:
                 current_mtime = Path(file_path).stat().st_mtime
-            except Exception:
+            except OSError:
                 current_mtime = tracked_status.mtime
             if current_mtime != tracked_status.mtime:
                 return model.ToolResultItem(
@@ -111,7 +111,7 @@

         try:
             await asyncio.to_thread(write_text, file_path, args.content)
-        except Exception as e:  # pragma: no cover
+        except (OSError, UnicodeError) as e:  # pragma: no cover
             return model.ToolResultItem(status="error", output=f"<tool_use_error>{e}</tool_use_error>")

         if file_tracker is not None:
klaude_code/core/tool/shell/bash_tool.py CHANGED
@@ -274,7 +274,7 @@ class BashTool(ToolABC):
             proc.terminate()
         except ProcessLookupError:
             return
-        except Exception:
+        except OSError:
             # Fall back to kill below.
             pass

@@ -356,7 +356,7 @@
         except asyncio.CancelledError:
             # Propagate cooperative cancellation so outer layers can handle interrupts correctly.
             raise
-        except Exception as e:  # safeguard against unexpected failures
+        except OSError as e:  # safeguard: catch remaining OS-level errors (permissions, resources, etc.)
             return model.ToolResultItem(
                 status="error",
                 output=f"Execution error: {e}",
klaude_code/core/tool/web/mermaid_tool.md CHANGED
@@ -45,3 +45,20 @@ sequenceDiagram

 # Styling
 - When defining custom classDefs, always define fill color, stroke color, and text color ("fill", "stroke", "color") explicitly
+- Use colors to distinguish node types and improve readability
+
+## Color Palette
+- Cyan #e0f0f0 - information, data flow
+- Green #e0f0e0 - success, completion
+- Blue #e0e8f5 - primary actions, main flow
+- Purple #ede0f5 - highlights, special nodes
+- Orange #f5ebe0 - warnings, pending
+- Red #f5e0e0 - errors, critical
+- Grey #e8e8e8 - neutral elements
+- Yellow #f5f5e0 - attention, notes
+
+Example:
+```mermaid
+classDef primary fill:#e0e8f5,stroke:#3078C5,color:#1a1a1a
+classDef success fill:#e0f0e0,stroke:#00875f,color:#1a1a1a
+```
klaude_code/core/tool/web/mermaid_tool.py CHANGED
@@ -11,7 +11,7 @@ from klaude_code.core.tool.tool_abc import ToolABC, load_desc
 from klaude_code.core.tool.tool_registry import register
 from klaude_code.protocol import llm_param, model, tools

-_MERMAID_LIVE_PREFIX = "https://mermaid.live/view#pako:"
+_MERMAID_LIVE_PREFIX = "https://mermaid.live/edit#pako:"


 @register(tools.MERMAID)
@@ -31,7 +31,7 @@ class MermaidTool(ToolABC):
         "type": "object",
         "properties": {
             "code": {
-                "description": "The Mermaid diagram code to render (DO NOT override with custom colors or other styles, DO NOT use HTML tags in node labels)",
+                "description": "The Mermaid diagram code to render (DO NOT use HTML tags in node labels)",
                 "type": "string",
             },
         },
klaude_code/llm/openai_compatible/stream.py CHANGED
@@ -20,6 +20,7 @@ from typing import Any, Literal, cast
 import httpx
 import openai
 import openai.types
+import pydantic
 from openai import AsyncStream
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk

@@ -204,7 +205,7 @@
         try:
             usage = openai.types.CompletionUsage.model_validate(choice_usage)
             metadata_tracker.set_usage(convert_usage(usage, param.context_limit, param.max_tokens))
-        except Exception:
+        except pydantic.ValidationError:
             pass

         delta = cast(Any, getattr(choice0, "delta", None))
klaude_code/llm/openai_compatible/tool_call_accumulator.py CHANGED
@@ -1,9 +1,25 @@
+import re
 from abc import ABC, abstractmethod

 from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
 from pydantic import BaseModel, Field

 from klaude_code.protocol import model
+from klaude_code.trace.log import log_debug
+
+
+def normalize_tool_name(name: str) -> str:
+    """Normalize tool name from Gemini-3 format.
+
+    Gemini-3 sometimes returns tool names in format like 'tool_Edit_mUoY2p3W3r3z8uO5P2nZ'.
+    This function extracts the actual tool name (e.g., 'Edit').
+    """
+    match = re.match(r"^tool_([A-Za-z]+)_[A-Za-z0-9]+$", name)
+    if match:
+        normalized = match.group(1)
+        log_debug(f"Gemini-3 tool name normalized: {name} -> {normalized}", style="yellow")
+        return normalized
+    return name


 class ToolCallAccumulatorABC(ABC):
@@ -74,7 +90,7 @@ class BasicToolCallAccumulator(ToolCallAccumulatorABC, BaseModel):
                 if first_chunk.function is None:
                     continue
                 if first_chunk.function.name:
-                    result[-1].name = first_chunk.function.name
+                    result[-1].name = normalize_tool_name(first_chunk.function.name)
                 if first_chunk.function.arguments:
                     result[-1].arguments += first_chunk.function.arguments
         return result
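The normalization above is a pure regex rewrite, so its behavior is easy to sanity-check in isolation. The sketch below drops the `log_debug` call; everything else follows the diff:

```python
import re


def normalize_tool_name(name: str) -> str:
    """Strip the Gemini-3 style wrapper 'tool_<Name>_<suffix>' down to '<Name>'."""
    match = re.match(r"^tool_([A-Za-z]+)_[A-Za-z0-9]+$", name)
    if match:
        return match.group(1)
    return name


assert normalize_tool_name("tool_Edit_mUoY2p3W3r3z8uO5P2nZ") == "Edit"
assert normalize_tool_name("Edit") == "Edit"            # ordinary names pass through
assert normalize_tool_name("tool_call") == "tool_call"  # no trailing suffix -> unchanged
```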
klaude_code/protocol/commands.py CHANGED
@@ -15,6 +15,7 @@ class CommandName(str, Enum):
     STATUS = "status"
     RELEASE_NOTES = "release-notes"
     THINKING = "thinking"
+    FORK_SESSION = "fork-session"
     # PLAN and DOC are dynamically registered now, but kept here if needed for reference
     # or we can remove them if no code explicitly imports them.
     # PLAN = "plan"
klaude_code/protocol/model.py CHANGED
@@ -260,6 +260,7 @@ class DeveloperMessageItem(BaseModel):

     # Special fields for reminders UI
     memory_paths: list[str] | None = None
+    memory_mentioned: dict[str, list[str]] | None = None  # memory_path -> list of @ patterns mentioned in it
     external_file_changes: list[str] | None = None
     todo_use: bool | None = None
     at_files: list[AtPatternParseResult] | None = None
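Because the new field defaults to `None`, items persisted by older versions still validate. A reduced stand-in for `DeveloperMessageItem` (only the two reminder fields, not the real class) illustrating the round-trip:

```python
from pydantic import BaseModel


class DeveloperMessageItemStandIn(BaseModel):
    # Reduced stand-in: only the reminder-related fields shown in the diff.
    memory_paths: list[str] | None = None
    memory_mentioned: dict[str, list[str]] | None = None  # memory_path -> @ patterns mentioned in it


# Old session data without the new key still validates; the field just stays None.
old = DeveloperMessageItemStandIn.model_validate({"memory_paths": ["CLAUDE.md"]})
assert old.memory_mentioned is None

new = DeveloperMessageItemStandIn.model_validate(
    {"memory_paths": ["CLAUDE.md"], "memory_mentioned": {"CLAUDE.md": ["docs/style.md"]}}
)
assert new.memory_mentioned == {"CLAUDE.md": ["docs/style.md"]}
```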
klaude_code/session/export.py CHANGED
@@ -427,6 +427,41 @@ def _get_diff_ui_extra(ui_extra: model.ToolResultUIExtra | None) -> model.DiffUI
     return None


+def _render_markdown_doc(doc: model.MarkdownDocUIExtra) -> str:
+    encoded = _escape_html(doc.content)
+    file_path = _escape_html(doc.file_path)
+    header = f'<div class="diff-file">{file_path} <span style="font-weight: normal; color: var(--text-dim); font-size: 12px; margin-left: 8px;">(markdown content)</span></div>'
+
+    # Using a container that mimics diff-view but for markdown
+    content = (
+        f'<div class="markdown-content markdown-body" data-raw="{encoded}" '
+        f'style="padding: 12px; border: 1px solid var(--border); border-radius: var(--radius-md); background: var(--bg-body); margin-top: 4px;">'
+        f'<noscript><pre style="white-space: pre-wrap;">{encoded}</pre></noscript>'
+        f"</div>"
+    )
+
+    line_count = doc.content.count("\n") + 1
+    open_attr = " open"
+
+    return (
+        f'<details class="diff-collapsible"{open_attr}>'
+        f"<summary>File Content ({line_count} lines)</summary>"
+        f'<div style="margin-top: 8px;">'
+        f"{header}"
+        f"{content}"
+        f"</div>"
+        f"</details>"
+    )
+
+
+def _collect_ui_extras(ui_extra: model.ToolResultUIExtra | None) -> list[model.ToolResultUIExtra]:
+    if ui_extra is None:
+        return []
+    if isinstance(ui_extra, model.MultiUIExtra):
+        return list(ui_extra.items)
+    return [ui_extra]
+
+
 def _build_add_only_diff(text: str, file_path: str) -> model.DiffUIExtra:
     lines: list[model.DiffLine] = []
     new_line_no = 1
@@ -567,19 +602,26 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
     ]

     if result:
-        diff_ui = _get_diff_ui_extra(result.ui_extra)
-        mermaid_html = _get_mermaid_link_html(result.ui_extra, tool_call)
+        extras = _collect_ui_extras(result.ui_extra)
+
+        mermaid_extra = next((x for x in extras if isinstance(x, model.MermaidLinkUIExtra)), None)
+        mermaid_source = mermaid_extra if mermaid_extra else result.ui_extra
+        mermaid_html = _get_mermaid_link_html(mermaid_source, tool_call)

         should_hide_text = tool_call.name in ("TodoWrite", "update_plan") and result.status != "error"

-        if tool_call.name == "Edit" and not diff_ui and result.status != "error":
+        if (
+            tool_call.name == "Edit"
+            and not any(isinstance(x, model.DiffUIExtra) for x in extras)
+            and result.status != "error"
+        ):
             try:
                 args_data = json.loads(tool_call.arguments)
                 file_path = args_data.get("file_path", "Unknown file")
                 old_string = args_data.get("old_string", "")
                 new_string = args_data.get("new_string", "")
                 if old_string == "" and new_string:
-                    diff_ui = _build_add_only_diff(new_string, file_path)
+                    extras.append(_build_add_only_diff(new_string, file_path))
             except (json.JSONDecodeError, TypeError):
                 pass

@@ -591,8 +633,11 @@
         else:
             items_to_render.append(_render_text_block(result.output))

-        if diff_ui:
-            items_to_render.append(_render_diff_block(diff_ui))
+        for extra in extras:
+            if isinstance(extra, model.DiffUIExtra):
+                items_to_render.append(_render_diff_block(extra))
+            elif isinstance(extra, model.MarkdownDocUIExtra):
+                items_to_render.append(_render_markdown_doc(extra))

         if mermaid_html:
             items_to_render.append(mermaid_html)
@@ -702,7 +747,7 @@ def _render_sub_agent_session(

     try:
         sub_session = Session.load(session_id)
-    except Exception:
+    except (OSError, json.JSONDecodeError, ValueError):
         return None

     sub_history = sub_session.conversation_history
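The rendering loop above hinges on `_collect_ui_extras` flattening a possible `MultiUIExtra` into a plain list before dispatching per type. A sketch with hypothetical stand-in classes (the real ones live in `klaude_code.protocol.model`):

```python
from dataclasses import dataclass, field


@dataclass
class DiffUIExtraStandIn:
    file_path: str


@dataclass
class MultiUIExtraStandIn:
    items: list = field(default_factory=list)


def collect_ui_extras(ui_extra) -> list:
    """Mirror of _collect_ui_extras: None -> [], multi-extra -> its items, anything else -> one-item list."""
    if ui_extra is None:
        return []
    if isinstance(ui_extra, MultiUIExtraStandIn):
        return list(ui_extra.items)
    return [ui_extra]


assert collect_ui_extras(None) == []
assert collect_ui_extras(DiffUIExtraStandIn("a.py")) == [DiffUIExtraStandIn("a.py")]
multi = MultiUIExtraStandIn(items=[DiffUIExtraStandIn("a.py"), DiffUIExtraStandIn("b.py")])
assert len(collect_ui_extras(multi)) == 2
```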
klaude_code/session/selector.py CHANGED
@@ -23,7 +23,7 @@ def resume_select_session() -> str | None:
     def _fmt(ts: float) -> str:
         try:
             return time.strftime("%m-%d %H:%M:%S", time.localtime(ts))
-        except Exception:
+        except (ValueError, OSError):
             return str(ts)

     try:
@@ -76,6 +76,6 @@
         idx = int(raw)
         if 1 <= idx <= len(sessions):
             return str(sessions[idx - 1].id)
-    except Exception:
+    except (ValueError, EOFError):
         return None
     return None
klaude_code/session/session.py CHANGED
@@ -7,7 +7,7 @@ from collections.abc import Iterable, Sequence
 from pathlib import Path
 from typing import Any, cast

-from pydantic import BaseModel, Field, PrivateAttr
+from pydantic import BaseModel, Field, PrivateAttr, ValidationError

 from klaude_code.protocol import events, llm_param, model, tools
 from klaude_code.session.store import JsonlSessionStore, ProjectPaths, build_meta_snapshot
@@ -124,7 +124,7 @@ class Session(BaseModel):
             if isinstance(k, str) and isinstance(v, dict):
                 try:
                     file_tracker[k] = model.FileStatus.model_validate(v)
-                except Exception:
+                except ValidationError:
                     continue

         todos_raw = raw.get("todos")
@@ -135,7 +135,7 @@
                 continue
             try:
                 todos.append(model.TodoItem.model_validate(todo_raw))
-            except Exception:
+            except ValidationError:
                 continue

         created_at = float(raw.get("created_at", time.time()))
@@ -197,6 +197,28 @@ class Session(BaseModel):
         )
         self._store.append_and_flush(session_id=self.id, items=items, meta=meta)

+    def fork(self, *, new_id: str | None = None) -> Session:
+        """Create a new session as a fork of the current session.
+
+        The forked session copies metadata and conversation history, but does not
+        modify the current session.
+        """
+
+        forked = Session.create(id=new_id, work_dir=self.work_dir)
+
+        forked.sub_agent_state = None
+        forked.model_name = self.model_name
+        forked.model_config_name = self.model_config_name
+        forked.model_thinking = self.model_thinking.model_copy(deep=True) if self.model_thinking is not None else None
+        forked.file_tracker = {k: v.model_copy(deep=True) for k, v in self.file_tracker.items()}
+        forked.todos = [todo.model_copy(deep=True) for todo in self.todos]
+
+        items = [cast(model.ConversationItem, it.model_copy(deep=True)) for it in self.conversation_history]
+        if items:
+            forked.append_history(items)
+
+        return forked
+
     async def wait_for_flush(self) -> None:
         await self._store.wait_for_flush(self.id)

@@ -306,7 +328,7 @@
             seen_sub_agent_sessions.add(session_id)
             try:
                 sub_session = Session.load(session_id)
-            except Exception:
+            except (OSError, json.JSONDecodeError, ValueError):
                 return
             yield from sub_session.get_history_item()

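A sketch of the copy semantics `Session.fork` is going for, using a hypothetical stand-in class rather than the real pydantic `Session`: the fork gets deep copies of tracker/todo state and history, so later mutations on either side stay independent:

```python
from copy import deepcopy
from dataclasses import dataclass, field


@dataclass
class SessionStandIn:
    work_dir: str
    model_name: str | None = None
    file_tracker: dict = field(default_factory=dict)
    todos: list = field(default_factory=list)
    history: list = field(default_factory=list)

    def fork(self) -> "SessionStandIn":
        # New identity, copied configuration, deep-copied mutable state -- the original is untouched.
        forked = SessionStandIn(work_dir=self.work_dir, model_name=self.model_name)
        forked.file_tracker = deepcopy(self.file_tracker)
        forked.todos = deepcopy(self.todos)
        forked.history = deepcopy(self.history)
        return forked


original = SessionStandIn(work_dir=".", model_name="example-model", history=[{"role": "user", "content": "hi"}])
forked = original.fork()
forked.history.append({"role": "assistant", "content": "hello"})
assert len(original.history) == 1 and len(forked.history) == 2
```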
klaude_code/trace/log.py CHANGED
@@ -302,6 +302,12 @@ def _trash_path(path: Path) -> None:
     """Send a path to trash, falling back to unlink if trash is unavailable."""

     try:
-        subprocess.run(["trash", str(path)], check=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+        subprocess.run(
+            ["trash", str(path)],
+            stdin=subprocess.DEVNULL,
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+            check=False,
+        )
     except FileNotFoundError:
         path.unlink(missing_ok=True)
klaude_code/ui/modes/repl/__init__.py CHANGED
@@ -1,47 +1,6 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from klaude_code.protocol import model
 from klaude_code.ui.modes.repl.input_prompt_toolkit import REPLStatusSnapshot

-if TYPE_CHECKING:
-    from klaude_code.core.agent import Agent
-
-
-def build_repl_status_snapshot(agent: Agent | None, update_message: str | None) -> REPLStatusSnapshot:
-    """Build a status snapshot for the REPL bottom toolbar.
-
-    Aggregates model name, context usage, and basic call counts from the
-    provided agent's session history.
-    """
-
-    model_name = ""
-    context_usage_percent: float | None = None
-    llm_calls = 0
-    tool_calls = 0
-
-    if agent is not None:
-        model_name = agent.profile.llm_client.model_name or ""
-
-        history = agent.session.conversation_history
-        for item in history:
-            if isinstance(item, model.AssistantMessageItem):
-                llm_calls += 1
-            elif isinstance(item, model.ToolCallItem):
-                tool_calls += 1
-
-        for item in reversed(history):
-            if isinstance(item, model.ResponseMetadataItem):
-                usage = item.usage
-                if usage is not None and hasattr(usage, "context_usage_percent"):
-                    context_usage_percent = usage.context_usage_percent
-                break

-    return REPLStatusSnapshot(
-        model_name=model_name,
-        context_usage_percent=context_usage_percent,
-        llm_calls=llm_calls,
-        tool_calls=tool_calls,
-        update_message=update_message,
-    )
+def build_repl_status_snapshot(update_message: str | None) -> REPLStatusSnapshot:
+    """Build a status snapshot for the REPL bottom toolbar."""
+    return REPLStatusSnapshot(update_message=update_message)