klaude-code 1.2.11__py3-none-any.whl → 1.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. klaude_code/auth/codex/oauth.py +3 -3
  2. klaude_code/cli/main.py +5 -5
  3. klaude_code/cli/runtime.py +19 -27
  4. klaude_code/cli/session_cmd.py +6 -8
  5. klaude_code/command/__init__.py +31 -28
  6. klaude_code/command/clear_cmd.py +0 -2
  7. klaude_code/command/diff_cmd.py +0 -2
  8. klaude_code/command/export_cmd.py +3 -5
  9. klaude_code/command/help_cmd.py +0 -2
  10. klaude_code/command/model_cmd.py +0 -2
  11. klaude_code/command/refresh_cmd.py +0 -2
  12. klaude_code/command/registry.py +5 -9
  13. klaude_code/command/release_notes_cmd.py +0 -2
  14. klaude_code/command/status_cmd.py +2 -4
  15. klaude_code/command/terminal_setup_cmd.py +2 -4
  16. klaude_code/command/thinking_cmd.py +229 -0
  17. klaude_code/config/__init__.py +1 -1
  18. klaude_code/config/list_model.py +1 -1
  19. klaude_code/config/select_model.py +5 -15
  20. klaude_code/const/__init__.py +1 -1
  21. klaude_code/core/agent.py +14 -69
  22. klaude_code/core/executor.py +11 -10
  23. klaude_code/core/manager/agent_manager.py +4 -4
  24. klaude_code/core/manager/llm_clients.py +10 -49
  25. klaude_code/core/manager/llm_clients_builder.py +8 -21
  26. klaude_code/core/manager/sub_agent_manager.py +3 -3
  27. klaude_code/core/prompt.py +3 -3
  28. klaude_code/core/reminders.py +1 -1
  29. klaude_code/core/task.py +4 -5
  30. klaude_code/core/tool/__init__.py +16 -25
  31. klaude_code/core/tool/file/_utils.py +1 -1
  32. klaude_code/core/tool/file/apply_patch.py +17 -25
  33. klaude_code/core/tool/file/apply_patch_tool.py +4 -7
  34. klaude_code/core/tool/file/edit_tool.py +4 -11
  35. klaude_code/core/tool/file/multi_edit_tool.py +2 -3
  36. klaude_code/core/tool/file/read_tool.py +3 -4
  37. klaude_code/core/tool/file/write_tool.py +2 -3
  38. klaude_code/core/tool/memory/memory_tool.py +2 -8
  39. klaude_code/core/tool/memory/skill_loader.py +3 -2
  40. klaude_code/core/tool/shell/command_safety.py +0 -1
  41. klaude_code/core/tool/tool_context.py +1 -3
  42. klaude_code/core/tool/tool_registry.py +2 -1
  43. klaude_code/core/tool/tool_runner.py +1 -1
  44. klaude_code/core/tool/truncation.py +2 -5
  45. klaude_code/core/turn.py +9 -4
  46. klaude_code/llm/anthropic/client.py +62 -49
  47. klaude_code/llm/client.py +2 -20
  48. klaude_code/llm/codex/client.py +51 -32
  49. klaude_code/llm/input_common.py +2 -2
  50. klaude_code/llm/openai_compatible/client.py +60 -39
  51. klaude_code/llm/openai_compatible/stream_processor.py +2 -1
  52. klaude_code/llm/openrouter/client.py +79 -45
  53. klaude_code/llm/openrouter/reasoning_handler.py +19 -132
  54. klaude_code/llm/registry.py +6 -5
  55. klaude_code/llm/responses/client.py +65 -43
  56. klaude_code/llm/usage.py +1 -49
  57. klaude_code/protocol/commands.py +1 -0
  58. klaude_code/protocol/events.py +7 -0
  59. klaude_code/protocol/llm_param.py +1 -9
  60. klaude_code/protocol/model.py +10 -6
  61. klaude_code/protocol/sub_agent.py +2 -1
  62. klaude_code/session/export.py +1 -8
  63. klaude_code/session/selector.py +12 -7
  64. klaude_code/session/session.py +2 -4
  65. klaude_code/trace/__init__.py +1 -1
  66. klaude_code/trace/log.py +1 -1
  67. klaude_code/ui/__init__.py +4 -9
  68. klaude_code/ui/core/stage_manager.py +7 -4
  69. klaude_code/ui/modes/repl/__init__.py +1 -1
  70. klaude_code/ui/modes/repl/completers.py +6 -7
  71. klaude_code/ui/modes/repl/display.py +3 -4
  72. klaude_code/ui/modes/repl/event_handler.py +63 -5
  73. klaude_code/ui/modes/repl/key_bindings.py +2 -3
  74. klaude_code/ui/modes/repl/renderer.py +2 -1
  75. klaude_code/ui/renderers/diffs.py +1 -4
  76. klaude_code/ui/renderers/metadata.py +1 -12
  77. klaude_code/ui/rich/markdown.py +3 -3
  78. klaude_code/ui/rich/searchable_text.py +6 -6
  79. klaude_code/ui/rich/status.py +3 -4
  80. klaude_code/ui/rich/theme.py +1 -4
  81. klaude_code/ui/terminal/control.py +7 -16
  82. klaude_code/ui/terminal/notifier.py +2 -4
  83. klaude_code/ui/utils/common.py +1 -1
  84. klaude_code/ui/utils/debouncer.py +2 -2
  85. {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/METADATA +1 -1
  86. {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/RECORD +88 -87
  87. {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/WHEEL +0 -0
  88. {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/entry_points.txt +0 -0
@@ -7,6 +7,7 @@ from klaude_code.core.manager.llm_clients import LLMClients
 from klaude_code.llm.client import LLMClientABC
 from klaude_code.llm.registry import create_llm_client
 from klaude_code.protocol.sub_agent import iter_sub_agent_profiles
+from klaude_code.protocol.tools import SubAgentType
 from klaude_code.trace import DebugType, log_debug


@@ -18,10 +19,7 @@ def build_llm_clients(
     """Create an ``LLMClients`` bundle driven by application config."""

     # Resolve main agent LLM config
-    if model_override:
-        llm_config = config.get_model_config(model_override)
-    else:
-        llm_config = config.get_main_model_config()
+    llm_config = config.get_model_config(model_override) if model_override else config.get_main_model_config()

     log_debug(
         "Main LLM config",
@@ -30,29 +28,18 @@
         debug_type=DebugType.LLM_CONFIG,
     )

-    main_model_name = str(llm_config.model)
-
-    def _main_factory() -> LLMClientABC:
-        return create_llm_client(llm_config)
-
-    clients = LLMClients(
-        main_factory=_main_factory,
-        main_model_name=main_model_name,
-        main_llm_config=llm_config,
-    )
+    main_client = create_llm_client(llm_config)
+    sub_clients: dict[SubAgentType, LLMClientABC] = {}

     for profile in iter_sub_agent_profiles():
         model_name = config.subagent_models.get(profile.name)
         if not model_name:
             continue

-        if not profile.enabled_for_model(main_model_name):
+        if not profile.enabled_for_model(main_client.model_name):
            continue

-        def _factory(model_name_for_factory: str = model_name) -> LLMClientABC:
-            sub_llm_config = config.get_model_config(model_name_for_factory)
-            return create_llm_client(sub_llm_config)
-
-        clients.register_sub_client_factory(profile.name, _factory)
+        sub_llm_config = config.get_model_config(model_name)
+        sub_clients[profile.name] = create_llm_client(sub_llm_config)

-    return clients
+    return LLMClients(main=main_client, sub_clients=sub_clients)
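This hunk switches the builder from lazily registered client factories to eagerly constructed clients: the main client is created up front and sub-agent clients are collected into a mapping keyed by SubAgentType. Below is a minimal sketch of the LLMClients bundle this implies; the real class lives in klaude_code/core/manager/llm_clients.py (touched in this release but not shown here). The field names main and sub_clients come from the new return statement, and the fallback-to-main behaviour of get_client is an assumption.

# Hypothetical sketch of the eager LLMClients bundle implied by the hunk above.
# The real LLMClientABC and SubAgentType come from klaude_code.llm.client and
# klaude_code.protocol.tools; they are left as string annotations here.
from dataclasses import dataclass, field

@dataclass
class LLMClients:
    main: "LLMClientABC"
    sub_clients: dict["SubAgentType", "LLMClientABC"] = field(default_factory=dict)

    def get_client(self, sub_agent_type: "SubAgentType | None" = None) -> "LLMClientABC":
        # Assumed behaviour: fall back to the main client when no dedicated
        # sub-agent client was configured.
        if sub_agent_type is None:
            return self.main
        return self.sub_clients.get(sub_agent_type, self.main)

With eager construction, the main client exists immediately, which is why the profile check above can read main_client.model_name instead of a separately tracked main_model_name string.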
@@ -43,7 +43,7 @@ class SubAgentManager:
         child_session = Session(work_dir=parent_session.work_dir)
         child_session.sub_agent_state = state

-        child_profile = self._model_profile_provider.build_profile_eager(
+        child_profile = self._model_profile_provider.build_profile(
             self._llm_clients.get_client(state.sub_agent_type),
             state.sub_agent_type,
         )
@@ -79,12 +79,12 @@ class SubAgentManager:
             raise
         except Exception as exc:  # pragma: no cover - defensive logging
             log_debug(
-                f"Subagent task failed: [{exc.__class__.__name__}] {str(exc)}",
+                f"Subagent task failed: [{exc.__class__.__name__}] {exc!s}",
                 style="red",
                 debug_type=DebugType.EXECUTION,
             )
             return SubAgentResult(
-                task_result=f"Subagent task failed: [{exc.__class__.__name__}] {str(exc)}",
+                task_result=f"Subagent task failed: [{exc.__class__.__name__}] {exc!s}",
                 session_id="",
                 error=True,
             )
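The str(exc) to exc!s change is purely cosmetic: !s is the explicit str() conversion inside an f-string, so the rendered message is identical. Rewrites like this are typically driven by a lint rule such as Ruff's RUF010. A one-line illustration:

exc = ValueError("boom")
assert f"{str(exc)}" == f"{exc!s}" == f"{exc}" == "boom"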
@@ -1,6 +1,6 @@
 import datetime
 import shutil
-from functools import lru_cache
+from functools import cache
 from importlib.resources import files
 from pathlib import Path

@@ -25,7 +25,7 @@ PROMPT_FILES: dict[str, str] = {
 }


-@lru_cache(maxsize=None)
+@cache
 def _load_base_prompt(file_key: str) -> str:
     """Load and cache the base prompt content from file."""
     try:
@@ -84,7 +84,7 @@ def _build_env_info(model_name: str) -> str:
     return "\n".join(env_lines)


-def get_system_prompt(model_name: str, sub_agent_type: str | None = None) -> str:
+def load_system_prompt(model_name: str, sub_agent_type: str | None = None) -> str:
     """Get system prompt content for the given model and sub-agent type."""
     file_key = _get_file_key(model_name, sub_agent_type)
     base_prompt = _load_base_prompt(file_key)
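functools.cache is an unbounded cache implemented as lru_cache(maxsize=None), so the decorator swap changes nothing functionally; it is just the shorter spelling available since Python 3.9. Note also the rename from get_system_prompt to load_system_prompt, which means callers elsewhere in the package were updated in the same release. A small illustration of the caching behaviour:

from functools import cache

@cache            # equivalent to @lru_cache(maxsize=None): unbounded, never evicts
def load_prompt(key: str) -> str:
    return f"<prompt for {key}>"

load_prompt("claude")   # computed on the first call
load_prompt("claude")   # served from the cache on the second call
assert load_prompt.cache_info().hits == 1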
@@ -1,8 +1,8 @@
 import json
 import re
 import shlex
+from collections.abc import Awaitable, Callable
 from pathlib import Path
-from typing import Awaitable, Callable

 from pydantic import BaseModel

klaude_code/core/task.py CHANGED
@@ -45,11 +45,10 @@ class MetadataAccumulator:
         acc_usage.cached_tokens += usage.cached_tokens
         acc_usage.reasoning_tokens += usage.reasoning_tokens
         acc_usage.output_tokens += usage.output_tokens
-        acc_usage.last_turn_output_token = usage.output_tokens
         acc_usage.currency = usage.currency

-        if usage.context_token is not None:
-            acc_usage.context_token = usage.context_token
+        if usage.context_size is not None:
+            acc_usage.context_size = usage.context_size
         if usage.context_limit is not None:
             acc_usage.context_limit = usage.context_limit

@@ -120,7 +119,7 @@ class TaskExecutionContext:
     profile: AgentProfile
     tool_registry: dict[str, type[ToolABC]]
     # For reminder processing - needs access to session
-    process_reminder: Callable[[Reminder], AsyncGenerator[events.DeveloperMessageEvent, None]]
+    process_reminder: Callable[[Reminder], AsyncGenerator[events.DeveloperMessageEvent]]
     sub_agent_state: model.SubAgentState | None


@@ -147,7 +146,7 @@ class TaskExecutor:
         self._current_turn = None
         return ui_events

-    async def run(self, user_input: model.UserInputPayload) -> AsyncGenerator[events.Event, None]:
+    async def run(self, user_input: model.UserInputPayload) -> AsyncGenerator[events.Event]:
         """Execute the task, yielding events as they occur."""
         ctx = self._context
         session_ctx = ctx.session_ctx
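The annotation changes drop the trailing None send type: on Python 3.13+ (or via typing_extensions), Generator and AsyncGenerator have default type parameters, so AsyncGenerator[events.Event] is equivalent to the old AsyncGenerator[events.Event, None]. The same rewrite shows up again below in turn.py, the tool context manager, and the tool executor, likely applied by a rule such as Ruff's UP043. A minimal illustration:

from collections.abc import AsyncGenerator

# Same meaning as AsyncGenerator[int, None]; the send type defaults to None
# on Python 3.13+ (and with typing_extensions on older versions).
async def count_up(n: int) -> AsyncGenerator[int]:
    for i in range(n):
        yield i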
@@ -29,49 +29,40 @@ from .web.mermaid_tool import MermaidTool
 from .web.web_fetch_tool import WebFetchTool

 __all__ = [
-    # Tools
+    "MEMORY_DIR_NAME",
     "ApplyPatchTool",
     "BashTool",
+    "DiffError",
     "EditTool",
     "MemoryTool",
     "MermaidTool",
     "MultiEditTool",
     "ReadTool",
+    "SafetyCheckResult",
+    "SimpleTruncationStrategy",
+    "Skill",
+    "SkillLoader",
     "SkillTool",
     "SubAgentTool",
+    "TodoContext",
     "TodoWriteTool",
+    "ToolABC",
+    "ToolContextToken",
+    "TruncationStrategy",
     "UpdatePlanTool",
     "WebFetchTool",
     "WriteTool",
-    # Tool ABC
-    "ToolABC",
-    # Tool context
-    "TodoContext",
     "build_todo_context",
-    "ToolContextToken",
     "current_run_subtask_callback",
-    "reset_tool_context",
-    "set_tool_context_from_session",
-    "tool_context",
-    # Tool registry
-    "load_agent_tools",
     "get_registry",
     "get_tool_schemas",
-    "run_tool",
-    # Truncation
-    "SimpleTruncationStrategy",
-    "TruncationStrategy",
     "get_truncation_strategy",
-    "set_truncation_strategy",
-    # Command safety
-    "SafetyCheckResult",
     "is_safe_command",
-    # Skill
-    "Skill",
-    "SkillLoader",
-    # Memory
-    "MEMORY_DIR_NAME",
-    # Apply patch
-    "DiffError",
+    "load_agent_tools",
     "process_patch",
+    "reset_tool_context",
+    "run_tool",
+    "set_tool_context_from_session",
+    "set_truncation_strategy",
+    "tool_context",
 ]
@@ -18,7 +18,7 @@ def file_exists(path: str) -> bool:

 def read_text(path: str) -> str:
     """Read text from file with UTF-8 encoding."""
-    with open(path, "r", encoding="utf-8", errors="replace") as f:
+    with open(path, encoding="utf-8", errors="replace") as f:
         return f.read()


@@ -3,8 +3,8 @@ https://github.com/openai/openai-cookbook/blob/main/examples/gpt-5/apply_patch.p
 """

 import os
+from collections.abc import Callable
 from enum import Enum
-from typing import Callable, Optional
 from pydantic import BaseModel, Field


@@ -17,16 +17,16 @@ class ActionType(str, Enum):

 class FileChange(BaseModel):
     type: ActionType
-    old_content: Optional[str] = None
-    new_content: Optional[str] = None
-    move_path: Optional[str] = None
+    old_content: str | None = None
+    new_content: str | None = None
+    move_path: str | None = None


 class Commit(BaseModel):
     changes: dict[str, FileChange] = Field(default_factory=dict)


-def assemble_changes(orig: dict[str, Optional[str]], dest: dict[str, Optional[str]]) -> Commit:
+def assemble_changes(orig: dict[str, str | None], dest: dict[str, str | None]) -> Commit:
     commit = Commit()
     for path in sorted(set(orig.keys()).union(dest.keys())):
         old_content = orig.get(path)
@@ -49,7 +49,7 @@ def assemble_changes(orig: dict[str, Optional[str]], dest: dict[str, Optional[st
                 old_content=old_content,
             )
         else:
-            assert False
+            raise AssertionError()
     return commit


@@ -71,9 +71,9 @@ def _new_chunk_list() -> list["Chunk"]:

 class PatchAction(BaseModel):
     type: ActionType
-    new_file: Optional[str] = None
+    new_file: str | None = None
     chunks: list[Chunk] = Field(default_factory=_new_chunk_list)
-    move_path: Optional[str] = None
+    move_path: str | None = None


 class Patch(BaseModel):
@@ -87,26 +87,19 @@ class Parser(BaseModel):
     patch: Patch = Field(default_factory=Patch)
     fuzz: int = 0

-    def is_done(self, prefixes: Optional[tuple[str, ...]] = None) -> bool:
+    def is_done(self, prefixes: tuple[str, ...] | None = None) -> bool:
         if self.index >= len(self.lines):
             return True
-        if prefixes and self.lines[self.index].startswith(prefixes):
-            return True
-        return False
+        return bool(prefixes and self.lines[self.index].startswith(prefixes))

     def startswith(self, prefix: str | tuple[str, ...]) -> bool:
         assert self.index < len(self.lines), f"Index: {self.index} >= {len(self.lines)}"
-        if self.lines[self.index].startswith(prefix):
-            return True
-        return False
+        return self.lines[self.index].startswith(prefix)

     def read_str(self, prefix: str = "", return_everything: bool = False) -> str:
         assert self.index < len(self.lines), f"Index: {self.index} >= {len(self.lines)}"
         if self.lines[self.index].startswith(prefix):
-            if return_everything:
-                text = self.lines[self.index]
-            else:
-                text = self.lines[self.index][len(prefix) :]
+            text = self.lines[self.index] if return_everything else self.lines[self.index][len(prefix) :]
             self.index += 1
             return text
         return ""
@@ -167,10 +160,9 @@
         ):
             def_str = self.read_str("@@ ")
             section_str = ""
-            if not def_str:
-                if self.lines[self.index] == "@@":
-                    section_str = self.lines[self.index]
-                    self.index += 1
+            if not def_str and self.lines[self.index] == "@@":
+                section_str = self.lines[self.index]
+                self.index += 1
             if not (def_str or section_str or index == 0):
                 raise DiffError(f"Invalid Line:\n{self.lines[self.index]}")
             if def_str.strip():
@@ -457,7 +449,7 @@ def process_patch(


 def open_file(path: str) -> str:
-    with open(path, "rt") as f:
+    with open(path) as f:
        return f.read()


@@ -465,7 +457,7 @@ def write_file(path: str, content: str) -> None:
     if "/" in path:
         parent = "/".join(path.split("/")[:-1])
         os.makedirs(parent, exist_ok=True)
-    with open(path, "wt") as f:
+    with open(path, "w") as f:
         f.write(content)


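These hunks modernize the patch module to PEP 604 unions (str | None instead of Optional[str]) and to collections.abc.Callable; the corresponding typing aliases have been deprecated since Python 3.9. A minimal illustration of the new spelling, independent of the package itself:

from collections.abc import Callable

def apply(transform: Callable[[str], str] | None, text: str) -> str:
    # Old spelling: Optional[Callable[[str], str]] using typing.Optional and typing.Callable.
    return transform(text) if transform is not None else text

assert apply(str.upper, "patch") == "PATCH"
assert apply(None, "patch") == "patch"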
@@ -1,6 +1,7 @@
 """ApplyPatch tool providing direct patch application capability."""

 import asyncio
+import contextlib
 import difflib
 import os
 from pathlib import Path
@@ -58,7 +59,7 @@ class ApplyPatchHandler:
             if os.path.isdir(resolved):
                 raise ap.DiffError(f"Cannot apply patch to directory: {path}")
             try:
-                with open(resolved, "r", encoding="utf-8") as handle:
+                with open(resolved, encoding="utf-8") as handle:
                     orig[path] = handle.read()
             except OSError as error:
                 raise ap.DiffError(f"Failed to read {path}: {error}") from error
@@ -78,10 +79,8 @@
                 handle.write(content)

             if file_tracker is not None:
-                try:
+                with contextlib.suppress(Exception):  # pragma: no cover - file tracker best-effort
                     file_tracker[resolved] = Path(resolved).stat().st_mtime
-                except Exception:  # pragma: no cover - file tracker best-effort
-                    pass

         def remove_fn(path: str) -> None:
             resolved = resolve_path(path)
@@ -92,10 +91,8 @@
             os.remove(resolved)

             if file_tracker is not None:
-                try:
+                with contextlib.suppress(Exception):  # pragma: no cover - file tracker best-effort
                     file_tracker.pop(resolved, None)
-                except Exception:  # pragma: no cover - file tracker best-effort
-                    pass

         ap.apply_commit(commit, write_fn, remove_fn)
         return "Done!", diff_text
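The recurring try/except Exception/pass around the best-effort file-tracker updates is replaced with contextlib.suppress, which has the same effect in less code (the kind of rewrite suggested by a lint rule such as Ruff's SIM105). The same substitution appears below in the Edit, MultiEdit, Read, and Write tools. A standalone illustration:

import contextlib
from pathlib import Path

file_tracker: dict[str, float] = {}

# If stat() fails for any reason, the tracker update is silently skipped,
# exactly as the old try/except/pass did.
with contextlib.suppress(Exception):
    file_tracker["example.txt"] = Path("example.txt").stat().st_mtime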
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import asyncio
+import contextlib
 import difflib
 import os
 from pathlib import Path
@@ -191,10 +192,8 @@ class EditTool(ToolABC):

         # Update tracker with new mtime
         if file_tracker is not None:
-            try:
+            with contextlib.suppress(Exception):
                 file_tracker[file_path] = Path(file_path).stat().st_mtime
-            except Exception:
-                pass

         # Build output message
         if args.replace_all:
@@ -213,18 +212,12 @@
                 header = line
                 plus = header.split("+", 1)[1]
                 plus_range = plus.split(" ")[0]
-                if "," in plus_range:
-                    start = int(plus_range.split(",")[0])
-                else:
-                    start = int(plus_range)
+                start = int(plus_range.split(",")[0]) if "," in plus_range else int(plus_range)
                 after_line_no = start - 1
             except Exception:
                 after_line_no = 0
                 continue
-            if line.startswith(" "):
-                after_line_no += 1
-                include_after_line_nos.append(after_line_no)
-            elif line.startswith("+") and not line.startswith("+++ "):
+            if line.startswith(" ") or (line.startswith("+") and not line.startswith("+++ ")):
                 after_line_no += 1
                 include_after_line_nos.append(after_line_no)
             elif line.startswith("-") and not line.startswith("--- "):
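The collapsed branch parses the +start[,count] range of a unified-diff @@ header to recover the first post-edit line number, then treats context lines and added lines identically when collecting line numbers to display. A standalone sketch of the same header parsing; the helper name is illustrative and not part of the package:

def new_start_line(header: str) -> int:
    """Return the first new-file line number covered by a hunk header.

    Assumes `header` looks like '@@ -10,4 +12,6 @@'; the count may be omitted (e.g. '+12').
    """
    plus = header.split("+", 1)[1]        # "12,6 @@"
    plus_range = plus.split(" ")[0]       # "12,6"
    return int(plus_range.split(",")[0]) if "," in plus_range else int(plus_range)

assert new_start_line("@@ -10,4 +12,6 @@") == 12
assert new_start_line("@@ -3 +7 @@") == 7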
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import asyncio
+import contextlib
 import difflib
 import os
 from pathlib import Path
@@ -162,10 +163,8 @@ class MultiEditTool(ToolABC):

         # Update tracker
         if file_tracker is not None:
-            try:
+            with contextlib.suppress(Exception):
                 file_tracker[file_path] = Path(file_path).stat().st_mtime
-            except Exception:
-                pass

         # Build output message
         lines = [f"Applied {len(args.edits)} edits to {file_path}:"]
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import asyncio
+import contextlib
 import os
 from base64 import b64encode
 from dataclasses import dataclass
@@ -58,7 +59,7 @@ def _read_segment(options: ReadOptions) -> ReadSegmentResult:
     remaining_selected_beyond_cap = 0
     selected_lines: list[tuple[int, str]] = []
     selected_chars = 0
-    with open(options.file_path, "r", encoding="utf-8", errors="replace") as f:
+    with open(options.file_path, encoding="utf-8", errors="replace") as f:
         for line_no, raw_line in enumerate(f, start=1):
             total_lines = line_no
             within = line_no >= options.offset and (options.limit is None or selected_lines_count < options.limit)
@@ -90,10 +91,8 @@ def _track_file_access(file_path: str) -> None:
     file_tracker = get_current_file_tracker()
     if file_tracker is None or not file_exists(file_path) or is_directory(file_path):
         return
-    try:
+    with contextlib.suppress(Exception):
         file_tracker[file_path] = Path(file_path).stat().st_mtime
-    except Exception:
-        pass


 def _is_supported_image_file(file_path: str) -> bool:
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import asyncio
+import contextlib
 import difflib
 import os
 from pathlib import Path
@@ -98,10 +99,8 @@ class WriteTool(ToolABC):
             return model.ToolResultItem(status="error", output=f"<tool_use_error>{e}</tool_use_error>")

         if file_tracker is not None:
-            try:
+            with contextlib.suppress(Exception):
                 file_tracker[file_path] = Path(file_path).stat().st_mtime
-            except Exception:
-                pass

         # Build diff between previous and new content
         after = args.content
@@ -68,16 +68,10 @@ def _validate_path(virtual_path: str) -> tuple[Path | None, str | None]:
         return None, f"Path must start with {MEMORY_VIRTUAL_ROOT}"

     # Get relative path from /memories
-    if virtual_path == MEMORY_VIRTUAL_ROOT:
-        relative = ""
-    else:
-        relative = virtual_path[len(MEMORY_VIRTUAL_ROOT) :].lstrip("/")
+    relative = "" if virtual_path == MEMORY_VIRTUAL_ROOT else virtual_path[len(MEMORY_VIRTUAL_ROOT) :].lstrip("/")

     memories_root = _get_memories_root()
-    if relative:
-        actual_path = memories_root / relative
-    else:
-        actual_path = memories_root
+    actual_path = memories_root / relative if relative else memories_root

     # Resolve to canonical path and verify it's still within memories
     try:
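The collapsed branches only swap if/else blocks for conditional expressions; the interesting part is the step the trailing comment describes, resolving the candidate path and confirming it is still inside the memories root. A hedged sketch of that containment check, assuming a pathlib-based layout; it is not the package's implementation:

from pathlib import Path

def resolve_within(root: Path, relative: str) -> Path | None:
    """Resolve `relative` under `root`, rejecting anything that escapes it (e.g. via '..')."""
    candidate = root / relative if relative else root
    resolved = candidate.resolve()
    try:
        resolved.relative_to(root.resolve())
    except ValueError:
        return None  # escaped the root; caller should treat this as invalid
    return resolved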
@@ -1,6 +1,7 @@
 import re
 from dataclasses import dataclass
 from pathlib import Path
+from typing import ClassVar

 import yaml

@@ -36,13 +37,13 @@ class SkillLoader:
     """Load and manage Claude Skills from SKILL.md files"""

     # User-level skills directories (checked in order, later ones override earlier ones with same name)
-    USER_SKILLS_DIRS = [
+    USER_SKILLS_DIRS: ClassVar[list[Path]] = [
         Path("~/.claude/skills"),
         Path("~/.klaude/skills"),
         # Path("~/.claude/plugins/marketplaces"),
     ]
     # Project-level skills directory
-    PROJECT_SKILLS_DIR = Path("./.claude/skills")
+    PROJECT_SKILLS_DIR: ClassVar[Path] = Path("./.claude/skills")

     def __init__(self) -> None:
         """Initialize the skill loader"""
@@ -293,7 +293,6 @@ def _is_safe_argv(argv: list[str]) -> SafetyCheckResult:
     "ruff",
     "pyright",
     "make",
-    "isort",
     "npm",
     "pnpm",
     "bun",
@@ -83,9 +83,7 @@ def reset_tool_context(token: ToolContextToken) -> None:


 @contextmanager
-def tool_context(
-    file_tracker: MutableMapping[str, float], todo_ctx: TodoContext
-) -> Generator[ToolContextToken, None, None]:
+def tool_context(file_tracker: MutableMapping[str, float], todo_ctx: TodoContext) -> Generator[ToolContextToken]:
     """Context manager for setting and resetting tool execution context."""

     file_tracker_token = current_file_tracker_var.set(file_tracker)
@@ -1,4 +1,5 @@
-from typing import Callable, TypeVar
+from collections.abc import Callable
+from typing import TypeVar

 from klaude_code.core.tool.sub_agent_tool import SubAgentTool
 from klaude_code.core.tool.tool_abc import ToolABC
@@ -100,7 +100,7 @@ class ToolExecutor:
         self._call_event_emitted: set[str] = set()
         self._sub_agent_tasks: set[asyncio.Task[list[ToolExecutorEvent]]] = set()

-    async def run_tools(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[ToolExecutorEvent, None]:
+    async def run_tools(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[ToolExecutorEvent]:
         """Run the given tool calls and yield execution events.

         Tool calls are partitioned into regular tools and sub-agent tools. Regular tools
@@ -27,10 +27,7 @@ def _extract_url_filename(url: str) -> str:
     # Combine host and path for a meaningful filename
     host = parsed.netloc.replace(".", "_").replace(":", "_")
     path = parsed.path.strip("/").replace("/", "_")
-    if path:
-        name = f"{host}_{path}"
-    else:
-        name = host
+    name = f"{host}_{path}" if path else host
     # Sanitize: keep only alphanumeric, underscore, hyphen
     name = re.sub(r"[^a-zA-Z0-9_\-]", "_", name)
     # Limit length
@@ -106,7 +103,7 @@ class SmartTruncationStrategy(TruncationStrategy):
            file_path = self.truncation_dir / filename
            file_path.write_text(output, encoding="utf-8")
            return str(file_path)
-        except (OSError, IOError):
+        except OSError:
            return None

    def truncate(self, output: str, tool_call: model.ToolCallItem | None = None) -> TruncationResult:
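IOError has been an alias of OSError since Python 3.3, so the old tuple caught nothing extra; rewrites like this are typically suggested by a rule such as Ruff's UP024. For example:

# IOError is literally the same class object as OSError on Python 3:
assert IOError is OSError

try:
    open("/nonexistent/path").read()
except OSError:  # also catches everything that used to be spelled IOError
    pass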
klaude_code/core/turn.py CHANGED
@@ -114,7 +114,7 @@ class TurnExecutor:
         self._tool_executor = None
         return ui_events

-    async def run(self) -> AsyncGenerator[events.Event, None]:
+    async def run(self) -> AsyncGenerator[events.Event]:
         """Execute the turn, yielding events as they occur.

         Raises:
@@ -148,7 +148,7 @@

         yield events.TurnEndEvent(session_id=session_ctx.session_id)

-    async def _consume_llm_stream(self, turn_result: TurnResult) -> AsyncGenerator[events.Event, None]:
+    async def _consume_llm_stream(self, turn_result: TurnResult) -> AsyncGenerator[events.Event]:
         """Stream events from LLM and update turn_result in place."""

         ctx = self._context
@@ -158,7 +158,6 @@
                 input=session_ctx.get_conversation_history(),
                 system=ctx.system_prompt,
                 tools=ctx.tools,
-                store=False,
                 session_id=session_ctx.session_id,
             )
         ):
@@ -180,6 +179,12 @@
                     )
                 case model.ReasoningEncryptedItem() as item:
                     turn_result.reasoning_items.append(item)
+                case model.ReasoningTextDelta() as item:
+                    yield events.ThinkingDeltaEvent(
+                        content=item.content,
+                        response_id=item.response_id,
+                        session_id=session_ctx.session_id,
+                    )
                 case model.AssistantMessageDelta() as item:
                     yield events.AssistantMessageDeltaEvent(
                         content=item.content,
@@ -229,7 +234,7 @@
         if turn_result.tool_calls:
             session_ctx.append_history(turn_result.tool_calls)

-    async def _run_tool_executor(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[events.Event, None]:
+    async def _run_tool_executor(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[events.Event]:
         """Run tools for the turn and translate executor events to UI events."""

         ctx = self._context
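The new ReasoningTextDelta case streams the model's reasoning text to the UI as ThinkingDeltaEvent, which lines up with the new thinking_cmd.py and the enlarged repl/event_handler.py in this release. A hedged sketch of how a consumer of the event stream might branch on it, assuming only the fields visible in this hunk (content, response_id, session_id) and that the event classes are exposed via klaude_code.protocol.events; the real rendering logic is not shown in this diff:

# Minimal consumer sketch; the actual REPL handling lives in
# klaude_code/ui/modes/repl/event_handler.py.
from klaude_code.protocol import events

async def render_events(event_stream) -> None:
    async for event in event_stream:
        match event:
            case events.ThinkingDeltaEvent(content=content):
                print(f"[thinking] {content}", end="", flush=True)  # assumed rendering style
            case events.AssistantMessageDeltaEvent(content=content):
                print(content, end="", flush=True)
            case _:
                pass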