klaude-code 1.2.17__py3-none-any.whl → 1.2.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. klaude_code/cli/config_cmd.py +1 -1
  2. klaude_code/cli/debug.py +1 -1
  3. klaude_code/cli/main.py +3 -9
  4. klaude_code/cli/runtime.py +10 -13
  5. klaude_code/command/__init__.py +4 -1
  6. klaude_code/command/clear_cmd.py +2 -7
  7. klaude_code/command/command_abc.py +33 -5
  8. klaude_code/command/debug_cmd.py +79 -0
  9. klaude_code/command/diff_cmd.py +2 -6
  10. klaude_code/command/export_cmd.py +7 -7
  11. klaude_code/command/export_online_cmd.py +1 -5
  12. klaude_code/command/help_cmd.py +4 -9
  13. klaude_code/command/model_cmd.py +10 -6
  14. klaude_code/command/prompt_command.py +2 -6
  15. klaude_code/command/refresh_cmd.py +2 -7
  16. klaude_code/command/registry.py +2 -4
  17. klaude_code/command/release_notes_cmd.py +2 -6
  18. klaude_code/command/status_cmd.py +2 -7
  19. klaude_code/command/terminal_setup_cmd.py +2 -6
  20. klaude_code/command/thinking_cmd.py +13 -8
  21. klaude_code/config/select_model.py +81 -5
  22. klaude_code/const/__init__.py +1 -1
  23. klaude_code/core/executor.py +236 -109
  24. klaude_code/core/manager/__init__.py +2 -4
  25. klaude_code/core/prompts/prompt-claude-code.md +1 -1
  26. klaude_code/core/prompts/prompt-sub-agent-web.md +8 -5
  27. klaude_code/core/reminders.py +9 -35
  28. klaude_code/core/tool/file/read_tool.py +38 -10
  29. klaude_code/core/tool/shell/bash_tool.py +22 -2
  30. klaude_code/core/tool/tool_runner.py +26 -23
  31. klaude_code/core/tool/truncation.py +23 -9
  32. klaude_code/core/tool/web/web_fetch_tool.md +1 -1
  33. klaude_code/core/tool/web/web_fetch_tool.py +36 -1
  34. klaude_code/core/turn.py +28 -0
  35. klaude_code/protocol/commands.py +1 -0
  36. klaude_code/protocol/sub_agent/web.py +3 -2
  37. klaude_code/session/session.py +2 -2
  38. klaude_code/session/templates/export_session.html +24 -13
  39. klaude_code/trace/__init__.py +20 -2
  40. klaude_code/ui/modes/repl/completers.py +19 -2
  41. klaude_code/ui/modes/repl/event_handler.py +8 -6
  42. klaude_code/ui/renderers/metadata.py +2 -4
  43. klaude_code/ui/renderers/thinking.py +24 -8
  44. klaude_code/ui/renderers/tools.py +79 -10
  45. klaude_code/ui/rich/code_panel.py +112 -0
  46. klaude_code/ui/rich/markdown.py +3 -4
  47. klaude_code/ui/rich/status.py +0 -2
  48. klaude_code/ui/rich/theme.py +10 -1
  49. {klaude_code-1.2.17.dist-info → klaude_code-1.2.18.dist-info}/METADATA +16 -6
  50. {klaude_code-1.2.17.dist-info → klaude_code-1.2.18.dist-info}/RECORD +53 -52
  51. klaude_code/core/manager/agent_manager.py +0 -132
  52. /klaude_code/{config → cli}/list_model.py +0 -0
  53. {klaude_code-1.2.17.dist-info → klaude_code-1.2.18.dist-info}/WHEEL +0 -0
  54. {klaude_code-1.2.17.dist-info → klaude_code-1.2.18.dist-info}/entry_points.txt +0 -0
klaude_code/core/reminders.py CHANGED
@@ -1,4 +1,3 @@
- import json
  import re
  import shlex
  from collections.abc import Awaitable, Callable
@@ -282,7 +281,6 @@ def get_memory_paths() -> list[tuple[Path, str]]:
  "user's private global instructions for all projects",
  ),
  (Path.cwd() / "AGENTS.md", "project instructions, checked into the codebase"),
- (Path.cwd() / "AGENT.md", "project instructions, checked into the codebase"),
  (Path.cwd() / "CLAUDE.md", "project instructions, checked into the codebase"),
  ]

@@ -351,46 +349,22 @@ IMPORTANT: this context may or may not be relevant to your tasks. You should not
  return None


- def get_last_turn_tool_call(session: Session) -> list[model.ToolCallItem]:
- tool_calls: list[model.ToolCallItem] = []
- for item in reversed(session.conversation_history):
- if isinstance(item, model.ToolCallItem):
- tool_calls.append(item)
- if isinstance(
- item,
- (
- model.ReasoningEncryptedItem,
- model.ReasoningTextItem,
- model.AssistantMessageItem,
- ),
- ):
- break
- return tool_calls
-
-
  MEMORY_FILE_NAMES = ["CLAUDE.md", "AGENTS.md", "AGENT.md"]


  async def last_path_memory_reminder(
  session: Session,
  ) -> model.DeveloperMessageItem | None:
- """When last turn tool call entered a directory (or parent directory) with CLAUDE.md AGENTS.md"""
- tool_calls = get_last_turn_tool_call(session)
- if len(tool_calls) == 0:
+ """Load CLAUDE.md/AGENTS.md from directories containing files in file_tracker.
+
+ Uses session.file_tracker to detect accessed paths (works for both tool calls
+ and @ file references). Uses session.loaded_memory to avoid duplicate loading.
+ """
+ if not session.file_tracker:
  return None
- paths: list[str] = []
- for tool_call in tool_calls:
- if tool_call.name in (tools.READ, tools.EDIT, tools.MULTI_EDIT, tools.WRITE):
- try:
- json_dict = json.loads(tool_call.arguments)
- if path := json_dict.get("file_path", ""):
- paths.append(path)
- except json.JSONDecodeError:
- continue
- paths = list(set(paths))
+
+ paths = list(session.file_tracker.keys())
  memories: list[Memory] = []
- if len(paths) == 0:
- return None

  cwd = Path.cwd().resolve()
  loaded_set: set[str] = set(session.loaded_memory)
@@ -484,8 +458,8 @@ def load_agent_reminders(
  reminders.extend(
  [
  memory_reminder,
- last_path_memory_reminder,
  at_file_reader_reminder,
+ last_path_memory_reminder,
  file_changed_externally_reminder,
  image_reminder,
  ]
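The visible hunk stops before the directory walk, but the pieces it does show (MEMORY_FILE_NAMES, the file_tracker paths, cwd, and loaded_set) suggest the reminder now walks from each tracked file's directory up to the working directory, picking up memory files it has not loaded yet. A minimal sketch of that idea; the walk itself and the helper name are assumptions, not code from the package:

```python
from pathlib import Path

MEMORY_FILE_NAMES = ["CLAUDE.md", "AGENTS.md", "AGENT.md"]


def discover_memory_files(tracked_paths: list[str], loaded: set[str]) -> list[Path]:
    """Hypothetical helper: walk from each tracked file up to cwd, collecting
    memory files not seen before (the real reminder builds Memory objects and
    a DeveloperMessageItem from these paths)."""
    cwd = Path.cwd().resolve()
    found: list[Path] = []
    for raw_path in tracked_paths:
        directory = Path(raw_path).resolve().parent
        while True:
            for name in MEMORY_FILE_NAMES:
                candidate = directory / name
                if candidate.is_file() and str(candidate) not in loaded:
                    found.append(candidate)
                    loaded.add(str(candidate))
            # Stop at the project root, or immediately if the file lives outside it.
            if directory == cwd or cwd not in directory.parents:
                break
            directory = directory.parent
    return found
```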
klaude_code/core/tool/file/read_tool.py CHANGED
@@ -16,12 +16,6 @@ from klaude_code.core.tool.tool_context import get_current_file_tracker
  from klaude_code.core.tool.tool_registry import register
  from klaude_code.protocol import llm_param, model, tools

- SYSTEM_REMINDER_MALICIOUS = (
- "<system-reminder>\n"
- "Whenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n"
- "</system-reminder>"
- )
-
  _IMAGE_MIME_TYPES: dict[str, str] = {
  ".png": "image/png",
  ".jpg": "image/jpeg",
@@ -51,6 +45,8 @@ class ReadSegmentResult:
  selected_lines: list[tuple[int, str]]
  selected_chars_count: int
  remaining_selected_beyond_cap: int
+ # For large file diagnostics: list of (start_line, end_line, char_count)
+ segment_char_stats: list[tuple[int, int, int]]


  def _read_segment(options: ReadOptions) -> ReadSegmentResult:
@@ -59,6 +55,13 @@ def _read_segment(options: ReadOptions) -> ReadSegmentResult:
  remaining_selected_beyond_cap = 0
  selected_lines: list[tuple[int, str]] = []
  selected_chars = 0
+
+ # Track char counts per 100-line segment for diagnostics
+ segment_size = 100
+ segment_char_stats: list[tuple[int, int, int]] = []
+ current_segment_start = options.offset
+ current_segment_chars = 0
+
  with open(options.file_path, encoding="utf-8", errors="replace") as f:
  for line_no, raw_line in enumerate(f, start=1):
  total_lines = line_no
@@ -74,16 +77,32 @@
  content[: options.char_limit_per_line]
  + f" ... (more {truncated_chars} characters in this line are truncated)"
  )
- selected_chars += len(content) + 1
+ line_chars = len(content) + 1
+ selected_chars += line_chars
+ current_segment_chars += line_chars
+
+ # Check if we've completed a segment
+ if selected_lines_count % segment_size == 0:
+ segment_char_stats.append((current_segment_start, line_no, current_segment_chars))
+ current_segment_start = line_no + 1
+ current_segment_chars = 0
+
  if options.global_line_cap is None or len(selected_lines) < options.global_line_cap:
  selected_lines.append((line_no, content))
  else:
  remaining_selected_beyond_cap += 1
+
+ # Add the last partial segment if any
+ if current_segment_chars > 0 and selected_lines_count > 0:
+ last_line = options.offset + selected_lines_count - 1
+ segment_char_stats.append((current_segment_start, last_line, current_segment_chars))
+
  return ReadSegmentResult(
  total_lines=total_lines,
  selected_lines=selected_lines,
  selected_chars_count=selected_chars,
  remaining_selected_beyond_cap=remaining_selected_beyond_cap,
+ segment_char_stats=segment_char_stats,
  )


@@ -292,10 +311,21 @@ class ReadTool(ToolABC):

  # After limit/offset, if total selected chars exceed limit, error (only check if limits are enabled)
  if max_chars is not None and read_result.selected_chars_count > max_chars:
+ # Build segment statistics for better guidance
+ stats_lines: list[str] = []
+ for start, end, chars in read_result.segment_char_stats:
+ stats_lines.append(f" Lines {start}-{end}: {chars} chars")
+ segment_stats_str = "\n".join(stats_lines) if stats_lines else " (no segment data)"
+
  return model.ToolResultItem(
  status="error",
  output=(
- f"File content ({read_result.selected_chars_count} chars) exceeds maximum allowed tokens ({max_chars}). Please use offset and limit parameters to read specific portions of the file, or use the `rg` command to search for specific content."
+ f"Selected file content {read_result.selected_chars_count} chars exceeds maximum allowed chars ({max_chars}).\n"
+ f"File has {read_result.total_lines} total lines.\n\n"
+ f"Character distribution by segment:\n{segment_stats_str}\n\n"
+ f"Use offset and limit parameters to read specific portions. "
+ f"For example: offset=1, limit=100 to read the first 100 lines. "
+ f"Or use `rg` command to search for specific content."
  ),
  )

@@ -304,8 +334,6 @@
  if read_result.remaining_selected_beyond_cap > 0:
  lines_out.append(f"... (more {read_result.remaining_selected_beyond_cap} lines are truncated)")
  read_result_str = "\n".join(lines_out)
- # if read_result_str:
- # read_result_str += "\n\n" + SYSTEM_REMINDER_MALICIOUS

  # Update FileTracker with last modified time
  _track_file_access(file_path)
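The hunks reference selected_lines_count without showing where it is maintained, so here is a self-contained sketch of the per-100-line segment accounting on an in-memory list of lines; the real _read_segment streams from the file and also enforces per-line and global caps:

```python
def segment_char_stats(lines: list[str], offset: int = 1, segment_size: int = 100) -> list[tuple[int, int, int]]:
    """Return (start_line, end_line, char_count) for each segment of selected lines.

    Simplified illustration of the bookkeeping added to _read_segment.
    """
    stats: list[tuple[int, int, int]] = []
    segment_start = offset
    segment_chars = 0
    for index, content in enumerate(lines, start=1):
        line_no = offset + index - 1
        segment_chars += len(content) + 1  # +1 for the newline
        if index % segment_size == 0:
            stats.append((segment_start, line_no, segment_chars))
            segment_start = line_no + 1
            segment_chars = 0
    if segment_chars > 0 and lines:
        stats.append((segment_start, offset + len(lines) - 1, segment_chars))
    return stats


# 250 lines of 50 chars -> segments covering lines 1-100, 101-200, 201-250.
print(segment_char_stats(["x" * 50] * 250))
```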
klaude_code/core/tool/shell/bash_tool.py CHANGED
@@ -11,8 +11,28 @@ from klaude_code.core.tool.tool_abc import ToolABC, load_desc
  from klaude_code.core.tool.tool_registry import register
  from klaude_code.protocol import llm_param, model, tools

- # Regex to strip ANSI escape sequences from command output
- _ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-9;]*m")
+ # Regex to strip ANSI and terminal control sequences from command output
+ #
+ # This is intentionally broader than just SGR color codes (e.g. "\x1b[31m").
+ # Many interactive or TUI-style programs emit additional escape sequences
+ # that move the cursor, clear the screen, or switch screen buffers
+ # (CSI/OSC/DCS/APC/PM, etc). If these reach the Rich console, they can
+ # corrupt the REPL layout. We therefore remove all of them before
+ # rendering the output.
+ _ANSI_ESCAPE_RE = re.compile(
+ r"""
+ \x1B
+ (?:
+ \[[0-?]*[ -/]*[@-~] | # CSI sequences
+ \][0-?]*.*?(?:\x07|\x1B\\) | # OSC sequences
+ P.*?(?:\x07|\x1B\\) | # DCS sequences
+ _.*?(?:\x07|\x1B\\) | # APC sequences
+ \^.*?(?:\x07|\x1B\\) | # PM sequences
+ [@-Z\\-_] # 2-char sequences
+ )
+ """,
+ re.VERBOSE | re.DOTALL,
+ )


  @register(tools.BASH)
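A quick standalone check of the broadened pattern (same regex, compiled outside the package) against output that mixes an OSC title sequence, a clear-screen CSI, and SGR colors:

```python
import re

# Same pattern as in bash_tool.py, reproduced here for a quick check.
ANSI_ESCAPE_RE = re.compile(
    r"""
    \x1B
    (?:
        \[[0-?]*[ -/]*[@-~]          | # CSI: colors, cursor movement, clear screen
        \][0-?]*.*?(?:\x07|\x1B\\)   | # OSC: window title, hyperlinks
        P.*?(?:\x07|\x1B\\)          | # DCS
        _.*?(?:\x07|\x1B\\)          | # APC
        \^.*?(?:\x07|\x1B\\)         | # PM
        [@-Z\\-_]                      # two-character escapes
    )
    """,
    re.VERBOSE | re.DOTALL,
)

raw = "\x1b]0;my-title\x07\x1b[2J\x1b[1;31mERROR\x1b[0m build failed"
print(ANSI_ESCAPE_RE.sub("", raw))  # -> "ERROR build failed"
```

The old pattern would have left the `\x1b]0;my-title\x07` and `\x1b[2J` sequences in place, which is exactly the kind of output that scrambles a Rich-rendered REPL.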
klaude_code/core/tool/tool_runner.py CHANGED
@@ -9,6 +9,9 @@ from klaude_code.core.tool.truncation import truncate_tool_output
  from klaude_code.protocol import model, tools
  from klaude_code.protocol.sub_agent import is_sub_agent_tool

+ # Tools that can run concurrently (IO-bound, no local state mutations)
+ _CONCURRENT_TOOLS: frozenset[str] = frozenset({tools.WEB_SEARCH, tools.WEB_FETCH})
+

  async def run_tool(tool_call: model.ToolCallItem, registry: dict[str, type[ToolABC]]) -> model.ToolResultItem:
  """Execute a tool call and return the result.
@@ -89,8 +92,8 @@ class ToolExecutor:
  """Execute and coordinate a batch of tool calls for a single turn.

  The executor is responsible for:
- - Partitioning tool calls into regular tools and sub-agent tools
- - Running regular tools sequentially and sub-agent tools concurrently
+ - Partitioning tool calls into sequential and concurrent tools
+ - Running sequential tools one by one and concurrent tools in parallel
  - Emitting ToolCall/ToolResult events and tool side-effect events
  - Tracking unfinished calls so `cancel()` can synthesize cancellation results
  """
@@ -106,7 +109,7 @@

  self._unfinished_calls: dict[str, model.ToolCallItem] = {}
  self._call_event_emitted: set[str] = set()
- self._sub_agent_tasks: set[asyncio.Task[list[ToolExecutorEvent]]] = set()
+ self._concurrent_tasks: set[asyncio.Task[list[ToolExecutorEvent]]] = set()

  async def run_tools(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[ToolExecutorEvent]:
  """Run the given tool calls and yield execution events.
@@ -119,10 +122,10 @@
  for tool_call in tool_calls:
  self._unfinished_calls[tool_call.call_id] = tool_call

- regular_tool_calls, sub_agent_tool_calls = self._partition_tool_calls(tool_calls)
+ sequential_tool_calls, concurrent_tool_calls = self._partition_tool_calls(tool_calls)

- # Run regular tools sequentially.
- for tool_call in regular_tool_calls:
+ # Run sequential tools one by one.
+ for tool_call in sequential_tool_calls:
  tool_call_event = self._build_tool_call_started(tool_call)
  self._call_event_emitted.add(tool_call.call_id)
  yield tool_call_event
@@ -136,16 +139,16 @@
  for exec_event in result_events:
  yield exec_event

- # Run sub-agent tools concurrently.
- if sub_agent_tool_calls:
+ # Run concurrent tools (sub-agents, web tools) in parallel.
+ if concurrent_tool_calls:
  execution_tasks: list[asyncio.Task[list[ToolExecutorEvent]]] = []
- for tool_call in sub_agent_tool_calls:
+ for tool_call in concurrent_tool_calls:
  tool_call_event = self._build_tool_call_started(tool_call)
  self._call_event_emitted.add(tool_call.call_id)
  yield tool_call_event

  task = asyncio.create_task(self._run_single_tool_call(tool_call))
- self._register_sub_agent_task(task)
+ self._register_concurrent_task(task)
  execution_tasks.append(task)

  for task in asyncio.as_completed(execution_tasks):
@@ -165,7 +168,7 @@
  def cancel(self) -> Iterable[ToolExecutorEvent]:
  """Cancel unfinished tool calls and synthesize error results.

- - Cancels any running sub-agent tool tasks so they stop emitting events.
+ - Cancels any running concurrent tool tasks so they stop emitting events.
  - For each unfinished tool call, yields a ToolExecutionCallStarted (if not
  already emitted for this turn) followed by a ToolExecutionResult with
  error status and a standard cancellation output. The corresponding
@@ -174,11 +177,11 @@

  events_to_yield: list[ToolExecutorEvent] = []

- # Cancel running sub-agent tool tasks.
- for task in list(self._sub_agent_tasks):
+ # Cancel running concurrent tool tasks.
+ for task in list(self._concurrent_tasks):
  if not task.done():
  task.cancel()
- self._sub_agent_tasks.clear()
+ self._concurrent_tasks.clear()

  if not self._unfinished_calls:
  return events_to_yield
@@ -203,11 +206,11 @@

  return events_to_yield

- def _register_sub_agent_task(self, task: asyncio.Task[list[ToolExecutorEvent]]) -> None:
- self._sub_agent_tasks.add(task)
+ def _register_concurrent_task(self, task: asyncio.Task[list[ToolExecutorEvent]]) -> None:
+ self._concurrent_tasks.add(task)

  def _cleanup(completed: asyncio.Task[list[ToolExecutorEvent]]) -> None:
- self._sub_agent_tasks.discard(completed)
+ self._concurrent_tasks.discard(completed)

  task.add_done_callback(_cleanup)

@@ -215,14 +218,14 @@
  def _partition_tool_calls(
  tool_calls: list[model.ToolCallItem],
  ) -> tuple[list[model.ToolCallItem], list[model.ToolCallItem]]:
- regular_tool_calls: list[model.ToolCallItem] = []
- sub_agent_tool_calls: list[model.ToolCallItem] = []
+ sequential_tool_calls: list[model.ToolCallItem] = []
+ concurrent_tool_calls: list[model.ToolCallItem] = []
  for tool_call in tool_calls:
- if is_sub_agent_tool(tool_call.name):
- sub_agent_tool_calls.append(tool_call)
+ if is_sub_agent_tool(tool_call.name) or tool_call.name in _CONCURRENT_TOOLS:
+ concurrent_tool_calls.append(tool_call)
  else:
- regular_tool_calls.append(tool_call)
- return regular_tool_calls, sub_agent_tool_calls
+ sequential_tool_calls.append(tool_call)
+ return sequential_tool_calls, concurrent_tool_calls

  def _build_tool_call_started(self, tool_call: model.ToolCallItem) -> ToolExecutionCallStarted:
  return ToolExecutionCallStarted(tool_call=tool_call)
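For illustration, the partition-then-fan-out pattern the executor now uses can be reduced to a few lines of asyncio; the names below are stand-ins, not the package's API, and the real executor dispatches ToolCallItem objects and yields ToolExecutorEvent values rather than strings:

```python
import asyncio

CONCURRENT_TOOLS = {"WebSearch", "WebFetch"}  # illustrative stand-in for _CONCURRENT_TOOLS


async def run_one(name: str) -> str:
    await asyncio.sleep(0.1)  # pretend to do IO
    return f"{name}: done"


async def run_batch(names: list[str]) -> list[str]:
    sequential = [n for n in names if n not in CONCURRENT_TOOLS]
    concurrent = [n for n in names if n in CONCURRENT_TOOLS]
    results: list[str] = []
    # State-mutating tools run strictly in order.
    for name in sequential:
        results.append(await run_one(name))
    # IO-bound tools fan out and report back as they finish.
    tasks = [asyncio.create_task(run_one(name)) for name in concurrent]
    for finished in asyncio.as_completed(tasks):
        results.append(await finished)
    return results


print(asyncio.run(run_batch(["Edit", "WebSearch", "WebFetch"])))
```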
klaude_code/core/tool/truncation.py CHANGED
@@ -21,6 +21,15 @@ class TruncationResult:
  truncated_length: int = 0


+ FILE_SAVED_PATTERN = re.compile(r"<file_saved>([^<]+)</file_saved>")
+
+
+ def _extract_saved_file_path(output: str) -> str | None:
+ """Extract file path from <file_saved> tag if present."""
+ match = FILE_SAVED_PATTERN.search(output)
+ return match.group(1) if match else None
+
+
  def _extract_url_filename(url: str) -> str:
  """Extract a safe filename from a URL."""
  parsed = urlparse(url)
@@ -116,24 +125,29 @@ class SmartTruncationStrategy(TruncationStrategy):
  if original_length <= self.max_length:
  return TruncationResult(output=output, was_truncated=False, original_length=original_length)

- # Save full output to file
- saved_file_path = self._save_to_file(output, tool_call)
+ # Check if file was already saved (e.g., by WebFetch)
+ existing_file_path = _extract_saved_file_path(output)
+ saved_file_path = existing_file_path or self._save_to_file(output, tool_call)
+
+ # Strip existing <file_saved> tag to avoid duplication in head/tail
+ content_to_truncate = FILE_SAVED_PATTERN.sub("", output).lstrip("\n") if existing_file_path else output
+ content_length = len(content_to_truncate)

- truncated_length = original_length - self.head_chars - self.tail_chars
- head_content = output[: self.head_chars]
- tail_content = output[-self.tail_chars :]
+ truncated_length = content_length - self.head_chars - self.tail_chars
+ head_content = content_to_truncate[: self.head_chars]
+ tail_content = content_to_truncate[-self.tail_chars :]

  # Build truncated output with file info
  if saved_file_path:
  header = (
- f"<system-reminder>Output truncated: {truncated_length} chars hidden. "
- f"Full tool output saved to {saved_file_path}. "
- f"Use Read with limit+offset or rg/grep to inspect.\n"
+ f"<system-reminder>Output truncated ({truncated_length} chars hidden) to reduce context usage. "
+ f"Full content saved to <file_saved>{saved_file_path}</file_saved>. "
+ f"Use Read(offset, limit) or rg to inspect if needed. "
  f"Showing first {self.head_chars} and last {self.tail_chars} chars:</system-reminder>\n\n"
  )
  else:
  header = (
- f"<system-reminder>Output truncated: {truncated_length} chars hidden. "
+ f"<system-reminder>Output truncated ({truncated_length} chars hidden) to reduce context usage. "
  f"Showing first {self.head_chars} and last {self.tail_chars} chars:</system-reminder>\n\n"
  )

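A condensed sketch of the new head/tail behavior that reuses an existing <file_saved> tag instead of re-saving; the max_length/head/tail values here are arbitrary, and the real SmartTruncationStrategy also writes unsaved output to disk via _save_to_file:

```python
import re

FILE_SAVED_PATTERN = re.compile(r"<file_saved>([^<]+)</file_saved>")


def smart_truncate(output: str, max_length: int = 200, head: int = 80, tail: int = 80) -> str:
    """Illustrative head/tail truncation with <file_saved> reuse."""
    if len(output) <= max_length:
        return output
    match = FILE_SAVED_PATTERN.search(output)
    saved_path = match.group(1) if match else None
    # Strip the tag so it is not duplicated inside the head/tail excerpt.
    body = FILE_SAVED_PATTERN.sub("", output).lstrip("\n") if saved_path else output
    hidden = len(body) - head - tail
    note = f" Full content saved to <file_saved>{saved_path}</file_saved>." if saved_path else ""
    header = f"<system-reminder>Output truncated ({hidden} chars hidden).{note}</system-reminder>\n\n"
    return header + body[:head] + "\n...\n" + body[-tail:]
```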
klaude_code/core/tool/web/web_fetch_tool.md CHANGED
@@ -5,4 +5,4 @@ The tool automatically processes the response based on Content-Type:
  - JSON responses are formatted with indentation
  - Markdown and other text content is returned as-is

- Use this tool to retrieve web page content for analysis.
+ Content is always saved to a local file. The file path is included at the start of the output in a `<file_saved>` tag. For large content that gets truncated, you can read the saved file directly.
klaude_code/core/tool/web/web_fetch_tool.py CHANGED
@@ -1,18 +1,23 @@
  import asyncio
  import json
+ import re
+ import time
  import urllib.error
  import urllib.request
  from http.client import HTTPResponse
  from pathlib import Path
+ from urllib.parse import urlparse

  from pydantic import BaseModel

+ from klaude_code import const
  from klaude_code.core.tool.tool_abc import ToolABC, load_desc
  from klaude_code.core.tool.tool_registry import register
  from klaude_code.protocol import llm_param, model, tools

  DEFAULT_TIMEOUT_SEC = 30
  DEFAULT_USER_AGENT = "Mozilla/5.0 (compatible; KlaudeCode/1.0)"
+ WEB_FETCH_SAVE_DIR = Path(const.TOOL_OUTPUT_TRUNCATION_DIR) / "web"


  def _extract_content_type(response: HTTPResponse) -> str:
@@ -43,6 +48,30 @@ def _format_json(text: str) -> str:
  return text


+ def _extract_url_filename(url: str) -> str:
+ """Extract a safe filename from a URL."""
+ parsed = urlparse(url)
+ host = parsed.netloc.replace(".", "_").replace(":", "_")
+ path = parsed.path.strip("/").replace("/", "_")
+ name = f"{host}_{path}" if path else host
+ name = re.sub(r"[^a-zA-Z0-9_\-]", "_", name)
+ return name[:80] if len(name) > 80 else name
+
+
+ def _save_web_content(url: str, content: str) -> str | None:
+ """Save web content to file. Returns file path or None on failure."""
+ try:
+ WEB_FETCH_SAVE_DIR.mkdir(parents=True, exist_ok=True)
+ timestamp = int(time.time())
+ identifier = _extract_url_filename(url)
+ filename = f"{identifier}-{timestamp}.md"
+ file_path = WEB_FETCH_SAVE_DIR / filename
+ file_path.write_text(content, encoding="utf-8")
+ return str(file_path)
+ except OSError:
+ return None
+
+
  def _process_content(content_type: str, text: str) -> str:
  """Process content based on Content-Type header."""
  if content_type == "text/html":
@@ -127,9 +156,15 @@ class WebFetchTool(ToolABC):
  content_type, text = await asyncio.to_thread(_fetch_url, url)
  processed = _process_content(content_type, text)

+ # Always save content to file
+ saved_path = _save_web_content(url, processed)
+
+ # Build output with file path info
+ output = f"<file_saved>{saved_path}</file_saved>\n\n{processed}" if saved_path else processed
+
  return model.ToolResultItem(
  status="success",
- output=processed,
+ output=output,
  )

  except urllib.error.HTTPError as e:
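The filename derivation is fully shown above, so it can be checked in isolation; the sample URL and expected result below are illustrative, not taken from the package:

```python
import re
from urllib.parse import urlparse


def extract_url_filename(url: str) -> str:
    """Same derivation as _extract_url_filename, reproduced for a quick check."""
    parsed = urlparse(url)
    host = parsed.netloc.replace(".", "_").replace(":", "_")
    path = parsed.path.strip("/").replace("/", "_")
    name = f"{host}_{path}" if path else host
    name = re.sub(r"[^a-zA-Z0-9_\-]", "_", name)
    return name[:80] if len(name) > 80 else name


# Saved as <save_dir>/docs_python_org_3_library_re_html-<timestamp>.md
print(extract_url_filename("https://docs.python.org/3/library/re.html"))
# -> docs_python_org_3_library_re_html
```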
klaude_code/core/turn.py CHANGED
@@ -100,6 +100,8 @@ class TurnExecutor:
  self._context = context
  self._tool_executor: ToolExecutor | None = None
  self._turn_result: TurnResult | None = None
+ self._assistant_delta_buffer: list[str] = []
+ self._assistant_response_id: str | None = None

  @property
  def report_back_result(self) -> str | None:
@@ -138,6 +140,7 @@
  def cancel(self) -> list[events.Event]:
  """Cancel running tools and return any resulting events."""
  ui_events: list[events.Event] = []
+ self._persist_partial_assistant_on_cancel()
  if self._tool_executor is not None:
  for exec_event in self._tool_executor.cancel():
  for ui_event in build_events_from_tool_executor_event(self._context.session_ctx.session_id, exec_event):
@@ -227,6 +230,9 @@
  session_id=session_ctx.session_id,
  )
  case model.AssistantMessageDelta() as item:
+ if item.response_id:
+ self._assistant_response_id = item.response_id
+ self._assistant_delta_buffer.append(item.content)
  yield events.AssistantMessageDeltaEvent(
  content=item.content,
  response_id=item.response_id,
@@ -274,6 +280,8 @@
  session_ctx.append_history([turn_result.assistant_message])
  if turn_result.tool_calls:
  session_ctx.append_history(turn_result.tool_calls)
+ self._assistant_delta_buffer.clear()
+ self._assistant_response_id = None

  async def _run_tool_executor(self, tool_calls: list[model.ToolCallItem]) -> AsyncGenerator[events.Event]:
  """Run tools for the turn and translate executor events to UI events."""
@@ -292,3 +300,23 @@
  yield ui_event
  finally:
  self._tool_executor = None
+
+ def _persist_partial_assistant_on_cancel(self) -> None:
+ """Persist streamed assistant text when a turn is interrupted.
+
+ Reasoning and tool calls are intentionally discarded on interrupt; only
+ the assistant message text collected so far is saved so it appears in
+ subsequent history/context.
+ """
+
+ if not self._assistant_delta_buffer:
+ return
+ partial_text = "".join(self._assistant_delta_buffer) + "<system interrupted by user>"
+ if not partial_text:
+ return
+ message_item = model.AssistantMessageItem(
+ content=partial_text,
+ response_id=self._assistant_response_id,
+ )
+ self._context.session_ctx.append_history([message_item])
+ self._assistant_delta_buffer.clear()
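The buffering pattern is small enough to sketch on its own; the class below is a stand-in for illustration, not the package's TurnExecutor, which appends a model.AssistantMessageItem to the session history instead of returning a string:

```python
class DeltaBuffer:
    """Collect streamed assistant deltas so a partial message survives an interrupt."""

    def __init__(self) -> None:
        self.parts: list[str] = []
        self.response_id: str | None = None

    def on_delta(self, content: str, response_id: str | None) -> None:
        if response_id:
            self.response_id = response_id
        self.parts.append(content)

    def flush_on_cancel(self) -> str | None:
        """Return the partial text to persist, or None if nothing was streamed."""
        if not self.parts:
            return None
        text = "".join(self.parts) + "<system interrupted by user>"
        self.parts.clear()
        return text


buf = DeltaBuffer()
buf.on_delta("Working on", "resp-1")
buf.on_delta(" it...", None)
print(buf.flush_on_cancel())  # "Working on it...<system interrupted by user>"
```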
klaude_code/protocol/commands.py CHANGED
@@ -3,6 +3,7 @@ from enum import Enum

  class CommandName(str, Enum):
  INIT = "init"
+ DEBUG = "debug"
  DIFF = "diff"
  HELP = "help"
  MODEL = "model"
klaude_code/protocol/sub_agent/web.py CHANGED
@@ -20,14 +20,15 @@ Capabilities:

  How to use:
  - Write a clear prompt describing what information you need - the agent will search and fetch as needed
+ - Account for "Today's date" in <env>. For example, if <env> says "Today's date: 2025-07-01", and the user wants the latest docs, do not use 2024 in the search query. Use 2025.
  - Optionally provide a `url` if you already know the target page
  - Use `output_format` (JSON Schema) to get structured data back from the agent
- - Account for "Today's date" in <env>. For example, if <env> says "Today's date: 2025-07-01", and the user wants the latest docs, do not use 2024 in the search query. Use 2025.

  What you receive:
  - The agent returns a text response summarizing its findings
  - With `output_format`, you receive structured JSON matching your schema
- - The response is the agent's analysis, not raw web content\
+ - The response is the agent's analysis, not raw web content
+ - Web content is saved to local files (paths included in Sources) - read them directly if you need full content\
  """

  WEB_AGENT_PARAMETERS = {
klaude_code/session/session.py CHANGED
@@ -36,7 +36,7 @@ class Session(BaseModel):

  @property
  def messages_count(self) -> int:
- """Count of user and assistant messages in conversation history.
+ """Count of user, assistant messages, and tool calls in conversation history.

  This is a cached property that is invalidated when append_history is called.
  """
@@ -44,7 +44,7 @@
  self._messages_count_cache = sum(
  1
  for it in self.conversation_history
- if isinstance(it, (model.UserMessageItem, model.AssistantMessageItem))
+ if isinstance(it, (model.UserMessageItem, model.AssistantMessageItem, model.ToolCallItem))
  )
  return self._messages_count_cache

klaude_code/session/templates/export_session.html CHANGED
@@ -9,22 +9,36 @@
  href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 24 24%22 fill=%22none%22 stroke=%22%230851b2%22 stroke-width=%222%22 stroke-linecap=%22round%22 stroke-linejoin=%22round%22><polyline points=%2216 18 22 12 16 6%22></polyline><polyline points=%228 6 2 12 8 18%22></polyline></svg>"
  />
  <link
- href="https://cdn.jsdelivr.net/npm/@fontsource/geist-sans/latin-400.css"
+ href="https://cdn.jsdelivr.net/npm/@fontsource/source-sans-3/400.css"
  rel="stylesheet"
  />
  <link
- href="https://cdn.jsdelivr.net/npm/@fontsource/geist-sans/latin-500.css"
+ href="https://cdn.jsdelivr.net/npm/@fontsource/source-sans-3/400-italic.css"
  rel="stylesheet"
  />
  <link
- href="https://cdn.jsdelivr.net/npm/@fontsource/geist-sans/latin-700.css"
+ href="https://cdn.jsdelivr.net/npm/@fontsource/source-sans-3/700.css"
+ rel="stylesheet"
+ />
+ <link
+ href="https://cdn.jsdelivr.net/npm/@fontsource/source-sans-3/700-italic.css"
+ rel="stylesheet"
+ />
+ <link
+ href="https://cdn.jsdelivr.net/npm/@fontsource/fira-code/400.css"
+ rel="stylesheet"
+ />
+ <link
+ href="https://cdn.jsdelivr.net/npm/@fontsource/fira-code/700.css"
  rel="stylesheet"
  />
  <style>
  :root {
- --bg-body: #ededed;
- --bg-container: #f0f0f0;
- --bg-card: #f0f0f0;
+ --bg-body: #eae9e5;
+ --bg-container: #edece9;
+ --bg-card: #efeeeb;
+ --bg-error: #ffebee;
+ --bg-code: #f2f1ed;
  --border: #c8c8c8;
  --text: #111111;
  --text-dim: #64748b;
@@ -32,13 +46,11 @@
  --accent-dim: rgba(8, 145, 178, 0.08);
  --success: #15803d;
  --error: #dc2626;
- --bg-error: #ffebee;
- --bg-code: #f3f3f3;
  --fg-inline-code: #4f4fc7;
- --font-mono: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, monospace;
- --font-markdown-mono: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, monospace;
- --font-markdown: "Geist Sans", system-ui, sans-serif;
- --font-weight-bold: 800;
+ --font-mono: "Fira Code", ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, monospace;
+ --font-markdown-mono: "Fira Code", ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, monospace;
+ --font-markdown: "Source Sans 3", system-ui, sans-serif;
+ --font-weight-bold: 700;
  --font-size-xs: 13px;
  --font-size-sm: 14px;
  --font-size-base: 15px;
@@ -62,7 +74,6 @@
  background-color: var(--bg-body);
  color: var(--text);
  font-family: var(--font-mono);
- font-feature-settings: "ss18";
  line-height: 1.6;
  font-size: var(--font-size-lg);
  -webkit-font-smoothing: antialiased;