klaude-code 1.2.20__py3-none-any.whl → 1.2.22__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (42)
  1. klaude_code/cli/debug.py +8 -10
  2. klaude_code/cli/main.py +23 -0
  3. klaude_code/cli/runtime.py +13 -1
  4. klaude_code/command/__init__.py +0 -3
  5. klaude_code/command/prompt-deslop.md +1 -1
  6. klaude_code/command/thinking_cmd.py +10 -0
  7. klaude_code/const/__init__.py +2 -5
  8. klaude_code/core/prompt.py +5 -2
  9. klaude_code/core/prompts/prompt-codex-gpt-5-2-codex.md +117 -0
  10. klaude_code/core/prompts/{prompt-codex-gpt-5-1.md → prompt-codex.md} +9 -42
  11. klaude_code/core/reminders.py +36 -2
  12. klaude_code/core/tool/__init__.py +0 -5
  13. klaude_code/core/tool/file/_utils.py +6 -0
  14. klaude_code/core/tool/file/apply_patch_tool.py +30 -72
  15. klaude_code/core/tool/file/diff_builder.py +151 -0
  16. klaude_code/core/tool/file/edit_tool.py +35 -18
  17. klaude_code/core/tool/file/read_tool.py +45 -86
  18. klaude_code/core/tool/file/write_tool.py +40 -30
  19. klaude_code/core/tool/shell/bash_tool.py +151 -3
  20. klaude_code/core/tool/web/mermaid_tool.md +26 -0
  21. klaude_code/protocol/commands.py +0 -1
  22. klaude_code/protocol/model.py +29 -10
  23. klaude_code/protocol/tools.py +1 -2
  24. klaude_code/session/export.py +75 -20
  25. klaude_code/session/session.py +7 -0
  26. klaude_code/session/templates/export_session.html +28 -0
  27. klaude_code/ui/renderers/common.py +26 -11
  28. klaude_code/ui/renderers/developer.py +0 -5
  29. klaude_code/ui/renderers/diffs.py +84 -0
  30. klaude_code/ui/renderers/tools.py +19 -98
  31. klaude_code/ui/rich/markdown.py +11 -1
  32. klaude_code/ui/rich/status.py +8 -11
  33. klaude_code/ui/rich/theme.py +14 -4
  34. {klaude_code-1.2.20.dist-info → klaude_code-1.2.22.dist-info}/METADATA +2 -1
  35. {klaude_code-1.2.20.dist-info → klaude_code-1.2.22.dist-info}/RECORD +37 -40
  36. klaude_code/command/diff_cmd.py +0 -136
  37. klaude_code/core/tool/file/multi_edit_tool.md +0 -42
  38. klaude_code/core/tool/file/multi_edit_tool.py +0 -175
  39. klaude_code/core/tool/memory/memory_tool.md +0 -20
  40. klaude_code/core/tool/memory/memory_tool.py +0 -456
  41. {klaude_code-1.2.20.dist-info → klaude_code-1.2.22.dist-info}/WHEEL +0 -0
  42. {klaude_code-1.2.20.dist-info → klaude_code-1.2.22.dist-info}/entry_points.txt +0 -0
klaude_code/core/tool/file/read_tool.py

@@ -2,6 +2,7 @@ from __future__ import annotations

 import asyncio
 import contextlib
+import hashlib
 import os
 from base64 import b64encode
 from dataclasses import dataclass
@@ -37,6 +38,7 @@ class ReadOptions:
     limit: int | None
     char_limit_per_line: int | None = const.READ_CHAR_LIMIT_PER_LINE
     global_line_cap: int | None = const.READ_GLOBAL_LINE_CAP
+    max_total_chars: int | None = const.READ_MAX_CHARS


 @dataclass
@@ -45,29 +47,32 @@ class ReadSegmentResult:
     selected_lines: list[tuple[int, str]]
     selected_chars_count: int
     remaining_selected_beyond_cap: int
-    # For large file diagnostics: list of (start_line, end_line, char_count)
-    segment_char_stats: list[tuple[int, int, int]]
+    remaining_due_to_char_limit: int
+    content_sha256: str


 def _read_segment(options: ReadOptions) -> ReadSegmentResult:
     total_lines = 0
     selected_lines_count = 0
     remaining_selected_beyond_cap = 0
+    remaining_due_to_char_limit = 0
     selected_lines: list[tuple[int, str]] = []
     selected_chars = 0
-
-    # Track char counts per 100-line segment for diagnostics
-    segment_size = 100
-    segment_char_stats: list[tuple[int, int, int]] = []
-    current_segment_start = options.offset
-    current_segment_chars = 0
+    char_limit_reached = False
+    hasher = hashlib.sha256()

     with open(options.file_path, encoding="utf-8", errors="replace") as f:
         for line_no, raw_line in enumerate(f, start=1):
             total_lines = line_no
+            hasher.update(raw_line.encode("utf-8"))
             within = line_no >= options.offset and (options.limit is None or selected_lines_count < options.limit)
             if not within:
                 continue
+
+            if char_limit_reached:
+                remaining_due_to_char_limit += 1
+                continue
+
             selected_lines_count += 1
             content = raw_line.rstrip("\n")
             original_len = len(content)
@@ -79,42 +84,39 @@ def _read_segment(options: ReadOptions) -> ReadSegmentResult:
             )
             line_chars = len(content) + 1
             selected_chars += line_chars
-            current_segment_chars += line_chars

-            # Check if we've completed a segment
-            if selected_lines_count % segment_size == 0:
-                segment_char_stats.append((current_segment_start, line_no, current_segment_chars))
-                current_segment_start = line_no + 1
-                current_segment_chars = 0
+            if options.max_total_chars is not None and selected_chars > options.max_total_chars:
+                char_limit_reached = True
+                selected_lines.append((line_no, content))
+                continue

             if options.global_line_cap is None or len(selected_lines) < options.global_line_cap:
                 selected_lines.append((line_no, content))
             else:
                 remaining_selected_beyond_cap += 1

-    # Add the last partial segment if any
-    if current_segment_chars > 0 and selected_lines_count > 0:
-        last_line = options.offset + selected_lines_count - 1
-        segment_char_stats.append((current_segment_start, last_line, current_segment_chars))
-
     return ReadSegmentResult(
         total_lines=total_lines,
         selected_lines=selected_lines,
         selected_chars_count=selected_chars,
         remaining_selected_beyond_cap=remaining_selected_beyond_cap,
-        segment_char_stats=segment_char_stats,
+        remaining_due_to_char_limit=remaining_due_to_char_limit,
+        content_sha256=hasher.hexdigest(),
     )


-def _track_file_access(file_path: str, *, is_memory: bool = False) -> None:
+def _track_file_access(file_path: str, *, content_sha256: str | None = None, is_memory: bool = False) -> None:
     file_tracker = get_current_file_tracker()
     if file_tracker is None or not file_exists(file_path) or is_directory(file_path):
         return
     with contextlib.suppress(Exception):
         existing = file_tracker.get(file_path)
-        # Preserve is_memory flag if already set
         is_mem = is_memory or (existing.is_memory if existing else False)
-        file_tracker[file_path] = model.FileStatus(mtime=Path(file_path).stat().st_mtime, is_memory=is_mem)
+        file_tracker[file_path] = model.FileStatus(
+            mtime=Path(file_path).stat().st_mtime,
+            content_sha256=content_sha256,
+            is_memory=is_mem,
+        )


 def _is_supported_image_file(file_path: str) -> bool:
@@ -129,12 +131,6 @@ def _image_mime_type(file_path: str) -> str:
     return mime_type


-def _encode_image_to_data_url(file_path: str, mime_type: str) -> str:
-    with open(file_path, "rb") as image_file:
-        encoded = b64encode(image_file.read()).decode("ascii")
-    return f"data:{mime_type};base64,{encoded}"
-
-
 @register(tools.READ)
 class ReadTool(ToolABC):
     class ReadArguments(BaseModel):
@@ -178,24 +174,18 @@ class ReadTool(ToolABC):
         return await cls.call_with_args(args)

     @classmethod
-    def _effective_limits(cls) -> tuple[int | None, int | None, int | None, int | None]:
-        """Return effective limits based on current policy: char_per_line, global_line_cap, max_chars, max_kb"""
+    def _effective_limits(cls) -> tuple[int | None, int | None, int | None]:
         return (
             const.READ_CHAR_LIMIT_PER_LINE,
             const.READ_GLOBAL_LINE_CAP,
             const.READ_MAX_CHARS,
-            const.READ_MAX_KB,
         )

     @classmethod
     async def call_with_args(cls, args: ReadTool.ReadArguments) -> model.ToolResultItem:
-        # Accept relative path by resolving to absolute (schema encourages absolute)
         file_path = os.path.abspath(args.file_path)
+        char_per_line, line_cap, max_chars = cls._effective_limits()

-        # Get effective limits based on policy
-        char_per_line, line_cap, max_chars, max_kb = cls._effective_limits()
-
-        # Common file errors
         if is_directory(file_path):
             return model.ToolResultItem(
                 status="error",
@@ -228,11 +218,9 @@
                 ),
             )

-        # If file is too large and no pagination provided (only check if limits are enabled)
         try:
             size_bytes = Path(file_path).stat().st_size
         except OSError:
-            # Best-effort size detection; on stat errors fall back to treating size as unknown.
             size_bytes = 0

         is_image_file = _is_supported_image_file(file_path)
@@ -247,42 +235,26 @@
                 )
             try:
                 mime_type = _image_mime_type(file_path)
-                data_url = _encode_image_to_data_url(file_path, mime_type)
+                with open(file_path, "rb") as image_file:
+                    image_bytes = image_file.read()
+                data_url = f"data:{mime_type};base64,{b64encode(image_bytes).decode('ascii')}"
             except Exception as exc:
                 return model.ToolResultItem(
                     status="error",
                     output=f"<tool_use_error>Failed to read image file: {exc}</tool_use_error>",
                 )

-            _track_file_access(file_path)
+            _track_file_access(file_path, content_sha256=hashlib.sha256(image_bytes).hexdigest())
             size_kb = size_bytes / 1024.0 if size_bytes else 0.0
             output_text = f"[image] {Path(file_path).name} ({size_kb:.1f}KB)"
             image_part = model.ImageURLPart(image_url=model.ImageURLPart.ImageURL(url=data_url, id=None))
             return model.ToolResultItem(status="success", output=output_text, images=[image_part])

-        if (
-            not is_image_file
-            and max_kb is not None
-            and args.offset is None
-            and args.limit is None
-            and size_bytes > max_kb * 1024
-        ):
-            size_kb = size_bytes / 1024.0
-            return model.ToolResultItem(
-                status="error",
-                output=(
-                    f"File content ({size_kb:.1f}KB) exceeds maximum allowed size ({max_kb}KB). Please use offset and limit parameters to read specific portions of the file, or use the `rg` command to search for specific content."
-                ),
-            )
-
         offset = 1 if args.offset is None or args.offset < 1 else int(args.offset)
         limit = None if args.limit is None else int(args.limit)
         if limit is not None and limit < 0:
             limit = 0

-        # Stream file line-by-line and build response
-        read_result: ReadSegmentResult | None = None
-
         try:
             read_result = await asyncio.to_thread(
                 _read_segment,
@@ -292,6 +264,7 @@
                     limit=limit,
                     char_limit_per_line=char_per_line,
                     global_line_cap=line_cap,
+                    max_total_chars=max_chars,
                 ),
             )

@@ -306,40 +279,26 @@
                 output="<tool_use_error>Illegal operation on a directory. read</tool_use_error>",
             )

-        # If offset beyond total lines, emit system reminder warning
         if offset > max(read_result.total_lines, 0):
             warn = f"<system-reminder>Warning: the file exists but is shorter than the provided offset ({offset}). The file has {read_result.total_lines} lines.</system-reminder>"
-            # Update FileTracker (we still consider it as a read attempt)
-            _track_file_access(file_path)
+            _track_file_access(file_path, content_sha256=read_result.content_sha256)
             return model.ToolResultItem(status="success", output=warn)

-        # After limit/offset, if total selected chars exceed limit, error (only check if limits are enabled)
-        if max_chars is not None and read_result.selected_chars_count > max_chars:
-            # Build segment statistics for better guidance
-            stats_lines: list[str] = []
-            for start, end, chars in read_result.segment_char_stats:
-                stats_lines.append(f"  Lines {start}-{end}: {chars} chars")
-            segment_stats_str = "\n".join(stats_lines) if stats_lines else "  (no segment data)"
+        lines_out: list[str] = [_format_numbered_line(no, content) for no, content in read_result.selected_lines]

-            return model.ToolResultItem(
-                status="error",
-                output=(
-                    f"Selected file content {read_result.selected_chars_count} chars exceeds maximum allowed chars ({max_chars}).\n"
-                    f"File has {read_result.total_lines} total lines.\n\n"
-                    f"Character distribution by segment:\n{segment_stats_str}\n\n"
-                    f"Use offset and limit parameters to read specific portions. "
-                    f"For example: offset=1, limit=100 to read the first 100 lines. "
-                    f"Or use `rg` command to search for specific content."
-                ),
+        # Show truncation info with reason
+        if read_result.remaining_due_to_char_limit > 0:
+            lines_out.append(
+                f"... ({read_result.remaining_due_to_char_limit} more lines truncated due to {max_chars} char limit, "
+                f"file has {read_result.total_lines} lines total, use offset/limit to read other parts)"
+            )
+        elif read_result.remaining_selected_beyond_cap > 0:
+            lines_out.append(
+                f"... ({read_result.remaining_selected_beyond_cap} more lines truncated due to {line_cap} line limit, "
+                f"file has {read_result.total_lines} lines total, use offset/limit to read other parts)"
             )

-        # Build display with numbering and reminders
-        lines_out: list[str] = [_format_numbered_line(no, content) for no, content in read_result.selected_lines]
-        if read_result.remaining_selected_beyond_cap > 0:
-            lines_out.append(f"... (more {read_result.remaining_selected_beyond_cap} lines are truncated)")
         read_result_str = "\n".join(lines_out)
-
-        # Update FileTracker with last modified time
-        _track_file_access(file_path)
+        _track_file_access(file_path, content_sha256=read_result.content_sha256)

         return model.ToolResultItem(status="success", output=read_result_str)
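Taken together, the read_tool.py changes above replace the old per-segment character statistics (and the hard "too large" error) with a streaming character budget plus a SHA-256 of the full file content. Below is a minimal, self-contained sketch of that behavior; the constant, result type, and function names are illustrative stand-ins, not the package's actual API:

```python
import hashlib
from dataclasses import dataclass

MAX_TOTAL_CHARS = 40_000  # illustrative stand-in for const.READ_MAX_CHARS


@dataclass
class SketchReadResult:
    total_lines: int
    selected_lines: list[tuple[int, str]]
    remaining_due_to_char_limit: int
    content_sha256: str


def read_with_char_budget(path: str, offset: int = 1) -> SketchReadResult:
    selected: list[tuple[int, str]] = []
    skipped_after_limit = 0
    chars = 0
    total = 0
    limit_hit = False
    hasher = hashlib.sha256()
    with open(path, encoding="utf-8", errors="replace") as f:
        for line_no, raw in enumerate(f, start=1):
            total = line_no
            # Hash every line, even ones outside the selection, so the digest
            # always covers the whole file content.
            hasher.update(raw.encode("utf-8"))
            if line_no < offset:
                continue
            if limit_hit:
                # Keep streaming to finish the hash and count what was skipped.
                skipped_after_limit += 1
                continue
            content = raw.rstrip("\n")
            chars += len(content) + 1
            selected.append((line_no, content))
            if chars > MAX_TOTAL_CHARS:
                limit_hit = True  # the crossing line is kept; the rest is counted
    return SketchReadResult(total, selected, skipped_after_limit, hasher.hexdigest())
```

The skipped count feeds the "... more lines truncated due to ... char limit" message, turning what used to be a hard error into a truncated-but-successful read.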
klaude_code/core/tool/file/write_tool.py

@@ -2,13 +2,13 @@ from __future__ import annotations

 import asyncio
 import contextlib
-import difflib
 import os
 from pathlib import Path

 from pydantic import BaseModel

-from klaude_code.core.tool.file._utils import file_exists, is_directory, read_text, write_text
+from klaude_code.core.tool.file._utils import file_exists, hash_text_sha256, is_directory, read_text, write_text
+from klaude_code.core.tool.file.diff_builder import build_structured_diff
 from klaude_code.core.tool.tool_abc import ToolABC, load_desc
 from klaude_code.core.tool.tool_context import get_current_file_tracker
 from klaude_code.core.tool.tool_registry import register
@@ -62,36 +62,52 @@ class WriteTool(ToolABC):

         file_tracker = get_current_file_tracker()
         exists = file_exists(file_path)
+        tracked_status: model.FileStatus | None = None

         if exists:
-            tracked_status: model.FileStatus | None = None
-            if file_tracker is not None:
-                tracked_status = file_tracker.get(file_path)
+            tracked_status = file_tracker.get(file_path) if file_tracker is not None else None
             if tracked_status is None:
                 return model.ToolResultItem(
                     status="error",
                     output=("File has not been read yet. Read it first before writing to it."),
                 )
-            try:
-                current_mtime = Path(file_path).stat().st_mtime
-            except Exception:
-                current_mtime = tracked_status.mtime
-            if current_mtime != tracked_status.mtime:
-                return model.ToolResultItem(
-                    status="error",
-                    output=(
-                        "File has been modified externally. Either by user or a linter. "
-                        "Read it first before writing to it."
-                    ),
-                )

-        # Capture previous content (if any) for diff generation
+        # Capture previous content (if any) for diff generation and external-change detection.
         before = ""
+        before_read_ok = False
         if exists:
             try:
                 before = await asyncio.to_thread(read_text, file_path)
+                before_read_ok = True
             except Exception:
                 before = ""
+                before_read_ok = False
+
+        # Re-check external modifications using content hash when available.
+        if before_read_ok and tracked_status is not None and tracked_status.content_sha256 is not None:
+            current_sha256 = hash_text_sha256(before)
+            if current_sha256 != tracked_status.content_sha256:
+                return model.ToolResultItem(
+                    status="error",
+                    output=(
+                        "File has been modified externally. Either by user or a linter. "
+                        "Read it first before writing to it."
+                    ),
+                )
+        elif tracked_status is not None:
+            # Backward-compat: old sessions only stored mtime, or we couldn't hash.
+            try:
+                current_mtime = Path(file_path).stat().st_mtime
+            except Exception:
+                current_mtime = tracked_status.mtime
+            if current_mtime != tracked_status.mtime:
+                return model.ToolResultItem(
+                    status="error",
+                    output=(
+                        "File has been modified externally. Either by user or a linter. "
+                        "Read it first before writing to it."
+                    ),
+                )

         try:
             await asyncio.to_thread(write_text, file_path, args.content)
@@ -102,21 +118,15 @@
         with contextlib.suppress(Exception):
             existing = file_tracker.get(file_path)
             is_mem = existing.is_memory if existing else False
-            file_tracker[file_path] = model.FileStatus(mtime=Path(file_path).stat().st_mtime, is_memory=is_mem)
+            file_tracker[file_path] = model.FileStatus(
+                mtime=Path(file_path).stat().st_mtime,
+                content_sha256=hash_text_sha256(args.content),
+                is_memory=is_mem,
+            )

         # Build diff between previous and new content
         after = args.content
-        diff_lines = list(
-            difflib.unified_diff(
-                before.splitlines(),
-                after.splitlines(),
-                fromfile=file_path,
-                tofile=file_path,
-                n=3,
-            )
-        )
-        diff_text = "\n".join(diff_lines)
-        ui_extra = model.DiffTextUIExtra(diff_text=diff_text)
+        ui_extra = build_structured_diff(before, after, file_path=file_path)

         message = f"File {'overwritten' if exists else 'created'} successfully at: {file_path}"
         return model.ToolResultItem(status="success", output=message, ui_extra=ui_extra)
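The write path now prefers the content hash recorded at read time over mtime when deciding whether the file changed underneath it. A rough sketch of that decision, assuming a status record shaped like the diff's model.FileStatus (all names below are stand-ins):

```python
import hashlib
from dataclasses import dataclass


def hash_text_sha256(text: str) -> str:
    # Mirrors the helper the diff imports from _utils (assumed behavior).
    return hashlib.sha256(text.encode("utf-8")).hexdigest()


@dataclass
class TrackedStatus:  # stand-in for model.FileStatus
    mtime: float
    content_sha256: str | None = None


def is_externally_modified(current_text: str, current_mtime: float, tracked: TrackedStatus) -> bool:
    # Prefer the hash: it ignores mtime-only churn (touch, checkout, copy)
    # and catches real edits even when the mtime happens to be unchanged.
    if tracked.content_sha256 is not None:
        return hash_text_sha256(current_text) != tracked.content_sha256
    # Fallback for entries recorded before hashes existed: compare mtimes.
    return current_mtime != tracked.mtime
```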
klaude_code/core/tool/shell/bash_tool.py

@@ -2,15 +2,18 @@ import asyncio
 import contextlib
 import os
 import re
+import shlex
 import signal
 import subprocess
 from pathlib import Path
+from typing import Any

 from pydantic import BaseModel

 from klaude_code import const
 from klaude_code.core.tool.shell.command_safety import is_safe_command
 from klaude_code.core.tool.tool_abc import ToolABC, load_desc
+from klaude_code.core.tool.tool_context import get_current_file_tracker
 from klaude_code.core.tool.tool_registry import register
 from klaude_code.protocol import llm_param, model, tools

@@ -116,13 +119,156 @@ class BashTool(ToolABC):
         }
     )

+def _hash_file_content_sha256(file_path: str) -> str | None:
+    try:
+        suffix = Path(file_path).suffix.lower()
+        if suffix in {".png", ".jpg", ".jpeg", ".gif", ".webp"}:
+            import hashlib
+
+            with open(file_path, "rb") as f:
+                return hashlib.sha256(f.read()).hexdigest()
+
+        import hashlib
+
+        hasher = hashlib.sha256()
+        with open(file_path, encoding="utf-8", errors="replace") as f:
+            for line in f:
+                hasher.update(line.encode("utf-8"))
+        return hasher.hexdigest()
+    except (FileNotFoundError, IsADirectoryError, OSError, PermissionError, UnicodeDecodeError):
+        return None
+
+def _resolve_in_dir(base_dir: str, path: str) -> str:
+    if os.path.isabs(path):
+        return os.path.abspath(path)
+    return os.path.abspath(os.path.join(base_dir, path))
+
+def _track_files_read(file_paths: list[str], *, base_dir: str) -> None:
+    file_tracker = get_current_file_tracker()
+    if file_tracker is None:
+        return
+    for p in file_paths:
+        abs_path = _resolve_in_dir(base_dir, p)
+        if not os.path.exists(abs_path) or os.path.isdir(abs_path):
+            continue
+        sha = _hash_file_content_sha256(abs_path)
+        if sha is None:
+            continue
+        existing = file_tracker.get(abs_path)
+        is_mem = existing.is_memory if existing else False
+        with contextlib.suppress(Exception):
+            file_tracker[abs_path] = model.FileStatus(
+                mtime=Path(abs_path).stat().st_mtime,
+                content_sha256=sha,
+                is_memory=is_mem,
+            )
+
+def _track_files_written(file_paths: list[str], *, base_dir: str) -> None:
+    # Same as read tracking, but intentionally kept separate for clarity.
+    _track_files_read(file_paths, base_dir=base_dir)
+
+def _track_mv(src_paths: list[str], dest_path: str, *, base_dir: str) -> None:
+    file_tracker = get_current_file_tracker()
+    if file_tracker is None:
+        return
+
+    abs_dest = _resolve_in_dir(base_dir, dest_path)
+    dest_is_dir = os.path.isdir(abs_dest)
+
+    for src in src_paths:
+        abs_src = _resolve_in_dir(base_dir, src)
+        abs_new = os.path.join(abs_dest, os.path.basename(abs_src)) if dest_is_dir else abs_dest
+
+        # Remove old entry if present.
+        existing = file_tracker.pop(abs_src, None)
+        is_mem = existing.is_memory if existing else False
+
+        if not os.path.exists(abs_new) or os.path.isdir(abs_new):
+            continue
+
+        sha = _hash_file_content_sha256(abs_new)
+        if sha is None:
+            continue
+        with contextlib.suppress(Exception):
+            file_tracker[abs_new] = model.FileStatus(
+                mtime=Path(abs_new).stat().st_mtime,
+                content_sha256=sha,
+                is_memory=is_mem,
+            )
+
+def _best_effort_update_file_tracker(command: str) -> None:
+    # Best-effort heuristics for common shell tools that access/modify files.
+    # We intentionally do not try to interpret complex shell scripts here.
+    try:
+        argv = shlex.split(command, posix=True)
+    except ValueError:
+        return
+    if not argv:
+        return
+
+    # Handle common patterns like: cd subdir && cat file
+    base_dir = os.getcwd()
+    while len(argv) >= 4 and argv[0] == "cd" and argv[2] == "&&":
+        dest = argv[1]
+        if dest != "-":
+            base_dir = _resolve_in_dir(base_dir, dest)
+        argv = argv[3:]
+    if not argv:
+        return
+
+    cmd0 = argv[0]
+    if cmd0 == "cat":
+        paths = [a for a in argv[1:] if a and not a.startswith("-") and a != "-"]
+        _track_files_read(paths, base_dir=base_dir)
+        return
+
+    if cmd0 == "sed":
+        # Support: sed [-i ...] 's/old/new/' file1 [file2 ...]
+        # and: sed -n 'Np' file
+        saw_script = False
+        file_paths: list[str] = []
+        for a in argv[1:]:
+            if not a:
+                continue
+            if a == "--":
+                continue
+            if a.startswith("-") and not saw_script:
+                continue
+            if not saw_script and (a.startswith("s/") or a.startswith("s|") or a.endswith("p")):
+                saw_script = True
+                continue
+            if saw_script and not a.startswith("-"):
+                file_paths.append(a)
+
+        if file_paths:
+            _track_files_written(file_paths, base_dir=base_dir)
+        return
+
+    if cmd0 == "mv":
+        # Support: mv [opts] src... dest
+        operands: list[str] = []
+        end_of_opts = False
+        for a in argv[1:]:
+            if not end_of_opts and a == "--":
+                end_of_opts = True
+                continue
+            if not end_of_opts and a.startswith("-"):
+                continue
+            operands.append(a)
+        if len(operands) < 2:
+            return
+        srcs = operands[:-1]
+        dest = operands[-1]
+        _track_mv(srcs, dest, base_dir=base_dir)
+        return
+
 async def _terminate_process(proc: asyncio.subprocess.Process) -> None:
     # Best-effort termination. Ensure we don't hang on cancellation.
     if proc.returncode is not None:
         return

     try:
-        if os.name == "posix" and proc.pid is not None:
+        if os.name == "posix":
             os.killpg(proc.pid, signal.SIGTERM)
         else:
             proc.terminate()
@@ -138,7 +284,7 @@

     # Escalate to hard kill if it didn't exit quickly.
     with contextlib.suppress(Exception):
-        if os.name == "posix" and proc.pid is not None:
+        if os.name == "posix":
             os.killpg(proc.pid, signal.SIGKILL)
         else:
             proc.kill()
@@ -148,7 +294,7 @@
     try:
         # Create a dedicated process group so we can terminate the whole tree.
        # (macOS/Linux support start_new_session; Windows does not.)
-        kwargs: dict[str, object] = {
+        kwargs: dict[str, Any] = {
             "stdin": asyncio.subprocess.DEVNULL,
             "stdout": asyncio.subprocess.PIPE,
             "stderr": asyncio.subprocess.PIPE,
@@ -184,6 +330,8 @@
         # Include stderr if there is useful diagnostics despite success
         if stderr.strip():
             output = (output + ("\n" if output else "")) + f"[stderr]\n{stderr}"
+
+        _best_effort_update_file_tracker(args.command)
         return model.ToolResultItem(
             status="success",
             output=output.strip(),
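The new _best_effort_update_file_tracker parses the command with shlex and only recognizes a few simple shapes (cat, sed, mv, optionally behind "cd DIR &&" prefixes). A small sketch of the cd-prefix handling for the cat case, using a hypothetical helper name:

```python
import os
import shlex


def files_read_by_cat(command: str, cwd: str) -> list[str]:
    """Hypothetical helper: which files would a plain `cat` command read?"""
    try:
        argv = shlex.split(command, posix=True)
    except ValueError:
        return []  # unbalanced quotes etc.: give up silently, as the tool does
    base_dir = cwd
    # Peel leading "cd DIR &&" segments, tracking the effective directory.
    # shlex leaves "&&" as its own token, so it can be matched positionally.
    while len(argv) >= 4 and argv[0] == "cd" and argv[2] == "&&":
        if argv[1] != "-":
            base_dir = os.path.abspath(os.path.join(base_dir, argv[1]))
        argv = argv[3:]
    if not argv or argv[0] != "cat":
        return []
    paths = [a for a in argv[1:] if a and not a.startswith("-") and a != "-"]
    # os.path.join ignores base_dir when a path is already absolute.
    return [os.path.abspath(os.path.join(base_dir, p)) for p in paths]


# files_read_by_cat("cd src && cat main.py utils.py", "/repo")
# -> ["/repo/src/main.py", "/repo/src/utils.py"]
```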
klaude_code/core/tool/web/mermaid_tool.md

@@ -17,5 +17,31 @@ Diagrams are especially valuable for visualizing:
 - Sequence and timing of operations
 - Decision trees and conditional logic

+# Syntax
+- ALWAYS wrap node labels in double quotes, especially when they contain spaces, special characters, or non-ASCII text
+- This applies to all node types: regular nodes, subgraph titles, and edge labels
+
+Examples:
+```mermaid
+graph LR
+    A["User Input"] --> B["Process Data"]
+    B --> C["Output Result"]
+```
+
+```mermaid
+flowchart TD
+    subgraph auth["Authentication Module"]
+        login["Login Service"]
+        oauth["OAuth Provider"]
+    end
+```
+
+```mermaid
+sequenceDiagram
+    participant client as "Web Client"
+    participant server as "API Server"
+    client ->> server: "Send Request"
+```
+
 # Styling
 - When defining custom classDefs, always define fill color, stroke color, and text color ("fill", "stroke", "color") explicitly
klaude_code/protocol/commands.py

@@ -4,7 +4,6 @@ from enum import Enum
 class CommandName(str, Enum):
     INIT = "init"
     DEBUG = "debug"
-    DIFF = "diff"
     HELP = "help"
     MODEL = "model"
     COMPACT = "compact"