klaude-code 2.10.1 → 2.10.3 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -102,12 +102,12 @@ provider_list:
         cost: {input: 1.75, output: 14, cache_read: 0.17}
 
       - model_name: kimi
-        model_id: moonshotai/kimi-k2-thinking
+        model_id: moonshotai/kimi-k2.5
         context_limit: 262144
-        provider_routing:
-          only:
-            - moonshotai/turbo
-        cost: {input: 0.6, output: 2.5, cache_read: 0.15}
+        # provider_routing:
+        #   only:
+        #     - moonshotai/turbo
+        cost: {input: 0.6, output: 3, cache_read: 0.1}
 
       - model_name: haiku
         model_id: anthropic/claude-haiku-4.5
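
For orientation, here is a sketch of what the kimi repricing means per request. It assumes, as is conventional for these `cost` maps, that figures are USD per million tokens and that cache reads are billed at the `cache_read` rate instead of the `input` rate; the helper and traffic numbers below are illustrative, not part of the package.

```python
# Illustrative cost arithmetic under the new kimi pricing (assumed USD / 1M tokens).
def request_cost(input_tokens: int, cached_tokens: int, output_tokens: int) -> float:
    INPUT, OUTPUT, CACHE_READ = 0.6, 3.0, 0.1  # values from the updated config above
    return (
        (input_tokens - cached_tokens) / 1e6 * INPUT
        + cached_tokens / 1e6 * CACHE_READ
        + output_tokens / 1e6 * OUTPUT
    )

# 100k-token prompt with 80k served from cache, 5k output -> ~$0.035
print(round(request_cost(100_000, 80_000, 5_000), 4))
```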
@@ -272,12 +272,12 @@ provider_list:
     model_list:
 
       - model_name: kimi
-        model_id: kimi-k2-thinking
+        model_id: kimi-k2.5
         context_limit: 262144
         thinking:
           type: enabled
           budget_tokens: 8192
-        cost: {input: 4, output: 16, cache_read: 1, currency: CNY}
+        cost: {input: 4, output: 21, cache_read: 0.7, currency: CNY}
 
 
   - provider_name: cerebras
@@ -130,12 +130,6 @@ def load_system_prompt(
 ) -> str:
     """Get system prompt content for the given model and sub-agent type."""
 
-    # For codex_oauth protocol, use dynamic prompts from GitHub (no additions).
-    if protocol == llm_param.LLMClientProtocol.CODEX_OAUTH:
-        from klaude_code.llm.openai_codex.prompt_sync import get_codex_instructions
-
-        return get_codex_instructions(model_name)
-
     # For antigravity protocol, use exact prompt without any additions.
     if protocol == llm_param.LLMClientProtocol.ANTIGRAVITY:
         return _load_prompt_by_path(ANTIGRAVITY_PROMPT_PATH)
@@ -107,11 +107,20 @@ def _tool_blocks_to_message(blocks: list[BetaToolResultBlockParam]) -> BetaMessa
     }
 
 
+def _model_supports_unsigned_thinking(model_name: str | None) -> bool:
+    """Check if the model supports thinking blocks without signature (e.g., kimi, deepseek)."""
+    if not model_name:
+        return False
+    model_lower = model_name.lower()
+    return "kimi" in model_lower or "deepseek" in model_lower
+
+
 def _assistant_message_to_message(msg: message.AssistantMessage, model_name: str | None) -> BetaMessageParam:
     content: list[BetaContentBlockParam] = []
     current_thinking_content: str | None = None
     native_thinking_parts, _ = split_thinking_parts(msg, model_name)
     native_thinking_ids = {id(part) for part in native_thinking_parts}
+    supports_unsigned = _model_supports_unsigned_thinking(model_name)
 
     def _degraded_thinking_block(text: str) -> BetaTextBlockParam | None:
         stripped = text.strip()
@@ -125,11 +134,18 @@ def _assistant_message_to_message(msg: message.AssistantMessage, model_name: str
             },
         )
 
-    def _flush_thinking_as_text_block() -> None:
+    def _flush_thinking() -> None:
         nonlocal current_thinking_content
         if current_thinking_content is None:
             return
-        if block := _degraded_thinking_block(current_thinking_content):
+        if supports_unsigned:
+            content.append(
+                cast(
+                    BetaContentBlockParam,
+                    {"type": "thinking", "thinking": current_thinking_content},
+                )
+            )
+        elif block := _degraded_thinking_block(current_thinking_content):
             content.append(block)
         current_thinking_content = None
 
@@ -156,9 +172,17 @@ def _assistant_message_to_message(msg: message.AssistantMessage, model_name: str
                 )
             )
             current_thinking_content = None
+        elif supports_unsigned:
+            content.append(
+                cast(
+                    BetaContentBlockParam,
+                    {"type": "thinking", "thinking": current_thinking_content or ""},
+                )
+            )
+            current_thinking_content = None
             continue
 
-        _flush_thinking_as_text_block()
+        _flush_thinking()
         if isinstance(part, message.TextPart):
             content.append(cast(BetaTextBlockParam, {"type": "text", "text": part.text}))
         elif isinstance(part, message.ToolCallPart):
@@ -182,7 +206,7 @@ def _assistant_message_to_message(msg: message.AssistantMessage, model_name: str
             )
         )
 
-    _flush_thinking_as_text_block()
+    _flush_thinking()
 
     return {"role": "assistant", "content": content}
 
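A condensed illustration of what `_flush_thinking` now emits, with stand-in data; the unsigned block shape is taken verbatim from the diff above, while the degraded-text shape is assumed from `_degraded_thinking_block`'s `BetaTextBlockParam` return type.

```python
# Illustrative only: the two serializations of buffered thinking text.
buffered = "step 1: read the config..."

# kimi/deepseek (supports_unsigned=True): keep a native thinking block,
# with no signature field attached.
unsigned_block = {"type": "thinking", "thinking": buffered}

# Other models: downgrade to a plain text block (the exact wrapping of the
# text may differ inside _degraded_thinking_block).
degraded_block = {"type": "text", "text": buffered}
```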
@@ -146,28 +146,6 @@ class CodexClient(LLMClientABC):
             )
         except (openai.OpenAIError, httpx.HTTPError) as e:
             error_message = f"{e.__class__.__name__} {e!s}"
-
-            # Check for invalid instruction error and invalidate prompt cache
-            if _is_invalid_instruction_error(e) and param.model_id:
-                _invalidate_prompt_cache_for_model(param.model_id)
-
             return error_llm_stream(metadata_tracker, error=error_message)
 
         return ResponsesLLMStream(stream, param=param, metadata_tracker=metadata_tracker)
-
-
-def _is_invalid_instruction_error(e: Exception) -> bool:
-    """Check if the error is related to invalid instructions."""
-    error_str = str(e).lower()
-    return "invalid instruction" in error_str or "invalid_instruction" in error_str
-
-
-def _invalidate_prompt_cache_for_model(model_id: str) -> None:
-    """Invalidate the cached prompt for a model to force refresh."""
-    from klaude_code.llm.openai_codex.prompt_sync import invalidate_cache
-
-    log_debug(
-        f"Invalidating prompt cache for model {model_id} due to invalid instruction error",
-        debug_type=DebugType.GENERAL,
-    )
-    invalidate_cache(model_id)
@@ -17,7 +17,7 @@ from klaude_code.llm.client import LLMClientABC, LLMStreamABC
 from klaude_code.llm.input_common import apply_config_defaults
 from klaude_code.llm.openai_compatible.input import convert_tool_schema
 from klaude_code.llm.openai_compatible.stream import OpenAILLMStream
-from klaude_code.llm.openrouter.input import convert_history_to_input, is_claude_model
+from klaude_code.llm.openrouter.input import convert_history_to_input, is_claude_model, is_xai_model
 from klaude_code.llm.openrouter.reasoning import ReasoningStreamHandler
 from klaude_code.llm.registry import register
 from klaude_code.llm.usage import MetadataTracker, error_llm_stream
@@ -70,6 +70,9 @@ def build_payload(
             f"{ANTHROPIC_BETA_FINE_GRAINED_TOOL_STREAMING},{ANTHROPIC_BETA_INTERLEAVED_THINKING}"
         )
 
+    if is_xai_model(param.model_id):
+        extra_body["plugins"] = [{"id": "web", "engine": "native"}]
+
     payload: CompletionCreateParamsStreaming = {
         "model": str(param.model_id),
         "tool_choice": "auto",
@@ -34,6 +34,12 @@ def is_gemini_model(model_name: str | None) -> bool:
     return model_name is not None and model_name.startswith("google/gemini")
 
 
+def is_xai_model(model_name: str | None) -> bool:
+    """Return True if the model name represents an xAI model."""
+
+    return model_name is not None and model_name.startswith("x-ai/")
+
+
 def _assistant_message_to_openrouter(
     msg: message.AssistantMessage, model_name: str | None
 ) -> chat.ChatCompletionMessageParam:
@@ -6,7 +6,7 @@ from klaude_code.tui.components.common import create_grid
 from klaude_code.tui.components.rich.theme import ThemeKey
 from klaude_code.tui.components.tools import render_path
 
-REMINDER_BULLET = ""
+REMINDER_BULLET = " +"
 
 
 def need_render_developer_message(e: events.DeveloperMessageEvent) -> bool:
@@ -9,8 +9,7 @@ def render_error(error_msg: Text) -> RenderableType:
     """Render error with X mark for error events."""
     grid = create_grid()
     error_msg.style = ThemeKey.ERROR
-    error_msg.overflow = "ellipsis"
-    error_msg.no_wrap = True
+    error_msg.overflow = "fold"
     grid.add_row(Text("✘", style=ThemeKey.ERROR_BOLD), error_msg)
     return grid
 
@@ -19,7 +18,6 @@ def render_tool_error(error_msg: Text) -> RenderableType:
     """Render error with indent for tool results."""
     grid = create_grid()
     error_msg.style = ThemeKey.ERROR
-    error_msg.overflow = "ellipsis"
-    error_msg.no_wrap = True
+    error_msg.overflow = "fold"
     grid.add_row(Text(" "), error_msg)
     return grid
@@ -32,9 +32,6 @@ def _render_task_metadata_block(
 
     # Second column: provider/model description / tokens / cost / …
     content = Text()
-    if metadata.provider is not None:
-        content.append_text(Text(metadata.provider.lower().replace(" ", "-"), style=ThemeKey.METADATA))
-        content.append_text(Text("/", style=ThemeKey.METADATA))
     content.append_text(Text(metadata.model_name, style=ThemeKey.METADATA))
     if metadata.description:
         content.append_text(Text(" ", style=ThemeKey.METADATA)).append_text(
@@ -129,7 +126,7 @@ def _render_task_metadata_block(
 
     if parts:
         content.append_text(Text(" ", style=ThemeKey.METADATA))
-        content.append_text(Text(" ", style=ThemeKey.METADATA).join(parts))
+        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM).join(parts))
 
     grid.add_row(mark, content)
     return grid
@@ -140,15 +137,14 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
     renderables: list[RenderableType] = []
 
     has_sub_agents = len(e.metadata.sub_agent_task_metadata) > 0
-    # Use an extra space for the main agent mark to align with two-character marks (├─, └─)
-    main_mark_text = "●"
+    main_mark_text = "•"
     main_mark = Text(main_mark_text, style=ThemeKey.METADATA)
 
     renderables.append(_render_task_metadata_block(e.metadata.main_agent, mark=main_mark, show_context_and_time=True))
 
     # Render each sub-agent metadata block
     for meta in e.metadata.sub_agent_task_metadata:
-        sub_mark = Text("  ", style=ThemeKey.METADATA)
+        sub_mark = Text(" ", style=ThemeKey.METADATA)
         renderables.append(_render_task_metadata_block(meta, mark=sub_mark, show_context_and_time=True))
 
     # Add total cost line when there are sub-agents
@@ -165,9 +161,8 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
 
         currency_symbol = "¥" if currency == "CNY" else "$"
         total_line = Text.assemble(
-            (" ", ThemeKey.METADATA),
-            (" Σ ", ThemeKey.METADATA),
-            ("total ", ThemeKey.METADATA),
+            (" ", ThemeKey.METADATA),
+            (" total ", ThemeKey.METADATA),
             (currency_symbol, ThemeKey.METADATA),
             (f"{total_cost:.4f}", ThemeKey.METADATA),
         )
@@ -609,7 +609,11 @@ class MarkdownStream:
 
         live_text_to_set: Text | None = None
         if not final and MARKDOWN_STREAM_LIVE_REPAINT_ENABLED and self._live_sink is not None:
-            # Only update live area after we have rendered at least one stable block
+            # Only update the live area after we have rendered at least one stable block.
+            #
+            # This keeps the bottom "live" region anchored to stable scrollback, and
+            # avoids showing a live frame that would later need to be retroactively
+            # re-rendered once stable content exists.
             if not self._stable_rendered_lines:
                 return
             # When nothing is stable yet, we still want to show incremental output.
@@ -262,8 +262,8 @@ def get_theme(theme: str | None = None) -> Themes:
         ThemeKey.ERROR_DIM.value: "dim " + palette.red,
         ThemeKey.INTERRUPT.value: palette.red,
         # USER_INPUT
-        ThemeKey.USER_INPUT.value: f"{palette.magenta} on {palette.user_message_background}",
-        ThemeKey.USER_INPUT_PROMPT.value: f"bold {palette.magenta} on {palette.user_message_background}",
+        ThemeKey.USER_INPUT.value: f"{palette.cyan} on {palette.user_message_background}",
+        ThemeKey.USER_INPUT_PROMPT.value: f"bold {palette.cyan} on {palette.user_message_background}",
         ThemeKey.USER_INPUT_AT_PATTERN.value: f"{palette.purple} on {palette.user_message_background}",
         ThemeKey.USER_INPUT_SLASH_COMMAND.value: f"bold {palette.blue} on {palette.user_message_background}",
         ThemeKey.USER_INPUT_SKILL.value: f"bold {palette.green} on {palette.user_message_background}",
@@ -89,7 +89,7 @@ def render_user_input(content: str) -> RenderableType:
 
     return Padding(
         Group(*renderables),
-        pad=(0, 1),
+        pad=(1, 1),
         style=ThemeKey.USER_INPUT,
         expand=False,
     )
@@ -673,34 +673,68 @@ class _AtFilesCompleter(Completer):
         all_files_lower = self._git_file_list_lower or []
         kn = keyword_norm
 
-        # Bound per-keystroke work: stop scanning once enough matches are found.
+        # Bound per-keystroke work.
+        #
+        # Important: When the keyword is common (e.g. "tools"), truncating the
+        # scan purely by number of matching *files* can accidentally hide valid
+        # directory completions that appear later in the git path order.
+        #
+        # Example: multiple */tools/ directories under different parents.
+        file_quota = max_results
+        dir_quota = max_results
+        scan_cap = max(2000, max_results * 200)
+
+        keyword_stripped = keyword_norm.strip("/")
+        keyword_basename = os.path.basename(keyword_stripped)
+        explicit_parent = "/" in keyword_stripped
+
+        def dir_matches_keyword(dir_path: str) -> bool:
+            if not keyword_basename:
+                return False
+            if explicit_parent:
+                # When user typed an explicit parent segment, match against the
+                # whole directory path (not just basename).
+                return kn in f"{dir_path}/".lower()
+            # Otherwise prioritize directories by basename match.
+            return keyword_basename in os.path.basename(dir_path).lower()
+
         matching_files: list[str] = []
+        dir_list: list[str] = []
+        dir_seen: set[str] = set()
         scan_truncated = False
+        scanned = 0
+
         for p, pl in zip(all_files, all_files_lower, strict=False):
-            if kn in pl:
-                matching_files.append(p)
-                if len(matching_files) >= max_results:
+            scanned += 1
+            if kn not in pl:
+                if scanned >= scan_cap and (matching_files or dir_list):
                     scan_truncated = True
                     break
+                continue
+
+            if len(matching_files) < file_quota:
+                matching_files.append(p)
 
-        # Also include parent directories of matching files so users can
-        # complete into a folder, similar to fd's directory results.
-        dir_candidates: set[str] = set()
-        for p in matching_files[: max_results * 3]:
+            # Collect matching parent directories, walking upwards until repo root.
+            # This allows completing into directories like "image/tools/" even
+            # when the matching file is nested deeper.
             parent = os.path.dirname(p)
             while parent and parent != ".":
-                dir_candidates.add(f"{parent}/")
+                if dir_matches_keyword(parent):
+                    cand = f"{parent}/"
+                    if cand not in dir_seen:
+                        dir_seen.add(cand)
+                        dir_list.append(cand)
+                        if len(dir_list) >= dir_quota:
+                            break
                 parent = os.path.dirname(parent)
 
-        dir_list = sorted(dir_candidates)
-        dir_truncated = False
-        if len(dir_list) > max_results:
-            dir_list = dir_list[:max_results]
-            dir_truncated = True
+            if len(matching_files) >= file_quota and len(dir_list) >= dir_quota:
+                scan_truncated = True
+                break
 
-        candidates = matching_files + dir_list
-        truncated = scan_truncated or dir_truncated
-        return candidates, truncated
+        candidates = dir_list + matching_files
+        return candidates, scan_truncated
 
     def _get_git_repo_root(self, cwd: Path) -> Path | None:
         if not self._has_cmd("git"):
@@ -28,6 +28,9 @@ from klaude_code.protocol.message import ImageFilePart
 
 IMAGE_SUFFIXES = frozenset({".png", ".jpg", ".jpeg", ".gif", ".webp"})
 
+# Claude API limit is 5MB, we use 4.5MB to have some margin
+MAX_IMAGE_SIZE_BYTES = 4_500_000
+
 IMAGE_MARKER_RE = re.compile(r'\[image (?P<path>"[^"]+"|[^\]]+)\]')
 
 
@@ -159,6 +162,127 @@ def _grab_clipboard_image(dest_path: Path) -> bool:
     return _grab_clipboard_image_linux(dest_path)
 
 
+# ---------------------------------------------------------------------------
+# Image resizing for size limits
+# ---------------------------------------------------------------------------
+
+
+def _get_image_dimensions_macos(path: Path) -> tuple[int, int] | None:
+    """Get image dimensions using sips on macOS."""
+    try:
+        result = subprocess.run(
+            ["sips", "-g", "pixelWidth", "-g", "pixelHeight", str(path)],
+            capture_output=True,
+            text=True,
+        )
+        if result.returncode != 0:
+            return None
+        width = height = 0
+        for line in result.stdout.splitlines():
+            if "pixelWidth" in line:
+                width = int(line.split(":")[-1].strip())
+            elif "pixelHeight" in line:
+                height = int(line.split(":")[-1].strip())
+        if width > 0 and height > 0:
+            return (width, height)
+    except (OSError, ValueError):
+        pass
+    return None
+
+
+def _resize_image_macos(path: Path, scale: float) -> bool:
+    """Resize image using sips on macOS. Modifies file in place."""
+    dims = _get_image_dimensions_macos(path)
+    if dims is None:
+        return False
+    new_width = max(1, int(dims[0] * scale))
+    try:
+        result = subprocess.run(
+            ["sips", "--resampleWidth", str(new_width), str(path)],
+            capture_output=True,
+        )
+        return result.returncode == 0
+    except OSError:
+        return False
+
+
+def _resize_image_linux(path: Path, scale: float) -> bool:
+    """Resize image using ImageMagick convert on Linux."""
+    if not shutil.which("convert"):
+        return False
+    percent = int(scale * 100)
+    try:
+        result = subprocess.run(
+            ["convert", str(path), "-resize", f"{percent}%", str(path)],
+            capture_output=True,
+        )
+        return result.returncode == 0
+    except OSError:
+        return False
+
+
+def _resize_image_windows(path: Path, scale: float) -> bool:
+    """Resize image using PowerShell on Windows."""
+    script = f'''
+    Add-Type -AssemblyName System.Drawing
+    $img = [System.Drawing.Image]::FromFile("{path}")
+    $newWidth = [int]($img.Width * {scale})
+    $newHeight = [int]($img.Height * {scale})
+    $bmp = New-Object System.Drawing.Bitmap($newWidth, $newHeight)
+    $graphics = [System.Drawing.Graphics]::FromImage($bmp)
+    $graphics.DrawImage($img, 0, 0, $newWidth, $newHeight)
+    $img.Dispose()
+    $bmp.Save("{path}", [System.Drawing.Imaging.ImageFormat]::Png)
+    $bmp.Dispose()
+    $graphics.Dispose()
+    Write-Output "ok"
+    '''
+    try:
+        result = subprocess.run(
+            ["powershell", "-Command", script],
+            capture_output=True,
+            text=True,
+        )
+        return result.returncode == 0 and "ok" in result.stdout
+    except OSError:
+        return False
+
+
+def _resize_image(path: Path, scale: float) -> bool:
+    """Resize image by scale factor. Modifies file in place."""
+    if sys.platform == "darwin":
+        return _resize_image_macos(path, scale)
+    elif sys.platform == "win32":
+        return _resize_image_windows(path, scale)
+    else:
+        return _resize_image_linux(path, scale)
+
+
+def _ensure_image_size_limit(path: Path, max_bytes: int = MAX_IMAGE_SIZE_BYTES) -> None:
+    """Resize image if it exceeds the size limit. Modifies file in place."""
+    try:
+        current_size = path.stat().st_size
+        if current_size <= max_bytes:
+            return
+
+        # Calculate scale factor based on size ratio
+        # We use sqrt because area scales with square of linear dimensions
+        scale = (max_bytes / current_size) ** 0.5
+        # Be a bit more aggressive to ensure we get under the limit
+        scale *= 0.9
+
+        for _ in range(5):  # Max 5 resize attempts
+            if not _resize_image(path, scale):
+                return
+            new_size = path.stat().st_size
+            if new_size <= max_bytes:
+                return
+            # Still too large, reduce more
+            scale = 0.8
+    except OSError:
+        pass
+
+
 def capture_clipboard_tag() -> str | None:
     """Capture an image from clipboard and return an [image ...] marker."""
 
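A worked example of the sqrt-based scale above, with illustrative numbers: a 9 MB capture against the 4.5 MB cap yields a first-pass scale of sqrt(4.5/9) * 0.9 ≈ 0.64 per side, about 41% of the original pixel area.

```python
# Worked example of the first-pass scale in _ensure_image_size_limit
# (numbers illustrative, not from the package).
current_size, max_bytes = 9_000_000, 4_500_000
scale = (max_bytes / current_size) ** 0.5 * 0.9
print(f"scale {scale:.3f} per side -> {scale ** 2:.2%} of original area")
# scale 0.636 per side -> 40.50% of original area
```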
@@ -174,6 +298,9 @@ def capture_clipboard_tag() -> str | None:
     if not _grab_clipboard_image(path):
         return None
 
+    # Resize if image exceeds size limit
+    _ensure_image_size_limit(path)
+
     return format_image_marker(str(path))
 
 
@@ -76,6 +76,60 @@ def create_key_bindings(
     term_program = os.environ.get("TERM_PROGRAM", "").lower()
     swallow_next_control_j = False
 
+    def _history_backward_cursor_to_start(buf: Buffer) -> None:
+        """Switch to previous history entry and place cursor at absolute start.
+
+        prompt_toolkit's default `Buffer.history_backward()` moves the cursor to
+        the end of the (possibly multi-line) history entry. That makes it hard
+        to keep pressing Up to continue cycling history, because subsequent Up
+        key presses start moving within the multi-line buffer.
+        """
+
+        try:
+            before = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            before = None
+
+        buf.history_backward()
+
+        try:
+            after = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            after = None
+
+        if before is not None and after is not None and before == after:
+            return
+
+        with contextlib.suppress(Exception):
+            buf.cursor_position = 0  # type: ignore[reportUnknownMemberType]
+
+    def _history_forward_cursor_to_end(buf: Buffer) -> None:
+        """Switch to next history entry and place cursor at absolute end.
+
+        prompt_toolkit's default `Buffer.history_forward()` moves the cursor to
+        the end of the *first* line. For our multiline REPL, it's more useful to
+        land at the end so that pressing Down keeps cycling through history.
+        """
+
+        try:
+            before = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            before = None
+
+        buf.history_forward()
+
+        try:
+            after = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            after = None
+
+        if before is not None and after is not None and before == after:
+            return
+
+        with contextlib.suppress(Exception):
+            text = buf.text  # type: ignore[reportUnknownMemberType]
+            buf.cursor_position = len(text)  # type: ignore[reportUnknownMemberType]
+
     def _is_bash_mode_text(text: str) -> bool:
         return text.startswith(("!", "！"))
 
@@ -151,6 +205,20 @@ def create_key_bindings(
         except Exception:
             return False
 
+    def _current_cursor_row() -> int:
+        try:
+            doc = get_app().current_buffer.document
+            return int(doc.cursor_position_row)
+        except Exception:
+            return 0
+
+    def _current_line_count() -> int:
+        try:
+            doc = get_app().current_buffer.document
+            return int(doc.line_count)
+        except Exception:
+            return 1
+
     def _move_cursor_visually_within_wrapped_line(event: KeyPressEvent, *, delta_visible_y: int) -> None:
         """Move the cursor Up/Down by one wrapped screen row, keeping column."""
 
@@ -493,6 +561,32 @@ def create_key_bindings(
     def _(event: KeyPressEvent) -> None:
         _move_cursor_visually_within_wrapped_line(event, delta_visible_y=1)
 
+    @kb.add(
+        "up",
+        filter=enabled
+        & ~has_completions
+        & ~is_searching
+        & Condition(lambda: not _can_move_cursor_visually_within_wrapped_line(delta_visible_y=-1))
+        & Condition(lambda: _current_cursor_row() == 0),
+        eager=True,
+    )
+    def _(event: KeyPressEvent) -> None:
+        """Up on first logical line: switch history and keep caret at start."""
+        _history_backward_cursor_to_start(event.current_buffer)
+
+    @kb.add(
+        "down",
+        filter=enabled
+        & ~has_completions
+        & ~is_searching
+        & Condition(lambda: not _can_move_cursor_visually_within_wrapped_line(delta_visible_y=1))
+        & Condition(lambda: _current_cursor_row() >= (_current_line_count() - 1)),
+        eager=True,
+    )
+    def _(event: KeyPressEvent) -> None:
+        """Down on last logical line: switch history and keep caret at end."""
+        _history_forward_cursor_to_end(event.current_buffer)
+
     @kb.add("c-j", filter=enabled)
     def _(event: KeyPressEvent) -> None:
         nonlocal swallow_next_control_j
@@ -617,11 +711,11 @@ def create_key_bindings(
     @kb.add("escape", "up", filter=enabled & ~has_completions)
     def _(event: KeyPressEvent) -> None:
         """Option+Up switches to previous history entry."""
-        event.current_buffer.history_backward()
+        _history_backward_cursor_to_start(event.current_buffer)
 
     @kb.add("escape", "down", filter=enabled & ~has_completions)
     def _(event: KeyPressEvent) -> None:
        """Option+Down switches to next history entry."""
-        event.current_buffer.history_forward()
+        _history_forward_cursor_to_end(event.current_buffer)
 
     return kb
@@ -61,7 +61,7 @@ COMPLETION_SELECTED_DARK_BG = "ansigreen"
 COMPLETION_SELECTED_LIGHT_BG = "ansigreen"
 COMPLETION_SELECTED_UNKNOWN_BG = "ansigreen"
 COMPLETION_MENU = "ansibrightblack"
-INPUT_PROMPT_STYLE = "ansimagenta bold"
+INPUT_PROMPT_STYLE = "ansicyan bold"
 INPUT_PROMPT_BASH_STYLE = "ansigreen bold"
 PLACEHOLDER_TEXT_STYLE_DARK_BG = "fg:#5a5a5a"
 PLACEHOLDER_TEXT_STYLE_LIGHT_BG = "fg:#7a7a7a"
@@ -736,6 +736,7 @@ class DisplayStateMachine:
             and not e.has_structured_output
             and s.assistant_char_count == 0
             and e.task_result.strip()
+            and e.task_result.strip().lower() not in {"task cancelled", "task canceled"}
         ):
             cmds.append(StartAssistantStream(session_id=e.session_id))
             cmds.append(AppendAssistant(session_id=e.session_id, content=e.task_result))
@@ -734,6 +734,9 @@ class TUICommandRenderer:
                     self._sub_agent_thinking_buffers[session_id] = ""
                 elif not self._thinking_stream.is_active:
                     self._thinking_stream.start(self._new_thinking_mdstream())
+                    if not self._replay_mode:
+                        self._thinking_stream.append("Thinking… \n")
+                        self._thinking_stream.render(transform=c_thinking.normalize_thinking_content)
             case AppendThinking(session_id=session_id, content=content):
                 if self.is_sub_agent_session(session_id):
                     if session_id in self._sub_agent_thinking_buffers:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: klaude-code
-Version: 2.10.1
+Version: 2.10.3
 Summary: Minimal code agent CLI
 Requires-Dist: anthropic>=0.66.0
 Requires-Dist: chardet>=5.2.0
@@ -30,7 +30,7 @@ klaude_code/cli/main.py,sha256=bQijkLDTYtjFUdU1t-dGCzOilo7mcotIQefleaF9or8,13072
 klaude_code/cli/self_update.py,sha256=1xdG9ifvRZQDSx6RAtSSgXmw9hZNXMLvqC2zu4bS-GY,2622
 klaude_code/config/__init__.py,sha256=Qe1BeMekBfO2-Zd30x33lB70hdM1QQZGrp4DbWSQ-II,353
 klaude_code/config/assets/__init__.py,sha256=uMUfmXT3I-gYiI-HVr1DrE60mx5cY1o8V7SYuGqOmvY,32
-klaude_code/config/assets/builtin_config.yaml,sha256=EYGgBMAVtgUKGHZcfkm4sCfSB1-fBx83q10MGVnNz8I,9121
+klaude_code/config/assets/builtin_config.yaml,sha256=5S5dEox3cr5bLWk6N3z6njIP5RLe-sHE2vAvkKPBCqo,9112
 klaude_code/config/builtin_config.py,sha256=OG5VERUHo3tSojgFXfNDV6pAHNOh3kO-xFHpvTr-cpc,1786
 klaude_code/config/config.py,sha256=otBvsUkvI-2fpZzx9fO6SPnCek7FI7kufvAnGIQqTz8,27084
 klaude_code/config/model_matcher.py,sha256=3IlLU5h3NDh_bURbCW-PV027C3irG3hyitwj1cj99Ig,6179
@@ -39,7 +39,7 @@ klaude_code/config/thinking.py,sha256=5uVM0cFUJZBBsBGGdPG-tjdiNwZ-GFeWOBBWIdSPFv
 klaude_code/const.py,sha256=VCK3HgZJZO6jcYz6U2rcHS7W-n4oyKYg9AC6eTB4HIQ,11575
 klaude_code/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/core/agent.py,sha256=GrIg22nfoq1c90UHyEfU_bh46vtXTCo4bLezb-3mGNo,4120
-klaude_code/core/agent_profile.py,sha256=8fEiRI2emEFVlwsCEtNaZ90UUp1A7MKhzOnzQK1IOGI,11507
+klaude_code/core/agent_profile.py,sha256=wfde6zMez5abh5O04e7BzBlbLhHwd23pn4FBijOHxQg,11231
 klaude_code/core/bash_mode.py,sha256=BUy2_uOcJ8bO89t_QCwtw6GlHt7vd1t62sN5iWk-UuQ,9250
 klaude_code/core/compaction/AGENTS.md,sha256=KZR5lxe4jVAbT5K9PxbZcHWI1UwsppbGmxIfCdHYr7Q,3684
 klaude_code/core/compaction/__init__.py,sha256=CvidYx3sX0IZAa4pifX9jrQSkg4Nib7PKrcaOHswF60,329
@@ -111,7 +111,7 @@ klaude_code/core/turn.py,sha256=TK4-ZGmRo_qa21XRklGfEMnfhFGR20wnKjnvlf31HBQ,1888
 klaude_code/llm/__init__.py,sha256=b4AsqnrMIs0a5qR_ti6rZcHwFzAReTwOW96EqozEoSo,287
 klaude_code/llm/anthropic/__init__.py,sha256=PWETvaeNAAX3ue0ww1uRUIxTJG0RpWiutkn7MlwKxBs,67
 klaude_code/llm/anthropic/client.py,sha256=RpYw4UQnhLzAsp6i-FU7cDW4deqngdAoQaTPGnCeO5U,17346
-klaude_code/llm/anthropic/input.py,sha256=ObcRuQEwdiFoI7bjBzHv5bClgIYEw23bJMgHudT92C4,9923
+klaude_code/llm/anthropic/input.py,sha256=_ng3WtRWbyhmMuFDNCnJoEi-k-tEOy9ukGbzvvgZ3vQ,10819
 klaude_code/llm/antigravity/__init__.py,sha256=TuK_k4mJpBQVBCfhRFQvVLeGtHRU8_2wXO2lRC-OB9o,71
 klaude_code/llm/antigravity/client.py,sha256=XnOBw7QPNA8NRrZIdNrOD1qLrvWLyI82PL7kIolgdic,20120
 klaude_code/llm/antigravity/input.py,sha256=WblkpS4DxnqHLb9oXGS3uDlgbzdP4GKWmC9L2HEtRwk,9463
@@ -127,8 +127,7 @@ klaude_code/llm/image.py,sha256=TJzduNtSmoJLizTUDo0EjbovqOeaXkvwnRyMfuzVVUQ,4512
 klaude_code/llm/input_common.py,sha256=htV8YHDlQ7fx2qW_D9e_BgxWlyBmZwAtNxnM15uj9gQ,7563
 klaude_code/llm/json_stable.py,sha256=FrgJbJ33YrAkzl5iPOWLolFjYFZMuT97sbOegeXN7GE,1288
 klaude_code/llm/openai_codex/__init__.py,sha256=Dx2MxrZEHQZ8gsJmM05dE4qqak4F0zjwfXd5v8pIXaE,139
-klaude_code/llm/openai_codex/client.py,sha256=bpTKtp3Qum-c6OBQAZHJ9LKSzgToHOig5M03568aX6I,6248
-klaude_code/llm/openai_codex/prompt_sync.py,sha256=u9RBuVpfRJGB5cdpI_OJkJVSL33s4pZNxbiCIm1l70Y,8767
+klaude_code/llm/openai_codex/client.py,sha256=wIOfCsB1F_b9D6ymq-nyCIXAuuBTz6-CMUxD8NiuvkQ,5403
 klaude_code/llm/openai_compatible/__init__.py,sha256=ACGpnki7k53mMcCl591aw99pm9jZOZk0ghr7atOfNps,81
 klaude_code/llm/openai_compatible/client.py,sha256=oiRmqiqNJ-clLk6pK1LTOAyx1svmNBg1sNuT8uN3l5g,4717
 klaude_code/llm/openai_compatible/input.py,sha256=UfEu2c866QuVMFTY3GEPhuKV7QcCgxBwNYqGT88E_8E,2834
@@ -137,8 +136,8 @@ klaude_code/llm/openai_responses/__init__.py,sha256=WsiyvnNiIytaYcaAqNiB8GI-5zcp
 klaude_code/llm/openai_responses/client.py,sha256=qmQNcHZUBzvjQZZQkWh2f3oUYC9Lbx_dZD7KwnXbPPI,16362
 klaude_code/llm/openai_responses/input.py,sha256=eEeSHlGYY5VYGpj2DIVm8qjH2cXo-JaODT_SjFtS6Nw,9264
 klaude_code/llm/openrouter/__init__.py,sha256=_As8lHjwj6vapQhLorZttTpukk5ZiCdhFdGT38_ASPo,69
-klaude_code/llm/openrouter/client.py,sha256=P_mDQK4_i1MLF0jK4p_bKhh15ACXgw6Ie0rUrtbfsdM,5738
-klaude_code/llm/openrouter/input.py,sha256=Z_Cf6TnMZ5KQNJ0E5IIDCKK2OWlzi8IW0S5A72BBGT0,6176
+klaude_code/llm/openrouter/client.py,sha256=pBQG3JIftEyDpWNo_rT7cDaTAmeUxTktyRRTcZNq9Hg,5858
+klaude_code/llm/openrouter/input.py,sha256=VqKPWCUb-BlFwNs1uldDD_dGPJzwskG301uULaqm958,6363
 klaude_code/llm/openrouter/reasoning.py,sha256=u7ccfnGxJ4Ws8P3X5FW91d8HXie29JjeWz0hZ1r0oFg,3320
 klaude_code/llm/partial_message.py,sha256=-sjlpV-et4ViBtBpdtihK5QBjAlwS47-mBpVbRQPP3s,142
 klaude_code/llm/registry.py,sha256=oYObtCgWQxL2eFfmvpYLqMhKvYD3JoszzpvuC08sRQQ,2177
@@ -204,35 +203,35 @@ klaude_code/tui/components/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 klaude_code/tui/components/bash_syntax.py,sha256=a8JMn6rFlJXcx7-WbsxIVwqtWQoI9krLUkj0imVC20I,7632
 klaude_code/tui/components/command_output.py,sha256=SnNiWdMAK37wd6PtfSWsLW2UutNfkpTKDbFmw2wu3LA,3688
 klaude_code/tui/components/common.py,sha256=dhUYLVVOSKxg5GCoS4eyUeKZ3E8Kpt4nqft4njuvPaI,4698
-klaude_code/tui/components/developer.py,sha256=m6gcnLyLoSSw3wpwYQZ_yzaT9zgXRgNzTpjZt8Yo21o,5488
+klaude_code/tui/components/developer.py,sha256=jJPx3F6T8pBc1ubgJaAeOE5dpapBi8jfXDZA_PZdQxg,5487
 klaude_code/tui/components/diffs.py,sha256=vwllnYBxC5xGjfKU3uIkCjcupr9nrjRjvvj0tg0_MQA,3085
-klaude_code/tui/components/errors.py,sha256=fSojNfRceB6eE7cyJHfwGt5Ru0OYp63fCJ-W6-3SSYs,799
+klaude_code/tui/components/errors.py,sha256=uqxVDqZLV6-Gvk46JWPFy2dc_bXHxgg_FEwrUX71jyg,733
 klaude_code/tui/components/mermaid_viewer.py,sha256=zI1FBuX6Ionx38KqkzhOIQ9tFzd7REPbjW1iqSiNrec,3086
-klaude_code/tui/components/metadata.py,sha256=JzHMi3fr_YWQmachSRukBTYO3iHvPFAXaA3f2SRyD54,7286
+klaude_code/tui/components/metadata.py,sha256=bkN2WOHFkNSEh5Ajup_976WvcU83lR6lNG03MaH9YYg,6944
 klaude_code/tui/components/rich/__init__.py,sha256=zEZjnHR3Fnv_sFMxwIMjoJfwDoC4GRGv3lHJzAGRq_o,236
 klaude_code/tui/components/rich/cjk_wrap.py,sha256=eMqBxftUtll7zrytUb9WtJ6naYLyax0W4KJRpGwWulM,7602
 klaude_code/tui/components/rich/code_panel.py,sha256=SOdyfHBZNB4gAWIbnN_enhHB1EWxw8Hxiafx6yjwdJo,5544
 klaude_code/tui/components/rich/live.py,sha256=xiMT6dPsxM_jaazddKrV9CMJQWwpe2t9OdjffHvo1JU,2821
-klaude_code/tui/components/rich/markdown.py,sha256=vas08cxz8fEU2PlTUNrgmF-zIXtFoMxP-Jefc8FSGuE,25276
+klaude_code/tui/components/rich/markdown.py,sha256=8BXTyR0UMQ6Ut62HMeFPTB_8QLAvYDtk9SbGZ6H1hkY,25518
 klaude_code/tui/components/rich/quote.py,sha256=u6sBmGdp0ckaZLw_XgJk7iHW4zxnWikUaB3GX2tkhlM,5375
 klaude_code/tui/components/rich/status.py,sha256=hSvMwEguF2DfHH3ISR0bmDg58zAnM3CTJLcRff_rtrg,14791
-klaude_code/tui/components/rich/theme.py,sha256=cp7PrhxIgOLu_AjHBOf3wCZewzngOAZDFQQmiO0sAbY,17084
+klaude_code/tui/components/rich/theme.py,sha256=eg9EIcjp2qysQi1s_D2r7QYtclOe3CMXJjf_bwTXAGM,17078
 klaude_code/tui/components/sub_agent.py,sha256=8XTWsTi9mfbNLMD8SZ__nZQmBf81rW-NWpuOT-sFbv8,4723
 klaude_code/tui/components/thinking.py,sha256=yVzY7BbcdDwB4XKi9i2sp3cKREirvmlMiEID74b-5_0,955
 klaude_code/tui/components/tools.py,sha256=MqW-_a4gtMnCR5N8APJYtN2TfSvuUdvo2WFEGm3Qa9A,27327
-klaude_code/tui/components/user_input.py,sha256=fYvdoc3gMyv90l2sQvSs4ha0wILu71rBAcwbzQPMMVc,3659
+klaude_code/tui/components/user_input.py,sha256=u3i6y3aKvUv7at39eeT2mEi0cgOsrha91FH-lUsUetw,3659
 klaude_code/tui/components/welcome.py,sha256=Ahkhg0dsSqy17pKLOp_5UZWn9vysr68T3Y-jB40yWsA,5303
 klaude_code/tui/display.py,sha256=VbBmNjGxp6R4oLjC0FrjXkr0INWSjfXlWq1mSVFGyMU,3933
 klaude_code/tui/input/AGENTS.md,sha256=2RBLz7H0JbUJv6OBzeadLOlGUF5EBqvtwTGBf6nZuN0,1633
 klaude_code/tui/input/__init__.py,sha256=wLbjqBrvP6fmbGtbKe9Wp12yxhse0faVLOxtoWua_1E,353
-klaude_code/tui/input/completers.py,sha256=MJO1nBq0V5jDbGw_o4Ab5WLVD1ns5plJRI3cIYnGfHs,33154
+klaude_code/tui/input/completers.py,sha256=vs2Lq0_PIO1FszV968gJobeqptM5Jz4YCv-MpPqHakE,34559
 klaude_code/tui/input/drag_drop.py,sha256=oyKtrHCyUiGiMLEXpsDTnTnAKJ1_xrvVkrASOiG8O4g,3974
-klaude_code/tui/input/images.py,sha256=ft2AaOg1Figdm1t_NNoBCGdp20silYXGw-m9XKDd9GU,6996
-klaude_code/tui/input/key_bindings.py,sha256=Gpc-VhQh-h431cnetheV0HF9QGcFyy82QrHWUAYF-LA,26633
+klaude_code/tui/input/images.py,sha256=dfM_BvlQE6F8BYEr2YBNTsWKd57_Jx-2wgFtv5ZSXSk,11202
+klaude_code/tui/input/key_bindings.py,sha256=6oxWdTdklFqLDOk6ZpyKijgrnQO4B9hSqgMfT2kdjXs,30032
 klaude_code/tui/input/paste.py,sha256=kELg5jC0WdBXWHJUsEjIhZ67KCvHMbN1XzyGmevVSNM,1888
-klaude_code/tui/input/prompt_toolkit.py,sha256=aE1pVK1G96DWERc2APIXq4WmSPOdJDRHTgnyyZXp_u4,30663
-klaude_code/tui/machine.py,sha256=iRTBNx22eozmT9gyya9QbpDDcmqst07bUAa2vWMDZAA,32482
-klaude_code/tui/renderer.py,sha256=E9SSuxWlQDMA2_4ENZHpwfB54ZVpkFMJYPe_WQSzq0E,34652
+klaude_code/tui/input/prompt_toolkit.py,sha256=gohHzvAWp7Wh06-VjxTZpPojPq_0lVW-KeoTUPRsgSQ,30660
+klaude_code/tui/machine.py,sha256=STwhoBw_HwpkDHDts7CrwtUN211OYaySQ0jy8r7fa08,32579
+klaude_code/tui/renderer.py,sha256=b59_Pr17seKOcQlHDEH4CMWKNWzdpMzmUhqUDqLvQyU,34884
 klaude_code/tui/runner.py,sha256=ZADAH28Iu1DU-KDCggEyvJiM_LTbN1sjPEaQsuBNTbc,13111
 klaude_code/tui/terminal/__init__.py,sha256=GIMnsEcIAGT_vBHvTlWEdyNmAEpruyscUA6M_j3GQZU,1412
 klaude_code/tui/terminal/color.py,sha256=6SJR2RA8cqJINNoRz65w0HL3x9g46ydIvDOGWMeNnQU,7195
@@ -250,7 +249,7 @@ klaude_code/ui/debug_mode.py,sha256=ZvqbOx4c_rUerMbEZzOfcbNf9leqEDFjqJUlALtzF9Y,
 klaude_code/ui/terminal/__init__.py,sha256=5OeAzr994r8-peWsLON0iXsAvJ2pexwMp36JY7FKGDc,179
 klaude_code/ui/terminal/title.py,sha256=lCk1dKk7fIe5Fb-FRU9P4ktVEfBmT3ac3wICYmC4mGE,1229
 klaude_code/update.py,sha256=QER816AZe9u3RhRvP0Z37Jh2Ch5RLy9PREyDsI0e1dA,4480
-klaude_code-2.10.1.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
-klaude_code-2.10.1.dist-info/entry_points.txt,sha256=kkXIXedaTOtjXPr2rVjRVVXZYlFUcBHELaqmyVlWUFA,92
-klaude_code-2.10.1.dist-info/METADATA,sha256=zM4Cjq5SylCPMJsKpmsr0b0eIknIojYMQPSE3ypB_6U,10120
-klaude_code-2.10.1.dist-info/RECORD,,
+klaude_code-2.10.3.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
+klaude_code-2.10.3.dist-info/entry_points.txt,sha256=kkXIXedaTOtjXPr2rVjRVVXZYlFUcBHELaqmyVlWUFA,92
+klaude_code-2.10.3.dist-info/METADATA,sha256=p1r3t9GoNu-eBxdkTC3cKWiS7kAbXF2BXyD1ALViTm0,10120
+klaude_code-2.10.3.dist-info/RECORD,,
@@ -1,237 +0,0 @@
1
- """Dynamic prompt synchronization from OpenAI Codex GitHub repository."""
2
-
3
- import json
4
- import time
5
- from functools import cache
6
- from importlib.resources import files
7
- from pathlib import Path
8
- from typing import Any, Literal
9
-
10
- import httpx
11
-
12
- from klaude_code.log import DebugType, log_debug
13
-
14
- GITHUB_API_RELEASES = "https://api.github.com/repos/openai/codex/releases/latest"
15
- GITHUB_HTML_RELEASES = "https://github.com/openai/codex/releases/latest"
16
- GITHUB_RAW_BASE = "https://raw.githubusercontent.com/openai/codex"
17
-
18
- CACHE_DIR = Path.home() / ".klaude" / "codex-prompts"
19
- CACHE_TTL_SECONDS = 24 * 60 * 60 # 24 hours
20
-
21
- type ModelFamily = Literal["gpt-5.2-codex", "codex-max", "codex", "gpt-5.2", "gpt-5.1"]
22
-
23
- PROMPT_FILES: dict[ModelFamily, str] = {
24
- "gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
25
- "codex-max": "gpt-5.1-codex-max_prompt.md",
26
- "codex": "gpt_5_codex_prompt.md",
27
- "gpt-5.2": "gpt_5_2_prompt.md",
28
- "gpt-5.1": "gpt_5_1_prompt.md",
29
- }
30
-
31
- CACHE_FILES: dict[ModelFamily, str] = {
32
- "gpt-5.2-codex": "gpt-5.2-codex-instructions.md",
33
- "codex-max": "codex-max-instructions.md",
34
- "codex": "codex-instructions.md",
35
- "gpt-5.2": "gpt-5.2-instructions.md",
36
- "gpt-5.1": "gpt-5.1-instructions.md",
37
- }
38
-
39
-
40
- @cache
41
- def _load_bundled_prompt(prompt_path: str) -> str:
42
- """Load bundled prompt from package resources."""
43
- return files("klaude_code.core").joinpath(prompt_path).read_text(encoding="utf-8").strip()
44
-
45
-
46
- class CacheMetadata:
47
- def __init__(self, etag: str | None, tag: str, last_checked: int, url: str):
48
- self.etag = etag
49
- self.tag = tag
50
- self.last_checked = last_checked
51
- self.url = url
52
-
53
- def to_dict(self) -> dict[str, str | int | None]:
54
- return {
55
- "etag": self.etag,
56
- "tag": self.tag,
57
- "last_checked": self.last_checked,
58
- "url": self.url,
59
- }
60
-
61
- @classmethod
62
- def from_dict(cls, data: dict[str, object]) -> "CacheMetadata":
63
- etag = data.get("etag")
64
- last_checked = data.get("last_checked")
65
- return cls(
66
- etag=etag if isinstance(etag, str) else None,
67
- tag=str(data.get("tag", "")),
68
- last_checked=int(last_checked) if isinstance(last_checked, int | float) else 0,
69
- url=str(data.get("url", "")),
70
- )
71
-
72
-
73
- def get_model_family(model: str) -> ModelFamily:
74
- """Determine model family from model name."""
75
- if "gpt-5.2-codex" in model or "gpt 5.2 codex" in model:
76
- return "gpt-5.2-codex"
77
- if "codex-max" in model:
78
- return "codex-max"
79
- if "codex" in model or model.startswith("codex-"):
80
- return "codex"
81
- if "gpt-5.2" in model:
82
- return "gpt-5.2"
83
- return "gpt-5.1"
84
-
85
-
86
- def _get_latest_release_tag(client: httpx.Client) -> str:
87
- """Get latest release tag from GitHub."""
88
- try:
89
- response = client.get(GITHUB_API_RELEASES)
90
- if response.status_code == 200:
91
- data: dict[str, Any] = response.json()
92
- tag_name: Any = data.get("tag_name")
93
- if isinstance(tag_name, str):
94
- return tag_name
95
- except httpx.HTTPError:
96
- pass
97
-
98
- # Fallback: follow redirect from releases/latest
99
- response = client.get(GITHUB_HTML_RELEASES, follow_redirects=True)
100
- if response.status_code == 200:
101
- final_url = str(response.url)
102
- if "/tag/" in final_url:
103
- parts = final_url.split("/tag/")
104
- if len(parts) > 1 and "/" not in parts[-1]:
105
- return parts[-1]
106
-
107
- raise RuntimeError("Failed to determine latest release tag from GitHub")
108
-
109
-
110
- def _load_cache_metadata(meta_file: Path) -> CacheMetadata | None:
111
- if not meta_file.exists():
112
- return None
113
- try:
114
- data = json.loads(meta_file.read_text())
115
- return CacheMetadata.from_dict(data)
116
- except (json.JSONDecodeError, ValueError):
117
- return None
118
-
119
-
120
- def _save_cache_metadata(meta_file: Path, metadata: CacheMetadata) -> None:
121
- meta_file.parent.mkdir(parents=True, exist_ok=True)
122
- meta_file.write_text(json.dumps(metadata.to_dict(), indent=2))
123
-
124
-
125
- def get_codex_instructions(model: str = "gpt-5.1-codex", force_refresh: bool = False) -> str:
126
- """Get Codex instructions for the given model.
127
-
128
- Args:
129
- model: Model name to get instructions for.
130
- force_refresh: If True, bypass cache TTL and fetch fresh instructions.
131
-
132
- Returns:
133
- The Codex system prompt instructions.
134
- """
135
- model_family = get_model_family(model)
136
- prompt_file = PROMPT_FILES[model_family]
137
- cache_file = CACHE_DIR / CACHE_FILES[model_family]
138
- meta_file = CACHE_DIR / f"{CACHE_FILES[model_family].replace('.md', '-meta.json')}"
139
-
140
- # Check cache unless force refresh
141
- if not force_refresh:
142
- metadata = _load_cache_metadata(meta_file)
143
- if metadata and cache_file.exists():
144
- age = int(time.time()) - metadata.last_checked
145
- if age < CACHE_TTL_SECONDS:
146
- log_debug(f"Using cached {model_family} instructions (age: {age}s)", debug_type=DebugType.GENERAL)
147
- return cache_file.read_text()
148
-
149
- try:
150
- with httpx.Client(timeout=30.0) as client:
151
- latest_tag = _get_latest_release_tag(client)
152
- instructions_url = f"{GITHUB_RAW_BASE}/{latest_tag}/codex-rs/core/{prompt_file}"
153
-
154
- # Load existing metadata for conditional request
155
- metadata = _load_cache_metadata(meta_file)
156
- headers: dict[str, str] = {}
157
-
158
- # Only use ETag if tag matches (different release = different content)
159
- if metadata and metadata.tag == latest_tag and metadata.etag:
160
- headers["If-None-Match"] = metadata.etag
161
-
162
- response = client.get(instructions_url, headers=headers)
163
-
164
- if response.status_code == 304 and cache_file.exists():
165
- # Not modified, update last_checked and return cached
166
- if metadata:
167
- metadata.last_checked = int(time.time())
168
- _save_cache_metadata(meta_file, metadata)
169
- log_debug(f"Codex {model_family} instructions not modified", debug_type=DebugType.GENERAL)
170
- return cache_file.read_text()
171
-
172
- if response.status_code == 200:
173
- instructions = response.text
174
- new_etag = response.headers.get("etag")
175
-
176
- # Save to cache
177
- cache_file.parent.mkdir(parents=True, exist_ok=True)
178
- cache_file.write_text(instructions)
179
- _save_cache_metadata(
180
- meta_file,
181
- CacheMetadata(
182
- etag=new_etag,
183
- tag=latest_tag,
184
- last_checked=int(time.time()),
185
- url=instructions_url,
186
- ),
187
- )
188
-
189
- log_debug(f"Updated {model_family} instructions from GitHub", debug_type=DebugType.GENERAL)
190
- return instructions
191
-
192
- raise RuntimeError(f"HTTP {response.status_code}")
193
-
194
- except Exception as e:
195
- log_debug(f"Failed to fetch {model_family} instructions: {e}", debug_type=DebugType.GENERAL)
196
-
197
- # Fallback to cached version
198
- if cache_file.exists():
199
- log_debug(f"Using cached {model_family} instructions (fallback)", debug_type=DebugType.GENERAL)
200
- return cache_file.read_text()
201
-
202
- # Last resort: use bundled prompt
203
- bundled_path = _get_bundled_prompt_path(model_family)
204
- if bundled_path:
205
- log_debug(f"Using bundled {model_family} instructions (fallback)", debug_type=DebugType.GENERAL)
206
- return _load_bundled_prompt(bundled_path)
207
-
208
- raise RuntimeError(f"No Codex instructions available for {model_family}") from e
209
-
210
-
211
- def _get_bundled_prompt_path(model_family: ModelFamily) -> str | None:
212
- """Get bundled prompt path for model family."""
213
- if model_family == "gpt-5.2-codex":
214
- return "prompts/prompt-codex-gpt-5-2-codex.md"
215
- if model_family == "gpt-5.2":
216
- return "prompts/prompt-codex-gpt-5-2.md"
217
- if model_family in ("codex", "codex-max", "gpt-5.1"):
218
- return "prompts/prompt-codex.md"
219
- return None
220
-
221
-
222
- def invalidate_cache(model: str | None = None) -> None:
223
- """Invalidate cached instructions to force refresh on next access.
224
-
225
- Args:
226
- model: If provided, only invalidate cache for this model's family.
227
- If None, invalidate all cached instructions.
228
- """
229
- if model:
230
- model_family = get_model_family(model)
231
- meta_file = CACHE_DIR / f"{CACHE_FILES[model_family].replace('.md', '-meta.json')}"
232
- if meta_file.exists():
233
- meta_file.unlink()
234
- else:
235
- if CACHE_DIR.exists():
236
- for meta_file in CACHE_DIR.glob("*-meta.json"):
237
- meta_file.unlink()