klaude-code 2.10.0__py3-none-any.whl → 2.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,6 +12,7 @@ from klaude_code.core.agent import Agent
 from klaude_code.core.agent_profile import (
     DefaultModelProfileProvider,
     VanillaModelProfileProvider,
+    WebModelProfileProvider,
 )
 from klaude_code.core.executor import Executor
 from klaude_code.core.manager import build_llm_clients
@@ -27,6 +28,7 @@ class AppInitConfig:
     model: str | None
     debug: bool
     vanilla: bool
+    web: bool = False
     debug_filters: set[DebugType] | None = None
 
 
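Note that `web` needs a default here: assuming `AppInitConfig` is a dataclass, it is declared after the required fields but before the already-defaulted `debug_filters`, and every field following the first defaulted one must also carry a default; the default also keeps existing constructor calls working. A minimal stand-in mirroring only the fields visible above:

```python
from dataclasses import dataclass


@dataclass
class AppInitConfig:
    # Stand-in with just the fields shown in the hunk above; the real
    # class may carry more.
    model: str | None
    debug: bool
    vanilla: bool
    web: bool = False
    debug_filters: set | None = None


cfg = AppInitConfig(model=None, debug=False, vanilla=False)
assert cfg.web is False  # older call sites keep working unchanged
```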
@@ -74,6 +76,8 @@ async def initialize_app_components(
 
     if init_config.vanilla:
         model_profile_provider = VanillaModelProfileProvider()
+    elif init_config.web:
+        model_profile_provider = WebModelProfileProvider(config=config)
     else:
         model_profile_provider = DefaultModelProfileProvider(config=config)
 
klaude_code/cli/main.py CHANGED
@@ -200,6 +200,11 @@ def main_callback(
         help="Image generation mode (alias for --model banana)",
         rich_help_panel="LLM",
     ),
+    web: bool = typer.Option(
+        False,
+        "--web",
+        help="Enable web tools (WebFetch, WebSearch) for the main agent",
+    ),
     version: bool = typer.Option(
         False,
         "--version",
@@ -218,6 +223,10 @@ def main_callback(
         log(("Error: --banana cannot be combined with --vanilla", "red"))
         raise typer.Exit(2)
 
+    if vanilla and web:
+        log(("Error: --web cannot be combined with --vanilla", "red"))
+        raise typer.Exit(2)
+
     resume_by_id_value = resume_by_id.strip() if resume_by_id is not None else None
     if resume_by_id_value == "":
         log(("Error: --resume <id> cannot be empty", "red"))
@@ -347,6 +356,7 @@ def main_callback(
         model=chosen_model,
         debug=debug_enabled,
         vanilla=vanilla,
+        web=web,
         debug_filters=debug_filters,
     )
 
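The three `cli/main.py` hunks above add the flag, reject the `--vanilla` combination, and thread the value into `AppInitConfig`. A minimal standalone reproduction of the flag-exclusivity pattern (hypothetical app, not the real klaude-code CLI):

```python
import typer

app = typer.Typer()


@app.callback(invoke_without_command=True)
def main(
    vanilla: bool = typer.Option(False, "--vanilla"),
    web: bool = typer.Option(False, "--web", help="Enable web tools"),
) -> None:
    # Reject incompatible flags up front and exit with code 2, as above.
    if vanilla and web:
        typer.echo("Error: --web cannot be combined with --vanilla", err=True)
        raise typer.Exit(2)


if __name__ == "__main__":
    app()
```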
@@ -57,22 +57,14 @@ provider_list:
           reasoning_summary: concise
         cost: {input: 1.75, output: 14, cache_read: 0.17}
 
-      - model_name: gpt-5.2-fast
-        model_id: gpt-5.2
-        context_limit: 400000
-        verbosity: low
+      - model_name: gpt-5.2-codex
+        model_id: gpt-5.2-codex
         thinking:
-          reasoning_effort: none
-        cost: {input: 1.75, output: 14, cache_read: 0.17}
-
-      - model_name: gpt-5.1-codex-max
-        model_id: gpt-5.1-codex-max
-        max_tokens: 128000
+          reasoning_effort: high
+          reasoning_summary: auto
         context_limit: 400000
-        thinking:
-          reasoning_effort: medium
-          reasoning_summary: concise
-        cost: {input: 1.25, output: 10, cache_read: 0.13}
+        max_tokens: 128000
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
 
 
   - provider_name: openrouter
@@ -80,6 +72,15 @@ provider_list:
     api_key: ${OPENROUTER_API_KEY}
     model_list:
 
+      - model_name: gpt-5.2-codex
+        model_id: gpt-5.2-codex
+        thinking:
+          reasoning_effort: high
+          reasoning_summary: auto
+        context_limit: 400000
+        max_tokens: 128000
+        cost: {input: 1.75, output: 14, cache_read: 0.17}
+
       - model_name: gpt-5.2-high
         model_id: openai/gpt-5.2
         max_tokens: 128000
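Net effect of the two config hunks: `gpt-5.2-fast` and `gpt-5.1-codex-max` give way to a single `gpt-5.2-codex` entry, which is also mirrored into the openrouter provider. The `cost` mappings pair each model with pricing; assuming the figures are USD per million tokens (the file does not state the unit), a sketch of turning an entry into a dollar estimate:

```python
def estimate_cost_usd(
    cost: dict[str, float],
    input_tokens: int,
    cached_tokens: int,
    output_tokens: int,
) -> float:
    """Toy estimate from a `cost: {input, output, cache_read}` mapping."""
    uncached = max(input_tokens - cached_tokens, 0)
    total = (
        uncached * cost["input"]
        + cached_tokens * cost.get("cache_read", cost["input"])
        + output_tokens * cost["output"]
    )
    return total / 1_000_000


# gpt-5.2-codex: 150k input tokens (50k of them cached) plus 8k output
print(estimate_cost_usd({"input": 1.75, "output": 14, "cache_read": 0.17}, 150_000, 50_000, 8_000))
```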
@@ -130,12 +130,6 @@ def load_system_prompt(
 ) -> str:
     """Get system prompt content for the given model and sub-agent type."""
 
-    # For codex_oauth protocol, use dynamic prompts from GitHub (no additions).
-    if protocol == llm_param.LLMClientProtocol.CODEX_OAUTH:
-        from klaude_code.llm.openai_codex.prompt_sync import get_codex_instructions
-
-        return get_codex_instructions(model_name)
-
     # For antigravity protocol, use exact prompt without any additions.
     if protocol == llm_param.LLMClientProtocol.ANTIGRAVITY:
         return _load_prompt_by_path(ANTIGRAVITY_PROMPT_PATH)
@@ -305,3 +299,26 @@ class VanillaModelProfileProvider(ModelProfileProvider):
         if output_schema:
             return with_structured_output(profile, output_schema)
         return profile
+
+
+class WebModelProfileProvider(DefaultModelProfileProvider):
+    """Provider that adds web tools to the main agent."""
+
+    def build_profile(
+        self,
+        llm_client: LLMClientABC,
+        sub_agent_type: tools.SubAgentType | None = None,
+        *,
+        output_schema: dict[str, Any] | None = None,
+    ) -> AgentProfile:
+        profile = super().build_profile(llm_client, sub_agent_type, output_schema=output_schema)
+        # Only add web tools for main agent (not sub-agents)
+        if sub_agent_type is None:
+            web_tools = get_tool_schemas([tools.WEB_FETCH, tools.WEB_SEARCH])
+            return AgentProfile(
+                llm_client=profile.llm_client,
+                system_prompt=profile.system_prompt,
+                tools=[*profile.tools, *web_tools],
+                reminders=profile.reminders,
+            )
+        return profile
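`WebModelProfileProvider` keys off `sub_agent_type is None` to augment only the top-level agent. A hedged usage sketch (the `config`/`llm_client` wiring is assumed; the names come from the hunks above):

```python
provider = WebModelProfileProvider(config=config)

# Main agent: profile comes back with WebFetch and WebSearch appended.
main_profile = provider.build_profile(llm_client)

# Sub-agents keep the default toolset from DefaultModelProfileProvider.
image_profile = provider.build_profile(llm_client, tools.IMAGE_GEN)
```

Rebuilding a fresh `AgentProfile` rather than appending to `profile.tools` in place avoids mutating the object the parent class returned, at the cost of re-listing its fields.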
@@ -17,7 +17,7 @@ You are a web research subagent that searches and fetches web content to provide
 - HTML pages are automatically converted to Markdown
 - JSON responses are auto-formatted with indentation
 - Other text content returned as-is
-- **Content is always saved to a local file** - path shown in `[Web content saved to ...]` at output start
+- **Content is always saved to a local file** - path shown in `[Full content saved to ...]` at output start
 
 ## Tool Usage Strategy
 
@@ -54,7 +54,7 @@ Balance efficiency with thoroughness. For open-ended questions (e.g., "recommend
 ## Response Guidelines
 
 - Only your last message is returned to the main agent
-- Include the file path from `[Web content saved to ...]` so the main agent can access full content
+- Include the file path from `[Full content saved to ...]` so the main agent can access full content
 - **DO NOT copy full web page content** - the main agent can read the saved files directly
 - Provide a concise summary/analysis of key findings
 - Lead with the most recent info for evolving topics
@@ -227,11 +227,11 @@ class HeadTailOffloadStrategy(OffloadStrategy):
         if self._should_offload(needs_truncation):
             offloaded_path = self._save_to_file(output, tool_call)
 
-            # Prefer line-based truncation if line limit exceeded
-            if needs_line_truncation:
-                truncated_output, hidden = self._truncate_by_lines(output, lines, offloaded_path)
-            else:
+            # Prefer char-based truncation if char limit exceeded (stricter limit)
+            if needs_char_truncation:
                 truncated_output, hidden = self._truncate_by_chars(output, offloaded_path)
+            else:
+                truncated_output, hidden = self._truncate_by_lines(output, lines, offloaded_path)
 
             return OffloadResult(
                 output=truncated_output,
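The swap changes which branch wins when both limits are exceeded: checking characters first guarantees the stricter bound is honored. A toy, self-contained illustration of the head/tail idea (limits and helper invented, not the real `HeadTailOffloadStrategy` internals):

```python
MAX_CHARS = 4_000
MAX_LINES = 100


def head_tail_truncate(output: str) -> str:
    # Character limit first: it is the stricter constraint, so when both
    # limits are exceeded the result still fits within MAX_CHARS.
    if len(output) > MAX_CHARS:
        half = MAX_CHARS // 2
        return output[:half] + "\n... (truncated) ...\n" + output[-half:]
    lines = output.splitlines()
    if len(lines) > MAX_LINES:
        half = MAX_LINES // 2
        return "\n".join(lines[:half] + ["... (truncated) ..."] + lines[-half:])
    return output
```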
@@ -1,8 +1,9 @@
 Fetch content from a URL and return it in a readable format.
 
 The tool automatically processes the response based on Content-Type:
+
 - HTML pages are converted to Markdown for easier reading
 - JSON responses are formatted with indentation
 - Markdown and other text content is returned as-is
 
-Content is always saved to a local file. The file path is shown at the start of the output in `[Web content saved to ...]` format. For large content that gets truncated, you can read the saved file directly.
+Content is always saved to a local file. The file path is shown at the start of the output in `[Full content saved to ...]` format. For large content that gets truncated, you can read the saved file directly.
@@ -235,7 +235,7 @@ class WebFetchTool(ToolABC):
         text = _decode_content(data, charset)
         processed = _process_content(content_type, text)
         saved_path = _save_text_content(url, processed)
-        output = f"[Web content saved to {saved_path}]\n\n{processed}" if saved_path else processed
+        output = f"[Full content saved to {saved_path}]\n\n{processed}" if saved_path else processed
 
         return message.ToolResultMessage(
             status="success",
klaude_code/core/turn.py CHANGED
@@ -196,8 +196,7 @@ class TurnExecutor:
         ):
             # Discard partial message if it only contains thinking parts
             has_non_thinking = any(
-                not isinstance(part, message.ThinkingTextPart)
-                for part in self._turn_result.assistant_message.parts
+                not isinstance(part, message.ThinkingTextPart) for part in self._turn_result.assistant_message.parts
             )
             if has_non_thinking:
                 session_ctx.append_history([self._turn_result.assistant_message])
@@ -146,28 +146,6 @@ class CodexClient(LLMClientABC):
             )
         except (openai.OpenAIError, httpx.HTTPError) as e:
             error_message = f"{e.__class__.__name__} {e!s}"
-
-            # Check for invalid instruction error and invalidate prompt cache
-            if _is_invalid_instruction_error(e) and param.model_id:
-                _invalidate_prompt_cache_for_model(param.model_id)
-
             return error_llm_stream(metadata_tracker, error=error_message)
 
         return ResponsesLLMStream(stream, param=param, metadata_tracker=metadata_tracker)
-
-
-def _is_invalid_instruction_error(e: Exception) -> bool:
-    """Check if the error is related to invalid instructions."""
-    error_str = str(e).lower()
-    return "invalid instruction" in error_str or "invalid_instruction" in error_str
-
-
-def _invalidate_prompt_cache_for_model(model_id: str) -> None:
-    """Invalidate the cached prompt for a model to force refresh."""
-    from klaude_code.llm.openai_codex.prompt_sync import invalidate_cache
-
-    log_debug(
-        f"Invalidating prompt cache for model {model_id} due to invalid instruction error",
-        debug_type=DebugType.GENERAL,
-    )
-    invalidate_cache(model_id)
@@ -17,7 +17,7 @@ from klaude_code.llm.client import LLMClientABC, LLMStreamABC
 from klaude_code.llm.input_common import apply_config_defaults
 from klaude_code.llm.openai_compatible.input import convert_tool_schema
 from klaude_code.llm.openai_compatible.stream import OpenAILLMStream
-from klaude_code.llm.openrouter.input import convert_history_to_input, is_claude_model
+from klaude_code.llm.openrouter.input import convert_history_to_input, is_claude_model, is_xai_model
 from klaude_code.llm.openrouter.reasoning import ReasoningStreamHandler
 from klaude_code.llm.registry import register
 from klaude_code.llm.usage import MetadataTracker, error_llm_stream
@@ -70,6 +70,9 @@ def build_payload(
             f"{ANTHROPIC_BETA_FINE_GRAINED_TOOL_STREAMING},{ANTHROPIC_BETA_INTERLEAVED_THINKING}"
         )
 
+    if is_xai_model(param.model_id):
+        extra_body["plugins"] = [{"id": "web", "engine": "native"}]
+
     payload: CompletionCreateParamsStreaming = {
         "model": str(param.model_id),
         "tool_choice": "auto",
@@ -34,6 +34,12 @@ def is_gemini_model(model_name: str | None) -> bool:
     return model_name is not None and model_name.startswith("google/gemini")
 
 
+def is_xai_model(model_name: str | None) -> bool:
+    """Return True if the model name represents an xAI model."""
+
+    return model_name is not None and model_name.startswith("x-ai/")
+
+
 def _assistant_message_to_openrouter(
     msg: message.AssistantMessage, model_name: str | None
 ) -> chat.ChatCompletionMessageParam:
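Taken together, the two openrouter hunks opt xAI models into OpenRouter's native web plugin. `is_xai_model` is a plain prefix check, so the behavior reduces to (model id below is hypothetical):

```python
assert is_xai_model("x-ai/grok-4") is True   # hypothetical model id
assert is_xai_model("openai/gpt-5.2") is False
assert is_xai_model(None) is False

# For matching models, build_payload now merges this into extra_body:
extra_body = {"plugins": [{"id": "web", "engine": "native"}]}
```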
@@ -539,6 +539,21 @@ __KLAUDE_CODE__</textarea
           </svg>
           <span>SVG</span>
         </button>
+        <button class="tool-btn" id="btn-download-png" title="Download PNG">
+          <svg
+            width="16"
+            height="16"
+            viewBox="0 0 24 24"
+            fill="none"
+            stroke="currentColor"
+            stroke-width="2"
+          >
+            <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"></path>
+            <polyline points="7 10 12 15 17 10"></polyline>
+            <line x1="12" y1="15" x2="12" y2="3"></line>
+          </svg>
+          <span>PNG</span>
+        </button>
       </div>
     </div>
 
@@ -570,6 +585,7 @@ __KLAUDE_CODE__</textarea
       zoomOut: document.getElementById("btn-zoom-out"),
       reset: document.getElementById("btn-reset"),
       download: document.getElementById("btn-download"),
+      downloadPng: document.getElementById("btn-download-png"),
       copy: document.getElementById("btn-copy-code"),
       collapse: document.getElementById("btn-collapse"),
       expand: document.getElementById("btn-expand"),
@@ -894,6 +910,75 @@ __KLAUDE_CODE__</textarea
      URL.revokeObjectURL(url);
    };
 
+    els.btns.downloadPng.onclick = async () => {
+      const svg = els.canvas.querySelector("svg");
+      if (!svg) return;
+
+      const clone = svg.cloneNode(true);
+
+      const bbox = svg.getBBox();
+      const width = bbox.width + 40;
+      const height = bbox.height + 40;
+
+      clone.setAttribute("width", width);
+      clone.setAttribute("height", height);
+      clone.setAttribute("xmlns", "http://www.w3.org/2000/svg");
+      clone.setAttribute("xmlns:xlink", "http://www.w3.org/1999/xlink");
+
+      // Remove foreignObject elements (they cause tainted canvas)
+      clone.querySelectorAll("foreignObject").forEach((fo) => {
+        const text = fo.textContent || "";
+        const parent = fo.parentNode;
+        if (parent) {
+          const textEl = document.createElementNS("http://www.w3.org/2000/svg", "text");
+          textEl.textContent = text;
+          textEl.setAttribute("font-family", "sans-serif");
+          textEl.setAttribute("font-size", "14");
+          parent.replaceChild(textEl, fo);
+        }
+      });
+
+      // Add white background rect
+      const bgRect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
+      bgRect.setAttribute("width", "100%");
+      bgRect.setAttribute("height", "100%");
+      bgRect.setAttribute("fill", "white");
+      clone.insertBefore(bgRect, clone.firstChild);
+
+      const svgData = new XMLSerializer().serializeToString(clone);
+      const svgBase64 = btoa(unescape(encodeURIComponent(svgData)));
+      const dataUrl = "data:image/svg+xml;base64," + svgBase64;
+
+      const img = new Image();
+      img.onload = () => {
+        const scale = 2;
+        const canvas = document.createElement("canvas");
+        canvas.width = width * scale;
+        canvas.height = height * scale;
+
+        const ctx = canvas.getContext("2d");
+        ctx.fillStyle = "white";
+        ctx.fillRect(0, 0, canvas.width, canvas.height);
+        ctx.scale(scale, scale);
+        ctx.drawImage(img, 0, 0);
+
+        canvas.toBlob((blob) => {
+          const pngUrl = URL.createObjectURL(blob);
+          const a = document.createElement("a");
+          a.href = pngUrl;
+          a.download = "diagram.png";
+          document.body.appendChild(a);
+          a.click();
+          a.remove();
+          URL.revokeObjectURL(pngUrl);
+        }, "image/png");
+      };
+      img.onerror = (e) => {
+        console.error("PNG export failed:", e);
+      };
+      img.src = dataUrl;
+    };
+
     els.btns.copy.onclick = async () => {
       try {
         await navigator.clipboard.writeText(els.textarea.value);
@@ -47,13 +47,16 @@ def _render_task_metadata_block(
     if metadata.usage is not None:
         # Tokens: ↑37k ◎5k ↓907 ∿45k ⌗ 100
         token_text = Text()
+        input_tokens = max(metadata.usage.input_tokens - metadata.usage.cached_tokens, 0)
+        output_tokens = max(metadata.usage.output_tokens - metadata.usage.reasoning_tokens, 0)
+
         token_text.append("↑", style=ThemeKey.METADATA)
-        token_text.append(format_number(metadata.usage.input_tokens), style=ThemeKey.METADATA)
+        token_text.append(format_number(input_tokens), style=ThemeKey.METADATA)
         if metadata.usage.cached_tokens > 0:
             token_text.append(" ◎", style=ThemeKey.METADATA)
             token_text.append(format_number(metadata.usage.cached_tokens), style=ThemeKey.METADATA)
         token_text.append(" ↓", style=ThemeKey.METADATA)
-        token_text.append(format_number(metadata.usage.output_tokens), style=ThemeKey.METADATA)
+        token_text.append(format_number(output_tokens), style=ThemeKey.METADATA)
         if metadata.usage.reasoning_tokens > 0:
             token_text.append(" ∿", style=ThemeKey.METADATA)
             token_text.append(format_number(metadata.usage.reasoning_tokens), style=ThemeKey.METADATA)
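The `max(..., 0)` split changes what the arrow columns mean: ↑ now shows input minus cached tokens and ↓ shows output minus reasoning tokens, so the four columns no longer double-count. A worked example with illustrative numbers:

```python
usage = {
    "input_tokens": 42_000,
    "cached_tokens": 5_000,
    "output_tokens": 1_200,
    "reasoning_tokens": 900,
}

shown_input = max(usage["input_tokens"] - usage["cached_tokens"], 0)       # 37_000 -> ↑37k
shown_output = max(usage["output_tokens"] - usage["reasoning_tokens"], 0)  # 300 -> ↓300
# Cached (◎5k) and reasoning (∿900) keep their own columns, so ↑ + ◎ equals
# the raw input total and ↓ + ∿ equals the raw output total.
```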
@@ -609,6 +609,9 @@ class MarkdownStream:
 
         live_text_to_set: Text | None = None
         if not final and MARKDOWN_STREAM_LIVE_REPAINT_ENABLED and self._live_sink is not None:
+            # Only update live area after we have rendered at least one stable block
+            if not self._stable_rendered_lines:
+                return
             # When nothing is stable yet, we still want to show incremental output.
             # Apply the mark only for the first (all-live) frame so it stays anchored
             # to the first visible line of the full message.
@@ -270,10 +270,10 @@ def get_theme(theme: str | None = None) -> Themes:
         # ASSISTANT
         ThemeKey.ASSISTANT_MESSAGE_MARK.value: "bold",
         # METADATA
-        ThemeKey.METADATA.value: palette.blue,
-        ThemeKey.METADATA_DIM.value: "dim " + palette.blue,
-        ThemeKey.METADATA_BOLD.value: "bold " + palette.blue,
-        ThemeKey.METADATA_ITALIC.value: "italic " + palette.blue,
+        ThemeKey.METADATA.value: palette.grey1,
+        ThemeKey.METADATA_DIM.value: "dim " + palette.grey1,
+        ThemeKey.METADATA_BOLD.value: "bold " + palette.grey1,
+        ThemeKey.METADATA_ITALIC.value: "italic " + palette.grey1,
         # STATUS
         ThemeKey.STATUS_SPINNER.value: palette.blue,
         ThemeKey.STATUS_TEXT.value: palette.blue,
@@ -351,8 +351,8 @@ def get_theme(theme: str | None = None) -> Themes:
         # it is used while rendering assistant output.
         "markdown.thinking": "italic " + palette.grey2,
         "markdown.thinking.tag": palette.grey2,
-        "markdown.code.border": palette.grey3,
-        "markdown.code.fence": palette.grey3,
+        "markdown.code.border": palette.grey2,
+        "markdown.code.fence": palette.grey2,
         "markdown.code.fence.title": palette.grey1,
         # Used by ThinkingMarkdown when rendering `<thinking>` blocks.
         "markdown.code.block": palette.grey1,
@@ -379,8 +379,9 @@ def get_theme(theme: str | None = None) -> Themes:
         "markdown.strong": "italic " + palette.grey1,
         "markdown.code": palette.grey1 + " italic on " + palette.code_background,
         "markdown.code.block": palette.grey2,
-        "markdown.code.fence": palette.grey3,
-        "markdown.code.border": palette.grey3,
+        "markdown.code.fence": palette.grey2,
+        "markdown.code.fence.title": palette.grey1,
+        "markdown.code.border": palette.grey2,
         "markdown.thinking.tag": palette.grey2 + " dim",
         "markdown.h1": "bold reverse",
         "markdown.h1.border": palette.grey3,
@@ -26,38 +26,3 @@ def normalize_thinking_content(content: str) -> str:
     text = text.replace("**\n\n", "** \n")
 
     return text
-
-
-def extract_last_bold_header(text: str) -> str | None:
-    """Extract the latest complete bold header ("**…**") from text.
-
-    We treat a bold segment as a "header" only if it appears at the beginning
-    of a line (ignoring leading whitespace). This avoids picking up incidental
-    emphasis inside paragraphs.
-
-    Returns None if no complete bold segment is available yet.
-    """
-
-    last: str | None = None
-    i = 0
-    while True:
-        start = text.find("**", i)
-        if start < 0:
-            break
-
-        line_start = text.rfind("\n", 0, start) + 1
-        if text[line_start:start].strip():
-            i = start + 2
-            continue
-
-        end = text.find("**", start + 2)
-        if end < 0:
-            break
-
-        inner = " ".join(text[start + 2 : end].split())
-        if inner and "\n" not in inner:
-            last = inner
-
-        i = end + 2
-
-    return last
@@ -4,6 +4,7 @@ from typing import Any, cast
 
 from rich import box
 from rich.console import Group, RenderableType
+from rich.padding import Padding
 from rich.panel import Panel
 from rich.style import Style
 from rich.text import Text
@@ -166,7 +167,6 @@ def render_bash_tool_call(arguments: str) -> RenderableType:
     if isinstance(command, str) and command.strip():
         cmd_str = command.strip()
         highlighted = highlight_bash_command(cmd_str)
-        highlighted.stylize(ThemeKey.CODE_BACKGROUND)
 
         display_line_count = len(highlighted.plain.splitlines())
 
@@ -189,7 +189,8 @@ def render_bash_tool_call(arguments: str) -> RenderableType:
                 highlighted.append(f" {timeout_ms // 1000}s", style=ThemeKey.TOOL_TIMEOUT)
             else:
                 highlighted.append(f" {timeout_ms}ms", style=ThemeKey.TOOL_TIMEOUT)
-        return _render_tool_call_tree(mark=MARK_BASH, tool_name=tool_name, details=highlighted)
+        padded = Padding(highlighted, pad=0, style=ThemeKey.CODE_BACKGROUND, expand=False)
+        return _render_tool_call_tree(mark=MARK_BASH, tool_name=tool_name, details=padded)
     else:
         summary = Text("", ThemeKey.TOOL_PARAM)
         if isinstance(timeout_ms, int):
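Instead of calling `stylize` on the highlighted `Text` (removed in the previous hunk), the command is now wrapped in a zero-pad `Padding` whose `style` supplies the background, leaving the syntax-highlight spans untouched. A minimal self-contained sketch of the same idea with a stand-in style:

```python
from rich.console import Console
from rich.padding import Padding
from rich.text import Text

console = Console()
command = Text("echo hello", style="bold green")
# pad=0 keeps the layout identical; the container paints the background.
console.print(Padding(command, pad=0, style="on grey15", expand=False))
```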
@@ -25,7 +25,7 @@ def render_at_and_skill_patterns(
     available_skill_names: set[str] | None = None,
 ) -> Text:
     """Render text with highlighted @file and $skill patterns."""
-    result = Text(text, style=other_style)
+    result = Text(text, style=other_style, overflow="fold")
     for match in INLINE_RENDER_PATTERN.finditer(text):
         skill_name = match.group(1)
         if skill_name is None:
@@ -79,6 +79,7 @@ def render_user_input(content: str) -> RenderableType:
                 render_at_and_skill_patterns(splits[1], available_skill_names=available_skill_names)
                 if len(splits) > 1
                 else Text(""),
+                overflow="fold",
             )
             renderables.append(line_text)
             continue
@@ -100,4 +100,4 @@ class TUIDisplay(DisplayABC):
         with contextlib.suppress(Exception):
             self._renderer.spinner_stop()
         with contextlib.suppress(Exception):
-            self._renderer.stop_bottom_live()
\ No newline at end of file
+            self._renderer.stop_bottom_live()
@@ -76,6 +76,60 @@ def create_key_bindings(
     term_program = os.environ.get("TERM_PROGRAM", "").lower()
     swallow_next_control_j = False
 
+    def _history_backward_cursor_to_start(buf: Buffer) -> None:
+        """Switch to previous history entry and place cursor at absolute start.
+
+        prompt_toolkit's default `Buffer.history_backward()` moves the cursor to
+        the end of the (possibly multi-line) history entry. That makes it hard
+        to keep pressing Up to continue cycling history, because subsequent Up
+        key presses start moving within the multi-line buffer.
+        """
+
+        try:
+            before = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            before = None
+
+        buf.history_backward()
+
+        try:
+            after = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            after = None
+
+        if before is not None and after is not None and before == after:
+            return
+
+        with contextlib.suppress(Exception):
+            buf.cursor_position = 0  # type: ignore[reportUnknownMemberType]
+
+    def _history_forward_cursor_to_end(buf: Buffer) -> None:
+        """Switch to next history entry and place cursor at absolute end.
+
+        prompt_toolkit's default `Buffer.history_forward()` moves the cursor to
+        the end of the *first* line. For our multiline REPL, it's more useful to
+        land at the end so that pressing Down keeps cycling through history.
+        """
+
+        try:
+            before = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            before = None
+
+        buf.history_forward()
+
+        try:
+            after = int(buf.working_index)  # type: ignore[reportUnknownMemberType]
+        except Exception:
+            after = None
+
+        if before is not None and after is not None and before == after:
+            return
+
+        with contextlib.suppress(Exception):
+            text = buf.text  # type: ignore[reportUnknownMemberType]
+            buf.cursor_position = len(text)  # type: ignore[reportUnknownMemberType]
+
     def _is_bash_mode_text(text: str) -> bool:
         return text.startswith(("!", "！"))
 
@@ -151,6 +205,20 @@
         except Exception:
             return False
 
+    def _current_cursor_row() -> int:
+        try:
+            doc = get_app().current_buffer.document
+            return int(doc.cursor_position_row)
+        except Exception:
+            return 0
+
+    def _current_line_count() -> int:
+        try:
+            doc = get_app().current_buffer.document
+            return int(doc.line_count)
+        except Exception:
+            return 1
+
     def _move_cursor_visually_within_wrapped_line(event: KeyPressEvent, *, delta_visible_y: int) -> None:
         """Move the cursor Up/Down by one wrapped screen row, keeping column."""
 
@@ -493,6 +561,32 @@
     def _(event: KeyPressEvent) -> None:
         _move_cursor_visually_within_wrapped_line(event, delta_visible_y=1)
 
+    @kb.add(
+        "up",
+        filter=enabled
+        & ~has_completions
+        & ~is_searching
+        & Condition(lambda: not _can_move_cursor_visually_within_wrapped_line(delta_visible_y=-1))
+        & Condition(lambda: _current_cursor_row() == 0),
+        eager=True,
+    )
+    def _(event: KeyPressEvent) -> None:
+        """Up on first logical line: switch history and keep caret at start."""
+        _history_backward_cursor_to_start(event.current_buffer)
+
+    @kb.add(
+        "down",
+        filter=enabled
+        & ~has_completions
+        & ~is_searching
+        & Condition(lambda: not _can_move_cursor_visually_within_wrapped_line(delta_visible_y=1))
+        & Condition(lambda: _current_cursor_row() >= (_current_line_count() - 1)),
+        eager=True,
+    )
+    def _(event: KeyPressEvent) -> None:
+        """Down on last logical line: switch history and keep caret at end."""
+        _history_forward_cursor_to_end(event.current_buffer)
+
     @kb.add("c-j", filter=enabled)
     def _(event: KeyPressEvent) -> None:
         nonlocal swallow_next_control_j
@@ -617,11 +711,11 @@
     @kb.add("escape", "up", filter=enabled & ~has_completions)
     def _(event: KeyPressEvent) -> None:
         """Option+Up switches to previous history entry."""
-        event.current_buffer.history_backward()
+        _history_backward_cursor_to_start(event.current_buffer)
 
     @kb.add("escape", "down", filter=enabled & ~has_completions)
     def _(event: KeyPressEvent) -> None:
         """Option+Down switches to next history entry."""
-        event.current_buffer.history_forward()
+        _history_forward_cursor_to_end(event.current_buffer)
 
     return kb
@@ -358,7 +358,6 @@ class PromptToolkitInput(InputProviderABC):
             return []
         return [
             ("fg:ansigreen", " bash mode"),
-            ("fg:ansibrightblack", " (type ! at start; backspace first char to exit)"),
         ]
 
     def _setup_model_picker(self) -> None:
@@ -50,7 +50,6 @@ from klaude_code.tui.commands import (
 )
 from klaude_code.tui.components.rich import status as r_status
 from klaude_code.tui.components.rich.theme import ThemeKey
-from klaude_code.tui.components.thinking import extract_last_bold_header, normalize_thinking_content
 from klaude_code.tui.components.tools import get_task_active_form, get_tool_active_form, is_sub_agent_tool
 
 # Tools that complete quickly and don't benefit from streaming activity display.
@@ -293,7 +292,6 @@ class _SessionState:
     assistant_stream_active: bool = False
     thinking_stream_active: bool = False
    assistant_char_count: int = 0
-    thinking_tail: str = ""
     task_active: bool = False
 
     @property
@@ -304,15 +302,6 @@
     def should_show_sub_agent_thinking_header(self) -> bool:
         return bool(self.sub_agent_state and self.sub_agent_state.sub_agent_type == tools.IMAGE_GEN)
 
-    @property
-    def should_extract_reasoning_header(self) -> bool:
-        """Gemini and GPT-5 models use markdown bold headers in thinking."""
-        return False  # Temporarily disabled for all models
-        if self.model_id is None:
-            return False
-        model_lower = self.model_id.lower()
-        return "gemini" in model_lower or "gpt-5" in model_lower
-
     def should_skip_tool_activity(self, tool_name: str) -> bool:
         """Check if tool activity should be skipped for non-streaming models."""
         if self.model_id is None:
@@ -335,6 +324,11 @@ class DisplayStateMachine:
         self._primary_session_id: str | None = None
         self._spinner = SpinnerStatusState()
 
+    def _reset_sessions(self) -> None:
+        self._sessions = {}
+        self._primary_session_id = None
+        self._spinner.reset()
+
     def _session(self, session_id: str) -> _SessionState:
         existing = self._sessions.get(session_id)
         if existing is not None:
@@ -367,7 +361,9 @@
         return self._spinner_update_commands()
 
     def begin_replay(self) -> list[RenderCommand]:
-        self._spinner.reset()
+        # Replay is a full rebuild of the terminal view; clear session state so primary-session
+        # routing is recalculated from the replayed TaskStartEvent.
+        self._reset_sessions()
         return [SpinnerStop(), PrintBlankLine()]
 
     def end_replay(self) -> list[RenderCommand]:
@@ -386,6 +382,13 @@
 
         match event:
             case events.WelcomeEvent() as e:
+                # WelcomeEvent marks (or reaffirms) the current interactive session.
+                # If the session id changes (e.g., /clear creates a new session), clear
+                # routing state so subsequent streamed events are not dropped.
+                if self._primary_session_id is not None and self._primary_session_id != e.session_id:
+                    self._reset_sessions()
+                s = self._session(e.session_id)
+                self._primary_session_id = e.session_id
                 cmds.append(RenderWelcome(e))
                 return cmds
 
@@ -431,7 +434,12 @@
                 s.model_id = e.model_id
                 s.task_active = True
                 if not s.is_sub_agent:
-                    self._set_primary_if_needed(e.session_id)
+                    # Keep primary session tracking in sync even if the session id changes
+                    # during the process lifetime (e.g., /clear).
+                    if is_replay:
+                        self._set_primary_if_needed(e.session_id)
+                    else:
+                        self._primary_session_id = e.session_id
                 if not is_replay:
                     cmds.append(TaskClockStart())
 
@@ -487,9 +495,8 @@
                 if not self._is_primary(e.session_id):
                     return []
                 s.thinking_stream_active = True
-                s.thinking_tail = ""
                 # Ensure the status reflects that reasoning has started even
-                # before we receive any deltas (or a bold header).
+                # before we receive any deltas.
                 if not is_replay:
                     self._spinner.set_reasoning_status(STATUS_THINKING_TEXT)
                 cmds.append(StartThinkingStream(session_id=e.session_id))
@@ -507,16 +514,6 @@
                 if not self._is_primary(e.session_id):
                     return []
                 cmds.append(AppendThinking(session_id=e.session_id, content=e.content))
-
-                # Update reasoning status for spinner (based on bounded tail).
-                # Only extract headers for models that use markdown bold headers in thinking.
-                if not is_replay and s.should_extract_reasoning_header:
-                    s.thinking_tail = (s.thinking_tail + e.content)[-8192:]
-                    header = extract_last_bold_header(normalize_thinking_content(s.thinking_tail))
-                    if header:
-                        self._spinner.set_reasoning_status(header)
-                        cmds.extend(self._spinner_update_commands())
-
                 return cmds
 
             case events.ThinkingEndEvent() as e:
@@ -721,6 +718,30 @@
             case events.TaskFinishEvent() as e:
                 s.task_active = False
                 cmds.append(RenderTaskFinish(e))
+
+                # Defensive: finalize any open streams so buffered markdown is flushed.
+                if s.thinking_stream_active:
+                    s.thinking_stream_active = False
+                    cmds.append(EndThinkingStream(session_id=e.session_id))
+                if s.assistant_stream_active:
+                    s.assistant_stream_active = False
+                    cmds.append(EndAssistantStream(session_id=e.session_id))
+
+                # Rare providers / edge cases may complete a turn without emitting any
+                # assistant deltas (or without the display consuming them). In that case,
+                # fall back to rendering the final task result to avoid a "blank" turn.
+                if (
+                    not is_replay
+                    and not s.is_sub_agent
+                    and not e.has_structured_output
+                    and s.assistant_char_count == 0
+                    and e.task_result.strip()
+                    and e.task_result.strip().lower() not in {"task cancelled", "task canceled"}
+                ):
+                    cmds.append(StartAssistantStream(session_id=e.session_id))
+                    cmds.append(AppendAssistant(session_id=e.session_id, content=e.task_result))
+                    cmds.append(EndAssistantStream(session_id=e.session_id))
+
                 if not s.is_sub_agent and not is_replay:
                     cmds.append(TaskClockClear())
                     self._spinner.reset()
@@ -349,7 +349,11 @@ class TUICommandRenderer:
         if pad_lines:
             stream = Padding(stream, (0, 0, pad_lines, 0))
         stream_part = stream
-        gap_part = Text(" ") if (self._spinner_visible and self._bash_stream_active) else Group()
+        gap_part = (
+            Text(" ")
+            if (self._spinner_visible and (self._bash_stream_active or self._stream_renderable))
+            else Group()
+        )
 
         status_part: RenderableType = SingleLine(self._status_spinner) if self._spinner_visible else Group()
         return Group(stream_part, gap_part, status_part)
@@ -730,6 +734,9 @@
                     self._sub_agent_thinking_buffers[session_id] = ""
                 elif not self._thinking_stream.is_active:
                     self._thinking_stream.start(self._new_thinking_mdstream())
+                    if not self._replay_mode:
+                        self._thinking_stream.append("Thinking… \n")
+                        self._thinking_stream.render(transform=c_thinking.normalize_thinking_content)
             case AppendThinking(session_id=session_id, content=content):
                 if self.is_sub_agent_session(session_id):
                     if session_id in self._sub_agent_thinking_buffers:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: klaude-code
-Version: 2.10.0
+Version: 2.10.2
 Summary: Minimal code agent CLI
 Requires-Dist: anthropic>=0.66.0
 Requires-Dist: chardet>=5.2.0
@@ -1,7 +1,7 @@
 klaude_code/.DS_Store,sha256=cLWFbSgdN0bXEd3_tz93BJSposEPafUBqSr7t-3lPbA,6148
 klaude_code/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/app/__init__.py,sha256=7mgWpN9SFDqe8AW44bBn9M19nVsBcZURrsGB_8l2hrU,264
-klaude_code/app/runtime.py,sha256=MEfD6asbwRpZLyf0jUXAenDPufBZEnA3Trh6QnEUniQ,6031
+klaude_code/app/runtime.py,sha256=d06L6O0uyExL-_qEVSKqekk_tUBr6hAzV9jT8As6eSY,6180
 klaude_code/auth/AGENTS.md,sha256=5ObIfgMfUDuNBKykK6kikRSEvCxDt5fO0-ySVaLVDW0,8467
 klaude_code/auth/__init__.py,sha256=LhGS2P80Ci_DeaqxVueknDIj-Ded4OFQdNmFHekXNY8,1106
 klaude_code/auth/antigravity/__init__.py,sha256=Lv37yKg7CLzoQss2Jho-jtrGiU-zUCa7W1w3eDWmR0o,610
@@ -26,11 +26,11 @@ klaude_code/cli/config_cmd.py,sha256=7BmZpKeiO24mKKLKGO46WvSQzSaNwuZ3KtCV4GH-Yh0
 klaude_code/cli/cost_cmd.py,sha256=PofksXj7ZmalaRfxAHCxtUBxEc7tfI-sIER_9GRA1CU,16604
 klaude_code/cli/debug.py,sha256=vEHOjObhrIHDAXk3q6cOgeW2NZxCx5AWM1rJ6FiJnVU,1901
 klaude_code/cli/list_model.py,sha256=GYznb88rvubnUCMvW-D1r3aGVWQB4-DzvsTjBSHjOUw,16168
-klaude_code/cli/main.py,sha256=Z0jCfGvMH7iIlmpjcPsCOHgqDFEcEl3LkzIQr9fUHNM,12772
+klaude_code/cli/main.py,sha256=bQijkLDTYtjFUdU1t-dGCzOilo7mcotIQefleaF9or8,13072
 klaude_code/cli/self_update.py,sha256=1xdG9ifvRZQDSx6RAtSSgXmw9hZNXMLvqC2zu4bS-GY,2622
 klaude_code/config/__init__.py,sha256=Qe1BeMekBfO2-Zd30x33lB70hdM1QQZGrp4DbWSQ-II,353
 klaude_code/config/assets/__init__.py,sha256=uMUfmXT3I-gYiI-HVr1DrE60mx5cY1o8V7SYuGqOmvY,32
-klaude_code/config/assets/builtin_config.yaml,sha256=9ozI1jwqXExq48Trs0ayVNX8WAAkOh3qC5R1kUkP_Rw,9093
+klaude_code/config/assets/builtin_config.yaml,sha256=EYGgBMAVtgUKGHZcfkm4sCfSB1-fBx83q10MGVnNz8I,9121
 klaude_code/config/builtin_config.py,sha256=OG5VERUHo3tSojgFXfNDV6pAHNOh3kO-xFHpvTr-cpc,1786
 klaude_code/config/config.py,sha256=otBvsUkvI-2fpZzx9fO6SPnCek7FI7kufvAnGIQqTz8,27084
 klaude_code/config/model_matcher.py,sha256=3IlLU5h3NDh_bURbCW-PV027C3irG3hyitwj1cj99Ig,6179
@@ -39,7 +39,7 @@ klaude_code/config/thinking.py,sha256=5uVM0cFUJZBBsBGGdPG-tjdiNwZ-GFeWOBBWIdSPFv
 klaude_code/const.py,sha256=VCK3HgZJZO6jcYz6U2rcHS7W-n4oyKYg9AC6eTB4HIQ,11575
 klaude_code/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/core/agent.py,sha256=GrIg22nfoq1c90UHyEfU_bh46vtXTCo4bLezb-3mGNo,4120
-klaude_code/core/agent_profile.py,sha256=AoBpOC8rGqyJW4DGCzejQAq3P1497n3NrCayIGIOQBY,10632
+klaude_code/core/agent_profile.py,sha256=wfde6zMez5abh5O04e7BzBlbLhHwd23pn4FBijOHxQg,11231
 klaude_code/core/bash_mode.py,sha256=BUy2_uOcJ8bO89t_QCwtw6GlHt7vd1t62sN5iWk-UuQ,9250
 klaude_code/core/compaction/AGENTS.md,sha256=KZR5lxe4jVAbT5K9PxbZcHWI1UwsppbGmxIfCdHYr7Q,3684
 klaude_code/core/compaction/__init__.py,sha256=CvidYx3sX0IZAa4pifX9jrQSkg4Nib7PKrcaOHswF60,329
@@ -62,7 +62,7 @@ klaude_code/core/prompts/prompt-gemini.md,sha256=JjE1tHSByGKJzjn4Gpj1zekT7ry1Yqb
 klaude_code/core/prompts/prompt-minimal.md,sha256=6-ZmQQkE3f92W_3V2wS7ocB13wLog1_UojCjZG0K4v8,1559
 klaude_code/core/prompts/prompt-sub-agent-explore.md,sha256=21kFodjhvN0L-c_ZFo4yVhJOyzfgES-Dty9Vz_Ew9q8,2629
 klaude_code/core/prompts/prompt-sub-agent-image-gen.md,sha256=tXYKSzFd04OiC0dmVO9suMKeD5f9qo_4NsvqGo7irfI,78
-klaude_code/core/prompts/prompt-sub-agent-web.md,sha256=UwrO5M_jPUbee_8lL7gB-2VFFLxvzEejluXDkMzmR5A,3625
+klaude_code/core/prompts/prompt-sub-agent-web.md,sha256=xi9nyk8k0_64muL2RBMkrCdli5elXALjhKAsRO3qr-U,3627
 klaude_code/core/prompts/prompt-sub-agent.md,sha256=dmmdsOenbAOfqG6FmdR88spOLZkXmntDBs-cmZ9DN_g,897
 klaude_code/core/reminders.py,sha256=Dar0GqyOgiZiv0VzrzYOGM22ViSWJUaV12Ssdtcdjlo,21720
 klaude_code/core/task.py,sha256=uXNg_SxVnp6nFwDmWl8LjhG0HDv7_3P83zAV6dR2gcQ,20125
@@ -80,7 +80,7 @@ klaude_code/core/tool/file/read_tool.md,sha256=_0yftoexOCwdJBKKUxNfxuEXixJipmhtT
 klaude_code/core/tool/file/read_tool.py,sha256=JOcARJqZ5RQGFGBtBAzDTzFOCXnR8PXkHg04I3-yacc,14262
 klaude_code/core/tool/file/write_tool.md,sha256=CNnYgtieUasuHdpXLDpTEsqe492Pf7v75M4RQ3oIer8,613
 klaude_code/core/tool/file/write_tool.py,sha256=R2gWJp8kDOm_gUMbb8F6Z-SrEf8-8Y__9KaMmaQaQVg,5674
-klaude_code/core/tool/offload.py,sha256=hxc-up--3KdIvjoslg7FvUek-8wUUSHr3QcGfyJaSQg,11557
+klaude_code/core/tool/offload.py,sha256=5-Th1CXRhRTaysOyvfoi9bpOVjZT1EMn3uwqieyDFZY,11574
 klaude_code/core/tool/report_back_tool.py,sha256=SkuRhfLpVwTOSpIj7XwYfGDNBp8YsCUNXieXDkafS2E,3381
 klaude_code/core/tool/shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/core/tool/shell/bash_tool.md,sha256=VqDfwZOy3Ok1t1rEPKEEkN4Rf_1ZFZzDFzpmOstH2Xo,52
@@ -103,11 +103,11 @@ klaude_code/core/tool/tool_runner.py,sha256=yj9DpXSMd-u8BOAiFsVVxUbB-CEFS8D_Vcv3
 klaude_code/core/tool/web/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/core/tool/web/mermaid_tool.md,sha256=vvPSWxbY3P_cBpHh6AM8Je9JJoMY4FBTJzoteEkwuDU,2095
 klaude_code/core/tool/web/mermaid_tool.py,sha256=FELwyLBzFdHhkimGee31_qZlrZq2vTzIaFTsp6qeGus,2715
-klaude_code/core/tool/web/web_fetch_tool.md,sha256=i0IwsZ6r9vAQeCpwDBtEGrWmHPzZk_XE_CsqvjE4aUA,498
-klaude_code/core/tool/web/web_fetch_tool.py,sha256=jXbJTgpI_RvyXy5ac8qIrC-AKOUX1fJ3TpqXq_BfkS4,9596
+klaude_code/core/tool/web/web_fetch_tool.md,sha256=tpLnWd9miepnS1E3vUumaEHLrUoVgZpbEAocuPvFt7M,499
+klaude_code/core/tool/web/web_fetch_tool.py,sha256=THMbzs-aHI6ST9NU0kNUtJCz9uhclDB7v8S9814dwnI,9597
 klaude_code/core/tool/web/web_search_tool.md,sha256=l5gGPx-fXHFel1zLBljm8isy9pwEYXGrq5cFzzw1VBw,1135
 klaude_code/core/tool/web/web_search_tool.py,sha256=ljkgXxP6L5nJnbYB_IOUtPUN9zA_h5hBD55lhNAja08,4293
-klaude_code/core/turn.py,sha256=YKzjlaKggN9NdSvPvrWfVvR84mI8PXofzeqC_fmnrCs,18902
+klaude_code/core/turn.py,sha256=TK4-ZGmRo_qa21XRklGfEMnfhFGR20wnKjnvlf31HBQ,18882
 klaude_code/llm/__init__.py,sha256=b4AsqnrMIs0a5qR_ti6rZcHwFzAReTwOW96EqozEoSo,287
 klaude_code/llm/anthropic/__init__.py,sha256=PWETvaeNAAX3ue0ww1uRUIxTJG0RpWiutkn7MlwKxBs,67
 klaude_code/llm/anthropic/client.py,sha256=RpYw4UQnhLzAsp6i-FU7cDW4deqngdAoQaTPGnCeO5U,17346
@@ -127,8 +127,7 @@ klaude_code/llm/image.py,sha256=TJzduNtSmoJLizTUDo0EjbovqOeaXkvwnRyMfuzVVUQ,4512
 klaude_code/llm/input_common.py,sha256=htV8YHDlQ7fx2qW_D9e_BgxWlyBmZwAtNxnM15uj9gQ,7563
 klaude_code/llm/json_stable.py,sha256=FrgJbJ33YrAkzl5iPOWLolFjYFZMuT97sbOegeXN7GE,1288
 klaude_code/llm/openai_codex/__init__.py,sha256=Dx2MxrZEHQZ8gsJmM05dE4qqak4F0zjwfXd5v8pIXaE,139
-klaude_code/llm/openai_codex/client.py,sha256=bpTKtp3Qum-c6OBQAZHJ9LKSzgToHOig5M03568aX6I,6248
-klaude_code/llm/openai_codex/prompt_sync.py,sha256=u9RBuVpfRJGB5cdpI_OJkJVSL33s4pZNxbiCIm1l70Y,8767
+klaude_code/llm/openai_codex/client.py,sha256=wIOfCsB1F_b9D6ymq-nyCIXAuuBTz6-CMUxD8NiuvkQ,5403
 klaude_code/llm/openai_compatible/__init__.py,sha256=ACGpnki7k53mMcCl591aw99pm9jZOZk0ghr7atOfNps,81
 klaude_code/llm/openai_compatible/client.py,sha256=oiRmqiqNJ-clLk6pK1LTOAyx1svmNBg1sNuT8uN3l5g,4717
 klaude_code/llm/openai_compatible/input.py,sha256=UfEu2c866QuVMFTY3GEPhuKV7QcCgxBwNYqGT88E_8E,2834
@@ -137,8 +136,8 @@ klaude_code/llm/openai_responses/__init__.py,sha256=WsiyvnNiIytaYcaAqNiB8GI-5zcp
 klaude_code/llm/openai_responses/client.py,sha256=qmQNcHZUBzvjQZZQkWh2f3oUYC9Lbx_dZD7KwnXbPPI,16362
 klaude_code/llm/openai_responses/input.py,sha256=eEeSHlGYY5VYGpj2DIVm8qjH2cXo-JaODT_SjFtS6Nw,9264
 klaude_code/llm/openrouter/__init__.py,sha256=_As8lHjwj6vapQhLorZttTpukk5ZiCdhFdGT38_ASPo,69
-klaude_code/llm/openrouter/client.py,sha256=P_mDQK4_i1MLF0jK4p_bKhh15ACXgw6Ie0rUrtbfsdM,5738
-klaude_code/llm/openrouter/input.py,sha256=Z_Cf6TnMZ5KQNJ0E5IIDCKK2OWlzi8IW0S5A72BBGT0,6176
+klaude_code/llm/openrouter/client.py,sha256=pBQG3JIftEyDpWNo_rT7cDaTAmeUxTktyRRTcZNq9Hg,5858
+klaude_code/llm/openrouter/input.py,sha256=VqKPWCUb-BlFwNs1uldDD_dGPJzwskG301uULaqm958,6363
 klaude_code/llm/openrouter/reasoning.py,sha256=u7ccfnGxJ4Ws8P3X5FW91d8HXie29JjeWz0hZ1r0oFg,3320
 klaude_code/llm/partial_message.py,sha256=-sjlpV-et4ViBtBpdtihK5QBjAlwS47-mBpVbRQPP3s,142
 klaude_code/llm/registry.py,sha256=oYObtCgWQxL2eFfmvpYLqMhKvYD3JoszzpvuC08sRQQ,2177
@@ -167,7 +166,7 @@ klaude_code/session/selector.py,sha256=snBpnz9UQCe_0K8HttSGCJECCE4YEzpWs_Fdmk2P9
 klaude_code/session/session.py,sha256=AL-2oNggPf7PTnNthFdyiI233ySWdCcOqcxufkqrvwE,29373
 klaude_code/session/store.py,sha256=f_Ve6uMX1s-yH3jqiDWPULoLnab07QcFA04b3PD0ehE,6306
 klaude_code/session/templates/export_session.html,sha256=ekRt1zGePqT2lOYSPgdNlDjsOemM2r7FVB6X8nBrC00,137452
-klaude_code/session/templates/mermaid_viewer.html,sha256=Y_wEWFm4mKWpfAz3YMis5DdLEkhw_2d8CpU6jbvGZow,27842
+klaude_code/session/templates/mermaid_viewer.html,sha256=2e5q0YpKpqB2FeFdH5t-BxJKtDDGrKKz8z1hmKSZ93M,30991
 klaude_code/skill/.DS_Store,sha256=zy9qIqi2YLGzlZwHNM4oAX8rDoNTg9yxdo22PJOwupg,6148
 klaude_code/skill/__init__.py,sha256=yeWeCfRGPOhT4mx_pjdo4fLondQ_Vx0edBtnFusLhls,839
 klaude_code/skill/assets/.DS_Store,sha256=1lFlJ5EFymdzGAUAaI30vcaaLHt3F1LwpG7xILf9jsM,6148
@@ -208,31 +207,31 @@ klaude_code/tui/components/developer.py,sha256=m6gcnLyLoSSw3wpwYQZ_yzaT9zgXRgNzT
 klaude_code/tui/components/diffs.py,sha256=vwllnYBxC5xGjfKU3uIkCjcupr9nrjRjvvj0tg0_MQA,3085
 klaude_code/tui/components/errors.py,sha256=fSojNfRceB6eE7cyJHfwGt5Ru0OYp63fCJ-W6-3SSYs,799
 klaude_code/tui/components/mermaid_viewer.py,sha256=zI1FBuX6Ionx38KqkzhOIQ9tFzd7REPbjW1iqSiNrec,3086
-klaude_code/tui/components/metadata.py,sha256=iG6V3-rHj7eKACMa57-zORIy0FJfhAKLVXgfmGoZI6A,7130
+klaude_code/tui/components/metadata.py,sha256=JzHMi3fr_YWQmachSRukBTYO3iHvPFAXaA3f2SRyD54,7286
 klaude_code/tui/components/rich/__init__.py,sha256=zEZjnHR3Fnv_sFMxwIMjoJfwDoC4GRGv3lHJzAGRq_o,236
 klaude_code/tui/components/rich/cjk_wrap.py,sha256=eMqBxftUtll7zrytUb9WtJ6naYLyax0W4KJRpGwWulM,7602
 klaude_code/tui/components/rich/code_panel.py,sha256=SOdyfHBZNB4gAWIbnN_enhHB1EWxw8Hxiafx6yjwdJo,5544
 klaude_code/tui/components/rich/live.py,sha256=xiMT6dPsxM_jaazddKrV9CMJQWwpe2t9OdjffHvo1JU,2821
-klaude_code/tui/components/rich/markdown.py,sha256=Ps6KaSwArqY5IVW8J2ShkqYg9vOGMnlfTJyZQOkXmo8,25120
+klaude_code/tui/components/rich/markdown.py,sha256=vas08cxz8fEU2PlTUNrgmF-zIXtFoMxP-Jefc8FSGuE,25276
 klaude_code/tui/components/rich/quote.py,sha256=u6sBmGdp0ckaZLw_XgJk7iHW4zxnWikUaB3GX2tkhlM,5375
 klaude_code/tui/components/rich/status.py,sha256=hSvMwEguF2DfHH3ISR0bmDg58zAnM3CTJLcRff_rtrg,14791
-klaude_code/tui/components/rich/theme.py,sha256=Bioy3rbR_6M5XJFg3sHD_aNuwF7eJtZWHLij0-8tDQU,17020
+klaude_code/tui/components/rich/theme.py,sha256=cp7PrhxIgOLu_AjHBOf3wCZewzngOAZDFQQmiO0sAbY,17084
 klaude_code/tui/components/sub_agent.py,sha256=8XTWsTi9mfbNLMD8SZ__nZQmBf81rW-NWpuOT-sFbv8,4723
-klaude_code/tui/components/thinking.py,sha256=zxeELXVoU0zgN_IrRHSNqjCHfpt5uX7_U-rXpd3RktI,1857
-klaude_code/tui/components/tools.py,sha256=Zdfj1HLFjGnFtQOWcb4W4zyR3AI5h9QUd4om116uL6M,27262
-klaude_code/tui/components/user_input.py,sha256=U4gdkCqV886PSBJ0KBqc3a_8FXdpEBAXUZEF1_123Dw,3609
+klaude_code/tui/components/thinking.py,sha256=yVzY7BbcdDwB4XKi9i2sp3cKREirvmlMiEID74b-5_0,955
+klaude_code/tui/components/tools.py,sha256=MqW-_a4gtMnCR5N8APJYtN2TfSvuUdvo2WFEGm3Qa9A,27327
+klaude_code/tui/components/user_input.py,sha256=fYvdoc3gMyv90l2sQvSs4ha0wILu71rBAcwbzQPMMVc,3659
 klaude_code/tui/components/welcome.py,sha256=Ahkhg0dsSqy17pKLOp_5UZWn9vysr68T3Y-jB40yWsA,5303
-klaude_code/tui/display.py,sha256=bPWhDNZ3R5InQIz0yThLRKi7RJvLx8zcmJoJgyzG5MM,3932
+klaude_code/tui/display.py,sha256=VbBmNjGxp6R4oLjC0FrjXkr0INWSjfXlWq1mSVFGyMU,3933
 klaude_code/tui/input/AGENTS.md,sha256=2RBLz7H0JbUJv6OBzeadLOlGUF5EBqvtwTGBf6nZuN0,1633
 klaude_code/tui/input/__init__.py,sha256=wLbjqBrvP6fmbGtbKe9Wp12yxhse0faVLOxtoWua_1E,353
 klaude_code/tui/input/completers.py,sha256=MJO1nBq0V5jDbGw_o4Ab5WLVD1ns5plJRI3cIYnGfHs,33154
 klaude_code/tui/input/drag_drop.py,sha256=oyKtrHCyUiGiMLEXpsDTnTnAKJ1_xrvVkrASOiG8O4g,3974
 klaude_code/tui/input/images.py,sha256=ft2AaOg1Figdm1t_NNoBCGdp20silYXGw-m9XKDd9GU,6996
-klaude_code/tui/input/key_bindings.py,sha256=Gpc-VhQh-h431cnetheV0HF9QGcFyy82QrHWUAYF-LA,26633
+klaude_code/tui/input/key_bindings.py,sha256=6oxWdTdklFqLDOk6ZpyKijgrnQO4B9hSqgMfT2kdjXs,30032
 klaude_code/tui/input/paste.py,sha256=kELg5jC0WdBXWHJUsEjIhZ67KCvHMbN1XzyGmevVSNM,1888
-klaude_code/tui/input/prompt_toolkit.py,sha256=eftqR8T8bVVv40m_UnjymFl_uzA8mLzeMy4t89MIZKY,30751
-klaude_code/tui/machine.py,sha256=qSxVAYFXhNbwBjgCDYwdvARu2rP17L5aQ4oPqvWtSGA,31311
-klaude_code/tui/renderer.py,sha256=ylkCiBbL4H9WXZqRcuEwn5lYRH7fXJ5KweyXrPR8rgk,34543
+klaude_code/tui/input/prompt_toolkit.py,sha256=aE1pVK1G96DWERc2APIXq4WmSPOdJDRHTgnyyZXp_u4,30663
+klaude_code/tui/machine.py,sha256=STwhoBw_HwpkDHDts7CrwtUN211OYaySQ0jy8r7fa08,32579
+klaude_code/tui/renderer.py,sha256=b59_Pr17seKOcQlHDEH4CMWKNWzdpMzmUhqUDqLvQyU,34884
 klaude_code/tui/runner.py,sha256=ZADAH28Iu1DU-KDCggEyvJiM_LTbN1sjPEaQsuBNTbc,13111
 klaude_code/tui/terminal/__init__.py,sha256=GIMnsEcIAGT_vBHvTlWEdyNmAEpruyscUA6M_j3GQZU,1412
 klaude_code/tui/terminal/color.py,sha256=6SJR2RA8cqJINNoRz65w0HL3x9g46ydIvDOGWMeNnQU,7195
250
249
  klaude_code/ui/terminal/__init__.py,sha256=5OeAzr994r8-peWsLON0iXsAvJ2pexwMp36JY7FKGDc,179
251
250
  klaude_code/ui/terminal/title.py,sha256=lCk1dKk7fIe5Fb-FRU9P4ktVEfBmT3ac3wICYmC4mGE,1229
252
251
  klaude_code/update.py,sha256=QER816AZe9u3RhRvP0Z37Jh2Ch5RLy9PREyDsI0e1dA,4480
253
- klaude_code-2.10.0.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
254
- klaude_code-2.10.0.dist-info/entry_points.txt,sha256=kkXIXedaTOtjXPr2rVjRVVXZYlFUcBHELaqmyVlWUFA,92
255
- klaude_code-2.10.0.dist-info/METADATA,sha256=K0AELbdxgcZPuvXZFY2xXqA6__nkIV_NzF7NXfhMUSU,10120
256
- klaude_code-2.10.0.dist-info/RECORD,,
252
+ klaude_code-2.10.2.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
253
+ klaude_code-2.10.2.dist-info/entry_points.txt,sha256=kkXIXedaTOtjXPr2rVjRVVXZYlFUcBHELaqmyVlWUFA,92
254
+ klaude_code-2.10.2.dist-info/METADATA,sha256=CS3smLUK3V4OvUNZP44SBy8cQ8irncXNspd_EwDAfk0,10120
255
+ klaude_code-2.10.2.dist-info/RECORD,,
@@ -1,237 +0,0 @@
-"""Dynamic prompt synchronization from OpenAI Codex GitHub repository."""
-
-import json
-import time
-from functools import cache
-from importlib.resources import files
-from pathlib import Path
-from typing import Any, Literal
-
-import httpx
-
-from klaude_code.log import DebugType, log_debug
-
-GITHUB_API_RELEASES = "https://api.github.com/repos/openai/codex/releases/latest"
-GITHUB_HTML_RELEASES = "https://github.com/openai/codex/releases/latest"
-GITHUB_RAW_BASE = "https://raw.githubusercontent.com/openai/codex"
-
-CACHE_DIR = Path.home() / ".klaude" / "codex-prompts"
-CACHE_TTL_SECONDS = 24 * 60 * 60  # 24 hours
-
-type ModelFamily = Literal["gpt-5.2-codex", "codex-max", "codex", "gpt-5.2", "gpt-5.1"]
-
-PROMPT_FILES: dict[ModelFamily, str] = {
-    "gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
-    "codex-max": "gpt-5.1-codex-max_prompt.md",
-    "codex": "gpt_5_codex_prompt.md",
-    "gpt-5.2": "gpt_5_2_prompt.md",
-    "gpt-5.1": "gpt_5_1_prompt.md",
-}
-
-CACHE_FILES: dict[ModelFamily, str] = {
-    "gpt-5.2-codex": "gpt-5.2-codex-instructions.md",
-    "codex-max": "codex-max-instructions.md",
-    "codex": "codex-instructions.md",
-    "gpt-5.2": "gpt-5.2-instructions.md",
-    "gpt-5.1": "gpt-5.1-instructions.md",
-}
-
-
-@cache
-def _load_bundled_prompt(prompt_path: str) -> str:
-    """Load bundled prompt from package resources."""
-    return files("klaude_code.core").joinpath(prompt_path).read_text(encoding="utf-8").strip()
-
-
-class CacheMetadata:
-    def __init__(self, etag: str | None, tag: str, last_checked: int, url: str):
-        self.etag = etag
-        self.tag = tag
-        self.last_checked = last_checked
-        self.url = url
-
-    def to_dict(self) -> dict[str, str | int | None]:
-        return {
-            "etag": self.etag,
-            "tag": self.tag,
-            "last_checked": self.last_checked,
-            "url": self.url,
-        }
-
-    @classmethod
-    def from_dict(cls, data: dict[str, object]) -> "CacheMetadata":
-        etag = data.get("etag")
-        last_checked = data.get("last_checked")
-        return cls(
-            etag=etag if isinstance(etag, str) else None,
-            tag=str(data.get("tag", "")),
-            last_checked=int(last_checked) if isinstance(last_checked, int | float) else 0,
-            url=str(data.get("url", "")),
-        )
-
-
-def get_model_family(model: str) -> ModelFamily:
-    """Determine model family from model name."""
-    if "gpt-5.2-codex" in model or "gpt 5.2 codex" in model:
-        return "gpt-5.2-codex"
-    if "codex-max" in model:
-        return "codex-max"
-    if "codex" in model or model.startswith("codex-"):
-        return "codex"
-    if "gpt-5.2" in model:
-        return "gpt-5.2"
-    return "gpt-5.1"
-
-
-def _get_latest_release_tag(client: httpx.Client) -> str:
-    """Get latest release tag from GitHub."""
-    try:
-        response = client.get(GITHUB_API_RELEASES)
-        if response.status_code == 200:
-            data: dict[str, Any] = response.json()
-            tag_name: Any = data.get("tag_name")
-            if isinstance(tag_name, str):
-                return tag_name
-    except httpx.HTTPError:
-        pass
-
-    # Fallback: follow redirect from releases/latest
-    response = client.get(GITHUB_HTML_RELEASES, follow_redirects=True)
-    if response.status_code == 200:
-        final_url = str(response.url)
-        if "/tag/" in final_url:
-            parts = final_url.split("/tag/")
-            if len(parts) > 1 and "/" not in parts[-1]:
-                return parts[-1]
-
-    raise RuntimeError("Failed to determine latest release tag from GitHub")
-
-
-def _load_cache_metadata(meta_file: Path) -> CacheMetadata | None:
-    if not meta_file.exists():
-        return None
-    try:
-        data = json.loads(meta_file.read_text())
-        return CacheMetadata.from_dict(data)
-    except (json.JSONDecodeError, ValueError):
-        return None
-
-
-def _save_cache_metadata(meta_file: Path, metadata: CacheMetadata) -> None:
-    meta_file.parent.mkdir(parents=True, exist_ok=True)
-    meta_file.write_text(json.dumps(metadata.to_dict(), indent=2))
-
-
-def get_codex_instructions(model: str = "gpt-5.1-codex", force_refresh: bool = False) -> str:
-    """Get Codex instructions for the given model.
-
-    Args:
-        model: Model name to get instructions for.
-        force_refresh: If True, bypass cache TTL and fetch fresh instructions.
-
-    Returns:
-        The Codex system prompt instructions.
-    """
-    model_family = get_model_family(model)
-    prompt_file = PROMPT_FILES[model_family]
-    cache_file = CACHE_DIR / CACHE_FILES[model_family]
-    meta_file = CACHE_DIR / f"{CACHE_FILES[model_family].replace('.md', '-meta.json')}"
-
-    # Check cache unless force refresh
-    if not force_refresh:
-        metadata = _load_cache_metadata(meta_file)
-        if metadata and cache_file.exists():
-            age = int(time.time()) - metadata.last_checked
-            if age < CACHE_TTL_SECONDS:
-                log_debug(f"Using cached {model_family} instructions (age: {age}s)", debug_type=DebugType.GENERAL)
-                return cache_file.read_text()
-
-    try:
-        with httpx.Client(timeout=30.0) as client:
-            latest_tag = _get_latest_release_tag(client)
-            instructions_url = f"{GITHUB_RAW_BASE}/{latest_tag}/codex-rs/core/{prompt_file}"
-
-            # Load existing metadata for conditional request
-            metadata = _load_cache_metadata(meta_file)
-            headers: dict[str, str] = {}
-
-            # Only use ETag if tag matches (different release = different content)
-            if metadata and metadata.tag == latest_tag and metadata.etag:
-                headers["If-None-Match"] = metadata.etag
-
-            response = client.get(instructions_url, headers=headers)
-
-            if response.status_code == 304 and cache_file.exists():
-                # Not modified, update last_checked and return cached
-                if metadata:
-                    metadata.last_checked = int(time.time())
-                    _save_cache_metadata(meta_file, metadata)
-                log_debug(f"Codex {model_family} instructions not modified", debug_type=DebugType.GENERAL)
-                return cache_file.read_text()
-
-            if response.status_code == 200:
-                instructions = response.text
-                new_etag = response.headers.get("etag")
-
-                # Save to cache
-                cache_file.parent.mkdir(parents=True, exist_ok=True)
-                cache_file.write_text(instructions)
-                _save_cache_metadata(
-                    meta_file,
-                    CacheMetadata(
-                        etag=new_etag,
-                        tag=latest_tag,
-                        last_checked=int(time.time()),
-                        url=instructions_url,
-                    ),
-                )
-
-                log_debug(f"Updated {model_family} instructions from GitHub", debug_type=DebugType.GENERAL)
-                return instructions
-
-            raise RuntimeError(f"HTTP {response.status_code}")
-
-    except Exception as e:
-        log_debug(f"Failed to fetch {model_family} instructions: {e}", debug_type=DebugType.GENERAL)
-
-        # Fallback to cached version
-        if cache_file.exists():
-            log_debug(f"Using cached {model_family} instructions (fallback)", debug_type=DebugType.GENERAL)
-            return cache_file.read_text()
-
-        # Last resort: use bundled prompt
-        bundled_path = _get_bundled_prompt_path(model_family)
-        if bundled_path:
-            log_debug(f"Using bundled {model_family} instructions (fallback)", debug_type=DebugType.GENERAL)
-            return _load_bundled_prompt(bundled_path)
-
-        raise RuntimeError(f"No Codex instructions available for {model_family}") from e
-
-
-def _get_bundled_prompt_path(model_family: ModelFamily) -> str | None:
-    """Get bundled prompt path for model family."""
-    if model_family == "gpt-5.2-codex":
-        return "prompts/prompt-codex-gpt-5-2-codex.md"
-    if model_family == "gpt-5.2":
-        return "prompts/prompt-codex-gpt-5-2.md"
-    if model_family in ("codex", "codex-max", "gpt-5.1"):
-        return "prompts/prompt-codex.md"
-    return None
-
-
-def invalidate_cache(model: str | None = None) -> None:
-    """Invalidate cached instructions to force refresh on next access.
-
-    Args:
-        model: If provided, only invalidate cache for this model's family.
-            If None, invalidate all cached instructions.
-    """
-    if model:
-        model_family = get_model_family(model)
-        meta_file = CACHE_DIR / f"{CACHE_FILES[model_family].replace('.md', '-meta.json')}"
-        if meta_file.exists():
-            meta_file.unlink()
-    else:
-        if CACHE_DIR.exists():
-            for meta_file in CACHE_DIR.glob("*-meta.json"):
-                meta_file.unlink()
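For reference, the caching scheme in this removed module boils down to a standard conditional GET: send the stored ETag, treat 304 as "cache still valid", and store the new ETag on 200. A simplified httpx sketch of that pattern (not a drop-in replacement for the deleted code):

```python
import httpx


def fetch_if_changed(url: str, etag: str | None) -> tuple[str | None, str | None]:
    """Return (body, etag); body is None when the cached copy is still valid."""
    headers = {"If-None-Match": etag} if etag else {}
    resp = httpx.get(url, headers=headers, timeout=30.0)
    if resp.status_code == 304:
        return None, etag
    resp.raise_for_status()
    return resp.text, resp.headers.get("etag")
```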