klaude-code 1.2.24__py3-none-any.whl → 1.2.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -265,7 +265,23 @@ async def run_interactive(init_config: AppInitConfig, session_id: str | None = N
     )

     # Set up input provider for interactive mode
-    input_provider: ui.InputProviderABC = ui.PromptToolkitInput(status_provider=_status_provider)
+    def _stop_rich_bottom_ui() -> None:
+        display = components.display
+        if isinstance(display, ui.REPLDisplay):
+            display.renderer.spinner_stop()
+            display.renderer.stop_bottom_live()
+        elif (
+            isinstance(display, ui.DebugEventDisplay)
+            and display.wrapped_display
+            and isinstance(display.wrapped_display, ui.REPLDisplay)
+        ):
+            display.wrapped_display.renderer.spinner_stop()
+            display.wrapped_display.renderer.stop_bottom_live()
+
+    input_provider: ui.InputProviderABC = ui.PromptToolkitInput(
+        status_provider=_status_provider,
+        pre_prompt=_stop_rich_bottom_ui,
+    )

    # --- Custom Ctrl+C handler: double-press within 2s to exit, single press shows toast ---
    def _show_toast_once() -> None:
@@ -56,6 +56,14 @@ def _is_gemini_flash_model(model_name: str | None) -> bool:
     return "gemini-3-flash" in model_name.lower()


+def should_auto_trigger_thinking(model_name: str | None) -> bool:
+    """Check if model should auto-trigger thinking selection on switch."""
+    if not model_name:
+        return False
+    model_lower = model_name.lower()
+    return "gpt-5" in model_lower or "gemini-3" in model_lower or "opus" in model_lower
+
+
 def _get_levels_for_responses(model_name: str | None) -> list[str]:
     """Get thinking levels for responses protocol."""
     if _is_codex_max_model(model_name):
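For orientation, the new matcher is a plain case-insensitive substring check; a few illustrative calls (hypothetical model names, not from the package's test suite):

    # Hypothetical model names, checked against should_auto_trigger_thinking
    # as defined above; matching is case-insensitive substring search.
    assert should_auto_trigger_thinking("GPT-5-Codex")      # contains "gpt-5"
    assert should_auto_trigger_thinking("gemini-3-flash")   # contains "gemini-3"
    assert should_auto_trigger_thinking("claude-opus-4")    # contains "opus"
    assert not should_auto_trigger_thinking("gpt-4o")
    assert not should_auto_trigger_thinking(None)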
@@ -69,7 +77,7 @@ def _get_levels_for_responses(model_name: str | None) -> list[str]:
     return RESPONSES_LEVELS


-def _format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
+def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
     """Format the current thinking configuration for display."""
     thinking = config.thinking
     if not thinking:
@@ -164,6 +172,31 @@ def _select_anthropic_thinking_sync() -> llm_param.Thinking | None:
    return None


+async def select_thinking_for_protocol(config: llm_param.LLMConfigParameter) -> llm_param.Thinking | None:
+    """Select thinking configuration based on the LLM protocol.
+
+    Returns the selected Thinking config, or None if user cancelled.
+    """
+    protocol = config.protocol
+    model_name = config.model
+
+    if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
+        return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
+
+    if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
+        return await asyncio.to_thread(_select_anthropic_thinking_sync)
+
+    if protocol == llm_param.LLMClientProtocol.OPENROUTER:
+        if _is_openrouter_model_with_reasoning_effort(model_name):
+            return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
+        return await asyncio.to_thread(_select_anthropic_thinking_sync)
+
+    if protocol == llm_param.LLMClientProtocol.OPENAI:
+        return await asyncio.to_thread(_select_anthropic_thinking_sync)
+
+    return None
+
+
 class ThinkingCommand(CommandABC):
     """Configure model thinking/reasoning level."""

@@ -185,40 +218,16 @@ class ThinkingCommand(CommandABC):
             return self._no_change_result(agent, "No profile configured")

         config = agent.profile.llm_client.get_llm_config()
-        protocol = config.protocol
-        model_name = config.model
-
-        current = _format_current_thinking(config)
-
-        # Select new thinking configuration based on protocol
-        new_thinking: llm_param.Thinking | None = None
-
-        if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
-            new_thinking = await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-
-        elif protocol == llm_param.LLMClientProtocol.ANTHROPIC:
-            new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-        elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
-            if _is_openrouter_model_with_reasoning_effort(model_name):
-                new_thinking = await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-            else:
-                new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-        elif protocol == llm_param.LLMClientProtocol.OPENAI:
-            # openai_compatible uses anthropic style
-            new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-        else:
-            return self._no_change_result(agent, f"Unsupported protocol: {protocol}")
+        current = format_current_thinking(config)

+        new_thinking = await select_thinking_for_protocol(config)
         if new_thinking is None:
             return self._no_change_result(agent, "(no change)")

         # Apply the new thinking configuration
         config.thinking = new_thinking
         agent.session.model_thinking = new_thinking
-        new_status = _format_current_thinking(config)
+        new_status = format_current_thinking(config)

         return CommandResult(
             events=[
klaude_code/const.py CHANGED
@@ -97,7 +97,11 @@ SUB_AGENT_RESULT_MAX_LINES = 50


 # UI refresh rate (frames per second) for debounced content streaming
-UI_REFRESH_RATE_FPS = 20
+UI_REFRESH_RATE_FPS = 10
+
+# Enable live area for streaming markdown (shows incomplete blocks being typed)
+# When False, only completed markdown blocks are displayed (more stable, less flicker)
+MARKDOWN_STREAM_LIVE_REPAINT_ENABLED = False

 # Number of lines to keep visible at bottom of markdown streaming window
 MARKDOWN_STREAM_LIVE_WINDOW = 6
@@ -117,16 +121,14 @@ STATUS_DEFAULT_TEXT = "Thinking …"
 # Status shimmer animation
 # Horizontal padding used when computing shimmer band position
 STATUS_SHIMMER_PADDING = 10
-# Duration in seconds for one full shimmer sweep across the text
-STATUS_SHIMMER_SWEEP_SECONDS = 2
 # Half-width of the shimmer band in characters
 STATUS_SHIMMER_BAND_HALF_WIDTH = 5.0
 # Scale factor applied to shimmer intensity when blending colors
 STATUS_SHIMMER_ALPHA_SCALE = 0.7

-# Spinner breathing animation
-# Duration in seconds for one full breathe-in + breathe-out cycle
-# Keep in sync with STATUS_SHIMMER_SWEEP_SECONDS for visual consistency
+# Spinner breathing and shimmer animation period
+# Duration in seconds for one full breathe-in + breathe-out cycle (breathing)
+# and one full shimmer sweep across the text (shimmer)
 SPINNER_BREATH_PERIOD_SECONDS: float = 2.0


@@ -14,6 +14,11 @@ from dataclasses import dataclass
 from pathlib import Path

 from klaude_code.command import dispatch_command
+from klaude_code.command.thinking_cmd import (
+    format_current_thinking,
+    select_thinking_for_protocol,
+    should_auto_trigger_thinking,
+)
 from klaude_code.config import load_config
 from klaude_code.core.agent import Agent, DefaultModelProfileProvider, ModelProfileProvider
 from klaude_code.core.manager import LLMClients, SubAgentManager
@@ -235,17 +240,55 @@ class ExecutorContext:
         agent.session.model_thinking = llm_config.thinking

         developer_item = model.DeveloperMessageItem(
-            content=f"switched to model: {operation.model_name}",
+            content=f"Switched to: {llm_config.model}",
             command_output=model.CommandOutput(command_name=commands.CommandName.MODEL),
         )
         agent.session.append_history([developer_item])

         await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
-        await self.emit_event(events.WelcomeEvent(llm_config=llm_config, work_dir=str(agent.session.work_dir)))

         if self._on_model_change is not None:
             self._on_model_change(llm_client.model_name)

+        if should_auto_trigger_thinking(llm_config.model):
+            thinking_op = op.ChangeThinkingOperation(session_id=operation.session_id)
+            await thinking_op.execute(handler=self)
+            # WelcomeEvent is already handled by the thinking change
+        else:
+            await self.emit_event(events.WelcomeEvent(llm_config=llm_config, work_dir=str(agent.session.work_dir)))
+
+    async def handle_change_thinking(self, operation: op.ChangeThinkingOperation) -> None:
+        """Handle a change thinking operation by prompting user to select thinking level."""
+        agent = await self._ensure_agent(operation.session_id)
+        if not agent.profile:
+            return
+
+        config = agent.profile.llm_client.get_llm_config()
+        current = format_current_thinking(config)
+
+        new_thinking = await select_thinking_for_protocol(config)
+
+        if new_thinking is None:
+            developer_item = model.DeveloperMessageItem(
+                content="(thinking unchanged)",
+                command_output=model.CommandOutput(command_name=commands.CommandName.THINKING),
+            )
+            await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
+            return
+
+        config.thinking = new_thinking
+        agent.session.model_thinking = new_thinking
+        new_status = format_current_thinking(config)
+
+        developer_item = model.DeveloperMessageItem(
+            content=f"Thinking changed: {current} -> {new_status}",
+            command_output=model.CommandOutput(command_name=commands.CommandName.THINKING),
+        )
+        agent.session.append_history([developer_item])
+
+        await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
+        await self.emit_event(events.WelcomeEvent(work_dir=str(agent.session.work_dir), llm_config=config))
+
     async def handle_clear_session(self, operation: op.ClearSessionOperation) -> None:
         agent = await self._ensure_agent(operation.session_id)
         new_session = Session.create(work_dir=agent.session.work_dir)
@@ -20,7 +20,7 @@ class ApplyPatchHandler:
     @classmethod
     async def handle_apply_patch(cls, patch_text: str) -> model.ToolResultItem:
         try:
-            output, diff_ui = await asyncio.to_thread(cls._apply_patch_in_thread, patch_text)
+            output, ui_extra = await asyncio.to_thread(cls._apply_patch_in_thread, patch_text)
         except apply_patch_module.DiffError as error:
             return model.ToolResultItem(status="error", output=str(error))
         except Exception as error:  # pragma: no cover # unexpected errors bubbled to tool result
@@ -28,11 +28,11 @@ class ApplyPatchHandler:
         return model.ToolResultItem(
             status="success",
             output=output,
-            ui_extra=diff_ui,
+            ui_extra=ui_extra,
         )

     @staticmethod
-    def _apply_patch_in_thread(patch_text: str) -> tuple[str, model.DiffUIExtra]:
+    def _apply_patch_in_thread(patch_text: str) -> tuple[str, model.ToolResultUIExtra]:
         ap = apply_patch_module
         normalized_start = patch_text.lstrip()
         if not normalized_start.startswith("*** Begin Patch"):
@@ -69,6 +69,16 @@ class ApplyPatchHandler:
         commit = ap.patch_to_commit(patch, orig)
         diff_ui = ApplyPatchHandler._commit_to_structured_diff(commit)

+        md_items: list[model.MarkdownDocUIExtra] = []
+        for change_path, change in commit.changes.items():
+            if change.type == apply_patch_module.ActionType.ADD and change_path.endswith(".md"):
+                md_items.append(
+                    model.MarkdownDocUIExtra(
+                        file_path=resolve_path(change_path),
+                        content=change.new_content or "",
+                    )
+                )
+
         def write_fn(path: str, content: str) -> None:
             resolved = resolve_path(path)
             if os.path.isdir(resolved):
@@ -102,6 +112,16 @@
             file_tracker.pop(resolved, None)

         ap.apply_commit(commit, write_fn, remove_fn)
+
+        # apply_patch can include multiple operations. If we added markdown files,
+        # return a MultiUIExtra so UI can render markdown previews (without showing a diff for those markdown adds).
+        if md_items:
+            items: list[model.MultiUIExtraItem] = []
+            items.extend(md_items)
+            if diff_ui.files:
+                items.append(diff_ui)
+            return "Done!", model.MultiUIExtra(items=items)
+
         return "Done!", diff_ui

     @staticmethod
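Downstream rendering does not need to special-case apply_patch for this: since a MultiUIExtra only ever nests plain (non-multi) items, a consumer can recurse one level on the discriminator. A minimal sketch (hypothetical helper, not part of the package):

    # Hypothetical consumer-side dispatch over the "type" discriminator.
    def render_ui_extra(ui_extra) -> None:
        if ui_extra.type == "multi":
            for item in ui_extra.items:  # each item is a non-multi UIExtra
                render_ui_extra(item)
        else:
            print(f"render a block of type {ui_extra.type!r}")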
@@ -110,6 +130,9 @@
         for path in sorted(commit.changes):
             change = commit.changes[path]
             if change.type == apply_patch_module.ActionType.ADD:
+                # For markdown files created via Add File, we render content via MarkdownDocUIExtra instead of a diff.
+                if path.endswith(".md"):
+                    continue
                 files.append(build_structured_file_diff("", change.new_content or "", file_path=path))
             elif change.type == apply_patch_module.ActionType.DELETE:
                 files.append(build_structured_file_diff(change.old_content or "", "", file_path=path))
@@ -151,6 +151,28 @@ class SessionStatusUIExtra(BaseModel):
     by_model: list["TaskMetadata"] = []


+MultiUIExtraItem = (
+    DiffUIExtra
+    | TodoListUIExtra
+    | SessionIdUIExtra
+    | MermaidLinkUIExtra
+    | TruncationUIExtra
+    | MarkdownDocUIExtra
+    | SessionStatusUIExtra
+)
+
+
+class MultiUIExtra(BaseModel):
+    """A container UIExtra that can render multiple UI blocks for a single tool result.
+
+    This is primarily used by tools like apply_patch which can perform multiple
+    operations in one invocation.
+    """
+
+    type: Literal["multi"] = "multi"
+    items: list[MultiUIExtraItem]
+
+
 ToolResultUIExtra = Annotated[
     DiffUIExtra
     | TodoListUIExtra
@@ -158,7 +180,8 @@ ToolResultUIExtra = Annotated[
     | MermaidLinkUIExtra
     | TruncationUIExtra
     | MarkdownDocUIExtra
-    | SessionStatusUIExtra,
+    | SessionStatusUIExtra
+    | MultiUIExtra,
     Field(discriminator="type"),
 ]

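For context, `Field(discriminator="type")` is standard Pydantic v2 tagged-union validation: the literal `type` value selects which concrete model parses the payload. A self-contained toy version (simplified stand-ins, not the package's real models; the inner union here is also discriminated and the "markdown_doc"/"diff" tag values are assumptions):

    from typing import Annotated, Literal

    from pydantic import BaseModel, Field

    class DiffBlock(BaseModel):
        type: Literal["diff"] = "diff"
        files: list[str]

    class MarkdownBlock(BaseModel):
        type: Literal["markdown_doc"] = "markdown_doc"  # assumed tag value
        content: str

    Block = Annotated[DiffBlock | MarkdownBlock, Field(discriminator="type")]

    class Multi(BaseModel):
        type: Literal["multi"] = "multi"
        items: list[Block]

    # The "type" field picks the concrete model during validation.
    multi = Multi.model_validate(
        {
            "type": "multi",
            "items": [
                {"type": "markdown_doc", "content": "# Notes"},
                {"type": "diff", "files": ["foo.py"]},
            ],
        }
    )
    assert isinstance(multi.items[0], MarkdownBlock)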
@@ -25,6 +25,7 @@ class OperationType(Enum):
     USER_INPUT = "user_input"
     RUN_AGENT = "run_agent"
     CHANGE_MODEL = "change_model"
+    CHANGE_THINKING = "change_thinking"
     CLEAR_SESSION = "clear_session"
     EXPORT_SESSION = "export_session"
     INTERRUPT = "interrupt"
@@ -77,6 +78,16 @@ class ChangeModelOperation(Operation):
         await handler.handle_change_model(self)


+class ChangeThinkingOperation(Operation):
+    """Operation for changing the thinking/reasoning configuration."""
+
+    type: OperationType = OperationType.CHANGE_THINKING
+    session_id: str
+
+    async def execute(self, handler: OperationHandler) -> None:
+        await handler.handle_change_thinking(self)
+
+
 class ClearSessionOperation(Operation):
     """Operation for clearing the active session and starting a new one."""

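The new operation follows the existing double-dispatch pattern: callers invoke `operation.execute(handler)`, and each Operation subclass routes itself to the matching `handle_*` method, so no central switch over `OperationType` is needed. A stripped-down sketch of the pattern (toy classes; the real operations are Pydantic models):

    import asyncio
    from typing import Protocol

    class Handler(Protocol):
        async def handle_change_thinking(self, operation: "ChangeThinking") -> None: ...

    class ChangeThinking:
        def __init__(self, session_id: str) -> None:
            self.session_id = session_id

        async def execute(self, handler: Handler) -> None:
            # The operation knows which handler method applies to it.
            await handler.handle_change_thinking(self)

    class Executor:
        async def handle_change_thinking(self, operation: ChangeThinking) -> None:
            print(f"prompt thinking selection for session {operation.session_id}")

    asyncio.run(ChangeThinking("s1").execute(Executor()))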
@@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, Protocol
 if TYPE_CHECKING:
     from klaude_code.protocol.op import (
         ChangeModelOperation,
+        ChangeThinkingOperation,
         ClearSessionOperation,
         ExportSessionOperation,
         InitAgentOperation,
@@ -35,6 +36,10 @@ class OperationHandler(Protocol):
         """Handle a change model operation."""
         ...

+    async def handle_change_thinking(self, operation: ChangeThinkingOperation) -> None:
+        """Handle a change thinking operation."""
+        ...
+
     async def handle_clear_session(self, operation: ClearSessionOperation) -> None:
         """Handle a clear session operation."""
         ...
@@ -57,3 +57,5 @@ class REPLDisplay(DisplayABC):
         # Spinner may already be stopped or not started; ignore.
         with contextlib.suppress(Exception):
             self.renderer.spinner_stop()
+        with contextlib.suppress(Exception):
+            self.renderer.stop_bottom_live()
@@ -122,6 +122,7 @@ class ActivityState:

     def __init__(self) -> None:
         self._composing: bool = False
+        self._buffer_length: int = 0
         self._tool_calls: dict[str, int] = {}

     @property
@@ -134,6 +135,11 @@

     def set_composing(self, composing: bool) -> None:
         self._composing = composing
+        if not composing:
+            self._buffer_length = 0
+
+    def set_buffer_length(self, length: int) -> None:
+        self._buffer_length = length

     def add_tool_call(self, tool_name: str) -> None:
         self._tool_calls[tool_name] = self._tool_calls.get(tool_name, 0) + 1
@@ -143,6 +149,7 @@

     def reset(self) -> None:
         self._composing = False
+        self._buffer_length = 0
         self._tool_calls = {}

     def get_activity_text(self) -> Text | None:
@@ -159,7 +166,12 @@
                 first = False
             return activity_text
         if self._composing:
-            return Text("Composing")
+            # Main status text with creative verb
+            text = Text.assemble(
+                ("Composing ", ThemeKey.STATUS_TEXT_BOLD),
+                (f"({self._buffer_length:,})", ThemeKey.STATUS_TEXT),
+            )
+            return text
         return None

@@ -206,6 +218,10 @@ class SpinnerStatusState:
         self._reasoning_status = None
         self._activity.set_composing(composing)

+    def set_buffer_length(self, length: int) -> None:
+        """Set buffer length for composing state display."""
+        self._activity.set_buffer_length(length)
+
     def add_tool_call(self, tool_name: str) -> None:
         """Add a tool call to the accumulator."""
         self._activity.add_tool_call(tool_name)
@@ -368,22 +384,20 @@ class DisplayEventHandler:

         first_delta = not self.thinking_stream.is_active
         if first_delta:
-            self.renderer.console.push_theme(self.renderer.themes.thinking_markdown_theme)
             mdstream = MarkdownStream(
                 mdargs={
                     "code_theme": self.renderer.themes.code_theme,
-                    "style": self.renderer.console.get_style(ThemeKey.THINKING),
+                    "style": ThemeKey.THINKING,
                 },
                 theme=self.renderer.themes.thinking_markdown_theme,
                 console=self.renderer.console,
-                spinner=self.renderer.spinner_renderable(),
+                live_sink=self.renderer.set_stream_renderable if const.MARKDOWN_STREAM_LIVE_REPAINT_ENABLED else None,
                 mark=THINKING_MESSAGE_MARK,
                 mark_style=ThemeKey.THINKING,
                 left_margin=const.MARKDOWN_LEFT_MARGIN,
                 markdown_class=ThinkingMarkdown,
             )
             self.thinking_stream.start(mdstream)
-            self.renderer.spinner_stop()

         self.thinking_stream.append(event.content)

@@ -414,17 +428,16 @@
                 mdargs={"code_theme": self.renderer.themes.code_theme},
                 theme=self.renderer.themes.markdown_theme,
                 console=self.renderer.console,
-                spinner=self.renderer.spinner_renderable(),
+                live_sink=self.renderer.set_stream_renderable if const.MARKDOWN_STREAM_LIVE_REPAINT_ENABLED else None,
                 mark=ASSISTANT_MESSAGE_MARK,
                 left_margin=const.MARKDOWN_LEFT_MARGIN,
             )
             self.assistant_stream.start(mdstream)
         self.assistant_stream.append(event.content)
+        self.spinner_status.set_buffer_length(len(self.assistant_stream.buffer))
+        if not first_delta:
+            self._update_spinner()
         if first_delta and self.assistant_stream.mdstream is not None:
-            # Stop spinner and immediately start MarkdownStream's Live
-            # to avoid flicker. The update() call starts the Live with
-            # the spinner embedded, providing seamless transition.
-            self.renderer.spinner_stop()
             self.assistant_stream.mdstream.update(self.assistant_stream.buffer)
             await self.stage_manager.transition_to(Stage.ASSISTANT)
             await self._flush_assistant_buffer(self.assistant_stream)
@@ -488,7 +501,6 @@
         self.spinner_status.reset()
         self.renderer.spinner_stop()
         self.renderer.console.print(Rule(characters="-", style=ThemeKey.LINES))
-        self.renderer.print()
         await self.stage_manager.transition_to(Stage.WAITING)
         self._maybe_notify_task_finish(event)

@@ -552,7 +564,6 @@
         assert mdstream is not None
         mdstream.update(normalize_thinking_content(self.thinking_stream.buffer), final=True)
         self.thinking_stream.finish()
-        self.renderer.console.pop_theme()
         self.renderer.print()
         self.renderer.spinner_start()

@@ -1,5 +1,6 @@
 from __future__ import annotations

+import contextlib
 import shutil
 from collections.abc import AsyncIterator, Callable
 from pathlib import Path
@@ -51,8 +52,12 @@ class PromptToolkitInput(InputProviderABC):
         self,
         prompt: str = USER_MESSAGE_MARK,
         status_provider: Callable[[], REPLStatusSnapshot] | None = None,
+        pre_prompt: Callable[[], None] | None = None,
+        post_prompt: Callable[[], None] | None = None,
     ):  # ▌
         self._status_provider = status_provider
+        self._pre_prompt = pre_prompt
+        self._post_prompt = post_prompt
         self._is_light_terminal_background = is_light_terminal_background(timeout=0.2)

         project = str(Path.cwd()).strip("/").replace("/", "-")
@@ -80,7 +85,7 @@
             [(INPUT_PROMPT_STYLE, prompt)],
             history=FileHistory(str(history_path)),
             multiline=True,
-            cursor=CursorShape.BEAM,
+            cursor=CursorShape.BLINKING_BEAM,
             prompt_continuation=[(INPUT_PROMPT_STYLE, " ")],
             key_bindings=kb,
             completer=ThreadedCompleter(create_repl_completer()),
@@ -202,8 +207,14 @@
     @override
     async def iter_inputs(self) -> AsyncIterator[UserInputPayload]:
         while True:
+            if self._pre_prompt is not None:
+                with contextlib.suppress(Exception):
+                    self._pre_prompt()
             with patch_stdout():
                 line: str = await self._session.prompt_async(placeholder=self._render_input_placeholder())
+            if self._post_prompt is not None:
+                with contextlib.suppress(Exception):
+                    self._post_prompt()

             # Extract images referenced in the input text
             images = extract_images_from_text(line)
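Taken together with the runner change above, `pre_prompt` lets the REPL tear down Rich's bottom-anchored live region before prompt_toolkit takes over the terminal, and both hooks are wrapped in `contextlib.suppress(Exception)` so a failing hook cannot break the input loop. A minimal sketch of the contract (toy blocking loop, not the prompt_toolkit-backed implementation):

    import contextlib
    from collections.abc import Callable

    def input_loop(
        pre_prompt: Callable[[], None] | None = None,
        post_prompt: Callable[[], None] | None = None,
    ) -> None:
        while True:
            if pre_prompt is not None:
                # Hook errors are swallowed: the prompt must always appear.
                with contextlib.suppress(Exception):
                    pre_prompt()
            line = input("> ")
            if post_prompt is not None:
                with contextlib.suppress(Exception):
                    post_prompt()
            if line == "exit":
                break

    input_loop(pre_prompt=lambda: print("[stop spinner / bottom live]"))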