klaude-code 1.4.2__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/cli/main.py +75 -11
- klaude_code/cli/runtime.py +171 -34
- klaude_code/command/__init__.py +4 -0
- klaude_code/command/help_cmd.py +2 -1
- klaude_code/command/model_cmd.py +3 -5
- klaude_code/command/model_select.py +84 -0
- klaude_code/command/registry.py +23 -0
- klaude_code/command/resume_cmd.py +52 -2
- klaude_code/command/thinking_cmd.py +30 -199
- klaude_code/config/select_model.py +47 -97
- klaude_code/config/thinking.py +255 -0
- klaude_code/core/executor.py +53 -63
- klaude_code/protocol/commands.py +11 -0
- klaude_code/protocol/op.py +15 -0
- klaude_code/session/__init__.py +2 -2
- klaude_code/session/selector.py +33 -61
- klaude_code/ui/modes/repl/completers.py +48 -58
- klaude_code/ui/modes/repl/event_handler.py +2 -1
- klaude_code/ui/modes/repl/input_prompt_toolkit.py +498 -50
- klaude_code/ui/modes/repl/key_bindings.py +30 -10
- klaude_code/ui/renderers/metadata.py +3 -6
- klaude_code/ui/renderers/user_input.py +18 -1
- klaude_code/ui/rich/theme.py +2 -2
- klaude_code/ui/terminal/notifier.py +42 -0
- klaude_code/ui/terminal/selector.py +419 -136
- {klaude_code-1.4.2.dist-info → klaude_code-1.5.0.dist-info}/METADATA +1 -1
- {klaude_code-1.4.2.dist-info → klaude_code-1.5.0.dist-info}/RECORD +29 -27
- {klaude_code-1.4.2.dist-info → klaude_code-1.5.0.dist-info}/WHEEL +0 -0
- {klaude_code-1.4.2.dist-info → klaude_code-1.5.0.dist-info}/entry_points.txt +0 -0
--- a/klaude_code/ui/modes/repl/input_prompt_toolkit.py
+++ b/klaude_code/ui/modes/repl/input_prompt_toolkit.py
@@ -1,21 +1,41 @@
 from __future__ import annotations
 
+import asyncio
 import contextlib
 import shutil
-
+import time
+from collections.abc import AsyncIterator, Awaitable, Callable
 from pathlib import Path
 from typing import NamedTuple, override
 
+import prompt_toolkit.layout.menus as pt_menus
 from prompt_toolkit import PromptSession
-from prompt_toolkit.
+from prompt_toolkit.application.current import get_app
+from prompt_toolkit.buffer import Buffer
+from prompt_toolkit.completion import Completion, ThreadedCompleter
 from prompt_toolkit.cursor_shapes import CursorShape
-from prompt_toolkit.
+from prompt_toolkit.data_structures import Point
+from prompt_toolkit.filters import Condition
+from prompt_toolkit.formatted_text import FormattedText, StyleAndTextTuples, to_formatted_text
 from prompt_toolkit.history import FileHistory
-from prompt_toolkit.
+from prompt_toolkit.key_binding import merge_key_bindings
+from prompt_toolkit.layout import Float
+from prompt_toolkit.layout.containers import Container, FloatContainer, Window
+from prompt_toolkit.layout.controls import BufferControl, UIContent
 from prompt_toolkit.layout.menus import CompletionsMenu, MultiColumnCompletionsMenu
 from prompt_toolkit.patch_stdout import patch_stdout
 from prompt_toolkit.styles import Style
-
+from prompt_toolkit.utils import get_cwidth
+
+from klaude_code.config import load_config
+from klaude_code.config.config import ModelEntry
+from klaude_code.config.thinking import (
+    format_current_thinking,
+    get_thinking_picker_data,
+    parse_thinking_value,
+)
+from klaude_code.protocol import llm_param
+from klaude_code.protocol.commands import CommandInfo
 from klaude_code.protocol.model import UserInputPayload
 from klaude_code.ui.core.input import InputProviderABC
 from klaude_code.ui.modes.repl.clipboard import capture_clipboard_tag, copy_to_clipboard, extract_images_from_text
@@ -23,6 +43,7 @@ from klaude_code.ui.modes.repl.completers import AT_TOKEN_PATTERN, create_repl_c
 from klaude_code.ui.modes.repl.key_bindings import create_key_bindings
 from klaude_code.ui.renderers.user_input import USER_MESSAGE_MARK
 from klaude_code.ui.terminal.color import is_light_terminal_background
+from klaude_code.ui.terminal.selector import SelectItem, SelectOverlay, build_model_select_items
 
 
 class REPLStatusSnapshot(NamedTuple):
@@ -31,9 +52,9 @@ class REPLStatusSnapshot(NamedTuple):
     update_message: str | None = None
 
 
-COMPLETION_SELECTED_DARK_BG = "
-COMPLETION_SELECTED_LIGHT_BG = "
-COMPLETION_SELECTED_UNKNOWN_BG = "
+COMPLETION_SELECTED_DARK_BG = "ansigreen"
+COMPLETION_SELECTED_LIGHT_BG = "ansigreen"
+COMPLETION_SELECTED_UNKNOWN_BG = "ansigreen"
 COMPLETION_MENU = "ansibrightblack"
 INPUT_PROMPT_STYLE = "ansimagenta bold"
 PLACEHOLDER_TEXT_STYLE_DARK_BG = "fg:#5a5a5a italic"
@@ -44,6 +65,11 @@ PLACEHOLDER_SYMBOL_STYLE_LIGHT_BG = "bg:#e6e6e6 fg:#7a7a7a"
 PLACEHOLDER_SYMBOL_STYLE_UNKNOWN_BG = "bg:#2a2a2a fg:#8a8a8a"
 
 
+# ---------------------------------------------------------------------------
+# Layout helpers
+# ---------------------------------------------------------------------------
+
+
 def _left_align_completion_menus(container: Container) -> None:
     """Force completion menus to render at column 0.
 
@@ -52,7 +78,6 @@ def _left_align_completion_menus(container: Container) -> None:
     We walk the layout tree and rewrite the Float positioning for completion menus
     to keep them fixed at the left edge.
     """
-
     if isinstance(container, FloatContainer):
         for flt in container.floats:
             if isinstance(flt.content, (CompletionsMenu, MultiColumnCompletionsMenu)):
@@ -63,6 +88,140 @@ def _left_align_completion_menus(container: Container) -> None:
         _left_align_completion_menus(child)
 
 
+def _find_first_float_container(container: Container) -> FloatContainer | None:
+    if isinstance(container, FloatContainer):
+        return container
+    for child in container.get_children():
+        found = _find_first_float_container(child)
+        if found is not None:
+            return found
+    return None
+
+
+def _find_window_for_buffer(container: Container, target_buffer: Buffer) -> Window | None:
+    if isinstance(container, Window):
+        content = container.content
+        if isinstance(content, BufferControl) and content.buffer is target_buffer:
+            return container
+
+    for child in container.get_children():
+        found = _find_window_for_buffer(child, target_buffer)
+        if found is not None:
+            return found
+    return None
+
+
+def _patch_completion_menu_controls(container: Container) -> None:
+    """Replace prompt_toolkit completion menu controls with customized versions."""
+    if isinstance(container, Window):
+        content = container.content
+        if isinstance(content, pt_menus.CompletionsMenuControl) and not isinstance(
+            content, _KlaudeCompletionsMenuControl
+        ):
+            container.content = _KlaudeCompletionsMenuControl()
+
+    for child in container.get_children():
+        _patch_completion_menu_controls(child)
+
+
+# ---------------------------------------------------------------------------
+# Custom completion menu control
+# ---------------------------------------------------------------------------
+
+
+class _KlaudeCompletionsMenuControl(pt_menus.CompletionsMenuControl):
+    """CompletionsMenuControl with stable 2-char left prefix.
+
+    Requirements:
+    - Add a 2-character prefix for every row.
+    - Render "-> " for the selected row, and "  " for non-selected rows.
+
+    Keep completion text unstyled so that the menu's current-row style can
+    override it entirely.
+    """
+
+    _PREFIX_WIDTH = 2
+
+    def _get_menu_width(self, max_width: int, complete_state: pt_menus.CompletionState) -> int:  # pyright: ignore[reportPrivateImportUsage]
+        """Return the width of the main column.
+
+        This is prompt_toolkit's default implementation, except we reserve one
+        extra character for the 2-char prefix ("-> "/"  ").
+        """
+        return min(
+            max_width,
+            max(
+                self.MIN_WIDTH,
+                max(get_cwidth(c.display_text) for c in complete_state.completions) + 3,
+            ),
+        )
+
+    def create_content(self, width: int, height: int) -> UIContent:
+        complete_state = get_app().current_buffer.complete_state
+        if complete_state:
+            completions = complete_state.completions
+            index = complete_state.complete_index
+
+            menu_width = self._get_menu_width(width, complete_state)
+            menu_meta_width = self._get_menu_meta_width(width - menu_width, complete_state)
+            show_meta = self._show_meta(complete_state)
+
+            def get_line(i: int) -> StyleAndTextTuples:
+                completion = completions[i]
+                is_current_completion = i == index
+
+                result = self._get_menu_item_fragments_with_cursor(
+                    completion,
+                    is_current_completion,
+                    menu_width,
+                    space_after=True,
+                )
+                if show_meta:
+                    result += self._get_menu_item_meta_fragments(
+                        completion,
+                        is_current_completion,
+                        menu_meta_width,
+                    )
+                return result
+
+            return UIContent(
+                get_line=get_line,
+                cursor_position=Point(x=0, y=index or 0),
+                line_count=len(completions),
+            )
+
+        return UIContent()
+
+    def _get_menu_item_fragments_with_cursor(
+        self,
+        completion: Completion,
+        is_current_completion: bool,
+        width: int,
+        *,
+        space_after: bool = False,
+    ) -> StyleAndTextTuples:
+        if is_current_completion:
+            style_str = f"class:completion-menu.completion.current {completion.style} {completion.selected_style}"
+            prefix = "→ "
+        else:
+            style_str = "class:completion-menu.completion " + completion.style
+            prefix = "  "
+
+        max_text_width = width - self._PREFIX_WIDTH - (1 if space_after else 0)
+        text, text_width = pt_menus._trim_formatted_text(completion.display, max_text_width)  # pyright: ignore[reportPrivateUsage]
+        padding = " " * (width - self._PREFIX_WIDTH - text_width)
+
+        return to_formatted_text(
+            [("", prefix), *text, ("", padding)],
+            style=style_str,
+        )
+
+
+# ---------------------------------------------------------------------------
+# PromptToolkitInput
+# ---------------------------------------------------------------------------
+
+
 class PromptToolkitInput(InputProviderABC):
     def __init__(
         self,
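The `_get_menu_width` override above reserves room for the two-character row prefix by measuring labels with `get_cwidth`, which counts terminal cells rather than characters, so double-width (e.g. CJK) completion labels still line up with the `→ `/`  ` prefix. A small standalone sketch of the same arithmetic; `MIN_WIDTH` is a stand-in for `CompletionsMenuControl.MIN_WIDTH` and the labels are invented:

```python
from prompt_toolkit.utils import get_cwidth

MIN_WIDTH = 7      # stand-in for CompletionsMenuControl.MIN_WIDTH
PREFIX_WIDTH = 2   # the "→ " / "  " prefix reserved by the control above


def menu_width(labels: list[str], max_width: int) -> int:
    # Same shape as _get_menu_width: widest label in cells, plus 3 cells of
    # slack, clamped to the space the layout offers.
    return min(max_width, max(MIN_WIDTH, max(get_cwidth(s) for s in labels) + 3))


labels = ["/model", "/thinking", "/日本語"]  # hypothetical completion labels
width = menu_width(labels, max_width=40)
text_cells = width - PREFIX_WIDTH - 1  # cells left for the label once prefix and trailing space are taken
print(width, text_cells)
```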
@@ -71,27 +230,57 @@ class PromptToolkitInput(InputProviderABC):
         pre_prompt: Callable[[], None] | None = None,
         post_prompt: Callable[[], None] | None = None,
         is_light_background: bool | None = None,
-
+        on_change_model: Callable[[str], Awaitable[None]] | None = None,
+        get_current_model_config_name: Callable[[], str | None] | None = None,
+        on_change_thinking: Callable[[llm_param.Thinking], Awaitable[None]] | None = None,
+        get_current_llm_config: Callable[[], llm_param.LLMConfigParameter | None] | None = None,
+        command_info_provider: Callable[[], list[CommandInfo]] | None = None,
+    ):
         self._status_provider = status_provider
         self._pre_prompt = pre_prompt
         self._post_prompt = post_prompt
+        self._on_change_model = on_change_model
+        self._get_current_model_config_name = get_current_model_config_name
+        self._on_change_thinking = on_change_thinking
+        self._get_current_llm_config = get_current_llm_config
+        self._command_info_provider = command_info_provider
+
+        self._toast_message: str | None = None
+        self._toast_until: float = 0.0
+
         # Use provided value if available to avoid redundant TTY queries that may interfere
         # with prompt_toolkit's terminal state after interactive UIs have been used.
         self._is_light_terminal_background = (
            is_light_background if is_light_background is not None else is_light_terminal_background(timeout=0.2)
        )
 
+        self._session = self._build_prompt_session(prompt)
+        self._setup_model_picker()
+        self._setup_thinking_picker()
+        self._apply_layout_customizations()
+
+    def _build_prompt_session(self, prompt: str) -> PromptSession[str]:
+        """Build the prompt_toolkit PromptSession with key bindings and styles."""
         project = str(Path.cwd()).strip("/").replace("/", "-")
         history_path = Path.home() / ".klaude" / "projects" / project / "input" / "input_history.txt"
-
         history_path.parent.mkdir(parents=True, exist_ok=True)
         history_path.touch(exist_ok=True)
 
-        #
+        # Model and thinking pickers will be set up later; create placeholder condition
+        self._model_picker: SelectOverlay[str] | None = None
+        self._thinking_picker: SelectOverlay[str] | None = None
+        input_enabled = Condition(
+            lambda: (self._model_picker is None or not self._model_picker.is_open)
+            and (self._thinking_picker is None or not self._thinking_picker.is_open)
+        )
+
         kb = create_key_bindings(
             capture_clipboard_tag=capture_clipboard_tag,
             copy_to_clipboard=copy_to_clipboard,
             at_token_pattern=AT_TOKEN_PATTERN,
+            input_enabled=input_enabled,
+            open_model_picker=self._open_model_picker,
+            open_thinking_picker=self._open_thinking_picker,
         )
 
         # Select completion selected color based on terminal background
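The new `input_enabled` Condition and the `open_model_picker`/`open_thinking_picker` callables are handed to `create_key_bindings` (whose body lives in `key_bindings.py`, not this hunk). The usual prompt_toolkit pattern for such a flag is to use it as a `filter=` on bindings, so ordinary editing keys go quiet while an overlay is open and a dedicated key (the placeholder text later in this diff advertises `ctrl-l` for models) opens the picker. A hedged sketch of that pattern with made-up handlers, not klaude-code's actual bindings:

```python
from prompt_toolkit.filters import Condition
from prompt_toolkit.key_binding import KeyBindings

picker_open = False  # stand-in for SelectOverlay.is_open

input_enabled = Condition(lambda: not picker_open)

kb = KeyBindings()


@kb.add("c-l", filter=input_enabled)
def _open_model_picker(event) -> None:
    # The real code would invoke the open_model_picker callback passed into
    # create_key_bindings(); here we only flip a module-level flag.
    global picker_open
    picker_open = True


@kb.add("enter", filter=input_enabled)
def _accept(event) -> None:
    # Ordinary input handling only fires while no overlay is open.
    event.current_buffer.validate_and_handle()
```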
@@ -102,14 +291,14 @@ class PromptToolkitInput(InputProviderABC):
         else:
             completion_selected = COMPLETION_SELECTED_UNKNOWN_BG
 
-
+        return PromptSession(
             [(INPUT_PROMPT_STYLE, prompt)],
             history=FileHistory(str(history_path)),
             multiline=True,
             cursor=CursorShape.BLINKING_BEAM,
             prompt_continuation=[(INPUT_PROMPT_STYLE, " ")],
             key_bindings=kb,
-            completer=ThreadedCompleter(create_repl_completer()),
+            completer=ThreadedCompleter(create_repl_completer(command_info_provider=self._command_info_provider)),
             complete_while_typing=True,
             erase_when_done=True,
             mouse_support=False,
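`create_repl_completer` now takes a `command_info_provider` callable, so the slash-command completer can ask for the live command list at completion time instead of baking in a static one; the real implementation is in `completers.py`, which also changed in this release but is not part of this hunk. A hypothetical provider-backed completer showing the general shape; the `CommandInfo` dataclass here is a simplified stand-in for `klaude_code.protocol.commands.CommandInfo`:

```python
from collections.abc import Callable, Iterable
from dataclasses import dataclass

from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document


@dataclass
class CommandInfo:  # simplified stand-in, not the real protocol class
    name: str
    summary: str


class ProviderBackedCommandCompleter(Completer):
    """Complete "/command" tokens from a provider callable (illustrative only)."""

    def __init__(self, provider: Callable[[], list[CommandInfo]]) -> None:
        self._provider = provider

    def get_completions(self, document: Document, complete_event: CompleteEvent) -> Iterable[Completion]:
        text = document.text_before_cursor
        if not text.startswith("/"):
            return
        prefix = text[1:]
        for cmd in self._provider():
            if cmd.name.startswith(prefix):
                yield Completion(
                    "/" + cmd.name,
                    start_position=-len(text),
                    display="/" + cmd.name,
                    display_meta=cmd.summary,
                )


completer = ProviderBackedCommandCompleter(lambda: [CommandInfo("model", "switch the active model")])
```

Wrapping such a completer in `ThreadedCompleter`, as the diff does, keeps the UI responsive if the provider ever does non-trivial work.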
@@ -119,53 +308,303 @@ class PromptToolkitInput(InputProviderABC):
                     "completion-menu.border": "bg:default",
                     "scrollbar.background": "bg:default",
                     "scrollbar.button": "bg:default",
-                    "completion-menu.completion":
+                    "completion-menu.completion": "bg:default fg:default",
                     "completion-menu.meta.completion": f"bg:default fg:{COMPLETION_MENU}",
-                    "completion-menu.completion.current": f"noreverse bg:default fg:{completion_selected}
-                    "completion-menu.meta.completion.current": f"bg:default fg:{completion_selected}
+                    "completion-menu.completion.current": f"noreverse bg:default fg:{completion_selected}",
+                    "completion-menu.meta.completion.current": f"bg:default fg:{completion_selected}",
+                    # Embedded selector overlay styles
+                    "pointer": "ansigreen",
+                    "highlighted": "ansigreen",
+                    "text": "ansibrightblack",
+                    "question": "bold",
+                    "msg": "",
+                    "meta": "fg:ansibrightblack",
+                    "frame.border": "fg:ansibrightblack",
+                    "search_prefix": "fg:ansibrightblack",
+                    "search_placeholder": "fg:ansibrightblack italic",
+                    "search_input": "",
+                    # Empty bottom-toolbar style
+                    "bottom-toolbar": "bg:default fg:default noreverse",
+                    "bottom-toolbar.text": "bg:default fg:default noreverse",
                 }
             ),
         )
 
-
+    def _setup_model_picker(self) -> None:
+        """Initialize the model picker overlay and attach it to the layout."""
+        model_picker = SelectOverlay[str](
+            pointer="→",
+            use_search_filter=True,
+            search_placeholder="type to search",
+            list_height=10,
+            on_select=self._handle_model_selected,
+        )
+        self._model_picker = model_picker
+
+        # Merge overlay key bindings with existing session key bindings
+        existing_kb = self._session.key_bindings
+        if existing_kb is not None:
+            merged_kb = merge_key_bindings([existing_kb, model_picker.key_bindings])
+            self._session.key_bindings = merged_kb
+
+        # Attach overlay as a float above the prompt
+        with contextlib.suppress(Exception):
+            root = self._session.app.layout.container
+            overlay_float = Float(content=model_picker.container, bottom=1, left=0)
+
+            # Always attach this overlay at the top level so it is not clipped by
+            # small nested FloatContainers (e.g. the completion-menu container).
+            if isinstance(root, FloatContainer):
+                root.floats.append(overlay_float)
+            else:
+                self._session.app.layout.container = FloatContainer(content=root, floats=[overlay_float])
+
+    def _setup_thinking_picker(self) -> None:
+        """Initialize the thinking picker overlay and attach it to the layout."""
+        thinking_picker = SelectOverlay[str](
+            pointer="→",
+            use_search_filter=False,
+            list_height=6,
+            on_select=self._handle_thinking_selected,
+        )
+        self._thinking_picker = thinking_picker
+
+        # Merge overlay key bindings with existing session key bindings
+        existing_kb = self._session.key_bindings
+        if existing_kb is not None:
+            merged_kb = merge_key_bindings([existing_kb, thinking_picker.key_bindings])
+            self._session.key_bindings = merged_kb
+
+        # Attach overlay as a float above the prompt
+        with contextlib.suppress(Exception):
+            root = self._session.app.layout.container
+            overlay_float = Float(content=thinking_picker.container, bottom=1, left=0)
+
+            if isinstance(root, FloatContainer):
+                root.floats.append(overlay_float)
+            else:
+                self._session.app.layout.container = FloatContainer(content=root, floats=[overlay_float])
+
+    def _apply_layout_customizations(self) -> None:
+        """Apply layout customizations after session is created."""
+        # Make the Escape key feel responsive
+        with contextlib.suppress(Exception):
+            self._session.app.ttimeoutlen = 0.05
+
+        # Keep completion popups left-aligned
         with contextlib.suppress(Exception):
             _left_align_completion_menus(self._session.app.layout.container)
 
-
-
+        # Customize completion rendering
+        with contextlib.suppress(Exception):
+            _patch_completion_menu_controls(self._session.app.layout.container)
 
-
-
-
-                    return
-                if not state.completions:  # type: ignore[reportUnknownMemberType]
-                    return
-                if state.complete_index is None:  # type: ignore[reportUnknownMemberType]
-                    state.complete_index = 0  # type: ignore[reportUnknownMemberType]
-                with contextlib.suppress(Exception):
-                    self._session.app.invalidate()
-            except Exception:
-                return
+        # Reserve more vertical space while the model picker overlay is open.
+        # prompt_toolkit's default multiline prompt caps out at ~9 lines.
+        self._patch_prompt_height_for_model_picker()
 
-        # Ensure
-
-        self._session.default_buffer.on_completions_changed += _select_first_completion_on_open
+        # Ensure completion menu has default selection
+        self._session.default_buffer.on_completions_changed += self._select_first_completion_on_open  # pyright: ignore[reportUnknownMemberType]
 
-    def
-
-
-            return None
+    def _patch_prompt_height_for_model_picker(self) -> None:
+        if self._model_picker is None and self._thinking_picker is None:
+            return
 
+        with contextlib.suppress(Exception):
+            root = self._session.app.layout.container
+            input_window = _find_window_for_buffer(root, self._session.default_buffer)
+            if input_window is None:
+                return
+
+            original_height = input_window.height
+
+            def _height():  # type: ignore[no-untyped-def]
+                picker_open = (self._model_picker is not None and self._model_picker.is_open) or (
+                    self._thinking_picker is not None and self._thinking_picker.is_open
+                )
+                if picker_open:
+                    # Target 20 rows, but cap to the current terminal size.
+                    # Leave a small buffer to avoid triggering "Window too small".
+                    try:
+                        rows = get_app().output.get_size().rows
+                    except Exception:
+                        rows = 0
+                    return max(3, min(20, rows - 2))
+
+                if callable(original_height):
+                    return original_height()
+                return original_height
+
+            input_window.height = _height
+
+    def _select_first_completion_on_open(self, buf) -> None:  # type: ignore[no-untyped-def]
+        """Default to selecting the first completion without inserting it."""
         try:
-
-
-
-
+            state = buf.complete_state  # type: ignore[reportUnknownMemberType]
+            if state is None:
+                return
+            if not state.completions:  # type: ignore[reportUnknownMemberType]
+                return
+            if state.complete_index is None:  # type: ignore[reportUnknownMemberType]
+                state.complete_index = 0  # type: ignore[reportUnknownMemberType]
+            with contextlib.suppress(Exception):
+                self._session.app.invalidate()
+        except Exception:
+            return
+
+    # -------------------------------------------------------------------------
+    # Model picker
+    # -------------------------------------------------------------------------
+
+    def _build_model_picker_items(self) -> tuple[list[SelectItem[str]], str | None]:
+        config = load_config()
+        models: list[ModelEntry] = sorted(
+            config.iter_model_entries(only_available=True),
+            key=lambda m: m.model_name.lower(),
+        )
+        if not models:
+            return [], None
+
+        items = build_model_select_items(models)
+
+        initial = None
+        if self._get_current_model_config_name is not None:
+            with contextlib.suppress(Exception):
+                initial = self._get_current_model_config_name()
+        if initial is None:
+            initial = config.main_model
+        return items, initial
+
+    def _open_model_picker(self) -> None:
+        if self._model_picker is None:
+            return
+        items, initial = self._build_model_picker_items()
+        if not items:
+            return
+        self._model_picker.set_content(message="Select a model:", items=items, initial_value=initial)
+        self._model_picker.open()
+
+    async def _handle_model_selected(self, model_name: str) -> None:
+        current = None
+        if self._get_current_model_config_name is not None:
+            with contextlib.suppress(Exception):
+                current = self._get_current_model_config_name()
+        if current is not None and model_name == current:
+            return
+        if self._on_change_model is None:
+            return
+        await self._on_change_model(model_name)
+        self._set_toast(f"model: {model_name}")
+
+    # -------------------------------------------------------------------------
+    # Thinking picker
+    # -------------------------------------------------------------------------
+
+    def _build_thinking_picker_items(
+        self, config: llm_param.LLMConfigParameter
+    ) -> tuple[list[SelectItem[str]], str | None]:
+        data = get_thinking_picker_data(config)
+        if data is None:
+            return [], None
+
+        items: list[SelectItem[str]] = [
+            SelectItem(title=[("class:text", opt.label + "\n")], value=opt.value, search_text=opt.label)
+            for opt in data.options
+        ]
+        return items, data.current_value
+
+    def _open_thinking_picker(self) -> None:
+        if self._thinking_picker is None:
+            return
+        if self._get_current_llm_config is None:
+            return
+        config = self._get_current_llm_config()
+        if config is None:
+            return
+        items, initial = self._build_thinking_picker_items(config)
+        if not items:
+            return
+        current = format_current_thinking(config)
+        self._thinking_picker.set_content(
+            message=f"Select thinking level (current: {current}):", items=items, initial_value=initial
+        )
+        self._thinking_picker.open()
+
+    async def _handle_thinking_selected(self, value: str) -> None:
+        if self._on_change_thinking is None:
+            return
+
+        new_thinking = parse_thinking_value(value)
+        if new_thinking is None:
+            return
+
+        # Build toast label
+        if value.startswith("effort:"):
+            toast_label = value[7:]
+        elif value.startswith("budget:"):
+            budget = int(value[7:])
+            toast_label = "off" if budget == 0 else f"{budget} tokens"
+        else:
+            toast_label = "updated"
+
+        await self._on_change_thinking(new_thinking)
+        self._set_toast(f"thinking: {toast_label}")
+
+    # -------------------------------------------------------------------------
+    # Toast notifications
+    # -------------------------------------------------------------------------
+
+    def _set_toast(self, message: str, *, duration_sec: float = 2.0) -> None:
+        self._toast_message = message
+        self._toast_until = time.monotonic() + duration_sec
+        with contextlib.suppress(Exception):
+            self._session.app.invalidate()
+
+        async def _clear_later() -> None:
+            await asyncio.sleep(duration_sec)
+            self._toast_message = None
+            self._toast_until = 0.0
+            with contextlib.suppress(Exception):
+                self._session.app.invalidate()
+
+        with contextlib.suppress(Exception):
+            self._session.app.create_background_task(_clear_later())
 
-
-
+    # -------------------------------------------------------------------------
+    # Bottom toolbar
+    # -------------------------------------------------------------------------
 
-
+    def _get_bottom_toolbar(self) -> FormattedText | None:
+        """Return bottom toolbar content.
+
+        This is used inside the prompt_toolkit Application, so avoid printing or
+        doing any blocking IO here.
+        """
+        update_message: str | None = None
+        if self._status_provider is not None:
+            try:
+                status = self._status_provider()
+                update_message = status.update_message
+            except (AttributeError, RuntimeError):
+                update_message = None
+
+        toast: str | None = None
+        now = time.monotonic()
+        if self._toast_message is not None and now < self._toast_until:
+            toast = self._toast_message
+
+        # If nothing to show, return a blank line to actively clear any previously
+        # rendered content. (When `bottom_toolbar` is a callable, prompt_toolkit
+        # will still reserve the toolbar line.)
+        if not toast and not update_message:
+            try:
+                terminal_width = shutil.get_terminal_size().columns
+            except (OSError, ValueError):
+                terminal_width = 0
+            return FormattedText([("", " " * max(0, terminal_width))])
+
+        parts = [p for p in [toast, update_message] if p]
+        left_text = " " + " · ".join(parts)
         try:
             terminal_width = shutil.get_terminal_size().columns
             padding = " " * max(0, terminal_width - len(left_text))
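Two prompt_toolkit mechanics do most of the work in this hunk: the picker is injected by appending a `Float` to the root `FloatContainer` (wrapping the root if it is not one already), and the prompt gains vertical room while a picker is open because `Window.height` accepts a callable that is re-evaluated on every render. A condensed, self-contained sketch of both; the overlay content here is a plain `Window` rather than klaude-code's `SelectOverlay`:

```python
from prompt_toolkit.application import get_app
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.containers import Float, FloatContainer, HSplit, Window
from prompt_toolkit.layout.controls import FormattedTextControl

picker_open = False  # stand-in for SelectOverlay.is_open


def input_height() -> int:
    # Re-evaluated on every render: grow the input area while the picker is
    # open, capped to the terminal height minus a small buffer (compare
    # _patch_prompt_height_for_model_picker above).
    if picker_open:
        try:
            rows = get_app().output.get_size().rows
        except Exception:
            rows = 0
        return max(3, min(20, rows - 2))
    return 1


input_window = Window(content=FormattedTextControl("> type here"), height=input_height)
root = HSplit([input_window])

overlay = Float(
    content=Window(content=FormattedTextControl("Select a model:"), height=5),
    bottom=1,
    left=0,
)

# Attach the float at the top level so it is not clipped by nested containers,
# wrapping the root in a FloatContainer when necessary.
container = root if isinstance(root, FloatContainer) else FloatContainer(content=root, floats=[])
container.floats.append(overlay)

layout = Layout(container)  # pass to an Application(...) to render
```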
@@ -175,6 +614,10 @@ class PromptToolkitInput(InputProviderABC):
             toolbar_text = left_text + padding
             return FormattedText([("#ansiyellow", toolbar_text)])
 
+    # -------------------------------------------------------------------------
+    # Placeholder
+    # -------------------------------------------------------------------------
+
     def _render_input_placeholder(self) -> FormattedText:
         if self._is_light_terminal_background is True:
             text_style = PLACEHOLDER_TEXT_STYLE_LIGHT_BG
@@ -200,9 +643,17 @@ class PromptToolkitInput(InputProviderABC):
                 (symbol_style, " / "),
                 (text_style, " "),
                 (text_style, "commands"),
+                (text_style, " "),
+                (symbol_style, " ctrl-l "),
+                (text_style, " "),
+                (text_style, "models"),
             ]
         )
 
+    # -------------------------------------------------------------------------
+    # InputProviderABC implementation
+    # -------------------------------------------------------------------------
+
     async def start(self) -> None:
         pass
 
@@ -216,13 +667,10 @@ class PromptToolkitInput(InputProviderABC):
             with contextlib.suppress(Exception):
                 self._pre_prompt()
 
-        # Only show bottom toolbar if there's an update message
-        bottom_toolbar = self._get_bottom_toolbar()
-
         with patch_stdout():
             line: str = await self._session.prompt_async(
                 placeholder=self._render_input_placeholder(),
-                bottom_toolbar=
+                bottom_toolbar=self._get_bottom_toolbar,
             )
         if self._post_prompt is not None:
             with contextlib.suppress(Exception):
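The final hunk replaces a toolbar value computed once per prompt with the bound method `self._get_bottom_toolbar`, so prompt_toolkit calls it on every redraw and the toast set by the pickers can appear and then expire mid-prompt. A minimal sketch of that pattern outside klaude-code, with the toast plumbing reduced to module-level state and a hypothetical model name:

```python
import time

from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import FormattedText

toast_message: str | None = None
toast_until: float = 0.0


def set_toast(message: str, duration_sec: float = 2.0) -> None:
    global toast_message, toast_until
    toast_message = message
    toast_until = time.monotonic() + duration_sec


def bottom_toolbar() -> FormattedText:
    # Invoked on every redraw because it is passed as a callable below.
    if toast_message is not None and time.monotonic() < toast_until:
        return FormattedText([("fg:ansiyellow", f" {toast_message}")])
    return FormattedText([("", "")])  # blank content clears a stale toolbar line


session: PromptSession[str] = PromptSession("> ")
set_toast("model: example-model")  # hypothetical model name
# text = session.prompt(bottom_toolbar=bottom_toolbar)  # run inside a real terminal
```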