tunacode-cli 0.0.71-py3-none-any.whl → 0.0.72-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- tunacode/cli/commands/implementations/model.py +332 -32
- tunacode/constants.py +1 -1
- tunacode/core/agents/agent_components/agent_config.py +25 -15
- tunacode/ui/completers.py +211 -9
- tunacode/ui/input.py +7 -1
- tunacode/ui/model_selector.py +394 -0
- tunacode/utils/models_registry.py +563 -0
- {tunacode_cli-0.0.71.dist-info → tunacode_cli-0.0.72.dist-info}/METADATA +1 -1
- {tunacode_cli-0.0.71.dist-info → tunacode_cli-0.0.72.dist-info}/RECORD +12 -10
- {tunacode_cli-0.0.71.dist-info → tunacode_cli-0.0.72.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.71.dist-info → tunacode_cli-0.0.72.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.71.dist-info → tunacode_cli-0.0.72.dist-info}/licenses/LICENSE +0 -0
tunacode/ui/completers.py
CHANGED
@@ -1,7 +1,7 @@
 """Completers for file references and commands."""
 
 import os
-from typing import TYPE_CHECKING, Iterable, Optional
+from typing import TYPE_CHECKING, Iterable, List, Optional
 
 from prompt_toolkit.completion import (
     CompleteEvent,
@@ -13,6 +13,7 @@ from prompt_toolkit.document import Document
 
 if TYPE_CHECKING:
     from ..cli.commands import CommandRegistry
+    from ..utils.models_registry import ModelInfo, ModelsRegistry
 
 
 class CommandCompleter(Completer):
@@ -126,11 +127,212 @@ class FileReferenceCompleter(Completer):
             pass
 
 
-
-"""
-
-
-
-
-]
-
+class ModelCompleter(Completer):
+    """Completer for model names in /model command."""
+
+    def __init__(self, registry: Optional["ModelsRegistry"] = None):
+        """Initialize the model completer."""
+        self.registry = registry
+        self._models_cache: Optional[List[ModelInfo]] = None
+        self._registry_loaded = False
+
+    async def _ensure_registry_loaded(self):
+        """Ensure the models registry is loaded."""
+        if self.registry and not self._registry_loaded:
+            try:
+                # Try to load models (this will be fast if already loaded)
+                await self.registry.load()
+                self._registry_loaded = True
+                self._models_cache = (
+                    list(self.registry.models.values()) if self.registry.models else []
+                )
+            except Exception:
+                # If loading fails, use empty cache
+                self._models_cache = []
+                self._registry_loaded = True
+
+    def get_completions(
+        self, document: Document, _complete_event: CompleteEvent
+    ) -> Iterable[Completion]:
+        """Get completions for model names."""
+        if not self.registry:
+            return
+
+        text = document.text_before_cursor
+
+        # Check if we're in a /model command context
+        lines = text.split("\n")
+        current_line = lines[-1].strip()
+
+        # Must start with /model
+        if not current_line.startswith("/model"):
+            return
+
+        # Try to load registry synchronously if not loaded
+        # Note: This is a compromise - ideally we'd use async completion
+        if not self._registry_loaded:
+            try:
+                # Quick attempt to load cached data only
+                if self.registry._is_cache_valid() and self.registry._load_from_cache():
+                    self._registry_loaded = True
+                    self._models_cache = list(self.registry.models.values())
+                elif not self._models_cache:
+                    # Use fallback models for immediate completion
+                    self.registry._load_fallback_models()
+                    self._registry_loaded = True
+                    self._models_cache = list(self.registry.models.values())
+            except Exception:
+                return  # Skip completion if we can't load models
+
+        # Get the part after /model
+        parts = current_line.split()
+        if len(parts) < 2:
+            # Just "/model" - suggest popular searches and top models
+            popular_searches = ["claude", "gpt", "gemini", "openai", "anthropic"]
+            for search_term in popular_searches:
+                yield Completion(
+                    text=search_term, display=f"{search_term} (search)", display_meta="search term"
+                )
+
+            # Also show top 3 most popular models if we have them
+            if self._models_cache:
+                popular_models = []
+                # Look for common popular models
+                for model in self._models_cache:
+                    if any(pop in model.id.lower() for pop in ["gpt-4o", "claude-3", "gemini-2"]):
+                        popular_models.append(model)
+                        if len(popular_models) >= 3:
+                            break
+
+                for model in popular_models:
+                    display = f"{model.full_id} - {model.name}"
+                    if model.cost.input is not None:
+                        display += f" (${model.cost.input}/{model.cost.output})"
+
+                    yield Completion(
+                        text=model.full_id, display=display, display_meta=f"{model.provider} model"
+                    )
+            return
+
+        # Get the current word being typed
+        word_before_cursor = document.get_word_before_cursor(WORD=True)
+        if not word_before_cursor or not self._models_cache:
+            return
+
+        query = word_before_cursor.lower()
+
+        # Use the new grouped approach to find base models with variants
+        base_models = self.registry.find_base_models(query)
+
+        if not base_models:
+            return
+
+        results = []
+        shown_base_models = 0
+
+        # Sort base models by popularity/relevance
+        sorted_base_models = sorted(
+            base_models.items(),
+            key=lambda x: (
+                # Popular models first
+                -1
+                if any(
+                    pop in x[0] for pop in ["gpt-4o", "gpt-4", "claude-3", "gemini-2", "o3", "o1"]
+                )
+                else 0,
+                # Then by name
+                x[0],
+            ),
+        )
+
+        for base_model_name, variants in sorted_base_models:
+            if shown_base_models >= 5:  # Limit to top 5 base models
+                break
+
+            shown_variants = 0
+            for i, model in enumerate(variants):
+                if shown_variants >= 3:  # Show max 3 variants per base model
+                    break
+
+                # Calculate start position for replacement
+                start_pos = -len(word_before_cursor)
+
+                # Build display text with enhanced info
+                cost_str = ""
+                if model.cost.input is not None:
+                    if model.cost.input == 0:
+                        cost_str = " (FREE)"
+                    else:
+                        cost_str = f" (${model.cost.input}/{model.cost.output})"
+
+                # Format provider info
+                provider_display = self._get_provider_display_name(model.provider)
+
+                # Primary variant gets the bullet, others get indentation
+                if i == 0:
+                    # First variant - primary option with bullet
+                    display = f"● {model.full_id} - {model.name}{cost_str}"
+                    if model.cost.input == 0:
+                        display += " ⭐"  # Star for free models
+                else:
+                    # Additional variants - indented
+                    display = f"  {model.full_id} - {model.name}{cost_str}"
+                    if model.cost.input == 0:
+                        display += " ⭐"
+
+                meta_info = f"{provider_display}"
+                if len(variants) > 1:
+                    meta_info += f" ({len(variants)} sources)"
+
+                results.append(
+                    Completion(
+                        text=model.full_id,
+                        start_position=start_pos,
+                        display=display,
+                        display_meta=meta_info,
+                    )
+                )
+
+                shown_variants += 1
+
+            shown_base_models += 1
+
+        # Limit total results for readability
+        for completion in results[:20]:
+            yield completion
+
+    def _get_provider_display_name(self, provider: str) -> str:
+        """Get a user-friendly provider display name."""
+        provider_names = {
+            "openai": "OpenAI Direct",
+            "anthropic": "Anthropic Direct",
+            "google": "Google Direct",
+            "google-gla": "Google Labs",
+            "openrouter": "OpenRouter",
+            "github-models": "GitHub Models (FREE)",
+            "azure": "Azure OpenAI",
+            "fastrouter": "FastRouter",
+            "requesty": "Requesty",
+            "cloudflare-workers-ai": "Cloudflare",
+            "amazon-bedrock": "AWS Bedrock",
+            "chutes": "Chutes AI",
+            "deepinfra": "DeepInfra",
+            "venice": "Venice AI",
+        }
+        return provider_names.get(provider, provider.title())
+
+
+def create_completer(
+    command_registry: Optional["CommandRegistry"] = None,
+    models_registry: Optional["ModelsRegistry"] = None,
+) -> Completer:
+    """Create a merged completer for commands, file references, and models."""
+    completers = [
+        CommandCompleter(command_registry),
+        FileReferenceCompleter(),
+    ]
+
+    if models_registry:
+        completers.append(ModelCompleter(models_registry))
+
+    return merge_completers(completers)
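For illustration only (this snippet is not part of the release), a minimal sketch of driving the new ModelCompleter directly with prompt_toolkit's Document. It assumes the ModelsRegistry added in this release can serve cached or bundled fallback models without a prior explicit load, which is what the completer's synchronous fallback path above relies on:

from prompt_toolkit.document import Document

from tunacode.ui.completers import ModelCompleter
from tunacode.utils.models_registry import ModelsRegistry

# ModelCompleter loads cached or fallback models itself on first use,
# so no explicit await is needed in this sketch.
completer = ModelCompleter(ModelsRegistry())

# Simulate a user who has typed "/model claude" at the prompt.
document = Document("/model claude")

# The second argument (a CompleteEvent) is unused by this completer.
for completion in completer.get_completions(document, None):
    print(completion.text)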
tunacode/ui/input.py
CHANGED
@@ -95,6 +95,12 @@ async def multiline_input(
         )
     )
 
+    # Create models registry for auto-completion (lazy loaded)
+    from ..utils.models_registry import ModelsRegistry
+
+    models_registry = ModelsRegistry()
+    # Note: Registry will be loaded lazily by the completer when needed
+
     # Display input area (Plan Mode indicator is handled dynamically in prompt manager)
     result = await input(
         "multiline",
@@ -102,7 +108,7 @@
         key_bindings=kb,
         multiline=True,
         placeholder=placeholder,
-        completer=create_completer(command_registry),
+        completer=create_completer(command_registry, models_registry),
         lexer=FileReferenceLexer(),
         state_manager=state_manager,
    )
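As a hedged usage sketch (again, not shipped code): any prompt_toolkit session handed the merged completer now gets model completions alongside the existing command and @file completions.

from prompt_toolkit import PromptSession

from tunacode.ui.completers import create_completer
from tunacode.utils.models_registry import ModelsRegistry

# command_registry is left as None for brevity; tunacode itself passes its
# CommandRegistry, as the input.py hunk above shows.
session = PromptSession(completer=create_completer(None, ModelsRegistry()))

# Typing "/model gpt" at this prompt now offers grouped model completions.
text = session.prompt("> ")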
tunacode/ui/model_selector.py
ADDED
@@ -0,0 +1,394 @@
+"""Interactive model selector UI component."""
+
+from typing import List, Optional
+
+from prompt_toolkit.application import Application
+from prompt_toolkit.buffer import Buffer
+from prompt_toolkit.formatted_text import HTML, StyleAndTextTuples
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.layout import (
+    FormattedTextControl,
+    HSplit,
+    Layout,
+    VSplit,
+    Window,
+    WindowAlign,
+)
+from prompt_toolkit.layout.controls import BufferControl
+from prompt_toolkit.layout.dimension import Dimension
+from prompt_toolkit.search import SearchState
+from prompt_toolkit.styles import Style
+from prompt_toolkit.widgets import Frame
+
+from ..utils.models_registry import ModelInfo, ModelsRegistry
+
+
+class ModelSelector:
+    """Interactive model selector with search and navigation."""
+
+    def __init__(self, registry: ModelsRegistry):
+        """Initialize the model selector."""
+        self.registry = registry
+        self.models: List[ModelInfo] = []
+        self.filtered_models: List[ModelInfo] = []
+        self.selected_index = 0
+        self.search_text = ""
+        self.selected_model: Optional[ModelInfo] = None
+
+        # Create key bindings
+        self.kb = self._create_key_bindings()
+
+        # Create search buffer
+        self.search_buffer = Buffer(on_text_changed=self._on_search_changed)
+
+        # Search state
+        self.search_state = SearchState()
+
+    def _create_key_bindings(self) -> KeyBindings:
+        """Create key bindings for the selector."""
+        kb = KeyBindings()
+
+        @kb.add("up", "k")
+        def move_up(event):
+            """Move selection up."""
+            if self.selected_index > 0:
+                self.selected_index -= 1
+                self._update_display()
+
+        @kb.add("down", "j")
+        def move_down(event):
+            """Move selection down."""
+            if self.selected_index < len(self.filtered_models) - 1:
+                self.selected_index += 1
+                self._update_display()
+
+        @kb.add("pageup")
+        def page_up(event):
+            """Move selection up by page."""
+            self.selected_index = max(0, self.selected_index - 10)
+            self._update_display()
+
+        @kb.add("pagedown")
+        def page_down(event):
+            """Move selection down by page."""
+            self.selected_index = min(len(self.filtered_models) - 1, self.selected_index + 10)
+            self._update_display()
+
+        @kb.add("enter")
+        def select_model(event):
+            """Select the current model."""
+            if 0 <= self.selected_index < len(self.filtered_models):
+                self.selected_model = self.filtered_models[self.selected_index]
+                event.app.exit(result=self.selected_model)
+
+        @kb.add("c-c", "escape", "q")
+        def cancel(event):
+            """Cancel selection."""
+            event.app.exit(result=None)
+
+        @kb.add("/")
+        def focus_search(event):
+            """Focus the search input."""
+            event.app.layout.focus(self.search_buffer)
+
+        @kb.add("tab")
+        def next_provider(event):
+            """Jump to next provider."""
+            if not self.filtered_models:
+                return
+
+            current_provider = self.filtered_models[self.selected_index].provider
+            for i in range(self.selected_index + 1, len(self.filtered_models)):
+                if self.filtered_models[i].provider != current_provider:
+                    self.selected_index = i
+                    self._update_display()
+                    break
+
+        @kb.add("s-tab")
+        def prev_provider(event):
+            """Jump to previous provider."""
+            if not self.filtered_models:
+                return
+
+            current_provider = self.filtered_models[self.selected_index].provider
+            for i in range(self.selected_index - 1, -1, -1):
+                if self.filtered_models[i].provider != current_provider:
+                    self.selected_index = i
+                    self._update_display()
+                    break
+
+        return kb
+
+    def _on_search_changed(self, buffer: Buffer) -> None:
+        """Handle search text changes."""
+        self.search_text = buffer.text
+        self._filter_models()
+        self._update_display()
+
+    def _filter_models(self) -> None:
+        """Filter models based on search text."""
+        if not self.search_text:
+            self.filtered_models = self.models.copy()
+        else:
+            # Search and sort by relevance
+            self.filtered_models = self.registry.search_models(self.search_text)
+
+        # Reset selection
+        self.selected_index = 0 if self.filtered_models else -1
+
+    def _get_model_lines(self) -> List[StyleAndTextTuples]:
+        """Get formatted lines for model display."""
+        lines = []
+
+        if not self.filtered_models:
+            lines.append([("class:muted", "No models found")])
+            return lines
+
+        # Group models by provider
+        current_provider = None
+        for i, model in enumerate(self.filtered_models):
+            # Add provider header if changed
+            if model.provider != current_provider:
+                if current_provider is not None:
+                    lines.append([])  # Empty line between providers
+
+                provider_info = self.registry.providers.get(model.provider)
+                provider_name = provider_info.name if provider_info else model.provider
+                lines.append([("class:provider", f"▼ {provider_name}")])
+                current_provider = model.provider
+
+            # Model line
+            is_selected = i == self.selected_index
+
+            # Build model display
+            parts = []
+
+            # Selection indicator
+            if is_selected:
+                parts.append(("class:selected", "→ "))
+            else:
+                parts.append(("", "  "))
+
+            # Model ID and name
+            parts.append(
+                ("class:model-id" if not is_selected else "class:selected-id", f"{model.id}")
+            )
+            parts.append(("class:muted", " - "))
+            parts.append(
+                ("class:model-name" if not is_selected else "class:selected-name", model.name)
+            )
+
+            # Cost and limits
+            details = []
+            if model.cost.input is not None:
+                details.append(f"${model.cost.input}/{model.cost.output}")
+            if model.limits.context:
+                details.append(f"{model.limits.context // 1000}k")
+
+            if details:
+                parts.append(("class:muted", f" ({', '.join(details)})"))
+
+            # Capabilities badges
+            badges = []
+            if model.capabilities.attachment:
+                badges.append("📎")
+            if model.capabilities.reasoning:
+                badges.append("🧠")
+            if model.capabilities.tool_call:
+                badges.append("🔧")
+
+            if badges:
+                parts.append(("class:badges", " " + "".join(badges)))
+
+            lines.append(parts)
+
+        return lines
+
+    def _get_details_panel(self) -> StyleAndTextTuples:
+        """Get the details panel content for selected model."""
+        if not self.filtered_models or self.selected_index < 0:
+            return [("", "Select a model to see details")]
+
+        model = self.filtered_models[self.selected_index]
+        lines = []
+
+        # Model name and ID
+        lines.append([("class:title", model.name)])
+        lines.append([("class:muted", f"{model.full_id}")])
+        lines.append([])
+
+        # Pricing
+        lines.append([("class:section", "Pricing:")])
+        if model.cost.input is not None:
+            lines.append([("", f"  Input: ${model.cost.input} per 1M tokens")])
+            lines.append([("", f"  Output: ${model.cost.output} per 1M tokens")])
+        else:
+            lines.append([("class:muted", "  Not available")])
+        lines.append([])
+
+        # Limits
+        lines.append([("class:section", "Limits:")])
+        if model.limits.context:
+            lines.append([("", f"  Context: {model.limits.context:,} tokens")])
+        if model.limits.output:
+            lines.append([("", f"  Output: {model.limits.output:,} tokens")])
+        if not model.limits.context and not model.limits.output:
+            lines.append([("class:muted", "  Not specified")])
+        lines.append([])
+
+        # Capabilities
+        lines.append([("class:section", "Capabilities:")])
+        caps = []
+        if model.capabilities.attachment:
+            caps.append("Attachments")
+        if model.capabilities.reasoning:
+            caps.append("Reasoning")
+        if model.capabilities.tool_call:
+            caps.append("Tool calling")
+        if model.capabilities.temperature:
+            caps.append("Temperature control")
+
+        if caps:
+            for cap in caps:
+                lines.append([("", f"  ✓ {cap}")])
+        else:
+            lines.append([("class:muted", "  Basic text generation")])
+
+        if model.capabilities.knowledge:
+            lines.append([])
+            lines.append([("class:section", "Knowledge cutoff:")])
+            lines.append([("", f"  {model.capabilities.knowledge}")])
+
+        # Modalities
+        if model.modalities:
+            lines.append([])
+            lines.append([("class:section", "Modalities:")])
+            if "input" in model.modalities:
+                lines.append([("", f"  Input: {', '.join(model.modalities['input'])}")])
+            if "output" in model.modalities:
+                lines.append([("", f"  Output: {', '.join(model.modalities['output'])}")])
+
+        return lines
+
+    def _update_display(self) -> None:
+        """Update the display (called on changes)."""
+        # This will trigger a redraw through prompt_toolkit's event system
+        if hasattr(self, "app"):
+            self.app.invalidate()
+
+    def _create_layout(self) -> Layout:
+        """Create the application layout."""
+        # Model list
+        model_list = FormattedTextControl(self._get_model_lines, focusable=False, show_cursor=False)
+
+        model_window = Window(
+            content=model_list,
+            width=Dimension(min=40, preferred=60),
+            height=Dimension(min=10, preferred=20),
+            scroll_offsets=True,
+            wrap_lines=False,
+        )
+
+        # Details panel
+        details_control = FormattedTextControl(
+            self._get_details_panel, focusable=False, show_cursor=False
+        )
+
+        details_window = Window(
+            content=details_control, width=Dimension(min=30, preferred=40), wrap_lines=True
+        )
+
+        # Search bar
+        search_field = Window(
+            BufferControl(buffer=self.search_buffer, focus_on_click=True), height=1
+        )
+
+        search_label = Window(
+            FormattedTextControl(HTML("<b>Search:</b> ")), width=8, height=1, dont_extend_width=True
+        )
+
+        search_bar = VSplit([search_label, search_field])
+
+        # Help text
+        help_text = Window(
+            FormattedTextControl(
+                HTML(
+                    "<muted>↑↓: Navigate | Enter: Select | /: Search | Tab: Next provider | Esc: Cancel</muted>"
+                )
+            ),
+            height=1,
+            align=WindowAlign.CENTER,
+        )
+
+        # Main content
+        content = VSplit(
+            [Frame(model_window, title="Select Model"), Frame(details_window, title="Details")]
+        )
+
+        # Root layout
+        root = HSplit(
+            [
+                search_bar,
+                Window(height=1),  # Spacer
+                content,
+                Window(height=1),  # Spacer
+                help_text,
+            ]
+        )
+
+        return Layout(root)
+
+    async def select_model(self, initial_query: str = "") -> Optional[ModelInfo]:
+        """Show the model selector and return selected model."""
+        # Load all models
+        self.models = list(self.registry.models.values())
+        self.search_buffer.text = initial_query
+
+        # Filter initially
+        self._filter_models()
+
+        # Create application
+        self.app = Application(
+            layout=self._create_layout(),
+            key_bindings=self.kb,
+            mouse_support=True,
+            full_screen=False,
+            style=self._get_style(),
+        )
+
+        # Run the selector
+        result = await self.app.run_async()
+        return result
+
+    def _get_style(self) -> Style:
+        """Get the style for the selector."""
+        return Style.from_dict(
+            {
+                "provider": "bold cyan",
+                "model-id": "white",
+                "model-name": "ansiwhite",
+                "selected": "reverse bold",
+                "selected-id": "reverse bold white",
+                "selected-name": "reverse bold ansiwhite",
+                "muted": "gray",
+                "badges": "yellow",
+                "title": "bold ansiwhite",
+                "section": "bold cyan",
+            }
+        )
+
+
+async def select_model_interactive(
+    registry: Optional[ModelsRegistry] = None, initial_query: str = ""
+) -> Optional[str]:
+    """Show interactive model selector and return selected model ID."""
+    if registry is None:
+        registry = ModelsRegistry()
+        await registry.load()
+
+    selector = ModelSelector(registry)
+    model = await selector.select_model(initial_query)
+
+    if model:
+        return model.full_id
+    return None
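A minimal sketch of driving the new selector from async code (illustrative, not part of the package): select_model_interactive constructs and loads a ModelsRegistry itself when none is supplied, and returns the chosen model's full_id, or None if the user cancels.

import asyncio

from tunacode.ui.model_selector import select_model_interactive


async def main() -> None:
    # Pre-seed the search box via initial_query; the user can still edit it.
    model_id = await select_model_interactive(initial_query="claude")
    print(model_id or "selection cancelled")


asyncio.run(main())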