code-puppy 0.0.214__py3-none-any.whl → 0.0.366__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +7 -1
- code_puppy/agents/__init__.py +2 -0
- code_puppy/agents/agent_c_reviewer.py +59 -6
- code_puppy/agents/agent_code_puppy.py +7 -1
- code_puppy/agents/agent_code_reviewer.py +12 -2
- code_puppy/agents/agent_cpp_reviewer.py +73 -6
- code_puppy/agents/agent_creator_agent.py +45 -4
- code_puppy/agents/agent_golang_reviewer.py +92 -3
- code_puppy/agents/agent_javascript_reviewer.py +101 -8
- code_puppy/agents/agent_manager.py +81 -4
- code_puppy/agents/agent_pack_leader.py +383 -0
- code_puppy/agents/agent_planning.py +163 -0
- code_puppy/agents/agent_python_programmer.py +165 -0
- code_puppy/agents/agent_python_reviewer.py +28 -6
- code_puppy/agents/agent_qa_expert.py +98 -6
- code_puppy/agents/agent_qa_kitten.py +12 -7
- code_puppy/agents/agent_security_auditor.py +113 -3
- code_puppy/agents/agent_terminal_qa.py +323 -0
- code_puppy/agents/agent_typescript_reviewer.py +106 -7
- code_puppy/agents/base_agent.py +802 -176
- code_puppy/agents/event_stream_handler.py +350 -0
- code_puppy/agents/pack/__init__.py +34 -0
- code_puppy/agents/pack/bloodhound.py +304 -0
- code_puppy/agents/pack/husky.py +321 -0
- code_puppy/agents/pack/retriever.py +393 -0
- code_puppy/agents/pack/shepherd.py +348 -0
- code_puppy/agents/pack/terrier.py +287 -0
- code_puppy/agents/pack/watchdog.py +367 -0
- code_puppy/agents/prompt_reviewer.py +145 -0
- code_puppy/agents/subagent_stream_handler.py +276 -0
- code_puppy/api/__init__.py +13 -0
- code_puppy/api/app.py +169 -0
- code_puppy/api/main.py +21 -0
- code_puppy/api/pty_manager.py +446 -0
- code_puppy/api/routers/__init__.py +12 -0
- code_puppy/api/routers/agents.py +36 -0
- code_puppy/api/routers/commands.py +217 -0
- code_puppy/api/routers/config.py +74 -0
- code_puppy/api/routers/sessions.py +232 -0
- code_puppy/api/templates/terminal.html +361 -0
- code_puppy/api/websocket.py +154 -0
- code_puppy/callbacks.py +142 -4
- code_puppy/chatgpt_codex_client.py +283 -0
- code_puppy/claude_cache_client.py +586 -0
- code_puppy/cli_runner.py +916 -0
- code_puppy/command_line/add_model_menu.py +1079 -0
- code_puppy/command_line/agent_menu.py +395 -0
- code_puppy/command_line/attachments.py +10 -5
- code_puppy/command_line/autosave_menu.py +605 -0
- code_puppy/command_line/clipboard.py +527 -0
- code_puppy/command_line/colors_menu.py +520 -0
- code_puppy/command_line/command_handler.py +176 -738
- code_puppy/command_line/command_registry.py +150 -0
- code_puppy/command_line/config_commands.py +715 -0
- code_puppy/command_line/core_commands.py +792 -0
- code_puppy/command_line/diff_menu.py +863 -0
- code_puppy/command_line/load_context_completion.py +15 -22
- code_puppy/command_line/mcp/base.py +0 -3
- code_puppy/command_line/mcp/catalog_server_installer.py +175 -0
- code_puppy/command_line/mcp/custom_server_form.py +688 -0
- code_puppy/command_line/mcp/custom_server_installer.py +195 -0
- code_puppy/command_line/mcp/edit_command.py +148 -0
- code_puppy/command_line/mcp/handler.py +9 -4
- code_puppy/command_line/mcp/help_command.py +6 -5
- code_puppy/command_line/mcp/install_command.py +15 -26
- code_puppy/command_line/mcp/install_menu.py +685 -0
- code_puppy/command_line/mcp/list_command.py +2 -2
- code_puppy/command_line/mcp/logs_command.py +174 -65
- code_puppy/command_line/mcp/remove_command.py +2 -2
- code_puppy/command_line/mcp/restart_command.py +12 -4
- code_puppy/command_line/mcp/search_command.py +16 -10
- code_puppy/command_line/mcp/start_all_command.py +18 -6
- code_puppy/command_line/mcp/start_command.py +47 -25
- code_puppy/command_line/mcp/status_command.py +4 -5
- code_puppy/command_line/mcp/stop_all_command.py +7 -1
- code_puppy/command_line/mcp/stop_command.py +8 -4
- code_puppy/command_line/mcp/test_command.py +2 -2
- code_puppy/command_line/mcp/wizard_utils.py +20 -16
- code_puppy/command_line/mcp_completion.py +174 -0
- code_puppy/command_line/model_picker_completion.py +75 -25
- code_puppy/command_line/model_settings_menu.py +884 -0
- code_puppy/command_line/motd.py +14 -8
- code_puppy/command_line/onboarding_slides.py +179 -0
- code_puppy/command_line/onboarding_wizard.py +340 -0
- code_puppy/command_line/pin_command_completion.py +329 -0
- code_puppy/command_line/prompt_toolkit_completion.py +463 -63
- code_puppy/command_line/session_commands.py +296 -0
- code_puppy/command_line/utils.py +54 -0
- code_puppy/config.py +898 -112
- code_puppy/error_logging.py +118 -0
- code_puppy/gemini_code_assist.py +385 -0
- code_puppy/gemini_model.py +602 -0
- code_puppy/http_utils.py +210 -148
- code_puppy/keymap.py +128 -0
- code_puppy/main.py +5 -698
- code_puppy/mcp_/__init__.py +17 -0
- code_puppy/mcp_/async_lifecycle.py +35 -4
- code_puppy/mcp_/blocking_startup.py +70 -43
- code_puppy/mcp_/captured_stdio_server.py +2 -2
- code_puppy/mcp_/config_wizard.py +4 -4
- code_puppy/mcp_/dashboard.py +15 -6
- code_puppy/mcp_/managed_server.py +65 -38
- code_puppy/mcp_/manager.py +146 -52
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/mcp_/registry.py +6 -6
- code_puppy/mcp_/server_registry_catalog.py +24 -5
- code_puppy/messaging/__init__.py +199 -2
- code_puppy/messaging/bus.py +610 -0
- code_puppy/messaging/commands.py +167 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/message_queue.py +17 -48
- code_puppy/messaging/messages.py +500 -0
- code_puppy/messaging/queue_console.py +1 -24
- code_puppy/messaging/renderers.py +43 -146
- code_puppy/messaging/rich_renderer.py +1027 -0
- code_puppy/messaging/spinner/__init__.py +21 -5
- code_puppy/messaging/spinner/console_spinner.py +86 -51
- code_puppy/messaging/subagent_console.py +461 -0
- code_puppy/model_factory.py +634 -83
- code_puppy/model_utils.py +167 -0
- code_puppy/models.json +66 -68
- code_puppy/models_dev_api.json +1 -0
- code_puppy/models_dev_parser.py +592 -0
- code_puppy/plugins/__init__.py +164 -10
- code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
- code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
- code_puppy/plugins/antigravity_oauth/antigravity_model.py +704 -0
- code_puppy/plugins/antigravity_oauth/config.py +42 -0
- code_puppy/plugins/antigravity_oauth/constants.py +136 -0
- code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
- code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
- code_puppy/plugins/antigravity_oauth/storage.py +271 -0
- code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
- code_puppy/plugins/antigravity_oauth/token.py +167 -0
- code_puppy/plugins/antigravity_oauth/transport.py +767 -0
- code_puppy/plugins/antigravity_oauth/utils.py +169 -0
- code_puppy/plugins/chatgpt_oauth/__init__.py +8 -0
- code_puppy/plugins/chatgpt_oauth/config.py +52 -0
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +328 -0
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +94 -0
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +293 -0
- code_puppy/plugins/chatgpt_oauth/utils.py +489 -0
- code_puppy/plugins/claude_code_oauth/README.md +167 -0
- code_puppy/plugins/claude_code_oauth/SETUP.md +93 -0
- code_puppy/plugins/claude_code_oauth/__init__.py +6 -0
- code_puppy/plugins/claude_code_oauth/config.py +50 -0
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +308 -0
- code_puppy/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_puppy/plugins/claude_code_oauth/utils.py +518 -0
- code_puppy/plugins/customizable_commands/__init__.py +0 -0
- code_puppy/plugins/customizable_commands/register_callbacks.py +169 -0
- code_puppy/plugins/example_custom_command/README.md +280 -0
- code_puppy/plugins/example_custom_command/register_callbacks.py +2 -2
- code_puppy/plugins/file_permission_handler/__init__.py +4 -0
- code_puppy/plugins/file_permission_handler/register_callbacks.py +523 -0
- code_puppy/plugins/frontend_emitter/__init__.py +25 -0
- code_puppy/plugins/frontend_emitter/emitter.py +121 -0
- code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
- code_puppy/plugins/oauth_puppy_html.py +228 -0
- code_puppy/plugins/shell_safety/__init__.py +6 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_puppy/plugins/shell_safety/command_cache.py +156 -0
- code_puppy/plugins/shell_safety/register_callbacks.py +202 -0
- code_puppy/prompts/antigravity_system_prompt.md +1 -0
- code_puppy/prompts/codex_system_prompt.md +310 -0
- code_puppy/pydantic_patches.py +131 -0
- code_puppy/reopenable_async_client.py +8 -8
- code_puppy/round_robin_model.py +9 -12
- code_puppy/session_storage.py +2 -1
- code_puppy/status_display.py +21 -4
- code_puppy/summarization_agent.py +41 -13
- code_puppy/terminal_utils.py +418 -0
- code_puppy/tools/__init__.py +37 -1
- code_puppy/tools/agent_tools.py +536 -52
- code_puppy/tools/browser/__init__.py +37 -0
- code_puppy/tools/browser/browser_control.py +19 -23
- code_puppy/tools/browser/browser_interactions.py +41 -48
- code_puppy/tools/browser/browser_locators.py +36 -38
- code_puppy/tools/browser/browser_manager.py +316 -0
- code_puppy/tools/browser/browser_navigation.py +16 -16
- code_puppy/tools/browser/browser_screenshot.py +79 -143
- code_puppy/tools/browser/browser_scripts.py +32 -42
- code_puppy/tools/browser/browser_workflows.py +44 -27
- code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
- code_puppy/tools/browser/terminal_command_tools.py +521 -0
- code_puppy/tools/browser/terminal_screenshot_tools.py +556 -0
- code_puppy/tools/browser/terminal_tools.py +525 -0
- code_puppy/tools/command_runner.py +930 -147
- code_puppy/tools/common.py +1113 -5
- code_puppy/tools/display.py +84 -0
- code_puppy/tools/file_modifications.py +288 -89
- code_puppy/tools/file_operations.py +226 -154
- code_puppy/tools/subagent_context.py +158 -0
- code_puppy/uvx_detection.py +242 -0
- code_puppy/version_checker.py +30 -11
- code_puppy-0.0.366.data/data/code_puppy/models.json +110 -0
- code_puppy-0.0.366.data/data/code_puppy/models_dev_api.json +1 -0
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/METADATA +149 -75
- code_puppy-0.0.366.dist-info/RECORD +217 -0
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/WHEEL +1 -1
- code_puppy/command_line/mcp/add_command.py +0 -183
- code_puppy/messaging/spinner/textual_spinner.py +0 -106
- code_puppy/tools/browser/camoufox_manager.py +0 -216
- code_puppy/tools/browser/vqa_agent.py +0 -70
- code_puppy/tui/__init__.py +0 -10
- code_puppy/tui/app.py +0 -1105
- code_puppy/tui/components/__init__.py +0 -21
- code_puppy/tui/components/chat_view.py +0 -551
- code_puppy/tui/components/command_history_modal.py +0 -218
- code_puppy/tui/components/copy_button.py +0 -139
- code_puppy/tui/components/custom_widgets.py +0 -63
- code_puppy/tui/components/human_input_modal.py +0 -175
- code_puppy/tui/components/input_area.py +0 -167
- code_puppy/tui/components/sidebar.py +0 -309
- code_puppy/tui/components/status_bar.py +0 -185
- code_puppy/tui/messages.py +0 -27
- code_puppy/tui/models/__init__.py +0 -8
- code_puppy/tui/models/chat_message.py +0 -25
- code_puppy/tui/models/command_history.py +0 -89
- code_puppy/tui/models/enums.py +0 -24
- code_puppy/tui/screens/__init__.py +0 -17
- code_puppy/tui/screens/autosave_picker.py +0 -175
- code_puppy/tui/screens/help.py +0 -130
- code_puppy/tui/screens/mcp_install_wizard.py +0 -803
- code_puppy/tui/screens/settings.py +0 -306
- code_puppy/tui/screens/tools.py +0 -74
- code_puppy/tui_state.py +0 -55
- code_puppy-0.0.214.data/data/code_puppy/models.json +0 -112
- code_puppy-0.0.214.dist-info/RECORD +0 -131
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1079 @@
|
|
|
1
|
+
"""Interactive terminal UI for browsing and adding models from models_dev_api.json.
|
|
2
|
+
|
|
3
|
+
Provides a beautiful split-panel interface for browsing providers and models
|
|
4
|
+
with live preview of model details and one-click addition to extra_models.json.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import sys
|
|
10
|
+
import time
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import List, Optional
|
|
13
|
+
|
|
14
|
+
from prompt_toolkit.application import Application
|
|
15
|
+
from prompt_toolkit.key_binding import KeyBindings
|
|
16
|
+
from prompt_toolkit.layout import Dimension, Layout, VSplit, Window
|
|
17
|
+
from prompt_toolkit.layout.controls import FormattedTextControl
|
|
18
|
+
from prompt_toolkit.widgets import Frame
|
|
19
|
+
|
|
20
|
+
from code_puppy.command_line.utils import safe_input
|
|
21
|
+
from code_puppy.config import EXTRA_MODELS_FILE, set_config_value
|
|
22
|
+
from code_puppy.messaging import emit_error, emit_info, emit_warning
|
|
23
|
+
from code_puppy.models_dev_parser import ModelInfo, ModelsDevRegistry, ProviderInfo
|
|
24
|
+
from code_puppy.tools.command_runner import set_awaiting_user_input
|
|
25
|
+
|
|
26
|
+
PAGE_SIZE = 15 # Items per page
|
|
27
|
+
|
|
28
|
+
# Hardcoded OpenAI-compatible endpoints for providers that have dedicated SDKs
|
|
29
|
+
# but actually work fine with custom_openai. These are fallbacks when provider.api is not set.
|
|
30
|
+
PROVIDER_ENDPOINTS = {
|
|
31
|
+
"xai": "https://api.x.ai/v1",
|
|
32
|
+
"cohere": "https://api.cohere.com/compatibility/v1", # Cohere's OpenAI-compatible endpoint
|
|
33
|
+
"groq": "https://api.groq.com/openai/v1",
|
|
34
|
+
"mistral": "https://api.mistral.ai/v1",
|
|
35
|
+
"togetherai": "https://api.together.xyz/v1",
|
|
36
|
+
"perplexity": "https://api.perplexity.ai",
|
|
37
|
+
"deepinfra": "https://api.deepinfra.com/v1/openai",
|
|
38
|
+
"aihubmix": "https://aihubmix.com/v1",
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
# Providers that require custom SDK implementations we don't support yet.
|
|
42
|
+
# These use non-OpenAI-compatible APIs or require special authentication (AWS SigV4, GCP, etc.)
|
|
43
|
+
UNSUPPORTED_PROVIDERS = {
|
|
44
|
+
"amazon-bedrock": "Requires AWS SigV4 authentication",
|
|
45
|
+
"google-vertex": "Requires GCP service account authentication",
|
|
46
|
+
"google-vertex-anthropic": "Requires GCP service account authentication",
|
|
47
|
+
"cloudflare-workers-ai": "Requires account ID in URL path",
|
|
48
|
+
"vercel": "Vercel AI Gateway - not yet supported",
|
|
49
|
+
"v0": "Vercel v0 - not yet supported",
|
|
50
|
+
"ollama-cloud": "Requires user-specific Ollama instance URL",
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class AddModelMenu:
|
|
55
|
+
"""Interactive TUI for browsing and adding models."""
|
|
56
|
+
|
|
57
|
+
def __init__(self):
|
|
58
|
+
"""Initialize the model browser menu."""
|
|
59
|
+
self.registry: Optional[ModelsDevRegistry] = None
|
|
60
|
+
self.providers: List[ProviderInfo] = []
|
|
61
|
+
self.current_provider: Optional[ProviderInfo] = None
|
|
62
|
+
self.current_models: List[ModelInfo] = []
|
|
63
|
+
|
|
64
|
+
# State management
|
|
65
|
+
self.view_mode = "providers" # "providers" or "models"
|
|
66
|
+
self.selected_provider_idx = 0
|
|
67
|
+
self.selected_model_idx = 0
|
|
68
|
+
self.current_page = 0
|
|
69
|
+
self.result = None # Track if user added a model
|
|
70
|
+
|
|
71
|
+
# Pending model for credential prompting
|
|
72
|
+
self.pending_model: Optional[ModelInfo] = None
|
|
73
|
+
self.pending_provider: Optional[ProviderInfo] = None
|
|
74
|
+
|
|
75
|
+
# Custom model support
|
|
76
|
+
self.is_custom_model_selected = False
|
|
77
|
+
self.custom_model_name: Optional[str] = None
|
|
78
|
+
|
|
79
|
+
# Initialize registry
|
|
80
|
+
self._initialize_registry()
|
|
81
|
+
|
|
82
|
+
def _initialize_registry(self):
|
|
83
|
+
"""Initialize the ModelsDevRegistry with error handling.
|
|
84
|
+
|
|
85
|
+
Fetches from live models.dev API first, falls back to bundled JSON.
|
|
86
|
+
"""
|
|
87
|
+
try:
|
|
88
|
+
self.registry = (
|
|
89
|
+
ModelsDevRegistry()
|
|
90
|
+
) # Will try API first, then bundled fallback
|
|
91
|
+
self.providers = self.registry.get_providers()
|
|
92
|
+
if not self.providers:
|
|
93
|
+
emit_error("No providers found in models database")
|
|
94
|
+
except FileNotFoundError as e:
|
|
95
|
+
emit_error(f"Models database unavailable: {e}")
|
|
96
|
+
except Exception as e:
|
|
97
|
+
emit_error(f"Error loading models registry: {e}")
|
|
98
|
+
|
|
99
|
+
def _get_current_provider(self) -> Optional[ProviderInfo]:
|
|
100
|
+
"""Get the currently selected provider."""
|
|
101
|
+
if 0 <= self.selected_provider_idx < len(self.providers):
|
|
102
|
+
return self.providers[self.selected_provider_idx]
|
|
103
|
+
return None
|
|
104
|
+
|
|
105
|
+
def _get_current_model(self) -> Optional[ModelInfo]:
|
|
106
|
+
"""Get the currently selected model.
|
|
107
|
+
|
|
108
|
+
Returns None if "Custom model" option is selected (which is at index len(current_models)).
|
|
109
|
+
"""
|
|
110
|
+
if self.view_mode == "models" and self.current_provider:
|
|
111
|
+
# Check if custom model option is selected (it's the last item)
|
|
112
|
+
if self.selected_model_idx == len(self.current_models):
|
|
113
|
+
return None # Custom model selected
|
|
114
|
+
if 0 <= self.selected_model_idx < len(self.current_models):
|
|
115
|
+
return self.current_models[self.selected_model_idx]
|
|
116
|
+
return None
|
|
117
|
+
|
|
118
|
+
def _is_custom_model_selected(self) -> bool:
|
|
119
|
+
"""Check if the custom model option is currently selected."""
|
|
120
|
+
if self.view_mode == "models" and self.current_provider:
|
|
121
|
+
return self.selected_model_idx == len(self.current_models)
|
|
122
|
+
return False
|
|
123
|
+
|
|
124
|
+
def _render_provider_list(self) -> List:
|
|
125
|
+
"""Render the provider list panel."""
|
|
126
|
+
lines = []
|
|
127
|
+
|
|
128
|
+
lines.append(("", " Providers"))
|
|
129
|
+
lines.append(("", "\n\n"))
|
|
130
|
+
|
|
131
|
+
if not self.providers:
|
|
132
|
+
lines.append(("fg:yellow", " No providers available."))
|
|
133
|
+
lines.append(("", "\n\n"))
|
|
134
|
+
self._render_navigation_hints(lines)
|
|
135
|
+
return lines
|
|
136
|
+
|
|
137
|
+
# Show providers for current page
|
|
138
|
+
total_pages = (len(self.providers) + PAGE_SIZE - 1) // PAGE_SIZE
|
|
139
|
+
start_idx = self.current_page * PAGE_SIZE
|
|
140
|
+
end_idx = min(start_idx + PAGE_SIZE, len(self.providers))
|
|
141
|
+
|
|
142
|
+
for i in range(start_idx, end_idx):
|
|
143
|
+
provider = self.providers[i]
|
|
144
|
+
is_selected = i == self.selected_provider_idx
|
|
145
|
+
is_unsupported = provider.id in UNSUPPORTED_PROVIDERS
|
|
146
|
+
|
|
147
|
+
# Format: "> Provider Name (X models)" or " Provider Name (X models)"
|
|
148
|
+
prefix = " > " if is_selected else " "
|
|
149
|
+
suffix = " ⚠️" if is_unsupported else ""
|
|
150
|
+
label = f"{prefix}{provider.name} ({provider.model_count} models){suffix}"
|
|
151
|
+
|
|
152
|
+
# Use dimmed color for unsupported providers
|
|
153
|
+
if is_unsupported:
|
|
154
|
+
lines.append(("fg:ansibrightblack dim", label))
|
|
155
|
+
elif is_selected:
|
|
156
|
+
lines.append(("fg:ansibrightblack", label))
|
|
157
|
+
else:
|
|
158
|
+
lines.append(("fg:ansibrightblack", label))
|
|
159
|
+
|
|
160
|
+
lines.append(("", "\n"))
|
|
161
|
+
|
|
162
|
+
lines.append(("", "\n"))
|
|
163
|
+
lines.append(
|
|
164
|
+
("fg:ansibrightblack", f" Page {self.current_page + 1}/{total_pages}")
|
|
165
|
+
)
|
|
166
|
+
lines.append(("", "\n"))
|
|
167
|
+
|
|
168
|
+
self._render_navigation_hints(lines)
|
|
169
|
+
return lines
|
|
170
|
+
|
|
171
|
+
def _render_model_list(self) -> List:
|
|
172
|
+
"""Render the model list panel."""
|
|
173
|
+
lines = []
|
|
174
|
+
|
|
175
|
+
if not self.current_provider:
|
|
176
|
+
lines.append(("fg:yellow", " No provider selected."))
|
|
177
|
+
lines.append(("", "\n\n"))
|
|
178
|
+
self._render_navigation_hints(lines)
|
|
179
|
+
return lines
|
|
180
|
+
|
|
181
|
+
lines.append(("", f" {self.current_provider.name} Models"))
|
|
182
|
+
lines.append(("", "\n\n"))
|
|
183
|
+
|
|
184
|
+
# Total items = models + 1 for custom model option
|
|
185
|
+
total_items = len(self.current_models) + 1
|
|
186
|
+
total_pages = (total_items + PAGE_SIZE - 1) // PAGE_SIZE
|
|
187
|
+
start_idx = self.current_page * PAGE_SIZE
|
|
188
|
+
end_idx = min(start_idx + PAGE_SIZE, total_items)
|
|
189
|
+
|
|
190
|
+
# Render models from the current page
|
|
191
|
+
for i in range(start_idx, end_idx):
|
|
192
|
+
# Check if this is the custom model option (last item)
|
|
193
|
+
if i == len(self.current_models):
|
|
194
|
+
is_selected = i == self.selected_model_idx
|
|
195
|
+
if is_selected:
|
|
196
|
+
lines.append(("fg:ansicyan bold", " > ✨ Custom model..."))
|
|
197
|
+
else:
|
|
198
|
+
lines.append(("fg:ansicyan", " ✨ Custom model..."))
|
|
199
|
+
lines.append(("", "\n"))
|
|
200
|
+
continue
|
|
201
|
+
|
|
202
|
+
model = self.current_models[i]
|
|
203
|
+
is_selected = i == self.selected_model_idx
|
|
204
|
+
|
|
205
|
+
# Create capability icons
|
|
206
|
+
icons = []
|
|
207
|
+
if model.has_vision:
|
|
208
|
+
icons.append("👁")
|
|
209
|
+
if model.tool_call:
|
|
210
|
+
icons.append("🔧")
|
|
211
|
+
if model.reasoning:
|
|
212
|
+
icons.append("🧠")
|
|
213
|
+
|
|
214
|
+
icon_str = " ".join(icons) + " " if icons else ""
|
|
215
|
+
|
|
216
|
+
if is_selected:
|
|
217
|
+
lines.append(("fg:ansibrightblack", f" > {icon_str}{model.name}"))
|
|
218
|
+
else:
|
|
219
|
+
lines.append(("fg:ansibrightblack", f" {icon_str}{model.name}"))
|
|
220
|
+
|
|
221
|
+
lines.append(("", "\n"))
|
|
222
|
+
|
|
223
|
+
lines.append(("", "\n"))
|
|
224
|
+
lines.append(
|
|
225
|
+
("fg:ansibrightblack", f" Page {self.current_page + 1}/{total_pages}")
|
|
226
|
+
)
|
|
227
|
+
lines.append(("", "\n"))
|
|
228
|
+
|
|
229
|
+
self._render_navigation_hints(lines)
|
|
230
|
+
return lines
|
|
231
|
+
|
|
232
|
+
def _render_navigation_hints(self, lines: List):
|
|
233
|
+
"""Render navigation hints at the bottom of the list panel."""
|
|
234
|
+
lines.append(("", "\n"))
|
|
235
|
+
lines.append(("fg:ansibrightblack", " ↑/↓ "))
|
|
236
|
+
lines.append(("", "Navigate "))
|
|
237
|
+
lines.append(("fg:ansibrightblack", "←/→ "))
|
|
238
|
+
lines.append(("", "Page\n"))
|
|
239
|
+
if self.view_mode == "providers":
|
|
240
|
+
lines.append(("fg:green", " Enter "))
|
|
241
|
+
lines.append(("", "Select\n"))
|
|
242
|
+
else:
|
|
243
|
+
lines.append(("fg:green", " Enter "))
|
|
244
|
+
lines.append(("", "Add Model\n"))
|
|
245
|
+
lines.append(("fg:ansibrightblack", " Esc/Back "))
|
|
246
|
+
lines.append(("", "Back\n"))
|
|
247
|
+
lines.append(("fg:ansibrightred", " Ctrl+C "))
|
|
248
|
+
lines.append(("", "Cancel"))
|
|
249
|
+
|
|
250
|
+
def _render_model_details(self) -> List:
|
|
251
|
+
"""Render the model details panel."""
|
|
252
|
+
lines = []
|
|
253
|
+
|
|
254
|
+
lines.append(("dim cyan", " MODEL DETAILS"))
|
|
255
|
+
lines.append(("", "\n\n"))
|
|
256
|
+
|
|
257
|
+
if self.view_mode == "providers":
|
|
258
|
+
provider = self._get_current_provider()
|
|
259
|
+
if not provider:
|
|
260
|
+
lines.append(("fg:yellow", " No provider selected."))
|
|
261
|
+
return lines
|
|
262
|
+
|
|
263
|
+
lines.append(("bold", f" {provider.name}"))
|
|
264
|
+
lines.append(("", "\n"))
|
|
265
|
+
lines.append(("fg:ansibrightblack", f" ID: {provider.id}"))
|
|
266
|
+
lines.append(("", "\n"))
|
|
267
|
+
lines.append(("fg:ansibrightblack", f" Models: {provider.model_count}"))
|
|
268
|
+
lines.append(("", "\n"))
|
|
269
|
+
lines.append(("fg:ansibrightblack", f" API: {provider.api}"))
|
|
270
|
+
lines.append(("", "\n"))
|
|
271
|
+
|
|
272
|
+
# Show unsupported warning if applicable
|
|
273
|
+
if provider.id in UNSUPPORTED_PROVIDERS:
|
|
274
|
+
lines.append(("", "\n"))
|
|
275
|
+
lines.append(("fg:ansired bold", " ⚠️ UNSUPPORTED PROVIDER"))
|
|
276
|
+
lines.append(("", "\n"))
|
|
277
|
+
lines.append(("fg:ansired", f" {UNSUPPORTED_PROVIDERS[provider.id]}"))
|
|
278
|
+
lines.append(("", "\n"))
|
|
279
|
+
lines.append(
|
|
280
|
+
(
|
|
281
|
+
"fg:ansibrightblack",
|
|
282
|
+
" Models from this provider cannot be added.",
|
|
283
|
+
)
|
|
284
|
+
)
|
|
285
|
+
lines.append(("", "\n"))
|
|
286
|
+
|
|
287
|
+
if provider.env:
|
|
288
|
+
lines.append(("", "\n"))
|
|
289
|
+
lines.append(("bold", " Environment Variables:"))
|
|
290
|
+
lines.append(("", "\n"))
|
|
291
|
+
for env_var in provider.env:
|
|
292
|
+
lines.append(("fg:ansibrightblack", f" • {env_var}"))
|
|
293
|
+
lines.append(("", "\n"))
|
|
294
|
+
|
|
295
|
+
if provider.doc:
|
|
296
|
+
lines.append(("", "\n"))
|
|
297
|
+
lines.append(("bold", " Documentation:"))
|
|
298
|
+
lines.append(("", "\n"))
|
|
299
|
+
lines.append(("fg:ansibrightblack", f" {provider.doc}"))
|
|
300
|
+
lines.append(("", "\n"))
|
|
301
|
+
|
|
302
|
+
else: # models view
|
|
303
|
+
model = self._get_current_model()
|
|
304
|
+
provider = self.current_provider
|
|
305
|
+
|
|
306
|
+
if not provider:
|
|
307
|
+
lines.append(("fg:yellow", " No model selected."))
|
|
308
|
+
return lines
|
|
309
|
+
|
|
310
|
+
# Handle custom model option
|
|
311
|
+
if self._is_custom_model_selected():
|
|
312
|
+
lines.append(("bold", " ✨ Custom Model"))
|
|
313
|
+
lines.append(("", "\n\n"))
|
|
314
|
+
lines.append(("fg:ansicyan", " Add a model not listed in models.dev"))
|
|
315
|
+
lines.append(("", "\n\n"))
|
|
316
|
+
lines.append(("bold", " How it works:"))
|
|
317
|
+
lines.append(("", "\n"))
|
|
318
|
+
lines.append(("fg:ansibrightblack", " 1. Press Enter to select"))
|
|
319
|
+
lines.append(("", "\n"))
|
|
320
|
+
lines.append(("fg:ansibrightblack", " 2. Enter the model ID/name"))
|
|
321
|
+
lines.append(("", "\n"))
|
|
322
|
+
lines.append(
|
|
323
|
+
("fg:ansibrightblack", f" 3. Uses {provider.name}'s API endpoint")
|
|
324
|
+
)
|
|
325
|
+
lines.append(("", "\n\n"))
|
|
326
|
+
lines.append(("bold", " Use cases:"))
|
|
327
|
+
lines.append(("", "\n"))
|
|
328
|
+
lines.append(("fg:ansibrightblack", " • Newly released models"))
|
|
329
|
+
lines.append(("", "\n"))
|
|
330
|
+
lines.append(("fg:ansibrightblack", " • Fine-tuned models"))
|
|
331
|
+
lines.append(("", "\n"))
|
|
332
|
+
lines.append(("fg:ansibrightblack", " • Preview/beta models"))
|
|
333
|
+
lines.append(("", "\n"))
|
|
334
|
+
lines.append(("fg:ansibrightblack", " • Custom deployments"))
|
|
335
|
+
lines.append(("", "\n\n"))
|
|
336
|
+
if provider.env:
|
|
337
|
+
lines.append(("bold", " Required credentials:"))
|
|
338
|
+
lines.append(("", "\n"))
|
|
339
|
+
for env_var in provider.env:
|
|
340
|
+
lines.append(("fg:ansibrightblack", f" • {env_var}"))
|
|
341
|
+
lines.append(("", "\n"))
|
|
342
|
+
return lines
|
|
343
|
+
|
|
344
|
+
if not model:
|
|
345
|
+
lines.append(("fg:yellow", " No model selected."))
|
|
346
|
+
return lines
|
|
347
|
+
|
|
348
|
+
lines.append(("bold", f" {provider.name} - {model.name}"))
|
|
349
|
+
lines.append(("", "\n\n"))
|
|
350
|
+
|
|
351
|
+
# BIG WARNING for models without tool calling
|
|
352
|
+
if not model.tool_call:
|
|
353
|
+
lines.append(("fg:ansiyellow bold", " ⚠️ NO TOOL CALLING SUPPORT"))
|
|
354
|
+
lines.append(("", "\n"))
|
|
355
|
+
lines.append(
|
|
356
|
+
("fg:ansiyellow", " This model cannot use tools (file ops,")
|
|
357
|
+
)
|
|
358
|
+
lines.append(("", "\n"))
|
|
359
|
+
lines.append(
|
|
360
|
+
("fg:ansiyellow", " shell commands, etc). It will be very")
|
|
361
|
+
)
|
|
362
|
+
lines.append(("", "\n"))
|
|
363
|
+
lines.append(("fg:ansiyellow", " limited for coding tasks!"))
|
|
364
|
+
lines.append(("", "\n\n"))
|
|
365
|
+
|
|
366
|
+
# Capabilities
|
|
367
|
+
lines.append(("bold", " Capabilities:"))
|
|
368
|
+
lines.append(("", "\n"))
|
|
369
|
+
|
|
370
|
+
capabilities = [
|
|
371
|
+
("Vision", model.has_vision),
|
|
372
|
+
("Tool Calling", model.tool_call),
|
|
373
|
+
("Reasoning", model.reasoning),
|
|
374
|
+
("Temperature", model.temperature),
|
|
375
|
+
("Structured Output", model.structured_output),
|
|
376
|
+
("Attachments", model.attachment),
|
|
377
|
+
]
|
|
378
|
+
|
|
379
|
+
for cap_name, has_cap in capabilities:
|
|
380
|
+
if has_cap:
|
|
381
|
+
lines.append(("fg:green", f" ✓ {cap_name}"))
|
|
382
|
+
else:
|
|
383
|
+
lines.append(("fg:ansibrightblack", f" ✗ {cap_name}"))
|
|
384
|
+
lines.append(("", "\n"))
|
|
385
|
+
|
|
386
|
+
# Pricing
|
|
387
|
+
lines.append(("", "\n"))
|
|
388
|
+
lines.append(("bold", " Pricing:"))
|
|
389
|
+
lines.append(("", "\n"))
|
|
390
|
+
|
|
391
|
+
if model.cost_input is not None or model.cost_output is not None:
|
|
392
|
+
if model.cost_input is not None:
|
|
393
|
+
lines.append(
|
|
394
|
+
(
|
|
395
|
+
"fg:ansibrightblack",
|
|
396
|
+
f" Input: ${model.cost_input:.6f}/token",
|
|
397
|
+
)
|
|
398
|
+
)
|
|
399
|
+
lines.append(("", "\n"))
|
|
400
|
+
if model.cost_output is not None:
|
|
401
|
+
lines.append(
|
|
402
|
+
(
|
|
403
|
+
"fg:ansibrightblack",
|
|
404
|
+
f" Output: ${model.cost_output:.6f}/token",
|
|
405
|
+
)
|
|
406
|
+
)
|
|
407
|
+
lines.append(("", "\n"))
|
|
408
|
+
if model.cost_cache_read is not None:
|
|
409
|
+
lines.append(
|
|
410
|
+
(
|
|
411
|
+
"fg:ansibrightblack",
|
|
412
|
+
f" Cache Read: ${model.cost_cache_read:.6f}/token",
|
|
413
|
+
)
|
|
414
|
+
)
|
|
415
|
+
lines.append(("", "\n"))
|
|
416
|
+
else:
|
|
417
|
+
lines.append(("fg:ansibrightblack", " Pricing not available"))
|
|
418
|
+
lines.append(("", "\n"))
|
|
419
|
+
|
|
420
|
+
# Limits
|
|
421
|
+
lines.append(("", "\n"))
|
|
422
|
+
lines.append(("bold", " Limits:"))
|
|
423
|
+
lines.append(("", "\n"))
|
|
424
|
+
|
|
425
|
+
if model.context_length > 0:
|
|
426
|
+
lines.append(
|
|
427
|
+
(
|
|
428
|
+
"fg:ansibrightblack",
|
|
429
|
+
f" Context: {model.context_length:,} tokens",
|
|
430
|
+
)
|
|
431
|
+
)
|
|
432
|
+
lines.append(("", "\n"))
|
|
433
|
+
if model.max_output > 0:
|
|
434
|
+
lines.append(
|
|
435
|
+
(
|
|
436
|
+
"fg:ansibrightblack",
|
|
437
|
+
f" Max Output: {model.max_output:,} tokens",
|
|
438
|
+
)
|
|
439
|
+
)
|
|
440
|
+
lines.append(("", "\n"))
|
|
441
|
+
|
|
442
|
+
# Modalities
|
|
443
|
+
if model.input_modalities or model.output_modalities:
|
|
444
|
+
lines.append(("", "\n"))
|
|
445
|
+
lines.append(("bold", " Modalities:"))
|
|
446
|
+
lines.append(("", "\n"))
|
|
447
|
+
|
|
448
|
+
if model.input_modalities:
|
|
449
|
+
lines.append(
|
|
450
|
+
(
|
|
451
|
+
"fg:ansibrightblack",
|
|
452
|
+
f" Input: {', '.join(model.input_modalities)}",
|
|
453
|
+
)
|
|
454
|
+
)
|
|
455
|
+
lines.append(("", "\n"))
|
|
456
|
+
if model.output_modalities:
|
|
457
|
+
lines.append(
|
|
458
|
+
(
|
|
459
|
+
"fg:ansibrightblack",
|
|
460
|
+
f" Output: {', '.join(model.output_modalities)}",
|
|
461
|
+
)
|
|
462
|
+
)
|
|
463
|
+
lines.append(("", "\n"))
|
|
464
|
+
|
|
465
|
+
# Metadata
|
|
466
|
+
lines.append(("", "\n"))
|
|
467
|
+
lines.append(("bold", " Metadata:"))
|
|
468
|
+
lines.append(("", "\n"))
|
|
469
|
+
|
|
470
|
+
lines.append(("fg:ansibrightblack", f" Model ID: {model.model_id}"))
|
|
471
|
+
lines.append(("", "\n"))
|
|
472
|
+
lines.append(("fg:ansibrightblack", f" Full ID: {model.full_id}"))
|
|
473
|
+
lines.append(("", "\n"))
|
|
474
|
+
|
|
475
|
+
if model.knowledge:
|
|
476
|
+
lines.append(
|
|
477
|
+
("fg:ansibrightblack", f" Knowledge: {model.knowledge}")
|
|
478
|
+
)
|
|
479
|
+
lines.append(("", "\n"))
|
|
480
|
+
|
|
481
|
+
if model.release_date:
|
|
482
|
+
lines.append(
|
|
483
|
+
("fg:ansibrightblack", f" Released: {model.release_date}")
|
|
484
|
+
)
|
|
485
|
+
lines.append(("", "\n"))
|
|
486
|
+
|
|
487
|
+
lines.append(
|
|
488
|
+
("fg:ansibrightblack", f" Open Weights: {model.open_weights}")
|
|
489
|
+
)
|
|
490
|
+
lines.append(("", "\n"))
|
|
491
|
+
|
|
492
|
+
return lines
|
|
493
|
+
|
|
494
|
+
def _add_model_to_extra_config(
    self, model: ModelInfo, provider: ProviderInfo
) -> bool:
    """Persist *model* into the extra_models.json configuration file.

    The file holds a dictionary where keys are user-friendly model names
    (e.g. "provider-model-name") and values carry type, name,
    custom_endpoint (when required), and context_length.

    Returns:
        True on success (including when the model already exists),
        False on any error.
    """
    try:
        # Load the existing registry (dictionary format) if present.
        path = Path(EXTRA_MODELS_FILE)
        registry: dict = {}

        if path.exists():
            try:
                registry = json.loads(path.read_text(encoding="utf-8"))
                if not isinstance(registry, dict):
                    emit_error(
                        "extra_models.json must be a dictionary, not a list"
                    )
                    return False
            except json.JSONDecodeError as e:
                emit_error(f"Error parsing extra_models.json: {e}")
                return False

        # Unique key: "<provider>-<model>" with path/namespace separators
        # normalized to dashes.
        key = f"{provider.id}-{model.model_id}".replace("/", "-").replace(
            ":", "-"
        )

        if key in registry:
            # Duplicate entries are harmless; treat as success.
            emit_info(f"Model {key} is already in extra_models.json")
            return True

        # Translate to the Code Puppy config shape and store it.
        registry[key] = self._build_model_config(model, provider)

        # Make sure the config directory exists before writing.
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(
            json.dumps(registry, indent=4, ensure_ascii=False),
            encoding="utf-8",
        )

        emit_info(f"Added {key} to extra_models.json")
        return True

    except Exception as e:
        emit_error(f"Error adding model to extra_models.json: {e}")
        return False
|
|
548
|
+
|
|
549
|
+
def _build_model_config(self, model: ModelInfo, provider: ProviderInfo) -> dict:
|
|
550
|
+
"""Build a Code Puppy compatible model configuration.
|
|
551
|
+
|
|
552
|
+
Format matches models.json structure:
|
|
553
|
+
{
|
|
554
|
+
"type": "openai" | "anthropic" | "gemini" | "custom_openai" | etc.,
|
|
555
|
+
"name": "actual-model-id",
|
|
556
|
+
"custom_endpoint": {"url": "...", "api_key": "$ENV_VAR"}, # if needed
|
|
557
|
+
"context_length": 200000
|
|
558
|
+
}
|
|
559
|
+
"""
|
|
560
|
+
# Map provider IDs to Code Puppy types
|
|
561
|
+
type_mapping = {
|
|
562
|
+
"openai": "openai",
|
|
563
|
+
"anthropic": "anthropic",
|
|
564
|
+
"google": "gemini",
|
|
565
|
+
"google-vertex": "gemini",
|
|
566
|
+
"mistral": "custom_openai",
|
|
567
|
+
"groq": "custom_openai",
|
|
568
|
+
"together-ai": "custom_openai",
|
|
569
|
+
"fireworks": "custom_openai",
|
|
570
|
+
"deepseek": "custom_openai",
|
|
571
|
+
"openrouter": "custom_openai",
|
|
572
|
+
"cerebras": "cerebras",
|
|
573
|
+
"cohere": "custom_openai",
|
|
574
|
+
"perplexity": "custom_openai",
|
|
575
|
+
"minimax": "custom_anthropic",
|
|
576
|
+
}
|
|
577
|
+
|
|
578
|
+
# Determine the model type
|
|
579
|
+
model_type = type_mapping.get(provider.id, "custom_openai")
|
|
580
|
+
|
|
581
|
+
# Special case: kimi-for-coding provider uses "kimi-for-coding" as the model name
|
|
582
|
+
# instead of the model_id from models.dev (which is "kimi-k2-thinking")
|
|
583
|
+
if provider.id == "kimi-for-coding":
|
|
584
|
+
model_name = "kimi-for-coding"
|
|
585
|
+
else:
|
|
586
|
+
model_name = model.model_id
|
|
587
|
+
|
|
588
|
+
config: dict = {
|
|
589
|
+
"type": model_type,
|
|
590
|
+
"name": model_name,
|
|
591
|
+
}
|
|
592
|
+
|
|
593
|
+
# Add custom endpoint for non-standard providers
|
|
594
|
+
if model_type == "custom_openai":
|
|
595
|
+
# Get the API URL - prefer provider.api, fall back to hardcoded endpoints
|
|
596
|
+
api_url = provider.api
|
|
597
|
+
if not api_url or api_url == "N/A":
|
|
598
|
+
api_url = PROVIDER_ENDPOINTS.get(provider.id)
|
|
599
|
+
|
|
600
|
+
if api_url:
|
|
601
|
+
# Determine the API key environment variable
|
|
602
|
+
api_key_env = f"${provider.env[0]}" if provider.env else "$API_KEY"
|
|
603
|
+
config["custom_endpoint"] = {"url": api_url, "api_key": api_key_env}
|
|
604
|
+
|
|
605
|
+
# Special handling for minimax: uses custom_anthropic but needs custom_endpoint
|
|
606
|
+
# and the URL needs /v1 stripped (comes as https://api.minimax.io/anthropic/v1)
|
|
607
|
+
if provider.id == "minimax" and provider.api:
|
|
608
|
+
api_url = provider.api
|
|
609
|
+
# Strip /v1 suffix if present
|
|
610
|
+
if api_url.endswith("/v1"):
|
|
611
|
+
api_url = api_url[:-3]
|
|
612
|
+
api_key_env = f"${provider.env[0]}" if provider.env else "$API_KEY"
|
|
613
|
+
config["custom_endpoint"] = {"url": api_url, "api_key": api_key_env}
|
|
614
|
+
|
|
615
|
+
# Add context length if available
|
|
616
|
+
if model.context_length and model.context_length > 0:
|
|
617
|
+
config["context_length"] = model.context_length
|
|
618
|
+
|
|
619
|
+
# Add supported settings based on model type
|
|
620
|
+
if model_type == "anthropic":
|
|
621
|
+
config["supported_settings"] = [
|
|
622
|
+
"temperature",
|
|
623
|
+
"extended_thinking",
|
|
624
|
+
"budget_tokens",
|
|
625
|
+
]
|
|
626
|
+
elif model_type == "openai" and "gpt-5" in model.model_id:
|
|
627
|
+
# GPT-5 models have special settings
|
|
628
|
+
if "codex" in model.model_id:
|
|
629
|
+
config["supported_settings"] = ["reasoning_effort"]
|
|
630
|
+
else:
|
|
631
|
+
config["supported_settings"] = ["reasoning_effort", "verbosity"]
|
|
632
|
+
else:
|
|
633
|
+
# Default settings for most models (no top_p)
|
|
634
|
+
config["supported_settings"] = ["temperature", "seed"]
|
|
635
|
+
|
|
636
|
+
return config
|
|
637
|
+
|
|
638
|
+
def update_display(self):
    """Refresh both panes from the current browser state."""
    # Left pane: provider list or model list, depending on the view.
    if self.view_mode == "providers":
        rendered = self._render_provider_list()
    else:
        rendered = self._render_model_list()
    self.menu_control.text = rendered

    # Right pane always shows details for the current selection.
    self.preview_control.text = self._render_model_details()
|
|
646
|
+
|
|
647
|
+
def _enter_provider(self):
|
|
648
|
+
"""Enter the selected provider to view its models."""
|
|
649
|
+
provider = self._get_current_provider()
|
|
650
|
+
if not provider or not self.registry:
|
|
651
|
+
return
|
|
652
|
+
|
|
653
|
+
self.current_provider = provider
|
|
654
|
+
self.current_models = self.registry.get_models(provider.id)
|
|
655
|
+
self.view_mode = "models"
|
|
656
|
+
self.selected_model_idx = 0
|
|
657
|
+
self.current_page = 0
|
|
658
|
+
self.update_display()
|
|
659
|
+
|
|
660
|
+
def _go_back_to_providers(self):
|
|
661
|
+
"""Go back to providers view."""
|
|
662
|
+
self.view_mode = "providers"
|
|
663
|
+
self.current_provider = None
|
|
664
|
+
self.current_models = []
|
|
665
|
+
self.selected_model_idx = 0
|
|
666
|
+
self.current_page = 0
|
|
667
|
+
self.update_display()
|
|
668
|
+
|
|
669
|
+
def _add_current_model(self):
|
|
670
|
+
"""Add the currently selected model to extra_models.json."""
|
|
671
|
+
provider = self.current_provider
|
|
672
|
+
|
|
673
|
+
if not provider:
|
|
674
|
+
return
|
|
675
|
+
|
|
676
|
+
# Block unsupported providers
|
|
677
|
+
if provider.id in UNSUPPORTED_PROVIDERS:
|
|
678
|
+
self.result = "unsupported"
|
|
679
|
+
return
|
|
680
|
+
|
|
681
|
+
# Check if custom model option is selected
|
|
682
|
+
if self._is_custom_model_selected():
|
|
683
|
+
self.is_custom_model_selected = True
|
|
684
|
+
self.pending_provider = provider
|
|
685
|
+
self.result = (
|
|
686
|
+
"pending_custom_model" # Signal to prompt for custom model name
|
|
687
|
+
)
|
|
688
|
+
return
|
|
689
|
+
|
|
690
|
+
model = self._get_current_model()
|
|
691
|
+
if model:
|
|
692
|
+
# Store model/provider for credential prompting after TUI exits
|
|
693
|
+
self.pending_model = model
|
|
694
|
+
self.pending_provider = provider
|
|
695
|
+
self.result = "pending_credentials" # Signal to prompt for credentials
|
|
696
|
+
|
|
697
|
+
def _get_missing_env_vars(self, provider: ProviderInfo) -> List[str]:
|
|
698
|
+
"""Check which required env vars are missing for a provider."""
|
|
699
|
+
missing = []
|
|
700
|
+
for env_var in provider.env:
|
|
701
|
+
if not os.environ.get(env_var):
|
|
702
|
+
missing.append(env_var)
|
|
703
|
+
return missing
|
|
704
|
+
|
|
705
|
+
def _prompt_for_credentials(self, provider: ProviderInfo) -> bool:
    """Interactively collect any missing credentials for *provider*.

    Each value entered is written to config and exported into the
    current process environment so it takes effect immediately.
    Skipped variables are allowed; the user is told how to set them
    later.

    Returns:
        True if all credentials were provided (or none needed), False if user cancelled
    """
    missing = self._get_missing_env_vars(provider)

    if not missing:
        emit_info(
            f"✅ All required credentials for {provider.name} are already set!"
        )
        return True

    emit_info(f"\n🔑 {provider.name} requires the following credentials:\n")

    for var in missing:
        # Show a helpful hint for well-known env var names, if we have one.
        hint = self._get_env_var_hint(var)
        if hint:
            emit_info(f" {hint}")

        try:
            # safe_input handles cross-platform quirks (Windows fix).
            value = safe_input(f" Enter {var} (or press Enter to skip): ")

            if not value:
                emit_warning(
                    f"Skipped {var} - you can set it later with /set {var}=<value>"
                )
                continue

            # Persist, then export so it's usable without a restart.
            set_config_value(var, value)
            os.environ[var] = value
            emit_info(f"✅ Saved {var} to config")

        except (KeyboardInterrupt, EOFError):
            emit_info("")  # Clean newline
            emit_warning("Credential input cancelled")
            return False

    return True
|
|
749
|
+
|
|
750
|
+
def _create_custom_model_info(
    self, model_name: str, context_length: int = 128000
) -> ModelInfo:
    """Build a ModelInfo for a user-supplied model id.

    The model's real capabilities are unknown, so optimistic defaults
    are assumed (tool calling enabled, text-only modalities) to keep
    the model usable.
    """
    pending = self.pending_provider
    return ModelInfo(
        provider_id=pending.id if pending else "custom",
        model_id=model_name,
        name=model_name,
        tool_call=True,  # Assume true for usability
        temperature=True,
        context_length=context_length,
        # Cap output at 16k or a quarter of the context, whichever is smaller.
        max_output=min(16384, context_length // 4),
        input_modalities=["text"],
        output_modalities=["text"],
    )
|
|
771
|
+
|
|
772
|
+
def _prompt_for_custom_model(self) -> Optional[tuple[str, int]]:
    """Prompt user for custom model details.

    Asks for the provider-specific model ID and a context window size.
    Sizes accept plain integers, comma grouping ("128,000"), and
    "k"/"m" suffixes ("128k", "1m"); invalid input falls back to the
    128000 default with a warning.

    Returns:
        Tuple of (model_name, context_length) if provided, None if cancelled
    """
    provider = self.pending_provider
    if not provider:
        return None

    def _parse_context_size(raw: str) -> int:
        """Parse one context-size string; warn and default on bad input."""
        # Normalize once: lowercase for suffix checks, strip comma grouping.
        # BUG FIX: the original parsed the *raw* string in the plain-int
        # branch, so "128,000" raised ValueError despite the comma strip.
        text = raw.lower().replace(",", "")
        try:
            if text.endswith("k"):
                return int(float(text[:-1]) * 1000)
            if text.endswith("m"):
                return int(float(text[:-1]) * 1000000)
            return int(text)
        except ValueError:
            emit_warning("Invalid context size, using default 128000")
            return 128000

    emit_info(f"\n✨ Adding custom model for {provider.name}\n")
    emit_info(" Enter the model ID exactly as the provider expects it.")
    emit_info(
        " Examples: gpt-4-turbo-preview, claude-3-opus-20240229, gemini-1.5-pro-latest\n"
    )

    try:
        model_name = safe_input(" Model ID: ")

        if not model_name:
            emit_warning("No model name provided, cancelled.")
            return None

        # Ask for context size
        emit_info("\n Enter the context window size (in tokens).")
        emit_info(" Common sizes: 8192, 32768, 128000, 200000, 1000000\n")

        context_input = safe_input(" Context size [128000]: ")

        # Empty input takes the default; anything else goes through the parser.
        context_length = (
            128000 if not context_input else _parse_context_size(context_input)
        )

        return (model_name, context_length)

    except (KeyboardInterrupt, EOFError):
        emit_info("")  # Clean newline
        emit_warning("Custom model input cancelled")
        return None
|
|
831
|
+
|
|
832
|
+
def _get_env_var_hint(self, env_var: str) -> str:
|
|
833
|
+
"""Get a helpful hint for common environment variables."""
|
|
834
|
+
hints = {
|
|
835
|
+
"OPENAI_API_KEY": "💡 Get your API key from https://platform.openai.com/api-keys",
|
|
836
|
+
"ANTHROPIC_API_KEY": "💡 Get your API key from https://console.anthropic.com/",
|
|
837
|
+
"GEMINI_API_KEY": "💡 Get your API key from https://aistudio.google.com/apikey",
|
|
838
|
+
"GOOGLE_API_KEY": "💡 Get your API key from https://aistudio.google.com/apikey",
|
|
839
|
+
"AZURE_API_KEY": "💡 Get your API key from Azure Portal > Your OpenAI Resource > Keys",
|
|
840
|
+
"AZURE_RESOURCE_NAME": "💡 Your Azure OpenAI resource name (not the full URL)",
|
|
841
|
+
"GROQ_API_KEY": "💡 Get your API key from https://console.groq.com/keys",
|
|
842
|
+
"MISTRAL_API_KEY": "💡 Get your API key from https://console.mistral.ai/",
|
|
843
|
+
"COHERE_API_KEY": "💡 Get your API key from https://dashboard.cohere.com/api-keys",
|
|
844
|
+
"DEEPSEEK_API_KEY": "💡 Get your API key from https://platform.deepseek.com/",
|
|
845
|
+
"TOGETHER_API_KEY": "💡 Get your API key from https://api.together.xyz/settings/api-keys",
|
|
846
|
+
"FIREWORKS_API_KEY": "💡 Get your API key from https://fireworks.ai/api-keys",
|
|
847
|
+
"OPENROUTER_API_KEY": "💡 Get your API key from https://openrouter.ai/keys",
|
|
848
|
+
"PERPLEXITY_API_KEY": "💡 Get your API key from https://www.perplexity.ai/settings/api",
|
|
849
|
+
"CEREBRAS_API_KEY": "💡 Get your API key from https://cloud.cerebras.ai/",
|
|
850
|
+
"HUGGINGFACE_API_KEY": "💡 Get your API key from https://huggingface.co/settings/tokens",
|
|
851
|
+
"XAI_API_KEY": "💡 Get your API key from https://console.x.ai/",
|
|
852
|
+
}
|
|
853
|
+
return hints.get(env_var, "")
|
|
854
|
+
|
|
855
|
+
def run(self) -> bool:
    """Run the interactive model browser (synchronous).

    Builds a two-pane prompt_toolkit UI (provider/model list on the left,
    details on the right), runs it in the terminal's alternate screen
    buffer, then — after the TUI exits — dispatches on ``self.result`` to
    finish any pending custom-model or credential flow outside the TUI.

    Returns:
        True if a model was added, False otherwise
    """
    # Nothing to browse without registry data.
    if not self.registry or not self.providers:
        emit_warning("No models data available.")
        return False

    # Build UI: two text controls, each wrapped in a framed window.
    self.menu_control = FormattedTextControl(text="")
    self.preview_control = FormattedTextControl(text="")

    menu_window = Window(
        content=self.menu_control, wrap_lines=True, width=Dimension(weight=30)
    )
    preview_window = Window(
        content=self.preview_control, wrap_lines=True, width=Dimension(weight=70)
    )

    # 30/70 split: narrow browse list, wide details pane.
    menu_frame = Frame(menu_window, width=Dimension(weight=30), title="Browse")
    preview_frame = Frame(
        preview_window, width=Dimension(weight=70), title="Details"
    )

    root_container = VSplit([menu_frame, preview_frame])

    # Key bindings
    kb = KeyBindings()

    @kb.add("up")
    def _(event):
        # Move selection up one item; page follows the selection.
        if self.view_mode == "providers":
            if self.selected_provider_idx > 0:
                self.selected_provider_idx -= 1
                self.current_page = self.selected_provider_idx // PAGE_SIZE
        else:  # models view
            if self.selected_model_idx > 0:
                self.selected_model_idx -= 1
                self.current_page = self.selected_model_idx // PAGE_SIZE
        self.update_display()

    @kb.add("down")
    def _(event):
        # Move selection down one item; page follows the selection.
        if self.view_mode == "providers":
            if self.selected_provider_idx < len(self.providers) - 1:
                self.selected_provider_idx += 1
                self.current_page = self.selected_provider_idx // PAGE_SIZE
        else:  # models view - include custom model option at the end
            # Max index is len(current_models) which is the "Custom model" option
            if self.selected_model_idx < len(self.current_models):
                self.selected_model_idx += 1
                self.current_page = self.selected_model_idx // PAGE_SIZE
        self.update_display()

    @kb.add("left")
    def _(event):
        """Previous page."""
        if self.current_page > 0:
            self.current_page -= 1
            # Update selected index to first item on new page
            if self.view_mode == "providers":
                self.selected_provider_idx = self.current_page * PAGE_SIZE
            else:
                self.selected_model_idx = self.current_page * PAGE_SIZE
            self.update_display()

    @kb.add("right")
    def _(event):
        """Next page."""
        if self.view_mode == "providers":
            total_items = len(self.providers)
        else:
            total_items = len(self.current_models) + 1  # +1 for custom model option

        # Ceiling division to count pages.
        total_pages = (total_items + PAGE_SIZE - 1) // PAGE_SIZE
        if self.current_page < total_pages - 1:
            self.current_page += 1
            # Update selected index to first item on new page
            if self.view_mode == "providers":
                self.selected_provider_idx = self.current_page * PAGE_SIZE
            else:
                self.selected_model_idx = self.current_page * PAGE_SIZE
            self.update_display()

    @kb.add("enter")
    def _(event):
        if self.view_mode == "providers":
            self._enter_provider()
        elif self.view_mode == "models":
            # Enter adds the model when viewing models; the TUI exits and
            # the post-run code below finishes the flow via self.result.
            self._add_current_model()
            event.app.exit()

    @kb.add("escape")
    def _(event):
        if self.view_mode == "models":
            self._go_back_to_providers()

    @kb.add("backspace")
    def _(event):
        # Backspace mirrors escape: back out of the model list.
        if self.view_mode == "models":
            self._go_back_to_providers()

    @kb.add("c-c")
    def _(event):
        # Ctrl-C quits the browser without selecting anything.
        event.app.exit()

    layout = Layout(root_container)
    app = Application(
        layout=layout,
        key_bindings=kb,
        full_screen=False,
        mouse_support=False,
    )

    set_awaiting_user_input(True)

    # Enter alternate screen buffer once for entire session
    sys.stdout.write("\033[?1049h")  # Enter alternate buffer
    sys.stdout.write("\033[2J\033[H")  # Clear and home
    sys.stdout.flush()
    # Brief pause so the terminal settles before the app draws.
    time.sleep(0.05)

    try:
        # Initial display
        self.update_display()

        # Just clear the current buffer (don't switch buffers)
        sys.stdout.write("\033[2J\033[H")  # Clear screen within current buffer
        sys.stdout.flush()

        # Run application in a background thread to avoid event loop conflicts
        # This is needed because code_puppy runs in an async context
        app.run(in_thread=True)

    finally:
        # Exit alternate screen buffer once at end
        sys.stdout.write("\033[?1049l")  # Exit alternate buffer
        sys.stdout.flush()
        # Reset awaiting input flag
        set_awaiting_user_input(False)

    # Clear exit message (unless we're about to prompt for more input)
    if self.result not in ("pending_credentials", "pending_custom_model"):
        emit_info("✓ Exited model browser")

    # Handle unsupported provider
    if self.result == "unsupported" and self.current_provider:
        reason = UNSUPPORTED_PROVIDERS.get(
            self.current_provider.id, "Not supported"
        )
        emit_error(f"Cannot add model from {self.current_provider.name}: {reason}")
        return False

    # Handle custom model flow after TUI exits
    if self.result == "pending_custom_model" and self.pending_provider:
        # Prompt for custom model details (name and context size)
        custom_model_result = self._prompt_for_custom_model()
        if not custom_model_result:
            return False

        model_name, context_length = custom_model_result

        # Create a ModelInfo for the custom model
        self.pending_model = self._create_custom_model_info(
            model_name, context_length
        )

        # Prompt for any missing credentials
        if self._prompt_for_credentials(self.pending_provider):
            # Now add the model to config
            if self._add_model_to_extra_config(
                self.pending_model, self.pending_provider
            ):
                self.result = "added"
                return True
        return False

    # Handle pending credential flow after TUI exits
    if (
        self.result == "pending_credentials"
        and self.pending_model
        and self.pending_provider
    ):
        # Warn about non-tool-calling models
        if not self.pending_model.tool_call:
            emit_warning(
                f"⚠️ {self.pending_model.name} does NOT support tool calling!\n"
                f" This model won't be able to edit files, run commands, or use any tools.\n"
                f" It will be very limited for coding tasks."
            )
            try:
                confirm = safe_input(
                    "\n Are you sure you want to add this model? (y/N): "
                ).lower()
                if confirm not in ("y", "yes"):
                    emit_info("Model addition cancelled.")
                    return False
            except (KeyboardInterrupt, EOFError):
                emit_info("")
                return False

        # Prompt for any missing credentials
        if self._prompt_for_credentials(self.pending_provider):
            # Now add the model to config
            if self._add_model_to_extra_config(
                self.pending_model, self.pending_provider
            ):
                self.result = "added"
                return True
        return False

    return self.result == "added"
|
|
1070
|
+
|
|
1071
|
+
|
|
1072
|
+
def interactive_model_picker() -> bool:
    """Show interactive terminal UI to browse and add models.

    Thin entry point: constructs the menu and delegates to its run loop.

    Returns:
        True if a model was added, False otherwise
    """
    return AddModelMenu().run()
|