chat-console 0.4.7__py3-none-any.whl → 0.4.91__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/console_chat.py +1170 -97
- {chat_console-0.4.7.dist-info → chat_console-0.4.91.dist-info}/METADATA +1 -1
- {chat_console-0.4.7.dist-info → chat_console-0.4.91.dist-info}/RECORD +8 -8
- {chat_console-0.4.7.dist-info → chat_console-0.4.91.dist-info}/WHEEL +0 -0
- {chat_console-0.4.7.dist-info → chat_console-0.4.91.dist-info}/entry_points.txt +0 -0
- {chat_console-0.4.7.dist-info → chat_console-0.4.91.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.4.7.dist-info → chat_console-0.4.91.dist-info}/top_level.txt +0 -0
app/console_chat.py
CHANGED
@@ -11,6 +11,8 @@ import argparse
 import signal
 import threading
 import time
+import random
+import json
 from datetime import datetime
 from typing import List, Optional, Dict, Any
 import shutil
@@ -23,7 +25,7 @@ from .console_utils import console_streaming_response, apply_style_prefix
 from .api.base import BaseModelClient

 class ConsoleUI:
-    """Pure console UI following Rams design principles"""
+    """Pure console UI following Rams design principles with Gemini-inspired enhancements"""

     def __init__(self):
         self.width = min(shutil.get_terminal_size().columns, 120)
@@ -36,10 +38,49 @@ class ConsoleUI:
         self.running = True
         self.generating = False
         self.input_mode = "text"  # "text" or "menu"
+        self.multi_line_input = []
+        self.input_history = []
+        self.history_index = 0
+        self.theme = self._load_theme()
+        self.loading_phrases = [
+            "Thinking deeply", "Crafting response", "Processing context",
+            "Analyzing request", "Generating ideas", "Considering options",
+            "Formulating answer", "Connecting concepts", "Refining thoughts"
+        ]
+        self.loading_phase_index = 0
+        self.start_time = time.time()

         # Suppress verbose logging for console mode
         self._setup_console_logging()

+    def _load_theme(self) -> Dict[str, str]:
+        """Load color theme configuration"""
+        try:
+            # Try to import colorama for colors
+            from colorama import Fore, Back, Style, init
+            init(autoreset=True)
+
+            # Default theme inspired by gemini-code-assist
+            return {
+                'primary': Fore.CYAN,
+                'secondary': Fore.BLUE,
+                'accent': Fore.MAGENTA,
+                'success': Fore.GREEN,
+                'warning': Fore.YELLOW,
+                'error': Fore.RED,
+                'muted': Fore.LIGHTBLACK_EX,
+                'text': Fore.WHITE,
+                'reset': Style.RESET_ALL,
+                'bold': Style.BRIGHT,
+                'dim': Style.DIM
+            }
+        except ImportError:
+            # Fallback to no colors if colorama not available
+            return {key: '' for key in [
+                'primary', 'secondary', 'accent', 'success', 'warning',
+                'error', 'muted', 'text', 'reset', 'bold', 'dim'
+            ]}
+
     def _setup_console_logging(self):
         """Setup logging to minimize disruption to console UI"""
         import logging
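Note: the `_load_theme` fallback above works because every theme key degrades to an empty string when colorama is missing, so the f-strings used throughout the UI stay valid and simply render without color. A minimal standalone sketch of the same pattern (illustrative only, not part of the package):

    # Optional-dependency color pattern: color codes become "" when
    # colorama is absent, so formatted output degrades to plain text.
    try:
        from colorama import Fore, Style, init
        init(autoreset=True)
        THEME = {'primary': Fore.CYAN, 'reset': Style.RESET_ALL}
    except ImportError:
        THEME = {'primary': '', 'reset': ''}

    print(f"{THEME['primary']}Chat Console{THEME['reset']} v0.4.91")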
@@ -132,105 +173,250 @@ class ConsoleUI:
         return chars['horizontal'] * width

     def draw_header(self) -> List[str]:
-        """Draw the application header"""
+        """Draw the application header with colors"""
         from . import __version__
         chars = self.get_border_chars()

         lines = []

         # Top border with title and model info
-        title = f" Chat Console v{__version__} "
-        model_info = f" Model: {self.selected_model} "
+        title = f" {self.theme['primary']}Chat Console{self.theme['reset']} v{__version__} "
+        model_info = f" Model: {self.theme['accent']}{self.selected_model}{self.theme['reset']} "

-        # Calculate spacing
-
+        # Calculate spacing (without color codes for length calculation)
+        title_plain = f" Chat Console v{__version__} "
+        model_plain = f" Model: {self.selected_model} "
+        used_space = len(title_plain) + len(model_plain)
         remaining = self.width - used_space - 2
         spacing = chars['horizontal'] * max(0, remaining)

-        header_line = chars['top_left']
+        header_line = f"{self.theme['muted']}{chars['top_left']}{title}{spacing}{model_info}{chars['top_right']}{self.theme['reset']}"
         lines.append(header_line)

         # Conversation title
         conv_title = self.current_conversation.title if self.current_conversation else "New Conversation"
-
+        title_content = f" {self.theme['secondary']}{conv_title}{self.theme['reset']} "
+        padding_needed = self.width - 2 - len(conv_title) - 1
+        title_line = f"{self.theme['muted']}{chars['vertical']}{title_content}{' ' * padding_needed}{chars['vertical']}{self.theme['reset']}"
         lines.append(title_line)

         # Separator
-
+        separator = f"{self.theme['muted']}{self.draw_border_line(self.width, 'middle')}{self.theme['reset']}"
+        lines.append(separator)

         return lines

     def draw_footer(self) -> List[str]:
-        """Draw the footer with controls"""
+        """Draw the footer with colorized controls"""
         chars = self.get_border_chars()

-
-
+        # Colorize control keys
+        controls = (f"{self.theme['accent']}[Tab]{self.theme['reset']} Menu Mode "
+                    f"{self.theme['accent']}[q]{self.theme['reset']} Quit "
+                    f"{self.theme['accent']}[n]{self.theme['reset']} New "
+                    f"{self.theme['accent']}[h]{self.theme['reset']} History "
+                    f"{self.theme['accent']}[s]{self.theme['reset']} Settings "
+                    f"{self.theme['accent']}[m]{self.theme['reset']} Models")
+
+        # Calculate plain text length for padding
+        controls_plain = "[Tab] Menu Mode [q] Quit [n] New [h] History [s] Settings [m] Models"
+        padding_needed = self.width - 2 - len(controls_plain) - 1
+
+        footer_line = f"{self.theme['muted']}{chars['vertical']} {controls}{' ' * padding_needed}{chars['vertical']}{self.theme['reset']}"

         return [
-            self.draw_border_line(self.width, 'middle'),
+            f"{self.theme['muted']}{self.draw_border_line(self.width, 'middle')}{self.theme['reset']}",
             footer_line,
-            self.draw_border_line(self.width, 'bottom')
+            f"{self.theme['muted']}{self.draw_border_line(self.width, 'bottom')}{self.theme['reset']}"
         ]

     def format_message(self, message: Message) -> List[str]:
-        """
+        """Enhanced message formatting with colors, code highlighting and better wrapping"""
         timestamp = datetime.now().strftime("%H:%M")
         chars = self.get_border_chars()

         # Calculate available width for content
         content_width = self.width - 10  # Account for borders and timestamp

-        #
-
-        lines = []
-        current_line = ""
-
-        for word in words:
-            if len(current_line) + len(word) + 1 <= content_width:
-                if current_line:
-                    current_line += " "
-                current_line += word
-            else:
-                if current_line:
-                    lines.append(current_line)
-                current_line = word
+        # Apply code highlighting if enabled
+        highlighted_content = self._detect_and_highlight_code(message.content)

-
-
+        # Use improved word wrapping
+        lines = self._improved_word_wrap(highlighted_content, content_width)

-        # Format lines with proper spacing
+        # Format lines with proper spacing and colors
         formatted_lines = []
         for i, line in enumerate(lines):
             if i == 0:
-                # First line with timestamp
-
-
+                # First line with colorized timestamp and role indicator
+                if message.role == "user":
+                    role_indicator = f"{self.theme['primary']}👤{self.theme['reset']}"
+                    role_color = self.theme['primary']
+                else:
+                    role_indicator = f"{self.theme['accent']}🤖{self.theme['reset']}"
+                    role_color = self.theme['accent']
+
+                prefix = f" {role_indicator} {self.theme['muted']}{timestamp}{self.theme['reset']} "
+
+                # Calculate plain text length for proper alignment
+                prefix_plain = f" 👤 {timestamp} "
+                content_padding = content_width - len(prefix_plain) - len(line.replace(self.theme.get('accent', ''), '').replace(self.theme.get('reset', ''), ''))
+
+                formatted_line = f"{self.theme['muted']}{chars['vertical']}{prefix}{line}{' ' * max(0, content_padding)}{chars['vertical']}{self.theme['reset']}"
             else:
-                # Continuation lines
+                # Continuation lines with proper indentation
                 prefix = "        "  # Align with content
-
+                content_padding = content_width - len(prefix) - len(line.replace(self.theme.get('accent', ''), '').replace(self.theme.get('reset', ''), ''))
+                formatted_line = f"{self.theme['muted']}{chars['vertical']}{prefix}{line}{' ' * max(0, content_padding)}{chars['vertical']}{self.theme['reset']}"
             formatted_lines.append(formatted_line)

         # Add empty line for spacing
-        empty_line = chars['vertical']
+        empty_line = f"{self.theme['muted']}{chars['vertical']}{' ' * (self.width - 2)}{chars['vertical']}{self.theme['reset']}"
         formatted_lines.append(empty_line)

         return formatted_lines

+    def _detect_and_highlight_code(self, content: str) -> str:
+        """Detect and highlight code blocks in content"""
+        if not CONFIG.get("highlight_code", True):
+            return content
+
+        try:
+            # Try to import colorama for terminal colors
+            from colorama import Fore, Style, init
+            init()  # Initialize colorama
+
+            lines = content.split('\n')
+            result_lines = []
+            in_code_block = False
+
+            for line in lines:
+                # Detect code block markers
+                if line.strip().startswith('```'):
+                    in_code_block = not in_code_block
+                    if in_code_block:
+                        result_lines.append(f"{Fore.CYAN}{line}{Style.RESET_ALL}")
+                    else:
+                        result_lines.append(f"{Fore.CYAN}{line}{Style.RESET_ALL}")
+                elif in_code_block:
+                    # Highlight code content
+                    result_lines.append(f"{Fore.GREEN}{line}{Style.RESET_ALL}")
+                elif '`' in line and line.count('`') >= 2:
+                    # Inline code highlighting
+                    import re
+                    highlighted = re.sub(
+                        r'`([^`]+)`',
+                        f'{Fore.GREEN}`\\1`{Style.RESET_ALL}',
+                        line
+                    )
+                    result_lines.append(highlighted)
+                else:
+                    result_lines.append(line)
+
+            return '\n'.join(result_lines)
+
+        except ImportError:
+            # Colorama not available, return content as-is
+            return content
+        except Exception:
+            # Any other error, return content as-is
+            return content
+
+    def _improved_word_wrap(self, text: str, width: int) -> List[str]:
+        """Improved word wrapping that preserves code blocks and handles long lines"""
+        lines = text.split('\n')
+        wrapped_lines = []
+
+        for line in lines:
+            # Handle very long lines (like URLs or code)
+            if len(line) > width:
+                # If it looks like code or a URL, don't break it aggressively
+                if (line.strip().startswith(('http', 'https', 'www', ' ', '\t')) or
+                    '```' in line or line.count('`') >= 2):
+                    # Add as-is but truncate if necessary
+                    if len(line) > width:
+                        wrapped_lines.append(line[:width-3] + "...")
+                    else:
+                        wrapped_lines.append(line)
+                else:
+                    # Normal word wrapping
+                    words = line.split()
+                    current_line = ""
+
+                    for word in words:
+                        if len(current_line) + len(word) + 1 <= width:
+                            if current_line:
+                                current_line += " "
+                            current_line += word
+                        else:
+                            if current_line:
+                                wrapped_lines.append(current_line)
+                            current_line = word
+
+                    if current_line:
+                        wrapped_lines.append(current_line)
+            else:
+                # Line fits, add as-is
+                wrapped_lines.append(line)
+
+        return wrapped_lines or [""]
+
+    def draw_ascii_welcome(self) -> List[str]:
+        """Draw ASCII art welcome screen"""
+        chars = self.get_border_chars()
+        lines = []
+
+        # ASCII art that scales with terminal width
+        if self.width >= 80:
+            ascii_art = [
+                " ┌─┐┬ ┬┌─┐┌┬┐ ┌─┐┌─┐┌┐┌┌─┐┌─┐┬ ┌─┐",
+                " │ ├─┤├─┤ │ │ │ ││││└─┐│ ││ ├┤ ",
+                " └─┘┴ ┴┴ ┴ ┴ └─┘└─┘┘└┘└─┘└─┘┴─┘└─┘"
+            ]
+        elif self.width >= 60:
+            ascii_art = [
+                " ┌─┐┬ ┬┌─┐┌┬┐",
+                " │ ├─┤├─┤ │ ",
+                " └─┘┴ ┴┴ ┴ ┴ "
+            ]
+        else:
+            ascii_art = ["Chat Console"]
+
+        # Center and colorize ASCII art
+        for art_line in ascii_art:
+            centered = art_line.center(self.width - 2)
+            colored_line = f"{self.theme['muted']}{chars['vertical']} {self.theme['primary']}{centered}{self.theme['muted']} {chars['vertical']}{self.theme['reset']}"
+            lines.append(colored_line)
+
+        # Add spacing
+        empty_line = f"{self.theme['muted']}{chars['vertical']}{' ' * (self.width - 2)}{chars['vertical']}{self.theme['reset']}"
+        lines.append(empty_line)
+
+        # Add tips
+        tips = [
+            f"{self.theme['secondary']}💡 Pro Tips:{self.theme['reset']}",
+            f"{self.theme['accent']}• Use Shift+Enter for multi-line input{self.theme['reset']}",
+            f"{self.theme['accent']}• Press Tab to switch between text and menu modes{self.theme['reset']}",
+            f"{self.theme['accent']}• Try 'm' for model browser{self.theme['reset']}"
+        ]
+
+        for tip in tips:
+            # Calculate plain text length for padding
+            tip_plain = tip.replace(self.theme.get('secondary', ''), '').replace(self.theme.get('accent', ''), '').replace(self.theme.get('reset', ''), '')
+            padding = (self.width - 2 - len(tip_plain)) // 2
+            tip_line = f"{self.theme['muted']}{chars['vertical']}{' ' * padding}{tip}{' ' * (self.width - 2 - len(tip_plain) - padding)}{chars['vertical']}{self.theme['reset']}"
+            lines.append(tip_line)
+
+        return lines
+
     def draw_messages(self) -> List[str]:
-        """Draw all messages in the conversation"""
+        """Draw all messages in the conversation with enhanced empty state"""
         lines = []
         chars = self.get_border_chars()

         if not self.messages:
-            #
-
-            lines.extend([empty_line] * 3)
-            center_text = "Start a conversation by typing a message below"
-            centered_line = chars['vertical'] + center_text.center(self.width - 2) + chars['vertical']
-            lines.append(centered_line)
-            lines.extend([empty_line] * 3)
+            # Enhanced empty state with ASCII welcome
+            lines.extend(self.draw_ascii_welcome())
         else:
             # Display messages
             for message in self.messages[-10:]:  # Show last 10 messages
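Note: the `*_plain` copies kept throughout `draw_header`, `draw_footer`, and `format_message` above exist because `len()` counts ANSI escape characters, so padding computed from a colored string would come up short. A small standalone illustration (not part of the package):

    # len() sees the escape sequences, not just the visible text.
    colored = "\x1b[36mChat Console\x1b[0m"  # cyan + reset around 12 visible chars
    plain = "Chat Console"
    print(len(colored), len(plain))          # 21 12 -> pad using the plain length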
@@ -239,33 +425,64 @@ class ConsoleUI:
         return lines

     def draw_input_area(self, current_input: str = "", prompt: str = "Type your message") -> List[str]:
-        """Draw the input area with
+        """Draw the input area with enhanced multi-line support and indicators"""
         chars = self.get_border_chars()
         lines = []

-        # Input prompt with mode indicator
+        # Input prompt with mode indicator and multi-line status
         mode_indicator = "📝" if self.input_mode == "text" else "⚡"
         mode_text = "TEXT" if self.input_mode == "text" else "MENU"
-
-
+
+        # Multi-line indicator
+        if self.multi_line_input:
+            ml_indicator = f"{self.theme['accent']}[MULTI-LINE: {len(self.multi_line_input)} lines]{self.theme['reset']}"
+            prompt_with_mode = f"{mode_indicator} {ml_indicator} (Ctrl+D to send, Esc to cancel)"
+        else:
+            prompt_with_mode = f"{mode_indicator} {prompt} ({mode_text} mode - Tab to switch, Ctrl+J for multi-line)"
+
+        prompt_line = chars['vertical'] + f" {prompt_with_mode}".ljust(self.width - 2) + chars['vertical']
         lines.append(prompt_line)

-        # Input field
+        # Input field(s)
         if self.input_mode == "text":
-
-
-
-
+            if self.multi_line_input:
+                # Show multi-line input with line numbers
+                for i, line_content in enumerate(self.multi_line_input[-3:]):  # Show last 3 lines
+                    line_num = len(self.multi_line_input) - 3 + i + 1 if len(self.multi_line_input) > 3 else i + 1
+                    if len(line_content) > self.width - 12:
+                        display_content = line_content[:self.width - 15] + "..."
+                    else:
+                        display_content = line_content
+
+                    if i == len(self.multi_line_input[-3:]) - 1:  # Current line
+                        input_line = chars['vertical'] + f" {self.theme['primary']}{line_num:2d}>{self.theme['reset']} {display_content}".ljust(self.width - 2) + chars['vertical']
+                    else:
+                        input_line = chars['vertical'] + f" {self.theme['muted']}{line_num:2d}:{self.theme['reset']} {display_content}".ljust(self.width - 2) + chars['vertical']
+                    lines.append(input_line)
+
+                # Show line count if more than 3 lines
+                if len(self.multi_line_input) > 3:
+                    more_line = chars['vertical'] + f" {self.theme['muted']}... ({len(self.multi_line_input)} total lines){self.theme['reset']}".ljust(self.width - 2) + chars['vertical']
+                    lines.append(more_line)
+            else:
+                # Single line input
+                input_content = current_input
+                if len(input_content) > self.width - 6:
+                    input_content = input_content[-(self.width - 9):] + "..."
+                input_line = chars['vertical'] + f" {self.theme['primary']}>{self.theme['reset']} {input_content}".ljust(self.width - 2) + chars['vertical']
+                lines.append(input_line)
         else:
             # Menu mode - show available hotkeys
-            menu_help = "n)ew h)istory s)ettings q)uit"
+            menu_help = f"{self.theme['secondary']}n{self.theme['reset']})ew {self.theme['secondary']}h{self.theme['reset']})istory {self.theme['secondary']}s{self.theme['reset']})ettings {self.theme['secondary']}m{self.theme['reset']})odels {self.theme['secondary']}q{self.theme['reset']})uit"
             input_line = chars['vertical'] + f" {menu_help}".ljust(self.width - 2) + chars['vertical']
-
-        lines.append(input_line)
+            lines.append(input_line)

         # Show generating indicator if needed
         if self.generating:
-
+            elapsed = int(time.time() - self.start_time) if hasattr(self, 'start_time') else 0
+            user_message = getattr(self, '_current_user_message', "")
+            phrase = self._get_dynamic_loading_phrase(user_message)
+            status_line = chars['vertical'] + f" {self.theme['accent']}● {phrase}... ({elapsed}s){self.theme['reset']}".ljust(self.width - 2) + chars['vertical']
             lines.append(status_line)

         return lines
@@ -317,43 +534,87 @@ class ConsoleUI:
         sys.stdout.flush()

     def get_input(self, prompt: str = "Type your message") -> str:
-        """Enhanced input with
-
+        """Enhanced input with multi-line support, history navigation, and hotkey support"""
+        # Check if we're in multi-line mode
+        if self.multi_line_input:
+            current_input = "\n".join(self.multi_line_input)
+        else:
+            current_input = ""

         while True:
-
-
-
-            if os.name == 'nt':
-                import msvcrt
-                char = msvcrt.getch().decode('utf-8', errors='ignore')
+            # Update prompt based on multi-line state
+            if self.multi_line_input:
+                display_prompt = f"Multi-line input (Ctrl+D to send, Esc to cancel)"
             else:
-
-
-
-
-
-
-
-
+                display_prompt = prompt
+
+            # Only redraw screen if not currently generating to avoid interference
+            if not self.generating:
+                self.draw_screen(current_input, display_prompt)
+
+            # Get character input with escape sequence handling
+            char = self._get_char_with_escape_sequences()

-            # Handle
+            # Handle escape sequences for arrow keys
+            if char.startswith('\x1b['):
+                if char == '\x1b[A':  # Up arrow - history navigation
+                    if self.input_history and self.history_index > 0:
+                        self.history_index -= 1
+                        current_input = self.input_history[self.history_index]
+                        self.multi_line_input = current_input.split('\n') if '\n' in current_input else []
+                elif char == '\x1b[B':  # Down arrow - history navigation
+                    if self.history_index < len(self.input_history) - 1:
+                        self.history_index += 1
+                        current_input = self.input_history[self.history_index]
+                        self.multi_line_input = current_input.split('\n') if '\n' in current_input else []
+                    elif self.history_index == len(self.input_history) - 1:
+                        self.history_index = len(self.input_history)
+                        current_input = ""
+                        self.multi_line_input = []
+                continue
+
+            # Handle special keys
             if char == '\t':
                 # Tab - switch between text and menu mode
                 self.input_mode = "menu" if self.input_mode == "text" else "text"
                 continue
             elif char == '\r' or char == '\n':
-                # Enter
+                # Enter - either new line (Shift+Enter) or submit
                 if self.input_mode == "text":
-
-
-
-
-
+                    if self.multi_line_input:
+                        # In multi-line mode, add new line
+                        self.multi_line_input.append("")
+                        current_input = "\n".join(self.multi_line_input)
+                    else:
+                        # Check for Shift+Enter to start multi-line
+                        # For simplicity, just Enter submits, Shift+Enter would need platform-specific detection
+                        if current_input.strip():
+                            # Add to history
+                            if current_input not in self.input_history:
+                                self.input_history.append(current_input)
+                            self.history_index = len(self.input_history)
+                            return current_input.strip()
+                        else:
+                            self.input_mode = "menu"
                     continue
                 else:
                     # In menu mode, Enter does nothing
                     continue
+            elif char == '\x04':  # Ctrl+D - send multi-line input
+                if self.multi_line_input and any(line.strip() for line in self.multi_line_input):
+                    final_input = "\n".join(self.multi_line_input).strip()
+                    if final_input not in self.input_history:
+                        self.input_history.append(final_input)
+                    self.history_index = len(self.input_history)
+                    self.multi_line_input = []
+                    return final_input
+            elif char == '\x1b':  # Escape - cancel multi-line or switch to text mode
+                if self.multi_line_input:
+                    self.multi_line_input = []
+                    current_input = ""
+                else:
+                    self.input_mode = "text"
+                continue
             elif char == '\x03':
                 # Ctrl+C
                 if self.generating:
@@ -367,10 +628,30 @@ class ConsoleUI:
             # Text input mode
             if char == '\x7f' or char == '\x08':
                 # Backspace
-
+                if self.multi_line_input:
+                    if self.multi_line_input[-1]:
+                        self.multi_line_input[-1] = self.multi_line_input[-1][:-1]
+                    elif len(self.multi_line_input) > 1:
+                        self.multi_line_input.pop()
+                    current_input = "\n".join(self.multi_line_input)
+                else:
+                    current_input = current_input[:-1]
+            elif char == '\x0a':  # Ctrl+J - start/continue multi-line
+                if not self.multi_line_input:
+                    # Start multi-line mode
+                    self.multi_line_input = [current_input, ""]
+                    current_input = "\n".join(self.multi_line_input)
+                else:
+                    # Add new line in multi-line mode
+                    self.multi_line_input.append("")
+                    current_input = "\n".join(self.multi_line_input)
             elif ord(char) >= 32:
                 # Printable character
-
+                if self.multi_line_input:
+                    self.multi_line_input[-1] += char
+                    current_input = "\n".join(self.multi_line_input)
+                else:
+                    current_input += char
         else:
             # Menu mode - handle hotkeys
             if char.lower() == 'q':
@@ -381,10 +662,157 @@ class ConsoleUI:
                 return "##HISTORY##"
             elif char.lower() == 's':
                 return "##SETTINGS##"
+            elif char.lower() == 'm':
+                return "##MODELS##"
             elif char == '\x1b':  # Escape - back to text mode
                 self.input_mode = "text"
                 continue

+    def _get_char_with_escape_sequences(self) -> str:
+        """Get character input with support for escape sequences (arrow keys)"""
+        if os.name == 'nt':
+            import msvcrt
+            char = msvcrt.getch()
+            if char == b'\xe0':  # Special key prefix on Windows
+                char = msvcrt.getch()
+                if char == b'H':  # Up arrow
+                    return '\x1b[A'
+                elif char == b'P':  # Down arrow
+                    return '\x1b[B'
+            return char.decode('utf-8', errors='ignore')
+        else:
+            import termios, tty
+            fd = sys.stdin.fileno()
+            old_settings = termios.tcgetattr(fd)
+            try:
+                tty.setraw(sys.stdin.fileno())
+                char = sys.stdin.read(1)
+                if char == '\x1b':  # Escape sequence
+                    char += sys.stdin.read(2)  # Read [A, [B, etc.
+                return char
+            finally:
+                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+
+    def _get_context_aware_loading_phrases(self, user_message: str) -> List[str]:
+        """Generate context-aware loading phrases based on user input"""
+        message_lower = user_message.lower()
+
+        # Code-related keywords
+        if any(keyword in message_lower for keyword in [
+            'code', 'function', 'debug', 'error', 'bug', 'script', 'program',
+            'algorithm', 'python', 'javascript', 'java', 'c++', 'html', 'css',
+            'sql', 'git', 'api', 'database', 'framework', 'library'
+        ]):
+            return [
+                "Analyzing your code", "Reviewing logic", "Debugging the issue",
+                "Examining patterns", "Processing syntax", "Evaluating approach",
+                "Formulating solution", "Optimizing structure"
+            ]
+
+        # Writing/creative keywords
+        elif any(keyword in message_lower for keyword in [
+            'write', 'essay', 'story', 'article', 'blog', 'creative', 'poem',
+            'letter', 'email', 'content', 'draft', 'narrative', 'description'
+        ]):
+            return [
+                "Crafting your text", "Shaping ideas", "Weaving words",
+                "Building narrative", "Polishing prose", "Structuring content",
+                "Refining language", "Creating flow"
+            ]
+
+        # Analysis/research keywords
+        elif any(keyword in message_lower for keyword in [
+            'analyze', 'research', 'study', 'explain', 'compare', 'evaluate',
+            'assess', 'investigate', 'examine', 'understand', 'interpret'
+        ]):
+            return [
+                "Analyzing information", "Processing data", "Examining details",
+                "Connecting insights", "Evaluating evidence", "Synthesizing findings",
+                "Drawing conclusions", "Structuring analysis"
+            ]
+
+        # Math/calculation keywords
+        elif any(keyword in message_lower for keyword in [
+            'calculate', 'math', 'solve', 'equation', 'formula', 'statistics',
+            'probability', 'geometry', 'algebra', 'number', 'compute'
+        ]):
+            return [
+                "Calculating result", "Processing numbers", "Solving equation",
+                "Working through math", "Computing values", "Analyzing formula",
+                "Checking calculations", "Verifying solution"
+            ]
+
+        # Question/help keywords
+        elif any(keyword in message_lower for keyword in [
+            'how', 'what', 'why', 'when', 'where', 'help', 'assist', 'guide',
+            'explain', 'show', 'teach', 'learn', 'understand'
+        ]):
+            return [
+                "Processing your question", "Gathering information", "Organizing thoughts",
+                "Preparing explanation", "Structuring response", "Connecting concepts",
+                "Clarifying details", "Formulating answer"
+            ]
+
+        # Default generic phrases
+        else:
+            return self.loading_phrases
+
+    def _get_dynamic_loading_phrase(self, user_message: str = "") -> str:
+        """Get current loading phrase with context-awareness and cycling"""
+        elapsed = time.time() - self.start_time
+
+        # Get context-aware phrases if user message provided
+        if user_message and hasattr(self, '_current_context_phrases'):
+            phrases = self._current_context_phrases
+        elif user_message:
+            phrases = self._get_context_aware_loading_phrases(user_message)
+            self._current_context_phrases = phrases  # Cache for this generation
+        else:
+            phrases = self.loading_phrases
+
+        # Change phrase every 2 seconds
+        phrase_index = int(elapsed // 2) % len(phrases)
+        return phrases[phrase_index]
+
+    def _update_streaming_display(self, content: str):
+        """Update display with real-time streaming content and context-aware status"""
+        if not self.generating:
+            return
+
+        # Show dynamic loading indicator with cycling phrases
+        elapsed = int(time.time() - self.start_time)
+        user_message = getattr(self, '_current_user_message', "")
+        phrase = self._get_dynamic_loading_phrase(user_message)
+
+        # Create a streaming preview of content (first/last parts)
+        preview = ""
+        if content:
+            if len(content) <= 100:
+                preview = content.replace('\n', ' ')[:50]
+            else:
+                # Show first 30 chars + ... + last 20 chars
+                start = content[:30].replace('\n', ' ')
+                end = content[-20:].replace('\n', ' ')
+                preview = f"{start}...{end}"
+
+        # Use cursor positioning to update multiple lines at bottom
+        print(f"\033[s", end="")  # Save cursor position
+
+        # Update streaming content area (second to last line)
+        if content:
+            print(f"\033[{self.height-1};1H", end="")  # Move to second-to-last row
+            print(f"\033[K", end="")  # Clear line
+            content_line = f"{self.theme['text']}► {preview}{self.theme['reset']}"
+            print(content_line[:self.width-2], end="", flush=True)
+
+        # Update status line (bottom)
+        print(f"\033[{self.height};1H", end="")  # Move to bottom row
+        print(f"\033[K", end="")  # Clear line
+        status_line = f"{self.theme['accent']}● {phrase}... {self.theme['muted']}({elapsed}s) - {len(content)} chars{self.theme['reset']}"
+        print(status_line, end="", flush=True)
+
+        print(f"\033[u", end="", flush=True)  # Restore cursor position
+
     async def create_new_conversation(self):
         """Create a new conversation"""
         title = "New Conversation"
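Note: `_update_streaming_display` above drives the terminal directly with VT100-style control sequences: ESC[s and ESC[u save and restore the cursor, ESC[{row};{col}H positions it absolutely, and ESC[K clears from the cursor to end of line. A standalone reference sketch of that pattern (an illustration assuming a VT100-compatible terminal, not package code):

    import sys

    def write_status(row: int, text: str) -> None:
        sys.stdout.write("\x1b[s")          # save cursor position
        sys.stdout.write(f"\x1b[{row};1H")  # jump to row, column 1
        sys.stdout.write("\x1b[K")          # clear from cursor to end of line
        sys.stdout.write(text)
        sys.stdout.write("\x1b[u")          # restore saved cursor position
        sys.stdout.flush()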
@@ -419,13 +847,19 @@ class ConsoleUI:
             self.db.update_conversation_title(self.current_conversation.id, new_title)
             self.current_conversation.title = new_title

-        except Exception
+        except Exception:
             # Silently fail - title generation is not critical
             pass

     async def generate_response(self, user_message: str):
-        """Generate AI response"""
+        """Generate AI response with enhanced streaming display"""
         self.generating = True
+        self.start_time = time.time()  # Reset timer for this generation
+        self._current_user_message = user_message  # Store for context-aware loading
+
+        # Clear any cached context phrases for new generation
+        if hasattr(self, '_current_context_phrases'):
+            delattr(self, '_current_context_phrases')

         try:
             # Add user message
@@ -470,8 +904,8 @@ class ConsoleUI:
             nonlocal full_response
             full_response = content
             assistant_message.content = content
-            #
-            self.
+            # Update screen with streaming content instead of clearing
+            self._update_streaming_display(content)

         # Apply style to messages
         styled_messages = apply_style_prefix(api_messages, self.selected_style)
@@ -533,32 +967,38 @@ class ConsoleUI:
             pass

     async def show_settings(self):
-        """Show enhanced settings menu with
+        """Show enhanced settings menu with style selection and persistence"""
         while True:
             self.clear_screen()
             print("=" * self.width)
             print("SETTINGS".center(self.width))
             print("=" * self.width)

-            print(f"Current Model: {self.selected_model}")
-            print(f"Current Style: {self.selected_style}")
+            print(f"Current Model: {CONFIG['available_models'].get(self.selected_model, {}).get('display_name', self.selected_model)}")
+            print(f"Current Style: {CONFIG['user_styles'].get(self.selected_style, {}).get('name', self.selected_style)}")
             print()
-
             print("What would you like to change?")
-            print("1.
+            print("1. Model")
             print("2. Response Style")
-            print("3.
+            print("3. Advanced Settings")
+            print("4. Save Settings")
             print("0. Back to Chat")

             try:
                 choice = input("\n> ").strip()

                 if choice == "1":
+                    # Model selection
                     await self._select_model()
                 elif choice == "2":
+                    # Style selection
                     self._select_style()
                 elif choice == "3":
-
+                    # Advanced settings
+                    await self._show_advanced_settings()
+                elif choice == "4":
+                    # Save settings
+                    self._save_settings()
                 elif choice == "0" or choice == "":
                     break

@@ -600,7 +1040,7 @@ class ConsoleUI:
                         "display_name": model_id,
                         "max_tokens": 4096
                     }))
-        except Exception
+        except Exception:
             pass  # Ollama not available

         # Display models by provider
@@ -666,6 +1106,263 @@ class ConsoleUI:
         except (ValueError, KeyboardInterrupt):
             pass

+    def _save_settings(self):
+        """Save current settings to config file"""
+        try:
+            CONFIG["default_model"] = self.selected_model
+            CONFIG["default_style"] = self.selected_style
+            save_config(CONFIG)
+            print("Settings saved successfully!")
+        except Exception as e:
+            print(f"Error saving settings: {e}")
+        input("Press Enter to continue...")
+
+    async def _show_advanced_settings(self):
+        """Show advanced settings configuration panel"""
+        while True:
+            self.clear_screen()
+            print("=" * self.width)
+            print("ADVANCED SETTINGS".center(self.width))
+            print("=" * self.width)
+
+            # Display current advanced settings
+            print("Current Advanced Settings:")
+            print(f"  Code Highlighting: {'On' if CONFIG.get('highlight_code', True) else 'Off'}")
+            print(f"  Dynamic Titles: {'On' if CONFIG.get('generate_dynamic_titles', True) else 'Off'}")
+            print(f"  Model Preloading: {'On' if CONFIG.get('preload_models', True) else 'Off'}")
+            print(f"  Ollama URL: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}")
+            print(f"  Inactive Timeout: {CONFIG.get('ollama_inactive_timeout', 30)} minutes")
+            print()
+
+            print("What would you like to configure?")
+            print("1. Provider Settings")
+            print("2. UI Settings")
+            print("3. Performance Settings")
+            print("4. Ollama Settings")
+            print("0. Back to Settings")
+
+            try:
+                choice = input("\n> ").strip()
+
+                if choice == "1":
+                    await self._configure_provider_settings()
+                elif choice == "2":
+                    await self._configure_ui_settings()
+                elif choice == "3":
+                    await self._configure_performance_settings()
+                elif choice == "4":
+                    await self._configure_ollama_settings()
+                elif choice == "0" or choice == "":
+                    break
+
+            except (ValueError, KeyboardInterrupt):
+                break
+
+    async def _configure_provider_settings(self):
+        """Configure provider-specific settings"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("PROVIDER SETTINGS".center(self.width))
+        print("=" * self.width)
+
+        print("Current Provider Settings:")
+        print(f"  OpenAI API Key: {'Set' if CONFIG.get('openai_api_key') else 'Not Set'}")
+        print(f"  Anthropic API Key: {'Set' if CONFIG.get('anthropic_api_key') else 'Not Set'}")
+        print(f"  Ollama Base URL: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}")
+        print()
+
+        print("Options:")
+        print("1. Set OpenAI API Key")
+        print("2. Set Anthropic API Key")
+        print("3. Set Ollama Base URL")
+        print("4. Clear API Keys")
+        print("0. Back")
+
+        choice = input("\n> ").strip()
+
+        if choice == "1":
+            key = input("Enter OpenAI API Key (or press Enter to skip): ").strip()
+            if key:
+                CONFIG["openai_api_key"] = key
+                print("OpenAI API Key updated!")
+
+        elif choice == "2":
+            key = input("Enter Anthropic API Key (or press Enter to skip): ").strip()
+            if key:
+                CONFIG["anthropic_api_key"] = key
+                print("Anthropic API Key updated!")
+
+        elif choice == "3":
+            url = input(f"Enter Ollama Base URL (current: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}): ").strip()
+            if url:
+                CONFIG["ollama_base_url"] = url
+                print("Ollama Base URL updated!")
+
+        elif choice == "4":
+            confirm = input("Clear all API keys? (y/N): ").strip().lower()
+            if confirm == 'y':
+                CONFIG.pop("openai_api_key", None)
+                CONFIG.pop("anthropic_api_key", None)
+                print("API keys cleared!")
+
+        if choice in ["1", "2", "3", "4"]:
+            input("\nPress Enter to continue...")
+
+    async def _configure_ui_settings(self):
+        """Configure UI and display settings"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("UI SETTINGS".center(self.width))
+        print("=" * self.width)
+
+        print("Current UI Settings:")
+        print(f"  Code Highlighting: {'On' if CONFIG.get('highlight_code', True) else 'Off'}")
+        print(f"  Emoji Indicators: {'On' if CONFIG.get('use_emoji_indicators', True) else 'Off'}")
+        print(f"  Word Wrapping: {'On' if CONFIG.get('word_wrap', True) else 'Off'}")
+        print()
+
+        print("Options:")
+        print("1. Toggle Code Highlighting")
+        print("2. Toggle Emoji Indicators")
+        print("3. Toggle Word Wrapping")
+        print("0. Back")
+
+        choice = input("\n> ").strip()
+
+        if choice == "1":
+            current = CONFIG.get('highlight_code', True)
+            CONFIG['highlight_code'] = not current
+            print(f"Code highlighting {'enabled' if not current else 'disabled'}!")
+
+        elif choice == "2":
+            current = CONFIG.get('use_emoji_indicators', True)
+            CONFIG['use_emoji_indicators'] = not current
+            print(f"Emoji indicators {'enabled' if not current else 'disabled'}!")
+
+        elif choice == "3":
+            current = CONFIG.get('word_wrap', True)
+            CONFIG['word_wrap'] = not current
+            print(f"Word wrapping {'enabled' if not current else 'disabled'}!")
+
+        if choice in ["1", "2", "3"]:
+            input("\nPress Enter to continue...")
+
+    async def _configure_performance_settings(self):
+        """Configure performance and optimization settings"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("PERFORMANCE SETTINGS".center(self.width))
+        print("=" * self.width)
+
+        print("Current Performance Settings:")
+        print(f"  Dynamic Title Generation: {'On' if CONFIG.get('generate_dynamic_titles', True) else 'Off'}")
+        print(f"  Model Preloading: {'On' if CONFIG.get('preload_models', True) else 'Off'}")
+        print(f"  History Limit: {CONFIG.get('history_limit', 100)} conversations")
+        print(f"  Message Limit: {CONFIG.get('message_limit', 50)} per conversation")
+        print()
+
+        print("Options:")
+        print("1. Toggle Dynamic Title Generation")
+        print("2. Toggle Model Preloading")
+        print("3. Set History Limit")
+        print("4. Set Message Limit")
+        print("0. Back")
+
+        choice = input("\n> ").strip()
+
+        if choice == "1":
+            current = CONFIG.get('generate_dynamic_titles', True)
+            CONFIG['generate_dynamic_titles'] = not current
+            print(f"Dynamic title generation {'enabled' if not current else 'disabled'}!")
+
+        elif choice == "2":
+            current = CONFIG.get('preload_models', True)
+            CONFIG['preload_models'] = not current
+            print(f"Model preloading {'enabled' if not current else 'disabled'}!")
+
+        elif choice == "3":
+            try:
+                limit = int(input(f"Enter history limit (current: {CONFIG.get('history_limit', 100)}): "))
+                if limit > 0:
+                    CONFIG['history_limit'] = limit
+                    print(f"History limit set to {limit}!")
+            except ValueError:
+                print("Invalid number!")
+
+        elif choice == "4":
+            try:
+                limit = int(input(f"Enter message limit (current: {CONFIG.get('message_limit', 50)}): "))
+                if limit > 0:
+                    CONFIG['message_limit'] = limit
+                    print(f"Message limit set to {limit}!")
+            except ValueError:
+                print("Invalid number!")
+
+        if choice in ["1", "2", "3", "4"]:
+            input("\nPress Enter to continue...")
+
+    async def _configure_ollama_settings(self):
+        """Configure Ollama-specific settings"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("OLLAMA SETTINGS".center(self.width))
+        print("=" * self.width)
+
+        print("Current Ollama Settings:")
+        print(f"  Base URL: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}")
+        print(f"  Inactive Timeout: {CONFIG.get('ollama_inactive_timeout', 30)} minutes")
+        print(f"  Auto Start: {'On' if CONFIG.get('ollama_auto_start', True) else 'Off'}")
+        print(f"  Model Cleanup: {'On' if CONFIG.get('ollama_cleanup_models', True) else 'Off'}")
+        print()
+
+        print("Options:")
+        print("1. Set Base URL")
+        print("2. Set Inactive Timeout")
+        print("3. Toggle Auto Start")
+        print("4. Toggle Model Cleanup")
+        print("5. Test Connection")
+        print("0. Back")
+
+        choice = input("\n> ").strip()
+
+        if choice == "1":
+            url = input(f"Enter Ollama Base URL (current: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}): ").strip()
+            if url:
+                CONFIG['ollama_base_url'] = url
+                print("Ollama Base URL updated!")
+
+        elif choice == "2":
+            try:
+                timeout = int(input(f"Enter inactive timeout in minutes (current: {CONFIG.get('ollama_inactive_timeout', 30)}): "))
+                if timeout > 0:
+                    CONFIG['ollama_inactive_timeout'] = timeout
+                    print(f"Inactive timeout set to {timeout} minutes!")
+            except ValueError:
+                print("Invalid number!")
+
+        elif choice == "3":
+            current = CONFIG.get('ollama_auto_start', True)
+            CONFIG['ollama_auto_start'] = not current
+            print(f"Ollama auto start {'enabled' if not current else 'disabled'}!")
+
+        elif choice == "4":
+            current = CONFIG.get('ollama_cleanup_models', True)
+            CONFIG['ollama_cleanup_models'] = not current
+            print(f"Model cleanup {'enabled' if not current else 'disabled'}!")
+
+        elif choice == "5":
+            print("Testing Ollama connection...")
+            try:
+                from .api.ollama import OllamaClient
+                client = await OllamaClient.create()
+                models = await client.get_available_models()
+                print(f"✓ Connection successful! Found {len(models)} local models.")
+            except Exception as e:
+                print(f"✗ Connection failed: {str(e)}")
+
+        if choice in ["1", "2", "3", "4", "5"]:
+            input("\nPress Enter to continue...")
+
     async def _detect_ollama_models(self):
         """Detect and add locally available Ollama models"""
         self.clear_screen()
@@ -714,6 +1411,376 @@ class ConsoleUI:

         input("\nPress Enter to continue...")

+    async def show_model_browser(self):
+        """Show Ollama model browser for managing local and available models"""
+        while True:
+            self.clear_screen()
+            print("=" * self.width)
+            print("OLLAMA MODEL BROWSER".center(self.width))
+            print("=" * self.width)
+
+            print("What would you like to do?")
+            print("1. View Local Models")
+            print("2. Browse Available Models")
+            print("3. Search Models")
+            print("4. Switch Current Model")
+            print("0. Back to Chat")
+
+            try:
+                choice = input("\n> ").strip()
+
+                if choice == "1":
+                    await self._list_local_models()
+                elif choice == "2":
+                    await self._list_available_models()
+                elif choice == "3":
+                    await self._search_models()
+                elif choice == "4":
+                    await self._switch_model()
+                elif choice == "0" or choice == "":
+                    break
+
+            except (ValueError, KeyboardInterrupt):
+                break
+
+    async def _list_local_models(self):
+        """List locally installed Ollama models"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("LOCAL OLLAMA MODELS".center(self.width))
+        print("=" * self.width)
+
+        try:
+            # Get Ollama client with output suppression
+            with self._suppress_output():
+                from .api.ollama import OllamaClient
+                client = await OllamaClient.create()
+
+                # Get local models
+                local_models = await client.get_available_models()
+
+            if not local_models:
+                print("No local models found.")
+                print("Use option 2 to browse and download models from the registry.")
+            else:
+                print(f"Found {len(local_models)} local models:\n")
+
+                for i, model in enumerate(local_models):
+                    model_id = model.get("id", "unknown")
+                    marker = "►" if model_id == self.selected_model else " "
+                    print(f"{marker} {i+1:2d}. {model_id}")
+
+                print("\nOptions:")
+                print("d) Delete a model")
+                print("i) Show model details")
+                print("s) Switch to a model")
+                print("Enter) Back to model browser")
+
+                sub_choice = input("\n> ").strip().lower()
+
+                if sub_choice == "d":
+                    await self._delete_model_menu(local_models)
+                elif sub_choice == "i":
+                    await self._show_model_details_menu(local_models)
+                elif sub_choice == "s":
+                    await self._switch_model_menu(local_models)
+
+        except Exception as e:
+            print(f"Error connecting to Ollama: {str(e)}")
+            print("Make sure Ollama is running and accessible.")
+
+        input("\nPress Enter to continue...")
+
+    async def _list_available_models(self):
+        """List available models for download from Ollama registry"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("AVAILABLE OLLAMA MODELS".center(self.width))
+        print("=" * self.width)
+
+        try:
+            # Get Ollama client with output suppression
+            with self._suppress_output():
+                from .api.ollama import OllamaClient
+                client = await OllamaClient.create()
+
+            print("Loading available models... (this may take a moment)")
+            with self._suppress_output():
+                available_models = await client.list_available_models_from_registry("")
+
+            if not available_models:
+                print("No models found in registry.")
+            else:
+                # Group by model family for better organization
+                families = {}
+                for model in available_models:
+                    family = model.get("model_family", "Other")
+                    if family not in families:
+                        families[family] = []
+                    families[family].append(model)
+
+                # Display by family
+                model_index = 1
+                model_map = {}
+
+                for family, models in sorted(families.items()):
+                    print(f"\n{family} Models:")
+                    print("-" * 40)
+
+                    for model in models[:5]:  # Show first 5 per family
+                        name = model.get("name", "unknown")
+                        description = model.get("description", "")
+                        size = model.get("parameter_size", "Unknown size")
+
+                        print(f"{model_index:2d}. {name} ({size})")
+                        if description:
+                            print(f"    {description[:60]}...")
+
+                        model_map[str(model_index)] = model
+                        model_index += 1
+
+                    if len(models) > 5:
+                        print(f"    ... and {len(models) - 5} more {family} models")
+
+                print(f"\nShowing top models by family (total: {len(available_models)})")
+                print("\nOptions:")
+                print("Enter model number to download")
+                print("s) Search for specific models")
+                print("Enter) Back to model browser")
+
+                choice = input("\n> ").strip()
+
+                if choice in model_map:
+                    await self._download_model(model_map[choice])
+                elif choice.lower() == "s":
+                    await self._search_models()
+
+        except Exception as e:
+            print(f"Error fetching available models: {str(e)}")
+
+        input("\nPress Enter to continue...")
+
+    async def _search_models(self):
+        """Search for models by name or description"""
+        self.clear_screen()
+        print("=" * self.width)
+        print("SEARCH OLLAMA MODELS".center(self.width))
+        print("=" * self.width)
+
+        query = input("Enter search term (name, family, or description): ").strip()
+
+        if not query:
+            return
+
+        try:
+            # Get Ollama client with output suppression
+            with self._suppress_output():
+                from .api.ollama import OllamaClient
+                client = await OllamaClient.create()
+
+            print(f"\nSearching for '{query}'...")
+            with self._suppress_output():
+                all_models = await client.list_available_models_from_registry("")
+
+            # Filter models
+            matching_models = []
+            query_lower = query.lower()
+
+            for model in all_models:
+                if (query_lower in model.get("name", "").lower() or
+                    query_lower in model.get("description", "").lower() or
+                    query_lower in model.get("model_family", "").lower()):
+                    matching_models.append(model)
+
+            if not matching_models:
+                print(f"No models found matching '{query}'")
+            else:
+                print(f"\nFound {len(matching_models)} models matching '{query}':\n")
+
+                model_map = {}
+                for i, model in enumerate(matching_models[:20]):  # Show first 20 matches
+                    name = model.get("name", "unknown")
+                    description = model.get("description", "")
+                    size = model.get("parameter_size", "Unknown size")
+                    family = model.get("model_family", "Unknown")
+
+                    print(f"{i+1:2d}. {name} ({family}, {size})")
+                    if description:
+                        print(f"    {description[:70]}...")
+                    print()
+
+                    model_map[str(i+1)] = model
+
+                if len(matching_models) > 20:
+                    print(f"... and {len(matching_models) - 20} more matches")
+
+                print("\nEnter model number to download (or press Enter to continue):")
+                choice = input("> ").strip()
+
+                if choice in model_map:
+                    await self._download_model(model_map[choice])
+
+        except Exception as e:
+            print(f"Error searching models: {str(e)}")
+
+        input("\nPress Enter to continue...")
+
+    async def _download_model(self, model_info):
+        """Download a model with progress indication"""
+        model_name = model_info.get("name", "unknown")
+        size_info = model_info.get("parameter_size", "Unknown size")
+
+        print(f"\nDownloading {model_name} ({size_info})...")
+        print("This may take several minutes depending on model size and connection.")
+        print("Press Ctrl+C to cancel.\n")
+
+        confirm = input(f"Download {model_name}? (y/N): ").strip().lower()
+        if confirm != 'y':
+            return
+
+        try:
+            # Get Ollama client with output suppression
+            with self._suppress_output():
+                from .api.ollama import OllamaClient
+                client = await OllamaClient.create()
+
+            # Track download progress
+            last_status = ""
+
+            async for progress in client.pull_model(model_name):
+                status = progress.get("status", "")
+
+                if status != last_status:
+                    print(f"Status: {status}")
+                    last_status = status
+
+                # Show progress if available
+                if "total" in progress and "completed" in progress:
+                    total = progress["total"]
+                    completed = progress["completed"]
+                    percent = (completed / total) * 100 if total > 0 else 0
+                    print(f"Progress: {percent:.1f}% ({completed:,}/{total:,} bytes)")
+
+                # Check if download is complete
+                if status == "success" or "success" in status.lower():
+                    print(f"\n✓ {model_name} downloaded successfully!")
+                    break
+
+        except KeyboardInterrupt:
+            print("\nDownload cancelled by user.")
+        except Exception as e:
+            print(f"\nError downloading model: {str(e)}")
+
+    async def _delete_model_menu(self, local_models):
+        """Show model deletion menu"""
+        print("\nSelect model to delete:")
+        for i, model in enumerate(local_models):
+            print(f"{i+1:2d}. {model.get('id', 'unknown')}")
+
+        choice = input("\nEnter model number (or press Enter to cancel): ").strip()
+
+        if choice.isdigit():
+            idx = int(choice) - 1
+            if 0 <= idx < len(local_models):
+                model_id = local_models[idx].get("id", "unknown")
+
+                print(f"\nWARNING: This will permanently delete {model_id}")
+                confirm = input("Type 'DELETE' to confirm: ").strip()
+
+                if confirm == "DELETE":
+                    try:
+                        with self._suppress_output():
+                            from .api.ollama import OllamaClient
+                            client = await OllamaClient.create()
+                            await client.delete_model(model_id)
+                        print(f"✓ {model_id} deleted successfully!")
+                    except Exception as e:
+                        print(f"Error deleting model: {str(e)}")
+                else:
+                    print("Deletion cancelled.")
+
+    async def _show_model_details_menu(self, local_models):
+        """Show detailed information about a model"""
+        print("\nSelect model for details:")
+        for i, model in enumerate(local_models):
+            print(f"{i+1:2d}. {model.get('id', 'unknown')}")
+
+        choice = input("\nEnter model number (or press Enter to cancel): ").strip()
+
+        if choice.isdigit():
+            idx = int(choice) - 1
+            if 0 <= idx < len(local_models):
+                model_id = local_models[idx].get("id", "unknown")
+                await self._show_model_details(model_id)
+
+    async def _show_model_details(self, model_id):
+        """Show detailed information about a specific model"""
+        try:
+            from .api.ollama import OllamaClient
+            client = await OllamaClient.create()
+            details = await client.get_model_details(model_id)
+
+            self.clear_screen()
+            print("=" * self.width)
+            print(f"MODEL DETAILS: {model_id}".center(self.width))
+            print("=" * self.width)
+
+            if "error" in details:
+                print(f"Error getting details: {details['error']}")
+            else:
+                print(f"Name: {model_id}")
+
+                if details.get("size"):
+                    size_gb = details["size"] / (1024**3)
+                    print(f"Size: {size_gb:.1f} GB")
+
+                if details.get("modified_at"):
+                    print(f"Modified: {details['modified_at']}")
+
+                if details.get("parameters"):
+                    print(f"\nParameters: {details['parameters']}")
+
+                if details.get("modelfile"):
+                    print(f"\nModelfile (first 500 chars):")
+                    print("-" * 40)
+                    print(details["modelfile"][:500])
+                    if len(details["modelfile"]) > 500:
+                        print("...")
+
+        except Exception as e:
+            print(f"Error getting model details: {str(e)}")
+
+        input("\nPress Enter to continue...")
+
+    async def _switch_model_menu(self, local_models):
+        """Switch to a different local model"""
+        print("\nSelect model to switch to:")
+        for i, model in enumerate(local_models):
+            model_id = model.get("id", "unknown")
+            marker = "►" if model_id == self.selected_model else " "
+            print(f"{marker} {i+1:2d}. {model_id}")
+
+        choice = input("\nEnter model number (or press Enter to cancel): ").strip()
+
+        if choice.isdigit():
+            idx = int(choice) - 1
+            if 0 <= idx < len(local_models):
+                old_model = self.selected_model
+                self.selected_model = local_models[idx].get("id", "unknown")
+                print(f"\n✓ Switched from {old_model} to {self.selected_model}")
+
+    async def _switch_model(self):
+        """Switch current model (combines local and available models)"""
+        try:
+            from .api.ollama import OllamaClient
+            client = await OllamaClient.create()
+            local_models = await client.get_available_models()
+            await self._switch_model_menu(local_models)
+        except Exception as e:
+            print(f"Error getting local models: {str(e)}")
+
+        input("\nPress Enter to continue...")
+
     async def run(self):
         """Main application loop"""
         # Create initial conversation
@@ -742,6 +1809,9 @@ class ConsoleUI:
             elif user_input == "##SETTINGS##":
                 await self.show_settings()
                 continue
+            elif user_input == "##MODELS##":
+                await self.show_model_browser()
+                continue

             # Handle legacy single-letter commands for backward compatibility
             if user_input.lower() == 'q':
@@ -756,6 +1826,9 @@ class ConsoleUI:
             elif user_input.lower() == 's':
                 await self.show_settings()
                 continue
+            elif user_input.lower() == 'm':
+                await self.show_model_browser()
+                continue

             # Generate response
             await self.generate_response(user_input)
@@ -774,7 +1847,7 @@

 def setup_signal_handlers():
     """Setup signal handlers for graceful shutdown"""
-    def signal_handler(
+    def signal_handler(_signum, _frame):
        print("\n\nShutting down gracefully...")
        sys.exit(0)

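Note: the new `signal_handler(_signum, _frame)` signature matches the two arguments Python's `signal` module passes to every handler. The registration lines fall outside the hunk shown above; a hedged sketch of the typical wiring (assumed, not confirmed by this diff):

    import signal
    import sys

    def setup_signal_handlers():
        def signal_handler(_signum, _frame):
            print("\n\nShutting down gracefully...")
            sys.exit(0)

        # Assumed registration; the diff shows only the handler body.
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)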