chat-console 0.4.7 → 0.4.9 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
app/console_chat.py CHANGED
@@ -11,6 +11,8 @@ import argparse
  import signal
  import threading
  import time
+ import random
+ import json
  from datetime import datetime
  from typing import List, Optional, Dict, Any
  import shutil
@@ -23,7 +25,7 @@ from .console_utils import console_streaming_response, apply_style_prefix
  from .api.base import BaseModelClient

  class ConsoleUI:
- """Pure console UI following Rams design principles"""
+ """Pure console UI following Rams design principles with Gemini-inspired enhancements"""

  def __init__(self):
  self.width = min(shutil.get_terminal_size().columns, 120)
@@ -36,10 +38,49 @@ class ConsoleUI:
  self.running = True
  self.generating = False
  self.input_mode = "text" # "text" or "menu"
+ self.multi_line_input = []
+ self.input_history = []
+ self.history_index = 0
+ self.theme = self._load_theme()
+ self.loading_phrases = [
+ "Thinking deeply", "Crafting response", "Processing context",
+ "Analyzing request", "Generating ideas", "Considering options",
+ "Formulating answer", "Connecting concepts", "Refining thoughts"
+ ]
+ self.loading_phase_index = 0
+ self.start_time = time.time()

  # Suppress verbose logging for console mode
  self._setup_console_logging()

+ def _load_theme(self) -> Dict[str, str]:
+ """Load color theme configuration"""
+ try:
+ # Try to import colorama for colors
+ from colorama import Fore, Back, Style, init
+ init(autoreset=True)
+
+ # Default theme inspired by gemini-code-assist
+ return {
+ 'primary': Fore.CYAN,
+ 'secondary': Fore.BLUE,
+ 'accent': Fore.MAGENTA,
+ 'success': Fore.GREEN,
+ 'warning': Fore.YELLOW,
+ 'error': Fore.RED,
+ 'muted': Fore.LIGHTBLACK_EX,
+ 'text': Fore.WHITE,
+ 'reset': Style.RESET_ALL,
+ 'bold': Style.BRIGHT,
+ 'dim': Style.DIM
+ }
+ except ImportError:
+ # Fallback to no colors if colorama not available
+ return {key: '' for key in [
+ 'primary', 'secondary', 'accent', 'success', 'warning',
+ 'error', 'muted', 'text', 'reset', 'bold', 'dim'
+ ]}
+
  def _setup_console_logging(self):
  """Setup logging to minimize disruption to console UI"""
  import logging
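Note: the `_load_theme` fallback above maps every theme key to an empty string, so the f-strings that interpolate `self.theme[...]` simply render plain text when colorama is not installed. A minimal standalone sketch of the same pattern (the function and the reduced key set here are illustrative, not part of the package):

def load_theme() -> dict:
    keys = ['primary', 'accent', 'muted', 'reset']
    try:
        from colorama import Fore, Style, init
        init(autoreset=True)
        return {'primary': Fore.CYAN, 'accent': Fore.MAGENTA,
                'muted': Fore.LIGHTBLACK_EX, 'reset': Style.RESET_ALL}
    except ImportError:
        # Without colorama every lookup yields '', so the same f-strings still work.
        return {key: '' for key in keys}

theme = load_theme()
print(f"{theme['primary']}Chat Console{theme['reset']} ready")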
@@ -132,105 +173,250 @@ class ConsoleUI:
  return chars['horizontal'] * width

  def draw_header(self) -> List[str]:
- """Draw the application header"""
+ """Draw the application header with colors"""
  from . import __version__
  chars = self.get_border_chars()

  lines = []

  # Top border with title and model info
- title = f" Chat Console v{__version__} "
- model_info = f" Model: {self.selected_model} "
+ title = f" {self.theme['primary']}Chat Console{self.theme['reset']} v{__version__} "
+ model_info = f" Model: {self.theme['accent']}{self.selected_model}{self.theme['reset']} "

- # Calculate spacing
- used_space = len(title) + len(model_info)
+ # Calculate spacing (without color codes for length calculation)
+ title_plain = f" Chat Console v{__version__} "
+ model_plain = f" Model: {self.selected_model} "
+ used_space = len(title_plain) + len(model_plain)
  remaining = self.width - used_space - 2
  spacing = chars['horizontal'] * max(0, remaining)

- header_line = chars['top_left'] + title + spacing + model_info + chars['top_right']
+ header_line = f"{self.theme['muted']}{chars['top_left']}{title}{spacing}{model_info}{chars['top_right']}{self.theme['reset']}"
  lines.append(header_line)

  # Conversation title
  conv_title = self.current_conversation.title if self.current_conversation else "New Conversation"
- title_line = chars['vertical'] + f" {conv_title} ".ljust(self.width - 2) + chars['vertical']
+ title_content = f" {self.theme['secondary']}{conv_title}{self.theme['reset']} "
+ padding_needed = self.width - 2 - len(conv_title) - 1
+ title_line = f"{self.theme['muted']}{chars['vertical']}{title_content}{' ' * padding_needed}{chars['vertical']}{self.theme['reset']}"
  lines.append(title_line)

  # Separator
- lines.append(self.draw_border_line(self.width, 'middle'))
+ separator = f"{self.theme['muted']}{self.draw_border_line(self.width, 'middle')}{self.theme['reset']}"
+ lines.append(separator)

  return lines

  def draw_footer(self) -> List[str]:
- """Draw the footer with controls"""
+ """Draw the footer with colorized controls"""
  chars = self.get_border_chars()

- controls = "[Tab] Menu Mode [q] Quit [n] New [h] History [s] Settings"
- footer_line = chars['vertical'] + f" {controls} ".ljust(self.width - 2) + chars['vertical']
+ # Colorize control keys
+ controls = (f"{self.theme['accent']}[Tab]{self.theme['reset']} Menu Mode "
+ f"{self.theme['accent']}[q]{self.theme['reset']} Quit "
+ f"{self.theme['accent']}[n]{self.theme['reset']} New "
+ f"{self.theme['accent']}[h]{self.theme['reset']} History "
+ f"{self.theme['accent']}[s]{self.theme['reset']} Settings "
+ f"{self.theme['accent']}[m]{self.theme['reset']} Models")
+
+ # Calculate plain text length for padding
+ controls_plain = "[Tab] Menu Mode [q] Quit [n] New [h] History [s] Settings [m] Models"
+ padding_needed = self.width - 2 - len(controls_plain) - 1
+
+ footer_line = f"{self.theme['muted']}{chars['vertical']} {controls}{' ' * padding_needed}{chars['vertical']}{self.theme['reset']}"

  return [
- self.draw_border_line(self.width, 'middle'),
+ f"{self.theme['muted']}{self.draw_border_line(self.width, 'middle')}{self.theme['reset']}",
  footer_line,
- self.draw_border_line(self.width, 'bottom')
+ f"{self.theme['muted']}{self.draw_border_line(self.width, 'bottom')}{self.theme['reset']}"
  ]

  def format_message(self, message: Message) -> List[str]:
- """Format a message for console display"""
+ """Enhanced message formatting with colors, code highlighting and better wrapping"""
  timestamp = datetime.now().strftime("%H:%M")
  chars = self.get_border_chars()

  # Calculate available width for content
  content_width = self.width - 10 # Account for borders and timestamp

- # Word wrap content
- words = message.content.split()
- lines = []
- current_line = ""
-
- for word in words:
- if len(current_line) + len(word) + 1 <= content_width:
- if current_line:
- current_line += " "
- current_line += word
- else:
- if current_line:
- lines.append(current_line)
- current_line = word
+ # Apply code highlighting if enabled
+ highlighted_content = self._detect_and_highlight_code(message.content)

- if current_line:
- lines.append(current_line)
+ # Use improved word wrapping
+ lines = self._improved_word_wrap(highlighted_content, content_width)

- # Format lines with proper spacing
+ # Format lines with proper spacing and colors
  formatted_lines = []
  for i, line in enumerate(lines):
  if i == 0:
- # First line with timestamp
- prefix = f" {timestamp} " if message.role == "user" else f" {timestamp} "
- formatted_line = chars['vertical'] + prefix + line.ljust(content_width) + chars['vertical']
+ # First line with colorized timestamp and role indicator
+ if message.role == "user":
+ role_indicator = f"{self.theme['primary']}👤{self.theme['reset']}"
+ role_color = self.theme['primary']
+ else:
+ role_indicator = f"{self.theme['accent']}🤖{self.theme['reset']}"
+ role_color = self.theme['accent']
+
+ prefix = f" {role_indicator} {self.theme['muted']}{timestamp}{self.theme['reset']} "
+
+ # Calculate plain text length for proper alignment
+ prefix_plain = f" 👤 {timestamp} "
+ content_padding = content_width - len(prefix_plain) - len(line.replace(self.theme.get('accent', ''), '').replace(self.theme.get('reset', ''), ''))
+
+ formatted_line = f"{self.theme['muted']}{chars['vertical']}{prefix}{line}{' ' * max(0, content_padding)}{chars['vertical']}{self.theme['reset']}"
  else:
- # Continuation lines
+ # Continuation lines with proper indentation
  prefix = " " # Align with content
- formatted_line = chars['vertical'] + prefix + line.ljust(content_width) + chars['vertical']
+ content_padding = content_width - len(prefix) - len(line.replace(self.theme.get('accent', ''), '').replace(self.theme.get('reset', ''), ''))
+ formatted_line = f"{self.theme['muted']}{chars['vertical']}{prefix}{line}{' ' * max(0, content_padding)}{chars['vertical']}{self.theme['reset']}"
  formatted_lines.append(formatted_line)

  # Add empty line for spacing
- empty_line = chars['vertical'] + " " * (self.width - 2) + chars['vertical']
+ empty_line = f"{self.theme['muted']}{chars['vertical']}{' ' * (self.width - 2)}{chars['vertical']}{self.theme['reset']}"
  formatted_lines.append(empty_line)

  return formatted_lines

+ def _detect_and_highlight_code(self, content: str) -> str:
+ """Detect and highlight code blocks in content"""
+ if not CONFIG.get("highlight_code", True):
+ return content
+
+ try:
+ # Try to import colorama for terminal colors
+ from colorama import Fore, Style, init
+ init() # Initialize colorama
+
+ lines = content.split('\n')
+ result_lines = []
+ in_code_block = False
+
+ for line in lines:
+ # Detect code block markers
+ if line.strip().startswith('```'):
+ in_code_block = not in_code_block
+ if in_code_block:
+ result_lines.append(f"{Fore.CYAN}{line}{Style.RESET_ALL}")
+ else:
+ result_lines.append(f"{Fore.CYAN}{line}{Style.RESET_ALL}")
+ elif in_code_block:
+ # Highlight code content
+ result_lines.append(f"{Fore.GREEN}{line}{Style.RESET_ALL}")
+ elif '`' in line and line.count('`') >= 2:
+ # Inline code highlighting
+ import re
+ highlighted = re.sub(
+ r'`([^`]+)`',
+ f'{Fore.GREEN}`\\1`{Style.RESET_ALL}',
+ line
+ )
+ result_lines.append(highlighted)
+ else:
+ result_lines.append(line)
+
+ return '\n'.join(result_lines)
+
+ except ImportError:
+ # Colorama not available, return content as-is
+ return content
+ except Exception:
+ # Any other error, return content as-is
+ return content
+
+ def _improved_word_wrap(self, text: str, width: int) -> List[str]:
+ """Improved word wrapping that preserves code blocks and handles long lines"""
+ lines = text.split('\n')
+ wrapped_lines = []
+
+ for line in lines:
+ # Handle very long lines (like URLs or code)
+ if len(line) > width:
+ # If it looks like code or a URL, don't break it aggressively
+ if (line.strip().startswith(('http', 'https', 'www', ' ', '\t')) or
+ '```' in line or line.count('`') >= 2):
+ # Add as-is but truncate if necessary
+ if len(line) > width:
+ wrapped_lines.append(line[:width-3] + "...")
+ else:
+ wrapped_lines.append(line)
+ else:
+ # Normal word wrapping
+ words = line.split()
+ current_line = ""
+
+ for word in words:
+ if len(current_line) + len(word) + 1 <= width:
+ if current_line:
+ current_line += " "
+ current_line += word
+ else:
+ if current_line:
+ wrapped_lines.append(current_line)
+ current_line = word
+
+ if current_line:
+ wrapped_lines.append(current_line)
+ else:
+ # Line fits, add as-is
+ wrapped_lines.append(line)
+
+ return wrapped_lines or [""]
+
+ def draw_ascii_welcome(self) -> List[str]:
+ """Draw ASCII art welcome screen"""
+ chars = self.get_border_chars()
+ lines = []
+
+ # ASCII art that scales with terminal width
+ if self.width >= 80:
+ ascii_art = [
+ " ┌─┐┬ ┬┌─┐┌┬┐ ┌─┐┌─┐┌┐┌┌─┐┌─┐┬ ┌─┐",
+ " │ ├─┤├─┤ │ │ │ ││││└─┐│ ││ ├┤ ",
+ " └─┘┴ ┴┴ ┴ ┴ └─┘└─┘┘└┘└─┘└─┘┴─┘└─┘"
+ ]
+ elif self.width >= 60:
+ ascii_art = [
+ " ┌─┐┬ ┬┌─┐┌┬┐",
+ " │ ├─┤├─┤ │ ",
+ " └─┘┴ ┴┴ ┴ ┴ "
+ ]
+ else:
+ ascii_art = ["Chat Console"]
+
+ # Center and colorize ASCII art
+ for art_line in ascii_art:
+ centered = art_line.center(self.width - 2)
+ colored_line = f"{self.theme['muted']}{chars['vertical']} {self.theme['primary']}{centered}{self.theme['muted']} {chars['vertical']}{self.theme['reset']}"
+ lines.append(colored_line)
+
+ # Add spacing
+ empty_line = f"{self.theme['muted']}{chars['vertical']}{' ' * (self.width - 2)}{chars['vertical']}{self.theme['reset']}"
+ lines.append(empty_line)
+
+ # Add tips
+ tips = [
+ f"{self.theme['secondary']}💡 Pro Tips:{self.theme['reset']}",
+ f"{self.theme['accent']}• Use Shift+Enter for multi-line input{self.theme['reset']}",
+ f"{self.theme['accent']}• Press Tab to switch between text and menu modes{self.theme['reset']}",
+ f"{self.theme['accent']}• Try 'm' for model browser{self.theme['reset']}"
+ ]
+
+ for tip in tips:
+ # Calculate plain text length for padding
+ tip_plain = tip.replace(self.theme.get('secondary', ''), '').replace(self.theme.get('accent', ''), '').replace(self.theme.get('reset', ''), '')
+ padding = (self.width - 2 - len(tip_plain)) // 2
+ tip_line = f"{self.theme['muted']}{chars['vertical']}{' ' * padding}{tip}{' ' * (self.width - 2 - len(tip_plain) - padding)}{chars['vertical']}{self.theme['reset']}"
+ lines.append(tip_line)
+
+ return lines
+
  def draw_messages(self) -> List[str]:
- """Draw all messages in the conversation"""
+ """Draw all messages in the conversation with enhanced empty state"""
  lines = []
  chars = self.get_border_chars()

  if not self.messages:
- # Empty state
- empty_line = chars['vertical'] + " " * (self.width - 2) + chars['vertical']
- lines.extend([empty_line] * 3)
- center_text = "Start a conversation by typing a message below"
- centered_line = chars['vertical'] + center_text.center(self.width - 2) + chars['vertical']
- lines.append(centered_line)
- lines.extend([empty_line] * 3)
+ # Enhanced empty state with ASCII welcome
+ lines.extend(self.draw_ascii_welcome())
  else:
  # Display messages
  for message in self.messages[-10:]: # Show last 10 messages
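Note: throughout this hunk the code keeps a parallel plain-text copy of each colorized string (`title_plain`, `controls_plain`, `prefix_plain`) because `len()` on a string that contains ANSI escape codes overstates its on-screen width, which would break the border padding. A more general alternative (illustrative only, not code from this package) is to strip the escape sequences before measuring:

import re

ANSI_RE = re.compile(r'\x1b\[[0-9;]*m')  # CSI color sequences such as '\x1b[36m'

def display_width(text: str) -> int:
    """Width of the text as the terminal renders it, ignoring color codes."""
    return len(ANSI_RE.sub('', text))

colored = "\x1b[36mChat Console\x1b[0m v0.4.9"
assert display_width(colored) == len("Chat Console v0.4.9")
padding = max(0, 80 - 2 - display_width(colored))  # pad against the visible width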
@@ -258,7 +444,7 @@ class ConsoleUI:
  input_line = chars['vertical'] + f" > {input_content}".ljust(self.width - 2) + chars['vertical']
  else:
  # Menu mode - show available hotkeys
- menu_help = "n)ew h)istory s)ettings q)uit"
+ menu_help = "n)ew h)istory s)ettings m)odels q)uit"
  input_line = chars['vertical'] + f" {menu_help}".ljust(self.width - 2) + chars['vertical']

  lines.append(input_line)
@@ -317,43 +503,85 @@ class ConsoleUI:
  sys.stdout.flush()

  def get_input(self, prompt: str = "Type your message") -> str:
- """Enhanced input with tab navigation and hotkey support"""
- current_input = ""
+ """Enhanced input with multi-line support, history navigation, and hotkey support"""
+ # Check if we're in multi-line mode
+ if self.multi_line_input:
+ current_input = "\n".join(self.multi_line_input)
+ else:
+ current_input = ""

  while True:
- self.draw_screen(current_input, prompt)
-
- # Get single character
- if os.name == 'nt':
- import msvcrt
- char = msvcrt.getch().decode('utf-8', errors='ignore')
+ # Update prompt based on multi-line state
+ if self.multi_line_input:
+ display_prompt = f"Multi-line input (Ctrl+D to send, Esc to cancel)"
  else:
- import termios, tty
- fd = sys.stdin.fileno()
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(sys.stdin.fileno())
- char = sys.stdin.read(1)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ display_prompt = prompt
+
+ self.draw_screen(current_input, display_prompt)

- # Handle special keys first
+ # Get character input with escape sequence handling
+ char = self._get_char_with_escape_sequences()
+
+ # Handle escape sequences for arrow keys
+ if char.startswith('\x1b['):
+ if char == '\x1b[A': # Up arrow - history navigation
+ if self.input_history and self.history_index > 0:
+ self.history_index -= 1
+ current_input = self.input_history[self.history_index]
+ self.multi_line_input = current_input.split('\n') if '\n' in current_input else []
+ elif char == '\x1b[B': # Down arrow - history navigation
+ if self.history_index < len(self.input_history) - 1:
+ self.history_index += 1
+ current_input = self.input_history[self.history_index]
+ self.multi_line_input = current_input.split('\n') if '\n' in current_input else []
+ elif self.history_index == len(self.input_history) - 1:
+ self.history_index = len(self.input_history)
+ current_input = ""
+ self.multi_line_input = []
+ continue
+
+ # Handle special keys
  if char == '\t':
  # Tab - switch between text and menu mode
  self.input_mode = "menu" if self.input_mode == "text" else "text"
  continue
  elif char == '\r' or char == '\n':
- # Enter
+ # Enter - either new line (Shift+Enter) or submit
  if self.input_mode == "text":
- # Submit text input
- if current_input.strip():
- return current_input.strip()
- # If empty input in text mode, switch to menu mode
- self.input_mode = "menu"
+ if self.multi_line_input:
+ # In multi-line mode, add new line
+ self.multi_line_input.append("")
+ current_input = "\n".join(self.multi_line_input)
+ else:
+ # Check for Shift+Enter to start multi-line
+ # For simplicity, just Enter submits, Shift+Enter would need platform-specific detection
+ if current_input.strip():
+ # Add to history
+ if current_input not in self.input_history:
+ self.input_history.append(current_input)
+ self.history_index = len(self.input_history)
+ return current_input.strip()
+ else:
+ self.input_mode = "menu"
  continue
  else:
  # In menu mode, Enter does nothing
  continue
+ elif char == '\x04': # Ctrl+D - send multi-line input
+ if self.multi_line_input and any(line.strip() for line in self.multi_line_input):
+ final_input = "\n".join(self.multi_line_input).strip()
+ if final_input not in self.input_history:
+ self.input_history.append(final_input)
+ self.history_index = len(self.input_history)
+ self.multi_line_input = []
+ return final_input
+ elif char == '\x1b': # Escape - cancel multi-line or switch to text mode
+ if self.multi_line_input:
+ self.multi_line_input = []
+ current_input = ""
+ else:
+ self.input_mode = "text"
+ continue
  elif char == '\x03':
  # Ctrl+C
  if self.generating:
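Note: the arrow-key branch above implements shell-style history recall: Up moves `history_index` toward older entries, Down moves toward newer ones, and stepping past the newest entry returns to an empty prompt. The bookkeeping in isolation (standalone sketch; the real code mutates ConsoleUI attributes instead of globals):

history = ["first prompt", "second prompt"]
index = len(history)              # one past the end means "editing a fresh line"

def on_up():
    global index
    if history and index > 0:
        index -= 1
    return history[index] if index < len(history) else ""

def on_down():
    global index
    if index < len(history) - 1:
        index += 1
        return history[index]
    index = len(history)          # stepped past the newest entry: blank line again
    return ""

assert on_up() == "second prompt"
assert on_up() == "first prompt"
assert on_down() == "second prompt"
assert on_down() == ""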
@@ -367,10 +595,25 @@ class ConsoleUI:
  # Text input mode
  if char == '\x7f' or char == '\x08':
  # Backspace
- current_input = current_input[:-1]
+ if self.multi_line_input:
+ if self.multi_line_input[-1]:
+ self.multi_line_input[-1] = self.multi_line_input[-1][:-1]
+ elif len(self.multi_line_input) > 1:
+ self.multi_line_input.pop()
+ current_input = "\n".join(self.multi_line_input)
+ else:
+ current_input = current_input[:-1]
+ elif char == '\x0a': # Shift+Enter equivalent (start multi-line)
+ if not self.multi_line_input:
+ self.multi_line_input = [current_input, ""]
+ current_input = "\n".join(self.multi_line_input)
  elif ord(char) >= 32:
  # Printable character
- current_input += char
+ if self.multi_line_input:
+ self.multi_line_input[-1] += char
+ current_input = "\n".join(self.multi_line_input)
+ else:
+ current_input += char
  else:
  # Menu mode - handle hotkeys
  if char.lower() == 'q':
@@ -381,10 +624,62 @@ class ConsoleUI:
  return "##HISTORY##"
  elif char.lower() == 's':
  return "##SETTINGS##"
+ elif char.lower() == 'm':
+ return "##MODELS##"
  elif char == '\x1b': # Escape - back to text mode
  self.input_mode = "text"
  continue

+ def _get_char_with_escape_sequences(self) -> str:
+ """Get character input with support for escape sequences (arrow keys)"""
+ if os.name == 'nt':
+ import msvcrt
+ char = msvcrt.getch()
+ if char == b'\xe0': # Special key prefix on Windows
+ char = msvcrt.getch()
+ if char == b'H': # Up arrow
+ return '\x1b[A'
+ elif char == b'P': # Down arrow
+ return '\x1b[B'
+ return char.decode('utf-8', errors='ignore')
+ else:
+ import termios, tty
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ char = sys.stdin.read(1)
+ if char == '\x1b': # Escape sequence
+ char += sys.stdin.read(2) # Read [A, [B, etc.
+ return char
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+
+ def _get_dynamic_loading_phrase(self) -> str:
+ """Get current loading phrase with cycling inspired by gemini-code-assist usePhraseCycler"""
+ elapsed = time.time() - self.start_time
+ # Change phrase every 2 seconds
+ phrase_index = int(elapsed // 2) % len(self.loading_phrases)
+ return self.loading_phrases[phrase_index]
+
+ def _update_streaming_display(self, content: str):
+ """Update display during streaming without clearing screen"""
+ if not self.generating:
+ return
+
+ # Show dynamic loading indicator with cycling phrases
+ elapsed = int(time.time() - self.start_time)
+ phrase = self._get_dynamic_loading_phrase()
+
+ # Simple cursor positioning update instead of full screen redraw
+ print(f"\r{self.theme['accent']}● {phrase}... {self.theme['muted']}({elapsed}s){self.theme['reset']}", end="", flush=True)
+
+ # Periodically redraw full screen (every 5 seconds or significant content changes)
+ if elapsed % 5 == 0 or len(content) > self.loading_phase_index + 100:
+ self.loading_phase_index = len(content)
+ # Update the assistant message and redraw
+ self.draw_screen("", f"{phrase} ({elapsed}s)")
+
  async def create_new_conversation(self):
  """Create a new conversation"""
  title = "New Conversation"
@@ -419,13 +714,14 @@ class ConsoleUI:
  self.db.update_conversation_title(self.current_conversation.id, new_title)
  self.current_conversation.title = new_title

- except Exception as e:
+ except Exception:
  # Silently fail - title generation is not critical
  pass

  async def generate_response(self, user_message: str):
- """Generate AI response"""
+ """Generate AI response with enhanced streaming display"""
  self.generating = True
+ self.start_time = time.time() # Reset timer for this generation

  try:
  # Add user message
@@ -470,8 +766,8 @@ class ConsoleUI:
  nonlocal full_response
  full_response = content
  assistant_message.content = content
- # Redraw screen periodically
- self.draw_screen("", "Generating response")
+ # Update screen with streaming content instead of clearing
+ self._update_streaming_display(content)

  # Apply style to messages
  styled_messages = apply_style_prefix(api_messages, self.selected_style)
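Note: the streaming callback now routes through `_update_streaming_display`, which combines two small techniques: a loading phrase selected from elapsed time (a new phrase every two seconds) and a carriage-return print that rewrites a single status line instead of clearing the screen. A compact illustration (phrase list and timing mirror the diff; the demo loop is illustrative):

import time

PHRASES = ["Thinking deeply", "Crafting response", "Processing context"]
start = time.time()

def status_line() -> str:
    elapsed = time.time() - start
    phrase = PHRASES[int(elapsed // 2) % len(PHRASES)]  # cycle every 2 seconds
    return f"\r● {phrase}... ({int(elapsed)}s)"

for _ in range(6):
    # '\r' returns the cursor to column 0, so each print overwrites the last one.
    print(status_line(), end="", flush=True)
    time.sleep(1)
print()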
@@ -533,32 +829,38 @@ class ConsoleUI:
  pass

  async def show_settings(self):
- """Show enhanced settings menu with dynamic model detection"""
+ """Show enhanced settings menu with style selection and persistence"""
  while True:
  self.clear_screen()
  print("=" * self.width)
  print("SETTINGS".center(self.width))
  print("=" * self.width)

- print(f"Current Model: {self.selected_model}")
- print(f"Current Style: {self.selected_style}")
+ print(f"Current Model: {CONFIG['available_models'].get(self.selected_model, {}).get('display_name', self.selected_model)}")
+ print(f"Current Style: {CONFIG['user_styles'].get(self.selected_style, {}).get('name', self.selected_style)}")
  print()
-
  print("What would you like to change?")
- print("1. Select Model")
+ print("1. Model")
  print("2. Response Style")
- print("3. Detect Ollama Models")
+ print("3. Advanced Settings")
+ print("4. Save Settings")
  print("0. Back to Chat")

  try:
  choice = input("\n> ").strip()

  if choice == "1":
+ # Model selection
  await self._select_model()
  elif choice == "2":
+ # Style selection
  self._select_style()
  elif choice == "3":
- await self._detect_ollama_models()
+ # Advanced settings
+ await self._show_advanced_settings()
+ elif choice == "4":
+ # Save settings
+ self._save_settings()
  elif choice == "0" or choice == "":
  break

@@ -600,7 +902,7 @@ class ConsoleUI:
  "display_name": model_id,
  "max_tokens": 4096
  }))
- except Exception as e:
+ except Exception:
  pass # Ollama not available

  # Display models by provider
@@ -666,6 +968,263 @@ class ConsoleUI:
666
968
  except (ValueError, KeyboardInterrupt):
667
969
  pass
668
970
 
971
+ def _save_settings(self):
972
+ """Save current settings to config file"""
973
+ try:
974
+ CONFIG["default_model"] = self.selected_model
975
+ CONFIG["default_style"] = self.selected_style
976
+ save_config(CONFIG)
977
+ print("Settings saved successfully!")
978
+ except Exception as e:
979
+ print(f"Error saving settings: {e}")
980
+ input("Press Enter to continue...")
981
+
982
+ async def _show_advanced_settings(self):
983
+ """Show advanced settings configuration panel"""
984
+ while True:
985
+ self.clear_screen()
986
+ print("=" * self.width)
987
+ print("ADVANCED SETTINGS".center(self.width))
988
+ print("=" * self.width)
989
+
990
+ # Display current advanced settings
991
+ print("Current Advanced Settings:")
992
+ print(f" Code Highlighting: {'On' if CONFIG.get('highlight_code', True) else 'Off'}")
993
+ print(f" Dynamic Titles: {'On' if CONFIG.get('generate_dynamic_titles', True) else 'Off'}")
994
+ print(f" Model Preloading: {'On' if CONFIG.get('preload_models', True) else 'Off'}")
995
+ print(f" Ollama URL: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}")
996
+ print(f" Inactive Timeout: {CONFIG.get('ollama_inactive_timeout', 30)} minutes")
997
+ print()
998
+
999
+ print("What would you like to configure?")
1000
+ print("1. Provider Settings")
1001
+ print("2. UI Settings")
1002
+ print("3. Performance Settings")
1003
+ print("4. Ollama Settings")
1004
+ print("0. Back to Settings")
1005
+
1006
+ try:
1007
+ choice = input("\n> ").strip()
1008
+
1009
+ if choice == "1":
1010
+ await self._configure_provider_settings()
1011
+ elif choice == "2":
1012
+ await self._configure_ui_settings()
1013
+ elif choice == "3":
1014
+ await self._configure_performance_settings()
1015
+ elif choice == "4":
1016
+ await self._configure_ollama_settings()
1017
+ elif choice == "0" or choice == "":
1018
+ break
1019
+
1020
+ except (ValueError, KeyboardInterrupt):
1021
+ break
1022
+
1023
+ async def _configure_provider_settings(self):
1024
+ """Configure provider-specific settings"""
1025
+ self.clear_screen()
1026
+ print("=" * self.width)
1027
+ print("PROVIDER SETTINGS".center(self.width))
1028
+ print("=" * self.width)
1029
+
1030
+ print("Current Provider Settings:")
1031
+ print(f" OpenAI API Key: {'Set' if CONFIG.get('openai_api_key') else 'Not Set'}")
1032
+ print(f" Anthropic API Key: {'Set' if CONFIG.get('anthropic_api_key') else 'Not Set'}")
1033
+ print(f" Ollama Base URL: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}")
1034
+ print()
1035
+
1036
+ print("Options:")
1037
+ print("1. Set OpenAI API Key")
1038
+ print("2. Set Anthropic API Key")
1039
+ print("3. Set Ollama Base URL")
1040
+ print("4. Clear API Keys")
1041
+ print("0. Back")
1042
+
1043
+ choice = input("\n> ").strip()
1044
+
1045
+ if choice == "1":
1046
+ key = input("Enter OpenAI API Key (or press Enter to skip): ").strip()
1047
+ if key:
1048
+ CONFIG["openai_api_key"] = key
1049
+ print("OpenAI API Key updated!")
1050
+
1051
+ elif choice == "2":
1052
+ key = input("Enter Anthropic API Key (or press Enter to skip): ").strip()
1053
+ if key:
1054
+ CONFIG["anthropic_api_key"] = key
1055
+ print("Anthropic API Key updated!")
1056
+
1057
+ elif choice == "3":
1058
+ url = input(f"Enter Ollama Base URL (current: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}): ").strip()
1059
+ if url:
1060
+ CONFIG["ollama_base_url"] = url
1061
+ print("Ollama Base URL updated!")
1062
+
1063
+ elif choice == "4":
1064
+ confirm = input("Clear all API keys? (y/N): ").strip().lower()
1065
+ if confirm == 'y':
1066
+ CONFIG.pop("openai_api_key", None)
1067
+ CONFIG.pop("anthropic_api_key", None)
1068
+ print("API keys cleared!")
1069
+
1070
+ if choice in ["1", "2", "3", "4"]:
1071
+ input("\nPress Enter to continue...")
1072
+
1073
+ async def _configure_ui_settings(self):
1074
+ """Configure UI and display settings"""
1075
+ self.clear_screen()
1076
+ print("=" * self.width)
1077
+ print("UI SETTINGS".center(self.width))
1078
+ print("=" * self.width)
1079
+
1080
+ print("Current UI Settings:")
1081
+ print(f" Code Highlighting: {'On' if CONFIG.get('highlight_code', True) else 'Off'}")
1082
+ print(f" Emoji Indicators: {'On' if CONFIG.get('use_emoji_indicators', True) else 'Off'}")
1083
+ print(f" Word Wrapping: {'On' if CONFIG.get('word_wrap', True) else 'Off'}")
1084
+ print()
1085
+
1086
+ print("Options:")
1087
+ print("1. Toggle Code Highlighting")
1088
+ print("2. Toggle Emoji Indicators")
1089
+ print("3. Toggle Word Wrapping")
1090
+ print("0. Back")
1091
+
1092
+ choice = input("\n> ").strip()
1093
+
1094
+ if choice == "1":
1095
+ current = CONFIG.get('highlight_code', True)
1096
+ CONFIG['highlight_code'] = not current
1097
+ print(f"Code highlighting {'enabled' if not current else 'disabled'}!")
1098
+
1099
+ elif choice == "2":
1100
+ current = CONFIG.get('use_emoji_indicators', True)
1101
+ CONFIG['use_emoji_indicators'] = not current
1102
+ print(f"Emoji indicators {'enabled' if not current else 'disabled'}!")
1103
+
1104
+ elif choice == "3":
1105
+ current = CONFIG.get('word_wrap', True)
1106
+ CONFIG['word_wrap'] = not current
1107
+ print(f"Word wrapping {'enabled' if not current else 'disabled'}!")
1108
+
1109
+ if choice in ["1", "2", "3"]:
1110
+ input("\nPress Enter to continue...")
1111
+
1112
+ async def _configure_performance_settings(self):
1113
+ """Configure performance and optimization settings"""
1114
+ self.clear_screen()
1115
+ print("=" * self.width)
1116
+ print("PERFORMANCE SETTINGS".center(self.width))
1117
+ print("=" * self.width)
1118
+
1119
+ print("Current Performance Settings:")
1120
+ print(f" Dynamic Title Generation: {'On' if CONFIG.get('generate_dynamic_titles', True) else 'Off'}")
1121
+ print(f" Model Preloading: {'On' if CONFIG.get('preload_models', True) else 'Off'}")
1122
+ print(f" History Limit: {CONFIG.get('history_limit', 100)} conversations")
1123
+ print(f" Message Limit: {CONFIG.get('message_limit', 50)} per conversation")
1124
+ print()
1125
+
1126
+ print("Options:")
1127
+ print("1. Toggle Dynamic Title Generation")
1128
+ print("2. Toggle Model Preloading")
1129
+ print("3. Set History Limit")
1130
+ print("4. Set Message Limit")
1131
+ print("0. Back")
1132
+
1133
+ choice = input("\n> ").strip()
1134
+
1135
+ if choice == "1":
1136
+ current = CONFIG.get('generate_dynamic_titles', True)
1137
+ CONFIG['generate_dynamic_titles'] = not current
1138
+ print(f"Dynamic title generation {'enabled' if not current else 'disabled'}!")
1139
+
1140
+ elif choice == "2":
1141
+ current = CONFIG.get('preload_models', True)
1142
+ CONFIG['preload_models'] = not current
1143
+ print(f"Model preloading {'enabled' if not current else 'disabled'}!")
1144
+
1145
+ elif choice == "3":
1146
+ try:
1147
+ limit = int(input(f"Enter history limit (current: {CONFIG.get('history_limit', 100)}): "))
1148
+ if limit > 0:
1149
+ CONFIG['history_limit'] = limit
1150
+ print(f"History limit set to {limit}!")
1151
+ except ValueError:
1152
+ print("Invalid number!")
1153
+
1154
+ elif choice == "4":
1155
+ try:
1156
+ limit = int(input(f"Enter message limit (current: {CONFIG.get('message_limit', 50)}): "))
1157
+ if limit > 0:
1158
+ CONFIG['message_limit'] = limit
1159
+ print(f"Message limit set to {limit}!")
1160
+ except ValueError:
1161
+ print("Invalid number!")
1162
+
1163
+ if choice in ["1", "2", "3", "4"]:
1164
+ input("\nPress Enter to continue...")
1165
+
1166
+ async def _configure_ollama_settings(self):
1167
+ """Configure Ollama-specific settings"""
1168
+ self.clear_screen()
1169
+ print("=" * self.width)
1170
+ print("OLLAMA SETTINGS".center(self.width))
1171
+ print("=" * self.width)
1172
+
1173
+ print("Current Ollama Settings:")
1174
+ print(f" Base URL: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}")
1175
+ print(f" Inactive Timeout: {CONFIG.get('ollama_inactive_timeout', 30)} minutes")
1176
+ print(f" Auto Start: {'On' if CONFIG.get('ollama_auto_start', True) else 'Off'}")
1177
+ print(f" Model Cleanup: {'On' if CONFIG.get('ollama_cleanup_models', True) else 'Off'}")
1178
+ print()
1179
+
1180
+ print("Options:")
1181
+ print("1. Set Base URL")
1182
+ print("2. Set Inactive Timeout")
1183
+ print("3. Toggle Auto Start")
1184
+ print("4. Toggle Model Cleanup")
1185
+ print("5. Test Connection")
1186
+ print("0. Back")
1187
+
1188
+ choice = input("\n> ").strip()
1189
+
1190
+ if choice == "1":
1191
+ url = input(f"Enter Ollama Base URL (current: {CONFIG.get('ollama_base_url', 'http://localhost:11434')}): ").strip()
1192
+ if url:
1193
+ CONFIG['ollama_base_url'] = url
1194
+ print("Ollama Base URL updated!")
1195
+
1196
+ elif choice == "2":
1197
+ try:
1198
+ timeout = int(input(f"Enter inactive timeout in minutes (current: {CONFIG.get('ollama_inactive_timeout', 30)}): "))
1199
+ if timeout > 0:
1200
+ CONFIG['ollama_inactive_timeout'] = timeout
1201
+ print(f"Inactive timeout set to {timeout} minutes!")
1202
+ except ValueError:
1203
+ print("Invalid number!")
1204
+
1205
+ elif choice == "3":
1206
+ current = CONFIG.get('ollama_auto_start', True)
1207
+ CONFIG['ollama_auto_start'] = not current
1208
+ print(f"Ollama auto start {'enabled' if not current else 'disabled'}!")
1209
+
1210
+ elif choice == "4":
1211
+ current = CONFIG.get('ollama_cleanup_models', True)
1212
+ CONFIG['ollama_cleanup_models'] = not current
1213
+ print(f"Model cleanup {'enabled' if not current else 'disabled'}!")
1214
+
1215
+ elif choice == "5":
1216
+ print("Testing Ollama connection...")
1217
+ try:
1218
+ from .api.ollama import OllamaClient
1219
+ client = await OllamaClient.create()
1220
+ models = await client.get_available_models()
1221
+ print(f"✓ Connection successful! Found {len(models)} local models.")
1222
+ except Exception as e:
1223
+ print(f"✗ Connection failed: {str(e)}")
1224
+
1225
+ if choice in ["1", "2", "3", "4", "5"]:
1226
+ input("\nPress Enter to continue...")
1227
+
669
1228
  async def _detect_ollama_models(self):
670
1229
  """Detect and add locally available Ollama models"""
671
1230
  self.clear_screen()
@@ -714,6 +1273,376 @@ class ConsoleUI:
714
1273
 
715
1274
  input("\nPress Enter to continue...")
716
1275
 
1276
+ async def show_model_browser(self):
1277
+ """Show Ollama model browser for managing local and available models"""
1278
+ while True:
1279
+ self.clear_screen()
1280
+ print("=" * self.width)
1281
+ print("OLLAMA MODEL BROWSER".center(self.width))
1282
+ print("=" * self.width)
1283
+
1284
+ print("What would you like to do?")
1285
+ print("1. View Local Models")
1286
+ print("2. Browse Available Models")
1287
+ print("3. Search Models")
1288
+ print("4. Switch Current Model")
1289
+ print("0. Back to Chat")
1290
+
1291
+ try:
1292
+ choice = input("\n> ").strip()
1293
+
1294
+ if choice == "1":
1295
+ await self._list_local_models()
1296
+ elif choice == "2":
1297
+ await self._list_available_models()
1298
+ elif choice == "3":
1299
+ await self._search_models()
1300
+ elif choice == "4":
1301
+ await self._switch_model()
1302
+ elif choice == "0" or choice == "":
1303
+ break
1304
+
1305
+ except (ValueError, KeyboardInterrupt):
1306
+ break
1307
+
1308
+ async def _list_local_models(self):
1309
+ """List locally installed Ollama models"""
1310
+ self.clear_screen()
1311
+ print("=" * self.width)
1312
+ print("LOCAL OLLAMA MODELS".center(self.width))
1313
+ print("=" * self.width)
1314
+
1315
+ try:
1316
+ # Get Ollama client with output suppression
1317
+ with self._suppress_output():
1318
+ from .api.ollama import OllamaClient
1319
+ client = await OllamaClient.create()
1320
+
1321
+ # Get local models
1322
+ local_models = await client.get_available_models()
1323
+
1324
+ if not local_models:
1325
+ print("No local models found.")
1326
+ print("Use option 2 to browse and download models from the registry.")
1327
+ else:
1328
+ print(f"Found {len(local_models)} local models:\n")
1329
+
1330
+ for i, model in enumerate(local_models):
1331
+ model_id = model.get("id", "unknown")
1332
+ marker = "►" if model_id == self.selected_model else " "
1333
+ print(f"{marker} {i+1:2d}. {model_id}")
1334
+
1335
+ print("\nOptions:")
1336
+ print("d) Delete a model")
1337
+ print("i) Show model details")
1338
+ print("s) Switch to a model")
1339
+ print("Enter) Back to model browser")
1340
+
1341
+ sub_choice = input("\n> ").strip().lower()
1342
+
1343
+ if sub_choice == "d":
1344
+ await self._delete_model_menu(local_models)
1345
+ elif sub_choice == "i":
1346
+ await self._show_model_details_menu(local_models)
1347
+ elif sub_choice == "s":
1348
+ await self._switch_model_menu(local_models)
1349
+
1350
+ except Exception as e:
1351
+ print(f"Error connecting to Ollama: {str(e)}")
1352
+ print("Make sure Ollama is running and accessible.")
1353
+
1354
+ input("\nPress Enter to continue...")
1355
+
1356
+ async def _list_available_models(self):
1357
+ """List available models for download from Ollama registry"""
1358
+ self.clear_screen()
1359
+ print("=" * self.width)
1360
+ print("AVAILABLE OLLAMA MODELS".center(self.width))
1361
+ print("=" * self.width)
1362
+
1363
+ try:
1364
+ # Get Ollama client with output suppression
1365
+ with self._suppress_output():
1366
+ from .api.ollama import OllamaClient
1367
+ client = await OllamaClient.create()
1368
+
1369
+ print("Loading available models... (this may take a moment)")
1370
+ with self._suppress_output():
1371
+ available_models = await client.list_available_models_from_registry("")
1372
+
1373
+ if not available_models:
1374
+ print("No models found in registry.")
1375
+ else:
1376
+ # Group by model family for better organization
1377
+ families = {}
1378
+ for model in available_models:
1379
+ family = model.get("model_family", "Other")
1380
+ if family not in families:
1381
+ families[family] = []
1382
+ families[family].append(model)
1383
+
1384
+ # Display by family
1385
+ model_index = 1
1386
+ model_map = {}
1387
+
1388
+ for family, models in sorted(families.items()):
1389
+ print(f"\n{family} Models:")
1390
+ print("-" * 40)
1391
+
1392
+ for model in models[:5]: # Show first 5 per family
1393
+ name = model.get("name", "unknown")
1394
+ description = model.get("description", "")
1395
+ size = model.get("parameter_size", "Unknown size")
1396
+
1397
+ print(f"{model_index:2d}. {name} ({size})")
1398
+ if description:
1399
+ print(f" {description[:60]}...")
1400
+
1401
+ model_map[str(model_index)] = model
1402
+ model_index += 1
1403
+
1404
+ if len(models) > 5:
1405
+ print(f" ... and {len(models) - 5} more {family} models")
1406
+
1407
+ print(f"\nShowing top models by family (total: {len(available_models)})")
1408
+ print("\nOptions:")
1409
+ print("Enter model number to download")
1410
+ print("s) Search for specific models")
1411
+ print("Enter) Back to model browser")
1412
+
1413
+ choice = input("\n> ").strip()
1414
+
1415
+ if choice in model_map:
1416
+ await self._download_model(model_map[choice])
1417
+ elif choice.lower() == "s":
1418
+ await self._search_models()
1419
+
1420
+ except Exception as e:
1421
+ print(f"Error fetching available models: {str(e)}")
1422
+
1423
+ input("\nPress Enter to continue...")
1424
+
1425
+ async def _search_models(self):
1426
+ """Search for models by name or description"""
1427
+ self.clear_screen()
1428
+ print("=" * self.width)
1429
+ print("SEARCH OLLAMA MODELS".center(self.width))
1430
+ print("=" * self.width)
1431
+
1432
+ query = input("Enter search term (name, family, or description): ").strip()
1433
+
1434
+ if not query:
1435
+ return
1436
+
1437
+ try:
1438
+ # Get Ollama client with output suppression
1439
+ with self._suppress_output():
1440
+ from .api.ollama import OllamaClient
1441
+ client = await OllamaClient.create()
1442
+
1443
+ print(f"\nSearching for '{query}'...")
1444
+ with self._suppress_output():
1445
+ all_models = await client.list_available_models_from_registry("")
1446
+
1447
+ # Filter models
1448
+ matching_models = []
1449
+ query_lower = query.lower()
1450
+
1451
+ for model in all_models:
1452
+ if (query_lower in model.get("name", "").lower() or
1453
+ query_lower in model.get("description", "").lower() or
1454
+ query_lower in model.get("model_family", "").lower()):
1455
+ matching_models.append(model)
1456
+
1457
+ if not matching_models:
1458
+ print(f"No models found matching '{query}'")
1459
+ else:
1460
+ print(f"\nFound {len(matching_models)} models matching '{query}':\n")
1461
+
1462
+ model_map = {}
1463
+ for i, model in enumerate(matching_models[:20]): # Show first 20 matches
1464
+ name = model.get("name", "unknown")
1465
+ description = model.get("description", "")
1466
+ size = model.get("parameter_size", "Unknown size")
1467
+ family = model.get("model_family", "Unknown")
1468
+
1469
+ print(f"{i+1:2d}. {name} ({family}, {size})")
1470
+ if description:
1471
+ print(f" {description[:70]}...")
1472
+ print()
1473
+
1474
+ model_map[str(i+1)] = model
1475
+
1476
+ if len(matching_models) > 20:
1477
+ print(f"... and {len(matching_models) - 20} more matches")
1478
+
1479
+ print("\nEnter model number to download (or press Enter to continue):")
1480
+ choice = input("> ").strip()
1481
+
1482
+ if choice in model_map:
1483
+ await self._download_model(model_map[choice])
1484
+
1485
+ except Exception as e:
1486
+ print(f"Error searching models: {str(e)}")
1487
+
1488
+ input("\nPress Enter to continue...")
1489
+
1490
+ async def _download_model(self, model_info):
1491
+ """Download a model with progress indication"""
1492
+ model_name = model_info.get("name", "unknown")
1493
+ size_info = model_info.get("parameter_size", "Unknown size")
1494
+
1495
+ print(f"\nDownloading {model_name} ({size_info})...")
1496
+ print("This may take several minutes depending on model size and connection.")
1497
+ print("Press Ctrl+C to cancel.\n")
1498
+
1499
+ confirm = input(f"Download {model_name}? (y/N): ").strip().lower()
1500
+ if confirm != 'y':
1501
+ return
1502
+
1503
+ try:
1504
+ # Get Ollama client with output suppression
1505
+ with self._suppress_output():
1506
+ from .api.ollama import OllamaClient
1507
+ client = await OllamaClient.create()
1508
+
1509
+ # Track download progress
1510
+ last_status = ""
1511
+
1512
+ async for progress in client.pull_model(model_name):
1513
+ status = progress.get("status", "")
1514
+
1515
+ if status != last_status:
1516
+ print(f"Status: {status}")
1517
+ last_status = status
1518
+
1519
+ # Show progress if available
1520
+ if "total" in progress and "completed" in progress:
1521
+ total = progress["total"]
1522
+ completed = progress["completed"]
1523
+ percent = (completed / total) * 100 if total > 0 else 0
1524
+ print(f"Progress: {percent:.1f}% ({completed:,}/{total:,} bytes)")
1525
+
1526
+ # Check if download is complete
1527
+ if status == "success" or "success" in status.lower():
1528
+ print(f"\n✓ {model_name} downloaded successfully!")
1529
+ break
1530
+
1531
+ except KeyboardInterrupt:
1532
+ print("\nDownload cancelled by user.")
1533
+ except Exception as e:
1534
+ print(f"\nError downloading model: {str(e)}")
1535
+
1536
+ async def _delete_model_menu(self, local_models):
1537
+ """Show model deletion menu"""
1538
+ print("\nSelect model to delete:")
1539
+ for i, model in enumerate(local_models):
1540
+ print(f"{i+1:2d}. {model.get('id', 'unknown')}")
1541
+
1542
+ choice = input("\nEnter model number (or press Enter to cancel): ").strip()
1543
+
1544
+ if choice.isdigit():
1545
+ idx = int(choice) - 1
1546
+ if 0 <= idx < len(local_models):
1547
+ model_id = local_models[idx].get("id", "unknown")
1548
+
1549
+ print(f"\nWARNING: This will permanently delete {model_id}")
1550
+ confirm = input("Type 'DELETE' to confirm: ").strip()
1551
+
1552
+ if confirm == "DELETE":
1553
+ try:
1554
+ with self._suppress_output():
1555
+ from .api.ollama import OllamaClient
1556
+ client = await OllamaClient.create()
1557
+ await client.delete_model(model_id)
1558
+ print(f"✓ {model_id} deleted successfully!")
1559
+ except Exception as e:
1560
+ print(f"Error deleting model: {str(e)}")
1561
+ else:
1562
+ print("Deletion cancelled.")
1563
+
1564
+ async def _show_model_details_menu(self, local_models):
1565
+ """Show detailed information about a model"""
1566
+ print("\nSelect model for details:")
1567
+ for i, model in enumerate(local_models):
1568
+ print(f"{i+1:2d}. {model.get('id', 'unknown')}")
1569
+
1570
+ choice = input("\nEnter model number (or press Enter to cancel): ").strip()
1571
+
1572
+ if choice.isdigit():
1573
+ idx = int(choice) - 1
1574
+ if 0 <= idx < len(local_models):
1575
+ model_id = local_models[idx].get("id", "unknown")
1576
+ await self._show_model_details(model_id)
1577
+
1578
+ async def _show_model_details(self, model_id):
1579
+ """Show detailed information about a specific model"""
1580
+ try:
1581
+ from .api.ollama import OllamaClient
1582
+ client = await OllamaClient.create()
1583
+ details = await client.get_model_details(model_id)
1584
+
1585
+ self.clear_screen()
1586
+ print("=" * self.width)
1587
+ print(f"MODEL DETAILS: {model_id}".center(self.width))
1588
+ print("=" * self.width)
1589
+
1590
+ if "error" in details:
1591
+ print(f"Error getting details: {details['error']}")
1592
+ else:
1593
+ print(f"Name: {model_id}")
1594
+
1595
+ if details.get("size"):
1596
+ size_gb = details["size"] / (1024**3)
1597
+ print(f"Size: {size_gb:.1f} GB")
1598
+
1599
+ if details.get("modified_at"):
1600
+ print(f"Modified: {details['modified_at']}")
1601
+
1602
+ if details.get("parameters"):
1603
+ print(f"\nParameters: {details['parameters']}")
1604
+
1605
+ if details.get("modelfile"):
1606
+ print(f"\nModelfile (first 500 chars):")
1607
+ print("-" * 40)
1608
+ print(details["modelfile"][:500])
1609
+ if len(details["modelfile"]) > 500:
1610
+ print("...")
1611
+
1612
+ except Exception as e:
1613
+ print(f"Error getting model details: {str(e)}")
1614
+
1615
+ input("\nPress Enter to continue...")
1616
+
1617
+ async def _switch_model_menu(self, local_models):
1618
+ """Switch to a different local model"""
1619
+ print("\nSelect model to switch to:")
1620
+ for i, model in enumerate(local_models):
1621
+ model_id = model.get("id", "unknown")
1622
+ marker = "►" if model_id == self.selected_model else " "
1623
+ print(f"{marker} {i+1:2d}. {model_id}")
1624
+
1625
+ choice = input("\nEnter model number (or press Enter to cancel): ").strip()
1626
+
1627
+ if choice.isdigit():
1628
+ idx = int(choice) - 1
1629
+ if 0 <= idx < len(local_models):
1630
+ old_model = self.selected_model
1631
+ self.selected_model = local_models[idx].get("id", "unknown")
1632
+ print(f"\n✓ Switched from {old_model} to {self.selected_model}")
1633
+
1634
+ async def _switch_model(self):
1635
+ """Switch current model (combines local and available models)"""
1636
+ try:
1637
+ from .api.ollama import OllamaClient
1638
+ client = await OllamaClient.create()
1639
+ local_models = await client.get_available_models()
1640
+ await self._switch_model_menu(local_models)
1641
+ except Exception as e:
1642
+ print(f"Error getting local models: {str(e)}")
1643
+
1644
+ input("\nPress Enter to continue...")
1645
+
717
1646
  async def run(self):
718
1647
  """Main application loop"""
719
1648
  # Create initial conversation
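Note: `_download_model` in the hunk above consumes `client.pull_model(model_name)` as an async generator, printing the status whenever it changes and a percentage whenever the progress dict carries `total` and `completed`. A minimal runnable sketch of that consumption pattern (the fake generator below stands in for the Ollama client; only the field names are taken from the diff):

import asyncio

async def fake_pull_model(name: str):
    """Stand-in progress stream shaped like Ollama pull-progress dicts."""
    total = 1_000_000
    for done in (0, 500_000, total):
        yield {"status": "downloading", "total": total, "completed": done}
        await asyncio.sleep(0)
    yield {"status": "success"}

async def download(name: str):
    last_status = ""
    async for progress in fake_pull_model(name):
        status = progress.get("status", "")
        if status != last_status:
            print(f"Status: {status}")
            last_status = status
        if "total" in progress and "completed" in progress:
            percent = (progress["completed"] / progress["total"]) * 100 if progress["total"] else 0
            print(f"Progress: {percent:.1f}%")
        if "success" in status.lower():
            break

asyncio.run(download("example-model"))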
@@ -742,6 +1671,9 @@ class ConsoleUI:
  elif user_input == "##SETTINGS##":
  await self.show_settings()
  continue
+ elif user_input == "##MODELS##":
+ await self.show_model_browser()
+ continue

  # Handle legacy single-letter commands for backward compatibility
  if user_input.lower() == 'q':
@@ -756,6 +1688,9 @@ class ConsoleUI:
  elif user_input.lower() == 's':
  await self.show_settings()
  continue
+ elif user_input.lower() == 'm':
+ await self.show_model_browser()
+ continue

  # Generate response
  await self.generate_response(user_input)
@@ -774,7 +1709,7 @@

  def setup_signal_handlers():
  """Setup signal handlers for graceful shutdown"""
- def signal_handler(signum, frame):
+ def signal_handler(_signum, _frame):
  print("\n\nShutting down gracefully...")
  sys.exit(0)