chat-console 0.4.2__py3-none-any.whl → 0.4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/ollama.py +1 -1
- app/console_chat.py +816 -0
- app/console_main.py +58 -0
- app/console_utils.py +195 -0
- app/main.py +267 -136
- app/ui/borders.py +154 -0
- app/ui/chat_interface.py +84 -78
- app/ui/model_selector.py +11 -3
- app/ui/styles.py +231 -137
- app/utils.py +22 -3
- {chat_console-0.4.2.dist-info → chat_console-0.4.6.dist-info}/METADATA +2 -2
- chat_console-0.4.6.dist-info/RECORD +28 -0
- {chat_console-0.4.2.dist-info → chat_console-0.4.6.dist-info}/WHEEL +1 -1
- chat_console-0.4.6.dist-info/entry_points.txt +5 -0
- chat_console-0.4.2.dist-info/RECORD +0 -24
- chat_console-0.4.2.dist-info/entry_points.txt +0 -3
- {chat_console-0.4.2.dist-info → chat_console-0.4.6.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.4.2.dist-info → chat_console-0.4.6.dist-info}/top_level.txt +0 -0
app/console_chat.py
ADDED
@@ -0,0 +1,816 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
Pure Console Chat CLI - No Textual Dependencies
|
4
|
+
A true terminal interface following Dieter Rams principles
|
5
|
+
"""
|
6
|
+
|
7
|
+
import os
|
8
|
+
import sys
|
9
|
+
import asyncio
|
10
|
+
import argparse
|
11
|
+
import signal
|
12
|
+
import threading
|
13
|
+
import time
|
14
|
+
from datetime import datetime
|
15
|
+
from typing import List, Optional, Dict, Any
|
16
|
+
import shutil
|
17
|
+
|
18
|
+
from .models import Message, Conversation
|
19
|
+
from .database import ChatDatabase
|
20
|
+
from .config import CONFIG, save_config
|
21
|
+
from .utils import resolve_model_id, generate_conversation_title
|
22
|
+
from .console_utils import console_streaming_response, apply_style_prefix
|
23
|
+
from .api.base import BaseModelClient
|
24
|
+
|
25
|
+
class ConsoleUI:
    """Pure console UI following Rams design principles"""

    def __init__(self):
        # Cap the drawing width at 120 columns so lines stay readable on
        # very wide terminals.
        term_size = shutil.get_terminal_size()
        self.width = min(term_size.columns, 120)
        self.height = term_size.lines
        self.db = ChatDatabase()
        self.current_conversation: Optional[Conversation] = None
        self.messages: List[Message] = []
        self.selected_model = resolve_model_id(CONFIG["default_model"])
        self.selected_style = CONFIG["default_style"]
        self.running = True
        self.generating = False
        self.input_mode = "text"  # "text" or "menu"

        # Suppress verbose logging for console mode
        self._setup_console_logging()
|
42
|
+
|
43
|
+
def _setup_console_logging(self):
|
44
|
+
"""Setup logging to minimize disruption to console UI"""
|
45
|
+
import logging
|
46
|
+
|
47
|
+
# Set root logger to ERROR to suppress all INFO messages
|
48
|
+
logging.getLogger().setLevel(logging.ERROR)
|
49
|
+
|
50
|
+
# Suppress all app module logging
|
51
|
+
logging.getLogger('app').setLevel(logging.ERROR)
|
52
|
+
logging.getLogger('app.api').setLevel(logging.ERROR)
|
53
|
+
logging.getLogger('app.api.base').setLevel(logging.ERROR)
|
54
|
+
logging.getLogger('app.api.ollama').setLevel(logging.ERROR)
|
55
|
+
logging.getLogger('app.utils').setLevel(logging.ERROR)
|
56
|
+
logging.getLogger('app.console_utils').setLevel(logging.ERROR)
|
57
|
+
|
58
|
+
# Suppress third-party library logging
|
59
|
+
logging.getLogger('aiohttp').setLevel(logging.ERROR)
|
60
|
+
logging.getLogger('urllib3').setLevel(logging.ERROR)
|
61
|
+
logging.getLogger('httpx').setLevel(logging.ERROR)
|
62
|
+
logging.getLogger('asyncio').setLevel(logging.ERROR)
|
63
|
+
logging.getLogger('root').setLevel(logging.ERROR)
|
64
|
+
|
65
|
+
# Completely disable all handlers to prevent any output
|
66
|
+
logging.basicConfig(
|
67
|
+
level=logging.CRITICAL, # Only show CRITICAL messages
|
68
|
+
format='', # Empty format
|
69
|
+
handlers=[logging.NullHandler()] # Null handler suppresses all output
|
70
|
+
)
|
71
|
+
|
72
|
+
# Clear any existing handlers
|
73
|
+
for handler in logging.root.handlers[:]:
|
74
|
+
logging.root.removeHandler(handler)
|
75
|
+
|
76
|
+
# Add only NullHandler
|
77
|
+
logging.root.addHandler(logging.NullHandler())
|
78
|
+
|
79
|
+
# Redirect stdout/stderr for subprocess calls (if any)
|
80
|
+
self._dev_null = open(os.devnull, 'w')
|
81
|
+
|
82
|
+
def _suppress_output(self):
|
83
|
+
"""Context manager to suppress all output during sensitive operations"""
|
84
|
+
import sys
|
85
|
+
import contextlib
|
86
|
+
|
87
|
+
@contextlib.contextmanager
|
88
|
+
def suppress():
|
89
|
+
with open(os.devnull, "w") as devnull:
|
90
|
+
old_stdout = sys.stdout
|
91
|
+
old_stderr = sys.stderr
|
92
|
+
try:
|
93
|
+
sys.stdout = devnull
|
94
|
+
sys.stderr = devnull
|
95
|
+
yield
|
96
|
+
finally:
|
97
|
+
sys.stdout = old_stdout
|
98
|
+
sys.stderr = old_stderr
|
99
|
+
|
100
|
+
return suppress()
|
101
|
+
|
102
|
+
def clear_screen(self):
    """Clear the terminal via the platform's native command."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
|
105
|
+
|
106
|
+
def get_border_chars(self):
    """Return the box-drawing glyphs used for every UI border."""
    names = ('horizontal', 'vertical', 'top_left', 'top_right',
             'bottom_left', 'bottom_right', 'tee_down', 'tee_up',
             'tee_right', 'tee_left')
    glyphs = '─│┌┐└┘┬┴├┤'
    return dict(zip(names, glyphs))
|
120
|
+
|
121
|
+
def draw_border_line(self, width: int, position: str = 'top') -> str:
    """Render one horizontal border row of the given total width.

    Unknown positions fall back to a plain rule with no end caps.
    """
    chars = self.get_border_chars()
    end_caps = {
        'top': (chars['top_left'], chars['top_right']),
        'bottom': (chars['bottom_left'], chars['bottom_right']),
        'middle': (chars['tee_right'], chars['tee_left']),
    }
    if position not in end_caps:
        return chars['horizontal'] * width
    left, right = end_caps[position]
    return left + chars['horizontal'] * (width - 2) + right
|
133
|
+
|
134
|
+
def draw_header(self) -> List[str]:
    """Build the header rows: title bar, conversation name, separator."""
    from . import __version__
    chars = self.get_border_chars()

    title = f" Chat Console v{__version__} "
    model_info = f" Model: {self.selected_model} "

    # Fill the gap between the title and the model label with rule characters.
    remaining = self.width - (len(title) + len(model_info)) - 2
    filler = chars['horizontal'] * max(0, remaining)

    lines = [chars['top_left'] + title + filler + model_info + chars['top_right']]

    # Current conversation title (or a placeholder before one exists).
    conv_title = self.current_conversation.title if self.current_conversation else "New Conversation"
    lines.append(chars['vertical'] + f" {conv_title} ".ljust(self.width - 2) + chars['vertical'])

    # Separator between header and transcript.
    lines.append(self.draw_border_line(self.width, 'middle'))

    return lines
|
162
|
+
|
163
|
+
def draw_footer(self) -> List[str]:
    """Build the footer rows listing the available keyboard controls."""
    chars = self.get_border_chars()

    controls = "[Tab] Menu Mode [q] Quit [n] New [h] History [s] Settings"
    body = chars['vertical'] + f" {controls} ".ljust(self.width - 2) + chars['vertical']

    return [
        self.draw_border_line(self.width, 'middle'),
        body,
        self.draw_border_line(self.width, 'bottom'),
    ]
|
175
|
+
|
176
|
+
def format_message(self, message: "Message") -> List[str]:
    """Word-wrap one message into bordered display rows plus a spacing row.

    Fixes over the previous version:
    - the first row used a 7-char timestamp prefix while continuation rows
      used 8 spaces, leaving the first row one column short and the right
      border misaligned; both prefixes are now 8 chars wide;
    - a single token longer than the content width used to overflow the
      frame; such tokens are now hard-split into content-width chunks.

    NOTE(review): the timestamp shown is the render time (datetime.now()),
    not the message's creation time — confirm this is intended.
    """
    timestamp = datetime.now().strftime("%H:%M")
    chars = self.get_border_chars()

    # Account for borders and the timestamp gutter.
    content_width = self.width - 10

    # Word wrap, hard-splitting any over-long token.
    lines = []
    current_line = ""
    for word in message.content.split():
        while len(word) > content_width:
            if current_line:
                lines.append(current_line)
                current_line = ""
            lines.append(word[:content_width])
            word = word[content_width:]
        if len(current_line) + len(word) + 1 <= content_width:
            current_line = f"{current_line} {word}" if current_line else word
        else:
            if current_line:
                lines.append(current_line)
            current_line = word
    if current_line:
        lines.append(current_line)

    # First row carries the timestamp; continuation rows align under it.
    formatted_lines = []
    for i, line in enumerate(lines):
        prefix = f" {timestamp}  " if i == 0 else " " * 8
        formatted_lines.append(
            chars['vertical'] + prefix + line.ljust(content_width) + chars['vertical']
        )

    # Trailing blank row separates consecutive messages.
    formatted_lines.append(chars['vertical'] + " " * (self.width - 2) + chars['vertical'])

    return formatted_lines
|
220
|
+
|
221
|
+
def draw_messages(self) -> List[str]:
    """Render the transcript area: a centered hint when empty, else recent messages."""
    chars = self.get_border_chars()
    blank = chars['vertical'] + " " * (self.width - 2) + chars['vertical']

    if not self.messages:
        hint = "Start a conversation by typing a message below"
        centered = chars['vertical'] + hint.center(self.width - 2) + chars['vertical']
        return [blank] * 3 + [centered] + [blank] * 3

    rows: List[str] = []
    # Only the last 10 messages are drawn; older ones scroll out of view.
    for message in self.messages[-10:]:
        rows.extend(self.format_message(message))
    return rows
|
240
|
+
|
241
|
+
def draw_input_area(self, current_input: str = "", prompt: str = "Type your message") -> List[str]:
    """Render the prompt row, the input (or hotkey help) row, and the busy indicator."""
    chars = self.get_border_chars()
    in_text_mode = self.input_mode == "text"

    # Prompt row with the current mode indicator.
    mode_indicator = "📝" if in_text_mode else "⚡"
    mode_text = "TEXT" if in_text_mode else "MENU"
    prompt_with_mode = f"{mode_indicator} {prompt} ({mode_text} mode - Tab to switch)"
    lines = [chars['vertical'] + f" {prompt_with_mode}: ".ljust(self.width - 2) + chars['vertical']]

    if in_text_mode:
        shown = current_input
        # Keep only the tail of very long input so the cursor area stays visible.
        if len(shown) > self.width - 6:
            shown = shown[-(self.width - 9):] + "..."
        lines.append(chars['vertical'] + f" > {shown}".ljust(self.width - 2) + chars['vertical'])
    else:
        # Menu mode - show available hotkeys.
        menu_help = "n)ew h)istory s)ettings q)uit"
        lines.append(chars['vertical'] + f" {menu_help}".ljust(self.width - 2) + chars['vertical'])

    if self.generating:
        lines.append(chars['vertical'] + " ● Generating response...".ljust(self.width - 2) + chars['vertical'])

    return lines
|
272
|
+
|
273
|
+
def draw_screen(self, current_input: str = "", input_prompt: str = "Type your message"):
    """Repaint the whole terminal: header, transcript, input area, footer."""
    self.clear_screen()

    header_lines = self.draw_header()
    footer_lines = self.draw_footer()
    input_lines = self.draw_input_area(current_input, input_prompt)

    # Whatever vertical space remains belongs to the transcript.
    used_lines = len(header_lines) + len(footer_lines) + len(input_lines)
    available_lines = self.height - used_lines - 2

    for line in header_lines:
        print(line)

    message_lines = self.draw_messages()
    chars = self.get_border_chars()

    # Pad short transcripts with blank rows; truncate long ones from the top.
    if len(message_lines) < available_lines:
        blank = chars['vertical'] + " " * (self.width - 2) + chars['vertical']
        message_lines += [blank] * (available_lines - len(message_lines))
    else:
        message_lines = message_lines[-available_lines:]

    for line in message_lines:
        print(line)
    for line in input_lines:
        print(line)
    for line in footer_lines:
        print(line)

    # Move the cursor back up onto the input row, just past the typed text.
    print("\033[A" * (len(footer_lines) + len(input_lines) - 1), end="")
    print(f"\033[{len(current_input) + 4}C", end="")
    sys.stdout.flush()
|
318
|
+
|
319
|
+
def get_input(self, prompt: str = "Type your message") -> str:
    """Read input one keystroke at a time, supporting text and menu modes.

    Returns the stripped message text, or a ##COMMAND## token chosen from
    menu mode ("##QUIT##", "##NEW##", "##HISTORY##", "##SETTINGS##").
    Raises KeyboardInterrupt on Ctrl+C when not generating.
    """
    current_input = ""

    while True:
        self.draw_screen(current_input, prompt)

        # Read a single keystroke in raw mode (no line buffering, no echo).
        if os.name == 'nt':
            import msvcrt
            char = msvcrt.getch().decode('utf-8', errors='ignore')
        else:
            import termios, tty
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                char = sys.stdin.read(1)
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

        # Fix: EOF or an undecodable key yields '' — previously ord('')
        # raised TypeError below. Ignore such reads.
        if not char:
            continue

        # Handle special keys first.
        if char == '\t':
            # Tab - switch between text and menu mode.
            self.input_mode = "menu" if self.input_mode == "text" else "text"
            continue
        elif char in ('\r', '\n'):
            if self.input_mode == "text":
                if current_input.strip():
                    return current_input.strip()
                # Empty submission in text mode switches to menu mode.
                self.input_mode = "menu"
            # In menu mode, Enter does nothing.
            continue
        elif char == '\x03':
            # Ctrl+C: cancel generation if running, otherwise propagate.
            if self.generating:
                self.generating = False
                return ""
            raise KeyboardInterrupt

        if self.input_mode == "text":
            if char in ('\x7f', '\x08'):
                # Backspace
                current_input = current_input[:-1]
            elif ord(char) >= 32:
                # Printable character
                current_input += char
        else:
            # Menu mode - handle hotkeys.
            key = char.lower()
            if key == 'q':
                return "##QUIT##"
            elif key == 'n':
                return "##NEW##"
            elif key == 'h':
                return "##HISTORY##"
            elif key == 's':
                return "##SETTINGS##"
            elif char == '\x1b':
                # Escape - back to text mode.
                self.input_mode = "text"
|
387
|
+
|
388
|
+
async def create_new_conversation(self):
    """Persist a fresh conversation record and reset the in-memory transcript."""
    conversation_id = self.db.create_conversation(
        "New Conversation", self.selected_model, self.selected_style
    )
    record = self.db.get_conversation(conversation_id)
    self.current_conversation = Conversation.from_dict(record)
    self.messages = []
|
395
|
+
|
396
|
+
async def add_message(self, role: str, content: str):
    """Append a message to the transcript and persist it when a conversation exists."""
    self.messages.append(Message(role=role, content=content))
    if self.current_conversation:
        self.db.add_message(self.current_conversation.id, role, content)
|
403
|
+
|
404
|
+
async def _generate_title_background(self, first_message: str):
    """Best-effort: derive a conversation title from the first user message.

    Runs as a background task; all failures are deliberately swallowed
    because title generation is cosmetic, not critical.
    """
    if not CONFIG.get("generate_dynamic_titles", True):
        return

    try:
        # Client construction may log noisily, so silence it.
        with self._suppress_output():
            client = await BaseModelClient.get_client_for_model(self.selected_model)

        new_title = await generate_conversation_title(first_message, self.selected_model, client)

        # Persist only a real, changed title.
        if self.current_conversation and new_title and new_title != "New Conversation":
            self.db.update_conversation_title(self.current_conversation.id, new_title)
            self.current_conversation.title = new_title

    except Exception:
        # Fix: drop the unused `as e` binding. Deliberate best-effort —
        # never let title generation break the chat.
        pass
|
425
|
+
|
426
|
+
async def generate_response(self, user_message: str):
    """Send the user message to the selected model and stream back a reply."""
    self.generating = True

    try:
        await self.add_message("user", user_message)

        # First user message in a fresh conversation: kick off background titling.
        user_count = sum(1 for msg in self.messages if msg.role == "user")
        if (self.current_conversation
                and self.current_conversation.title == "New Conversation"
                and user_count == 1):
            import asyncio
            asyncio.create_task(self._generate_title_background(user_message))

        # Flatten the transcript into the wire format the API clients expect.
        api_messages = [{"role": msg.role, "content": msg.content} for msg in self.messages]

        # Ollama clients log noisily during construction, so silence them.
        model_info = CONFIG["available_models"].get(self.selected_model, {})
        is_ollama = (model_info.get("provider") == "ollama"
                     or "ollama" in self.selected_model.lower()
                     or self.selected_model in ["gemma:2b", "gemma:7b", "llama3:8b", "mistral:7b"])
        if is_ollama:
            with self._suppress_output():
                client = await BaseModelClient.get_client_for_model(self.selected_model)
        else:
            client = await BaseModelClient.get_client_for_model(self.selected_model)

        # Placeholder assistant message that fills in as the stream arrives.
        assistant_message = Message(role="assistant", content="")
        self.messages.append(assistant_message)

        full_response = ""

        def update_callback(content: str):
            # NOTE(review): this callback overwrites full_response with the
            # accumulated content while the loop below also appends raw
            # chunks — confirm console_streaming_response's contract to rule
            # out double-counting.
            nonlocal full_response
            full_response = content
            assistant_message.content = content
            # Redraw so the partial reply is visible as it streams in.
            self.draw_screen("", "Generating response")

        styled_messages = apply_style_prefix(api_messages, self.selected_style)

        # NOTE(review): _suppress_output also redirects the stdout used by
        # draw_screen in the callback above, so redraws are invisible here.
        with self._suppress_output():
            async for chunk in console_streaming_response(
                styled_messages, self.selected_model, self.selected_style, client, update_callback
            ):
                if not self.generating:
                    break
                if chunk:
                    full_response += chunk

        assistant_message.content = full_response

        # Persist the completed reply.
        if self.current_conversation and full_response:
            self.db.add_message(self.current_conversation.id, "assistant", full_response)

    except Exception as e:
        # Surface errors in the transcript rather than crashing the UI.
        error_msg = f"Error: {str(e)}"
        if self.messages and self.messages[-1].role == "assistant":
            self.messages[-1].content = error_msg
        else:
            await self.add_message("assistant", error_msg)
    finally:
        self.generating = False
|
505
|
+
|
506
|
+
def show_history(self):
    """List recent conversations and optionally load one into the UI."""
    conversations = self.db.get_all_conversations(limit=20)
    if not conversations:
        input("No conversations found. Press Enter to continue...")
        return

    self.clear_screen()
    rule = "=" * self.width
    print(rule)
    print("CONVERSATION HISTORY".center(self.width))
    print(rule)

    for number, conv in enumerate(conversations, start=1):
        print(f"{number:2d}. {conv['title'][:60]} ({conv['model']})")

    print("\nEnter conversation number to load (or press Enter to cancel):")

    try:
        choice = input("> ").strip()
        if choice.isdigit():
            idx = int(choice) - 1
            if 0 <= idx < len(conversations):
                # Load the chosen conversation and its messages.
                record = self.db.get_conversation(conversations[idx]['id'])
                self.current_conversation = Conversation.from_dict(record)
                self.messages = [Message(**msg) for msg in self.current_conversation.messages]
    except (ValueError, KeyboardInterrupt):
        pass
|
534
|
+
|
535
|
+
async def show_settings(self):
    """Settings loop: pick a model, a response style, or detect Ollama models."""
    while True:
        self.clear_screen()
        rule = "=" * self.width
        print(rule)
        print("SETTINGS".center(self.width))
        print(rule)

        print(f"Current Model: {self.selected_model}")
        print(f"Current Style: {self.selected_style}")
        print()

        print("What would you like to change?")
        print("1. Select Model")
        print("2. Response Style")
        print("3. Detect Ollama Models")
        print("0. Back to Chat")

        try:
            choice = input("\n> ").strip()
            if choice == "1":
                await self._select_model()
            elif choice == "2":
                self._select_style()
            elif choice == "3":
                await self._detect_ollama_models()
            elif choice in ("0", ""):
                break
        except (ValueError, KeyboardInterrupt):
            break
|
567
|
+
|
568
|
+
async def _select_model(self):
    """Model selection across configured providers plus locally detected Ollama models."""
    self.clear_screen()
    rule = "=" * self.width
    print(rule)
    print("MODEL SELECTION".center(self.width))
    print(rule)

    # Group configured models by provider.
    providers: Dict[str, list] = {}
    for model_id, model_info in CONFIG["available_models"].items():
        providers.setdefault(model_info["provider"], []).append((model_id, model_info))

    # Merge in locally installed Ollama models not already configured.
    try:
        with self._suppress_output():
            from .api.ollama import OllamaClient
            client = await OllamaClient.create()
            local_models = await client.get_available_models()

        if local_models:
            ollama_entries = providers.setdefault("ollama", [])
            for model in local_models:
                model_id = model.get("id", "unknown")
                if model_id not in CONFIG["available_models"]:
                    ollama_entries.append((model_id, {
                        "provider": "ollama",
                        "display_name": model_id,
                        "max_tokens": 4096
                    }))
    except Exception:
        # Fix: drop the unused `as e` binding. Ollama not available.
        pass

    # Display models grouped by provider, numbered sequentially.
    model_list = []
    print("Available Models by Provider:\n")
    for provider, models in providers.items():
        if not models:  # Only show providers with available models
            continue
        print(f"=== {provider.upper()} ===")
        for model_id, model_info in models:
            marker = "►" if model_id == self.selected_model else " "
            display_name = model_info.get("display_name", model_id)
            model_list.append(model_id)
            print(f"{marker} {len(model_list):2d}. {display_name}")
        print()

    if not model_list:
        print("No models available. Please check your API keys or Ollama installation.")
        input("Press Enter to continue...")
        return

    print("Enter model number to select (or press Enter to cancel):")

    try:
        choice = input("> ").strip()
        if choice.isdigit():
            idx = int(choice) - 1
            if 0 <= idx < len(model_list):
                old_model = self.selected_model
                self.selected_model = model_list[idx]
                print(f"Model changed from {old_model} to {self.selected_model}")
                input("Press Enter to continue...")
    except (ValueError, KeyboardInterrupt):
        pass
|
638
|
+
|
639
|
+
def _select_style(self):
    """Response-style selection submenu."""
    self.clear_screen()
    rule = "=" * self.width
    print(rule)
    print("RESPONSE STYLE SELECTION".center(self.width))
    print(rule)

    styles = list(CONFIG["user_styles"].keys())
    for number, style in enumerate(styles, start=1):
        marker = "►" if style == self.selected_style else " "
        entry = CONFIG["user_styles"][style]
        print(f"{marker} {number:2d}. {entry['name']}")
        print(f"      {entry['description']}")
        print()

    print("Enter style number to select (or press Enter to cancel):")

    try:
        choice = input("> ").strip()
        if choice.isdigit():
            idx = int(choice) - 1
            if 0 <= idx < len(styles):
                old_style = self.selected_style
                self.selected_style = styles[idx]
                print(f"Style changed from {old_style} to {self.selected_style}")
                input("Press Enter to continue...")
    except (ValueError, KeyboardInterrupt):
        pass
|
668
|
+
|
669
|
+
async def _detect_ollama_models(self):
    """Find locally installed Ollama models and add new ones to the config."""
    self.clear_screen()
    rule = "=" * self.width
    print(rule)
    print("OLLAMA MODEL DETECTION".center(self.width))
    print(rule)

    print("Checking for local Ollama models...")

    try:
        with self._suppress_output():
            from .api.ollama import OllamaClient
            client = await OllamaClient.create()
            local_models = await client.get_available_models()

        if not local_models:
            print("No local Ollama models found.")
            print("Use the model browser ('m' key) to download models.")
        else:
            print(f"Found {len(local_models)} local Ollama models:")
            print()

            new_models = 0
            for model in local_models:
                model_id = model.get("id", "unknown")
                print(f"  • {model_id}")

                # Register models that are not yet in the configuration.
                if model_id not in CONFIG["available_models"]:
                    CONFIG["available_models"][model_id] = {
                        "provider": "ollama",
                        "display_name": model_id,
                        "max_tokens": 4096
                    }
                    new_models += 1

            if new_models > 0:
                save_config(CONFIG)
                print(f"\nAdded {new_models} new models to configuration.")
            else:
                print("\nAll models already in configuration.")

    except Exception as e:
        print(f"Error detecting Ollama models: {str(e)}")
        print("Make sure Ollama is running and accessible.")

    input("\nPress Enter to continue...")
|
716
|
+
|
717
|
+
async def run(self):
    """Main interactive loop: read input, dispatch commands, generate replies."""
    # Start with a fresh conversation and an initial paint.
    await self.create_new_conversation()
    self.draw_screen("", "Type your message (or 'q' to quit)")

    # Legacy single-letter commands map onto the menu-mode tokens.
    legacy_aliases = {'q': "##QUIT##", 'n': "##NEW##", 'h': "##HISTORY##", 's': "##SETTINGS##"}

    while self.running:
        try:
            user_input = self.get_input("Type your message")
            if not user_input:
                continue

            command = legacy_aliases.get(user_input.lower(), user_input)

            if command == "##QUIT##":
                self.running = False
                break
            elif command == "##NEW##":
                await self.create_new_conversation()
                continue
            elif command == "##HISTORY##":
                self.show_history()
                continue
            elif command == "##SETTINGS##":
                await self.show_settings()
                continue

            # Anything else is a chat message.
            await self.generate_response(user_input)

        except KeyboardInterrupt:
            # First Ctrl+C cancels generation; otherwise exit the loop.
            if self.generating:
                self.generating = False
                print("\nGeneration cancelled.")
                time.sleep(1)
            else:
                self.running = False
                break
        except Exception as e:
            print(f"\nError: {e}")
            input("Press Enter to continue...")
|
774
|
+
|
775
|
+
def setup_signal_handlers():
    """Install SIGINT/SIGTERM handlers that exit with a farewell message."""
    def handle_shutdown(signum, frame):
        print("\n\nShutting down gracefully...")
        sys.exit(0)

    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, handle_shutdown)
|
783
|
+
|
784
|
+
async def main():
    """Entry point: parse CLI arguments, configure the console UI, and run it."""
    parser = argparse.ArgumentParser(description="Chat Console - Pure Terminal Version")
    parser.add_argument("--model", help="Initial model to use")
    parser.add_argument("--style", help="Response style")
    # NOTE(review): the positional message is parsed but never used below —
    # confirm whether it should be forwarded to the conversation on startup.
    parser.add_argument("message", nargs="?", help="Initial message to send")
    args = parser.parse_args()

    # Setup signal handling
    setup_signal_handlers()

    console = ConsoleUI()

    # CLI overrides for the defaults from CONFIG.
    if args.model:
        console.selected_model = resolve_model_id(args.model)
    if args.style:
        console.selected_style = args.style

    await console.run()

    print("\nGoodbye!")
|
808
|
+
|
809
|
+
if __name__ == "__main__":
    # Script entry: run the async main loop; exit quietly on Ctrl+C and
    # with status 1 on any other error.
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\nGoodbye!")
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)
|