chat-console 0.4.3__py3-none-any.whl → 0.4.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- app/__init__.py +1 -1
- app/console_chat.py +816 -0
- app/console_main.py +58 -0
- app/console_utils.py +195 -0
- app/main.py +246 -126
- app/ui/borders.py +154 -0
- app/ui/chat_interface.py +84 -78
- app/ui/model_selector.py +11 -3
- app/ui/styles.py +231 -137
- {chat_console-0.4.3.dist-info → chat_console-0.4.6.dist-info}/METADATA +2 -2
- chat_console-0.4.6.dist-info/RECORD +28 -0
- {chat_console-0.4.3.dist-info → chat_console-0.4.6.dist-info}/WHEEL +1 -1
- chat_console-0.4.6.dist-info/entry_points.txt +5 -0
- chat_console-0.4.3.dist-info/RECORD +0 -24
- chat_console-0.4.3.dist-info/entry_points.txt +0 -3
- {chat_console-0.4.3.dist-info → chat_console-0.4.6.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.4.3.dist-info → chat_console-0.4.6.dist-info}/top_level.txt +0 -0
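The entry_points.txt contents are not shown in this diff, but the growth from 3 lines to 5, together with the new app/console_main.py below, points to additional console_scripts entries for the pure-console build. Purely as an illustration (these script names are hypothetical, not taken from the package):

    [console_scripts]
    chat-console = app.main:main
    console-chat = app.console_main:main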
app/console_main.py
ADDED
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+"""
+Entry point for the pure console version of Chat CLI
+"""
+
+import os
+import sys
+import asyncio
+import argparse
+import logging
+
+# Setup logging suppression BEFORE any imports
+def setup_console_logging():
+    """Setup logging to minimize disruption to console UI - must run before imports"""
+    # Set root logger to ERROR to suppress all INFO messages
+    logging.getLogger().setLevel(logging.ERROR)
+
+    # Completely disable all handlers to prevent any output
+    logging.basicConfig(
+        level=logging.CRITICAL,  # Only show CRITICAL messages
+        format='',  # Empty format
+        handlers=[logging.NullHandler()]  # Null handler suppresses all output
+    )
+
+    # Clear any existing handlers
+    for handler in logging.root.handlers[:]:
+        logging.root.removeHandler(handler)
+
+    # Add only NullHandler
+    logging.root.addHandler(logging.NullHandler())
+
+    # Pre-emptively suppress known noisy loggers
+    for logger_name in ['app', 'app.api', 'app.api.base', 'app.api.ollama',
+                        'app.utils', 'app.console_utils', 'aiohttp', 'urllib3',
+                        'httpx', 'asyncio', 'root']:
+        logging.getLogger(logger_name).setLevel(logging.ERROR)
+        logging.getLogger(logger_name).addHandler(logging.NullHandler())
+
+# Apply logging suppression immediately
+setup_console_logging()
+
+async def run_console_app():
+    """Run the console application"""
+    from .console_chat import main as console_main
+    await console_main()
+
+def main():
+    """Main entry point for console version"""
+    try:
+        asyncio.run(run_console_app())
+    except KeyboardInterrupt:
+        print("\nGoodbye!")
+    except Exception as e:
+        print(f"Error: {e}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
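The ordering in this module is deliberate: setup_console_logging() runs at import time, while the chatty app.* modules are only imported lazily inside run_console_app(), after logging is already muted. A minimal standalone sketch of the same pattern (some_chatty_dependency is a placeholder name, not part of this package):

    import logging

    # Raise the root threshold and install a NullHandler so records
    # that do pass the level check are consumed without output.
    logging.getLogger().setLevel(logging.ERROR)
    logging.root.addHandler(logging.NullHandler())

    # Only now import modules that log during import/setup:
    # import some_chatty_dependency

    log = logging.getLogger("some_chatty_dependency")
    log.info("loading model...")   # dropped: below the ERROR threshold
    log.error("connection lost")   # reaches root, but NullHandler prints nothing

Because the root logger ends up with a handler, Python's last-resort stderr handler never fires, so even ERROR-level records stay silent.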
app/console_utils.py
ADDED
@@ -0,0 +1,195 @@
+"""
+Console-specific utilities for the pure terminal version
+No Textual dependencies
+"""
+
+import asyncio
+import time
+from typing import Dict, List, Any, Optional, Callable, AsyncGenerator
+from .api.base import BaseModelClient
+
+async def console_streaming_response(
+    messages: List[Dict[str, str]],
+    model: str,
+    style: Optional[str],
+    client: BaseModelClient,
+    update_callback: Optional[Callable[[str], None]] = None
+) -> AsyncGenerator[str, None]:
+    """
+    Enhanced streaming response for console UI with provider-specific optimizations
+    """
+    try:
+        # Detect provider type for optimizations
+        client_type = type(client).__name__.lower()
+        is_ollama = 'ollama' in client_type
+        is_openai = 'openai' in client_type
+        is_anthropic = 'anthropic' in client_type
+
+        # Initialize tracking variables
+        full_content = ""
+        buffer = []
+        last_update = time.time()
+
+        # Provider-specific configuration
+        if is_ollama:
+            update_interval = 0.15  # Slower updates for Ollama (model loading)
+            buffer_size = 3  # Smaller buffer for Ollama
+        elif is_openai:
+            update_interval = 0.08  # Fast updates for OpenAI
+            buffer_size = 5  # Medium buffer for OpenAI
+        elif is_anthropic:
+            update_interval = 0.1  # Medium updates for Anthropic
+            buffer_size = 4  # Medium buffer for Anthropic
+        else:
+            update_interval = 0.1  # Default timing
+            buffer_size = 4  # Default buffer
+
+        # Special handling for reasoning models (slower, more deliberate)
+        if model.startswith(("o1", "o3", "o4")) or model in ["o1", "o3", "o4-mini"]:
+            update_interval = 0.2  # Slower updates for reasoning models
+            buffer_size = 2  # Smaller buffer for reasoning
+
+        async for chunk in client.generate_stream(
+            messages=messages,
+            model=model,
+            style=style
+        ):
+            if chunk:
+                buffer.append(chunk)
+                current_time = time.time()
+
+                # Update based on time interval or buffer size
+                should_update = (
+                    current_time - last_update >= update_interval or
+                    len(buffer) >= buffer_size or
+                    len(full_content) < 100  # Always update quickly for first few chars
+                )
+
+                if should_update:
+                    # Process buffered chunks
+                    new_content = ''.join(buffer)
+                    full_content += new_content
+                    buffer = []
+                    last_update = current_time
+
+                    # Call update callback with accumulated content
+                    if update_callback:
+                        update_callback(full_content)
+
+                    # Yield the new chunk for compatibility
+                    yield new_content
+
+                    # Provider-specific delays
+                    if is_ollama:
+                        await asyncio.sleep(0.02)  # Small delay for Ollama
+                    elif is_openai and not model.startswith(("o1", "o3", "o4")):
+                        await asyncio.sleep(0.01)  # Minimal delay for fast OpenAI models
+                    else:
+                        await asyncio.sleep(0.015)  # Default delay
+
+        # Process any remaining buffer content
+        if buffer:
+            final_content = ''.join(buffer)
+            full_content += final_content
+            if update_callback:
+                update_callback(full_content)
+            yield final_content
+
+    except asyncio.CancelledError:
+        # Handle cancellation gracefully
+        if update_callback:
+            update_callback(full_content + "\n[Generation cancelled]")
+        raise
+
+    except Exception as e:
+        error_msg = f"Error generating response: {str(e)}"
+        if update_callback:
+            update_callback(error_msg)
+        yield error_msg
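A sketch of how this generator might be consumed from a console loop. FakeClient is a hypothetical stand-in; the only interface assumed, taken from the call above, is an async generate_stream(messages=..., model=..., style=...) generator:

    import asyncio
    from app.console_utils import console_streaming_response

    class FakeClient:
        # Stand-in for a BaseModelClient subclass.
        async def generate_stream(self, messages, model, style=None):
            for token in ["Hel", "lo ", "wor", "ld!"]:
                yield token

    async def demo():
        def redraw(content: str) -> None:
            # Repaint the in-progress response on one line.
            print(f"\r{content}", end="", flush=True)

        chunks = []
        async for chunk in console_streaming_response(
            messages=[{"role": "user", "content": "Say hello"}],
            model="demo-model",
            style=None,
            client=FakeClient(),
            update_callback=redraw,
        ):
            chunks.append(chunk)
        print()
        assert "".join(chunks) == "Hello world!"

    asyncio.run(demo())

Note the dual channel: update_callback always receives the full accumulated text (convenient for repainting a console line), while the generator yields only the new increment.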
+
+def apply_style_prefix(messages: List[Dict[str, str]], style: str) -> List[Dict[str, str]]:
+    """Apply style instructions to the message list"""
+    if not style or style == "default":
+        return messages
+
+    style_instructions = {
+        "concise": "Please provide a brief and concise response.",
+        "detailed": "Please provide a comprehensive and detailed response.",
+        "technical": "Please provide a technical response with precise terminology.",
+        "friendly": "Please provide a warm and friendly response."
+    }
+
+    instruction = style_instructions.get(style, "")
+    if instruction and messages:
+        # Add style instruction to the first user message
+        modified_messages = messages.copy()
+        for msg in modified_messages:
+            if msg["role"] == "user":
+                msg["content"] = f"{instruction}\n\n{msg['content']}"
+                break
+        return modified_messages
+
+    return messages
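For example, with the "concise" style the first user message gains a prefix:

    messages = [{"role": "user", "content": "Explain asyncio."}]
    styled = apply_style_prefix(messages, "concise")
    print(styled[0]["content"])
    # Please provide a brief and concise response.
    #
    # Explain asyncio.

One caveat: messages.copy() is a shallow copy, so the message dicts themselves are shared and the original list sees the styled content too; a caller that needs the unstyled history intact would have to copy.deepcopy first.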
+
+async def test_model_connection(model: str) -> bool:
+    """Test if we can connect to the specified model"""
+    try:
+        client = await BaseModelClient.get_client_for_model(model)
+        # Try a simple test message
+        test_messages = [{"role": "user", "content": "Hello"}]
+        async for _ in client.generate_stream(test_messages, model):
+            break  # Just test that we can start streaming
+        return True
+    except Exception:
+        return False
+
+def format_model_list() -> List[str]:
+    """Format the available models for console display"""
+    from .config import CONFIG
+
+    formatted = []
+    for model_id, model_info in CONFIG["available_models"].items():
+        provider = model_info["provider"].capitalize()
+        display_name = model_info["display_name"]
+        formatted.append(f"{display_name} ({provider})")
+
+    return formatted
+
+def get_terminal_size():
+    """Get terminal size with fallback"""
+    try:
+        import shutil
+        return shutil.get_terminal_size()
+    except Exception:
+        # Fallback size
+        class Size:
+            columns = 80
+            lines = 24
+        return Size()
+
+def truncate_text(text: str, max_length: int, suffix: str = "...") -> str:
+    """Truncate text to fit within max_length"""
+    if len(text) <= max_length:
+        return text
+    return text[:max_length - len(suffix)] + suffix
+
+def word_wrap(text: str, width: int) -> List[str]:
+    """Wrap text to specified width"""
+    words = text.split()
+    lines = []
+    current_line = ""
+
+    for word in words:
+        if len(current_line) + len(word) + 1 <= width:
+            if current_line:
+                current_line += " "
+            current_line += word
+        else:
+            if current_line:
+                lines.append(current_line)
+            current_line = word
+
+    if current_line:
+        lines.append(current_line)
+
+    return lines or [""]