chat-console 0.3.7__tar.gz → 0.3.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {chat_console-0.3.7/chat_console.egg-info → chat_console-0.3.8}/PKG-INFO +1 -1
- {chat_console-0.3.7 → chat_console-0.3.8}/app/__init__.py +1 -1
- {chat_console-0.3.7 → chat_console-0.3.8}/app/main.py +39 -9
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/chat_interface.py +22 -1
- {chat_console-0.3.7 → chat_console-0.3.8}/app/utils.py +57 -11
- {chat_console-0.3.7 → chat_console-0.3.8/chat_console.egg-info}/PKG-INFO +1 -1
- {chat_console-0.3.7 → chat_console-0.3.8}/LICENSE +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/README.md +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/api/__init__.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/api/anthropic.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/api/base.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/api/ollama.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/api/openai.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/config.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/database.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/models.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/__init__.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/chat_list.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/model_browser.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/model_selector.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/search.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/app/ui/styles.py +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/chat_console.egg-info/SOURCES.txt +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/chat_console.egg-info/dependency_links.txt +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/chat_console.egg-info/entry_points.txt +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/chat_console.egg-info/requires.txt +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/chat_console.egg-info/top_level.txt +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/setup.cfg +0 -0
- {chat_console-0.3.7 → chat_console-0.3.8}/setup.py +0 -0
{chat_console-0.3.7/chat_console.egg-info → chat_console-0.3.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.3.7
+Version: 0.3.8
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
{chat_console-0.3.7 → chat_console-0.3.8}/app/main.py

@@ -940,27 +940,39 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         last_refresh_time = time.time() # Initialize refresh throttling timer

         async def update_ui(content: str):
-            # This function
+            # This function is called by the worker with each content update
            if not self.is_generating:
                debug_log("update_ui called but is_generating is False, returning.")
                return

            async with update_lock:
                try:
+                    # Add more verbose logging
+                    debug_log(f"update_ui called with content length: {len(content)}")
+                    print(f"update_ui: Updating with content length {len(content)}")
+
                     # Clear thinking indicator on first content
                     if assistant_message.content == "Thinking...":
                         debug_log("First content received, clearing 'Thinking...'")
                         print("First content received, clearing 'Thinking...'")
-
-
+                        # We'll let the MessageDisplay.update_content handle this special case
+
                     # Update the message object with the full content
                     assistant_message.content = content

-                    # Update UI with the content
+                    # Update UI with the content - this now has special handling for "Thinking..."
+                    debug_log("Calling message_display.update_content")
                     await message_display.update_content(content)

-                    #
+                    # More aggressive UI refresh sequence
+                    debug_log("Performing UI refresh sequence")
+                    # First do a lightweight refresh
+                    self.refresh(layout=False)
+                    # Then scroll to end
+                    messages_container.scroll_end(animate=False)
+                    # Then do a full layout refresh
                     self.refresh(layout=True)
+                    # Final scroll to ensure visibility
                     messages_container.scroll_end(animate=False)

                except Exception as e:
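The new refresh sequence is the core of this hunk: a cheap repaint first, then a scroll, then a full layout pass, then a final scroll. A minimal sketch of that sequence as a standalone helper — the function name and parameters are hypothetical, while refresh(layout=...) and scroll_end(animate=False) are the Textual APIs the hunk itself calls:

def refresh_streaming_view(app, container) -> None:
    # Lightweight repaint first: redraw without recomputing layout
    app.refresh(layout=False)
    # Keep the newest streamed content in view
    container.scroll_end(animate=False)
    # Full layout pass, since the growing message widget may need reflow
    app.refresh(layout=True)
    # Final scroll in case the layout pass moved content
    container.scroll_end(animate=False)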
@@ -1030,14 +1042,32 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
            error = worker.error
            debug_log(f"Error in generation worker: {error}")
            log.error(f"Error in generation worker: {error}")
-
+
+            # Sanitize error message for UI display
+            error_str = str(error)
+
+            # Check if this is an Ollama error
+            is_ollama_error = "ollama" in error_str.lower() or "404" in error_str
+
+            # Create a user-friendly error message
+            if is_ollama_error:
+                # For Ollama errors, provide a more user-friendly message
+                user_error = "Unable to generate response. The selected model may not be available."
+                debug_log(f"Sanitizing Ollama error to user-friendly message: {user_error}")
+                # Show technical details only in notification, not in chat
+                self.notify(f"Model error: {error_str}", severity="error", timeout=5)
+            else:
+                # For other errors, show a generic message
+                user_error = f"Error generating response: {error_str}"
+                self.notify(f"Generation error: {error_str}", severity="error", timeout=5)
+
            # Add error message to UI
            if self.messages and self.messages[-1].role == "assistant":
                debug_log("Removing thinking message")
                self.messages.pop() # Remove thinking message
-
-            debug_log(f"Adding error message: {
-            self.messages.append(Message(role="assistant", content=
+
+            debug_log(f"Adding error message: {user_error}")
+            self.messages.append(Message(role="assistant", content=user_error))
            await self.update_messages_ui()

        elif worker.state == "success":
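The worker error path now splits each failure into a sanitized chat message and a technical notification. A sketch of the same branching as a pure function (the helper name and the tuple return shape are illustrative, not part of the package):

def sanitize_generation_error(error: Exception) -> tuple[str, str]:
    """Map a generation-worker error to (chat_message, notification_text)."""
    error_str = str(error)
    # Ollama failures (including 404s for missing models) get a friendly
    # chat message; raw details appear only in the transient notification.
    if "ollama" in error_str.lower() or "404" in error_str:
        return ("Unable to generate response. The selected model may not be available.",
                f"Model error: {error_str}")
    return (f"Error generating response: {error_str}",
            f"Generation error: {error_str}")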
{chat_console-0.3.7 → chat_console-0.3.8}/app/ui/chat_interface.py

@@ -121,11 +121,23 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog

     async def update_content(self, content: str) -> None:
         """Update the message content using Static.update() with optimizations for streaming"""
+        # Debug print to verify method is being called with content
+        print(f"MessageDisplay.update_content called with content length: {len(content)}")
+
         # Quick unchanged content check to avoid unnecessary updates
         if self.message.content == content:
+            print("Content unchanged, skipping update")
             return

-        #
+        # Special handling for "Thinking..." to ensure it gets replaced
+        if self.message.content == "Thinking..." and content:
+            print("Replacing 'Thinking...' with actual content")
+            # Force a complete replacement rather than an append
+            self.message.content = ""
+            # Add a debug print to confirm this branch is executed
+            print("CRITICAL FIX: Replacing 'Thinking...' placeholder with actual content")
+
+        # Update the stored message object content
         self.message.content = content

         # Format with fixed-width placeholder to minimize layout shifts
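The behavioral fix here is the guard that empties the stored "Thinking..." placeholder before the first real chunk is written, so Static.update() visibly replaces the indicator instead of appearing to append after it. The guard in isolation (with a hypothetical message object standing in for the widget's stored message):

def clear_thinking_placeholder(message, incoming: str) -> None:
    # Fires only on the first real chunk: the placeholder is still showing
    # and the incoming content is non-empty.
    if message.content == "Thinking..." and incoming:
        message.content = ""  # force a full replacement rather than an append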
@@ -134,6 +146,7 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog

         # Use a direct update that forces refresh - critical fix for streaming
         # This ensures content is immediately visible
+        print(f"Updating widget with formatted content length: {len(formatted_content)}")
         self.update(formatted_content, refresh=True)

         # Force app-level refresh and scroll to ensure visibility
@@ -148,6 +161,9 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
            for container in containers:
                if hasattr(container, 'scroll_end'):
                    container.scroll_end(animate=False)
+
+            # Add an additional refresh after scrolling
+            self.app.refresh(layout=True)
        except Exception as e:
            # Log the error and fallback to local refresh
            print(f"Error refreshing app: {str(e)}")
@@ -157,6 +173,11 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         """Format message content with timestamp and handle markdown links"""
         timestamp = datetime.now().strftime("%H:%M")

+        # Special handling for "Thinking..." to make it visually distinct
+        if content == "Thinking...":
+            # Use italic style for the thinking indicator
+            return f"[dim]{timestamp}[/dim] [italic]{content}[/italic]"
+
         # Fix markdown-style links that cause markup errors
         # Convert [text](url) to a safe format for Textual markup
         content = re.sub(
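The early return emits Rich/Textual console markup directly: a dim timestamp followed by the indicator in italics. A runnable sketch of just this special case (the function name is illustrative):

from datetime import datetime

def format_thinking_indicator() -> str:
    # Same markup as the early return in _format_content,
    # e.g. "[dim]14:05[/dim] [italic]Thinking...[/italic]"
    timestamp = datetime.now().strftime("%H:%M")
    return f"[dim]{timestamp}[/dim] [italic]Thinking...[/italic]"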
{chat_console-0.3.7 → chat_console-0.3.8}/app/utils.py

@@ -63,17 +63,62 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->

     # Check if client is OpenAI
     is_openai = 'openai' in str(type(client)).lower()
-    if is_openai
+    if is_openai:
         debug_log("Using OpenAI client for title generation")
         # Use GPT-3.5 for title generation (fast and cost-effective)
         title_model_id = "gpt-3.5-turbo"
         debug_log(f"Using OpenAI model for title generation: {title_model_id}")
+        # For OpenAI, we'll always use their model, not fall back to the passed model
+        # This prevents trying to use Ollama models with OpenAI client
+
+    # Check if client is Ollama
+    is_ollama = 'ollama' in str(type(client)).lower()
+    if is_ollama and not title_model_id:
+        debug_log("Using Ollama client for title generation")
+        # For Ollama, check if the model exists before using it
+        try:
+            # Try a quick test request to check if model exists
+            debug_log(f"Testing if Ollama model exists: {model}")
+            import aiohttp
+            async with aiohttp.ClientSession() as session:
+                try:
+                    base_url = "http://localhost:11434"
+                    async with session.post(
+                        f"{base_url}/api/generate",
+                        json={"model": model, "prompt": "test", "stream": False},
+                        timeout=2
+                    ) as response:
+                        if response.status == 200:
+                            # Model exists, use it
+                            title_model_id = model
+                            debug_log(f"Ollama model {model} exists, using it for title generation")
+                        else:
+                            debug_log(f"Ollama model {model} returned status {response.status}, falling back to default")
+                            # Fall back to a common model
+                            title_model_id = "llama3"
+                except Exception as e:
+                    debug_log(f"Error testing Ollama model: {str(e)}, falling back to default")
+                    # Fall back to a common model
+                    title_model_id = "llama3"
+        except Exception as e:
+            debug_log(f"Error checking Ollama model: {str(e)}")
+            # Fall back to a common model
+            title_model_id = "llama3"

     # Fallback logic if no specific model was found
     if not title_model_id:
-        # Use
-
-
+        # Use a safe default based on client type
+        if is_openai:
+            title_model_id = "gpt-3.5-turbo"
+        elif is_anthropic:
+            title_model_id = "claude-3-haiku-20240307"
+        elif is_ollama:
+            title_model_id = "llama3" # Common default
+        else:
+            # Last resort - use the originally passed model
+            title_model_id = model
+
+        debug_log(f"No specific model found, using fallback model for title generation: {title_model_id}")

     logger.info(f"Generating title for conversation using model: {title_model_id}")
     debug_log(f"Final model selected for title generation: {title_model_id}")
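The availability probe is inlined into the title-generation flow above; it reads more clearly as a standalone coroutine. A sketch under two assumptions: Ollama on its default localhost port, and aiohttp.ClientTimeout for the 2-second cap (the hunk passes a bare timeout=2; ClientTimeout is aiohttp's documented form):

import aiohttp

async def ollama_model_available(model: str,
                                 base_url: str = "http://localhost:11434") -> bool:
    """Return True if a minimal /api/generate request for `model` succeeds."""
    try:
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{base_url}/api/generate",
                json={"model": model, "prompt": "test", "stream": False},
                timeout=aiohttp.ClientTimeout(total=2),
            ) as response:
                return response.status == 200
    except Exception:
        # Connection errors and timeouts alike mean "treat as unavailable"
        return False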
@@ -325,25 +370,26 @@ async def generate_streaming_response(
                    full_response += new_content
                    debug_log(f"Updating UI with content length: {len(full_response)}")

-                    #
-
-
-
+                    # Enhanced debug logging
+                    print(f"STREAM DEBUG: +{len(new_content)} chars, total: {len(full_response)}")
+                    # Print first few characters of content for debugging
+                    if len(full_response) < 100:
+                        print(f"STREAM CONTENT: '{full_response}'")

                    try:
                        # Call the UI callback with the full response so far
+                        debug_log("Calling UI callback with content")
                        await callback(full_response)
                        debug_log("UI callback completed successfully")

                        # Force app refresh after each update
                        if hasattr(app, 'refresh'):
+                            debug_log("Forcing app refresh")
                            app.refresh(layout=True) # Force layout refresh
                    except Exception as callback_err:
                        debug_log(f"Error in UI callback: {str(callback_err)}")
                        logger.error(f"Error in UI callback: {str(callback_err)}")
-
-                        if not is_openai:
-                            print(f"Error updating UI: {str(callback_err)}")
+                        print(f"STREAM ERROR: Error updating UI: {str(callback_err)}")

                    buffer = []
                    last_update = current_time
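This hunk sits inside a buffer-and-throttle loop: incoming chunks accumulate in buffer, and the UI callback fires at most once per interval, tracked by last_update. A condensed sketch of that surrounding pattern (names mirror the diff; the 100 ms interval is an assumption, since the real threshold is outside this hunk):

import time

async def flush_stream_buffer(buffer: list, full_response: str, callback,
                              last_update: float, min_interval: float = 0.1):
    """Push buffered chunks to the UI callback, throttled to min_interval."""
    current_time = time.time()
    if buffer and (current_time - last_update) >= min_interval:
        full_response += "".join(buffer)
        await callback(full_response)  # UI receives the full response so far
        buffer.clear()
        last_update = current_time
    return full_response, last_update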
{chat_console-0.3.7 → chat_console-0.3.8/chat_console.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.3.7
+Version: 0.3.8
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway