chat-console 0.3.9__tar.gz → 0.3.94__tar.gz
This diff shows the changes between two package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
- {chat_console-0.3.9/chat_console.egg-info → chat_console-0.3.94}/PKG-INFO +1 -1
- {chat_console-0.3.9 → chat_console-0.3.94}/app/__init__.py +1 -1
- {chat_console-0.3.9 → chat_console-0.3.94}/app/api/base.py +10 -6
- {chat_console-0.3.9 → chat_console-0.3.94}/app/api/ollama.py +51 -23
- {chat_console-0.3.9 → chat_console-0.3.94}/app/config.py +29 -26
- {chat_console-0.3.9 → chat_console-0.3.94}/app/main.py +81 -11
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/chat_interface.py +31 -58
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/model_selector.py +5 -0
- chat_console-0.3.94/app/utils.py +911 -0
- {chat_console-0.3.9 → chat_console-0.3.94/chat_console.egg-info}/PKG-INFO +1 -1
- chat_console-0.3.9/app/utils.py +0 -705
- {chat_console-0.3.9 → chat_console-0.3.94}/LICENSE +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/README.md +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/api/__init__.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/api/anthropic.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/api/openai.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/database.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/models.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/__init__.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/chat_list.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/model_browser.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/search.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/app/ui/styles.py +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/chat_console.egg-info/SOURCES.txt +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/chat_console.egg-info/dependency_links.txt +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/chat_console.egg-info/entry_points.txt +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/chat_console.egg-info/requires.txt +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/chat_console.egg-info/top_level.txt +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/setup.cfg +0 -0
- {chat_console-0.3.9 → chat_console-0.3.94}/setup.py +0 -0
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.3.9
+Version: 0.3.94
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
```

```diff
@@ -61,10 +61,12 @@ class BaseModelClient(ABC):
             logger.info(f"Found model in config with provider: {provider}")
         # For custom models, try to infer provider
         else:
-            #
-            if
+            # Check for common OpenAI model patterns or prefixes
+            if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
+                "gpt" in model_name_lower or
+                model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
                 provider = "openai"
-                logger.info(f"Identified as OpenAI model
+                logger.info(f"Identified {model_name} as an OpenAI model")
             # Then check for Anthropic models - these should ALWAYS use Anthropic client
             elif any(name in model_name_lower for name in ["claude", "anthropic"]):
                 provider = "anthropic"
```

```diff
@@ -120,12 +122,14 @@ class BaseModelClient(ABC):
             raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")
         # For custom models, try to infer provider
         else:
-            #
-            if
+            # Check for common OpenAI model patterns or prefixes
+            if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
+                "gpt" in model_name_lower or
+                model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
                 if not AVAILABLE_PROVIDERS["openai"]:
                     raise Exception("OpenAI API key not found. Please set OPENAI_API_KEY environment variable.")
                 provider = "openai"
-                logger.info(f"Identified as OpenAI model
+                logger.info(f"Identified {model_name} as an OpenAI model")
             # Then check for Anthropic models - these should ALWAYS use Anthropic client
             elif any(name in model_name_lower for name in ["claude", "anthropic"]):
                 if not AVAILABLE_PROVIDERS["anthropic"]:
```

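The two hunks above apply the same heuristic in both places where app/api/base.py has to guess a provider for a model name that is not in the config. Below is a standalone sketch of that heuristic; the function name `infer_provider` and the final "ollama" default are illustrative additions, while the match patterns (including the literal "04-*" entries) are copied from the diff.

```python
# Standalone sketch of the provider-inference heuristic added in base.py.
# `infer_provider` and the "ollama" default are illustrative; the patterns
# (including the literal "04-*" entries) mirror the diff above.
def infer_provider(model_name: str) -> str:
    model_name_lower = model_name.lower()

    # OpenAI: common prefixes, any "gpt" substring, or the explicit list from the diff
    if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
            "gpt" in model_name_lower or
            model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
        return "openai"

    # Anthropic: anything mentioning claude or anthropic
    if any(name in model_name_lower for name in ["claude", "anthropic"]):
        return "anthropic"

    # Everything else is treated as a local Ollama model (illustrative default)
    return "ollama"


if __name__ == "__main__":
    for name in ["gpt-4o", "claude-3-haiku-20240307", "llama3:8b"]:
        print(name, "->", infer_provider(name))
```

Since the same pattern list now appears twice in base.py, a shared helper along these lines would be a natural follow-up refactor.
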
```diff
@@ -11,6 +11,14 @@ from .base import BaseModelClient
 # Set up logging
 logger = logging.getLogger(__name__)
 
+# Custom exception for Ollama API errors
+class OllamaApiError(Exception):
+    """Exception raised for errors in the Ollama API."""
+    def __init__(self, message: str, status_code: Optional[int] = None):
+        self.message = message
+        self.status_code = status_code
+        super().__init__(self.message)
+
 class OllamaClient(BaseModelClient):
     def __init__(self):
         from ..config import OLLAMA_BASE_URL
```

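The new OllamaApiError carries an optional HTTP status code alongside the message. The hunks further down mostly yield error text instead of raising, so this is only a hedged sketch of how such an exception could be raised and caught; the `check_response` caller is illustrative and not part of the package.

```python
from typing import Optional


# Same shape as the OllamaApiError added in app/api/ollama.py
class OllamaApiError(Exception):
    """Exception raised for errors in the Ollama API."""
    def __init__(self, message: str, status_code: Optional[int] = None):
        self.message = message
        self.status_code = status_code
        super().__init__(self.message)


# Illustrative caller, not code from the package
def check_response(status: int, body: str) -> str:
    if status == 404:
        raise OllamaApiError("Model not found on the Ollama server", status_code=404)
    return body


try:
    check_response(404, "")
except OllamaApiError as err:
    print(f"Ollama error (HTTP {err.status_code}): {err.message}")
```
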
```diff
@@ -280,12 +288,12 @@ class OllamaClient(BaseModelClient):
                             break
 
                     if not model_exists:
-
-                        # Instead of failing, yield a helpful error message
-                        yield f"Model '{model}' not found. Available models include: {', '.join(available_model_names[:5])}"
+                        error_msg = f"Model '{model}' not found in available models. Available models include: {', '.join(available_model_names[:5])}"
                         if len(available_model_names) > 5:
-
-
+                            error_msg += f" and {len(available_model_names) - 5} more."
+                        logger.error(error_msg)
+                        # Instead of raising a custom error, yield the message and return
+                        yield error_msg
                         return
             except Exception as e:
                 debug_log(f"Error checking model availability: {str(e)}")
```

```diff
@@ -329,10 +337,11 @@ class OllamaClient(BaseModelClient):
                     if response.status == 404:
                         error_text = await response.text()
                         debug_log(f"404 error details: {error_text}")
-
-
-
-
+                        error_msg = f"Error: Model '{model}' not found on the Ollama server. Please check if the model name is correct or try pulling it first."
+                        logger.error(error_msg)
+                        # Instead of raising, yield the error message for user display
+                        yield error_msg
+                        return # End the generation
 
                     raise aiohttp.ClientError("Model not ready")
             except (aiohttp.ClientError, asyncio.TimeoutError) as e:
```

```diff
@@ -363,12 +372,14 @@ class OllamaClient(BaseModelClient):
                         self._model_loading = False # Reset flag on failure
 
                         # Check if this is a 404 Not Found error
-                        if
-                            error_text = await
+                        if response.status == 404:
+                            error_text = await response.text()
                             debug_log(f"404 error details: {error_text}")
                             # This is likely a model not found in registry
-
-
+                            error_msg = f"Error: Model '{model}' not found in the Ollama registry. Please check if the model name is correct or try a different model."
+                            logger.error(error_msg)
+                            # Instead of raising a custom error, yield the message and return
+                            yield error_msg
                             return
 
                 raise Exception("Failed to pull model")
```

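The three hunks above share one pattern: when a model is missing (locally or in the registry), the async generator yields a readable error string and then returns, so the streaming consumer shows the problem inline instead of the worker crashing. Below is a minimal self-contained sketch of that pattern; the generator, the prompt echo, and the model list are illustrative, not code from the package.

```python
import asyncio
from typing import AsyncGenerator

# Illustrative stand-in for the models the server actually has
AVAILABLE = ["llama3:8b", "gemma:2b"]


async def stream_completion(model: str, prompt: str) -> AsyncGenerator[str, None]:
    """Yield response chunks; on failure, yield a readable error and stop."""
    if model not in AVAILABLE:
        # Same pattern as the diff: surface the problem as normal stream output
        yield f"Model '{model}' not found. Available models include: {', '.join(AVAILABLE)}"
        return  # End the generation instead of raising

    for word in f"Echo: {prompt}".split():
        yield word + " "
        await asyncio.sleep(0)


async def main() -> None:
    async for chunk in stream_completion("missing-model", "hello"):
        print(chunk, end="")
    print()


asyncio.run(main())
```
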
```diff
@@ -432,17 +443,34 @@ class OllamaClient(BaseModelClient):
                     if chunk_str.startswith('{') and chunk_str.endswith('}'):
                         try:
                             data = json.loads(chunk_str)
-                            if isinstance(data, dict)
-
-                                if
-
-
-
-
-
-
+                            if isinstance(data, dict):
+                                # Check for error in the chunk
+                                if "error" in data:
+                                    error_msg = data.get("error", "")
+                                    debug_log(f"Ollama API error in chunk: {error_msg}")
+
+                                    # Handle model loading state
+                                    if "loading model" in error_msg.lower():
+                                        # Yield a user-friendly message and keep trying
+                                        yield "The model is still loading. Please wait a moment..."
+                                        # Add delay before continuing
+                                        await asyncio.sleep(2)
+                                        continue
+
+                                # Process normal response
+                                if "response" in data:
+                                    response_text = data["response"]
+                                    if response_text: # Only yield non-empty responses
+                                        has_yielded_content = True
+                                        chunk_length = len(response_text)
+                                        # Only log occasionally to reduce console spam
+                                        if chunk_length % 20 == 0:
+                                            debug_log(f"Yielding chunk of length: {chunk_length}")
+                                        yield response_text
+                                else:
+                                    debug_log(f"JSON chunk missing 'response' key: {chunk_str[:100]}")
                             else:
-                            debug_log(f"JSON chunk
+                                debug_log(f"JSON chunk is not a dict: {chunk_str[:100]}")
                         except json.JSONDecodeError:
                             debug_log(f"JSON decode error for chunk: {chunk_str[:100]}")
                     else:
```

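The rewritten chunk handler distinguishes three cases per JSON line: an "error" payload (with a retry path when the model is still loading), a normal "response" payload, and anything else. Below is a simplified sketch of that branching over a list of pre-decoded chunks, with no HTTP layer; the function and variable names are illustrative.

```python
import asyncio
import json
from typing import AsyncGenerator, Iterable


async def yield_response_text(raw_chunks: Iterable[str]) -> AsyncGenerator[str, None]:
    """Simplified sketch of the chunk handling added in ollama.py (no HTTP layer)."""
    for chunk_str in raw_chunks:
        if not (chunk_str.startswith('{') and chunk_str.endswith('}')):
            continue
        try:
            data = json.loads(chunk_str)
        except json.JSONDecodeError:
            continue
        if not isinstance(data, dict):
            continue

        # Error payloads: report "loading model" separately and keep going
        if "error" in data:
            error_msg = data.get("error", "")
            if "loading model" in error_msg.lower():
                yield "The model is still loading. Please wait a moment..."
                await asyncio.sleep(0)  # the real code waits 2 seconds before continuing
                continue

        # Normal payloads: only yield non-empty response text
        if "response" in data and data["response"]:
            yield data["response"]


async def main() -> None:
    chunks = [
        '{"error": "loading model"}',
        '{"response": "Hello"}',
        '{"response": ", world"}',
    ]
    async for text in yield_response_text(chunks):
        print(text)


asyncio.run(main())
```
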
```diff
@@ -175,35 +175,38 @@ CONFIG = load_config()
 
 # --- Dynamically update Anthropic models after initial load ---
 def update_anthropic_models(config):
-    """
+    """Update the config with Anthropic models."""
     if AVAILABLE_PROVIDERS["anthropic"]:
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            # Instead of calling an async method, use a hardcoded fallback list
+            # that matches what's in the AnthropicClient class
+            fallback_models = [
+                {"id": "claude-3-opus-20240229", "name": "Claude 3 Opus"},
+                {"id": "claude-3-sonnet-20240229", "name": "Claude 3 Sonnet"},
+                {"id": "claude-3-haiku-20240307", "name": "Claude 3 Haiku"},
+                {"id": "claude-3-5-sonnet-20240620", "name": "Claude 3.5 Sonnet"},
+                {"id": "claude-3-7-sonnet-20250219", "name": "Claude 3.7 Sonnet"},
+            ]
+
+            # Remove old models first
+            models_to_remove = [
+                model_id for model_id, info in config["available_models"].items()
+                if info.get("provider") == "anthropic"
+            ]
+            for model_id in models_to_remove:
+                del config["available_models"][model_id]
+
+            # Add the fallback models
+            for model in fallback_models:
+                config["available_models"][model["id"]] = {
+                    "provider": "anthropic",
+                    "max_tokens": 4096,
+                    "display_name": model["name"]
+                }
+            print(f"Updated Anthropic models in config with fallback list")
+
         except Exception as e:
-            print(f"Error updating Anthropic models in config: {e}")
+            print(f"Error updating Anthropic models in config: {e}")
             # Keep existing config if update fails
 
     return config
```

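update_anthropic_models in config.py now replaces any Anthropic entries in config["available_models"] with a hardcoded fallback list instead of awaiting an API call. A small standalone sketch of the same dictionary surgery is below; the starting config is illustrative and the AVAILABLE_PROVIDERS guard from the real module is omitted.

```python
# Illustrative config shaped like the one config.py manipulates
config = {
    "available_models": {
        "claude-2": {"provider": "anthropic", "max_tokens": 4096, "display_name": "Claude 2"},
        "gpt-4": {"provider": "openai", "max_tokens": 8192, "display_name": "GPT-4"},
    }
}

fallback_models = [
    {"id": "claude-3-haiku-20240307", "name": "Claude 3 Haiku"},
    {"id": "claude-3-5-sonnet-20240620", "name": "Claude 3.5 Sonnet"},
]

# Drop stale Anthropic entries, then add the fallback list (same steps as the diff)
for model_id in [m for m, info in config["available_models"].items()
                 if info.get("provider") == "anthropic"]:
    del config["available_models"][model_id]

for model in fallback_models:
    config["available_models"][model["id"]] = {
        "provider": "anthropic",
        "max_tokens": 4096,
        "display_name": model["name"],
    }

print(sorted(config["available_models"]))
# ['claude-3-5-sonnet-20240620', 'claude-3-haiku-20240307', 'gpt-4']
```
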
```diff
@@ -20,10 +20,10 @@ file_handler = logging.FileHandler(debug_log_file)
 file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
 
 # Get the logger and add the handler
-debug_logger = logging.getLogger(
+debug_logger = logging.getLogger() # Root logger
 debug_logger.setLevel(logging.DEBUG)
 debug_logger.addHandler(file_handler)
-#
+# CRITICAL: Force all output to the file, not stdout
 debug_logger.propagate = False
 
 # Add a convenience function to log to this file
```

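Attaching the file handler to the root logger instead of a named logger means every module's logging.getLogger(__name__) output propagates up and lands in the debug file, which keeps log noise out of the terminal UI. A minimal sketch of the same setup, with an illustrative log-file path:

```python
import logging

# Illustrative path; the real app writes to its own debug log location
debug_log_file = "debug.log"

file_handler = logging.FileHandler(debug_log_file)
file_handler.setFormatter(
    logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
)

# Grab the root logger so records from every logging.getLogger(__name__)
# flow into the same file handler
debug_logger = logging.getLogger()
debug_logger.setLevel(logging.DEBUG)
debug_logger.addHandler(file_handler)
debug_logger.propagate = False  # as in the diff above

logging.getLogger("app.api.ollama").debug("this line ends up in debug.log")
```
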
```diff
@@ -766,7 +766,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         input_widget.focus()
 
     async def generate_response(self) -> None:
-        """Generate an AI response using a non-blocking worker."""
+        """Generate an AI response using a non-blocking worker with fallback."""
         # Import debug_log function from main
         debug_log(f"Entering generate_response method")
 
```

```diff
@@ -774,6 +774,10 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log("No current conversation or messages, returning")
             return
 
+        # Track if we've already attempted a fallback to avoid infinite loops
+        if not hasattr(self, 'fallback_attempted'):
+            self.fallback_attempted = False
+
         self.is_generating = True
         log("Setting is_generating to True")
         debug_log("Setting is_generating to True")
```

```diff
@@ -1010,11 +1014,15 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             self._loading_animation_task.cancel()
             self._loading_animation_task = None
         try:
+            # Explicitly hide loading indicator
             loading = self.query_one("#loading-indicator")
             loading.add_class("hidden")
+            loading.remove_class("model-loading") # Also remove model-loading class if present
+            self.refresh(layout=True) # Force a refresh to ensure UI updates
             self.query_one("#message-input").focus()
-        except Exception:
-
+        except Exception as ui_err:
+            debug_log(f"Error hiding loading indicator: {str(ui_err)}")
+            log.error(f"Error hiding loading indicator: {str(ui_err)}")
 
     # Rename this method slightly to avoid potential conflicts and clarify purpose
     async def _handle_generation_result(self, worker: Worker[Optional[str]]) -> None:
```

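The indicator is hidden by toggling CSS classes rather than removing the widget. A hedged Textual sketch of the same idea follows, assuming a recent Textual release; the widget ids and the CSS rule here are illustrative, not the package's stylesheet.

```python
# Requires `textual` to be installed; illustrative sketch of hiding a loading
# indicator via CSS classes, roughly as the diff above does.
from textual.app import App, ComposeResult
from textual.widgets import Static


class LoadingDemo(App):
    CSS = "#loading-indicator.hidden { display: none; }"

    def compose(self) -> ComposeResult:
        yield Static("Loading...", id="loading-indicator")
        yield Static("Ready", id="content")

    def on_mount(self) -> None:
        loading = self.query_one("#loading-indicator")
        loading.add_class("hidden")             # hide the indicator
        loading.remove_class("model-loading")   # removing an absent class is a no-op
        self.refresh(layout=True)               # force a layout refresh, as in the diff


if __name__ == "__main__":
    LoadingDemo().run()
```
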
```diff
@@ -1043,17 +1051,76 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log(f"Error in generation worker: {error}")
             log.error(f"Error in generation worker: {error}")
 
-            #
+            # Check if this is a model not found error that we can try to recover from
             error_str = str(error)
+            is_model_not_found = "not found" in error_str.lower() or "404" in error_str
+
+            # Try fallback if this is a model not found error and we haven't tried fallback yet
+            if is_model_not_found and not self.fallback_attempted:
+                debug_log("Model not found error detected, attempting fallback")
+                self.fallback_attempted = True
+
+                # Choose an appropriate fallback based on available providers
+                fallback_model = None
+                from app.config import OPENAI_API_KEY, ANTHROPIC_API_KEY
+
+                if OPENAI_API_KEY:
+                    fallback_model = "gpt-3.5-turbo"
+                    debug_log(f"Falling back to OpenAI model: {fallback_model}")
+                elif ANTHROPIC_API_KEY:
+                    fallback_model = "claude-3-haiku-20240307"
+                    debug_log(f"Falling back to Anthropic model: {fallback_model}")
+                else:
+                    # Find a common Ollama model that should exist
+                    try:
+                        from app.api.ollama import OllamaClient
+                        ollama = await OllamaClient.create()
+                        models = await ollama.get_available_models()
+                        for model_name in ["gemma:2b", "phi3:mini", "llama3:8b"]:
+                            if any(m["id"] == model_name for m in models):
+                                fallback_model = model_name
+                                debug_log(f"Found available Ollama model for fallback: {fallback_model}")
+                                break
+                    except Exception as e:
+                        debug_log(f"Error finding Ollama fallback model: {str(e)}")
+
+                if fallback_model:
+                    # Update UI to show fallback is happening
+                    loading = self.query_one("#loading-indicator")
+                    loading.remove_class("hidden")
+                    loading.update(f"⚙️ Falling back to {fallback_model}...")
+
+                    # Update the selected model
+                    self.selected_model = fallback_model
+                    self.update_app_info() # Update the displayed model info
+
+                    # Remove the "Thinking..." message
+                    if self.messages and self.messages[-1].role == "assistant":
+                        debug_log("Removing thinking message before fallback")
+                        self.messages.pop()
+                        await self.update_messages_ui()
+
+                    # Try again with the new model
+                    debug_log(f"Retrying with fallback model: {fallback_model}")
+                    self.notify(f"Trying fallback model: {fallback_model}", severity="warning", timeout=3)
+                    await self.generate_response()
+                    return
 
-            #
-
+            # If we get here, either it's not a model error or fallback already attempted
+            # Explicitly hide loading indicator
+            try:
+                loading = self.query_one("#loading-indicator")
+                loading.add_class("hidden")
+                loading.remove_class("model-loading") # Also remove model-loading class if present
+            except Exception as ui_err:
+                debug_log(f"Error hiding loading indicator: {str(ui_err)}")
+                log.error(f"Error hiding loading indicator: {str(ui_err)}")
 
             # Create a user-friendly error message
-            if
-                # For
+            if is_model_not_found:
+                # For model not found errors, provide a more user-friendly message
                 user_error = "Unable to generate response. The selected model may not be available."
-                debug_log(f"Sanitizing
+                debug_log(f"Sanitizing model not found error to user-friendly message: {user_error}")
                 # Show technical details only in notification, not in chat
                 self.notify(f"Model error: {error_str}", severity="error", timeout=5)
             else:
```

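The recovery path picks a fallback model in a fixed order: an OpenAI model if OPENAI_API_KEY is set, then an Anthropic model if ANTHROPIC_API_KEY is set, then the first common Ollama model the local server reports. A standalone sketch of that selection order follows; the function name, the env-var lookups, and the `ollama_models` parameter are illustrative simplifications of the worker code above.

```python
import os
from typing import Iterable, Optional


def choose_fallback_model(ollama_models: Iterable[str]) -> Optional[str]:
    """Pick a fallback in the same priority order as the diff above (sketch)."""
    if os.environ.get("OPENAI_API_KEY"):
        return "gpt-3.5-turbo"
    if os.environ.get("ANTHROPIC_API_KEY"):
        return "claude-3-haiku-20240307"

    # No hosted provider configured: fall back to a common local Ollama model
    available = set(ollama_models)
    for candidate in ["gemma:2b", "phi3:mini", "llama3:8b"]:
        if candidate in available:
            return candidate
    return None  # caller keeps the original error if nothing is available


print(choose_fallback_model(["llama3:8b", "mistral:7b"]))
```
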
```diff
@@ -1069,6 +1136,9 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log(f"Adding error message: {user_error}")
             self.messages.append(Message(role="assistant", content=user_error))
             await self.update_messages_ui()
+
+            # Force a refresh to ensure UI updates
+            self.refresh(layout=True)
 
         elif worker.state == "success":
             full_response = worker.result
```

```diff
@@ -121,77 +121,50 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
 
     async def update_content(self, content: str) -> None:
         """Update the message content using Static.update() with optimizations for streaming"""
-        # Use proper logging instead of print statements
         import logging
         logger = logging.getLogger(__name__)
         logger.debug(f"MessageDisplay.update_content called with content length: {len(content)}")
 
-        #
-        if self
-
-
-
-
-
-
-
-
-
+        # Use a lock to prevent race conditions during updates
+        if not hasattr(self, '_update_lock'):
+            self._update_lock = asyncio.Lock()
+
+        async with self._update_lock:
+            # Special handling for "Thinking..." to ensure it gets replaced
+            if self.message.content == "Thinking..." and content:
+                logger.debug("Replacing 'Thinking...' with actual content")
+                # Force a complete replacement
+                self.message.content = content
+                formatted_content = self._format_content(content)
+                self.update(formatted_content, refresh=True)
+
+                # Force app-level refresh
+                try:
+                    if self.app:
+                        self.app.refresh(layout=True)
+                        # Find container and scroll
+                        messages_container = self.app.query_one("#messages-container")
+                        if messages_container:
+                            messages_container.scroll_end(animate=False)
+                except Exception as e:
+                    logger.error(f"Error refreshing app: {str(e)}")
+                return
+
+            # For all other updates - ALWAYS update
             self.message.content = content
-
-        # Format with fixed-width placeholder to minimize layout shifts
             formatted_content = self._format_content(content)
-
-        # Use a direct update that forces refresh - critical fix for streaming
             self.update(formatted_content, refresh=True)
 
-        # Force
+            # Force refresh
             try:
                 if self.app:
-                    # Force a full layout refresh to ensure content is visible
                     self.app.refresh(layout=True)
-
-
-
-
-                    if hasattr(container, 'scroll_end'):
-                        container.scroll_end(animate=False)
+                    # Find container and scroll
+                    messages_container = self.app.query_one("#messages-container")
+                    if messages_container:
+                        messages_container.scroll_end(animate=False)
             except Exception as e:
                 logger.error(f"Error refreshing app: {str(e)}")
-            self.refresh(layout=True)
-
-        # Return early to avoid duplicate updates
-        return
-
-        # Update the stored message object content
-        self.message.content = content
-
-        # Format with fixed-width placeholder to minimize layout shifts
-        # This avoids text reflowing as new tokens arrive
-        formatted_content = self._format_content(content)
-
-        # Use a direct update that forces refresh - critical fix for streaming
-        # This ensures content is immediately visible
-        logger.debug(f"Updating widget with formatted content length: {len(formatted_content)}")
-        self.update(formatted_content, refresh=True)
-
-        # Force app-level refresh and scroll to ensure visibility
-        try:
-            # Always force app refresh for every update
-            if self.app:
-                # Force a full layout refresh to ensure content is visible
-                self.app.refresh(layout=True)
-
-                # Find the messages container and scroll to end
-                containers = self.app.query("ScrollableContainer")
-                for container in containers:
-                    if hasattr(container, 'scroll_end'):
-                        container.scroll_end(animate=False)
-        except Exception as e:
-            # Log the error and fallback to local refresh
-            logger.error(f"Error refreshing app: {str(e)}")
-            self.refresh(layout=True)
 
     def _format_content(self, content: str) -> str:
         """Format message content with timestamp and handle markdown links"""
```

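The rewritten update_content serializes concurrent streaming updates behind a lazily created asyncio.Lock and short-circuits once the "Thinking..." placeholder has been replaced. A reduced sketch of that locking pattern, without any Textual widgets, is below; the class and method names are illustrative.

```python
import asyncio


class StreamingDisplay:
    """Reduced sketch of the locking pattern used by MessageDisplay.update_content."""

    def __init__(self) -> None:
        self.content = "Thinking..."

    async def update_content(self, content: str) -> None:
        # Create the lock lazily, as the diff does, so it works regardless of __init__ order
        if not hasattr(self, "_update_lock"):
            self._update_lock = asyncio.Lock()

        async with self._update_lock:
            # The first real chunk replaces the placeholder outright
            if self.content == "Thinking..." and content:
                self.content = content
                return
            # Later chunks overwrite with the latest full content
            self.content = content


async def main() -> None:
    display = StreamingDisplay()
    # Concurrent updates are applied one at a time under the lock
    await asyncio.gather(display.update_content("Hel"), display.update_content("Hello"))
    print(display.content)


asyncio.run(main())
```
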
```diff
@@ -247,6 +247,11 @@ class ModelSelector(Container):
         """Handle select changes"""
         if event.select.id == "provider-select":
             self.selected_provider = event.value
+
+            # IMPORTANT: Clear any cached client
+            if hasattr(self.app, 'cached_client'):
+                self.app.cached_client = None
+
             # Update model options
             model_select = self.query_one("#model-select", Select)
             model_options = await self._get_model_options(self.selected_provider)
```

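Clearing the cached client when the provider changes prevents later requests from going to the previously selected backend. A tiny illustrative sketch of why the stale client has to be dropped; only the `cached_client` attribute name comes from the diff, the classes here are stand-ins.

```python
# Illustrative stand-ins; not code from the package
class FakeClient:
    def __init__(self, provider: str) -> None:
        self.provider = provider


class AppState:
    cached_client = None

    def get_client(self, provider: str) -> FakeClient:
        # Reuse the cached client only while one exists
        if self.cached_client is None:
            self.cached_client = FakeClient(provider)
        return self.cached_client

    def on_provider_changed(self, provider: str) -> None:
        # Drop the stale client so the next request builds one
        # for the newly selected provider
        if hasattr(self, "cached_client"):
            self.cached_client = None


state = AppState()
print(state.get_client("openai").provider)   # openai
state.on_provider_changed("ollama")
print(state.get_client("ollama").provider)   # ollama
```
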