chat-console 0.2.99__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/anthropic.py +96 -72
- app/api/base.py +2 -2
- app/api/ollama.py +21 -10
- app/api/openai.py +88 -31
- app/main.py +105 -54
- app/ui/chat_interface.py +41 -7
- app/ui/model_selector.py +52 -14
- app/utils.py +130 -91
- {chat_console-0.2.99.dist-info → chat_console-0.3.4.dist-info}/METADATA +1 -1
- chat_console-0.3.4.dist-info/RECORD +24 -0
- {chat_console-0.2.99.dist-info → chat_console-0.3.4.dist-info}/WHEEL +1 -1
- chat_console-0.2.99.dist-info/RECORD +0 -24
- {chat_console-0.2.99.dist-info → chat_console-0.3.4.dist-info}/entry_points.txt +0 -0
- {chat_console-0.2.99.dist-info → chat_console-0.3.4.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.2.99.dist-info → chat_console-0.3.4.dist-info}/top_level.txt +0 -0
app/main.py
CHANGED
@@ -23,6 +23,8 @@ file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelnam…
 debug_logger = logging.getLogger("chat-cli-debug")
 debug_logger.setLevel(logging.DEBUG)
 debug_logger.addHandler(file_handler)
+# Prevent propagation to the root logger (which would print to console)
+debug_logger.propagate = False
 
 # Add a convenience function to log to this file
 def debug_log(message):
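The new `propagate = False` line matters because `debug_log` writes through a dedicated logger: without it, every record would also bubble up to the root logger and be echoed to the console, corrupting the terminal UI. A minimal standalone sketch of the pattern (the log filename here is an illustrative assumption):

import logging

file_handler = logging.FileHandler("debug.log")  # path assumed for illustration
file_handler.setFormatter(
    logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)

debug_logger = logging.getLogger("chat-cli-debug")
debug_logger.setLevel(logging.DEBUG)
debug_logger.addHandler(file_handler)
# Without this, records also propagate to the root logger, whose handler
# typically writes to the console and would interleave with the TUI.
debug_logger.propagate = False

debug_logger.debug("this line goes to debug.log only")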
@@ -642,16 +644,21 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         await self.update_messages_ui()
 
         # If this is the first message and dynamic titles are enabled, generate one
-        if is_first_message and self.current_conversation and CONFIG.get("generate_dynamic_titles", True):
+        # Only attempt title generation if the message has sufficient content (at least 3 characters)
+        if is_first_message and self.current_conversation and CONFIG.get("generate_dynamic_titles", True) and len(content) >= 3:
             log("First message detected, generating title...")
-            …
-            …
+            print(f"First message detected, generating conversation title for: {content[:30]}...")
+            debug_log(f"First message detected with length {len(content)}, generating conversation title")
+
+            # Show loading indicator for title generation
             loading = self.query_one("#loading-indicator")
-            loading.remove_class("hidden")
+            loading.remove_class("hidden")
+            loading.update("🔤 Generating title...")
 
             try:
                 # Get appropriate client
                 model = self.selected_model
+                print(f"Using model for title generation: {model}")
                 debug_log(f"Selected model for title generation: '{model}'")
 
                 # Check if model is valid
@@ -662,24 +669,12 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                         model = "gpt-3.5-turbo"
                         debug_log("Falling back to OpenAI gpt-3.5-turbo for title generation")
                     elif ANTHROPIC_API_KEY:
-                        model = "claude-…
-                        debug_log("Falling back to Anthropic…
+                        model = "claude-3-haiku-20240307" # Updated to newer Claude model
+                        debug_log("Falling back to Anthropic Claude 3 Haiku for title generation")
                     else:
-                        # Last resort -…
-                        …
-                        try:
-                            ollama = await OllamaClient.create()
-                            models = await ollama.get_available_models()
-                            if models and len(models) > 0:
-                                debug_log(f"Found {len(models)} Ollama models, using first one")
-                                model = models[0].get("id", "llama3")
-                            else:
-                                model = "llama3" # Common default
-                            debug_log(f"Falling back to Ollama model: {model}")
-                        except Exception as ollama_err:
-                            debug_log(f"Error getting Ollama models: {str(ollama_err)}")
-                            model = "llama3" # Final fallback
-                            debug_log("Final fallback to llama3")
+                        # Last resort - use a common Ollama model
+                        model = "llama3" # Common default
+                        debug_log("Falling back to Ollama model: llama3")
 
                 debug_log(f"Getting client for model: {model}")
                 client = await BaseModelClient.get_client_for_model(model)
@@ -707,17 +702,19 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 elif ANTHROPIC_API_KEY:
                     from app.api.anthropic import AnthropicClient
                     client = await AnthropicClient.create()
-                    model = "claude-…
+                    model = "claude-3-haiku-20240307" # Updated to newer Claude model
                     debug_log("Falling back to Anthropic for title generation")
                 else:
                     raise Exception("No valid API clients available for title generation")
 
             # Generate title
+            print(f"Calling generate_conversation_title with model: {model}")
             log(f"Calling generate_conversation_title with model: {model}")
             debug_log(f"Calling generate_conversation_title with model: {model}")
             title = await generate_conversation_title(content, model, client)
             debug_log(f"Generated title: {title}")
             log(f"Generated title: {title}")
+            print(f"Generated title: {title}")
 
             # Update conversation title in database
             self.db.update_conversation(
@@ -743,11 +740,10 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         except Exception as e:
             debug_log(f"Failed to generate title: {str(e)}")
             log.error(f"Failed to generate title: {str(e)}")
+            print(f"Failed to generate title: {str(e)}")
             self.notify(f"Failed to generate title: {str(e)}", severity="warning")
         finally:
-            title_generation_in_progress = False
             # Hide loading indicator *only if* AI response generation isn't about to start
-            # This check might be redundant if generate_response always shows it anyway
             if not self.is_generating:
                 loading.add_class("hidden")
 
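Across the two title-generation hunks above, the old code probed a running Ollama server for its model list; the new code pins a static fallback chain instead. A condensed sketch of the resulting selection order (the helper function and its signature are hypothetical; the model names are taken from the diff):

def pick_title_model(model, openai_key, anthropic_key):
    """Condensed view of the title-generation fallback chain."""
    # Keep the user's selected model when one is set.
    if model:
        return model
    # Otherwise fall back by provider, preferring whichever API key exists.
    if openai_key:
        return "gpt-3.5-turbo"
    if anthropic_key:
        return "claude-3-haiku-20240307"
    # Last resort - a common Ollama default, with no server round-trip.
    return "llama3"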
@@ -907,13 +903,17 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
 
         # Start streaming response
         debug_log("Creating assistant message with 'Thinking...'")
+        print("Creating assistant message with 'Thinking...'")
         assistant_message = Message(role="assistant", content="Thinking...")
         self.messages.append(assistant_message)
         messages_container = self.query_one("#messages-container")
         message_display = MessageDisplay(assistant_message, highlight_code=CONFIG["highlight_code"])
         messages_container.mount(message_display)
+
+        # Force a layout refresh and scroll to end
+        self.refresh(layout=True)
         messages_container.scroll_end(animate=False)
-
+
         # Add small delay to show thinking state
         await asyncio.sleep(0.5)
 
@@ -940,48 +940,71 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 # Update the message object with the full content
                 assistant_message.content = content
 
-                # Update UI with the content -…
+                # Update UI with the content - the MessageDisplay will now handle its own refresh
+                # This is a critical change that ensures content is immediately visible
                 await message_display.update_content(content)
-
-                #…
-                #…
-                …
+
+                # CRITICAL: Force immediate UI refresh after EVERY update
+                # This ensures we don't need a second Enter press to see content
+                self.refresh(layout=True)
+
+                # Always scroll after each update to ensure visibility
+                messages_container.scroll_end(animate=False)
+
+                # For longer responses, we can throttle the heavy refreshes
+                # to reduce visual jitter, but still do light refreshes for every update
                 content_length = len(content)
 
-                # Define…
+                # Define key refresh points that require more thorough updates
                 new_paragraph = content.endswith("\n") and content.count("\n") > 0
-                …
-                …
-                content_length…
-                …
+                code_block = "```" in content
+                needs_thorough_refresh = (
+                    content_length < 30 or # Very aggressive for short responses
+                    content_length % 16 == 0 or # More frequent periodic updates
+                    new_paragraph or # Refresh on paragraph breaks
+                    code_block # Refresh when code blocks are detected
                 )
 
-                # Check if it's been enough time since last refresh
+                # Check if it's been enough time since last heavy refresh
+                # Reduced from 200ms to 100ms for more responsive UI
                 current_time = time.time()
                 time_since_refresh = current_time - last_refresh_time
 
-                if…
-                    # Store the time we did the refresh
+                if needs_thorough_refresh and time_since_refresh > 0.1:
+                    # Store the time we did the heavy refresh
                     last_refresh_time = current_time
-                    …
-                    #…
+
+                    # Ensure content is visible with an aggressive, guaranteed update sequence
+                    # 1. Scroll to ensure visibility
+                    messages_container.scroll_end(animate=False)
+
+                    # 2. Force a comprehensive refresh with layout recalculation
+                    self.refresh(layout=True)
+
+                    # 3. Small delay for rendering
+                    await asyncio.sleep(0.01)
+
+                    # 4. Another scroll to account for any layout changes
                     messages_container.scroll_end(animate=False)
+
             except Exception as e:
                 debug_log(f"Error updating UI: {str(e)}")
                 log.error(f"Error updating UI: {str(e)}")
 
         # --- Remove the inner run_generation_worker function ---
 
-        # Start the worker
-        debug_log("Starting generate_streaming_response worker")
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        # Start the worker using Textual's run_worker to ensure state tracking
+        debug_log("Starting generate_streaming_response worker with run_worker")
+        worker = self.run_worker(
+            generate_streaming_response(
+                self,
+                api_messages,
+                model,
+                style,
+                client,
+                update_ui # Pass the callback function
+            ),
+            name="generate_response"
         )
         self.current_generation_task = worker
         # Worker completion will be handled by on_worker_state_changed
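The streaming callback above now does a cheap `update_content` plus scroll on every chunk, and reserves the heavier scroll/refresh/sleep sequence for "thorough" refresh points gated by a 100 ms timer. The gating logic, folded into one standalone predicate (the helper is hypothetical; the thresholds are copied from the diff, and unlike the inline version the timer check is merged into the same function):

import time

HEAVY_REFRESH_INTERVAL = 0.1  # reduced from 0.2s, per the diff comment

def should_refresh_thoroughly(content: str, last_refresh_time: float) -> bool:
    """Mirror of the heavy-refresh heuristics in the hunk above."""
    content_length = len(content)
    new_paragraph = content.endswith("\n") and content.count("\n") > 0
    code_block = "```" in content
    wants_refresh = (
        content_length < 30          # very aggressive for short responses
        or content_length % 16 == 0  # periodic updates as content grows
        or new_paragraph             # refresh on paragraph breaks
        or code_block                # refresh when code blocks are detected
    )
    return wants_refresh and (time.time() - last_refresh_time) > HEAVY_REFRESH_INTERVAL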
@@ -1054,6 +1077,21 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             # Update the final message object content (optional, UI should be up-to-date)
             if self.messages and self.messages[-1].role == "assistant":
                 self.messages[-1].content = full_response
+
+                # Force a UI refresh with the message display to ensure it's fully rendered
+                try:
+                    # Get the message display for the assistant message
+                    messages_container = self.query_one("#messages-container")
+                    message_displays = messages_container.query("MessageDisplay")
+                    # Check if we found any message displays
+                    if message_displays and len(message_displays) > 0:
+                        # Get the last message display which should be our assistant message
+                        last_message_display = message_displays[-1]
+                        debug_log("Forcing final content update on message display")
+                        # Force a final content update
+                        await last_message_display.update_content(full_response)
+                except Exception as disp_err:
+                    debug_log(f"Error updating final message display: {str(disp_err)}")
             else:
                 debug_log("Worker finished successfully but response was empty or invalid.")
                 # Handle case where 'Thinking...' might still be the last message
@@ -1061,11 +1099,24 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                     self.messages.pop() # Remove 'Thinking...' if no content arrived
                     await self.update_messages_ui()
 
-            #…
-            # Use layout=False to prevent UI jumping at the end
-            self.refresh(layout=False)
-            await asyncio.sleep(0.1) # Allow UI to stabilize
+            # Force a full UI refresh to ensure content is visible
             messages_container = self.query_one("#messages-container")
+
+            # Sequence of UI refreshes to ensure content is properly displayed
+            # 1. First do a lightweight refresh
+            self.refresh(layout=False)
+
+            # 2. Short delay to allow the UI to process
+            await asyncio.sleep(0.1)
+
+            # 3. Ensure we're scrolled to the end
+            messages_container.scroll_end(animate=False)
+
+            # 4. Full layout refresh
+            self.refresh(layout=True)
+
+            # 5. Final delay and scroll to ensure everything is visible
+            await asyncio.sleep(0.1)
             messages_container.scroll_end(animate=False)
 
         except Exception as e:
app/ui/chat_interface.py
CHANGED
@@ -132,17 +132,51 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         # This avoids text reflowing as new tokens arrive
         formatted_content = self._format_content(content)
 
-        # Use…
-        # This…
-        self.update(formatted_content, refresh=…
+        # Use a direct update that forces refresh - critical fix for streaming
+        # This ensures content is immediately visible
+        self.update(formatted_content, refresh=True)
 
-        #…
-        …
+        # Force app-level refresh and scroll to ensure visibility
+        try:
+            # Always force app refresh for every update
+            if self.app:
+                # Force a full layout refresh to ensure content is visible
+                self.app.refresh(layout=True)
+
+                # Find the messages container and scroll to end
+                containers = self.app.query("ScrollableContainer")
+                for container in containers:
+                    if hasattr(container, 'scroll_end'):
+                        container.scroll_end(animate=False)
+        except Exception as e:
+            # Log the error and fallback to local refresh
+            print(f"Error refreshing app: {str(e)}")
+            self.refresh(layout=True)
+
+        # Small delay to allow UI to update
+        await asyncio.sleep(0.02) # Increased delay for better rendering
 
     def _format_content(self, content: str) -> str:
-        """Format message content with timestamp"""
+        """Format message content with timestamp and handle markdown links"""
         timestamp = datetime.now().strftime("%H:%M")
-        …
+
+        # Fix markdown-style links that cause markup errors
+        # Convert [text](url) to a safe format for Textual markup
+        content = re.sub(
+            r'\[([^\]]+)\]\(([^)]+)\)',
+            lambda m: f"{m.group(1)} ({m.group(2)})",
+            content
+        )
+
+        # Escape any other potential markup characters
+        content = content.replace("[", "\\[").replace("]", "\\]")
+        # But keep our timestamp markup
+        timestamp_markup = f"[dim]{timestamp}[/dim]"
+
+        # Debug print to verify content is being formatted
+        print(f"Formatting content: {len(content)} chars")
+
+        return f"{timestamp_markup} {content}"
 
 class InputWithFocus(Input):
     """Enhanced Input that better handles focus and maintains cursor position"""
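The new `_format_content` body is easy to verify in isolation: the regex rewrites `[text](url)` links into plain `text (url)` before the remaining square brackets are escaped, so nothing in the message body can be parsed as Textual markup tags. A small self-contained check of just those two transforms (the wrapper function name is an assumption):

import re

def sanitize_for_markup(content: str) -> str:
    # Convert [text](url) into "text (url)" so links survive escaping.
    content = re.sub(
        r'\[([^\]]+)\]\(([^)]+)\)',
        lambda m: f"{m.group(1)} ({m.group(2)})",
        content,
    )
    # Escape any other potential markup characters.
    return content.replace("[", "\\[").replace("]", "\\]")

print(sanitize_for_markup("See [the docs](https://example.com) [1]"))
# prints: See the docs (https://example.com) \[1\]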
app/ui/model_selector.py
CHANGED
@@ -162,14 +162,36 @@ class ModelSelector(Container):
         """Get model options for a specific provider"""
         logger = logging.getLogger(__name__)
         logger.info(f"Getting model options for provider: {provider}")
-
+
+        options = []
+
+        if provider == "openai":
+            try:
+                from ..api.openai import OpenAIClient
+                client = await OpenAIClient.create()
+                models = await client.get_available_models()
+                logger.info(f"Found {len(models)} models from OpenAI API")
+                for model in models:
+                    options.append((model["name"], model["id"]))
+            except Exception as e:
+                logger.error(f"Error getting OpenAI models: {str(e)}")
+                # Fallback to static list
+                options = [
+                    ("gpt-3.5-turbo", "gpt-3.5-turbo"),
+                    ("gpt-4", "gpt-4"),
+                    ("gpt-4-turbo", "gpt-4-turbo"),
+                ]
+            # Do NOT add custom model option for OpenAI
+            return options
+
+        # Default: config-based models
         options = [
             (model_info["display_name"], model_id)
             for model_id, model_info in CONFIG["available_models"].items()
             if model_info["provider"] == provider
         ]
         logger.info(f"Found {len(options)} models in config for {provider}")
-
+
         # Add available Ollama models
         if provider == "ollama":
             try:
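The OpenAI branch introduces a fetch-with-fallback pattern: ask the live API for its model list, and fall back to a pinned static list if the call fails. Reduced to its essentials (the client class and its methods are as they appear in the diff; the wrapper function itself is hypothetical):

import logging

async def openai_model_options():
    """Sketch of the fetch-with-static-fallback pattern from the diff."""
    logger = logging.getLogger(__name__)
    try:
        from app.api.openai import OpenAIClient
        client = await OpenAIClient.create()
        models = await client.get_available_models()
        return [(m["name"], m["id"]) for m in models]
    except Exception as e:
        logger.error(f"Error getting OpenAI models: {str(e)}")
        # Pinned fallback, mirroring the diff's static list.
        return [
            ("gpt-3.5-turbo", "gpt-3.5-turbo"),
            ("gpt-4", "gpt-4"),
            ("gpt-4-turbo", "gpt-4-turbo"),
        ]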
@@ -214,7 +236,10 @@ class ModelSelector(Container):
                 ]
                 logger.info("Adding default Ollama models as fallback")
                 options.extend(default_models)
-
+            options.append(("Custom Model...", "custom"))
+            return options
+
+        # For Anthropic and others, allow custom model
         options.append(("Custom Model...", "custom"))
         return options
 
@@ -243,12 +268,14 @@ class ModelSelector(Container):
 
             # Set the model if we found one
             if first_model and len(first_model) >= 2:
-                #…
+                # Get the original ID from the model option
                 original_id = first_model[1]
+                # Resolve the model ID for internal use and messaging
                 resolved_id = resolve_model_id(original_id)
                 logger.info(f"on_select_changed (provider): Original ID '{original_id}' resolved to '{resolved_id}'")
                 self.selected_model = resolved_id
-                …
+                # Use the original ID for the select widget to avoid invalid value errors
+                model_select.value = original_id
                 model_select.remove_class("hide")
                 self.query_one("#custom-model-input").add_class("hide")
                 self.post_message(self.ModelSelected(resolved_id))
@@ -310,24 +337,35 @@ class ModelSelector(Container):
     def set_selected_model(self, model_id: str) -> None:
         """Set the selected model, ensuring it's properly resolved"""
         # First resolve the model ID to ensure we're using the full ID
+        original_id = model_id
         resolved_id = resolve_model_id(model_id)
-        logger.info(f"set_selected_model: Original ID '{…
+        logger.info(f"set_selected_model: Original ID '{original_id}' resolved to '{resolved_id}'")
 
-        # Store the resolved ID…
+        # Store the resolved ID internally
        self.selected_model = resolved_id
 
         # Update the UI based on whether this is a known model or custom
-        if…
-        …
+        # Check if the original ID is in the available options
+        model_select = self.query_one("#model-select", Select)
+        available_options = [opt[1] for opt in model_select.options]
+
+        if original_id in available_options:
+            # Use the original ID for the select widget
             custom_input = self.query_one("#custom-model-input")
-            …
-            …
+            model_select.value = original_id
+            model_select.remove_class("hide")
+            custom_input.add_class("hide")
+        elif resolved_id in available_options:
+            # If the resolved ID is in options, use that
+            custom_input = self.query_one("#custom-model-input")
+            model_select.value = resolved_id
+            model_select.remove_class("hide")
             custom_input.add_class("hide")
         else:
-            …
+            # Use custom input for models not in the select options
             custom_input = self.query_one("#custom-model-input")
-            …
-            …
+            model_select.value = "custom"
+            model_select.add_class("hide")
             custom_input.value = resolved_id
             custom_input.remove_class("hide")
 
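The three branches of `set_selected_model` reduce to one decision: show whichever ID form the Select widget actually lists, and only reveal the custom text input when neither form is an option. A compressed sketch of that decision (widget plumbing elided; the helper function is hypothetical, while `resolve_model_id` is the app's own utility):

def choose_select_value(original_id, resolved_id, available_options):
    """Return (value_for_select, use_custom_input) per the branching above."""
    if original_id in available_options:
        return original_id, False   # the select knows the original form
    if resolved_id in available_options:
        return resolved_id, False   # fall back to the resolved form
    return "custom", True           # neither form is listed: use custom input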