chat-console 0.2.9__py3-none-any.whl → 0.2.98__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/anthropic.py +155 -25
- app/api/base.py +43 -0
- app/api/ollama.py +193 -40
- app/api/openai.py +45 -3
- app/config.py +53 -7
- app/main.py +480 -96
- app/ui/chat_interface.py +35 -19
- app/ui/model_browser.py +405 -45
- app/ui/model_selector.py +77 -19
- app/utils.py +354 -82
- {chat_console-0.2.9.dist-info → chat_console-0.2.98.dist-info}/METADATA +1 -1
- chat_console-0.2.98.dist-info/RECORD +24 -0
- chat_console-0.2.9.dist-info/RECORD +0 -24
- {chat_console-0.2.9.dist-info → chat_console-0.2.98.dist-info}/WHEEL +0 -0
- {chat_console-0.2.9.dist-info → chat_console-0.2.98.dist-info}/entry_points.txt +0 -0
- {chat_console-0.2.9.dist-info → chat_console-0.2.98.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.2.9.dist-info → chat_console-0.2.98.dist-info}/top_level.txt +0 -0
app/main.py
CHANGED
@@ -5,15 +5,35 @@ Simplified version of Chat CLI with AI functionality
 import os
 import asyncio
 import typer
+import logging
 from typing import List, Optional, Callable, Awaitable
 from datetime import datetime
 
+# Create a dedicated logger that definitely writes to a file
+log_dir = os.path.expanduser("~/.cache/chat-cli")
+os.makedirs(log_dir, exist_ok=True)
+debug_log_file = os.path.join(log_dir, "debug.log")
+
+# Configure the logger
+file_handler = logging.FileHandler(debug_log_file)
+file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+
+# Get the logger and add the handler
+debug_logger = logging.getLogger("chat-cli-debug")
+debug_logger.setLevel(logging.DEBUG)
+debug_logger.addHandler(file_handler)
+
+# Add a convenience function to log to this file
+def debug_log(message):
+    debug_logger.info(message)
+
 from textual.app import App, ComposeResult
 from textual.containers import Container, Horizontal, Vertical, ScrollableContainer, Center
 from textual.reactive import reactive
 from textual.widgets import Button, Input, Label, Static, Header, Footer, ListView, ListItem
 from textual.binding import Binding
 from textual import work, log, on
+from textual.worker import Worker, WorkerState # Import Worker class and WorkerState enum
 from textual.screen import Screen
 from openai import OpenAI
 from app.models import Message, Conversation
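This hunk sets up the release's main debugging aid: every `debug_log(...)` call added throughout `app/main.py` appends a timestamped line to `~/.cache/chat-cli/debug.log`. A self-contained sketch of the same pattern, with one extra guard (not in the shipped code) against attaching duplicate handlers if the setup ever runs twice:

```python
import logging
import os

def make_debug_logger(name: str = "chat-cli-debug") -> logging.Logger:
    """Sketch of the file-logger pattern introduced above; the duplicate-handler
    guard is an addition here, the shipped code attaches unconditionally."""
    log_dir = os.path.expanduser("~/.cache/chat-cli")
    os.makedirs(log_dir, exist_ok=True)
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        handler = logging.FileHandler(os.path.join(log_dir, "debug.log"))
        handler.setFormatter(
            logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        )
        logger.addHandler(handler)
    return logger

debug_log = make_debug_logger().info
debug_log("logger initialized")  # appends a timestamped line to debug.log
```

Because the shipped version runs at module import time, merely importing `app.main` creates the cache directory and opens the log file.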
@@ -25,7 +45,7 @@ from app.ui.model_selector import ModelSelector, StyleSelector
 from app.ui.chat_list import ChatList
 from app.ui.model_browser import ModelBrowser
 from app.api.base import BaseModelClient
-from app.utils import generate_streaming_response, save_settings_to_config, generate_conversation_title # Import
+from app.utils import generate_streaming_response, save_settings_to_config, generate_conversation_title, resolve_model_id # Import resolver
 # Import version here to avoid potential circular import issues at top level
 from app import __version__
 
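The imported `resolve_model_id` lives in `app/utils.py` (+354 −82, not shown in this section), so its exact behavior cannot be read off this hunk. As a purely hypothetical sketch of what a resolver like this typically does, map aliases and stale config values onto canonical model IDs and pass unknown IDs through unchanged:

```python
# Hypothetical sketch only: the real resolve_model_id is in app/utils.py and
# may behave differently. The alias table below is invented for illustration.
MODEL_ALIASES = {
    "gpt35": "gpt-3.5-turbo",
    "claude": "claude-instant-1.2",
}

def resolve_model_id(model_id: str) -> str:
    if not model_id:
        return model_id  # let callers handle empty IDs (the app falls back later)
    return MODEL_ALIASES.get(model_id.strip().lower(), model_id)

assert resolve_model_id("gpt35") == "gpt-3.5-turbo"
assert resolve_model_id("llama3") == "llama3"  # unknown IDs pass through
```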
@@ -211,11 +231,17 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         color: $text;
         content-align: center middle;
         text-align: center;
+        text-style: bold;
     }
 
     #loading-indicator.hidden { # Keep SimpleChatApp CSS
         display: none;
     }
+
+    #loading-indicator.model-loading {
+        background: $warning;
+        color: $text;
+    }
 
     #input-area { # Keep SimpleChatApp CSS
         width: 100%; # Keep SimpleChatApp CSS
@@ -313,12 +339,16 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
     current_conversation = reactive(None) # Keep SimpleChatApp reactive var
     is_generating = reactive(False) # Keep SimpleChatApp reactive var
     current_generation_task: Optional[asyncio.Task] = None # Add task reference
+    _loading_frame = 0 # Track animation frame
+    _loading_animation_task: Optional[asyncio.Task] = None # Animation task
 
     def __init__(self, initial_text: Optional[str] = None): # Keep SimpleChatApp __init__
         super().__init__() # Keep SimpleChatApp __init__
         self.db = ChatDatabase() # Keep SimpleChatApp __init__
         self.messages = [] # Keep SimpleChatApp __init__
-
+        # Resolve the default model ID on initialization
+        default_model_from_config = CONFIG["default_model"]
+        self.selected_model = resolve_model_id(default_model_from_config)
         self.selected_style = CONFIG["default_style"] # Keep SimpleChatApp __init__
         self.initial_text = initial_text # Keep SimpleChatApp __init__
         # Removed self.input_widget instance variable
@@ -347,7 +377,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             pass
 
         # Loading indicator
-        yield Static("Generating response...", id="loading-indicator", classes="hidden")
+        yield Static("▪▪▪ Generating response...", id="loading-indicator", classes="hidden", markup=False)
 
         # Input area
         with Container(id="input-area"):
@@ -511,6 +541,15 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             # This happens if is_generating is True, but no active task found to cancel
             log("No active generation task found, but is_generating=True. Resetting state.")
             self.is_generating = False
+
+            # Make sure to cancel animation task too
+            if self._loading_animation_task and not self._loading_animation_task.done():
+                try:
+                    self._loading_animation_task.cancel()
+                except Exception as e:
+                    log.error(f"Error cancelling animation task: {str(e)}")
+            self._loading_animation_task = None
+
             loading = self.query_one("#loading-indicator")
             loading.add_class("hidden")
         else:
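The cleanup added here follows the standard asyncio idiom: only cancel a task that exists and has not finished, and never let a failing `cancel()` take down the caller. The idiom in isolation (a minimal sketch, not the app code):

```python
import asyncio
from typing import Optional

def cancel_if_running(task: Optional[asyncio.Task]) -> None:
    """Cancel only live tasks; swallow errors, as the hunk above does."""
    if task and not task.done():
        try:
            task.cancel()  # requests cancellation; the task receives CancelledError
        except Exception as e:
            print(f"Error cancelling task: {e}")

async def main() -> None:
    task = asyncio.create_task(asyncio.sleep(10))
    cancel_if_running(task)
    try:
        await task
    except asyncio.CancelledError:
        print("task cancelled cleanly")

asyncio.run(main())
```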
@@ -537,20 +576,25 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             pass
 
     async def update_messages_ui(self) -> None: # Keep SimpleChatApp update_messages_ui
-        """Update the messages UI.""" # Keep SimpleChatApp update_messages_ui docstring
+        """Update the messages UI with improved stability.""" # Keep SimpleChatApp update_messages_ui docstring
         # Clear existing messages # Keep SimpleChatApp update_messages_ui
         messages_container = self.query_one("#messages-container") # Keep SimpleChatApp update_messages_ui
         messages_container.remove_children() # Keep SimpleChatApp update_messages_ui
 
-        #
+        # Batch add all messages first without scrolling or refreshing between each mount
+        # This avoids unnecessary layout shifts while adding messages
         for message in self.messages: # Keep SimpleChatApp update_messages_ui
             display = MessageDisplay(message, highlight_code=CONFIG["highlight_code"]) # Keep SimpleChatApp update_messages_ui
             messages_container.mount(display) # Keep SimpleChatApp update_messages_ui
-
-
-
-        #
+
+        # Perform a single refresh and scroll after mounting all messages
+        # This significantly reduces the visual bouncing effect
+        # A small delay before scrolling helps ensure stable layout
+        await asyncio.sleep(0.05) # Single delay after all messages are mounted
         messages_container.scroll_end(animate=False) # Keep SimpleChatApp update_messages_ui
+
+        # Use layout=False refresh if possible to further reduce bouncing
+        self.refresh(layout=False)
 
     async def on_input_submitted(self, event: Input.Submitted) -> None: # Keep SimpleChatApp on_input_submitted
         """Handle input submission (Enter key in the main input).""" # Keep SimpleChatApp on_input_submitted docstring
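The rewritten `update_messages_ui` mounts every widget first and only then performs one short settle delay, one scroll, and one `layout=False` refresh, instead of scrolling after each mount. The structure in isolation, with a trivial stand-in for the Textual container so the sketch runs anywhere:

```python
import asyncio

class FakeContainer:
    """Stand-in for Textual's messages container; only what the pattern needs."""
    def __init__(self):
        self.children = []
        self.scrolls = 0
    def mount(self, widget):
        self.children.append(widget)
    def scroll_end(self, animate=False):
        self.scrolls += 1

async def update_messages_ui(container: FakeContainer, messages: list) -> None:
    # Batch-mount everything first: no scroll or refresh between mounts.
    for message in messages:
        container.mount(message)
    # One small settle delay, then a single scroll at the end.
    await asyncio.sleep(0.05)
    container.scroll_end(animate=False)

container = FakeContainer()
asyncio.run(update_messages_ui(container, ["msg1", "msg2", "msg3"]))
assert container.scrolls == 1  # one scroll for N messages, not N scrolls
```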
@@ -588,6 +632,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # If this is the first message and dynamic titles are enabled, generate one
         if is_first_message and self.current_conversation and CONFIG.get("generate_dynamic_titles", True):
             log("First message detected, generating title...")
+            debug_log("First message detected, attempting to generate conversation title")
             title_generation_in_progress = True # Use a local flag
             loading = self.query_one("#loading-indicator")
             loading.remove_class("hidden") # Show loading for title gen
@@ -595,13 +640,71 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
            try:
                # Get appropriate client
                model = self.selected_model
+                debug_log(f"Selected model for title generation: '{model}'")
+
+                # Check if model is valid
+                if not model:
+                    debug_log("Model is empty, falling back to default")
+                    # Fallback to a safe default model - preferring OpenAI if key exists
+                    if OPENAI_API_KEY:
+                        model = "gpt-3.5-turbo"
+                        debug_log("Falling back to OpenAI gpt-3.5-turbo for title generation")
+                    elif ANTHROPIC_API_KEY:
+                        model = "claude-instant-1.2"
+                        debug_log("Falling back to Anthropic claude-instant-1.2 for title generation")
+                    else:
+                        # Last resort - check for a common Ollama model
+                        try:
+                            from app.api.ollama import OllamaClient
+                            ollama = OllamaClient()
+                            models = await ollama.get_available_models()
+                            if models and len(models) > 0:
+                                debug_log(f"Found {len(models)} Ollama models, using first one")
+                                model = models[0].get("id", "llama3")
+                            else:
+                                model = "llama3" # Common default
+                                debug_log(f"Falling back to Ollama model: {model}")
+                        except Exception as ollama_err:
+                            debug_log(f"Error getting Ollama models: {str(ollama_err)}")
+                            model = "llama3" # Final fallback
+                            debug_log("Final fallback to llama3")
+
+                debug_log(f"Getting client for model: {model}")
                client = BaseModelClient.get_client_for_model(model)
+
                if client is None:
-
+                    debug_log(f"No client available for model: {model}, trying to initialize")
+                    # Try to determine client type and initialize manually
+                    client_type = BaseModelClient.get_client_type_for_model(model)
+                    if client_type:
+                        debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
+                        try:
+                            client = client_type()
+                            debug_log("Client initialized successfully")
+                        except Exception as init_err:
+                            debug_log(f"Error initializing client: {str(init_err)}")
+
+                    if client is None:
+                        debug_log("Could not initialize client, falling back to safer model")
+                        # Try a different model as last resort
+                        if OPENAI_API_KEY:
+                            from app.api.openai import OpenAIClient
+                            client = OpenAIClient()
+                            model = "gpt-3.5-turbo"
+                            debug_log("Falling back to OpenAI for title generation")
+                        elif ANTHROPIC_API_KEY:
+                            from app.api.anthropic import AnthropicClient
+                            client = AnthropicClient()
+                            model = "claude-instant-1.2"
+                            debug_log("Falling back to Anthropic for title generation")
+                        else:
+                            raise Exception("No valid API clients available for title generation")
 
                # Generate title
                log(f"Calling generate_conversation_title with model: {model}")
+                debug_log(f"Calling generate_conversation_title with model: {model}")
                title = await generate_conversation_title(content, model, client)
+                debug_log(f"Generated title: {title}")
                log(f"Generated title: {title}")
 
                # Update conversation title in database
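The fallback ladder added above (OpenAI key → Anthropic key → first available Ollama model → bare `llama3`) reappears almost verbatim in `generate_response` later in this diff. Distilled into a single helper for reference — the name and signature are illustrative only, since the shipped code keeps the logic inline:

```python
import asyncio

async def pick_fallback_model(openai_key, anthropic_key, list_ollama_models):
    """Illustrative distillation of the inline fallback ladder in this diff."""
    if openai_key:
        return "gpt-3.5-turbo"
    if anthropic_key:
        return "claude-instant-1.2"
    try:
        models = await list_ollama_models()
        if models:
            return models[0].get("id", "llama3")
    except Exception:
        pass  # the shipped code logs the error before falling through
    return "llama3"  # final fallback

async def no_ollama():  # simulate an unreachable Ollama daemon
    raise ConnectionError("ollama not running")

print(asyncio.run(pick_fallback_model("", "", no_ollama)))  # -> llama3
```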
@@ -616,10 +719,17 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
 
                # Update conversation object
                self.current_conversation.title = title
+
+                # IMPORTANT: Save the successful model for consistency
+                # If the title was generated with a different model than initially selected,
+                # update the selected_model to match so the response uses the same model
+                debug_log(f"Using same model for chat response: '{model}'")
+                self.selected_model = model
 
                self.notify(f"Conversation title set to: {title}", severity="information", timeout=3)
 
            except Exception as e:
+                debug_log(f"Failed to generate title: {str(e)}")
                log.error(f"Failed to generate title: {str(e)}")
                self.notify(f"Failed to generate title: {str(e)}", severity="warning")
            finally:
@@ -628,7 +738,13 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                # This check might be redundant if generate_response always shows it anyway
                if not self.is_generating:
                    loading.add_class("hidden")
-
+
+                # Small delay to ensure state is updated
+                await asyncio.sleep(0.1)
+
+                # Log just before generate_response call
+                debug_log(f"About to call generate_response with model: '{self.selected_model}'")
+
            # Generate AI response (will set self.is_generating and handle loading indicator)
            await self.generate_response()
 
@@ -637,39 +753,148 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
 
    async def generate_response(self) -> None:
        """Generate an AI response using a non-blocking worker."""
+        # Import debug_log function from main
+        debug_log(f"Entering generate_response method")
+
        if not self.current_conversation or not self.messages:
+            debug_log("No current conversation or messages, returning")
            return
 
        self.is_generating = True
        log("Setting is_generating to True")
+        debug_log("Setting is_generating to True")
        loading = self.query_one("#loading-indicator")
        loading.remove_class("hidden")
+
+        # For Ollama models, show the loading indicator immediately
+        from app.api.ollama import OllamaClient
+        debug_log(f"Current selected model: '{self.selected_model}'")
+        client_type = BaseModelClient.get_client_type_for_model(self.selected_model)
+        debug_log(f"Client type: {client_type.__name__ if client_type else 'None'}")
+
+        if self.selected_model and client_type == OllamaClient:
+            log("Ollama model detected, showing immediate loading indicator")
+            debug_log("Ollama model detected, showing immediate loading indicator")
+            loading.add_class("model-loading")
+            # Update the loading indicator text directly
+            loading.update("⚙️ Preparing Ollama model...")
+        else:
+            loading.remove_class("model-loading")
+            # Start with a simple animation pattern that won't cause markup issues
+            self._loading_frame = 0
+            # Stop any existing animation task
+            if self._loading_animation_task and not self._loading_animation_task.done():
+                self._loading_animation_task.cancel()
+            # Start the animation
+            self._loading_animation_task = asyncio.create_task(self._animate_loading_task(loading))
 
        try:
            # Get conversation parameters
-            model
+            # Ensure the model ID is resolved before passing to the API client
+            unresolved_model = self.selected_model
+            model = resolve_model_id(unresolved_model)
+            log(f"Using model for generation: {model} (Resolved from: {unresolved_model})")
            style = self.selected_style
-
-
+
+            debug_log(f"Using model: '{model}', style: '{style}'")
+
+            # Ensure we have a valid model
+            if not model:
+                debug_log("Model is empty, selecting a default model")
+                # Same fallback logic as in autotitling - this ensures consistency
+                if OPENAI_API_KEY:
+                    model = "gpt-3.5-turbo"
+                    debug_log("Falling back to OpenAI gpt-3.5-turbo")
+                elif ANTHROPIC_API_KEY:
+                    model = "claude-instant-1.2"
+                    debug_log("Falling back to Anthropic claude-instant-1.2")
+                else:
+                    # Check for a common Ollama model
+                    try:
+                        ollama = OllamaClient()
+                        models = await ollama.get_available_models()
+                        if models and len(models) > 0:
+                            debug_log(f"Found {len(models)} Ollama models, using first one")
+                            model = models[0].get("id", "llama3")
+                        else:
+                            model = "llama3" # Common default
+                            debug_log(f"Falling back to Ollama model: {model}")
+                    except Exception as ollama_err:
+                        debug_log(f"Error getting Ollama models: {str(ollama_err)}")
+                        model = "llama3" # Final fallback
+                        debug_log("Final fallback to llama3")
+
+            # Convert messages to API format with enhanced error checking
            api_messages = []
-
-
-
-
-
+            debug_log(f"Converting {len(self.messages)} messages to API format")
+
+            for i, msg in enumerate(self.messages):
+                try:
+                    debug_log(f"Processing message {i}: type={type(msg).__name__}, dir={dir(msg)}")
+                    debug_log(f"Adding message to API format: role={msg.role}, content_len={len(msg.content)}")
+
+                    # Create a fully validated message dict
+                    message_dict = {
+                        "role": msg.role if hasattr(msg, 'role') and msg.role else "user",
+                        "content": msg.content if hasattr(msg, 'content') and msg.content else ""
+                    }
+
+                    api_messages.append(message_dict)
+                    debug_log(f"Successfully added message {i}")
+                except Exception as e:
+                    debug_log(f"Error adding message {i} to API format: {str(e)}")
+                    # Create a safe fallback message
+                    fallback_msg = {
+                        "role": "user",
+                        "content": str(msg) if msg is not None else "Error retrieving message content"
+                    }
+                    api_messages.append(fallback_msg)
+                    debug_log(f"Added fallback message for {i}")
+
+            debug_log(f"Prepared {len(api_messages)} messages for API")
 
            # Get appropriate client
+            debug_log(f"Getting client for model: {model}")
            try:
                client = BaseModelClient.get_client_for_model(model)
+                debug_log(f"Client: {client.__class__.__name__ if client else 'None'}")
+
                if client is None:
-
+                    debug_log(f"No client available for model: {model}, trying to initialize")
+                    # Try to determine client type and initialize manually
+                    client_type = BaseModelClient.get_client_type_for_model(model)
+                    if client_type:
+                        debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
+                        try:
+                            client = client_type()
+                            debug_log(f"Successfully initialized {client_type.__name__}")
+                        except Exception as init_err:
+                            debug_log(f"Error initializing client: {str(init_err)}")
+
+                    if client is None:
+                        debug_log("Could not initialize client, falling back to safer model")
+                        # Try a different model as last resort
+                        if OPENAI_API_KEY:
+                            from app.api.openai import OpenAIClient
+                            client = OpenAIClient()
+                            model = "gpt-3.5-turbo"
+                            debug_log("Falling back to OpenAI client")
+                        elif ANTHROPIC_API_KEY:
+                            from app.api.anthropic import AnthropicClient
+                            client = AnthropicClient()
+                            model = "claude-instant-1.2"
+                            debug_log("Falling back to Anthropic client")
+                        else:
+                            raise Exception("No valid API clients available")
            except Exception as e:
+                debug_log(f"Failed to initialize model client: {str(e)}")
                self.notify(f"Failed to initialize model client: {str(e)}", severity="error")
                self.is_generating = False
                loading.add_class("hidden")
                return
 
            # Start streaming response
+            debug_log("Creating assistant message with 'Thinking...'")
            assistant_message = Message(role="assistant", content="Thinking...")
            self.messages.append(assistant_message)
            messages_container = self.query_one("#messages-container")
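The new message-conversion loop in this hunk validates each record with `hasattr` checks and, on failure, substitutes a stringified fallback message rather than aborting the whole request. The same defensive conversion as a standalone function, with a minimal stand-in for `app.models.Message`:

```python
from dataclasses import dataclass

@dataclass
class Message:  # minimal stand-in for app.models.Message
    role: str
    content: str

def to_api_messages(messages):
    """Defensive conversion mirroring the loop in the hunk above."""
    api_messages = []
    for msg in messages:
        try:
            api_messages.append({
                "role": msg.role if hasattr(msg, "role") and msg.role else "user",
                "content": msg.content if hasattr(msg, "content") and msg.content else "",
            })
        except Exception:
            # A record that raises mid-conversion becomes a plain user message
            api_messages.append({"role": "user", "content": str(msg)})
    return api_messages

# Objects without role/content degrade to a blank user message instead of raising:
print(to_api_messages([Message("assistant", "hi"), object()]))
```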
@@ -684,94 +909,191 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
            update_lock = asyncio.Lock()
 
            async def update_ui(content: str):
+                # This function remains the same, called by the worker
                if not self.is_generating:
-
+                    debug_log("update_ui called but is_generating is False, returning.")
                    return
 
                async with update_lock:
                    try:
                        # Clear thinking indicator on first content
                        if assistant_message.content == "Thinking...":
+                            debug_log("First content received, clearing 'Thinking...'")
                            assistant_message.content = ""
 
-                        # Update message with full content
+                        # Update the message object with the full content
                        assistant_message.content = content
-
+
+                        # Update UI with the content - this no longer triggers refresh itself
                        await message_display.update_content(content)
-
-
-
-
-
-
+
+                        # Throttle UI updates to reduce visual jitter and improve performance
+                        # Only refresh visually every ~5 tokens (estimated by content length changes)
+                        content_length = len(content)
+                        do_refresh = (
+                            content_length < 20 or # Always refresh for the first few tokens
+                            content_length % 16 == 0 or # Then periodically
+                            content.endswith("\n") # And on newlines
+                        )
+
+                        if do_refresh:
+                            # Only scroll without full layout recalculation
+                            messages_container.scroll_end(animate=False)
+                            # Light refresh without full layout recalculation
+                            self.refresh(layout=False)
                    except Exception as e:
+                        debug_log(f"Error updating UI: {str(e)}")
                        log.error(f"Error updating UI: {str(e)}")
 
-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-                # Save complete response to database
-                if self.is_generating and full_response:
-                    log("Generation completed normally, saving to database")
-                    self.db.add_message(
-                        self.current_conversation.id,
-                        "assistant",
-                        full_response
-                    )
-
-                # Final UI refresh
-                self.refresh(layout=True)
-
-            except asyncio.CancelledError:
-                log.warning("Generation worker was cancelled")
-                # Remove the incomplete message
-                if self.messages and self.messages[-1].role == "assistant":
-                    self.messages.pop()
-                await self.update_messages_ui()
-                self.notify("Generation stopped by user", severity="warning", timeout=2)
-
-            except Exception as e:
-                log.error(f"Error in generation worker: {str(e)}")
-                self.notify(f"Generation error: {str(e)}", severity="error", timeout=5)
-                # Add error message to UI
-                if self.messages and self.messages[-1].role == "assistant":
-                    self.messages.pop() # Remove thinking message
-                error_msg = f"Error: {str(e)}"
-                self.messages.append(Message(role="assistant", content=error_msg))
-                await self.update_messages_ui()
-
-            finally:
-                # Always clean up state and UI
-                log("Generation worker completed, resetting state")
-                self.is_generating = False
-                self.current_generation_task = None
-                loading = self.query_one("#loading-indicator")
-                loading.add_class("hidden")
-                self.refresh(layout=True)
-                self.query_one("#message-input").focus()
-
-            # Start the worker and keep a reference to it
-            worker = run_generation_worker()
+            # --- Remove the inner run_generation_worker function ---
+
+            # Start the worker directly using the imported function
+            debug_log("Starting generate_streaming_response worker")
+            # Call the @work decorated function directly
+            worker = generate_streaming_response(
+                self,
+                api_messages,
+                model,
+                style,
+                client,
+                update_ui # Pass the callback function
+            )
            self.current_generation_task = worker
-
+            # Worker completion will be handled by on_worker_state_changed
+
        except Exception as e:
-
+            # This catches errors during the *setup* before the worker starts
+            debug_log(f"Error setting up generation worker: {str(e)}")
+            log.error(f"Error setting up generation worker: {str(e)}")
            self.notify(f"Error: {str(e)}", severity="error")
+            # Ensure cleanup if setup fails
+            self.is_generating = False # Reset state
+            self.current_generation_task = None
+            if self._loading_animation_task and not self._loading_animation_task.done():
+                self._loading_animation_task.cancel()
+            self._loading_animation_task = None
+            try:
+                loading = self.query_one("#loading-indicator")
+                loading.add_class("hidden")
+                self.query_one("#message-input").focus()
+            except Exception:
+                pass # Ignore UI errors during cleanup
+
+    # Rename this method slightly to avoid potential conflicts and clarify purpose
+    async def _handle_generation_result(self, worker: Worker[Optional[str]]) -> None:
+        """Handles the result of the generation worker (success, error, cancelled)."""
+        # Import debug_log again for safety within this callback context
+        try:
+            from app.main import debug_log
+        except ImportError:
+            debug_log = lambda msg: None
+
+        debug_log(f"Generation worker completed. State: {worker.state}")
+
+        try:
+            if worker.state == "cancelled":
+                debug_log("Generation worker was cancelled")
+                log.warning("Generation worker was cancelled")
+                # Remove the incomplete message
+                if self.messages and self.messages[-1].role == "assistant":
+                    debug_log("Removing incomplete assistant message")
+                    self.messages.pop()
+                await self.update_messages_ui()
+                self.notify("Generation stopped by user", severity="warning", timeout=2)
+
+            elif worker.state == "error":
+                error = worker.error
+                debug_log(f"Error in generation worker: {error}")
+                log.error(f"Error in generation worker: {error}")
+                self.notify(f"Generation error: {error}", severity="error", timeout=5)
+                # Add error message to UI
+                if self.messages and self.messages[-1].role == "assistant":
+                    debug_log("Removing thinking message")
+                    self.messages.pop() # Remove thinking message
+                error_msg = f"Error: {error}"
+                debug_log(f"Adding error message: {error_msg}")
+                self.messages.append(Message(role="assistant", content=error_msg))
+                await self.update_messages_ui()
+
+            elif worker.state == "success":
+                full_response = worker.result
+                debug_log("Generation completed normally, saving to database")
+                log("Generation completed normally, saving to database")
+                # Save complete response to database (check if response is valid)
+                if full_response and isinstance(full_response, str):
+                    self.db.add_message(
+                        self.current_conversation.id,
+                        "assistant",
+                        full_response
+                    )
+                    # Update the final message object content (optional, UI should be up-to-date)
+                    if self.messages and self.messages[-1].role == "assistant":
+                        self.messages[-1].content = full_response
+                else:
+                    debug_log("Worker finished successfully but response was empty or invalid.")
+                    # Handle case where 'Thinking...' might still be the last message
+                    if self.messages and self.messages[-1].role == "assistant" and self.messages[-1].content == "Thinking...":
+                        self.messages.pop() # Remove 'Thinking...' if no content arrived
+                        await self.update_messages_ui()
+
+            # Final UI refresh with minimal layout recalculation
+            # Use layout=False to prevent UI jumping at the end
+            self.refresh(layout=False)
+            await asyncio.sleep(0.1) # Allow UI to stabilize
+            messages_container = self.query_one("#messages-container")
+            messages_container.scroll_end(animate=False)
+
+        except Exception as e:
+            # Catch any unexpected errors during the callback itself
+            debug_log(f"Error in on_generation_complete callback: {str(e)}")
+            log.error(f"Error in on_generation_complete callback: {str(e)}")
+            self.notify(f"Internal error handling response: {str(e)}", severity="error")
+
+        finally:
+            # Always clean up state and UI, regardless of worker outcome
+            debug_log("Cleaning up after generation worker")
            self.is_generating = False
-
-
-
+            self.current_generation_task = None
+
+            # Stop the animation task
+            if self._loading_animation_task and not self._loading_animation_task.done():
+                debug_log("Cancelling loading animation task")
+                self._loading_animation_task.cancel()
+            self._loading_animation_task = None
+
+            try:
+                loading = self.query_one("#loading-indicator")
+                loading.add_class("hidden")
+                self.refresh(layout=True) # Refresh after hiding loading
+                self.query_one("#message-input").focus()
+            except Exception as ui_err:
+                debug_log(f"Error during final UI cleanup: {str(ui_err)}")
+                log.error(f"Error during final UI cleanup: {str(ui_err)}")
+
+    @on(Worker.StateChanged)
+    async def on_worker_state_changed(self, event: Worker.StateChanged) -> None:
+        """Handle worker state changes."""
+        # Import debug_log again for safety within this callback context
+        try:
+            from app.main import debug_log
+        except ImportError:
+            debug_log = lambda msg: None
+
+        debug_log(f"Worker {event.worker.name} state changed to {event.state}")
+
+        # Check if this is the generation worker we are tracking
+        if event.worker is self.current_generation_task:
+            # Check if the worker has reached a final state by comparing against enum values
+            final_states = {WorkerState.SUCCESS, WorkerState.ERROR, WorkerState.CANCELLED}
+            if event.state in final_states:
+                debug_log(f"Generation worker ({event.worker.name}) reached final state: {event.state}")
+                # Call the handler function
+                await self._handle_generation_result(event.worker)
+            else:
+                debug_log(f"Generation worker ({event.worker.name}) is in intermediate state: {event.state}")
+        else:
+            debug_log(f"State change event from unrelated worker: {event.worker.name}")
+
 
    def on_model_selector_model_selected(self, event: ModelSelector.ModelSelected) -> None: # Keep SimpleChatApp on_model_selector_model_selected
        """Handle model selection""" # Keep SimpleChatApp on_model_selector_model_selected docstring
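This is the structural change of the release: result handling moves out of the removed inner `run_generation_worker` and into Textual's `Worker.StateChanged` event, where the app reacts only to the one worker it tracks and only once that worker reaches a terminal state. The dispatch condition in isolation, with a small enum standing in for `textual.worker.WorkerState`:

```python
from enum import Enum, auto

class WorkerState(Enum):  # stand-in mirroring textual.worker.WorkerState
    PENDING = auto()
    RUNNING = auto()
    SUCCESS = auto()
    ERROR = auto()
    CANCELLED = auto()

FINAL_STATES = {WorkerState.SUCCESS, WorkerState.ERROR, WorkerState.CANCELLED}

def should_handle(event_worker, tracked_worker, state: WorkerState) -> bool:
    """Mirror of the checks in on_worker_state_changed: right worker, final state."""
    return event_worker is tracked_worker and state in FINAL_STATES

tracked = object()
assert should_handle(tracked, tracked, WorkerState.SUCCESS)
assert not should_handle(tracked, tracked, WorkerState.RUNNING)   # intermediate
assert not should_handle(object(), tracked, WorkerState.SUCCESS)  # unrelated worker
```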
@@ -862,8 +1184,23 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
            await self.update_messages_ui() # Keep SimpleChatApp view_chat_history
 
            # Update model and style selectors # Keep SimpleChatApp view_chat_history
-
+            # Resolve the model ID loaded from the conversation data
+            loaded_model_id = self.current_conversation.model
+            resolved_model_id = resolve_model_id(loaded_model_id)
+            log(f"Loaded model ID from history: {loaded_model_id}, Resolved to: {resolved_model_id}")
+
+            self.selected_model = resolved_model_id # Use the resolved ID
            self.selected_style = self.current_conversation.style # Keep SimpleChatApp view_chat_history
+
+            # Update settings panel selectors if they exist
+            try:
+                model_selector = self.query_one(ModelSelector)
+                model_selector.set_selected_model(self.selected_model) # Use resolved ID here too
+                style_selector = self.query_one(StyleSelector)
+                style_selector.set_selected_style(self.selected_style)
+            except Exception as e:
+                log(f"Error updating selectors after history load: {e}")
+
            self.update_app_info() # Update info bar after loading history
 
        self.push_screen(HistoryScreen(conversations, handle_selection)) # Keep SimpleChatApp view_chat_history
@@ -879,6 +1216,53 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
        """Open the Ollama model browser screen."""
        # Always trigger regardless of focus
        self.push_screen(ModelBrowserScreen())
+
+    async def _animate_loading_task(self, loading_widget: Static) -> None:
+        """Animate the loading indicator with a simple text animation"""
+        try:
+            # Animation frames (simple text animation)
+            frames = [
+                "▪▫▫ Generating response...",
+                "▪▪▫ Generating response...",
+                "▪▪▪ Generating response...",
+                "▫▪▪ Generating response...",
+                "▫▫▪ Generating response...",
+                "▫▫▫ Generating response..."
+            ]
+
+            while self.is_generating:
+                try:
+                    # Update the loading text with safety checks
+                    if frames and len(frames) > 0:
+                        frame_idx = self._loading_frame % len(frames)
+                        loading_widget.update(frames[frame_idx])
+                    else:
+                        # Fallback if frames is empty
+                        loading_widget.update("▪▪▪ Generating response...")
+
+                    self._loading_frame += 1
+                    # Small delay between frames
+                    await asyncio.sleep(0.3)
+                except Exception as e:
+                    # If any error occurs, use a simple fallback and continue
+                    log.error(f"Animation frame error: {str(e)}")
+                    try:
+                        loading_widget.update("▪▪▪ Generating response...")
+                    except:
+                        pass
+                    await asyncio.sleep(0.3)
+
+        except asyncio.CancelledError:
+            # Normal cancellation
+            pass
+        except Exception as e:
+            # Log any errors but don't crash
+            log.error(f"Error in loading animation: {str(e)}")
+            # Reset to basic text
+            try:
+                loading_widget.update("▪▪▪ Generating response...")
+            except:
+                pass
 
    def action_settings(self) -> None: # Modify SimpleChatApp action_settings
        """Action to open/close settings panel via key binding."""
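`_animate_loading_task` is a plain polling loop: while the generating flag is set, write the next frame (wrapping the index with modulo) and sleep, with `CancelledError` treated as the normal shutdown path. A trimmed, runnable sketch of the loop:

```python
import asyncio

FRAMES = ["▪▫▫", "▪▪▫", "▪▪▪", "▫▪▪", "▫▫▪", "▫▫▫"]

async def animate(write_frame, is_running, delay: float = 0.3) -> None:
    """Trimmed version of the animation loop; the frame index wraps via modulo."""
    frame = 0
    try:
        while is_running():
            write_frame(f"{FRAMES[frame % len(FRAMES)]} Generating response...")
            frame += 1
            await asyncio.sleep(delay)
    except asyncio.CancelledError:
        pass  # cancellation is the expected way to stop the loop

async def demo() -> None:
    frames_seen = []
    task = asyncio.create_task(animate(frames_seen.append, lambda: True, delay=0.01))
    await asyncio.sleep(0.05)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
    print(frames_seen[:3])  # first few frames, in order

asyncio.run(demo())
```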
@@ -907,6 +1291,10 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
        if not self.current_conversation:
            self.notify("No active conversation", severity="warning")
            return
+
+        # Create and mount the title input modal
+        modal = TitleInputModal(self.current_conversation.title)
+        await self.mount(modal)
 
# --- Define the Modal Class ---
class ConfirmDialog(Static):
@@ -983,10 +1371,6 @@ class TitleInputModal(Static):
        """Focus the input when the modal appears."""
        self.query_one("#title-input", Input).focus()
 
-        # --- Show the modal ---
-        modal = TitleInputModal(self.current_conversation.title)
-        await self.mount(modal) # Use await for mounting
-
    async def run_modal(self, modal_type: str, *args, **kwargs) -> bool:
        """Run a modal dialog and return the result."""
        if modal_type == "confirm_dialog":
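Read together, the `-907` and `-983` hunks are one bug fix rather than two independent edits: in 0.2.9 the `modal = TitleInputModal(self.current_conversation.title)` / `await self.mount(modal)` lines sat inside the `TitleInputModal` class body, where `self` is the modal itself and has no `current_conversation` attribute; 0.2.98 moves them into the app action that owns the conversation. The shape of the fix, with minimal stand-ins for the Textual pieces:

```python
import asyncio

class TitleInputModal:  # stand-in: the real one is a Textual Static subclass
    def __init__(self, current_title: str):
        self.current_title = current_title

class App:  # stand-in for SimpleChatApp
    def __init__(self):
        self.current_conversation = type("Conv", (), {"title": "Chat"})()
        self.mounted = []

    async def mount(self, widget):  # stand-in for Textual's App.mount
        self.mounted.append(widget)

    async def action_update_title(self):
        # The modal is built by the object that owns current_conversation...
        modal = TitleInputModal(self.current_conversation.title)
        await self.mount(modal)  # ...not by code inside the modal class itself

app = App()
asyncio.run(app.action_update_title())
assert app.mounted[0].current_title == "Chat"
```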
@@ -1058,4 +1442,4 @@ def main(initial_text: Optional[str] = typer.Argument(None, help="Initial text t
    app.run() # Keep main function
 
if __name__ == "__main__": # Keep main function entry point
-    typer.run(main) # Keep main function entry point
+    typer.run(main) # Keep main function entry point