chat-console 0.2.98__tar.gz → 0.2.99__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. {chat_console-0.2.98/chat_console.egg-info → chat_console-0.2.99}/PKG-INFO +1 -1
  2. {chat_console-0.2.98 → chat_console-0.2.99}/app/__init__.py +1 -1
  3. {chat_console-0.2.98 → chat_console-0.2.99}/app/api/anthropic.py +8 -1
  4. {chat_console-0.2.98 → chat_console-0.2.99}/app/api/base.py +2 -2
  5. {chat_console-0.2.98 → chat_console-0.2.99}/app/api/ollama.py +9 -3
  6. {chat_console-0.2.98 → chat_console-0.2.99}/app/api/openai.py +8 -1
  7. {chat_console-0.2.98 → chat_console-0.2.99}/app/main.py +56 -31
  8. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/chat_interface.py +13 -9
  9. {chat_console-0.2.98 → chat_console-0.2.99}/app/utils.py +5 -3
  10. {chat_console-0.2.98 → chat_console-0.2.99/chat_console.egg-info}/PKG-INFO +1 -1
  11. {chat_console-0.2.98 → chat_console-0.2.99}/LICENSE +0 -0
  12. {chat_console-0.2.98 → chat_console-0.2.99}/README.md +0 -0
  13. {chat_console-0.2.98 → chat_console-0.2.99}/app/api/__init__.py +0 -0
  14. {chat_console-0.2.98 → chat_console-0.2.99}/app/config.py +0 -0
  15. {chat_console-0.2.98 → chat_console-0.2.99}/app/database.py +0 -0
  16. {chat_console-0.2.98 → chat_console-0.2.99}/app/models.py +0 -0
  17. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/__init__.py +0 -0
  18. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/chat_list.py +0 -0
  19. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/model_browser.py +0 -0
  20. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/model_selector.py +0 -0
  21. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/search.py +0 -0
  22. {chat_console-0.2.98 → chat_console-0.2.99}/app/ui/styles.py +0 -0
  23. {chat_console-0.2.98 → chat_console-0.2.99}/chat_console.egg-info/SOURCES.txt +0 -0
  24. {chat_console-0.2.98 → chat_console-0.2.99}/chat_console.egg-info/dependency_links.txt +0 -0
  25. {chat_console-0.2.98 → chat_console-0.2.99}/chat_console.egg-info/entry_points.txt +0 -0
  26. {chat_console-0.2.98 → chat_console-0.2.99}/chat_console.egg-info/requires.txt +0 -0
  27. {chat_console-0.2.98 → chat_console-0.2.99}/chat_console.egg-info/top_level.txt +0 -0
  28. {chat_console-0.2.98 → chat_console-0.2.99}/setup.cfg +0 -0
  29. {chat_console-0.2.98 → chat_console-0.2.99}/setup.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.98
+Version: 0.2.99
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
app/__init__.py
@@ -3,4 +3,4 @@ Chat CLI
 A command-line interface for chatting with various LLM providers like ChatGPT and Claude.
 """
 
-__version__ = "0.2.98"
+__version__ = "0.2.99"
app/api/anthropic.py
@@ -7,7 +7,14 @@ from ..utils import resolve_model_id # Import the resolve_model_id function
 
 class AnthropicClient(BaseModelClient):
     def __init__(self):
-        self.client = anthropic.AsyncAnthropic(api_key=ANTHROPIC_API_KEY)
+        self.client = None # Initialize in create()
+
+    @classmethod
+    async def create(cls) -> 'AnthropicClient':
+        """Create a new instance with async initialization."""
+        instance = cls()
+        instance.client = anthropic.AsyncAnthropic(api_key=ANTHROPIC_API_KEY)
+        return instance
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
         """Prepare messages for Claude API"""
app/api/base.py
@@ -71,7 +71,7 @@ class BaseModelClient(ABC):
         return None
 
     @staticmethod
-    def get_client_for_model(model_name: str) -> 'BaseModelClient':
+    async def get_client_for_model(model_name: str) -> 'BaseModelClient':
         """Factory method to get appropriate client for model"""
         from ..config import CONFIG, AVAILABLE_PROVIDERS
         from .anthropic import AnthropicClient
@@ -118,7 +118,7 @@ class BaseModelClient(ABC):
         # Return appropriate client
         if provider == "ollama":
-            return OllamaClient()
+            return await OllamaClient.create()
         elif provider == "openai":
             return OpenAIClient()
         elif provider == "anthropic":
app/api/ollama.py
@@ -3,7 +3,6 @@ import asyncio
 import json
 import logging
 import os
-import time
 from datetime import datetime, timedelta
 from pathlib import Path
 from typing import List, Dict, Any, Optional, Generator, AsyncGenerator
@@ -15,7 +14,6 @@ logger = logging.getLogger(__name__)
 class OllamaClient(BaseModelClient):
     def __init__(self):
         from ..config import OLLAMA_BASE_URL
-        from ..utils import ensure_ollama_running
         self.base_url = OLLAMA_BASE_URL.rstrip('/')
         logger.info(f"Initializing Ollama client with base URL: {self.base_url}")
 
@@ -27,10 +25,18 @@ class OllamaClient(BaseModelClient):
         # Path to the cached models file
         self.models_cache_path = Path(__file__).parent.parent / "data" / "ollama-models.json"
+
+    @classmethod
+    async def create(cls) -> 'OllamaClient':
+        """Factory method to create and initialize an OllamaClient instance"""
+        from ..utils import ensure_ollama_running
+        client = cls()
 
         # Try to start Ollama if not running
-        if not ensure_ollama_running():
+        if not await ensure_ollama_running():
             raise Exception(f"Failed to start Ollama server. Please ensure Ollama is installed and try again.")
+
+        return client
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> str:
         """Convert chat messages to Ollama format"""
app/api/openai.py
@@ -5,7 +5,14 @@ from ..config import OPENAI_API_KEY
 
 class OpenAIClient(BaseModelClient):
     def __init__(self):
-        self.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+        self.client = None # Initialize in create()
+
+    @classmethod
+    async def create(cls) -> 'OpenAIClient':
+        """Create a new instance with async initialization."""
+        instance = cls()
+        instance.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+        return instance
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
         """Prepare messages for OpenAI API"""
app/main.py
@@ -6,6 +6,7 @@ import os
 import asyncio
 import typer
 import logging
+import time
 from typing import List, Optional, Callable, Awaitable
 from datetime import datetime
 
@@ -161,6 +162,15 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
     TITLE = "Chat Console"
     SUB_TITLE = "AI Chat Interface" # Keep SimpleChatApp SUB_TITLE
     DARK = True # Keep SimpleChatApp DARK
+
+    # Add better terminal handling to fix UI glitches
+    SCREENS = {}
+
+    # Force full screen mode and prevent background terminal showing through
+    FULL_SCREEN = True
+
+    # Force capturing all mouse events for better stability
+    CAPTURE_MOUSE = True
 
     # Ensure the log directory exists in a standard cache location
     log_dir = os.path.expanduser("~/.cache/chat-cli")
@@ -424,7 +434,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Check for available models # Keep SimpleChatApp on_mount
         from app.api.ollama import OllamaClient # Keep SimpleChatApp on_mount
         try: # Keep SimpleChatApp on_mount
-            ollama = OllamaClient() # Keep SimpleChatApp on_mount
+            ollama = await OllamaClient.create() # Keep SimpleChatApp on_mount
             models = await ollama.get_available_models() # Keep SimpleChatApp on_mount
             if not models: # Keep SimpleChatApp on_mount
                 api_issues.append("- No Ollama models found") # Keep SimpleChatApp on_mount
@@ -511,7 +521,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Get the client for the current model first and cancel the connection
         try:
             model = self.selected_model
-            client = BaseModelClient.get_client_for_model(model)
+            client = await BaseModelClient.get_client_for_model(model)
 
             # Call the client's cancel method if it's supported
             if hasattr(client, 'cancel_stream'):
@@ -581,19 +591,21 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         messages_container = self.query_one("#messages-container") # Keep SimpleChatApp update_messages_ui
         messages_container.remove_children() # Keep SimpleChatApp update_messages_ui
 
-        # Batch add all messages first without scrolling or refreshing between each mount
-        # This avoids unnecessary layout shifts while adding messages
-        for message in self.messages: # Keep SimpleChatApp update_messages_ui
-            display = MessageDisplay(message, highlight_code=CONFIG["highlight_code"]) # Keep SimpleChatApp update_messages_ui
-            messages_container.mount(display) # Keep SimpleChatApp update_messages_ui
+        # Temporarily disable automatic refresh while mounting messages
+        # This avoids excessive layout calculations and reduces flickering
+        with self.batch_update():
+            # Batch add all messages first without any refresh/layout
+            for message in self.messages: # Keep SimpleChatApp update_messages_ui
+                display = MessageDisplay(message, highlight_code=CONFIG["highlight_code"]) # Keep SimpleChatApp update_messages_ui
+                messages_container.mount(display) # Keep SimpleChatApp update_messages_ui
+
+        # A small delay after mounting all messages helps with layout stability
+        await asyncio.sleep(0.05)
 
-        # Perform a single refresh and scroll after mounting all messages
-        # This significantly reduces the visual bouncing effect
-        # A small delay before scrolling helps ensure stable layout
-        await asyncio.sleep(0.05) # Single delay after all messages are mounted
+        # Scroll after all messages are added without animation
         messages_container.scroll_end(animate=False) # Keep SimpleChatApp update_messages_ui
 
-        # Use layout=False refresh if possible to further reduce bouncing
+        # Minimal refresh without full layout recalculation
         self.refresh(layout=False)
 
     async def on_input_submitted(self, event: Input.Submitted) -> None: # Keep SimpleChatApp on_input_submitted
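
Textual's App.batch_update() context manager defers repaints until the with-block exits, so the loop above triggers at most one repaint however many messages are mounted. The pattern generalizes to any bulk DOM mutation (a hedged sketch; widget and container names are illustrative):

    with app.batch_update():              # no repaints inside this block
        container.remove_children()
        for item in items:
            container.mount(ItemWidget(item))
    # a single repaint happens here, after the block exits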
@@ -656,7 +668,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Last resort - check for a common Ollama model
         try:
             from app.api.ollama import OllamaClient
-            ollama = OllamaClient()
+            ollama = await OllamaClient.create()
             models = await ollama.get_available_models()
             if models and len(models) > 0:
                 debug_log(f"Found {len(models)} Ollama models, using first one")
@@ -670,7 +682,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log("Final fallback to llama3")
 
         debug_log(f"Getting client for model: {model}")
-        client = BaseModelClient.get_client_for_model(model)
+        client = await BaseModelClient.get_client_for_model(model)
 
         if client is None:
             debug_log(f"No client available for model: {model}, trying to initialize")
@@ -679,7 +691,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             if client_type:
                 debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
                 try:
-                    client = client_type()
+                    client = await client_type.create()
                     debug_log("Client initialized successfully")
                 except Exception as init_err:
                     debug_log(f"Error initializing client: {str(init_err)}")
@@ -689,12 +701,12 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                     # Try a different model as last resort
                     if OPENAI_API_KEY:
                         from app.api.openai import OpenAIClient
-                        client = OpenAIClient()
+                        client = await OpenAIClient.create()
                         model = "gpt-3.5-turbo"
                         debug_log("Falling back to OpenAI for title generation")
                     elif ANTHROPIC_API_KEY:
                         from app.api.anthropic import AnthropicClient
-                        client = AnthropicClient()
+                        client = await AnthropicClient.create()
                         model = "claude-instant-1.2"
                         debug_log("Falling back to Anthropic for title generation")
                     else:
@@ -811,7 +823,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         else:
             # Check for a common Ollama model
             try:
-                ollama = OllamaClient()
+                ollama = await OllamaClient.create()
                 models = await ollama.get_available_models()
                 if models and len(models) > 0:
                     debug_log(f"Found {len(models)} Ollama models, using first one")
@@ -856,7 +868,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         # Get appropriate client
         debug_log(f"Getting client for model: {model}")
         try:
-            client = BaseModelClient.get_client_for_model(model)
+            client = await BaseModelClient.get_client_for_model(model)
             debug_log(f"Client: {client.__class__.__name__ if client else 'None'}")
 
             if client is None:
@@ -866,7 +878,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 if client_type:
                     debug_log(f"Found client type {client_type.__name__} for {model}, initializing")
                     try:
-                        client = client_type()
+                        client = await client_type.create()
                         debug_log(f"Successfully initialized {client_type.__name__}")
                     except Exception as init_err:
                         debug_log(f"Error initializing client: {str(init_err)}")
@@ -876,12 +888,12 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                         # Try a different model as last resort
                         if OPENAI_API_KEY:
                             from app.api.openai import OpenAIClient
-                            client = OpenAIClient()
+                            client = await OpenAIClient.create()
                             model = "gpt-3.5-turbo"
                             debug_log("Falling back to OpenAI client")
                         elif ANTHROPIC_API_KEY:
                             from app.api.anthropic import AnthropicClient
-                            client = AnthropicClient()
+                            client = await AnthropicClient.create()
                             model = "claude-instant-1.2"
                             debug_log("Falling back to Anthropic client")
                         else:
@@ -907,6 +919,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
 
         # Stream chunks to the UI with synchronization
         update_lock = asyncio.Lock()
+        last_refresh_time = time.time() # Initialize refresh throttling timer
 
         async def update_ui(content: str):
             # This function remains the same, called by the worker
@@ -914,6 +927,9 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 debug_log("update_ui called but is_generating is False, returning.")
                 return
 
+            # Make last_refresh_time accessible in inner scope
+            nonlocal last_refresh_time
+
             async with update_lock:
                 try:
                     # Clear thinking indicator on first content
@@ -927,20 +943,29 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                     # Update UI with the content - this no longer triggers refresh itself
                     await message_display.update_content(content)
 
-                    # Throttle UI updates to reduce visual jitter and improve performance
-                    # Only refresh visually every ~5 tokens (estimated by content length changes)
+                    # Much more aggressive throttling of UI updates to eliminate visual jitter
+                    # By using a larger modulo value, we significantly reduce refresh frequency
+                    # This improves stability at the cost of slightly choppier animations
                     content_length = len(content)
+
+                    # Define some key refresh points
+                    new_paragraph = content.endswith("\n") and content.count("\n") > 0
                     do_refresh = (
-                        content_length < 20 or # Always refresh for the first few tokens
-                        content_length % 16 == 0 or # Then periodically
-                        content.endswith("\n") # And on newlines
+                        content_length < 5 or # Only first few tokens
+                        content_length % 64 == 0 or # Very infrequent periodic updates
+                        new_paragraph # Refresh on paragraph breaks
                     )
 
-                    if do_refresh:
-                        # Only scroll without full layout recalculation
+                    # Check if it's been enough time since last refresh (250ms minimum)
+                    current_time = time.time()
+                    time_since_refresh = current_time - last_refresh_time
+
+                    if do_refresh and time_since_refresh > 0.25:
+                        # Store the time we did the refresh
+                        last_refresh_time = current_time
+                        # Skip layout updates completely during streaming
+                        # Just ensure content is still visible by scrolling
                         messages_container.scroll_end(animate=False)
-                        # Light refresh without full layout recalculation
-                        self.refresh(layout=False)
                 except Exception as e:
                     debug_log(f"Error updating UI: {str(e)}")
                     log.error(f"Error updating UI: {str(e)}")
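
The new throttle gates refreshes twice: a cheap content-based trigger (the first tokens, every 64th character, or a paragraph break) must fire, and at least 250ms must have elapsed since the last refresh. The same logic as a standalone helper (a sketch; names are illustrative):

    import time

    REFRESH_INTERVAL = 0.25  # seconds, matching the 250ms floor above
    _last_refresh = 0.0

    def should_refresh(content: str) -> bool:
        """Return True only when a visual refresh is worth its cost."""
        global _last_refresh
        trigger = (
            len(content) < 5                                 # first few tokens
            or len(content) % 64 == 0                        # infrequent periodic updates
            or (content.endswith("\n") and "\n" in content)  # paragraph break
        )
        now = time.time()
        if trigger and (now - _last_refresh) > REFRESH_INTERVAL:
            _last_refresh = now
            return True
        return False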
app/ui/chat_interface.py
@@ -1,5 +1,4 @@
 from typing import List, Dict, Any, Optional, Callable, Awaitable
-import time
 import asyncio
 from datetime import datetime
 import re
@@ -66,9 +65,9 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         padding: 1;
         text-wrap: wrap; /* Explicitly enable text wrapping via CSS */
         content-align: left top; /* Anchor content to top-left */
-        overflow-y: visible; /* Allow content to expand */
+        overflow-y: auto; /* Changed from 'visible' to valid 'auto' value */
         box-sizing: border-box; /* Include padding in size calculations */
-        transitions: none; /* Disable any transitions that might cause animation */
+        transition: none; /* Fixed property name from 'transitions' to 'transition' */
     }
 
     MessageDisplay.user-message {
@@ -121,7 +120,11 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         self.update(self._format_content(self.message.content))
 
     async def update_content(self, content: str) -> None:
-        """Update the message content using Static.update()"""
+        """Update the message content using Static.update() with optimizations for streaming"""
+        # Quick unchanged content check to avoid unnecessary updates
+        if self.message.content == content:
+            return
+
         # Update the stored message object content first
         self.message.content = content
 
@@ -129,11 +132,12 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         # This avoids text reflowing as new tokens arrive
         formatted_content = self._format_content(content)
 
-        # Update Static widget with minimal refresh
-        self.update(formatted_content)
+        # Use minimal update that doesn't trigger a refresh
+        # This allows parent to control refresh timing and avoid flickering
+        self.update(formatted_content, refresh=False)
 
-        # Important: Don't call refresh() here - let the parent handle timing
-        # This prevents constant layout recalculation on each token
+        # No refresh or layout recalculation is performed here
+        # The parent container will handle refresh timing for better stability
 
     def _format_content(self, content: str) -> str:
         """Format message content with timestamp"""
@@ -179,7 +183,7 @@ class ChatInterface(Container):
         padding: 0 1;
         content-align: left top; /* Keep content anchored at top */
         box-sizing: border-box;
-        scrollbar-size: 1 1; /* Smaller scrollbars for more stability */
+        scrollbar-gutter: stable; /* Better than scrollbar-size which isn't valid */
     }
 
     #input-area {
app/utils.py
@@ -165,6 +165,9 @@ async def generate_streaming_response(
 
     debug_log(f"Messages validation complete: {len(messages)} total messages")
 
+    # Import time module within the worker function scope
+    import time
+
     full_response = ""
     buffer = []
     last_update = time.time()
@@ -361,7 +364,7 @@ async def generate_streaming_response(
         return full_response
     return None # Indicate completion without full response (e.g., error before loop)
 
-def ensure_ollama_running() -> bool:
+async def ensure_ollama_running() -> bool:
     """
     Check if Ollama is running and try to start it if not.
     Returns True if Ollama is running after check/start attempt.
@@ -388,8 +391,7 @@ def ensure_ollama_running() -> bool:
             )
 
             # Wait a moment for it to start
-            import time
-            time.sleep(2)
+            await asyncio.sleep(2) # Use asyncio.sleep instead of time.sleep
 
             # Check if process is still running
             if process.poll() is None:
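
The swap matters because ensure_ollama_running() now runs as a coroutine on the app's event loop: time.sleep(2) would freeze the whole Textual UI (no input, no repaints) for two seconds, while asyncio.sleep(2) suspends only this coroutine. A minimal contrast, assuming an already-running event loop:

    import asyncio
    import time

    async def bad_wait():
        time.sleep(2)           # blocks the entire event loop; UI freezes

    async def good_wait():
        await asyncio.sleep(2)  # yields control; other tasks keep running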
chat_console.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.98
+Version: 0.2.99
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway