chat-console 0.3.91-py3-none-any.whl → 0.3.95-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
app/__init__.py CHANGED
@@ -3,4 +3,4 @@ Chat CLI
 A command-line interface for chatting with various LLM providers like ChatGPT and Claude.
 """
 
-__version__ = "0.3.91"
+__version__ = "0.3.95"
app/api/base.py CHANGED
@@ -61,10 +61,12 @@ class BaseModelClient(ABC):
             logger.info(f"Found model in config with provider: {provider}")
         # For custom models, try to infer provider
         else:
-            # First check for OpenAI models - these should ALWAYS use OpenAI client
-            if any(name in model_name_lower for name in ["gpt", "text-", "davinci"]):
+            # Check for common OpenAI model patterns or prefixes
+            if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
+                "gpt" in model_name_lower or
+                model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
                 provider = "openai"
-                logger.info(f"Identified as OpenAI model: {model_name}")
+                logger.info(f"Identified {model_name} as an OpenAI model")
             # Then check for Anthropic models - these should ALWAYS use Anthropic client
             elif any(name in model_name_lower for name in ["claude", "anthropic"]):
                 provider = "anthropic"
@@ -120,12 +122,14 @@ class BaseModelClient(ABC):
             raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")
         # For custom models, try to infer provider
         else:
-            # First check for OpenAI models - these should ALWAYS use OpenAI client
-            if any(name in model_name_lower for name in ["gpt", "text-", "davinci"]):
+            # Check for common OpenAI model patterns or prefixes
+            if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
+                "gpt" in model_name_lower or
+                model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
                 if not AVAILABLE_PROVIDERS["openai"]:
                     raise Exception("OpenAI API key not found. Please set OPENAI_API_KEY environment variable.")
                 provider = "openai"
-                logger.info(f"Identified as OpenAI model: {model_name}")
+                logger.info(f"Identified {model_name} as an OpenAI model")
             # Then check for Anthropic models - these should ALWAYS use Anthropic client
             elif any(name in model_name_lower for name in ["claude", "anthropic"]):
                 if not AVAILABLE_PROVIDERS["anthropic"]:
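
Note: the inference rule above is plain string matching and can be checked in isolation. A minimal sketch, assuming a free-standing `infer_provider` helper (hypothetical; the released code assigns a local `provider` inside the classmethod, and the "04-*" spellings are copied verbatim from it):

    from typing import Optional

    def infer_provider(model_name: str) -> Optional[str]:
        # Mirrors the matching order in the diff: OpenAI patterns first, then Anthropic
        model_name_lower = model_name.lower()
        if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
                "gpt" in model_name_lower or
                model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
            return "openai"
        elif any(name in model_name_lower for name in ["claude", "anthropic"]):
            return "anthropic"
        return None  # the real method goes on to check other providers

    assert infer_provider("GPT-4.1-mini") == "openai"
    assert infer_provider("claude-3-haiku-20240307") == "anthropic"
    assert infer_provider("llama3:8b") is None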
app/api/ollama.py CHANGED
@@ -292,7 +292,9 @@ class OllamaClient(BaseModelClient):
                 if len(available_model_names) > 5:
                     error_msg += f" and {len(available_model_names) - 5} more."
                 logger.error(error_msg)
-                raise OllamaApiError(error_msg)
+                # Instead of raising a custom error, yield the message and return
+                yield error_msg
+                return
         except Exception as e:
             debug_log(f"Error checking model availability: {str(e)}")
             # Continue anyway, the main request will handle errors
@@ -370,13 +372,15 @@ class OllamaClient(BaseModelClient):
                             self._model_loading = False # Reset flag on failure
 
                             # Check if this is a 404 Not Found error
-                            if pull_response.status == 404:
-                                error_text = await pull_response.text()
+                            if response.status == 404:
+                                error_text = await response.text()
                                 debug_log(f"404 error details: {error_text}")
                                 # This is likely a model not found in registry
                                 error_msg = f"Error: Model '{model}' not found in the Ollama registry. Please check if the model name is correct or try a different model."
                                 logger.error(error_msg)
-                                raise OllamaApiError(error_msg, status_code=404)
+                                # Instead of raising a custom error, yield the message and return
+                                yield error_msg
+                                return
 
                             raise Exception("Failed to pull model")
                         logger.info("Model pulled successfully")
@@ -439,17 +443,34 @@ class OllamaClient(BaseModelClient):
                     if chunk_str.startswith('{') and chunk_str.endswith('}'):
                         try:
                             data = json.loads(chunk_str)
-                            if isinstance(data, dict) and "response" in data:
-                                response_text = data["response"]
-                                if response_text: # Only yield non-empty responses
-                                    has_yielded_content = True
-                                    chunk_length = len(response_text)
-                                    # Only log occasionally to reduce console spam
-                                    if chunk_length % 20 == 0:
-                                        debug_log(f"Yielding chunk of length: {chunk_length}")
-                                    yield response_text
+                            if isinstance(data, dict):
+                                # Check for error in the chunk
+                                if "error" in data:
+                                    error_msg = data.get("error", "")
+                                    debug_log(f"Ollama API error in chunk: {error_msg}")
+
+                                    # Handle model loading state
+                                    if "loading model" in error_msg.lower():
+                                        # Yield a user-friendly message and keep trying
+                                        yield "The model is still loading. Please wait a moment..."
+                                        # Add delay before continuing
+                                        await asyncio.sleep(2)
+                                        continue
+
+                                # Process normal response
+                                if "response" in data:
+                                    response_text = data["response"]
+                                    if response_text: # Only yield non-empty responses
+                                        has_yielded_content = True
+                                        chunk_length = len(response_text)
+                                        # Only log occasionally to reduce console spam
+                                        if chunk_length % 20 == 0:
+                                            debug_log(f"Yielding chunk of length: {chunk_length}")
+                                        yield response_text
+                                else:
+                                    debug_log(f"JSON chunk missing 'response' key: {chunk_str[:100]}")
                             else:
-                                debug_log(f"JSON chunk missing 'response' key: {chunk_str[:100]}")
+                                debug_log(f"JSON chunk is not a dict: {chunk_str[:100]}")
                         except json.JSONDecodeError:
                             debug_log(f"JSON decode error for chunk: {chunk_str[:100]}")
                     else:
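
Note: all three hunks above replace `raise OllamaApiError(...)` with a yielded message. Once an async generator has started streaming, an exception surfaces as a traceback in the consumer, while a yielded string is rendered like any other chunk. A minimal consumer sketch, with a stub standing in for `generate_stream`:

    import asyncio

    async def stub_stream():
        # Stands in for OllamaClient.generate_stream after this change:
        # the failure arrives as an ordinary chunk, then the generator ends.
        yield "Error: Model 'nope' not found in the Ollama registry."

    async def main():
        parts = []
        async for chunk in stub_stream():
            parts.append(chunk)  # the UI appends each chunk to the message display
        print("".join(parts))    # the error text renders like normal content

    asyncio.run(main())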
app/main.py CHANGED
@@ -766,7 +766,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
         input_widget.focus()
 
     async def generate_response(self) -> None:
-        """Generate an AI response using a non-blocking worker."""
+        """Generate an AI response using a non-blocking worker with fallback."""
         # Import debug_log function from main
         debug_log(f"Entering generate_response method")
 
@@ -774,6 +774,10 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log("No current conversation or messages, returning")
             return
 
+        # Track if we've already attempted a fallback to avoid infinite loops
+        if not hasattr(self, 'fallback_attempted'):
+            self.fallback_attempted = False
+
         self.is_generating = True
         log("Setting is_generating to True")
         debug_log("Setting is_generating to True")
@@ -1047,6 +1051,62 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log(f"Error in generation worker: {error}")
             log.error(f"Error in generation worker: {error}")
 
+            # Check if this is a model not found error that we can try to recover from
+            error_str = str(error)
+            is_model_not_found = "not found" in error_str.lower() or "404" in error_str
+
+            # Try fallback if this is a model not found error and we haven't tried fallback yet
+            if is_model_not_found and not self.fallback_attempted:
+                debug_log("Model not found error detected, attempting fallback")
+                self.fallback_attempted = True
+
+                # Choose an appropriate fallback based on available providers
+                fallback_model = None
+                from app.config import OPENAI_API_KEY, ANTHROPIC_API_KEY
+
+                if OPENAI_API_KEY:
+                    fallback_model = "gpt-3.5-turbo"
+                    debug_log(f"Falling back to OpenAI model: {fallback_model}")
+                elif ANTHROPIC_API_KEY:
+                    fallback_model = "claude-3-haiku-20240307"
+                    debug_log(f"Falling back to Anthropic model: {fallback_model}")
+                else:
+                    # Find a common Ollama model that should exist
+                    try:
+                        from app.api.ollama import OllamaClient
+                        ollama = await OllamaClient.create()
+                        models = await ollama.get_available_models()
+                        for model_name in ["gemma:2b", "phi3:mini", "llama3:8b"]:
+                            if any(m["id"] == model_name for m in models):
+                                fallback_model = model_name
+                                debug_log(f"Found available Ollama model for fallback: {fallback_model}")
+                                break
+                    except Exception as e:
+                        debug_log(f"Error finding Ollama fallback model: {str(e)}")
+
+                if fallback_model:
+                    # Update UI to show fallback is happening
+                    loading = self.query_one("#loading-indicator")
+                    loading.remove_class("hidden")
+                    loading.update(f"⚙️ Falling back to {fallback_model}...")
+
+                    # Update the selected model
+                    self.selected_model = fallback_model
+                    self.update_app_info() # Update the displayed model info
+
+                    # Remove the "Thinking..." message
+                    if self.messages and self.messages[-1].role == "assistant":
+                        debug_log("Removing thinking message before fallback")
+                        self.messages.pop()
+                        await self.update_messages_ui()
+
+                    # Try again with the new model
+                    debug_log(f"Retrying with fallback model: {fallback_model}")
+                    self.notify(f"Trying fallback model: {fallback_model}", severity="warning", timeout=3)
+                    await self.generate_response()
+                    return
+
+            # If we get here, either it's not a model error or fallback already attempted
             # Explicitly hide loading indicator
             try:
                 loading = self.query_one("#loading-indicator")
@@ -1056,17 +1116,11 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
                 debug_log(f"Error hiding loading indicator: {str(ui_err)}")
                 log.error(f"Error hiding loading indicator: {str(ui_err)}")
 
-            # Sanitize error message for UI display
-            error_str = str(error)
-
-            # Check if this is an Ollama error
-            is_ollama_error = "ollama" in error_str.lower() or "404" in error_str
-
             # Create a user-friendly error message
-            if is_ollama_error:
-                # For Ollama errors, provide a more user-friendly message
+            if is_model_not_found:
+                # For model not found errors, provide a more user-friendly message
                 user_error = "Unable to generate response. The selected model may not be available."
-                debug_log(f"Sanitizing Ollama error to user-friendly message: {user_error}")
+                debug_log(f"Sanitizing model not found error to user-friendly message: {user_error}")
                 # Show technical details only in notification, not in chat
                 self.notify(f"Model error: {error_str}", severity="error", timeout=5)
             else:
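
Note: the heart of the new handler is a retry guard: `fallback_attempted` is set before the recursive `generate_response()` call, so a second "not found" failure falls through to normal error reporting instead of looping. A self-contained sketch of that shape (model call stubbed; names are illustrative, not the app's API):

    import asyncio

    class Chat:
        def __init__(self):
            self.fallback_attempted = False
            self.selected_model = "no-such-model"

        async def call_model(self):
            if self.selected_model == "no-such-model":
                raise RuntimeError("model 'no-such-model' not found (404)")
            return f"ok from {self.selected_model}"

        async def generate(self):
            try:
                return await self.call_model()
            except Exception as error:
                not_found = "not found" in str(error).lower() or "404" in str(error)
                if not_found and not self.fallback_attempted:
                    self.fallback_attempted = True         # set BEFORE retrying
                    self.selected_model = "gpt-3.5-turbo"  # the app picks this from its API keys
                    return await self.generate()           # exactly one retry
                raise

    print(asyncio.run(Chat().generate()))  # -> ok from gpt-3.5-turbo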
app/ui/chat_interface.py CHANGED
@@ -120,7 +120,7 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
         self.update(self._format_content(self.message.content))
 
     async def update_content(self, content: str) -> None:
-        """Update the message content."""
+        """Update the message content using Static.update() with optimizations for streaming"""
         import logging
         logger = logging.getLogger(__name__)
         logger.debug(f"MessageDisplay.update_content called with content length: {len(content)}")
@@ -130,51 +130,42 @@ class MessageDisplay(Static): # Inherit from Static instead of RichLog
             self._update_lock = asyncio.Lock()
 
         async with self._update_lock:
-            # For initial update from "Thinking..."
+            # Special handling for "Thinking..." to ensure it gets replaced
             if self.message.content == "Thinking..." and content:
-                logger.debug("Replacing 'Thinking...' with initial content")
-                self.message.content = content # Update the stored content
-                formatted = self._format_content(content)
-                self.update(formatted, refresh=True)
+                logger.debug("Replacing 'Thinking...' with actual content")
+                # Force a complete replacement
+                self.message.content = content
+                formatted_content = self._format_content(content)
+                self.update(formatted_content)
 
-                # Force a clean layout update
+                # Force app-level refresh
                 try:
                     if self.app:
                         self.app.refresh(layout=True)
-                        await asyncio.sleep(0.05) # Small delay for layout to update
-
                         # Find container and scroll
                         messages_container = self.app.query_one("#messages-container")
                         if messages_container:
                             messages_container.scroll_end(animate=False)
                 except Exception as e:
-                    logger.error(f"Error in initial UI update: {str(e)}")
-                    return
-
-            # Quick unchanged content check to avoid unnecessary updates
-            if self.message.content == content:
-                logger.debug("Content unchanged, skipping update")
+                    logger.error(f"Error refreshing app: {str(e)}")
                 return
 
-            # For subsequent updates
-            if self.message.content != content:
-                self.message.content = content
-                formatted = self._format_content(content)
-                self.update(formatted, refresh=True)
-
-                # Use a more targeted refresh approach
-                try:
-                    if self.app:
-                        self.app.refresh(layout=False) # Lightweight refresh first
-                        # Find container and scroll
-                        messages_container = self.app.query_one("#messages-container")
-                        if messages_container:
-                            messages_container.scroll_end(animate=False)
-
-                        # Final full refresh only at end
-                        self.app.refresh(layout=True)
-                except Exception as e:
-                    logger.error(f"Error refreshing UI: {str(e)}")
+            # For all other updates - ALWAYS update
+            self.message.content = content
+            formatted_content = self._format_content(content)
+            # Ensure the update call doesn't have refresh=True
+            self.update(formatted_content)
+
+            # Force refresh using app.refresh() instead of passing to update()
+            try:
+                if self.app:
+                    self.app.refresh(layout=True)
+                    # Find container and scroll
+                    messages_container = self.app.query_one("#messages-container")
+                    if messages_container:
+                        messages_container.scroll_end(animate=False)
+            except Exception as e:
+                logger.error(f"Error refreshing app: {str(e)}")
 
     def _format_content(self, content: str) -> str:
         """Format message content with timestamp and handle markdown links"""
app/ui/model_selector.py CHANGED
@@ -247,6 +247,11 @@ class ModelSelector(Container):
         """Handle select changes"""
         if event.select.id == "provider-select":
             self.selected_provider = event.value
+
+            # IMPORTANT: Clear any cached client
+            if hasattr(self.app, 'cached_client'):
+                self.app.cached_client = None
+
             # Update model options
             model_select = self.query_one("#model-select", Select)
             model_options = await self._get_model_options(self.selected_provider)
app/utils.py CHANGED
@@ -27,179 +27,64 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
 
     debug_log(f"Starting title generation with model: {model}, client type: {type(client).__name__}")
 
-    # --- Choose a specific, reliable model for title generation ---
-    # First, determine if we have a valid client
-    if client is None:
-        debug_log("Client is None, will use default title")
-        return f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
-
-    # Determine the best model to use for title generation
-    title_model_id = None
+    # For safety, always use a default title first
+    default_title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
 
-    # Check if client is Anthropic
-    is_anthropic = 'anthropic' in str(type(client)).lower()
-    if is_anthropic:
-        debug_log("Using Anthropic client for title generation")
-        # Try to get available models safely
-        try:
-            available_anthropic_models = client.get_available_models()
-            debug_log(f"Found {len(available_anthropic_models)} Anthropic models")
-
-            # Try Claude 3 Haiku first (fastest)
-            haiku_id = "claude-3-haiku-20240307"
-            if any(m.get("id") == haiku_id for m in available_anthropic_models):
-                title_model_id = haiku_id
-                debug_log(f"Using Anthropic Haiku for title generation: {title_model_id}")
-            else:
-                # If Haiku not found, try Sonnet
-                sonnet_id = "claude-3-sonnet-20240229"
-                if any(m.get("id") == sonnet_id for m in available_anthropic_models):
-                    title_model_id = sonnet_id
-                    debug_log(f"Using Anthropic Sonnet for title generation: {title_model_id}")
-                else:
-                    debug_log("Neither Haiku nor Sonnet found in Anthropic models list")
-        except Exception as e:
-            debug_log(f"Error getting Anthropic models: {str(e)}")
-
-    # Check if client is OpenAI
-    is_openai = 'openai' in str(type(client)).lower()
-    if is_openai:
-        debug_log("Using OpenAI client for title generation")
-        # Use GPT-3.5 for title generation (fast and cost-effective)
-        title_model_id = "gpt-3.5-turbo"
-        debug_log(f"Using OpenAI model for title generation: {title_model_id}")
-        # For OpenAI, we'll always use their model, not fall back to the passed model
-        # This prevents trying to use Ollama models with OpenAI client
-
-    # Check if client is Ollama
-    is_ollama = 'ollama' in str(type(client)).lower()
-    if is_ollama and not title_model_id:
-        debug_log("Using Ollama client for title generation")
-        # For Ollama, check if the model exists before using it
-        try:
-            # Try a quick test request to check if model exists
-            debug_log(f"Testing if Ollama model exists: {model}")
-            import aiohttp
-            async with aiohttp.ClientSession() as session:
-                try:
-                    base_url = "http://localhost:11434"
-                    async with session.post(
-                        f"{base_url}/api/generate",
-                        json={"model": model, "prompt": "test", "stream": False},
-                        timeout=2
-                    ) as response:
-                        if response.status == 200:
-                            # Model exists, use it
-                            title_model_id = model
-                            debug_log(f"Ollama model {model} exists, using it for title generation")
-                        else:
-                            debug_log(f"Ollama model {model} returned status {response.status}, falling back to default")
-                            # Fall back to a common model
-                            title_model_id = "llama3"
-                except Exception as e:
-                    debug_log(f"Error testing Ollama model: {str(e)}, falling back to default")
-                    # Fall back to a common model
-                    title_model_id = "llama3"
-        except Exception as e:
-            debug_log(f"Error checking Ollama model: {str(e)}")
-            # Fall back to a common model
-            title_model_id = "llama3"
-
-    # Fallback logic if no specific model was found
-    if not title_model_id:
-        # Use a safe default based on client type
-        if is_openai:
-            title_model_id = "gpt-3.5-turbo"
-        elif is_anthropic:
-            title_model_id = "claude-3-haiku-20240307"
-        elif is_ollama:
-            title_model_id = "llama3" # Common default
+    # Try-except the entire function to ensure we always return a title
+    try:
+        # Pick a reliable title generation model - prefer OpenAI if available
+        from ..config import OPENAI_API_KEY, ANTHROPIC_API_KEY
+
+        if OPENAI_API_KEY:
+            from ..api.openai import OpenAIClient
+            title_client = await OpenAIClient.create()
+            title_model = "gpt-3.5-turbo"
+            debug_log("Using OpenAI for title generation")
+        elif ANTHROPIC_API_KEY:
+            from ..api.anthropic import AnthropicClient
+            title_client = await AnthropicClient.create()
+            title_model = "claude-3-haiku-20240307"
+            debug_log("Using Anthropic for title generation")
         else:
-            # Last resort - use the originally passed model
-            title_model_id = model
+            # Use the passed client if no API keys available
+            title_client = client
+            title_model = model
+            debug_log(f"Using provided {type(client).__name__} for title generation")
 
-        debug_log(f"No specific model found, using fallback model for title generation: {title_model_id}")
-
-    logger.info(f"Generating title for conversation using model: {title_model_id}")
-    debug_log(f"Final model selected for title generation: {title_model_id}")
-
-    # Create a special prompt for title generation
-    title_prompt = [
-        {
-            "role": "system",
-            "content": "Generate a brief, descriptive title (maximum 40 characters) for a conversation that starts with the following message. The title should be concise and reflect the main topic or query. Return only the title text with no additional explanation or formatting."
-        },
-        {
-            "role": "user",
-            "content": message
-        }
-    ]
-
-    tries = 2 # Number of retries
-    last_error = None
-
-    while tries > 0:
-        try:
-            debug_log(f"Attempt {3-tries} to generate title")
-            # First try generate_completion if available
-            if hasattr(client, 'generate_completion'):
-                debug_log("Using generate_completion method")
-                try:
-                    title = await client.generate_completion(
-                        messages=title_prompt,
-                        model=title_model_id,
-                        temperature=0.7,
-                        max_tokens=60 # Titles should be short
-                    )
-                    debug_log(f"Title generated successfully: {title}")
-                except Exception as completion_error:
-                    debug_log(f"Error in generate_completion: {str(completion_error)}")
-                    raise # Re-raise to be caught by outer try/except
-            # Fall back to generate_stream if completion not available
-            elif hasattr(client, 'generate_stream'):
-                debug_log("Using generate_stream method")
-                title_chunks = []
-                try:
-                    async for chunk in client.generate_stream(title_prompt, title_model_id, style=""):
-                        if chunk is not None:
-                            title_chunks.append(chunk)
-                            debug_log(f"Received chunk of length: {len(chunk)}")
-
-                    title = "".join(title_chunks)
-                    debug_log(f"Combined title from chunks: {title}")
-
-                    # If we didn't get any content, use a default
-                    if not title.strip():
-                        debug_log("Empty title received, using default")
-                        title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
-                except Exception as stream_error:
-                    debug_log(f"Error during title stream processing: {str(stream_error)}")
-                    raise # Re-raise to be caught by outer try/except
-            else:
-                debug_log("Client does not support any title generation method")
-                raise NotImplementedError("Client does not support a suitable method for title generation.")
-
-            # Sanitize and limit the title
-            title = title.strip().strip('"\'').strip()
-            if len(title) > 40: # Set a maximum title length
-                title = title[:37] + "..."
-
-            logger.info(f"Generated title: {title}")
-            debug_log(f"Final sanitized title: {title}")
-            return title # Return successful title
+        # Create a special prompt for title generation
+        title_prompt = [
+            {
+                "role": "system",
+                "content": "Generate a brief, descriptive title (maximum 40 characters) for a conversation that starts with the following message. Return only the title text with no additional explanation or formatting."
+            },
+            {
+                "role": "user",
+                "content": message
+            }
+        ]
+
+        # Generate title
+        debug_log(f"Sending title generation request to {title_model}")
+        title = await title_client.generate_completion(
+            messages=title_prompt,
+            model=title_model,
+            temperature=0.7,
+            max_tokens=60
+        )
+
+        # Sanitize the title
+        title = title.strip().strip('"\'').strip()
+        if len(title) > 40:
+            title = title[:37] + "..."
 
-        except Exception as e:
-            last_error = str(e)
-            debug_log(f"Error generating title (tries left: {tries-1}): {last_error}")
-            logger.error(f"Error generating title (tries left: {tries-1}): {last_error}")
-            tries -= 1
-            if tries > 0: # Only sleep if there are more retries
-                await asyncio.sleep(1) # Small delay before retry
-
-    # If all retries fail, log the error and return a default title
-    debug_log(f"Failed to generate title after multiple retries. Using default title.")
-    logger.error(f"Failed to generate title after multiple retries. Last error: {last_error}")
-    return f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
+        debug_log(f"Generated title: {title}")
+        return title
+
+    except Exception as e:
+        # Log the error and return default title
+        debug_log(f"Title generation failed: {str(e)}")
+        logger.error(f"Title generation failed: {str(e)}")
+        return default_title
 
 # Helper function for OpenAI streaming
 async def _generate_openai_stream(
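
Note: the new version trades per-client model probing and a retry loop for a single guarded path: compute a default title up front, make exactly one `generate_completion` call, and degrade to the default on any exception. The control flow in isolation, a sketch in which the completion callable is a stub:

    import asyncio
    from datetime import datetime

    async def generate_title(message: str, generate_completion) -> str:
        # Sketch of the always-return-a-title flow; the client call is stubbed.
        default_title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
        try:
            title = await generate_completion(message)
            title = title.strip().strip('"\'').strip()
            if len(title) > 40:
                title = title[:37] + "..."
            return title
        except Exception:
            return default_title  # any failure degrades to the timestamp title

    async def failing_client(_message: str) -> str:
        raise RuntimeError("provider unavailable")  # simulates a failed API call

    print(asyncio.run(generate_title("hello", failing_client)))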
@@ -868,7 +753,9 @@ def resolve_model_id(model_id_or_name: str) -> str:
         "04-turbo": "gpt-4-turbo",
         "035": "gpt-3.5-turbo",
         "35-turbo": "gpt-3.5-turbo",
-        "35": "gpt-3.5-turbo"
+        "35": "gpt-3.5-turbo",
+        "4.1-mini": "gpt-4.1-mini", # Add support for gpt-4.1-mini
+        "4.1": "gpt-4.1" # Add support for gpt-4.1
     }
 
     if input_lower in openai_model_aliases:
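
Note: `resolve_model_id` canonicalizes user input through a lowercase dict lookup and passes unknown names through unchanged, so the two added entries are all that is needed to accept "4.1" and "4.1-mini" as shorthand. A sketch limited to the aliases visible in this hunk (the `resolve` wrapper is hypothetical):

    openai_model_aliases = {
        "04-turbo": "gpt-4-turbo",
        "035": "gpt-3.5-turbo",
        "35-turbo": "gpt-3.5-turbo",
        "35": "gpt-3.5-turbo",
        "4.1-mini": "gpt-4.1-mini",
        "4.1": "gpt-4.1",
    }

    def resolve(model_id_or_name: str) -> str:
        input_lower = model_id_or_name.lower()
        return openai_model_aliases.get(input_lower, model_id_or_name)

    assert resolve("4.1-mini") == "gpt-4.1-mini"
    assert resolve("gpt-4.1") == "gpt-4.1"  # unknown keys pass through unchanged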
chat_console-{0.3.91 → 0.3.95}.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.3.91
+Version: 0.3.95
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
chat_console-{0.3.91 → 0.3.95}.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
-app/__init__.py,sha256=1Z7Qdm9b_jcT0nisliyzs6res69PiL36mZcYaHwpzvY,131
+app/__init__.py,sha256=8Y_-BF1_VRcDOSqGO8CR8u53RZRpt8E6zlGMF6GmQFk,131
 app/config.py,sha256=xeRGXcKbNvAdQGkaJJBipM4yHZJTM1y4ZFoW764APOU,7661
 app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=fYwnYiK4FEpTCBP8QByUBaXyAPWR1h2dG7aLdtcnkzs,75602
+app/main.py,sha256=RmQtbIKtgo92Y5Ue0NbozNP_9BsswLee1rw2la9yEH8,78953
 app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=DsK_Zid9HG9v6cX1y8-4uS_86AgMRTrGFjVn-fctbDM,44949
+app/utils.py,sha256=eGHv4VRjP_qSaYTdLanVffWZXoCh2h3SI0gf2Y6ljes,38824
 app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
 app/api/anthropic.py,sha256=uInwNvGLJ_iPUs4BjdwaqXTU6NfmK1SzX7498Pt44fI,10667
-app/api/base.py,sha256=Oqu674v0NkrJY91tvxGd6YWgyi6XrFvi03quzWGswg8,7425
-app/api/ollama.py,sha256=zQcrs3COoS4wu9amp5oSmIsBNYK_ntilcGIPOe4wafI,64649
+app/api/base.py,sha256=zvlHHfIcaObefkJ3w4er9ZSX7YGZ_MM0H-wrzD8CGAM,7629
+app/api/ollama.py,sha256=eFG24nI2MlF57z9EHiA97v02NgFJ0kxaPUX26xAXFsg,66154
 app/api/openai.py,sha256=hLPr955tUx_2vwRuLP8Zrl3vu7kQZgUETi4cJuaYnFE,10810
 app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
-app/ui/chat_interface.py,sha256=prJNmigK7yD-7hb-61mgG2JXcJeDgADySYSElGEUmTg,18683
+app/ui/chat_interface.py,sha256=oSDZi0Jgj_L8WnBh1RuJpIeIcN-RQ38CNejwsXiWTVg,18267
 app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
 app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
-app/ui/model_selector.py,sha256=ue3rbZfjVsjli-rJN5mfSqq23Ci7NshmTb4xWS-uG5k,18685
+app/ui/model_selector.py,sha256=9fIPpAiqb568idt9pdROAYaxpoqY9czMF-bGdOl4nYk,18861
 app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
 app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
-chat_console-0.3.91.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.3.91.dist-info/METADATA,sha256=7mGtC-c-bML6p0ku5YaFSpYO0rZGbF7NZfhuUUB96u0,2922
-chat_console-0.3.91.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
-chat_console-0.3.91.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.3.91.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.3.91.dist-info/RECORD,,
+chat_console-0.3.95.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.3.95.dist-info/METADATA,sha256=EqxOX-Bugw7Iu2PKQS63pnUYP64UzjkvrNnv2X-Z4fg,2922
+chat_console-0.3.95.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+chat_console-0.3.95.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.3.95.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.3.95.dist-info/RECORD,,