chat-console 0.2.9__py3-none-any.whl → 0.2.98__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/utils.py CHANGED
@@ -4,13 +4,15 @@ import time
  import asyncio
  import subprocess
  import logging
- from typing import Optional, Dict, Any, List, TYPE_CHECKING
+ import anthropic # Add missing import
+ from typing import Optional, Dict, Any, List, TYPE_CHECKING, Callable, Awaitable
  from datetime import datetime
+ from textual import work # Import work decorator
  from .config import CONFIG, save_config

  # Import SimpleChatApp for type hinting only if TYPE_CHECKING is True
  if TYPE_CHECKING:
-     from .main import SimpleChatApp
+     from .main import SimpleChatApp # Keep this for type hinting

  # Set up logging
  logging.basicConfig(level=logging.INFO)
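
The new `from textual import work` import above supports the rewrite of `generate_streaming_response` further down in this diff, which turns that coroutine into a Textual worker. For reference only, here is a minimal, self-contained sketch of that pattern; the `WorkerDemo` app, its widget ID, and the word list are invented for illustration and are not part of chat-console.

# Illustration only (not from the package): Textual's @work decorator schedules each call
# to the decorated coroutine as a managed Worker that the app can track and cancel,
# instead of an ad-hoc asyncio task.
import asyncio

from textual import work
from textual.app import App, ComposeResult
from textual.widgets import Static


class WorkerDemo(App):
    def compose(self) -> ComposeResult:
        yield Static("waiting...", id="status")

    def on_mount(self) -> None:
        # Calling the decorated method does not await it; it starts a worker
        # and returns a Worker handle that Textual manages.
        self.stream_words()

    @work(exit_on_error=True)
    async def stream_words(self) -> str:
        buffer = []
        for word in ["streaming", "chunks", "into", "the", "UI"]:
            await asyncio.sleep(0.2)  # stand-in for awaiting chunks from a model client
            buffer.append(word)
            self.query_one("#status", Static).update(" ".join(buffer))
        return " ".join(buffer)


if __name__ == "__main__":
    WorkerDemo().run()

In the hunks below, the same decorator is applied to a module-level coroutine whose first parameter is the app instance, so the resulting worker should still be owned by, and cancellable from, the running SimpleChatApp (for example when the user presses Escape).
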
@@ -18,8 +20,34 @@ logger = logging.getLogger(__name__)

  async def generate_conversation_title(message: str, model: str, client: Any) -> str:
      """Generate a descriptive title for a conversation based on the first message"""
-     logger.info(f"Generating title for conversation using model: {model}")
-
+     # --- Choose a specific, reliable model for title generation ---
+     # Prefer Haiku if Anthropic is available, otherwise fallback
+     title_model_id = None
+     if client and isinstance(client, anthropic.AsyncAnthropic): # Check if the passed client is Anthropic
+         # Check if Haiku is listed in the client's available models (more robust)
+         available_anthropic_models = client.get_available_models()
+         haiku_id = "claude-3-haiku-20240307"
+         if any(m["id"] == haiku_id for m in available_anthropic_models):
+             title_model_id = haiku_id
+             logger.info(f"Using Anthropic Haiku for title generation: {title_model_id}")
+         else:
+             # If Haiku not found, try Sonnet
+             sonnet_id = "claude-3-sonnet-20240229"
+             if any(m["id"] == sonnet_id for m in available_anthropic_models):
+                 title_model_id = sonnet_id
+                 logger.info(f"Using Anthropic Sonnet for title generation: {title_model_id}")
+             else:
+                 logger.warning(f"Neither Haiku nor Sonnet found in Anthropic client's list. Falling back.")
+
+     # Fallback logic if no specific Anthropic model was found or client is not Anthropic
+     if not title_model_id:
+         # Use the originally passed model (user's selected chat model) as the final fallback
+         title_model_id = model
+         logger.warning(f"Falling back to originally selected model for title generation: {title_model_id}")
+         # Consider adding fallbacks to OpenAI/Ollama here if needed based on config/availability
+
+     logger.info(f"Generating title for conversation using model: {title_model_id}")
+
      # Create a special prompt for title generation
      title_prompt = [
          {
@@ -43,7 +71,7 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
              if hasattr(client, 'generate_completion'):
                  title = await client.generate_completion(
                      messages=title_prompt,
-                     model=model,
+                     model=title_model_id, # Use the chosen title model
                      temperature=0.7,
                      max_tokens=60 # Titles should be short
                  )
@@ -53,9 +81,18 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
                  # For now, let's assume a hypothetical non-streaming call or adapt stream
                  # Simplified adaptation: collect stream chunks
                  title_chunks = []
-                 async for chunk in client.generate_stream(title_prompt, model, style=""): # Assuming style might not apply or needs default
-                     title_chunks.append(chunk)
-                 title = "".join(title_chunks)
+                 try:
+                     # Use the chosen title model here too
+                     async for chunk in client.generate_stream(title_prompt, title_model_id, style=""):
+                         if chunk is not None: # Ensure we only process non-None chunks
+                             title_chunks.append(chunk)
+                     title = "".join(title_chunks)
+                     # If we didn't get any content, use a default
+                     if not title.strip():
+                         title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
+                 except Exception as stream_error:
+                     logger.error(f"Error during title stream processing: {str(stream_error)}")
+                     title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
              else:
                  raise NotImplementedError("Client does not support a suitable method for title generation.")

@@ -78,120 +115,251 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
      logger.error(f"Failed to generate title after multiple retries. Last error: {last_error}")
      return f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"

- # Modified signature to accept app instance
- async def generate_streaming_response(app: 'SimpleChatApp', messages: List[Dict], model: str, style: str, client: Any, callback: Any) -> str:
-     """Generate a streaming response from the model"""
+ # Make this the worker function directly
+ @work(exit_on_error=True)
+ async def generate_streaming_response(
+     app: 'SimpleChatApp',
+     messages: List[Dict],
+     model: str,
+     style: str,
+     client: Any,
+     callback: Callable[[str], Awaitable[None]] # More specific type hint for callback
+ ) -> Optional[str]: # Return Optional[str] as cancellation might return None implicitly or error
+     """Generate a streaming response from the model (as a Textual worker)"""
+     # Import debug_log function from main
+     # Note: This import might be slightly less reliable inside a worker, but let's try
+     try:
+         from app.main import debug_log
+     except ImportError:
+         debug_log = lambda msg: None # Fallback
+
+     # Worker function needs to handle its own state and cleanup partially
+     # The main app will also need cleanup logic in generate_response
+
      logger.info(f"Starting streaming response with model: {model}")
+     debug_log(f"Starting streaming response with model: '{model}', client type: {type(client).__name__}")
+
+     # Very defensive check of messages format
+     if not messages:
+         debug_log("Error: messages list is empty")
+         raise ValueError("Messages list cannot be empty")
+
+     for i, msg in enumerate(messages):
+         try:
+             debug_log(f"Message {i}: role={msg.get('role', 'missing')}, content_len={len(msg.get('content', ''))}")
+             # Ensure essential fields exist
+             if 'role' not in msg:
+                 debug_log(f"Adding missing 'role' to message {i}")
+                 msg['role'] = 'user' # Default to user
+             if 'content' not in msg:
+                 debug_log(f"Adding missing 'content' to message {i}")
+                 msg['content'] = '' # Default to empty string
+         except Exception as e:
+             debug_log(f"Error checking message {i}: {str(e)}")
+             # Try to repair the message
+             messages[i] = {
+                 'role': 'user',
+                 'content': str(msg) if msg else ''
+             }
+             debug_log(f"Repaired message {i}")
+
+     debug_log(f"Messages validation complete: {len(messages)} total messages")
+
      full_response = ""
      buffer = []
      last_update = time.time()
      update_interval = 0.1 # Update UI every 100ms

      try:
-         # Update UI with model loading state if it's an Ollama client
-         if hasattr(client, 'is_loading_model'):
-             # Send signal to update UI for model loading if needed
+         # Check that we have a valid client and model before proceeding
+         if client is None:
+             debug_log("Error: client is None, cannot proceed with streaming")
+             raise ValueError("Model client is None, cannot proceed with streaming")
+
+         # Check if the client has the required generate_stream method
+         if not hasattr(client, 'generate_stream'):
+             debug_log(f"Error: client {type(client).__name__} does not have generate_stream method")
+             raise ValueError(f"Client {type(client).__name__} does not support streaming")
+
+         # Set initial model loading state if using Ollama
+         # Always show the model loading indicator for Ollama until we confirm otherwise
+         is_ollama = 'ollama' in str(type(client)).lower()
+         debug_log(f"Is Ollama client: {is_ollama}")
+
+         if is_ollama and hasattr(app, 'query_one'):
              try:
-                 # The client might be in model loading state even before generating
-                 model_loading = client.is_loading_model()
-                 logger.info(f"Initial model loading state: {model_loading}")
-
-                 # Get the chat interface and update loading indicator
-                 if hasattr(app, 'query_one'):
-                     loading = app.query_one("#loading-indicator")
-                     if model_loading:
-                         loading.add_class("model-loading")
-                         app.query_one("#loading-text").update("Loading Ollama model...")
-                     else:
-                         loading.remove_class("model-loading")
+                 # Show model loading indicator by default for Ollama
+                 debug_log("Showing initial model loading indicator for Ollama")
+                 logger.info("Showing initial model loading indicator for Ollama")
+                 loading = app.query_one("#loading-indicator")
+                 loading.add_class("model-loading")
+                 loading.update("⚙️ Loading Ollama model...")
              except Exception as e:
-                 logger.error(f"Error setting initial loading state: {str(e)}")
+                 debug_log(f"Error setting initial Ollama loading state: {str(e)}")
+                 logger.error(f"Error setting initial Ollama loading state: {str(e)}")

-         stream_generator = client.generate_stream(messages, model, style)
+         # Now proceed with streaming
+         debug_log(f"Starting stream generation with messages length: {len(messages)}")
+         logger.info(f"Starting stream generation for model: {model}")

-         # Check if we just entered model loading state
-         if hasattr(client, 'is_loading_model') and client.is_loading_model():
-             logger.info("Model loading started during generation")
+         # Defensive approach - wrap the stream generation in a try-except
+         try:
+             debug_log("Calling client.generate_stream()")
+             stream_generator = client.generate_stream(messages, model, style)
+             debug_log("Successfully obtained stream generator")
+         except Exception as stream_init_error:
+             debug_log(f"Error initializing stream generator: {str(stream_init_error)}")
+             logger.error(f"Error initializing stream generator: {str(stream_init_error)}")
+             raise # Re-raise to be handled in the main catch block
+
+         # After getting the generator, check if we're NOT in model loading state
+         if hasattr(client, 'is_loading_model') and not client.is_loading_model() and hasattr(app, 'query_one'):
              try:
-                 if hasattr(app, 'query_one'):
-                     loading = app.query_one("#loading-indicator")
-                     loading.add_class("model-loading")
-                     app.query_one("#loading-text").update("Loading Ollama model...")
+                 debug_log("Model is ready for generation, updating UI")
+                 logger.info("Model is ready for generation, updating UI")
+                 loading = app.query_one("#loading-indicator")
+                 loading.remove_class("model-loading")
+                 loading.update("▪▪▪ Generating response...")
              except Exception as e:
-                 logger.error(f"Error updating UI for model loading: {str(e)}")
+                 debug_log(f"Error updating UI after stream init: {str(e)}")
+                 logger.error(f"Error updating UI after stream init: {str(e)}")

-         # Use asyncio.shield to ensure we can properly interrupt the stream processing
-         async for chunk in stream_generator:
-             # Check for cancellation frequently
-             if asyncio.current_task().cancelled():
-                 logger.info("Task cancellation detected during chunk processing")
-                 # Close the client stream if possible
-                 if hasattr(client, 'cancel_stream'):
-                     await client.cancel_stream()
-                 raise asyncio.CancelledError()
-
-             # Check if model loading state changed
-             if hasattr(client, 'is_loading_model'):
-                 model_loading = client.is_loading_model()
-                 try:
-                     if hasattr(app, 'query_one'):
-                         loading = app.query_one("#loading-indicator")
-                         loading_text = app.query_one("#loading-text")
+         # Process the stream with careful error handling
+         debug_log("Beginning to process stream chunks")
+         try:
+             async for chunk in stream_generator:
+                 # Check for cancellation frequently
+                 if asyncio.current_task().cancelled():
+                     debug_log("Task cancellation detected during chunk processing")
+                     logger.info("Task cancellation detected during chunk processing")
+                     # Close the client stream if possible
+                     if hasattr(client, 'cancel_stream'):
+                         debug_log("Calling client.cancel_stream() due to task cancellation")
+                         await client.cancel_stream()
+                     raise asyncio.CancelledError()
+
+                 # Check if model loading state changed, but more safely
+                 if hasattr(client, 'is_loading_model'):
+                     try:
+                         # Get the model loading state
+                         model_loading = client.is_loading_model()
+                         debug_log(f"Model loading state: {model_loading}")

-                         if model_loading and not loading.has_class("model-loading"):
-                             # Model loading started
-                             logger.info("Model loading started during streaming")
-                             loading.add_class("model-loading")
-                             loading_text.update("⚙️ Loading Ollama model...")
-                         elif not model_loading and loading.has_class("model-loading"):
-                             # Model loading finished
-                             logger.info("Model loading finished during streaming")
-                             loading.remove_class("model-loading")
-                             loading_text.update("▪▪▪ Generating response...")
-                 except Exception as e:
-                     logger.error(f"Error updating loading state during streaming: {str(e)}")
-
-             if chunk: # Only process non-empty chunks
-                 buffer.append(chunk)
-                 current_time = time.time()
+                         # Safely update the UI elements if they exist
+                         if hasattr(app, 'query_one'):
+                             try:
+                                 loading = app.query_one("#loading-indicator")
+
+                                 # Check for class existence first
+                                 if model_loading and hasattr(loading, 'has_class') and not loading.has_class("model-loading"):
+                                     # Model loading started
+                                     debug_log("Model loading started during streaming")
+                                     logger.info("Model loading started during streaming")
+                                     loading.add_class("model-loading")
+                                     loading.update("⚙️ Loading Ollama model...")
+                                 elif not model_loading and hasattr(loading, 'has_class') and loading.has_class("model-loading"):
+                                     # Model loading finished
+                                     debug_log("Model loading finished during streaming")
+                                     logger.info("Model loading finished during streaming")
+                                     loading.remove_class("model-loading")
+                                     loading.update("▪▪▪ Generating response...")
+                             except Exception as ui_e:
+                                 debug_log(f"Error updating UI elements: {str(ui_e)}")
+                                 logger.error(f"Error updating UI elements: {str(ui_e)}")
+                     except Exception as e:
+                         debug_log(f"Error checking model loading state: {str(e)}")
+                         logger.error(f"Error checking model loading state: {str(e)}")

-                 # Update UI if enough time has passed or buffer is large
-                 if current_time - last_update >= update_interval or len(''.join(buffer)) > 100:
-                     new_content = ''.join(buffer)
-                     full_response += new_content
-                     # Send content to UI
-                     await callback(full_response)
-                     buffer = []
-                     last_update = current_time
+                 # Process the chunk - with careful type handling
+                 if chunk: # Only process non-empty chunks
+                     # Ensure chunk is a string - critical fix for providers returning other types
+                     if not isinstance(chunk, str):
+                         debug_log(f"WARNING: Received non-string chunk of type: {type(chunk).__name__}")
+                         try:
+                             # Try to convert to string if possible
+                             chunk = str(chunk)
+                             debug_log(f"Successfully converted chunk to string, length: {len(chunk)}")
+                         except Exception as e:
+                             debug_log(f"Error converting chunk to string: {str(e)}")
+                             # Skip this chunk since it can't be converted
+                             continue
+
+                     debug_log(f"Received chunk of length: {len(chunk)}")
+                     buffer.append(chunk)
+                     current_time = time.time()

-                 # Small delay to let UI catch up
-                 await asyncio.sleep(0.05)
+                     # Update UI if enough time has passed or buffer is large
+                     if current_time - last_update >= update_interval or len(''.join(buffer)) > 100:
+                         new_content = ''.join(buffer)
+                         full_response += new_content
+                         # Send content to UI
+                         debug_log(f"Updating UI with content length: {len(full_response)}")
+                         await callback(full_response)
+                         buffer = []
+                         last_update = current_time
+
+                     # Small delay to let UI catch up
+                     await asyncio.sleep(0.05)
+         except asyncio.CancelledError:
+             debug_log("CancelledError in stream processing")
+             raise
+         except Exception as chunk_error:
+             debug_log(f"Error processing stream chunks: {str(chunk_error)}")
+             logger.error(f"Error processing stream chunks: {str(chunk_error)}")
+             raise

          # Send any remaining content if the loop finished normally
          if buffer:
              new_content = ''.join(buffer)
              full_response += new_content
+             debug_log(f"Sending final content, total length: {len(full_response)}")
              await callback(full_response)

+         debug_log(f"Streaming response completed successfully. Response length: {len(full_response)}")
          logger.info(f"Streaming response completed successfully. Response length: {len(full_response)}")
          return full_response

      except asyncio.CancelledError:
          # This is expected when the user cancels via Escape
+         debug_log(f"Streaming response task cancelled. Partial response length: {len(full_response)}")
          logger.info(f"Streaming response task cancelled. Partial response length: {len(full_response)}")
          # Ensure the client stream is closed
          if hasattr(client, 'cancel_stream'):
-             await client.cancel_stream()
+             debug_log("Calling client.cancel_stream() after cancellation")
+             try:
+                 await client.cancel_stream()
+                 debug_log("Successfully cancelled client stream")
+             except Exception as cancel_err:
+                 debug_log(f"Error cancelling client stream: {str(cancel_err)}")
          # Return whatever was collected so far
          return full_response

      except Exception as e:
+         debug_log(f"Error during streaming response: {str(e)}")
          logger.error(f"Error during streaming response: {str(e)}")
          # Close the client stream if possible
          if hasattr(client, 'cancel_stream'):
-             await client.cancel_stream()
-         # Re-raise the exception for the caller to handle
+             debug_log("Attempting to cancel client stream after error")
+             try:
+                 await client.cancel_stream()
+                 debug_log("Successfully cancelled client stream after error")
+             except Exception as cancel_err:
+                 debug_log(f"Error cancelling client stream after error: {str(cancel_err)}")
+         # Re-raise the exception for the worker runner to handle
+         # The @work decorator might catch this depending on exit_on_error
          raise
+     finally:
+         # Basic cleanup within the worker itself (optional, main cleanup in app)
+         debug_log("generate_streaming_response worker finished or errored.")
+         # Return the full response if successful, otherwise error is raised or cancellation occurred
+         # Note: If cancelled, CancelledError is raised, and @work might handle it.
+         # If successful, return the response.
+         # If error, exception is raised.
+         # Let's explicitly return the response on success.
+         # If cancelled or error, this return might not be reached.
+         if 'full_response' in locals():
+             return full_response
+         return None # Indicate completion without full response (e.g., error before loop)

  def ensure_ollama_running() -> bool:
      """
@@ -254,3 +422,107 @@ def save_settings_to_config(model: str, style: str) -> None:
      CONFIG["default_model"] = model
      CONFIG["default_style"] = style
      save_config(CONFIG)
+
+ def resolve_model_id(model_id_or_name: str) -> str:
+     """
+     Resolves a potentially short model ID or display name to the full model ID
+     stored in the configuration. Tries multiple matching strategies.
+     """
+     if not model_id_or_name:
+         logger.warning("resolve_model_id called with empty input, returning empty string.")
+         return ""
+
+     input_lower = model_id_or_name.lower().strip()
+     logger.info(f"Attempting to resolve model identifier: '{input_lower}'")
+
+     available_models = CONFIG.get("available_models", {})
+     if not available_models:
+         logger.warning("No available_models found in CONFIG to resolve against.")
+         return model_id_or_name # Return original if no models to check
+
+     # 1. Check if the input is already a valid full ID (must contain a date suffix)
+     # Full Claude IDs should have format like "claude-3-opus-20240229" with a date suffix
+     for full_id in available_models:
+         if full_id.lower() == input_lower:
+             # Only consider it a full ID if it contains a date suffix (like -20240229)
+             if "-202" in full_id: # Check for date suffix
+                 logger.info(f"Input '{model_id_or_name}' is already a full ID with date suffix: '{full_id}'.")
+                 return full_id # Return the canonical full_id
+             else:
+                 logger.warning(f"Input '{model_id_or_name}' matches a model ID but lacks date suffix.")
+                 # Continue searching for a better match with date suffix
+
+     logger.debug(f"Input '{input_lower}' is not a direct full ID match. Checking other criteria...")
+     logger.debug(f"Available models for matching: {list(available_models.keys())}")
+
+     best_match = None
+     match_type = "None"
+
+     # 2. Iterate through available models for other matches
+     for full_id, model_info in available_models.items():
+         full_id_lower = full_id.lower()
+         display_name = model_info.get("display_name", "")
+         display_name_lower = display_name.lower()
+
+         logger.debug(f"Comparing '{input_lower}' against '{full_id_lower}' (Display: '{display_name}')")
+
+         # 2a. Exact match on display name (case-insensitive)
+         if display_name_lower == input_lower:
+             logger.info(f"Resolved '{model_id_or_name}' to '{full_id}' via exact display name match.")
+             return full_id # Exact display name match is high confidence
+
+         # 2b. Check if input is a known short alias (handle common cases explicitly)
+         # Special case for Claude 3.7 Sonnet which seems to be causing issues
+         if input_lower == "claude-3.7-sonnet":
+             # Hardcoded resolution for this specific model
+             claude_37_id = "claude-3-7-sonnet-20250219"
+             logger.warning(f"Special case: Directly mapping '{input_lower}' to '{claude_37_id}'")
+             # Check if this ID exists in available models
+             for model_id in available_models:
+                 if model_id.lower() == claude_37_id.lower():
+                     logger.info(f"Found exact match for hardcoded ID: {model_id}")
+                     return model_id
+             # If not found in available models, return the hardcoded ID anyway
+             logger.warning(f"Hardcoded ID '{claude_37_id}' not found in available models, returning it anyway")
+             return claude_37_id
+
+         # Map common short names to their expected full ID prefixes
+         short_aliases = {
+             "claude-3-opus": "claude-3-opus-",
+             "claude-3-sonnet": "claude-3-sonnet-",
+             "claude-3-haiku": "claude-3-haiku-",
+             "claude-3.5-sonnet": "claude-3-5-sonnet-", # Note the dot vs hyphen
+             "claude-3.7-sonnet": "claude-3-7-sonnet-" # Added this specific case
+         }
+         if input_lower in short_aliases and full_id_lower.startswith(short_aliases[input_lower]):
+             logger.info(f"Resolved '{model_id_or_name}' to '{full_id}' via known short alias match.")
+             # This is also high confidence
+             return full_id
+
+         # 2c. Check if input is a prefix of the full ID (more general, lower confidence)
+         if full_id_lower.startswith(input_lower):
+             logger.debug(f"Potential prefix match: '{input_lower}' vs '{full_id_lower}'")
+             # Don't return immediately, might find a better match (e.g., display name or alias)
+             if best_match is None: # Only take prefix if no other match found yet
+                 best_match = full_id
+                 match_type = "Prefix"
+                 logger.debug(f"Setting best_match to '{full_id}' based on prefix.")
+
+         # 2d. Check derived short name from display name (less reliable, keep as lower priority)
+         # Normalize display name: lower, replace space and dot with hyphen
+         derived_short_name = display_name_lower.replace(" ", "-").replace(".", "-")
+         if derived_short_name == input_lower:
+             logger.debug(f"Potential derived short name match: '{input_lower}' vs derived '{derived_short_name}' from '{display_name}'")
+             # Prioritize this over a simple prefix match if found
+             if best_match is None or match_type == "Prefix":
+                 best_match = full_id
+                 match_type = "Derived Short Name"
+                 logger.debug(f"Updating best_match to '{full_id}' based on derived name.")
+
+     # 3. Return best match found or original input
+     if best_match:
+         logger.info(f"Returning best match found for '{model_id_or_name}': '{best_match}' (Type: {match_type})")
+         return best_match
+     else:
+         logger.warning(f"Could not resolve model ID or name '{model_id_or_name}' to any known full ID. Returning original.")
+         return model_id_or_name
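
To make the matching strategies in the new resolve_model_id helper easier to follow, here is a hypothetical usage sketch. The concrete IDs in the comments are assumptions about what CONFIG["available_models"] might contain, not values shipped with the package.

# Hypothetical calls; actual results depend on the contents of CONFIG["available_models"].
from app.utils import resolve_model_id

# Already a dated full ID that is configured -> returned as-is (strategy 1).
resolve_model_id("claude-3-haiku-20240307")

# Known short alias -> the configured ID it prefixes, e.g. "claude-3-5-sonnet-20240620"
# if such an entry exists (strategy 2b).
resolve_model_id("claude-3.5-sonnet")

# Hardcoded special case: resolves to "claude-3-7-sonnet-20250219" even when that ID is
# not configured, provided at least one model is configured at all.
resolve_model_id("claude-3.7-sonnet")

# No strategy matches -> the input is returned unchanged and a warning is logged.
resolve_model_id("some-unknown-model")

The remaining hunks below cover packaging metadata: the METADATA version bump and the regenerated dist-info RECORD files.
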
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: chat-console
- Version: 0.2.9
+ Version: 0.2.98
  Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
  Home-page: https://github.com/wazacraftrfid/chat-console
  Author: Johnathan Greenaway
@@ -0,0 +1,24 @@
+ app/__init__.py,sha256=Mx4VF_U7IhLbSFel6dTS0LmWyZ6eBpnmhRlOw9sXLfE,131
+ app/config.py,sha256=KawltE7cK2bR9wbe1NSlepwWIjkiFw2bg3vbLmUnP38,7626
+ app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
+ app/main.py,sha256=cvAdboaSLNB_eilgrPe0nuAa1bCtsSHnaSURFyJt5zk,69475
+ app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
+ app/utils.py,sha256=y-U3vWGeJaaynQ1vNkht_DYLnRdzJDJh-u2bAinfj2Y,27428
+ app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
+ app/api/anthropic.py,sha256=jpvx_eKd5WqKc2KvpxjbInEfEmgw9o4YX1SXoUOaQ3M,12082
+ app/api/base.py,sha256=PB6loU2_SbnKvYuA-KFqR86xUZg1sX-1IgfMl9HKhR8,5724
+ app/api/ollama.py,sha256=B9jTeOmJpeAOg6UvvkcDt0xIe5PDkyUryMlhHBt3plA,60744
+ app/api/openai.py,sha256=K_fVJ6YNFgUyE_sRAZMnUaCXuiXNm4iEqzTI0I1sdic,5842
+ app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
+ app/ui/chat_interface.py,sha256=xU4yFcVS4etS5kx7cmnnUnF5p_nWDNmf68VKbYemJRg,15677
+ app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
+ app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
+ app/ui/model_selector.py,sha256=eqwJamLddgt4fS0pJbCyCBe-_shqESm3gM8vJTOWDAs,16956
+ app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
+ app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
+ chat_console-0.2.98.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+ chat_console-0.2.98.dist-info/METADATA,sha256=qJwneYlSKgSj2HrjWs9Gj8sLYFiV5nULI31Xv_kmE68,2922
+ chat_console-0.2.98.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ chat_console-0.2.98.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+ chat_console-0.2.98.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+ chat_console-0.2.98.dist-info/RECORD,,
@@ -1,24 +0,0 @@
- app/__init__.py,sha256=g2BzewDN5X96Dl5Zzw8uag1TBEdPIU1ceTm7u-BJrjM,130
- app/config.py,sha256=sKNp6Za4ZfW-CZBOvEv0TncAS77AnKi86hTM51C4KQ4,5227
- app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
- app/main.py,sha256=k726xRBcuPgbUsUg4s-REhtaljccjDLNzA_C-fPkQk4,48866
- app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
- app/utils.py,sha256=IyINMrM6oGXtN5HRPuKoFEyfKg0fR4FVXIi_0e2KxI0,11798
- app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
- app/api/anthropic.py,sha256=x5PmBXEKe_ow2NWk8XdqSPR0hLOdCc_ypY5QAySeA78,4234
- app/api/base.py,sha256=-6RSxSpqe-OMwkaq1wVWbu3pVkte-ZYy8rmdvt-Qh48,3953
- app/api/ollama.py,sha256=FTIlgZmvpZd6K4HL2nUD19-p9Xb1TA859LfnCgewpcU,51354
- app/api/openai.py,sha256=1fYgFXXL6yj_7lQ893Yj28RYG4M8d6gt_q1gzhhjcig,3641
- app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
- app/ui/chat_interface.py,sha256=R8tdy72TcT7veemUzcJOjbPY32WizBdNHgfmq69EFfA,14275
- app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
- app/ui/model_browser.py,sha256=5h3gVsuGIUrXjYVF-QclZFhYtX2kH14LvT22Ufm9etg,49453
- app/ui/model_selector.py,sha256=Aj1irAs9DQMn8wfcPsFZGxWmx0JTzHjSe7pVdDMwqTQ,13182
- app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
- app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
- chat_console-0.2.9.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
- chat_console-0.2.9.dist-info/METADATA,sha256=zTSJePqMsi0n6fEz8s4gtLwHe_726-ijfTjPwH_Mumw,2921
- chat_console-0.2.9.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- chat_console-0.2.9.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
- chat_console-0.2.9.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
- chat_console-0.2.9.dist-info/RECORD,,