chat-console 0.2.9__py3-none-any.whl → 0.2.99__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/anthropic.py +163 -26
- app/api/base.py +45 -2
- app/api/ollama.py +202 -43
- app/api/openai.py +53 -4
- app/config.py +53 -7
- app/main.py +512 -103
- app/ui/chat_interface.py +40 -20
- app/ui/model_browser.py +405 -45
- app/ui/model_selector.py +77 -19
- app/utils.py +359 -85
- {chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/METADATA +1 -1
- chat_console-0.2.99.dist-info/RECORD +24 -0
- chat_console-0.2.9.dist-info/RECORD +0 -24
- {chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/WHEEL +0 -0
- {chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/entry_points.txt +0 -0
- {chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/top_level.txt +0 -0
app/utils.py
CHANGED
@@ -4,13 +4,15 @@ import time
 import asyncio
 import subprocess
 import logging
-
+import anthropic # Add missing import
+from typing import Optional, Dict, Any, List, TYPE_CHECKING, Callable, Awaitable
 from datetime import datetime
+from textual import work # Import work decorator
 from .config import CONFIG, save_config
 
 # Import SimpleChatApp for type hinting only if TYPE_CHECKING is True
 if TYPE_CHECKING:
-    from .main import SimpleChatApp
+    from .main import SimpleChatApp # Keep this for type hinting
 
 # Set up logging
 logging.basicConfig(level=logging.INFO)
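Note: the TYPE_CHECKING guard above is the standard way to make SimpleChatApp visible to static type checkers without importing app/main.py at runtime, which would risk a circular import. A minimal illustration of the pattern; the function name here is hypothetical:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from .main import SimpleChatApp  # seen only by type checkers, never executed

    def attach(app: 'SimpleChatApp') -> None:  # string annotation, resolved lazily
        ...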
@@ -18,8 +20,34 @@ logger = logging.getLogger(__name__)
 
 async def generate_conversation_title(message: str, model: str, client: Any) -> str:
     """Generate a descriptive title for a conversation based on the first message"""
-
-
+    # --- Choose a specific, reliable model for title generation ---
+    # Prefer Haiku if Anthropic is available, otherwise fallback
+    title_model_id = None
+    if client and isinstance(client, anthropic.AsyncAnthropic): # Check if the passed client is Anthropic
+        # Check if Haiku is listed in the client's available models (more robust)
+        available_anthropic_models = client.get_available_models()
+        haiku_id = "claude-3-haiku-20240307"
+        if any(m["id"] == haiku_id for m in available_anthropic_models):
+            title_model_id = haiku_id
+            logger.info(f"Using Anthropic Haiku for title generation: {title_model_id}")
+        else:
+            # If Haiku not found, try Sonnet
+            sonnet_id = "claude-3-sonnet-20240229"
+            if any(m["id"] == sonnet_id for m in available_anthropic_models):
+                title_model_id = sonnet_id
+                logger.info(f"Using Anthropic Sonnet for title generation: {title_model_id}")
+            else:
+                logger.warning(f"Neither Haiku nor Sonnet found in Anthropic client's list. Falling back.")
+
+    # Fallback logic if no specific Anthropic model was found or client is not Anthropic
+    if not title_model_id:
+        # Use the originally passed model (user's selected chat model) as the final fallback
+        title_model_id = model
+        logger.warning(f"Falling back to originally selected model for title generation: {title_model_id}")
+        # Consider adding fallbacks to OpenAI/Ollama here if needed based on config/availability
+
+    logger.info(f"Generating title for conversation using model: {title_model_id}")
+
     # Create a special prompt for title generation
     title_prompt = [
         {
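Note: the selection logic above is a simple preference chain: Haiku, then Sonnet, then the user's currently selected chat model. A standalone sketch of the same pattern; the model IDs and the list-of-dicts shape of get_available_models() come from the diff, while the helper name is illustrative:

    def pick_title_model(available, fallback):
        # Return the first preferred model present in `available`, else `fallback`.
        # `available` is a list of dicts with an "id" key, as in the diff above.
        preferred = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229"]
        ids = {m["id"] for m in available}
        for candidate in preferred:
            if candidate in ids:
                return candidate
        return fallback

    # pick_title_model([{"id": "claude-3-sonnet-20240229"}], "gpt-4")
    # -> "claude-3-sonnet-20240229"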
@@ -43,7 +71,7 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
         if hasattr(client, 'generate_completion'):
             title = await client.generate_completion(
                 messages=title_prompt,
-                model=model,
+                model=title_model_id, # Use the chosen title model
                 temperature=0.7,
                 max_tokens=60 # Titles should be short
             )
@@ -53,9 +81,18 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
             # For now, let's assume a hypothetical non-streaming call or adapt stream
             # Simplified adaptation: collect stream chunks
             title_chunks = []
-
-
-
+            try:
+                # Use the chosen title model here too
+                async for chunk in client.generate_stream(title_prompt, title_model_id, style=""):
+                    if chunk is not None: # Ensure we only process non-None chunks
+                        title_chunks.append(chunk)
+                title = "".join(title_chunks)
+                # If we didn't get any content, use a default
+                if not title.strip():
+                    title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
+            except Exception as stream_error:
+                logger.error(f"Error during title stream processing: {str(stream_error)}")
+                title = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
         else:
             raise NotImplementedError("Client does not support a suitable method for title generation.")
 
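Note: the streaming fallback above reduces to one pattern: join all non-None chunks, and substitute a timestamped default title when the stream yields nothing or raises. A runnable sketch with a stubbed chunk source (only the stream-joining behavior is taken from the diff; the stub generator is illustrative):

    import asyncio
    from datetime import datetime

    async def collect_title(stream):
        # Join non-None chunks from an async generator, with a timestamped default.
        default = f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
        chunks = []
        try:
            async for chunk in stream:
                if chunk is not None:
                    chunks.append(chunk)
        except Exception:
            return default
        title = "".join(chunks)
        return title if title.strip() else default

    async def demo():
        async def fake_stream():
            for part in ["Quarterly ", "report ", "questions"]:
                yield part
        print(await collect_title(fake_stream()))  # "Quarterly report questions"

    asyncio.run(demo())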
@@ -78,122 +115,256 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
     logger.error(f"Failed to generate title after multiple retries. Last error: {last_error}")
     return f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
 
-#
-
-
+# Make this the worker function directly
+@work(exit_on_error=True)
+async def generate_streaming_response(
+    app: 'SimpleChatApp',
+    messages: List[Dict],
+    model: str,
+    style: str,
+    client: Any,
+    callback: Callable[[str], Awaitable[None]] # More specific type hint for callback
+) -> Optional[str]: # Return Optional[str] as cancellation might return None implicitly or error
+    """Generate a streaming response from the model (as a Textual worker)"""
+    # Import debug_log function from main
+    # Note: This import might be slightly less reliable inside a worker, but let's try
+    try:
+        from app.main import debug_log
+    except ImportError:
+        debug_log = lambda msg: None # Fallback
+
+    # Worker function needs to handle its own state and cleanup partially
+    # The main app will also need cleanup logic in generate_response
+
     logger.info(f"Starting streaming response with model: {model}")
+    debug_log(f"Starting streaming response with model: '{model}', client type: {type(client).__name__}")
+
+    # Very defensive check of messages format
+    if not messages:
+        debug_log("Error: messages list is empty")
+        raise ValueError("Messages list cannot be empty")
+
+    for i, msg in enumerate(messages):
+        try:
+            debug_log(f"Message {i}: role={msg.get('role', 'missing')}, content_len={len(msg.get('content', ''))}")
+            # Ensure essential fields exist
+            if 'role' not in msg:
+                debug_log(f"Adding missing 'role' to message {i}")
+                msg['role'] = 'user' # Default to user
+            if 'content' not in msg:
+                debug_log(f"Adding missing 'content' to message {i}")
+                msg['content'] = '' # Default to empty string
+        except Exception as e:
+            debug_log(f"Error checking message {i}: {str(e)}")
+            # Try to repair the message
+            messages[i] = {
+                'role': 'user',
+                'content': str(msg) if msg else ''
+            }
+            debug_log(f"Repaired message {i}")
+
+    debug_log(f"Messages validation complete: {len(messages)} total messages")
+
+    # Import time module within the worker function scope
+    import time
+
     full_response = ""
     buffer = []
     last_update = time.time()
     update_interval = 0.1 # Update UI every 100ms
 
     try:
-        #
-        if
-
+        # Check that we have a valid client and model before proceeding
+        if client is None:
+            debug_log("Error: client is None, cannot proceed with streaming")
+            raise ValueError("Model client is None, cannot proceed with streaming")
+
+        # Check if the client has the required generate_stream method
+        if not hasattr(client, 'generate_stream'):
+            debug_log(f"Error: client {type(client).__name__} does not have generate_stream method")
+            raise ValueError(f"Client {type(client).__name__} does not support streaming")
+
+        # Set initial model loading state if using Ollama
+        # Always show the model loading indicator for Ollama until we confirm otherwise
+        is_ollama = 'ollama' in str(type(client)).lower()
+        debug_log(f"Is Ollama client: {is_ollama}")
+
+        if is_ollama and hasattr(app, 'query_one'):
             try:
-                #
-
-                logger.info(
-
-
-
-                loading = app.query_one("#loading-indicator")
-                if model_loading:
-                    loading.add_class("model-loading")
-                    app.query_one("#loading-text").update("Loading Ollama model...")
-                else:
-                    loading.remove_class("model-loading")
+                # Show model loading indicator by default for Ollama
+                debug_log("Showing initial model loading indicator for Ollama")
+                logger.info("Showing initial model loading indicator for Ollama")
+                loading = app.query_one("#loading-indicator")
+                loading.add_class("model-loading")
+                loading.update("⚙️ Loading Ollama model...")
             except Exception as e:
-
+                debug_log(f"Error setting initial Ollama loading state: {str(e)}")
+                logger.error(f"Error setting initial Ollama loading state: {str(e)}")
+
+        # Now proceed with streaming
+        debug_log(f"Starting stream generation with messages length: {len(messages)}")
+        logger.info(f"Starting stream generation for model: {model}")
 
-
+        # Defensive approach - wrap the stream generation in a try-except
+        try:
+            debug_log("Calling client.generate_stream()")
+            stream_generator = client.generate_stream(messages, model, style)
+            debug_log("Successfully obtained stream generator")
+        except Exception as stream_init_error:
+            debug_log(f"Error initializing stream generator: {str(stream_init_error)}")
+            logger.error(f"Error initializing stream generator: {str(stream_init_error)}")
+            raise # Re-raise to be handled in the main catch block
 
-        #
-        if hasattr(client, 'is_loading_model') and client.is_loading_model():
-            logger.info("Model loading started during generation")
+        # After getting the generator, check if we're NOT in model loading state
+        if hasattr(client, 'is_loading_model') and not client.is_loading_model() and hasattr(app, 'query_one'):
             try:
-
-
-
-
+                debug_log("Model is ready for generation, updating UI")
+                logger.info("Model is ready for generation, updating UI")
+                loading = app.query_one("#loading-indicator")
+                loading.remove_class("model-loading")
+                loading.update("▪▪▪ Generating response...")
             except Exception as e:
-
+                debug_log(f"Error updating UI after stream init: {str(e)}")
+                logger.error(f"Error updating UI after stream init: {str(e)}")
 
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Process the stream with careful error handling
+        debug_log("Beginning to process stream chunks")
+        try:
+            async for chunk in stream_generator:
+                # Check for cancellation frequently
+                if asyncio.current_task().cancelled():
+                    debug_log("Task cancellation detected during chunk processing")
+                    logger.info("Task cancellation detected during chunk processing")
+                    # Close the client stream if possible
+                    if hasattr(client, 'cancel_stream'):
+                        debug_log("Calling client.cancel_stream() due to task cancellation")
+                        await client.cancel_stream()
+                    raise asyncio.CancelledError()
+
+                # Check if model loading state changed, but more safely
+                if hasattr(client, 'is_loading_model'):
+                    try:
+                        # Get the model loading state
+                        model_loading = client.is_loading_model()
+                        debug_log(f"Model loading state: {model_loading}")
 
-
-
-
-
-
-
-
-
-
-
-
-
+                        # Safely update the UI elements if they exist
+                        if hasattr(app, 'query_one'):
+                            try:
+                                loading = app.query_one("#loading-indicator")
+
+                                # Check for class existence first
+                                if model_loading and hasattr(loading, 'has_class') and not loading.has_class("model-loading"):
+                                    # Model loading started
+                                    debug_log("Model loading started during streaming")
+                                    logger.info("Model loading started during streaming")
+                                    loading.add_class("model-loading")
+                                    loading.update("⚙️ Loading Ollama model...")
+                                elif not model_loading and hasattr(loading, 'has_class') and loading.has_class("model-loading"):
+                                    # Model loading finished
+                                    debug_log("Model loading finished during streaming")
+                                    logger.info("Model loading finished during streaming")
+                                    loading.remove_class("model-loading")
+                                    loading.update("▪▪▪ Generating response...")
+                            except Exception as ui_e:
+                                debug_log(f"Error updating UI elements: {str(ui_e)}")
+                                logger.error(f"Error updating UI elements: {str(ui_e)}")
+                    except Exception as e:
+                        debug_log(f"Error checking model loading state: {str(e)}")
+                        logger.error(f"Error checking model loading state: {str(e)}")
 
-
-
-
-
-
-
-
-
-
-
-
-
+                # Process the chunk - with careful type handling
+                if chunk: # Only process non-empty chunks
+                    # Ensure chunk is a string - critical fix for providers returning other types
+                    if not isinstance(chunk, str):
+                        debug_log(f"WARNING: Received non-string chunk of type: {type(chunk).__name__}")
+                        try:
+                            # Try to convert to string if possible
+                            chunk = str(chunk)
+                            debug_log(f"Successfully converted chunk to string, length: {len(chunk)}")
+                        except Exception as e:
+                            debug_log(f"Error converting chunk to string: {str(e)}")
+                            # Skip this chunk since it can't be converted
+                            continue
+
+                    debug_log(f"Received chunk of length: {len(chunk)}")
+                    buffer.append(chunk)
+                    current_time = time.time()
 
-        #
-
+                    # Update UI if enough time has passed or buffer is large
+                    if current_time - last_update >= update_interval or len(''.join(buffer)) > 100:
+                        new_content = ''.join(buffer)
+                        full_response += new_content
+                        # Send content to UI
+                        debug_log(f"Updating UI with content length: {len(full_response)}")
+                        await callback(full_response)
+                        buffer = []
+                        last_update = current_time
+
+                        # Small delay to let UI catch up
+                        await asyncio.sleep(0.05)
+        except asyncio.CancelledError:
+            debug_log("CancelledError in stream processing")
+            raise
+        except Exception as chunk_error:
+            debug_log(f"Error processing stream chunks: {str(chunk_error)}")
+            logger.error(f"Error processing stream chunks: {str(chunk_error)}")
+            raise
 
         # Send any remaining content if the loop finished normally
         if buffer:
             new_content = ''.join(buffer)
             full_response += new_content
+            debug_log(f"Sending final content, total length: {len(full_response)}")
             await callback(full_response)
 
+        debug_log(f"Streaming response completed successfully. Response length: {len(full_response)}")
         logger.info(f"Streaming response completed successfully. Response length: {len(full_response)}")
         return full_response
 
     except asyncio.CancelledError:
         # This is expected when the user cancels via Escape
+        debug_log(f"Streaming response task cancelled. Partial response length: {len(full_response)}")
         logger.info(f"Streaming response task cancelled. Partial response length: {len(full_response)}")
         # Ensure the client stream is closed
         if hasattr(client, 'cancel_stream'):
-
+            debug_log("Calling client.cancel_stream() after cancellation")
+            try:
+                await client.cancel_stream()
+                debug_log("Successfully cancelled client stream")
+            except Exception as cancel_err:
+                debug_log(f"Error cancelling client stream: {str(cancel_err)}")
         # Return whatever was collected so far
         return full_response
 
     except Exception as e:
+        debug_log(f"Error during streaming response: {str(e)}")
         logger.error(f"Error during streaming response: {str(e)}")
         # Close the client stream if possible
         if hasattr(client, 'cancel_stream'):
-
-
+            debug_log("Attempting to cancel client stream after error")
+            try:
+                await client.cancel_stream()
+                debug_log("Successfully cancelled client stream after error")
+            except Exception as cancel_err:
+                debug_log(f"Error cancelling client stream after error: {str(cancel_err)}")
+        # Re-raise the exception for the worker runner to handle
+        # The @work decorator might catch this depending on exit_on_error
         raise
+    finally:
+        # Basic cleanup within the worker itself (optional, main cleanup in app)
+        debug_log("generate_streaming_response worker finished or errored.")
+        # Return the full response if successful, otherwise error is raised or cancellation occurred
+        # Note: If cancelled, CancelledError is raised, and @work might handle it.
+        # If successful, return the response.
+        # If error, exception is raised.
+        # Let's explicitly return the response on success.
+        # If cancelled or error, this return might not be reached.
+        if 'full_response' in locals():
+            return full_response
+        return None # Indicate completion without full response (e.g., error before loop)
 
-def ensure_ollama_running() -> bool:
+async def ensure_ollama_running() -> bool:
     """
     Check if Ollama is running and try to start it if not.
     Returns True if Ollama is running after check/start attempt.
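Note: the heart of the reworked worker is the buffered flush: chunks accumulate in a list and are pushed to the UI callback only when update_interval (100ms) has elapsed or the pending text exceeds 100 characters, so redraw frequency stays bounded no matter how small the chunks are. A minimal sketch of just that loop, with the chunk source and callback as stand-ins:

    import asyncio
    import time

    async def buffered_stream(chunks, callback, update_interval=0.1, max_pending=100):
        # Flush accumulated chunks to `callback` on a time or size threshold.
        full_response = ""
        buffer = []
        last_update = time.time()
        async for chunk in chunks:
            buffer.append(chunk)
            now = time.time()
            if now - last_update >= update_interval or len("".join(buffer)) > max_pending:
                full_response += "".join(buffer)
                await callback(full_response)  # UI sees the whole response so far
                buffer = []
                last_update = now
        if buffer:  # final flush when the stream finishes normally
            full_response += "".join(buffer)
            await callback(full_response)
        return full_response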
@@ -220,8 +391,7 @@ def ensure_ollama_running() -> bool:
             )
 
             # Wait a moment for it to start
-
-            time.sleep(2)
+            await asyncio.sleep(2) # Use asyncio.sleep instead of time.sleep
 
             # Check if process is still running
             if process.poll() is None:
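Note: swapping time.sleep(2) for await asyncio.sleep(2) keeps the Textual event loop responsive during the startup wait, but it also changes the function's contract: every caller must now await it. A sketch of the calling side; start_chat is hypothetical:

    import asyncio
    from app.utils import ensure_ollama_running  # as changed in this diff

    async def start_chat():
        if not await ensure_ollama_running():  # a plain call would now return a coroutine
            print("Ollama is not available")
            return
        # ... proceed with the chat UI ...

    asyncio.run(start_chat())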
@@ -254,3 +424,107 @@ def save_settings_to_config(model: str, style: str) -> None:
     CONFIG["default_model"] = model
     CONFIG["default_style"] = style
     save_config(CONFIG)
+
+def resolve_model_id(model_id_or_name: str) -> str:
+    """
+    Resolves a potentially short model ID or display name to the full model ID
+    stored in the configuration. Tries multiple matching strategies.
+    """
+    if not model_id_or_name:
+        logger.warning("resolve_model_id called with empty input, returning empty string.")
+        return ""
+
+    input_lower = model_id_or_name.lower().strip()
+    logger.info(f"Attempting to resolve model identifier: '{input_lower}'")
+
+    available_models = CONFIG.get("available_models", {})
+    if not available_models:
+        logger.warning("No available_models found in CONFIG to resolve against.")
+        return model_id_or_name # Return original if no models to check
+
+    # 1. Check if the input is already a valid full ID (must contain a date suffix)
+    # Full Claude IDs should have format like "claude-3-opus-20240229" with a date suffix
+    for full_id in available_models:
+        if full_id.lower() == input_lower:
+            # Only consider it a full ID if it contains a date suffix (like -20240229)
+            if "-202" in full_id: # Check for date suffix
+                logger.info(f"Input '{model_id_or_name}' is already a full ID with date suffix: '{full_id}'.")
+                return full_id # Return the canonical full_id
+            else:
+                logger.warning(f"Input '{model_id_or_name}' matches a model ID but lacks date suffix.")
+                # Continue searching for a better match with date suffix
+
+    logger.debug(f"Input '{input_lower}' is not a direct full ID match. Checking other criteria...")
+    logger.debug(f"Available models for matching: {list(available_models.keys())}")
+
+    best_match = None
+    match_type = "None"
+
+    # 2. Iterate through available models for other matches
+    for full_id, model_info in available_models.items():
+        full_id_lower = full_id.lower()
+        display_name = model_info.get("display_name", "")
+        display_name_lower = display_name.lower()
+
+        logger.debug(f"Comparing '{input_lower}' against '{full_id_lower}' (Display: '{display_name}')")
+
+        # 2a. Exact match on display name (case-insensitive)
+        if display_name_lower == input_lower:
+            logger.info(f"Resolved '{model_id_or_name}' to '{full_id}' via exact display name match.")
+            return full_id # Exact display name match is high confidence
+
+        # 2b. Check if input is a known short alias (handle common cases explicitly)
+        # Special case for Claude 3.7 Sonnet which seems to be causing issues
+        if input_lower == "claude-3.7-sonnet":
+            # Hardcoded resolution for this specific model
+            claude_37_id = "claude-3-7-sonnet-20250219"
+            logger.warning(f"Special case: Directly mapping '{input_lower}' to '{claude_37_id}'")
+            # Check if this ID exists in available models
+            for model_id in available_models:
+                if model_id.lower() == claude_37_id.lower():
+                    logger.info(f"Found exact match for hardcoded ID: {model_id}")
+                    return model_id
+            # If not found in available models, return the hardcoded ID anyway
+            logger.warning(f"Hardcoded ID '{claude_37_id}' not found in available models, returning it anyway")
+            return claude_37_id
+
+        # Map common short names to their expected full ID prefixes
+        short_aliases = {
+            "claude-3-opus": "claude-3-opus-",
+            "claude-3-sonnet": "claude-3-sonnet-",
+            "claude-3-haiku": "claude-3-haiku-",
+            "claude-3.5-sonnet": "claude-3-5-sonnet-", # Note the dot vs hyphen
+            "claude-3.7-sonnet": "claude-3-7-sonnet-" # Added this specific case
+        }
+        if input_lower in short_aliases and full_id_lower.startswith(short_aliases[input_lower]):
+            logger.info(f"Resolved '{model_id_or_name}' to '{full_id}' via known short alias match.")
+            # This is also high confidence
+            return full_id
+
+        # 2c. Check if input is a prefix of the full ID (more general, lower confidence)
+        if full_id_lower.startswith(input_lower):
+            logger.debug(f"Potential prefix match: '{input_lower}' vs '{full_id_lower}'")
+            # Don't return immediately, might find a better match (e.g., display name or alias)
+            if best_match is None: # Only take prefix if no other match found yet
+                best_match = full_id
+                match_type = "Prefix"
+                logger.debug(f"Setting best_match to '{full_id}' based on prefix.")
+
+        # 2d. Check derived short name from display name (less reliable, keep as lower priority)
+        # Normalize display name: lower, replace space and dot with hyphen
+        derived_short_name = display_name_lower.replace(" ", "-").replace(".", "-")
+        if derived_short_name == input_lower:
+            logger.debug(f"Potential derived short name match: '{input_lower}' vs derived '{derived_short_name}' from '{display_name}'")
+            # Prioritize this over a simple prefix match if found
+            if best_match is None or match_type == "Prefix":
+                best_match = full_id
+                match_type = "Derived Short Name"
+                logger.debug(f"Updating best_match to '{full_id}' based on derived name.")
+
+    # 3. Return best match found or original input
+    if best_match:
+        logger.info(f"Returning best match found for '{model_id_or_name}': '{best_match}' (Type: {match_type})")
+        return best_match
+    else:
+        logger.warning(f"Could not resolve model ID or name '{model_id_or_name}' to any known full ID. Returning original.")
+        return model_id_or_name
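Note: resolve_model_id tries matchers in decreasing confidence: exact full ID (trusted only with a date suffix), exact display name, the hardcoded claude-3.7-sonnet mapping, known short aliases, then prefix and display-name-derived matches as lower-confidence fallbacks. Assuming CONFIG entries shaped like the ones this function reads (the two entries below are illustrative, not the real config), usage looks like:

    from app.config import CONFIG
    from app.utils import resolve_model_id

    CONFIG["available_models"] = {  # hypothetical entries for demonstration
        "claude-3-haiku-20240307": {"display_name": "Claude 3 Haiku"},
        "claude-3-5-sonnet-20240620": {"display_name": "Claude 3.5 Sonnet"},
    }

    print(resolve_model_id("claude-3-haiku-20240307"))  # full ID with date suffix, returned as-is
    print(resolve_model_id("Claude 3.5 Sonnet"))        # display name -> "claude-3-5-sonnet-20240620"
    print(resolve_model_id("claude-3.5-sonnet"))        # short alias -> "claude-3-5-sonnet-20240620"
    print(resolve_model_id("gpt-4o"))                   # unknown, returned unchanged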
{chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.9
+Version: 0.2.99
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
chat_console-0.2.99.dist-info/RECORD
ADDED
@@ -0,0 +1,24 @@
+app/__init__.py,sha256=sj_ZaaiYluWSCqDTjASHuPv8IDldwoemQfimWN2okt8,131
+app/config.py,sha256=KawltE7cK2bR9wbe1NSlepwWIjkiFw2bg3vbLmUnP38,7626
+app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
+app/main.py,sha256=RmVCecgpAvRu6mzX2bu5kXy_wyDdjGpuGYbTb33vM_8,70711
+app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
+app/utils.py,sha256=5AbHvQpiMCDNyVgbjUwNJmrZsx6DpQ9hxm_CsKWjPoI,27541
+app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
+app/api/anthropic.py,sha256=q3TeniuiYDw5AWK1isESmtWvN1HnQowcDlkFm0lp5wE,12317
+app/api/base.py,sha256=e4SdUFmpeZPK3nNyvWnPOGQaiV1v5gwL1QMq445Qzoo,5743
+app/api/ollama.py,sha256=Yg2K3iqZvlmHhINISSWBQezP3HOzBHvoIIH0TdiKpds,60938
+app/api/openai.py,sha256=TsxbWOGTdiAa-swMBN3VBkKKkc7nucyMQAhj6fNANV8,6074
+app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
+app/ui/chat_interface.py,sha256=fzc6-_12zf1yflSJi7pX5zZaBy5Ar9APfqYISVMLrg4,15971
+app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
+app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
+app/ui/model_selector.py,sha256=eqwJamLddgt4fS0pJbCyCBe-_shqESm3gM8vJTOWDAs,16956
+app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
+app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
+chat_console-0.2.99.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.2.99.dist-info/METADATA,sha256=ybXgjn-sJk32u9DaSkrSikyGnC8gNaCEO-GaRCkpTSY,2922
+chat_console-0.2.99.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+chat_console-0.2.99.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.2.99.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.2.99.dist-info/RECORD,,
chat_console-0.2.9.dist-info/RECORD
DELETED
@@ -1,24 +0,0 @@
-app/__init__.py,sha256=g2BzewDN5X96Dl5Zzw8uag1TBEdPIU1ceTm7u-BJrjM,130
-app/config.py,sha256=sKNp6Za4ZfW-CZBOvEv0TncAS77AnKi86hTM51C4KQ4,5227
-app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=k726xRBcuPgbUsUg4s-REhtaljccjDLNzA_C-fPkQk4,48866
-app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=IyINMrM6oGXtN5HRPuKoFEyfKg0fR4FVXIi_0e2KxI0,11798
-app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
-app/api/anthropic.py,sha256=x5PmBXEKe_ow2NWk8XdqSPR0hLOdCc_ypY5QAySeA78,4234
-app/api/base.py,sha256=-6RSxSpqe-OMwkaq1wVWbu3pVkte-ZYy8rmdvt-Qh48,3953
-app/api/ollama.py,sha256=FTIlgZmvpZd6K4HL2nUD19-p9Xb1TA859LfnCgewpcU,51354
-app/api/openai.py,sha256=1fYgFXXL6yj_7lQ893Yj28RYG4M8d6gt_q1gzhhjcig,3641
-app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
-app/ui/chat_interface.py,sha256=R8tdy72TcT7veemUzcJOjbPY32WizBdNHgfmq69EFfA,14275
-app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
-app/ui/model_browser.py,sha256=5h3gVsuGIUrXjYVF-QclZFhYtX2kH14LvT22Ufm9etg,49453
-app/ui/model_selector.py,sha256=Aj1irAs9DQMn8wfcPsFZGxWmx0JTzHjSe7pVdDMwqTQ,13182
-app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
-app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
-chat_console-0.2.9.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.2.9.dist-info/METADATA,sha256=zTSJePqMsi0n6fEz8s4gtLwHe_726-ijfTjPwH_Mumw,2921
-chat_console-0.2.9.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-chat_console-0.2.9.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.2.9.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.2.9.dist-info/RECORD,,
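Note: each RECORD entry is path,sha256=<digest>,size, where the digest is urlsafe base64 without padding per the wheel spec. A small sketch for recomputing one entry's hash (the path is from the RECORD above; the helper itself is illustrative):

    import base64
    import hashlib

    def record_digest(path):
        # Compute the sha256 field a wheel RECORD carries for `path`
        digest = hashlib.sha256(open(path, "rb").read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

    print(record_digest("app/utils.py"))  # compare with the RECORD line above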
{chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/WHEEL: file without changes
{chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/entry_points.txt: file without changes
{chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/licenses/LICENSE: file without changes
{chat_console-0.2.9.dist-info → chat_console-0.2.99.dist-info}/top_level.txt: file without changes