chat-console 0.2.99__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/utils.py CHANGED
@@ -116,82 +116,68 @@ async def generate_conversation_title(message: str, model: str, client: Any) ->
      return f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"

  # Make this the worker function directly
- @work(exit_on_error=True)
  async def generate_streaming_response(
      app: 'SimpleChatApp',
      messages: List[Dict],
      model: str,
      style: str,
      client: Any,
-     callback: Callable[[str], Awaitable[None]] # More specific type hint for callback
- ) -> Optional[str]: # Return Optional[str] as cancellation might return None implicitly or error
-     """Generate a streaming response from the model (as a Textual worker)"""
-     # Import debug_log function from main
-     # Note: This import might be slightly less reliable inside a worker, but let's try
+     callback: Callable[[str], Awaitable[None]]
+ ) -> Optional[str]:
+     """
+     Generate a streaming response from the model (as a Textual worker).
+     Refactored to be a coroutine, not an async generator.
+     """
      try:
          from app.main import debug_log
      except ImportError:
-         debug_log = lambda msg: None # Fallback
-
-     # Worker function needs to handle its own state and cleanup partially
-     # The main app will also need cleanup logic in generate_response
+         debug_log = lambda msg: None

      logger.info(f"Starting streaming response with model: {model}")
      debug_log(f"Starting streaming response with model: '{model}', client type: {type(client).__name__}")
-
-     # Very defensive check of messages format
+
      if not messages:
          debug_log("Error: messages list is empty")
          raise ValueError("Messages list cannot be empty")
-
+
      for i, msg in enumerate(messages):
          try:
              debug_log(f"Message {i}: role={msg.get('role', 'missing')}, content_len={len(msg.get('content', ''))}")
-             # Ensure essential fields exist
              if 'role' not in msg:
                  debug_log(f"Adding missing 'role' to message {i}")
-                 msg['role'] = 'user' # Default to user
+                 msg['role'] = 'user'
              if 'content' not in msg:
                  debug_log(f"Adding missing 'content' to message {i}")
-                 msg['content'] = '' # Default to empty string
+                 msg['content'] = ''
          except Exception as e:
              debug_log(f"Error checking message {i}: {str(e)}")
-             # Try to repair the message
              messages[i] = {
                  'role': 'user',
                  'content': str(msg) if msg else ''
              }
              debug_log(f"Repaired message {i}")
-
-     debug_log(f"Messages validation complete: {len(messages)} total messages")
-
-     # Import time module within the worker function scope
+
      import time
-
+
      full_response = ""
      buffer = []
      last_update = time.time()
-     update_interval = 0.1 # Update UI every 100ms
-
+     update_interval = 0.05 # Reduced interval for more frequent updates
+
      try:
-         # Check that we have a valid client and model before proceeding
          if client is None:
              debug_log("Error: client is None, cannot proceed with streaming")
              raise ValueError("Model client is None, cannot proceed with streaming")
-
-         # Check if the client has the required generate_stream method
+
          if not hasattr(client, 'generate_stream'):
              debug_log(f"Error: client {type(client).__name__} does not have generate_stream method")
              raise ValueError(f"Client {type(client).__name__} does not support streaming")
-
-         # Set initial model loading state if using Ollama
-         # Always show the model loading indicator for Ollama until we confirm otherwise
+
          is_ollama = 'ollama' in str(type(client)).lower()
          debug_log(f"Is Ollama client: {is_ollama}")
-
+
          if is_ollama and hasattr(app, 'query_one'):
              try:
-                 # Show model loading indicator by default for Ollama
                  debug_log("Showing initial model loading indicator for Ollama")
                  logger.info("Showing initial model loading indicator for Ollama")
                  loading = app.query_one("#loading-indicator")
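The substantive change in this hunk is structural: the `@work(exit_on_error=True)` decorator is dropped and, per the new docstring, the function is now a plain coroutine rather than an async generator, which leaves scheduling it as a Textual worker to the caller. The difference between the two shapes, as a minimal sketch (names here are illustrative, not the app's actual call site):

    # Old shape: an async generator; the caller pulls chunks with
    # "async for" and there is no conventional return value.
    async def stream_as_generator(stream):
        async for chunk in stream:
            yield chunk

    # New shape: a coroutine; it pushes UI updates through a callback
    # and returns the accumulated text, so the caller can await it
    # (or hand it to a worker runner) and get the final string back.
    async def stream_as_coroutine(stream, callback):
        full = ""
        async for chunk in stream:
            full += chunk
            await callback(full)
        return full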
@@ -200,12 +186,10 @@ async def generate_streaming_response(
              except Exception as e:
                  debug_log(f"Error setting initial Ollama loading state: {str(e)}")
                  logger.error(f"Error setting initial Ollama loading state: {str(e)}")
-
-         # Now proceed with streaming
+
          debug_log(f"Starting stream generation with messages length: {len(messages)}")
          logger.info(f"Starting stream generation for model: {model}")
-
-         # Defensive approach - wrap the stream generation in a try-except
+
          try:
              debug_log("Calling client.generate_stream()")
              stream_generator = client.generate_stream(messages, model, style)
@@ -213,9 +197,8 @@ async def generate_streaming_response(
          except Exception as stream_init_error:
              debug_log(f"Error initializing stream generator: {str(stream_init_error)}")
              logger.error(f"Error initializing stream generator: {str(stream_init_error)}")
-             raise # Re-raise to be handled in the main catch block
-
-         # After getting the generator, check if we're NOT in model loading state
+             raise
+
          if hasattr(client, 'is_loading_model') and not client.is_loading_model() and hasattr(app, 'query_one'):
              try:
                  debug_log("Model is ready for generation, updating UI")
@@ -226,42 +209,31 @@ async def generate_streaming_response(
              except Exception as e:
                  debug_log(f"Error updating UI after stream init: {str(e)}")
                  logger.error(f"Error updating UI after stream init: {str(e)}")
-
-         # Process the stream with careful error handling
+
          debug_log("Beginning to process stream chunks")
          try:
              async for chunk in stream_generator:
-                 # Check for cancellation frequently
                  if asyncio.current_task().cancelled():
                      debug_log("Task cancellation detected during chunk processing")
                      logger.info("Task cancellation detected during chunk processing")
-                     # Close the client stream if possible
                      if hasattr(client, 'cancel_stream'):
                          debug_log("Calling client.cancel_stream() due to task cancellation")
                          await client.cancel_stream()
                      raise asyncio.CancelledError()
-
-                 # Check if model loading state changed, but more safely
+
                  if hasattr(client, 'is_loading_model'):
                      try:
-                         # Get the model loading state
                          model_loading = client.is_loading_model()
                          debug_log(f"Model loading state: {model_loading}")
-
-                         # Safely update the UI elements if they exist
                          if hasattr(app, 'query_one'):
                              try:
                                  loading = app.query_one("#loading-indicator")
-
-                                 # Check for class existence first
                                  if model_loading and hasattr(loading, 'has_class') and not loading.has_class("model-loading"):
-                                     # Model loading started
                                      debug_log("Model loading started during streaming")
                                      logger.info("Model loading started during streaming")
                                      loading.add_class("model-loading")
                                      loading.update("⚙️ Loading Ollama model...")
                                  elif not model_loading and hasattr(loading, 'has_class') and loading.has_class("model-loading"):
-                                     # Model loading finished
                                      debug_log("Model loading finished during streaming")
                                      logger.info("Model loading finished during streaming")
                                      loading.remove_class("model-loading")
@@ -272,37 +244,51 @@ async def generate_streaming_response(
                      except Exception as e:
                          debug_log(f"Error checking model loading state: {str(e)}")
                          logger.error(f"Error checking model loading state: {str(e)}")
-
-                 # Process the chunk - with careful type handling
-                 if chunk: # Only process non-empty chunks
-                     # Ensure chunk is a string - critical fix for providers returning other types
+
+                 if chunk:
                      if not isinstance(chunk, str):
                          debug_log(f"WARNING: Received non-string chunk of type: {type(chunk).__name__}")
                          try:
-                             # Try to convert to string if possible
                              chunk = str(chunk)
                              debug_log(f"Successfully converted chunk to string, length: {len(chunk)}")
                          except Exception as e:
                              debug_log(f"Error converting chunk to string: {str(e)}")
-                             # Skip this chunk since it can't be converted
                              continue
-
+
                      debug_log(f"Received chunk of length: {len(chunk)}")
                      buffer.append(chunk)
                      current_time = time.time()
-
-                     # Update UI if enough time has passed or buffer is large
-                     if current_time - last_update >= update_interval or len(''.join(buffer)) > 100:
+
+                     # Always update immediately for the first few chunks
+                     if (current_time - last_update >= update_interval or
+                             len(''.join(buffer)) > 5 or # Reduced buffer size threshold
+                             len(full_response) < 50): # More aggressive updates for early content
+
                          new_content = ''.join(buffer)
                          full_response += new_content
-                         # Send content to UI
                          debug_log(f"Updating UI with content length: {len(full_response)}")
-                         await callback(full_response)
+
+                         # Print to console for debugging
+                         print(f"Streaming update: +{len(new_content)} chars, total: {len(full_response)}")
+
+                         try:
+                             # Call the UI callback with the full response so far
+                             await callback(full_response)
+                             debug_log("UI callback completed successfully")
+
+                             # Force app refresh after each update
+                             if hasattr(app, 'refresh'):
+                                 app.refresh(layout=True) # Force layout refresh for all models
+                         except Exception as callback_err:
+                             debug_log(f"Error in UI callback: {str(callback_err)}")
+                             logger.error(f"Error in UI callback: {str(callback_err)}")
+                             print(f"Error updating UI: {str(callback_err)}")
+
                          buffer = []
                          last_update = current_time

-                 # Small delay to let UI catch up
-                 await asyncio.sleep(0.05)
+                 # Shorter sleep between updates for more responsive streaming
+                 await asyncio.sleep(0.02)
          except asyncio.CancelledError:
              debug_log("CancelledError in stream processing")
              raise
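The buffering logic above flushes to the UI when any of three conditions holds: the 0.05 s interval has elapsed, the pending buffer exceeds 5 characters, or fewer than 50 characters have been rendered so far (so the first words appear immediately rather than after a full interval). Distilled into a standalone sketch (hypothetical names; the UI refresh and error handling are omitted):

    import asyncio
    import time

    async def drain_stream(chunks, callback, update_interval=0.05):
        buffer, full_response = [], ""
        last_update = time.time()
        async for chunk in chunks:
            buffer.append(chunk)
            now = time.time()
            if (now - last_update >= update_interval or
                    len(''.join(buffer)) > 5 or      # small buffer threshold
                    len(full_response) < 50):        # eager early updates
                full_response += ''.join(buffer)
                await callback(full_response)        # whole text so far, not a delta
                buffer, last_update = [], now
            await asyncio.sleep(0.02)                # yield to the event loop
        if buffer:                                   # flush the remainder
            full_response += ''.join(buffer)
            await callback(full_response)
        return full_response

Because the callback always receives the full accumulated response rather than a delta, a skipped or coalesced update is harmless; the next flush repaints everything.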
@@ -311,22 +297,55 @@ async def generate_streaming_response(
              logger.error(f"Error processing stream chunks: {str(chunk_error)}")
              raise

-         # Send any remaining content if the loop finished normally
          if buffer:
              new_content = ''.join(buffer)
              full_response += new_content
              debug_log(f"Sending final content, total length: {len(full_response)}")
+             try:
+                 await callback(full_response)
+                 debug_log("Final UI callback completed successfully")
+
+                 debug_log("Forcing final UI refresh sequence for all models")
+                 try:
+                     if hasattr(app, 'refresh'):
+                         app.refresh(layout=False)
+                         await asyncio.sleep(0.02)
+                         try:
+                             messages_container = app.query_one("#messages-container")
+                             if messages_container and hasattr(messages_container, 'scroll_end'):
+                                 messages_container.scroll_end(animate=False)
+                         except Exception:
+                             pass
+                         app.refresh(layout=True)
+                         await asyncio.sleep(0.02)
+                         try:
+                             messages_container = app.query_one("#messages-container")
+                             if messages_container and hasattr(messages_container, 'scroll_end'):
+                                 messages_container.scroll_end(animate=False)
+                         except Exception:
+                             pass
+                 except Exception as refresh_err:
+                     debug_log(f"Error forcing final UI refresh: {str(refresh_err)}")
+             except Exception as callback_err:
+                 debug_log(f"Error in final UI callback: {str(callback_err)}")
+                 logger.error(f"Error in final UI callback: {str(callback_err)}")
+
+         try:
+             await asyncio.sleep(0.05)
+             debug_log("Sending one final callback to ensure UI refresh")
              await callback(full_response)
+             if hasattr(app, 'refresh'):
+                 app.refresh(layout=True)
+         except Exception as final_err:
+             debug_log(f"Error in final extra callback: {str(final_err)}")

          debug_log(f"Streaming response completed successfully. Response length: {len(full_response)}")
          logger.info(f"Streaming response completed successfully. Response length: {len(full_response)}")
          return full_response
-
+
      except asyncio.CancelledError:
-         # This is expected when the user cancels via Escape
          debug_log(f"Streaming response task cancelled. Partial response length: {len(full_response)}")
          logger.info(f"Streaming response task cancelled. Partial response length: {len(full_response)}")
-         # Ensure the client stream is closed
          if hasattr(client, 'cancel_stream'):
              debug_log("Calling client.cancel_stream() after cancellation")
              try:
@@ -334,13 +353,11 @@ async def generate_streaming_response(
                  debug_log("Successfully cancelled client stream")
              except Exception as cancel_err:
                  debug_log(f"Error cancelling client stream: {str(cancel_err)}")
-         # Return whatever was collected so far
          return full_response
-
+
      except Exception as e:
          debug_log(f"Error during streaming response: {str(e)}")
          logger.error(f"Error during streaming response: {str(e)}")
-         # Close the client stream if possible
          if hasattr(client, 'cancel_stream'):
              debug_log("Attempting to cancel client stream after error")
              try:
@@ -348,21 +365,13 @@ async def generate_streaming_response(
                  debug_log("Successfully cancelled client stream after error")
              except Exception as cancel_err:
                  debug_log(f"Error cancelling client stream after error: {str(cancel_err)}")
-         # Re-raise the exception for the worker runner to handle
-         # The @work decorator might catch this depending on exit_on_error
          raise
+
      finally:
-         # Basic cleanup within the worker itself (optional, main cleanup in app)
          debug_log("generate_streaming_response worker finished or errored.")
-         # Return the full response if successful, otherwise error is raised or cancellation occurred
-         # Note: If cancelled, CancelledError is raised, and @work might handle it.
-         # If successful, return the response.
-         # If error, exception is raised.
-         # Let's explicitly return the response on success.
-         # If cancelled or error, this return might not be reached.
          if 'full_response' in locals():
-             return full_response
-         return None # Indicate completion without full response (e.g., error before loop)
+             return full_response
+         return None

  async def ensure_ollama_running() -> bool:
      """
@@ -429,6 +438,8 @@ def resolve_model_id(model_id_or_name: str) -> str:
      """
      Resolves a potentially short model ID or display name to the full model ID
      stored in the configuration. Tries multiple matching strategies.
+
+     Fix: Only apply dot-to-colon conversion for Ollama models, not for OpenAI/Anthropic/custom.
      """
      if not model_id_or_name:
          logger.warning("resolve_model_id called with empty input, returning empty string.")
@@ -442,7 +453,35 @@ def resolve_model_id(model_id_or_name: str) -> str:
          logger.warning("No available_models found in CONFIG to resolve against.")
          return model_id_or_name # Return original if no models to check

-     # 1. Check if the input is already a valid full ID (must contain a date suffix)
+     # Determine provider if possible
+     provider = None
+     if input_lower in available_models:
+         provider = available_models[input_lower].get("provider")
+     else:
+         # Try to find by display name
+         for model_info in available_models.values():
+             if model_info.get("display_name", "").lower() == input_lower:
+                 provider = model_info.get("provider")
+                 break
+
+     # Special case for Ollama models with version format (model:version)
+     if provider == "ollama" and ":" in input_lower and not input_lower.startswith("claude-"):
+         logger.info(f"Input '{input_lower}' appears to be an Ollama model with version, returning as-is")
+         return model_id_or_name
+
+     # Only apply dot-to-colon for Ollama models
+     if provider == "ollama" and "." in input_lower and not input_lower.startswith("claude-"):
+         logger.info(f"Input '{input_lower}' appears to be an Ollama model with dot notation")
+         if ":" not in input_lower:
+             parts = input_lower.split(".")
+             if len(parts) == 2:
+                 base_model, version = parts
+                 ollama_format = f"{base_model}:{version}"
+                 logger.info(f"Converting '{input_lower}' to Ollama format: '{ollama_format}'")
+                 return ollama_format
+         return model_id_or_name
+
+     # 2. Check if the input is already a valid full ID (must contain a date suffix)
      # Full Claude IDs should have format like "claude-3-opus-20240229" with a date suffix
      for full_id in available_models:
          if full_id.lower() == input_lower:
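The effect of the new provider guard is that dot-to-colon rewriting now fires only when the configured provider is Ollama, so OpenAI or Anthropic IDs that legitimately contain dots pass through untouched. A hypothetical illustration (these registry entries are invented for the example; the real ones live in CONFIG["available_models"]):

    CONFIG = {"available_models": {
        "llama3.3": {"provider": "ollama", "display_name": "Llama 3.3"},
        "gpt-4.1":  {"provider": "openai", "display_name": "GPT-4.1"},
    }}

    resolve_model_id("llama3.3")   # Ollama entry, dot, no colon -> converted to "llama3:3"
    resolve_model_id("gpt-4.1")    # OpenAI entry -> no rewrite; resolved by the full-ID check below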
@@ -460,7 +499,7 @@ def resolve_model_id(model_id_or_name: str) -> str:
      best_match = None
      match_type = "None"

-     # 2. Iterate through available models for other matches
+     # 3. Iterate through available models for other matches
      for full_id, model_info in available_models.items():
          full_id_lower = full_id.lower()
          display_name = model_info.get("display_name", "")
@@ -468,12 +507,12 @@ def resolve_model_id(model_id_or_name: str) -> str:

          logger.debug(f"Comparing '{input_lower}' against '{full_id_lower}' (Display: '{display_name}')")

-         # 2a. Exact match on display name (case-insensitive)
+         # 3a. Exact match on display name (case-insensitive)
          if display_name_lower == input_lower:
              logger.info(f"Resolved '{model_id_or_name}' to '{full_id}' via exact display name match.")
              return full_id # Exact display name match is high confidence

-         # 2b. Check if input is a known short alias (handle common cases explicitly)
+         # 3b. Check if input is a known short alias (handle common cases explicitly)
          # Special case for Claude 3.7 Sonnet which seems to be causing issues
          if input_lower == "claude-3.7-sonnet":
              # Hardcoded resolution for this specific model
@@ -501,7 +540,7 @@ def resolve_model_id(model_id_or_name: str) -> str:
              # This is also high confidence
              return full_id

-         # 2c. Check if input is a prefix of the full ID (more general, lower confidence)
+         # 3c. Check if input is a prefix of the full ID (more general, lower confidence)
          if full_id_lower.startswith(input_lower):
              logger.debug(f"Potential prefix match: '{input_lower}' vs '{full_id_lower}'")
              # Don't return immediately, might find a better match (e.g., display name or alias)
@@ -510,7 +549,7 @@ def resolve_model_id(model_id_or_name: str) -> str:
              match_type = "Prefix"
              logger.debug(f"Setting best_match to '{full_id}' based on prefix.")

-         # 2d. Check derived short name from display name (less reliable, keep as lower priority)
+         # 3d. Check derived short name from display name (less reliable, keep as lower priority)
          # Normalize display name: lower, replace space and dot with hyphen
          derived_short_name = display_name_lower.replace(" ", "-").replace(".", "-")
          if derived_short_name == input_lower:
@@ -521,7 +560,7 @@ def resolve_model_id(model_id_or_name: str) -> str:
              match_type = "Derived Short Name"
              logger.debug(f"Updating best_match to '{full_id}' based on derived name.")

-     # 3. Return best match found or original input
+     # 4. Return best match found or original input
      if best_match:
          logger.info(f"Returning best match found for '{model_id_or_name}': '{best_match}' (Type: {match_type})")
          return best_match
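Taken together with the renumbered comments, the resolution order is now: (1) Ollama-specific normalization, (2) exact full-ID match, (3) the per-model loop of display-name, alias, prefix, and derived-short-name matches, and (4) the best fallback match. For a hypothetical Anthropic entry:

    CONFIG = {"available_models": {
        "claude-3-opus-20240229": {"provider": "anthropic", "display_name": "Claude 3 Opus"},
    }}

    resolve_model_id("claude-3-opus-20240229")  # step 2: already the full ID
    resolve_model_id("Claude 3 Opus")           # step 3a: exact display-name match
    resolve_model_id("claude-3-opus")           # step 3c: prefix match, returned as best_match in step 4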
chat_console-0.3.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: chat-console
- Version: 0.2.99
+ Version: 0.3.4
  Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
  Home-page: https://github.com/wazacraftrfid/chat-console
  Author: Johnathan Greenaway
chat_console-0.3.4.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+ app/__init__.py,sha256=LkWSI2xSwkZ6_nVnreshrwqa32Mt9MUFijeJIusFt5I,130
+ app/config.py,sha256=KawltE7cK2bR9wbe1NSlepwWIjkiFw2bg3vbLmUnP38,7626
+ app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
+ app/main.py,sha256=FpufDX9CdSIvLU_sCVaRs0jqr8Lz56E_qCk1goHqNvI,73653
+ app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
+ app/utils.py,sha256=QBKL6TXS93jooHhZiWCRRzN0ACqQNERBE1hUq0K-S0c,28634
+ app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
+ app/api/anthropic.py,sha256=UpIP3CgAOUimdVyif41MhBOCAgOyFO8mX9SFQMKRAmc,12483
+ app/api/base.py,sha256=bqBT4jne_W6Cvj_GoWWclV4Uk95fQvt-kkYqqZFJd8M,5769
+ app/api/ollama.py,sha256=EBEEKXbgAYWEg_zF5PO_UKO5l_aoU3J_7tfCj9e-fqs,61699
+ app/api/openai.py,sha256=6ORruzuuZtIjME3WK-g7kXf7cBmM4td5Njv9JLaWh7E,9557
+ app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
+ app/ui/chat_interface.py,sha256=IwNFirHEK55ZHscmMWyC4OhCfD9gSqd4FRK-1RLefhw,17393
+ app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
+ app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
+ app/ui/model_selector.py,sha256=ue3rbZfjVsjli-rJN5mfSqq23Ci7NshmTb4xWS-uG5k,18685
+ app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
+ app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
+ chat_console-0.3.4.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+ chat_console-0.3.4.dist-info/METADATA,sha256=GdZ4D-htpXwUx8w_eM9_z9PQgUIDjrQk4Q3igAXuff0,2921
+ chat_console-0.3.4.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+ chat_console-0.3.4.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+ chat_console-0.3.4.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+ chat_console-0.3.4.dist-info/RECORD,,
chat_console-0.3.4.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (79.0.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

chat_console-0.2.99.dist-info/RECORD DELETED
@@ -1,24 +0,0 @@
- app/__init__.py,sha256=sj_ZaaiYluWSCqDTjASHuPv8IDldwoemQfimWN2okt8,131
- app/config.py,sha256=KawltE7cK2bR9wbe1NSlepwWIjkiFw2bg3vbLmUnP38,7626
- app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
- app/main.py,sha256=RmVCecgpAvRu6mzX2bu5kXy_wyDdjGpuGYbTb33vM_8,70711
- app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
- app/utils.py,sha256=5AbHvQpiMCDNyVgbjUwNJmrZsx6DpQ9hxm_CsKWjPoI,27541
- app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
- app/api/anthropic.py,sha256=q3TeniuiYDw5AWK1isESmtWvN1HnQowcDlkFm0lp5wE,12317
- app/api/base.py,sha256=e4SdUFmpeZPK3nNyvWnPOGQaiV1v5gwL1QMq445Qzoo,5743
- app/api/ollama.py,sha256=Yg2K3iqZvlmHhINISSWBQezP3HOzBHvoIIH0TdiKpds,60938
- app/api/openai.py,sha256=TsxbWOGTdiAa-swMBN3VBkKKkc7nucyMQAhj6fNANV8,6074
- app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
- app/ui/chat_interface.py,sha256=fzc6-_12zf1yflSJi7pX5zZaBy5Ar9APfqYISVMLrg4,15971
- app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
- app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
- app/ui/model_selector.py,sha256=eqwJamLddgt4fS0pJbCyCBe-_shqESm3gM8vJTOWDAs,16956
- app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
- app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
- chat_console-0.2.99.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
- chat_console-0.2.99.dist-info/METADATA,sha256=ybXgjn-sJk32u9DaSkrSikyGnC8gNaCEO-GaRCkpTSY,2922
- chat_console-0.2.99.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- chat_console-0.2.99.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
- chat_console-0.2.99.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
- chat_console-0.2.99.dist-info/RECORD,,