chat-console 0.2.3.tar.gz → 0.2.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {chat_console-0.2.3 → chat_console-0.2.6}/PKG-INFO +1 -1
- {chat_console-0.2.3 → chat_console-0.2.6}/app/__init__.py +1 -1
- {chat_console-0.2.3 → chat_console-0.2.6}/app/api/ollama.py +201 -170
- {chat_console-0.2.3 → chat_console-0.2.6}/app/main.py +3 -4
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/model_browser.py +351 -202
- {chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/PKG-INFO +1 -1
- {chat_console-0.2.3 → chat_console-0.2.6}/LICENSE +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/README.md +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/api/__init__.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/api/anthropic.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/api/base.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/api/openai.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/config.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/database.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/models.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/__init__.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/chat_interface.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/chat_list.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/model_selector.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/search.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/ui/styles.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/app/utils.py +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/SOURCES.txt +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/dependency_links.txt +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/entry_points.txt +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/requires.txt +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/top_level.txt +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/setup.cfg +0 -0
- {chat_console-0.2.3 → chat_console-0.2.6}/setup.py +0 -0
{chat_console-0.2.3 → chat_console-0.2.6}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.3
+Version: 0.2.6
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
```
{chat_console-0.2.3 → chat_console-0.2.6}/app/api/ollama.py

```diff
@@ -2,6 +2,10 @@ import aiohttp
 import asyncio
 import json
 import logging
+import os
+import time
+from datetime import datetime, timedelta
+from pathlib import Path
 from typing import List, Dict, Any, Optional, Generator, AsyncGenerator
 from .base import BaseModelClient
 
```
```diff
@@ -18,6 +22,9 @@ class OllamaClient(BaseModelClient):
         # Track active stream session
         self._active_stream_session = None
 
+        # Path to the cached models file
+        self.models_cache_path = Path(__file__).parent.parent / "data" / "ollama-models.json"
+
         # Try to start Ollama if not running
         if not ensure_ollama_running():
             raise Exception(f"Failed to start Ollama server. Please ensure Ollama is installed and try again.")
```
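For readers less familiar with the `Path(__file__)` idiom the new cache path relies on, here is a minimal standalone sketch (the `mkdir` call is an assumption — the diff itself does not create the directory):

```python
from pathlib import Path

# __file__ is .../app/api/ollama.py; two .parent hops reach .../app,
# so the cache file lands at .../app/data/ollama-models.json
cache_path = Path(__file__).parent.parent / "data" / "ollama-models.json"
cache_path.parent.mkdir(parents=True, exist_ok=True)  # ensure app/data exists
print(cache_path)
```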
```diff
@@ -280,32 +287,59 @@ class OllamaClient(BaseModelClient):
             "modified_at": None
         }
 
-    async def …
-        """…
-        logger.info("…
+    async def _fetch_and_cache_models(self) -> List[Dict[str, Any]]:
+        """Fetch models from Ollama website and cache them for 24 hours"""
+        logger.info("Performing a full fetch of Ollama models to update cache")
+
         try:
-            # First …
+            # First load models from base file
+            base_models = []
+            try:
+                # Read the base models file
+                base_file_path = Path(__file__).parent.parent / "data" / "ollama-models-base.json"
+                if base_file_path.exists():
+                    with open(base_file_path, 'r') as f:
+                        base_data = json.load(f)
+                    if "models" in base_data:
+                        base_models = base_data["models"]
+                        logger.info(f"Loaded {len(base_models)} models from base file")
+
+                        # Process models from the base file to ensure consistent format
+                        for model in base_models:
+                            # Convert any missing fields to expected format
+                            if "parameter_size" not in model and "variants" in model and model["variants"]:
+                                # Use the first variant as the default parameter size if not specified
+                                for variant in model["variants"]:
+                                    if any(char.isdigit() for char in variant):
+                                        # This looks like a size variant (e.g., "7b", "70b")
+                                        if variant.lower().endswith('b'):
+                                            model["parameter_size"] = variant.upper()
+                                        else:
+                                            model["parameter_size"] = f"{variant}B"
+                                        break
+
+            except Exception as e:
+                logger.warning(f"Error loading base models file: {str(e)}")
+
+            # Web scraping for more models
+            scraped_models = []
             try:
                 async with aiohttp.ClientSession() as session:
-                    # …
+                    # Get model data from the Ollama website search page (without query to get all models)
                     search_url = "https://ollama.com/search"
-                    if query:
-                        search_url += f"?q={query}"
 
-                    logger.info(f"Fetching models from Ollama web: {search_url}")
+                    logger.info(f"Fetching all models from Ollama web: {search_url}")
                     async with session.get(
                         search_url,
-                        timeout=…
+                        timeout=20, # Longer timeout for comprehensive scrape
                         headers={"User-Agent": "Mozilla/5.0 (compatible; chat-console/1.0)"}
                     ) as response:
                         if response.status == 200:
                             html = await response.text()
 
                             # Extract model data from JSON embedded in the page
-                            # The data is in a script tag with JSON containing model information
                             try:
                                 import re
-                                import json
 
                                 # Look for model data in JSON format
                                 model_match = re.search(r'window\.__NEXT_DATA__\s*=\s*({.+?});', html, re.DOTALL)
```
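The scraper pulls the page's embedded Next.js state blob and parses it as JSON. A standalone sketch of that extraction follows — the URL, headers, and regex come from the diff itself; the function name and surrounding plumbing are illustrative:

```python
import json
import re

import aiohttp


async def fetch_next_data(url: str = "https://ollama.com/search") -> dict:
    """Return the parsed window.__NEXT_DATA__ payload from an Ollama page."""
    async with aiohttp.ClientSession() as session:
        async with session.get(
            url,
            timeout=aiohttp.ClientTimeout(total=20),
            headers={"User-Agent": "Mozilla/5.0 (compatible; chat-console/1.0)"},
        ) as response:
            response.raise_for_status()
            html = await response.text()

    # Next.js serializes page state into the document; the diff's regex
    # captures that JSON blob so it can be parsed instead of scraping HTML.
    match = re.search(r'window\.__NEXT_DATA__\s*=\s*({.+?});', html, re.DOTALL)
    if not match:
        raise ValueError("No __NEXT_DATA__ blob found in page")
    return json.loads(match.group(1))
```

Parsing embedded JSON is more robust than HTML scraping, but it still breaks if the site changes its serialization format — hence the layered fallbacks (base file, then scrape, then curated list) in this hunk.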
```diff
@@ -321,7 +355,6 @@ class OllamaClient(BaseModelClient):
                                     logger.info(f"Found {len(web_models)} models on Ollama website")
 
                                     # Process models
-                                    processed_models = []
                                     for model in web_models:
                                         try:
                                             # Skip models without necessary data
```
```diff
@@ -335,6 +368,10 @@ class OllamaClient(BaseModelClient):
                                                 "model_family": model.get('modelFamily', 'Unknown'),
                                             }
 
+                                            # Add variants if available
+                                            if model.get('variants'):
+                                                processed_model["variants"] = model.get('variants', [])
+
                                             # Extract parameter size from model details
                                             if model.get('parameterSize'):
                                                 processed_model["parameter_size"] = f"{model.get('parameterSize')}B"
```
```diff
@@ -390,6 +427,7 @@ class OllamaClient(BaseModelClient):
                 "phi": "3B",
                 "phi2": "3B",
                 "phi3": "3B",
+                "phi4": "7B",
                 "orca-mini": "7B",
                 "llava": "7B",
                 "codellama": "7B",
```
```diff
@@ -423,6 +461,12 @@ class OllamaClient(BaseModelClient):
                     first_variant = variants[0]
                     if first_variant and 'parameterSize' in first_variant:
                         param_size = f"{first_variant['parameterSize']}B"
+                    # Just use the first variant if it looks like a size
+                    elif isinstance(first_variant, str) and any(char.isdigit() for char in first_variant):
+                        if first_variant.lower().endswith('b'):
+                            param_size = first_variant.upper()
+                        else:
+                            param_size = f"{first_variant}B"
                 except Exception as e:
                     logger.warning(f"Error getting parameter size from variants: {str(e)}")
 
```
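This variant-to-size heuristic ("7b" → "7B", "70" → "70B") now appears in several places in the diff. A self-contained restatement — the helper name is illustrative, not from the package:

```python
from typing import Optional


def variant_to_param_size(variant: str) -> Optional[str]:
    """Normalize a size variant like '7b' or '70' to '7B' / '70B'.

    Mirrors the diff's heuristic: any variant containing a digit is treated
    as a parameter-size tag; anything else (e.g. 'instruct') is skipped.
    """
    if not any(ch.isdigit() for ch in variant):
        return None
    return variant.upper() if variant.lower().endswith("b") else f"{variant}B"


assert variant_to_param_size("7b") == "7B"
assert variant_to_param_size("70") == "70B"
assert variant_to_param_size("instruct") is None
```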
```diff
@@ -455,179 +499,166 @@ class OllamaClient(BaseModelClient):
                                             else:
                                                 processed_model["size"] = 4500000000 # Default to ~4.5GB
 
-
+                                            scraped_models.append(processed_model)
                                         except Exception as e:
                                             logger.warning(f"Error processing web model {model.get('name', 'unknown')}: {str(e)}")
-
-                                    if processed_models:
-                                        logger.info(f"Successfully processed {len(processed_models)} models from Ollama website")
-                                        return processed_models
                             except Exception as e:
                                 logger.warning(f"Error extracting model data from Ollama website: {str(e)}")
             except Exception as web_e:
                 logger.warning(f"Error fetching from Ollama website: {str(web_e)}")
 
-            # …
+            # Add curated models from the registry
+            curated_models = await self.get_registry_models("")
+
+            # Combine all models - prefer base models, then scraped models, then curated
+            all_models = []
+            existing_names = set()
+
+            # First add all base models (highest priority)
+            for model in base_models:
+                if model.get("name"):
+                    all_models.append(model)
+                    existing_names.add(model["name"])
+
+            # Then add scraped models if not already added
+            for model in scraped_models:
+                if model.get("name") and model["name"] not in existing_names:
+                    all_models.append(model)
+                    existing_names.add(model["name"])
+
+            # Finally add curated models if not already added
+            for model in curated_models:
+                if model.get("name") and model["name"] not in existing_names:
+                    all_models.append(model)
+                    existing_names.add(model["name"])
+
+            # Cache the combined models
+            cache_data = {
+                "last_updated": datetime.now().isoformat(),
+                "models": all_models
+            }
+
             try:
-… (old lines 472–478 elided in the rendered diff)
-                ) as response:
-                    if response.status == 200:
-                        data = await response.json()
-                        logger.debug(f"Ollama library response: {data}")
-                        if "models" in data:
-                            # If this succeeded, we'll use registry models instead of the curated list
-                            # Ensure models have all the needed fields
-                            registry_models = data.get("models", [])
-
-                            # Create a new list to store processed models
-                            processed_models = []
-
-                            for model in registry_models:
-                                if "name" not in model:
-                                    continue
-
-                                # Process the model
-                                try:
-                                    # Add default values for missing fields to avoid UNKNOWN displays
-                                    if "model_family" not in model:
-                                        try:
-                                            # Try to infer family from name
-                                            name = str(model["name"]).lower() if isinstance(model["name"], (str, int, float)) else ""
-                                            if "llama" in name:
-                                                model["model_family"] = "Llama"
-                                            elif "mistral" in name:
-                                                model["model_family"] = "Mistral"
-                                            elif "phi" in name:
-                                                model["model_family"] = "Phi"
-                                            elif "gemma" in name:
-                                                model["model_family"] = "Gemma"
-                                            else:
-                                                model["model_family"] = "General"
-                                        except (KeyError, TypeError, ValueError) as e:
-                                            logger.warning(f"Error inferring model family: {str(e)}")
-                                            model["model_family"] = "General"
-
-                                    try:
-                                        if "description" not in model or not model["description"]:
-                                            model_name = str(model["name"]) if isinstance(model["name"], (str, int, float)) else "Unknown"
-                                            model["description"] = f"{model_name} model"
-                                    except (KeyError, TypeError, ValueError) as e:
-                                        logger.warning(f"Error setting model description: {str(e)}")
-                                        model["description"] = "Model description unavailable"
-
-                                    # Ensure size is present and has a reasonable value
-                                    if "size" not in model or not model["size"] or model["size"] == 0:
-                                        # Set a reasonable default size based on model name
-                                        try:
-                                            name = str(model["name"]).lower() if isinstance(model["name"], (str, int, float)) else ""
-                                            if "70b" in name or "65b" in name:
-                                                model["size"] = 40000000000 # 40GB for 70B models
-                                            elif "405b" in name or "400b" in name:
-                                                model["size"] = 200000000000 # 200GB for 405B models
-                                            elif "34b" in name or "35b" in name:
-                                                model["size"] = 20000000000 # 20GB for 34B models
-                                            elif "27b" in name or "28b" in name:
-                                                model["size"] = 15000000000 # 15GB for 27B models
-                                            elif "13b" in name or "14b" in name:
-                                                model["size"] = 8000000000 # 8GB for 13B models
-                                            elif "7b" in name or "8b" in name:
-                                                model["size"] = 4500000000 # 4.5GB for 7-8B models
-                                            elif "6b" in name:
-                                                model["size"] = 3500000000 # 3.5GB for 6B models
-                                            elif "3b" in name:
-                                                model["size"] = 2000000000 # 2GB for 3B models
-                                            elif "1b" in name or "2b" in name:
-                                                model["size"] = 1500000000 # 1.5GB for 1-2B models
-                                            else:
-                                                model["size"] = 4500000000 # Default to 4.5GB if unknown
-                                        except (KeyError, TypeError, ValueError) as e:
-                                            logger.warning(f"Error setting model size: {str(e)}")
-                                            model["size"] = 4500000000 # Default fallback size
-
-                                    # Add parameter_size field based on model name
-                                    if "parameter_size" not in model:
-                                        try:
-                                            name = str(model["name"]).lower() if isinstance(model["name"], (str, int, float)) else ""
-                                            if "70b" in name:
-                                                model["parameter_size"] = "70B"
-                                            elif "405b" in name or "400b" in name:
-                                                model["parameter_size"] = "405B"
-                                            elif "34b" in name or "35b" in name:
-                                                model["parameter_size"] = "34B"
-                                            elif "27b" in name or "28b" in name:
-                                                model["parameter_size"] = "27B"
-                                            elif "13b" in name or "14b" in name:
-                                                model["parameter_size"] = "13B"
-                                            elif "8b" in name:
-                                                model["parameter_size"] = "8B"
-                                            elif "7b" in name:
-                                                model["parameter_size"] = "7B"
-                                            elif "6b" in name:
-                                                model["parameter_size"] = "6B"
-                                            elif "3b" in name:
-                                                model["parameter_size"] = "3B"
-                                            elif "2b" in name:
-                                                model["parameter_size"] = "2B"
-                                            elif "1b" in name:
-                                                model["parameter_size"] = "1B"
-                                            elif "mini" in name:
-                                                model["parameter_size"] = "3B"
-                                            elif "small" in name:
-                                                model["parameter_size"] = "7B"
-                                            elif "medium" in name:
-                                                model["parameter_size"] = "13B"
-                                            elif "large" in name:
-                                                model["parameter_size"] = "34B"
-                                            else:
-                                                model["parameter_size"] = "Unknown"
-                                        except (KeyError, TypeError, ValueError) as e:
-                                            logger.warning(f"Error setting parameter size: {str(e)}")
-                                            model["parameter_size"] = "Unknown"
-
-                                    # Add to processed models list
-                                    processed_models.append(model)
-                                except Exception as e:
-                                    logger.warning(f"Error processing model {model.get('name', 'unknown')}: {str(e)}")
-
-                            return processed_models
-            except Exception as lib_e:
-                logger.warning(f"Error using /api/library endpoint: {str(lib_e)}, falling back to /api/tags")
+                with open(self.models_cache_path, 'w') as f:
+                    json.dump(cache_data, f, indent=2)
+                logger.info(f"Cached {len(all_models)} models to {self.models_cache_path}")
+            except Exception as cache_error:
+                logger.error(f"Error caching models: {str(cache_error)}")
+
+            return all_models
```
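The merge step above is a straightforward priority dedup: base-file models win, then scraped models, then the curated list. A compact standalone sketch of the same idea (function name illustrative):

```python
from typing import Any, Dict, List


def merge_by_priority(*sources: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Merge model lists, keeping the first occurrence of each name.

    Pass sources highest-priority first, e.g.
    merge_by_priority(base_models, scraped_models, curated_models).
    """
    merged: List[Dict[str, Any]] = []
    seen = set()
    for source in sources:
        for model in source:
            name = model.get("name")
            if name and name not in seen:
                merged.append(model)
                seen.add(name)
    return merged


print(merge_by_priority([{"name": "llama3"}], [{"name": "llama3"}, {"name": "phi4"}]))
```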
```diff
 
-… (old lines 602–613 elided in the rendered diff)
+        except Exception as e:
+            logger.error(f"Error during model fetch and cache: {str(e)}")
+            # Return an empty list in case of catastrophic failure
+            return []
+
+    async def list_available_models_from_registry(self, query: str = "") -> List[Dict[str, Any]]:
+        """List available models from Ollama registry with cache support"""
+        logger.info(f"Fetching available models from Ollama registry, query: '{query}'")
+
+        # Check if we need to update the cache
+        need_cache_update = True
+        models_from_cache = []
+
+        try:
+            # Try to read from cache first
+            if self.models_cache_path.exists():
+                try:
+                    with open(self.models_cache_path, 'r') as f:
+                        cache_data = json.load(f)
 
-                    if …
-… (old lines 616–619 elided in the rendered diff)
+                    # Check if cache is still valid (less than 24 hours old)
+                    if cache_data.get("last_updated"):
+                        last_updated = datetime.fromisoformat(cache_data["last_updated"])
+                        # Cache valid if less than 24 hours old
+                        if datetime.now() - last_updated < timedelta(hours=24):
+                            need_cache_update = False
+                            models_from_cache = cache_data.get("models", [])
+                            logger.info(f"Using cached models from {last_updated.isoformat()} ({len(models_from_cache)} models)")
+                        else:
+                            logger.info(f"Cache from {last_updated.isoformat()} is older than 24 hours, refreshing")
+                except Exception as e:
+                    logger.warning(f"Error reading cache: {str(e)}, will refresh")
+            else:
+                logger.info("No cache found, creating a new one")
+        except Exception as e:
+            logger.warning(f"Error checking cache: {str(e)}")
```
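The freshness check just added boils down to "trust the cache file only if its timestamp is under 24 hours old, and treat an unreadable cache as stale". A standalone sketch under those assumptions (names illustrative):

```python
import json
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional


def load_fresh_cache(path: Path, max_age: timedelta = timedelta(hours=24)) -> Optional[list]:
    """Return cached models if the cache file is newer than max_age, else None."""
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text())
        last_updated = datetime.fromisoformat(data["last_updated"])
    except (ValueError, KeyError, OSError):
        return None  # unreadable or malformed cache counts as stale
    if datetime.now() - last_updated >= max_age:
        return None
    return data.get("models", [])
```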
```diff
+
+        # Always read the base file first
+        base_models = []
+        try:
+            # Read the base models file
+            base_file_path = Path(__file__).parent.parent / "data" / "ollama-models-base.json"
+            if base_file_path.exists():
+                with open(base_file_path, 'r') as f:
+                    base_data = json.load(f)
+                if "models" in base_data:
+                    base_models = base_data["models"]
+                    logger.info(f"Loaded {len(base_models)} models from base file")
+
+                    # Process base models to ensure they have proper format
+                    for model in base_models:
+                        # Make sure they have model_family
+                        if "model_family" not in model and "name" in model:
+                            name = model["name"].lower()
+                            if "llama" in name:
+                                model["model_family"] = "Llama"
+                            elif "mistral" in name:
+                                model["model_family"] = "Mistral"
+                            elif "phi" in name:
+                                model["model_family"] = "Phi"
+                            elif "gemma" in name:
+                                model["model_family"] = "Gemma"
+                            elif "qwen" in name:
+                                model["model_family"] = "Qwen"
+                            else:
+                                # Try to extract family from name (before any colon)
+                                base_name = name.split(":")[0]
+                                model["model_family"] = base_name.capitalize()
+
+                    # If no cache yet but base file exists, use base models and trigger update
+                    if not models_from_cache and base_models:
+                        models_from_cache = base_models
+                        logger.info(f"Using {len(base_models)} models from base file while cache updates")
 
-            # …
-… (old lines 622–623 elided in the rendered diff)
+                        # Start cache update in background
+                        asyncio.create_task(self._fetch_and_cache_models())
+                        need_cache_update = False
         except Exception as e:
-            logger.…
-
+            logger.warning(f"Error loading base models file: {str(e)}")
+
+        # If we need to update the cache, do it now
+        if need_cache_update:
+            # Run the cache update in the background if we have cached data
+            if models_from_cache:
+                # We can use cached data for now but update in background
+                asyncio.create_task(self._fetch_and_cache_models())
+            else:
+                # We need to wait for the cache update
+                models_from_cache = await self._fetch_and_cache_models()
+
+        # Always make sure base models are included
+        if base_models:
+            # Create a set of existing model names
+            existing_names = set(model.get("name", "") for model in models_from_cache)
+
+            # Add base models if not already in cache
+            for model in base_models:
+                if model.get("name") and model["name"] not in existing_names:
+                    models_from_cache.append(model)
+                    existing_names.add(model["name"])
+
+            logger.info(f"Combined total: {len(models_from_cache)} models")
+
+        # Log the number of models available
+        logger.info(f"Total available models: {len(models_from_cache)}")
+
+        # No filtering here - the UI will handle filtering
+        return models_from_cache
 
     async def get_registry_models(self, query: str = "") -> List[Dict[str, Any]]:
         """Get a curated list of popular Ollama models"""
-        logger.info("Returning a curated list of popular Ollama models")
+        logger.info("Returning a curated list of popular Ollama models (query: {})".format(query or "none"))
 
         # Provide a curated list of popular models as fallback
         models = [
```
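The overall shape of the new method is stale-while-revalidate: serve whatever usable data exists now and refresh in the background. A minimal self-contained sketch of that pattern, with placeholder cache and fetch logic standing in for the real scrape:

```python
import asyncio
from typing import Any, Dict, List, Optional


class CachedRegistry:
    """Sketch of the stale-while-revalidate pattern the diff adopts."""

    def __init__(self) -> None:
        self._refresh_task: Optional[asyncio.Task] = None
        self._cache: List[Dict[str, Any]] = []

    def _read_cache(self) -> List[Dict[str, Any]]:
        return self._cache  # placeholder for reading ollama-models.json

    async def _full_fetch(self) -> List[Dict[str, Any]]:
        await asyncio.sleep(0)  # placeholder for scrape + cache write
        self._cache = [{"name": "llama3"}]
        return self._cache

    async def get_models(self) -> List[Dict[str, Any]]:
        cached = self._read_cache()
        if cached:
            # Serve stale data now; refresh in the background. Keeping the
            # task referenced on self prevents it being garbage-collected
            # mid-flight (a known gotcha with bare asyncio.create_task).
            self._refresh_task = asyncio.create_task(self._full_fetch())
            return cached
        return await self._full_fetch()  # no cache yet: block on a full fetch


if __name__ == "__main__":
    print(asyncio.run(CachedRegistry().get_models()))
```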
{chat_console-0.2.3 → chat_console-0.2.6}/app/main.py

```diff
@@ -300,14 +300,13 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
 
     BINDINGS = [ # Keep SimpleChatApp BINDINGS, ensure Enter is not globally bound for settings
         Binding("q", "quit", "Quit", show=True, key_display="q"),
-        # …
-        Binding("…
-        Binding("c", "action_new_conversation", "New Chat", show=False, key_display="c", priority=True), # Add priority to alias too
+        # Removed binding for "n" (new chat) since there's a dedicated button
+        Binding("c", "action_new_conversation", "New Chat", show=False, key_display="c", priority=True), # Keep alias with priority
         Binding("escape", "escape", "Cancel / Stop", show=True, key_display="esc"), # Escape might close settings panel too
         Binding("ctrl+c", "quit", "Quit", show=False),
         Binding("h", "view_history", "History", show=True, key_display="h", priority=True), # Add priority
         Binding("s", "settings", "Settings", show=True, key_display="s", priority=True), # Add priority
-
+        # Removed binding for "t" (title update) since there's a dedicated button
         Binding("m", "model_browser", "Model Browser", show=True, key_display="m", priority=True), # Add model browser binding
     ] # Keep SimpleChatApp BINDINGS end
 
```
{chat_console-0.2.3 → chat_console-0.2.6}/app/ui/model_browser.py

```diff
@@ -1,3 +1,4 @@
+import asyncio
 import logging
 from typing import Dict, List, Any, Optional
 from textual.app import ComposeResult
```
```diff
@@ -253,11 +254,30 @@ class ModelBrowser(Container):
         available_table.add_columns("Model", "Size", "Family", "Description")
         available_table.cursor_type = "row"
 
+        # Show notification about model loading
+        self.notify("Initializing model browser, this might take a moment on first run...",
+                    severity="information", timeout=5)
+
         # Load models
         await self.load_local_models()
 
+        # Start loading available models in the background
+        asyncio.create_task(self.preload_available_models())
+
         # Focus search input
         self.query_one("#model-search").focus()
+
+    async def preload_available_models(self) -> None:
+        """Preload available models in the background"""
+        # Load the available models list in the background to make it faster when
+        # the user switches to the Available Models tab
+        try:
+            # This will trigger cache creation if needed, making tab switching faster
+            models = await self.ollama_client.list_available_models_from_registry()
+            if models:
+                logger.info(f"Preloaded {len(models)} available models")
+        except Exception as e:
+            logger.error(f"Error preloading available models: {str(e)}")
 
     async def load_local_models(self) -> None:
         """Load locally installed Ollama models"""
```
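A side note on the fire-and-forget `asyncio.create_task` call in `on_mount`: recent Textual versions also provide a worker API that manages background-task lifetime for you. A hedged alternative sketch, assuming a Textual version with `run_worker`:

```python
from textual.app import App


class ModelBrowserApp(App):
    """Minimal sketch: Textual workers own the task reference, unlike a bare
    asyncio.create_task whose only reference may be dropped and collected."""

    async def preload_available_models(self) -> None:
        ...  # placeholder for the registry preload

    def on_mount(self) -> None:
        # exclusive=True cancels a previous preload if one is still running
        self.run_worker(self.preload_available_models(), exclusive=True)
```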
```diff
@@ -422,28 +442,58 @@ class ModelBrowser(Container):
         search_input = self.query_one("#model-search", Input)
         query = search_input.value.strip()
 
-        # …
+        # Debug to track model loading
+        logger.info(f"Loading available models, query: '{query}'")
+
+        # Load models from registry - don't apply the query here, get ALL models
         try:
             # First try the API-based registry
-            self.available_models = await self.ollama_client.list_available_models_from_registry(…
+            self.available_models = await self.ollama_client.list_available_models_from_registry("")
+            logger.info(f"Got {len(self.available_models)} models from registry")
+
             # If no models found, use the curated list
             if not self.available_models:
-                self.available_models = await self.ollama_client.get_registry_models()
+                self.available_models = await self.ollama_client.get_registry_models("")
+                logger.info(f"Got {len(self.available_models)} models from curated list")
         except Exception as e:
-…
+            logger.error(f"Error from registry API: {str(e)}")
             # Fallback to curated list
-            self.available_models = await self.ollama_client.get_registry_models()
+            self.available_models = await self.ollama_client.get_registry_models("")
+            logger.info(f"Fallback: Got {len(self.available_models)} models from curated list")
 
         # Clear and populate table
         available_table = self.query_one("#available-models-table", DataTable)
         available_table.clear()
 
-        # Get number of models loaded
+        # Get number of models loaded (but don't notify to avoid notification spam)
         model_count = len(self.available_models)
-
+        logger.info(f"Found {model_count} models to display")
+
+        # Filter models by search query if provided
+        filtered_models = self.available_models
+        if query:
+            query = query.lower()
+            filtered_models = []
+            for model in self.available_models:
+                # Check if query matches name, description or family
+                name = str(model.get("name", "")).lower()
+                desc = str(model.get("description", "")).lower()
+                family = str(model.get("model_family", "")).lower()
+
+                # Also check variants if available
+                variants_match = False
+                if "variants" in model and model["variants"]:
+                    variants_text = " ".join([str(v).lower() for v in model["variants"]])
+                    if query in variants_text:
+                        variants_match = True
+
+                if query in name or query in desc or query in family or variants_match:
+                    filtered_models.append(model)
+
+            logger.info(f"Filtered to {len(filtered_models)} models matching '{query}'")
 
-        # Add all models to the table - no pagination limit
-        for model in …
+        # Add all filtered models to the table - no pagination limit
+        for model in filtered_models:
             name = model.get("name", "Unknown")
 
             # Extract parameter size info (in billions)
```
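With 0.2.6 the search query is no longer passed to the registry; the full model list is fetched (and cached) once, and filtering moves client-side. A self-contained restatement of the new filter (helper name illustrative):

```python
def matches_query(model: dict, query: str) -> bool:
    """Case-insensitive substring match across the fields the UI searches."""
    q = query.lower()
    haystacks = [
        str(model.get("name", "")),
        str(model.get("description", "")),
        str(model.get("model_family", "")),
        " ".join(str(v) for v in model.get("variants") or []),
    ]
    return any(q in h.lower() for h in haystacks)


# usage: filtered = [m for m in available_models if matches_query(m, query)]
assert matches_query({"name": "llama3", "variants": ["8b", "70b"]}, "70B")
```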
```diff
@@ -455,6 +505,17 @@ class ModelBrowser(Container):
                 # Make sure it ends with B for billions if it doesn't already
                 if not size.upper().endswith("B"):
                     size += "B"
+            # Check if we can extract from variants
+            elif "variants" in model and model["variants"]:
+                for variant in model["variants"]:
+                    if any(char.isdigit() for char in str(variant)):
+                        # This looks like a size variant (e.g., "7b", "70b")
+                        variant_str = str(variant).lower()
+                        if variant_str.endswith('b'):
+                            size = str(variant).upper()
+                        else:
+                            size = f"{variant}B"
+                        break
             else:
                 # Extract from name if not available
                 model_name = str(name).lower()
```
```diff
@@ -504,6 +565,7 @@ class ModelBrowser(Container):
                     "phi": "3B",
                     "phi2": "3B",
                     "phi3": "3B",
+                    "phi4": "7B",
                     "orca-mini": "7B",
                     "llava": "7B",
                     "codellama": "7B",
```
```diff
@@ -526,12 +588,16 @@ class ModelBrowser(Container):
             family = model.get("model_family", "Unknown")
             description = model.get("description", "No description available")
 
+            # Keep this for debugging
+            # logger.info(f"Adding model to table: {name} - {size} - {family}")
+
             available_table.add_row(name, size, family, description)
 
         actual_displayed = available_table.row_count
-
+        logger.info(f"Loaded {actual_displayed} available models")
 
     except Exception as e:
+        logger.error(f"Error loading available models: {str(e)}")
         self.notify(f"Error loading available models: {str(e)}", severity="error")
     finally:
         self.is_loading = False
```
```diff
@@ -745,218 +811,295 @@ class ModelBrowser(Container):
         model_id = self._get_selected_model_id()
 
         if not model_id:
-… (old lines 748–749 elided in the rendered diff)
+            # Try to select the first model in the table
+            if self.current_tab == "local" and self.local_models:
+                model_id = self.local_models[0]["id"]
+            elif self.current_tab == "available" and self.available_models:
+                model_id = self.available_models[0]["name"]
+
+            # If we still don't have a model ID, show warning and return
+            if not model_id:
+                self.notify("No model selected", severity="warning")
+                return
 
         # Get model details container
         details_container = self.query_one("#model-details")
         details_content = self.query_one("#details-content", Static)
 
-… (old lines 755–762 elided in the rendered diff)
+        # Check if we're in "available" tab or "local" tab
+        if self.current_tab == "available":
+            # For available models, use cached info instead of making API calls
+            try:
+                # Find the model in our available_models list
+                model_info = None
+                for model in self.available_models:
+                    if model.get("name") == model_id:
+                        model_info = model
+                        break
+
+                if not model_info:
+                    details_content.update(f"No details found for model: {model_id}")
+                    details_container.add_class("visible")
+                    return
+
+                # Format the details from the cached info
+                formatted_details = f"Model: {model_id}\n"
+
+                # Add parameters info
+                param_size = model_info.get("parameter_size", "Unknown")
+                if param_size and not str(param_size).upper().endswith("B"):
+                    param_size = f"{param_size}B"
+                formatted_details += f"Parameters: {param_size}\n"
+
+                # Add family info
+                family = model_info.get("model_family", "Unknown")
+                formatted_details += f"Family: {family}\n"
+
+                # Add description
+                description = model_info.get("description", "No description available.")
+                formatted_details += f"\nDescription:\n{description}\n"
+
+                # Add variants if available
+                if "variants" in model_info and model_info["variants"]:
+                    formatted_details += f"\nVariants: {', '.join(model_info['variants'])}\n"
+
+                # Add stats if available
+                if "stats" in model_info and model_info["stats"]:
+                    stats = model_info["stats"]
+                    formatted_details += f"\nStats:\n"
+                    if "pulls" in stats:
+                        formatted_details += f"Pulls: {stats['pulls']}\n"
+                    if "tags" in stats:
+                        formatted_details += f"Tags: {stats['tags']}\n"
+                    if "last_updated" in stats:
+                        formatted_details += f"Last Updated: {stats['last_updated']}\n"
+
+                # Update and show details
+                details_content.update(formatted_details)
                 details_container.add_class("visible")
-… (old lines 764–779 elided in the rendered diff)
-                # If not found in modelfile, try to extract from name
-                if param_size == "Unknown":
-                    model_name = str(model_id).lower()
-                    if "70b" in model_name:
-                        param_size = "70B"
-                    elif "405b" in model_name or "400b" in model_name:
-                        param_size = "405B"
-                    elif "34b" in model_name or "35b" in model_name:
-                        param_size = "34B"
-                    elif "27b" in model_name or "28b" in model_name:
-                        param_size = "27B"
-                    elif "13b" in model_name or "14b" in model_name:
-                        param_size = "13B"
-                    elif "8b" in model_name:
-                        param_size = "8B"
-                    elif "7b" in model_name:
-                        param_size = "7B"
-                    elif "6b" in model_name:
-                        param_size = "6B"
-                    elif "3b" in model_name:
-                        param_size = "3B"
-                    elif "2b" in model_name:
-                        param_size = "2B"
-                    elif "1b" in model_name:
-                        param_size = "1B"
-                    elif "mini" in model_name:
-                        param_size = "3B"
-                    elif "small" in model_name:
-                        param_size = "7B"
-                    elif "medium" in model_name:
-                        param_size = "13B"
-                    elif "large" in model_name:
-                        param_size = "34B"
+            except Exception as e:
+                logger.error(f"Error showing available model details: {str(e)}")
+                details_content.update(f"Error loading details: {str(e)}")
+                details_container.add_class("visible")
+        else:
+            # For local models, we still need to get details from API
+            try:
+                # Get model details from Ollama
+                details = await self.ollama_client.get_model_details(model_id)
+
+                # Check for error in response
+                if "error" in details:
+                    error_msg = f"Error: {details['error']}"
+                    details_content.update(error_msg)
+                    details_container.add_class("visible")
+                    return
 
-
+                formatted_details = f"Model: {model_id}\n"
+
+                # Extract parameter size info
+                param_size = "Unknown"
+
+                # First try to get parameter size from modelfile if available
+                if "modelfile" in details and details["modelfile"] is not None:
+                    modelfile = details["modelfile"]
+                    if "parameter_size" in modelfile and modelfile["parameter_size"]:
+                        param_size = str(modelfile["parameter_size"])
+                        # Make sure it ends with B for billions if it doesn't already
+                        if not param_size.upper().endswith("B"):
+                            param_size += "B"
+
+                # If not found in modelfile, try to extract from name
                 if param_size == "Unknown":
-… (old lines 816–817 elided in the rendered diff)
+                    model_name = str(model_id).lower()
+                    if "70b" in model_name:
+                        param_size = "70B"
+                    elif "405b" in model_name or "400b" in model_name:
+                        param_size = "405B"
+                    elif "34b" in model_name or "35b" in model_name:
+                        param_size = "34B"
+                    elif "27b" in model_name or "28b" in model_name:
+                        param_size = "27B"
+                    elif "13b" in model_name or "14b" in model_name:
+                        param_size = "13B"
+                    elif "8b" in model_name:
+                        param_size = "8B"
+                    elif "7b" in model_name:
+                        param_size = "7B"
+                    elif "6b" in model_name:
+                        param_size = "6B"
+                    elif "3b" in model_name:
+                        param_size = "3B"
+                    elif "2b" in model_name:
+                        param_size = "2B"
+                    elif "1b" in model_name:
+                        param_size = "1B"
+                    elif "mini" in model_name:
+                        param_size = "3B"
+                    elif "small" in model_name:
+                        param_size = "7B"
+                    elif "medium" in model_name:
+                        param_size = "13B"
+                    elif "large" in model_name:
+                        param_size = "34B"
 
-            # … (old lines 819–844 elided in the rendered diff)
-                            param_size = default_size
-                            break
+                # Special handling for base models with no size indicator
+                if param_size == "Unknown":
+                    # Remove tag part if present to get base model
+                    base_name = model_name.split(":")[0]
+
+                    # Check if we have default parameter sizes for known models
+                    model_defaults = {
+                        "llama3": "8B",
+                        "llama2": "7B",
+                        "mistral": "7B",
+                        "gemma": "7B",
+                        "gemma2": "9B",
+                        "phi": "3B",
+                        "phi2": "3B",
+                        "phi3": "3B",
+                        "phi4": "7B",
+                        "orca-mini": "7B",
+                        "llava": "7B",
+                        "codellama": "7B",
+                        "neural-chat": "7B",
+                        "wizard-math": "7B",
+                        "yi": "6B",
+                        "deepseek": "7B",
+                        "deepseek-coder": "7B",
+                        "qwen": "7B",
+                        "falcon": "7B",
+                        "stable-code": "3B"
+                    }
+
+                    # Try to find a match in default sizes
+                    for model_name, default_size in model_defaults.items():
+                        if model_name in base_name:
+                            param_size = default_size
+                            break
 
+                # Show both parameter size and disk size
+                formatted_details += f"Parameters: {param_size}\n"
+                formatted_details += f"Disk Size: {self._format_size(details.get('size', 0))}\n"
 
-… (old lines 848–855 elided in the rendered diff)
-                system_prompt = ""
-
-                if "modelfile" in details and details["modelfile"] is not None:
-                    modelfile = details["modelfile"]
+                # Extract family info - check multiple possible locations
+                family = "Unknown"
+                template = "Unknown"
+                license_info = "Unknown"
+                system_prompt = ""
+
+                if "modelfile" in details and details["modelfile"] is not None:
+                    modelfile = details["modelfile"]
 
-… (old lines 861–880 elided in the rendered diff)
+                    # Ensure modelfile is a dictionary before accessing keys
+                    if isinstance(modelfile, dict):
+                        # Extract family/parameter size
+                        if "parameter_size" in modelfile:
+                            family = modelfile.get("parameter_size")
+                        elif "family" in modelfile:
+                            family = modelfile.get("family")
+                        else:
+                            # Try to infer from model name if not explicitly set
+                            try:
+                                name = str(model_id).lower() if model_id is not None else ""
+                                if "llama" in name:
+                                    family = "Llama"
+                                elif "mistral" in name:
+                                    family = "Mistral"
+                                elif "phi" in name:
+                                    family = "Phi"
+                                elif "gemma" in name:
+                                    family = "Gemma"
+                                else:
+                                    family = "Unknown"
+                            except (TypeError, ValueError) as e:
+                                logger.error(f"Error inferring model family: {str(e)}")
                                 family = "Unknown"
-                    except (TypeError, ValueError) as e:
-                        logger.error(f"Error inferring model family: {str(e)}")
-                        family = "Unknown"
 
-… (old lines 886–887 elided in the rendered diff)
+                        # Get template
+                        template = modelfile.get("template", "Unknown")
 
-… (old lines 889–890 elided in the rendered diff)
+                        # Get license
+                        license_info = modelfile.get("license", "Unknown")
 
-… (old lines 892–908 elided in the rendered diff)
-                formatted_details += f"Family: {family}\n"
-                formatted_details += f"Template: {template}\n"
-                formatted_details += f"License: {license_info}\n"
-
-                # Add timestamps if available
-                if "modified_at" in details and details["modified_at"]:
-                    formatted_details += f"Modified: {details['modified_at']}\n"
-                elif "created_at" in details and details["created_at"]:
-                    formatted_details += f"Created: {details['created_at']}\n"
+                        # Get system prompt if available
+                        if "system" in modelfile:
+                            system_prompt = modelfile.get("system", "") # Use get for safety
+                    else:
+                        # If modelfile is not a dict (e.g., a string), set defaults
+                        logger.warning(f"Modelfile for {model_id} is not a dictionary. Type: {type(modelfile)}")
+                        # Keep existing defaults or try to infer family from name again
+                        if family == "Unknown":
+                            try:
+                                name = str(model_id).lower() if model_id is not None else ""
+                                if "llama" in name: family = "Llama"
+                                elif "mistral" in name: family = "Mistral"
+                                elif "phi" in name: family = "Phi"
+                                elif "gemma" in name: family = "Gemma"
+                            except (TypeError, ValueError): pass # Ignore errors here
+                        # template, license_info, system_prompt remain "Unknown" or empty
 
-… (old lines 919–920 elided in the rendered diff)
-                formatted_details += f"…
-… (old lines 922–930 elided in the rendered diff)
+                formatted_details += f"Family: {family}\n"
+                formatted_details += f"Template: {template}\n"
+                formatted_details += f"License: {license_info}\n"
+
+                # Add timestamps if available
+                if "modified_at" in details and details["modified_at"]:
+                    formatted_details += f"Modified: {details['modified_at']}\n"
+                elif "created_at" in details and details["created_at"]:
+                    formatted_details += f"Created: {details['created_at']}\n"
+
+                # Add system prompt if available
+                if system_prompt:
+                    formatted_details += f"\nSystem Prompt:\n{system_prompt}\n"
+
+                # Update and show details
+                details_content.update(formatted_details)
+                details_container.add_class("visible")
+
+            except Exception as e:
+                self.notify(f"Error getting model details: {str(e)}", severity="error")
+                details_content.update(f"Error loading details: {str(e)}")
+                details_container.add_class("visible")
 
     def _get_selected_model_id(self) -> str:
         """Get the ID of the currently selected model"""
-… (old lines 934–959 elided in the rendered diff)
+        try:
+            if self.current_tab == "local":
+                table = self.query_one("#local-models-table", DataTable)
+                if table.cursor_row is not None:
+                    row = table.get_row_at(table.cursor_row)
+                    # Get model ID from local models list
+                    try:
+                        if row and len(row) > 0:
+                            row_name = str(row[0]) if row[0] is not None else ""
+                            for model in self.local_models:
+                                if model["name"] == row_name:
+                                    return model["id"]
+                    except (IndexError, TypeError) as e:
+                        logger.error(f"Error processing row data: {str(e)}")
+            else:
+                table = self.query_one("#available-models-table", DataTable)
+                if table.cursor_row is not None:
+                    try:
+                        row = table.get_row_at(table.cursor_row)
+                        # Return the model name as ID
+                        if row and len(row) > 0:
+                            return str(row[0]) if row[0] is not None else ""
+                    except Exception as e:
+                        logger.error(f"Error getting row at cursor: {str(e)}")
+
+                # If we couldn't get a valid row, check if there are any rows and select the first one
+                if table.row_count > 0:
+                    try:
+                        # Select the first row and get its ID
+                        table.cursor_row = 0
+                        row = table.get_row_at(0)
+                        if row and len(row) > 0:
+                            return str(row[0]) if row[0] is not None else ""
+                    except Exception as e:
+                        logger.error(f"Error selecting first row: {str(e)}")
+        except Exception as e:
+            logger.error(f"Error in _get_selected_model_id: {str(e)}")
 
         return ""
```
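One nit in the new fallback table above: the lookup loop reuses `model_name` as its loop variable, shadowing the lowercased model id computed earlier in the method. A standalone sketch of the same lookup with a distinct variable avoids that, and flags an ordering caveat worth knowing:

```python
DEFAULT_PARAM_SIZES = {
    "llama3": "8B", "llama2": "7B", "mistral": "7B", "gemma": "7B",
    "gemma2": "9B", "phi": "3B", "phi2": "3B", "phi3": "3B", "phi4": "7B",
    "orca-mini": "7B", "llava": "7B", "codellama": "7B", "neural-chat": "7B",
    "wizard-math": "7B", "yi": "6B", "deepseek": "7B", "deepseek-coder": "7B",
    "qwen": "7B", "falcon": "7B", "stable-code": "3B",
}


def default_param_size(model_id: str) -> str:
    """Substring lookup against known base models; 'Unknown' if no match.

    Note that insertion order matters with substring matching: 'phi' is
    checked before 'phi4', so 'phi4' resolves to the 'phi' entry first.
    """
    base_name = model_id.lower().split(":")[0]  # drop any tag, e.g. 'llama3:8b'
    for known, size in DEFAULT_PARAM_SIZES.items():
        if known in base_name:
            return size
    return "Unknown"


print(default_param_size("llama3:8b"))  # 8B
```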
```diff
@@ -995,3 +1138,9 @@ class ModelBrowser(Container):
             self.app.call_later(self.load_local_models)
         else:
             self.app.call_later(self.load_available_models)
+
+    def on_input_changed(self, event: Input.Changed) -> None:
+        """Handle input changes for live search"""
+        if event.input.id == "model-search" and self.current_tab == "available":
+            # Auto-search as user types in the available models tab
+            self.app.call_later(self.load_available_models)
```
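As written, every keystroke schedules a full table reload. A hypothetical debounced variant — restarting a short timer on each change so the loader runs only once typing pauses — could look like this, assuming a Textual version with `set_timer` on widgets:

```python
from textual.containers import Container
from textual.widgets import Input


class DebouncedSearch(Container):
    """Hedged sketch of a debounced on_input_changed; not in the package."""

    def on_input_changed(self, event: Input.Changed) -> None:
        if event.input.id == "model-search":
            if getattr(self, "_search_timer", None) is not None:
                self._search_timer.stop()
            # set_timer schedules a one-shot callback on Textual's event loop
            self._search_timer = self.set_timer(
                0.3, lambda: self.app.call_later(self.load_available_models)
            )

    async def load_available_models(self) -> None:
        ...  # stand-in for the real loader
```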
{chat_console-0.2.3 → chat_console-0.2.6}/chat_console.egg-info/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.2.3
+Version: 0.2.6
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
```
The remaining 23 files, listed above with +0 -0, are unchanged between 0.2.3 and 0.2.6.