chat-console 0.1.2__py3-none-any.whl → 0.1.5.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/api/anthropic.py CHANGED
@@ -5,7 +5,7 @@ from ..config import ANTHROPIC_API_KEY
 
 class AnthropicClient(BaseModelClient):
     def __init__(self):
-        self.client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
+        self.client = anthropic.AsyncAnthropic(api_key=ANTHROPIC_API_KEY)
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
         """Prepare messages for Claude API"""
@@ -47,7 +47,7 @@ class AnthropicClient(BaseModelClient):
 
         return styles.get(style, "")
 
-    def generate_completion(self, messages: List[Dict[str, str]],
+    async def generate_completion(self, messages: List[Dict[str, str]],
                             model: str,
                             style: Optional[str] = None,
                             temperature: float = 0.7,
@@ -55,7 +55,7 @@ class AnthropicClient(BaseModelClient):
         """Generate a text completion using Claude"""
        processed_messages = self._prepare_messages(messages, style)
 
-        response = self.client.messages.create(
+        response = await self.client.messages.create(
            model=model,
            messages=processed_messages,
            temperature=temperature,
@@ -72,7 +72,7 @@ class AnthropicClient(BaseModelClient):
        """Generate a streaming text completion using Claude"""
        processed_messages = self._prepare_messages(messages, style)
 
-        stream = self.client.messages.stream(
+        stream = await self.client.messages.stream(
            model=model,
            messages=processed_messages,
            temperature=temperature,
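
Note: with the client switched from anthropic.Anthropic to anthropic.AsyncAnthropic, completion calls must now be awaited. A minimal usage sketch (hypothetical driver code, not part of the wheel; the model id is only an example):

    import asyncio
    from app.api.anthropic import AnthropicClient

    async def main():
        client = AnthropicClient()  # wraps anthropic.AsyncAnthropic as of this version
        text = await client.generate_completion(
            messages=[{"role": "user", "content": "Hello"}],
            model="claude-3-haiku-20240307",  # example model id
        )
        print(text)

    asyncio.run(main())
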
app/api/base.py CHANGED
@@ -13,7 +13,6 @@ class BaseModelClient(ABC):
         """Generate a text completion"""
         pass
 
-    @abstractmethod
     @abstractmethod
     async def generate_stream(self, messages: List[Dict[str, str]],
                               model: str,
@@ -31,44 +30,55 @@ class BaseModelClient(ABC):
     @staticmethod
     def get_client_for_model(model_name: str) -> 'BaseModelClient':
         """Factory method to get appropriate client for model"""
-        from ..config import CONFIG
+        from ..config import CONFIG, AVAILABLE_PROVIDERS
         from .anthropic import AnthropicClient
         from .openai import OpenAIClient
+        from .ollama import OllamaClient
+        import logging
+
+        logger = logging.getLogger(__name__)
 
-        # For known models, use their configured provider
+        # Get model info and provider
        model_info = CONFIG["available_models"].get(model_name)
+        model_name_lower = model_name.lower()
+
+        # If model is in config, use its provider
        if model_info:
            provider = model_info["provider"]
+            if not AVAILABLE_PROVIDERS[provider]:
+                raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")
+        # For custom models, try to infer provider
        else:
-            # For custom models, infer provider from name prefix
-            model_name_lower = model_name.lower()
-            if any(name in model_name_lower for name in ["gpt", "text-", "davinci"]):
+            # First try Ollama for known model names or if selected from Ollama UI
+            if (any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]) or
+                model_name in [m["id"] for m in CONFIG.get("ollama_models", [])]):
+                if not AVAILABLE_PROVIDERS["ollama"]:
+                    raise Exception("Ollama server is not running. Please start Ollama and try again.")
+                provider = "ollama"
+                logger.info(f"Using Ollama for model: {model_name}")
+            # Then try other providers if they're available
+            elif any(name in model_name_lower for name in ["gpt", "text-", "davinci"]):
+                if not AVAILABLE_PROVIDERS["openai"]:
+                    raise Exception("OpenAI API key not found. Please set OPENAI_API_KEY environment variable.")
                provider = "openai"
            elif any(name in model_name_lower for name in ["claude", "anthropic"]):
+                if not AVAILABLE_PROVIDERS["anthropic"]:
+                    raise Exception("Anthropic API key not found. Please set ANTHROPIC_API_KEY environment variable.")
                provider = "anthropic"
-            elif any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]):
-                provider = "ollama"
            else:
-                # Try to get from Ollama API first
-                from .ollama import OllamaClient
-                try:
-                    client = OllamaClient()
-                    models = client.get_available_models()
-                    if any(model["id"] == model_name for model in models):
-                        provider = "ollama"
-                    else:
-                        # Default to OpenAI if not found
-                        provider = "openai"
-                except:
-                    # Default to OpenAI if Ollama not available
-                    provider = "openai"
+                # Default to Ollama for unknown models
+                if AVAILABLE_PROVIDERS["ollama"]:
+                    provider = "ollama"
+                    logger.info(f"Defaulting to Ollama for unknown model: {model_name}")
+                else:
+                    raise Exception(f"Unknown model: {model_name}")
 
-        if provider == "anthropic":
-            return AnthropicClient()
+        # Return appropriate client
+        if provider == "ollama":
+            return OllamaClient()
        elif provider == "openai":
            return OpenAIClient()
-        elif provider == "ollama":
-            from .ollama import OllamaClient
-            return OllamaClient()
+        elif provider == "anthropic":
+            return AnthropicClient()
        else:
            raise ValueError(f"Unknown provider: {provider}")
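
Note: the factory now consults AVAILABLE_PROVIDERS up front, so a missing API key or a stopped Ollama server raises immediately instead of silently falling back to OpenAI. A sketch of the new resolution order (hypothetical calls, not part of the wheel):

    from app.api.base import BaseModelClient

    # Matches the Ollama name heuristics ("llama", "mistral", ...), so this
    # returns an OllamaClient, or raises if the Ollama server is unavailable.
    client = BaseModelClient.get_client_for_model("llama2")

    # Unknown names now default to Ollama when it is available, instead of
    # defaulting to OpenAI as in 0.1.2.
    client = BaseModelClient.get_client_for_model("my-local-finetune")
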
app/api/ollama.py CHANGED
@@ -1,34 +1,36 @@
 import aiohttp
+import asyncio
 import json
+import logging
 from typing import List, Dict, Any, Optional, Generator, AsyncGenerator
 from .base import BaseModelClient
-from ..config import CONFIG
+
+# Set up logging
+logger = logging.getLogger(__name__)
 
 class OllamaClient(BaseModelClient):
     def __init__(self):
-        self.base_url = "http://localhost:11434"
+        from ..config import OLLAMA_BASE_URL
+        from ..utils import ensure_ollama_running
+        self.base_url = OLLAMA_BASE_URL.rstrip('/')
+        logger.info(f"Initializing Ollama client with base URL: {self.base_url}")
+
+        # Try to start Ollama if not running
+        if not ensure_ollama_running():
+            raise Exception(f"Failed to start Ollama server. Please ensure Ollama is installed and try again.")
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> str:
         """Convert chat messages to Ollama format"""
-        # Convert messages to a single string with role prefixes
+        # Start with any style instructions
        formatted_messages = []
-
-        for msg in messages:
-            role = msg["role"]
-            content = msg["content"]
-
-            if role == "system":
-                formatted_messages.append(f"System: {content}")
-            elif role == "user":
-                formatted_messages.append(f"Human: {content}")
-            elif role == "assistant":
-                formatted_messages.append(f"Assistant: {content}")
-
-        # Add style instructions if provided
        if style and style != "default":
-            style_instructions = self._get_style_instructions(style)
-            formatted_messages.insert(0, f"System: {style_instructions}")
+            formatted_messages.append(self._get_style_instructions(style))
+
+        # Add message content, preserving conversation flow
+        for msg in messages:
+            formatted_messages.append(msg["content"])
 
+        # Join with double newlines for better readability
        return "\n\n".join(formatted_messages)
 
     def _get_style_instructions(self, style: str) -> str:
@@ -42,75 +44,161 @@ class OllamaClient(BaseModelClient):
 
        return styles.get(style, "")
 
-    async def generate_completion(self, messages: List[Dict[str, str]],
-                                  model: str,
-                                  style: Optional[str] = None,
-                                  temperature: float = 0.7,
+    async def get_available_models(self) -> List[Dict[str, Any]]:
+        """Get list of available Ollama models"""
+        logger.info("Fetching available Ollama models...")
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(
+                    f"{self.base_url}/api/tags",
+                    timeout=5,
+                    headers={"Accept": "application/json"}
+                ) as response:
+                    response.raise_for_status()
+                    data = await response.json()
+                    logger.debug(f"Ollama API response: {data}")
+
+                    if not isinstance(data, dict):
+                        logger.error("Invalid response format: expected object")
+                        raise Exception("Invalid response format: expected object")
+                    if "models" not in data:
+                        logger.error("Invalid response format: missing 'models' key")
+                        raise Exception("Invalid response format: missing 'models' key")
+                    if not isinstance(data["models"], list):
+                        logger.error("Invalid response format: 'models' is not an array")
+                        raise Exception("Invalid response format: 'models' is not an array")
+
+                    models = []
+                    for model in data["models"]:
+                        if not isinstance(model, dict) or "name" not in model:
+                            continue  # Skip invalid models
+                        models.append({
+                            "id": model["name"],
+                            "name": model["name"].title(),
+                            "tags": model.get("tags", [])
+                        })
+
+                    logger.info(f"Found {len(models)} Ollama models")
+                    return models
+
+        except aiohttp.ClientConnectorError:
+            error_msg = f"Could not connect to Ollama server at {self.base_url}. Please ensure Ollama is running and the URL is correct."
+            logger.error(error_msg)
+            raise Exception(error_msg)
+        except aiohttp.ClientTimeout:
+            error_msg = f"Connection to Ollama server at {self.base_url} timed out after 5 seconds. The server might be busy or unresponsive."
+            logger.error(error_msg)
+            raise Exception(error_msg)
+        except aiohttp.ClientError as e:
+            error_msg = f"Ollama API error: {str(e)}"
+            logger.error(error_msg)
+            raise Exception(error_msg)
+        except Exception as e:
+            error_msg = f"Unexpected error getting models: {str(e)}"
+            logger.error(error_msg)
+            raise Exception(error_msg)
+
+    async def generate_completion(self, messages: List[Dict[str, str]],
+                                  model: str,
+                                  style: Optional[str] = None,
+                                  temperature: float = 0.7,
                                   max_tokens: Optional[int] = None) -> str:
        """Generate a text completion using Ollama"""
+        logger.info(f"Generating completion with model: {model}")
        prompt = self._prepare_messages(messages, style)
+        retries = 2
+        last_error = None
 
-        async with aiohttp.ClientSession() as session:
-            async with session.post(
-                f"{self.base_url}/api/generate",
-                json={
-                    "model": model,
-                    "prompt": prompt,
-                    "temperature": temperature,
-                    "stream": False
-                }
-            ) as response:
-                response.raise_for_status()
-                data = await response.json()
-                return data["response"]
+        while retries >= 0:
+            try:
+                async with aiohttp.ClientSession() as session:
+                    logger.debug(f"Sending request to {self.base_url}/api/generate")
+                    async with session.post(
+                        f"{self.base_url}/api/generate",
+                        json={
+                            "model": model,
+                            "prompt": prompt,
+                            "temperature": temperature,
+                            "stream": False
+                        },
+                        timeout=30
+                    ) as response:
+                        response.raise_for_status()
+                        data = await response.json()
+                        if "response" not in data:
+                            raise Exception("Invalid response format from Ollama server")
+                        return data["response"]
+
+            except aiohttp.ClientConnectorError:
+                last_error = "Could not connect to Ollama server. Make sure Ollama is running and accessible at " + self.base_url
+            except aiohttp.ClientResponseError as e:
+                last_error = f"Ollama API error: {e.status} - {e.message}"
+            except aiohttp.ClientTimeout:
+                last_error = "Request to Ollama server timed out"
+            except json.JSONDecodeError:
+                last_error = "Invalid JSON response from Ollama server"
+            except Exception as e:
+                last_error = f"Error generating completion: {str(e)}"
+
+            logger.error(f"Attempt failed: {last_error}")
+            retries -= 1
+            if retries >= 0:
+                logger.info(f"Retrying... {retries} attempts remaining")
+                await asyncio.sleep(1)
+
+        raise Exception(last_error)
 
-    async def generate_stream(self, messages: List[Dict[str, str]],
-                              model: str,
+    async def generate_stream(self, messages: List[Dict[str, str]],
+                              model: str,
                               style: Optional[str] = None,
-                              temperature: float = 0.7,
+                              temperature: float = 0.7,
                               max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
        """Generate a streaming text completion using Ollama"""
+        logger.info(f"Starting streaming generation with model: {model}")
        prompt = self._prepare_messages(messages, style)
+        retries = 2
+        last_error = None
 
-        async with aiohttp.ClientSession() as session:
-            async with session.post(
-                f"{self.base_url}/api/generate",
-                json={
-                    "model": model,
-                    "prompt": prompt,
-                    "temperature": temperature,
-                    "stream": True
-                }
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line:
-                        chunk = line.decode().strip()
-                        try:
-                            data = json.loads(chunk)
-                            if "response" in data:
-                                yield data["response"]
-                        except json.JSONDecodeError:
-                            continue
-
-    async def get_available_models(self) -> List[Dict[str, Any]]:
-        """Get list of available Ollama models"""
-        try:
-            async with aiohttp.ClientSession() as session:
-                async with session.get(f"{self.base_url}/api/tags") as response:
-                    response.raise_for_status()
-                    data = await response.json()
-                    models = data["models"]
-
-                    return [
-                        {"id": model["name"], "name": model["name"].title()}
-                        for model in models
-                    ]
-        except:
-            # Return some default models if Ollama is not running
-            return [
-                {"id": "llama2", "name": "Llama 2"},
-                {"id": "mistral", "name": "Mistral"},
-                {"id": "codellama", "name": "Code Llama"},
-                {"id": "gemma", "name": "Gemma"}
-            ]
+        while retries >= 0:
+            try:
+                async with aiohttp.ClientSession() as session:
+                    logger.debug(f"Sending streaming request to {self.base_url}/api/generate")
+                    async with session.post(
+                        f"{self.base_url}/api/generate",
+                        json={
+                            "model": model,
+                            "prompt": prompt,
+                            "temperature": temperature,
+                            "stream": True
+                        },
+                        timeout=30
+                    ) as response:
+                        response.raise_for_status()
+                        async for line in response.content:
+                            if line:
+                                chunk = line.decode().strip()
+                                try:
+                                    data = json.loads(chunk)
+                                    if "response" in data:
+                                        yield data["response"]
+                                except json.JSONDecodeError:
+                                    continue
+                        logger.info("Streaming completed successfully")
+                        return
+
+            except aiohttp.ClientConnectorError:
+                last_error = "Could not connect to Ollama server. Make sure Ollama is running and accessible at " + self.base_url
+            except aiohttp.ClientResponseError as e:
+                last_error = f"Ollama API error: {e.status} - {e.message}"
+            except aiohttp.ClientTimeout:
+                last_error = "Request to Ollama server timed out"
+            except Exception as e:
+                last_error = f"Error streaming completion: {str(e)}"
+
+            logger.error(f"Streaming attempt failed: {last_error}")
+            retries -= 1
+            if retries >= 0:
+                logger.info(f"Retrying stream... {retries} attempts remaining")
+                await asyncio.sleep(1)
+
+        raise Exception(last_error)
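
Note: generate_completion and generate_stream now share one retry shape: up to three attempts (retries = 2), a one-second pause between them, and the last error re-raised. The same pattern in isolation (a hypothetical helper, not part of the wheel):

    import asyncio

    async def with_retries(operation, attempts: int = 3, delay: float = 1.0):
        """Run an async operation, retrying on failure as ollama.py does."""
        last_error = None
        for attempt in range(attempts):
            try:
                return await operation()
            except Exception as e:
                last_error = e
                if attempt < attempts - 1:
                    await asyncio.sleep(delay)
        raise Exception(str(last_error))
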
app/api/openai.py CHANGED
@@ -57,17 +57,20 @@ class OpenAIClient(BaseModelClient):
        """Generate a streaming text completion using OpenAI"""
        processed_messages = self._prepare_messages(messages, style)
 
-        stream = await self.client.chat.completions.create(
-            model=model,
-            messages=processed_messages,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            stream=True,
-        )
-
-        async for chunk in stream:
-            if chunk.choices and chunk.choices[0].delta.content:
-                yield chunk.choices[0].delta.content
+        try:
+            stream = await self.client.chat.completions.create(
+                model=model,
+                messages=[{"role": m["role"], "content": m["content"]} for m in processed_messages],
+                temperature=temperature,
+                max_tokens=max_tokens,
+                stream=True,
+            )
+
+            async for chunk in stream:
+                if chunk.choices and chunk.choices[0].delta.content is not None:
+                    yield chunk.choices[0].delta.content
+        except Exception as e:
+            raise Exception(f"OpenAI streaming error: {str(e)}")
 
     def get_available_models(self) -> List[Dict[str, Any]]:
         """Get list of available OpenAI models"""
app/config.py CHANGED
@@ -12,14 +12,35 @@ APP_DIR.mkdir(exist_ok=True)
 DB_PATH = APP_DIR / "chat_history.db"
 CONFIG_PATH = APP_DIR / "config.json"
 
-# API Keys
+# API Keys and Provider Configuration
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
 ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
 OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
 
+def check_provider_availability():
+    """Check which providers are available"""
+    providers = {
+        "openai": bool(OPENAI_API_KEY),
+        "anthropic": bool(ANTHROPIC_API_KEY),
+        "ollama": False
+    }
+
+    # Check if Ollama is running
+    import requests
+    try:
+        response = requests.get(OLLAMA_BASE_URL + "/api/tags", timeout=2)
+        providers["ollama"] = response.status_code == 200
+    except:
+        pass
+
+    return providers
+
+# Get available providers
+AVAILABLE_PROVIDERS = check_provider_availability()
+
 # Default configuration
 DEFAULT_CONFIG = {
-    "default_model": "gpt-3.5-turbo",
+    "default_model": "mistral" if AVAILABLE_PROVIDERS["ollama"] else "gpt-3.5-turbo",
     "available_models": {
         "gpt-3.5-turbo": {
             "provider": "openai",
@@ -101,11 +122,26 @@ DEFAULT_CONFIG = {
     "auto_save": True
 }
 
+def validate_config(config):
+    """Validate and fix configuration issues"""
+    # Only validate non-Ollama providers since Ollama can be started on demand
+    default_model = config.get("default_model")
+    if default_model in config["available_models"]:
+        provider = config["available_models"][default_model]["provider"]
+        if provider != "ollama" and not AVAILABLE_PROVIDERS[provider]:
+            # Find first available model, preferring Ollama
+            for model, info in config["available_models"].items():
+                if info["provider"] == "ollama" or AVAILABLE_PROVIDERS[info["provider"]]:
+                    config["default_model"] = model
+                    break
+    return config
+
 def load_config():
     """Load the user configuration or create default if not exists"""
     if not CONFIG_PATH.exists():
-        save_config(DEFAULT_CONFIG)
-        return DEFAULT_CONFIG
+        validated_config = validate_config(DEFAULT_CONFIG.copy())
+        save_config(validated_config)
+        return validated_config
 
     try:
         with open(CONFIG_PATH, 'r') as f:
@@ -113,10 +149,14 @@ def load_config():
         # Merge with defaults to ensure all keys exist
         merged_config = DEFAULT_CONFIG.copy()
         merged_config.update(config)
-        return merged_config
+        # Validate and fix any issues
+        validated_config = validate_config(merged_config)
+        if validated_config != merged_config:
+            save_config(validated_config)
+        return validated_config
     except Exception as e:
         print(f"Error loading config: {e}. Using defaults.")
-        return DEFAULT_CONFIG
+        return validate_config(DEFAULT_CONFIG.copy())
 
 def save_config(config):
     """Save the configuration to disk"""
app/main.py CHANGED
@@ -90,15 +90,21 @@ class SettingsScreen(Screen):
        self.app.pop_screen()
 
        # Only update settings if Done was pressed
-        if event.button.label == "Done" and self.app.current_conversation:
+        if event.button.label == "Done":
            try:
-                self.app.db.update_conversation(
-                    self.app.current_conversation.id,
-                    model=self.app.selected_model,
-                    style=self.app.selected_style
-                )
-                self.app.current_conversation.model = self.app.selected_model
-                self.app.current_conversation.style = self.app.selected_style
+                # Save settings globally
+                from app.utils import save_settings_to_config
+                save_settings_to_config(self.app.selected_model, self.app.selected_style)
+
+                # Update current conversation if one exists
+                if self.app.current_conversation:
+                    self.app.db.update_conversation(
+                        self.app.current_conversation.id,
+                        model=self.app.selected_model,
+                        style=self.app.selected_style
+                    )
+                    self.app.current_conversation.model = self.app.selected_model
+                    self.app.current_conversation.style = self.app.selected_style
            except Exception as e:
                self.app.notify(f"Error updating settings: {str(e)}", severity="error")
 
@@ -344,15 +350,20 @@ class SimpleChatApp(App):
        if not ANTHROPIC_API_KEY:
            api_issues.append("- ANTHROPIC_API_KEY is not set")
 
-        # Check Ollama availability
-        from app.api.ollama import OllamaClient
-        try:
-            ollama = OllamaClient()
-            models = await ollama.get_available_models()
-            if not models:
-                api_issues.append("- No Ollama models found")
-        except Exception:
-            api_issues.append("- Ollama server not running")
+        # Check Ollama availability and try to start if not running
+        from app.utils import ensure_ollama_running
+        if not ensure_ollama_running():
+            api_issues.append("- Ollama server not running and could not be started")
+        else:
+            # Check for available models
+            from app.api.ollama import OllamaClient
+            try:
+                ollama = OllamaClient()
+                models = await ollama.get_available_models()
+                if not models:
+                    api_issues.append("- No Ollama models found")
+            except Exception:
+                api_issues.append("- Error connecting to Ollama server")
 
        if api_issues:
            self.notify(
@@ -499,22 +510,30 @@ class SimpleChatApp(App):
            return
 
        # Start streaming response
-        assistant_message = Message(role="assistant", content="")
+        assistant_message = Message(role="assistant", content="Thinking...")
        self.messages.append(assistant_message)
        messages_container = self.query_one("#messages-container")
        message_display = MessageDisplay(assistant_message, highlight_code=CONFIG["highlight_code"])
        messages_container.mount(message_display)
        messages_container.scroll_end(animate=False)
 
+        # Add small delay to show thinking state
+        await asyncio.sleep(0.5)
+
        # Stream chunks to the UI
-        async def update_ui(chunk: str):
+        async def update_ui(content: str):
            if not self.is_generating:
                return
 
            try:
-                assistant_message.content += chunk
-                # Update UI directly
-                message_display.update_content(assistant_message.content)
+                # Clear thinking indicator on first content
+                if assistant_message.content == "Thinking...":
+                    assistant_message.content = ""
+
+                # Update message with full content so far
+                assistant_message.content = content
+                # Update UI with full content
+                message_display.update_content(content)
                messages_container.scroll_end(animate=False)
                # Let the event loop process the update
                await asyncio.sleep(0)
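
Note: update_ui now receives the full accumulated response on every call (see the matching change to generate_streaming_response in app/utils.py below), so the display is overwritten rather than appended to. A self-contained illustration of the new callback contract (hypothetical):

    import asyncio

    async def demo():
        content = ""

        async def update_ui(full_text: str):
            nonlocal content
            content = full_text  # overwrite; the caller sends the running total

        for partial in ("Hel", "Hello", "Hello!"):
            await update_ui(partial)
        assert content == "Hello!"

    asyncio.run(demo())
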
app/ui/chat_interface.py CHANGED
@@ -234,20 +234,30 @@ class ChatInterface(Container):
 
     async def add_message(self, role: str, content: str, update_last: bool = False) -> None:
         """Add or update a message in the chat"""
+        messages_container = self.query_one("#messages-container")
+
        if update_last and self.current_message_display and role == "assistant":
            # Update existing message
            await self.current_message_display.update_content(content)
+            # Update message in history
+            if self.messages and self.messages[-1].role == "assistant":
+                self.messages[-1].content = content
        else:
            # Add new message
            message = Message(role=role, content=content)
            self.messages.append(message)
-            messages_container = self.query_one("#messages-container")
            self.current_message_display = MessageDisplay(
                message,
                highlight_code=CONFIG["highlight_code"]
            )
            messages_container.mount(self.current_message_display)
 
+        # Save to conversation if exists
+        if self.conversation and self.conversation.id:
+            from ..database import ChatDatabase
+            db = ChatDatabase()
+            db.add_message(self.conversation.id, role, content)
+
        self.scroll_to_bottom()
 
     async def send_message(self) -> None:
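
Note: add_message now persists each message whenever a conversation is active, using the same ChatDatabase.add_message(conversation_id, role, content) call shown above. A standalone sketch (hypothetical id; assumes an existing conversation row):

    from app.database import ChatDatabase

    db = ChatDatabase()
    db.add_message(1, "user", "Saved directly")  # conversation_id, role, content
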
app/ui/model_selector.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 from typing import Dict, List, Any, Optional
 from textual.app import ComposeResult
 from textual.containers import Container
@@ -6,8 +7,12 @@ from textual.widget import Widget
 from textual.message import Message
 
 from ..config import CONFIG
+from ..api.ollama import OllamaClient
 from .chat_interface import ChatInterface
 
+# Set up logging
+logger = logging.getLogger(__name__)
+
 class ModelSelector(Container):
     """Widget for selecting the AI model to use"""
 
@@ -68,8 +73,8 @@ class ModelSelector(Container):
        if self.selected_model in CONFIG["available_models"]:
            self.selected_provider = CONFIG["available_models"][self.selected_model]["provider"]
        else:
-            # Default to OpenAI for custom models
-            self.selected_provider = "openai"
+            # Default to Ollama for unknown models since it's more flexible
+            self.selected_provider = "ollama"
 
     def compose(self) -> ComposeResult:
         """Set up the model selector"""
@@ -121,35 +126,79 @@ class ModelSelector(Container):
 
     async def on_mount(self) -> None:
         """Initialize model options after mount"""
-        # Only update options if using Ollama provider since it needs async API call
-        if self.selected_provider == "ollama":
-            model_select = self.query_one("#model-select", Select)
-            model_options = await self._get_model_options(self.selected_provider)
-            model_select.set_options(model_options)
-            if not self.selected_model or self.selected_model not in CONFIG["available_models"]:
-                model_select.value = "custom"
-            else:
-                model_select.value = self.selected_model
+        # Always update model options to ensure we have the latest
+        model_select = self.query_one("#model-select", Select)
+        model_options = await self._get_model_options(self.selected_provider)
+        model_select.set_options(model_options)
+
+        # Handle model selection
+        if self.selected_model in [opt[1] for opt in model_options]:
+            model_select.value = self.selected_model
+            model_select.remove_class("hide")
+            self.query_one("#custom-model-input").add_class("hide")
+        else:
+            model_select.value = "custom"
+            model_select.add_class("hide")
+            custom_input = self.query_one("#custom-model-input")
+            custom_input.value = self.selected_model
+            custom_input.remove_class("hide")
 
     async def _get_model_options(self, provider: str) -> List[tuple]:
         """Get model options for a specific provider"""
+        logger = logging.getLogger(__name__)
+        logger.info(f"Getting model options for provider: {provider}")
+
        options = [
            (model_info["display_name"], model_id)
            for model_id, model_info in CONFIG["available_models"].items()
            if model_info["provider"] == provider
        ]
+        logger.info(f"Found {len(options)} models in config for {provider}")
 
        # Add available Ollama models
        if provider == "ollama":
            try:
-                from app.api.ollama import OllamaClient
+                logger.info("Initializing Ollama client...")
                ollama = OllamaClient()
-                ollama_models = await ollama.get_available_models()
-                for model in ollama_models:
-                    if model["id"] not in CONFIG["available_models"]:
-                        options.append((model["name"], model["id"]))
-            except:
-                pass
+                logger.info("Getting available Ollama models...")
+                try:
+                    models = await ollama.get_available_models()
+                    logger.info(f"Found {len(models)} models from Ollama API")
+
+                    # Store models in config for later use
+                    CONFIG["ollama_models"] = models
+                    from ..config import save_config
+                    save_config(CONFIG)
+                    logger.info("Saved Ollama models to config")
+
+                    for model in models:
+                        if model["id"] not in CONFIG["available_models"]:
+                            logger.info(f"Adding new Ollama model: {model['name']}")
+                            options.append((model["name"], model["id"]))
+                except AttributeError:
+                    # Fallback for sync method
+                    models = ollama.get_available_models()
+                    logger.info(f"Found {len(models)} models from Ollama API (sync)")
+                    CONFIG["ollama_models"] = models
+                    from ..config import save_config
+                    save_config(CONFIG)
+                    logger.info("Saved Ollama models to config (sync)")
+
+                    for model in models:
+                        if model["id"] not in CONFIG["available_models"]:
+                            logger.info(f"Adding new Ollama model: {model['name']}")
+                            options.append((model["name"], model["id"]))
+            except Exception as e:
+                logger.error(f"Error getting Ollama models: {str(e)}")
+                # Add default Ollama models if API fails
+                default_models = [
+                    ("Llama 2", "llama2"),
+                    ("Mistral", "mistral"),
+                    ("Code Llama", "codellama"),
+                    ("Gemma", "gemma")
+                ]
+                logger.info("Adding default Ollama models as fallback")
+                options.extend(default_models)
 
        options.append(("Custom Model...", "custom"))
        return options
@@ -166,6 +215,8 @@ class ModelSelector(Container):
            if model_options:
                self.selected_model = model_options[0][1]
                model_select.value = self.selected_model
+                model_select.remove_class("hide")
+                self.query_one("#custom-model-input").add_class("hide")
            self.post_message(self.ModelSelected(self.selected_model))
 
        elif event.select.id == "model-select":
@@ -193,7 +244,6 @@ class ModelSelector(Container):
            self.selected_model = value
            self.post_message(self.ModelSelected(value))
 
-
     def get_selected_model(self) -> str:
         """Get the current selected model ID"""
         return self.selected_model
app/ui/styles.py CHANGED
@@ -108,14 +108,19 @@ Screen {
 /* Action buttons */
 .action-button {
     background: $primary;
-    color: $text;
+    color: #FFFFFF !important; /* Explicit white text */
     border: none;
     min-width: 10;
     margin-left: 1;
+    padding: 0 1; /* Add padding */
+    text-style: bold;
+    font-size: 1.1;
 }
 
 .action-button:hover {
     background: $primary-lighten-1;
+    color: #FFFFFF !important;
+    text-style: bold;
 }
 
 /* Sidebar */
app/utils.py CHANGED
@@ -1,202 +1,87 @@
-from datetime import datetime
-import re
-import asyncio
-import time
-from typing import List, Dict, Any, Optional, Generator, Awaitable, Callable
-import textwrap
-import threading
-from rich.text import Text
-from rich.markdown import Markdown
-from rich.syntax import Syntax
-from rich.panel import Panel
-from rich.console import Console
+import os
+import json
+import subprocess
+import logging
+from typing import Optional, Dict, Any, List
+from .config import CONFIG, save_config
 
-from .models import Message, Conversation
-from .database import ChatDatabase
-from .api.base import BaseModelClient
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
-def generate_conversation_title(messages: List[Message], model: str) -> str:
-    """Generate a title for a conversation based on its content"""
-    # Find the first user message
-    first_user_message = None
-    for msg in messages:
-        if msg.role == "user":
-            first_user_message = msg
-            break
-
-    if first_user_message is None:
-        return f"New conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
-
-    # Use first line of the first user message (up to 40 chars)
-    content = first_user_message.content.strip()
-
-    # Get first line
-    first_line = content.split('\n')[0]
-
-    # Truncate if needed
-    if len(first_line) > 40:
-        title = first_line[:37] + "..."
-    else:
-        title = first_line
-
-    return title
-
-def format_code_blocks(text: str) -> str:
-    """Ensure code blocks have proper formatting"""
-    # Make sure code blocks are properly formatted with triple backticks
-    pattern = r"```(\w*)\n(.*?)\n```"
-
-    def code_replace(match):
-        lang = match.group(1)
-        code = match.group(2)
-        # Ensure code has proper indentation
-        code_lines = code.split('\n')
-        code = '\n'.join([line.rstrip() for line in code_lines])
-        return f"```{lang}\n{code}\n```"
-
-    return re.sub(pattern, code_replace, text, flags=re.DOTALL)
-
-def extract_code_blocks(text: str) -> List[Dict[str, str]]:
-    """Extract code blocks from text content"""
-    blocks = []
-    pattern = r"```(\w*)\n(.*?)\n```"
-    matches = re.finditer(pattern, text, re.DOTALL)
-
-    for match in matches:
-        lang = match.group(1) or "text"
-        code = match.group(2).strip()
-        blocks.append({
-            "language": lang,
-            "code": code,
-            "start": match.start(),
-            "end": match.end()
-        })
-
-    return blocks
-
-def format_text(text: str, highlight_code: bool = True) -> Text:
-    """Format text with optional code highlighting"""
-    result = Text()
-
-    if not highlight_code:
-        return Text(text)
-
-    # Split by code blocks
-    parts = re.split(r'(```\w*\n.*?\n```)', text, flags=re.DOTALL)
-
-    for part in parts:
-        if part.startswith('```'):
-            # Handle code block
-            match = re.match(r'```(\w*)\n(.*?)\n```', part, re.DOTALL)
-            if match:
-                lang = match.group(1) or "text"
-                code = match.group(2).strip()
-                syntax = Syntax(
-                    code,
-                    lang,
-                    theme="monokai",
-                    line_numbers=True,
-                    word_wrap=True,
-                    indent_guides=True
-                )
-                result.append("\n")
-                result.append(syntax)
-                result.append("\n")
-        else:
-            # Handle regular text
-            if part.strip():
-                result.append(Text(part.strip()))
-                result.append("\n")
-
-    return result
-
-def create_new_conversation(db: ChatDatabase, model: str, style: str = "default") -> Conversation:
-    """Create a new conversation in the database"""
-    title = f"New conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
-    conversation_id = db.create_conversation(title, model, style)
-
-    # Get full conversation object
-    conversation_data = db.get_conversation(conversation_id)
-    return Conversation.from_dict(conversation_data)
-
-def update_conversation_title(db: ChatDatabase, conversation: Conversation) -> None:
-    """Update the title of a conversation based on its content"""
-    if not conversation.messages:
-        return
-
-    title = generate_conversation_title(conversation.messages, conversation.model)
-    db.update_conversation(conversation.id, title=title)
-    conversation.title = title
-
-def add_message_to_conversation(
-    db: ChatDatabase,
-    conversation: Conversation,
-    role: str,
-    content: str
-) -> Message:
-    """Add a message to a conversation in the database"""
-    message_id = db.add_message(conversation.id, role, content)
-
-    # Create message object
-    message = Message(
-        id=message_id,
-        conversation_id=conversation.id,
-        role=role,
-        content=content,
-        timestamp=datetime.now().isoformat()
-    )
-
-    # Add to conversation
-    conversation.messages.append(message)
-
-    # Update conversation title if it's the default
-    if conversation.title.startswith("New conversation"):
-        update_conversation_title(db, conversation)
-
-    return message
-
-def run_in_thread(func: Callable, *args, **kwargs) -> threading.Thread:
-    """Run a function in a separate thread"""
-    thread = threading.Thread(target=func, args=args, kwargs=kwargs)
-    thread.daemon = True
-    thread.start()
-    return thread
-
-async def generate_streaming_response(
-    messages: List[Dict[str, str]],
-    model: str,
-    style: str,
-    client: BaseModelClient,
-    callback: Callable[[str], Awaitable[None]]
-) -> str:
-    """Generate a streaming response and call the callback for each chunk"""
+async def generate_streaming_response(messages: List[Dict], model: str, style: str, client: Any, callback: Any) -> str:
+    """Generate a streaming response from the model"""
+    logger.info(f"Starting streaming response with model: {model}")
     full_response = ""
-
     try:
-        # Get the async generator from the client
-        stream = client.generate_stream(messages, model, style)
-        # Iterate over the generator properly
-        async for chunk in stream:
+        async for chunk in client.generate_stream(messages, model, style):
            if chunk:  # Only process non-empty chunks
                full_response += chunk
-                # Update UI and ensure event loop processes it
-                await callback(chunk)
-                # Small delay to prevent overwhelming the event loop
-                await asyncio.sleep(0.01)
+                await callback(full_response)  # Send full response so far
+        logger.info("Streaming response completed")
+        return full_response
     except Exception as e:
-        error_msg = f"\n\nError generating response: {str(e)}"
-        full_response += error_msg
-        await callback(error_msg)
-
-    return full_response
+        logger.error(f"Error in streaming response: {str(e)}")
+        raise
 
-def get_elapsed_time(start_time: float) -> str:
-    """Get the elapsed time as a formatted string"""
-    elapsed = time.time() - start_time
+def ensure_ollama_running() -> bool:
+    """
+    Check if Ollama is running and try to start it if not.
+    Returns True if Ollama is running after check/start attempt.
+    """
+    import requests
+    try:
+        logger.info("Checking if Ollama is running...")
+        response = requests.get("http://localhost:11434/api/tags", timeout=2)
+        if response.status_code == 200:
+            logger.info("Ollama is running")
+            return True
+        else:
+            logger.warning(f"Ollama returned status code: {response.status_code}")
+            return False
+    except requests.exceptions.ConnectionError:
+        logger.info("Ollama not running, attempting to start...")
+        try:
+            # Try to start Ollama
+            process = subprocess.Popen(
+                ["ollama", "serve"],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True
+            )
+
+            # Wait a moment for it to start
+            import time
+            time.sleep(2)
+
+            # Check if process is still running
+            if process.poll() is None:
+                logger.info("Ollama server started successfully")
+                # Check if we can connect
+                try:
+                    response = requests.get("http://localhost:11434/api/tags", timeout=2)
+                    if response.status_code == 200:
+                        logger.info("Successfully connected to Ollama")
+                        return True
+                    else:
+                        logger.error(f"Ollama returned status code: {response.status_code}")
+                except Exception as e:
+                    logger.error(f"Failed to connect to Ollama after starting: {str(e)}")
+            else:
+                stdout, stderr = process.communicate()
+                logger.error(f"Ollama failed to start. stdout: {stdout}, stderr: {stderr}")
+        except FileNotFoundError:
+            logger.error("Ollama command not found. Please ensure Ollama is installed.")
+        except Exception as e:
+            logger.error(f"Error starting Ollama: {str(e)}")
+    except Exception as e:
+        logger.error(f"Error checking Ollama status: {str(e)}")
 
-    if elapsed < 60:
-        return f"{elapsed:.1f}s"
-    else:
-        minutes = int(elapsed // 60)
-        seconds = elapsed % 60
-        return f"{minutes}m {seconds:.1f}s"
+    return False
+
+def save_settings_to_config(model: str, style: str) -> None:
+    """Save settings to global config file"""
+    logger.info(f"Saving settings to config - model: {model}, style: {style}")
+    CONFIG["default_model"] = model
+    CONFIG["default_style"] = style
+    save_config(CONFIG)
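
Note: ensure_ollama_running() is the guard the other modules call before touching the Ollama API (see ollama.py and main.py above). A minimal sketch of using it directly (hypothetical driver code, not part of the wheel):

    from app.utils import ensure_ollama_running
    from app.api.ollama import OllamaClient

    if ensure_ollama_running():
        client = OllamaClient()  # safe: the server answered /api/tags
    else:
        print("Install Ollama, or start it manually with `ollama serve`.")
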
chat_console-0.1.5.dev1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: chat-console
-Version: 0.1.2
+Version: 0.1.5.dev1
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
chat_console-0.1.5.dev1.dist-info/RECORD ADDED
@@ -0,0 +1,23 @@
+app/__init__.py,sha256=u5X4kPcpqZ12ZLnhwwOCScNvftaknDTrb0DMXqR_iLc,130
+app/config.py,sha256=7C09kn2bmda9frTPfZ7f1JhagqHAZjGM5BYqZmhegYM,5190
+app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
+app/main.py,sha256=ZeLroiPrlGgXArL0Po545PB3SU6imkx2KATjld1hV6s,21996
+app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
+app/utils.py,sha256=tbMhutE3vg9seGstD5k8MyUhJo5XbJ17p64dl2wTqYY,3481
+app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
+app/api/anthropic.py,sha256=x5PmBXEKe_ow2NWk8XdqSPR0hLOdCc_ypY5QAySeA78,4234
+app/api/base.py,sha256=-6RSxSpqe-OMwkaq1wVWbu3pVkte-ZYy8rmdvt-Qh48,3953
+app/api/ollama.py,sha256=2Yqyc6d3lwShAx4j1A97y7iPZWLeMw-wumtnhvQzAxY,9869
+app/api/openai.py,sha256=1fYgFXXL6yj_7lQ893Yj28RYG4M8d6gt_q1gzhhjcig,3641
+app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
+app/ui/chat_interface.py,sha256=wFmCiSvwqp8Jia3nkMUxrYAou7Hr3UAqGhTvZoClVL8,11548
+app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
+app/ui/model_selector.py,sha256=xCuaohgYvebgP0Eel6-XzUn-7Y0SrJUArdTr-CDBZXc,12840
+app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
+app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
+chat_console-0.1.5.dev1.dist-info/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.1.5.dev1.dist-info/METADATA,sha256=XZ5qunyAZ9WupEaGJBg7DuUw2ZvkhXmAYn-q0xYM8ss,2904
+chat_console-0.1.5.dev1.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+chat_console-0.1.5.dev1.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.1.5.dev1.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.1.5.dev1.dist-info/RECORD,,
chat_console-0.1.2.dist-info/RECORD DELETED
@@ -1,23 +0,0 @@
-app/__init__.py,sha256=u5X4kPcpqZ12ZLnhwwOCScNvftaknDTrb0DMXqR_iLc,130
-app/config.py,sha256=PLEic_jwfWvWJxDfQMbKSbJ4ULrcmDhVe0apqegMO_g,3571
-app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=suyVFuVughSz4ld533sdwqtAedVN4EW1rZpoxNHksBY,21000
-app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=oUpQpqrxvvQn0S0lMCSwDC1Rx0PHpoAIRDySohYV5Oo,6586
-app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
-app/api/anthropic.py,sha256=leWSnCfqKnxHB5k3l_oVty4km3q18dodJkPAxwvhEt0,4211
-app/api/base.py,sha256=-Lx6nfgvEPjrAnQXuCgG-zr8soD1AibTtP15gVD3O48,3138
-app/api/ollama.py,sha256=naD5-WVCthZ-0s4iBo_bYV1hRMcuczly-lghmB2_loQ,5033
-app/api/openai.py,sha256=70NITI4upGld_xpaCZLoMd0ObSeVdhtiyUfY9hYHlhE,3420
-app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
-app/ui/chat_interface.py,sha256=5gSOa7zT9bWujkPYctB8gVm4yypnkmKHcY1VtaKcEQs,11126
-app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
-app/ui/model_selector.py,sha256=Rv0i2VjLL2-cp4Pn_uMnAnAIV7Zk9gBX1XoWKBzkxHg,10367
-app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
-app/ui/styles.py,sha256=eVDBTpBGnQ-mg5SeLi6i74ZjhCpItxAwWh1IelD09GY,5445
-chat_console-0.1.2.dist-info/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.1.2.dist-info/METADATA,sha256=4VoR4lrcZq-m37Kkpy4-Ld7QSKsUtqDwpvjj8xCW9WE,2899
-chat_console-0.1.2.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
-chat_console-0.1.2.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.1.2.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.1.2.dist-info/RECORD,,