chat-console 0.3.991__py3-none-any.whl → 0.3.995__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
app/__init__.py CHANGED
@@ -3,4 +3,4 @@ Chat CLI
 A command-line interface for chatting with various LLM providers like ChatGPT and Claude.
 """
 
-__version__ = "0.3.991"
+__version__ = "0.3.995"
app/api/base.py CHANGED
@@ -61,6 +61,26 @@ class BaseModelClient(ABC):
             logger.info(f"Found model in config with provider: {provider}")
         # For custom models, try to infer provider
         else:
+            # Check if this model was selected from a specific provider in the UI
+            # This would be stored in a temporary attribute on the app instance
+            try:
+                from ..main import SimpleChatApp
+                import inspect
+                frame = inspect.currentframe()
+                while frame:
+                    if 'self' in frame.f_locals and isinstance(frame.f_locals['self'], SimpleChatApp):
+                        app_instance = frame.f_locals['self']
+                        if hasattr(app_instance, 'selected_provider'):
+                            provider = app_instance.selected_provider
+                            logger.info(f"Using provider from UI selection: {provider}")
+                            return OllamaClient if provider == "ollama" else (
+                                OpenAIClient if provider == "openai" else
+                                AnthropicClient if provider == "anthropic" else None)
+                    frame = frame.f_back
+            except Exception as e:
+                logger.error(f"Error checking for UI provider selection: {str(e)}")
+
+            # If we couldn't get the provider from the UI, infer it from the model name
             # Check for common OpenAI model patterns or prefixes
             if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
                 "gpt" in model_name_lower or
@@ -122,34 +142,57 @@ class BaseModelClient(ABC):
             raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")
         # For custom models, try to infer provider
         else:
-            # Check for common OpenAI model patterns or prefixes
-            if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
-                "gpt" in model_name_lower or
-                model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
-                if not AVAILABLE_PROVIDERS["openai"]:
-                    raise Exception("OpenAI API key not found. Please set OPENAI_API_KEY environment variable.")
-                provider = "openai"
-                logger.info(f"Identified {model_name} as an OpenAI model")
-            # Then check for Anthropic models - these should ALWAYS use Anthropic client
-            elif any(name in model_name_lower for name in ["claude", "anthropic"]):
-                if not AVAILABLE_PROVIDERS["anthropic"]:
-                    raise Exception("Anthropic API key not found. Please set ANTHROPIC_API_KEY environment variable.")
-                provider = "anthropic"
-                logger.info(f"Identified as Anthropic model: {model_name}")
-            # Then try Ollama for known model names or if selected from Ollama UI
-            elif (any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]) or
-                  model_name in [m["id"] for m in CONFIG.get("ollama_models", [])]):
-                if not AVAILABLE_PROVIDERS["ollama"]:
-                    raise Exception("Ollama server is not running. Please start Ollama and try again.")
-                provider = "ollama"
-                logger.info(f"Identified as Ollama model: {model_name}")
-            else:
-                # Default to Ollama for unknown models
-                if AVAILABLE_PROVIDERS["ollama"]:
+            # Check if this model was selected from a specific provider in the UI
+            provider = None
+            try:
+                from ..main import SimpleChatApp
+                import inspect
+                frame = inspect.currentframe()
+                while frame:
+                    if 'self' in frame.f_locals and isinstance(frame.f_locals['self'], SimpleChatApp):
+                        app_instance = frame.f_locals['self']
+                        if hasattr(app_instance, 'selected_provider'):
+                            provider = app_instance.selected_provider
+                            logger.info(f"Using provider from UI selection: {provider}")
+                            break
+                    frame = frame.f_back
+            except Exception as e:
+                logger.error(f"Error checking for UI provider selection: {str(e)}")
+
+            # If we couldn't get the provider from the UI, infer it from the model name
+            if not provider:
+                # Check for common OpenAI model patterns or prefixes
+                if (model_name_lower.startswith(("gpt-", "text-", "davinci")) or
+                    "gpt" in model_name_lower or
+                    model_name_lower in ["04-mini", "04", "04-turbo", "04-vision"]):
+                    if not AVAILABLE_PROVIDERS["openai"]:
+                        raise Exception("OpenAI API key not found. Please set OPENAI_API_KEY environment variable.")
+                    provider = "openai"
+                    logger.info(f"Identified {model_name} as an OpenAI model")
+                # Then check for Anthropic models - these should ALWAYS use Anthropic client
+                elif any(name in model_name_lower for name in ["claude", "anthropic"]):
+                    if not AVAILABLE_PROVIDERS["anthropic"]:
+                        raise Exception("Anthropic API key not found. Please set ANTHROPIC_API_KEY environment variable.")
+                    provider = "anthropic"
+                    logger.info(f"Identified as Anthropic model: {model_name}")
+                # Then try Ollama for known model names or if selected from Ollama UI
+                elif (any(name in model_name_lower for name in ["llama", "mistral", "codellama", "gemma"]) or
+                      model_name in [m["id"] for m in CONFIG.get("ollama_models", [])]):
+                    if not AVAILABLE_PROVIDERS["ollama"]:
+                        raise Exception("Ollama server is not running. Please start Ollama and try again.")
                     provider = "ollama"
-                    logger.info(f"Unknown model type, defaulting to Ollama: {model_name}")
+                    logger.info(f"Identified as Ollama model: {model_name}")
                 else:
-                    raise Exception(f"Unknown model: {model_name}")
+                    # Default to Ollama for unknown models
+                    if AVAILABLE_PROVIDERS["ollama"]:
+                        provider = "ollama"
+                        logger.info(f"Unknown model type, defaulting to Ollama: {model_name}")
+                    else:
+                        raise Exception(f"Unknown model: {model_name}")
+
+            # Verify the selected provider is available
+            if provider and not AVAILABLE_PROVIDERS.get(provider, False):
+                raise Exception(f"Provider '{provider}' is not available. Please check your configuration.")
 
         # Return appropriate client
         if provider == "ollama":
app/api/openai.py CHANGED
@@ -53,12 +53,18 @@ class OpenAIClient(BaseModelClient):
         """Generate a text completion using OpenAI"""
         processed_messages = self._prepare_messages(messages, style)
 
-        response = await self.client.chat.completions.create(
-            model=model,
-            messages=processed_messages,
-            temperature=temperature,
-            max_tokens=max_tokens,
-        )
+        # Create parameters dict, omitting max_tokens if it's None
+        params = {
+            "model": model,
+            "messages": processed_messages,
+            "temperature": temperature,
+        }
+
+        # Only add max_tokens if it's not None
+        if max_tokens is not None:
+            params["max_tokens"] = max_tokens
+
+        response = await self.client.chat.completions.create(**params)
 
         return response.choices[0].message.content
 
@@ -113,13 +119,20 @@ class OpenAIClient(BaseModelClient):
 
         while retry_count <= max_retries:
             try:
-                stream = await self.client.chat.completions.create(
-                    model=model,
-                    messages=api_messages,
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                    stream=True,
-                )
+                # Create parameters dict, omitting max_tokens if it's None
+                params = {
+                    "model": model,
+                    "messages": api_messages,
+                    "temperature": temperature,
+                    "stream": True,
+                }
+
+                # Only add max_tokens if it's not None
+                if max_tokens is not None:
+                    params["max_tokens"] = max_tokens
+
+                debug_log(f"OpenAI: creating stream with params: {params}")
+                stream = await self.client.chat.completions.create(**params)
 
                 # Store the stream for potential cancellation
                 self._active_stream = stream
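
Both openai.py hunks apply the same fix: assemble the request kwargs in a dict and include `max_tokens` only when the caller actually supplied one, letting the API fall back to its own default instead of receiving an explicit null. A minimal sketch of the pattern, with a stub in place of the real OpenAI client (only the dict-building logic mirrors the diff; `StubCompletions` and `complete` are illustrative):

```python
import asyncio

class StubCompletions:
    """Stand-in for the SDK's chat.completions endpoint; just echoes kwarg names."""
    async def create(self, **params):
        return sorted(params)

async def complete(client, model, messages, temperature=0.7, max_tokens=None):
    # Build kwargs first; leave max_tokens out entirely when it is None.
    params = {"model": model, "messages": messages, "temperature": temperature}
    if max_tokens is not None:
        params["max_tokens"] = max_tokens
    return await client.create(**params)

client = StubCompletions()
print(asyncio.run(complete(client, "gpt-4", [{"role": "user", "content": "hi"}])))
# -> ['messages', 'model', 'temperature']
print(asyncio.run(complete(client, "gpt-4", [], max_tokens=256)))
# -> ['max_tokens', 'messages', 'model', 'temperature']
```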
app/main.py CHANGED
@@ -1215,9 +1215,16 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
             debug_log(f"State change event from unrelated worker: {event.worker.name}")
 
 
-    def on_model_selector_model_selected(self, event: ModelSelector.ModelSelected) -> None: # Keep SimpleChatApp on_model_selector_model_selected
-        """Handle model selection""" # Keep SimpleChatApp on_model_selector_model_selected docstring
-        self.selected_model = event.model_id # Keep SimpleChatApp on_model_selector_model_selected
+    def on_model_selector_model_selected(self, event: ModelSelector.ModelSelected) -> None:
+        """Handle model selection"""
+        self.selected_model = event.model_id
+
+        # Store the selected provider for use in client resolution
+        model_selector = self.query_one(ModelSelector)
+        if model_selector:
+            self.selected_provider = model_selector.selected_provider
+            log(f"Stored selected provider: {self.selected_provider} for model: {self.selected_model}")
+
         self.update_app_info() # Update the displayed model info
 
     def on_style_selector_style_selected(self, event: StyleSelector.StyleSelected) -> None: # Keep SimpleChatApp on_style_selector_style_selected
app/ui/model_selector.py CHANGED
@@ -251,6 +251,11 @@ class ModelSelector(Container):
         # IMPORTANT: Clear any cached client
         if hasattr(self.app, 'cached_client'):
             self.app.cached_client = None
+
+        # Store the selected provider in the app instance for client resolution
+        if hasattr(self.app, 'selected_provider'):
+            self.app.selected_provider = self.selected_provider
+            logger.info(f"Updated app.selected_provider to: {self.selected_provider}")
 
         # Update model options
         model_select = self.query_one("#model-select", Select)
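
The main.py and model_selector.py hunks are two halves of one hand-off: the selector mirrors its `selected_provider` onto the app instance, and the model-selected handler re-reads it, so the choice survives until `BaseModelClient` resolves a client. A tiny sketch of that shared-attribute hand-off, with plain objects standing in for the Textual app and widget:

```python
class FakeApp:
    """Stand-in for SimpleChatApp: one writable attribute is all that's needed."""
    selected_provider = None

class FakeSelector:
    """Stand-in for ModelSelector: knows its app and the current provider."""
    def __init__(self, app):
        self.app = app
        self.selected_provider = None

    def on_provider_changed(self, provider):
        self.selected_provider = provider
        # Mirror the choice onto the app so distant code can read it later,
        # guarded with hasattr as in the diff.
        if hasattr(self.app, "selected_provider"):
            self.app.selected_provider = provider

app = FakeApp()
FakeSelector(app).on_provider_changed("anthropic")
print(app.selected_provider)  # -> anthropic
```

One detail worth knowing when reading the main.py hunk: in Textual, `query_one` raises `NoMatches` rather than returning `None` when the widget is absent, so the `if model_selector:` guard only matters for a falsy widget, not a missing one.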
app/utils.py CHANGED
@@ -774,6 +774,13 @@ def resolve_model_id(model_id_or_name: str) -> str:
         "o4-vision": "04-vision"
     }
 
+    # Check for more complex typo patterns with dates
+    if input_lower.startswith("o1-") and "-202" in input_lower:
+        corrected = "01" + input_lower[2:]
+        logger.info(f"Converting '{input_lower}' to '{corrected}' (letter 'o' to zero '0')")
+        input_lower = corrected
+        model_id_or_name = corrected
+
     if input_lower in typo_corrections:
         corrected = typo_corrections[input_lower]
         logger.info(f"Converting '{input_lower}' to '{corrected}' (letter 'o' to zero '0')")
chat_console-0.3.991.dist-info/METADATA → chat_console-0.3.995.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chat-console
-Version: 0.3.991
+Version: 0.3.995
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
chat_console-0.3.991.dist-info/RECORD → chat_console-0.3.995.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
-app/__init__.py,sha256=ZWMbaTFHYgXqy4qglFzcSE1yk3aIh1w759fHYF8d-K4,132
+app/__init__.py,sha256=7Y7rb5EnGtgcyFHL5Z-6rT9sxtKiZfSrs4YsouTOYNw,132
 app/config.py,sha256=xeRGXcKbNvAdQGkaJJBipM4yHZJTM1y4ZFoW764APOU,7661
 app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=dqQ2kxJL_94jcQFCT6tF5PLIH336tBTVUGkZhK4V8i4,77258
+app/main.py,sha256=WOcMP6yRwoEzftTSHf0e3zVK1aEuBgKMAsNbzHyKgiA,77427
 app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=dDT7iiH-BXPiFhJCK095X05ofb65z97p-YXH-OuOK3k,38830
+app/utils.py,sha256=AfB6USZdSwkUj75TQzGt_WPAUt1K8wlghON4vRVHUbE,39158
 app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
 app/api/anthropic.py,sha256=uInwNvGLJ_iPUs4BjdwaqXTU6NfmK1SzX7498Pt44fI,10667
-app/api/base.py,sha256=zvlHHfIcaObefkJ3w4er9ZSX7YGZ_MM0H-wrzD8CGAM,7629
+app/api/base.py,sha256=ELHl7K0jn0OuOfub7lVboigIbym0sv1se_-bCLscPJ8,10232
 app/api/ollama.py,sha256=eFG24nI2MlF57z9EHiA97v02NgFJ0kxaPUX26xAXFsg,66154
-app/api/openai.py,sha256=hLPr955tUx_2vwRuLP8Zrl3vu7kQZgUETi4cJuaYnFE,10810
+app/api/openai.py,sha256=vWk8kmA5VWbYcGpXbNp1fk9ZzvxMn6g8nLHHA3CO0vY,11395
 app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
 app/ui/chat_interface.py,sha256=oSDZi0Jgj_L8WnBh1RuJpIeIcN-RQ38CNejwsXiWTVg,18267
 app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
 app/ui/model_browser.py,sha256=pdblLVkdyVF0_Bo02bqbErGAtieyH-y6IfhMOPEqIso,71124
-app/ui/model_selector.py,sha256=9fIPpAiqb568idt9pdROAYaxpoqY9czMF-bGdOl4nYk,18861
+app/ui/model_selector.py,sha256=2G0TOXfcNodrXZOhLeaJJ2iG3Nck4c_NN1AvUAmaF3M,19172
 app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
 app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
-chat_console-0.3.991.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.3.991.dist-info/METADATA,sha256=LrgZQlvm_Pslv_T4RhVul8rPCfHvL0riJw5p4AsZ-iM,2923
-chat_console-0.3.991.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
-chat_console-0.3.991.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.3.991.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.3.991.dist-info/RECORD,,
+chat_console-0.3.995.dist-info/licenses/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.3.995.dist-info/METADATA,sha256=bJfytsa2etmHsB_yjV-SrlBTRXX6X7Omf85_c1miTR4,2923
+chat_console-0.3.995.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
+chat_console-0.3.995.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.3.995.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.3.995.dist-info/RECORD,,