chat-console 0.2.9__py3-none-any.whl → 0.2.98__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/api/openai.py CHANGED
@@ -55,21 +55,63 @@ class OpenAIClient(BaseModelClient):
                                temperature: float = 0.7,
                                max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
         """Generate a streaming text completion using OpenAI"""
+        try:
+            from app.main import debug_log  # Import debug logging if available
+            debug_log(f"OpenAI: starting streaming generation with model: {model}")
+        except ImportError:
+            # If debug_log is not available, create a no-op function
+            debug_log = lambda msg: None
+
         processed_messages = self._prepare_messages(messages, style)
 
         try:
+            debug_log(f"OpenAI: preparing {len(processed_messages)} messages for stream")
+
+            # Safely prepare messages
+            try:
+                api_messages = []
+                for m in processed_messages:
+                    if isinstance(m, dict) and "role" in m and "content" in m:
+                        api_messages.append({"role": m["role"], "content": m["content"]})
+                    else:
+                        debug_log(f"OpenAI: skipping invalid message: {m}")
+
+                debug_log(f"OpenAI: prepared {len(api_messages)} valid messages")
+            except Exception as msg_error:
+                debug_log(f"OpenAI: error preparing messages: {str(msg_error)}")
+                # Fall back to a simpler message format if processing fails
+                api_messages = [{"role": "user", "content": "Please respond to my request."}]
+
+            debug_log("OpenAI: requesting stream")
             stream = await self.client.chat.completions.create(
                 model=model,
-                messages=[{"role": m["role"], "content": m["content"]} for m in processed_messages],
+                messages=api_messages,
                 temperature=temperature,
                 max_tokens=max_tokens,
                 stream=True,
             )
 
+            debug_log("OpenAI: stream created successfully, processing chunks")
             async for chunk in stream:
-                if chunk.choices and chunk.choices[0].delta.content is not None:
-                    yield chunk.choices[0].delta.content
+                try:
+                    if chunk.choices and hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
+                        content = chunk.choices[0].delta.content
+                        if content is not None:
+                            # Ensure we're returning a string
+                            text = str(content)
+                            debug_log(f"OpenAI: yielding chunk of length: {len(text)}")
+                            yield text
+                        else:
+                            debug_log("OpenAI: skipping None content chunk")
+                    else:
+                        debug_log("OpenAI: skipping chunk with missing content")
+                except Exception as chunk_error:
+                    debug_log(f"OpenAI: error processing chunk: {str(chunk_error)}")
+                    # Skip problematic chunks but continue processing
+                    continue
+
         except Exception as e:
+            debug_log(f"OpenAI: error in generate_stream: {str(e)}")
             raise Exception(f"OpenAI streaming error: {str(e)}")
 
     def get_available_models(self) -> List[Dict[str, Any]]:
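For context, a minimal sketch of how this async generator might be consumed. The setup is an assumption, not code from the package: the OpenAIClient import path is taken from the file name above, and the messages/model/style parameters are inferred from the method body (only temperature and max_tokens are visible in the hunk header).

import asyncio

from app.api.openai import OpenAIClient  # path as it appears in this diff

async def collect_response() -> str:
    # Assumed signature, inferred from the body above:
    #   generate_stream(messages, model, style, temperature=0.7, max_tokens=None)
    client = OpenAIClient()
    parts = []
    async for chunk in client.generate_stream(
        messages=[{"role": "user", "content": "Say hello"}],
        model="gpt-4",
        style=None,  # styling happens inside _prepare_messages; None is an assumption
    ):
        parts.append(chunk)  # each chunk is already coerced to str by the generator
    return "".join(parts)

if __name__ == "__main__":
    print(asyncio.run(collect_response()))

Because the generator skips malformed chunks instead of raising, a consumer like this only has to handle the single wrapped exception from the outer except block.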
app/config.py CHANGED
@@ -52,24 +52,30 @@ DEFAULT_CONFIG = {
         "max_tokens": 8192,
         "display_name": "GPT-4"
     },
-    "claude-3-opus": {
+    # Use the corrected keys from anthropic.py
+    "claude-3-opus-20240229": {
         "provider": "anthropic",
-        "max_tokens": 4096,
+        "max_tokens": 4096,  # Note: Max tokens might differ per model version
         "display_name": "Claude 3 Opus"
     },
-    "claude-3-sonnet": {
+    "claude-3-sonnet-20240229": {
         "provider": "anthropic",
-        "max_tokens": 4096,
+        "max_tokens": 4096,  # Note: Max tokens might differ per model version
         "display_name": "Claude 3 Sonnet"
     },
-    "claude-3-haiku": {
+    "claude-3-haiku-20240307": {
         "provider": "anthropic",
         "max_tokens": 4096,
         "display_name": "Claude 3 Haiku"
     },
-    "claude-3.7-sonnet": {
+    "claude-3-5-sonnet-20240620": {
         "provider": "anthropic",
-        "max_tokens": 4096,
+        "max_tokens": 4096,  # Note: Max tokens might differ per model version
+        "display_name": "Claude 3.5 Sonnet"  # Corrected display name
+    },
+    "claude-3-7-sonnet-20250219": {
+        "provider": "anthropic",
+        "max_tokens": 4096,  # Note: Max tokens might differ per model version
         "display_name": "Claude 3.7 Sonnet"
     },
     "llama2": {
@@ -166,3 +172,43 @@ def save_config(config):
 
 # Current configuration
 CONFIG = load_config()
+
+# --- Dynamically update Anthropic models after initial load ---
+def update_anthropic_models(config):
+    """Fetch models from the Anthropic API and update the config dict."""
+    if AVAILABLE_PROVIDERS["anthropic"]:
+        try:
+            # Import here to avoid a circular dependency at module load time
+            from app.api.anthropic import AnthropicClient
+            client = AnthropicClient()
+            fetched_models = client.get_available_models()  # Fetches from the API, or uses the fallback list
+
+            if fetched_models:
+                # Remove the old hardcoded Anthropic models first
+                models_to_remove = [
+                    model_id for model_id, info in config["available_models"].items()
+                    if info.get("provider") == "anthropic"
+                ]
+                for model_id in models_to_remove:
+                    del config["available_models"][model_id]
+
+                # Add the fetched models
+                for model in fetched_models:
+                    config["available_models"][model["id"]] = {
+                        "provider": "anthropic",
+                        "max_tokens": 4096,  # Assign a default max_tokens
+                        "display_name": model["name"]
+                    }
+                print(f"Updated Anthropic models in config: {[m['id'] for m in fetched_models]}")
+            else:
+                print("Could not fetch or find Anthropic models to update config.")
+
+        except Exception as e:
+            # Keep the existing config if the update fails
+            print(f"Error updating Anthropic models in config: {e}")
+
+    return config
+
+# Update the global CONFIG after loading it
+CONFIG = update_anthropic_models(CONFIG)
+# Optionally save the updated config back immediately (or rely on later saves)
+# save_config(CONFIG)  # Uncomment to persist the fetched models immediately
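The function above follows a replace-then-add pattern: drop every entry whose provider is "anthropic", then re-register whatever the API returned. A provider-agnostic sketch of the same pattern; replace_provider_models is illustrative and not in the package:

def replace_provider_models(config: dict, provider: str, fetched: list) -> dict:
    # Hypothetical, provider-agnostic version of the pattern above
    config["available_models"] = {
        model_id: info
        for model_id, info in config["available_models"].items()
        if info.get("provider") != provider
    }
    for model in fetched:
        config["available_models"][model["id"]] = {
            "provider": provider,
            "max_tokens": 4096,  # same default the diff assigns
            "display_name": model["name"],
        }
    return config

Rebuilding the dict with a comprehension sidesteps mutating it while iterating; the package's version achieves the same safety by collecting the IDs into models_to_remove before deleting.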