chat-console 0.2.9__py3-none-any.whl → 0.2.99__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/api/openai.py CHANGED
@@ -5,7 +5,14 @@ from ..config import OPENAI_API_KEY
 
 class OpenAIClient(BaseModelClient):
     def __init__(self):
-        self.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+        self.client = None  # Initialize in create()
+
+    @classmethod
+    async def create(cls) -> 'OpenAIClient':
+        """Create a new instance with async initialization."""
+        instance = cls()
+        instance.client = AsyncOpenAI(api_key=OPENAI_API_KEY)
+        return instance
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
         """Prepare messages for OpenAI API"""
@@ -55,21 +62,63 @@ class OpenAIClient(BaseModelClient):
                           temperature: float = 0.7,
                           max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
         """Generate a streaming text completion using OpenAI"""
+        try:
+            from app.main import debug_log  # Import debug logging if available
+            debug_log(f"OpenAI: starting streaming generation with model: {model}")
+        except ImportError:
+            # If debug_log not available, create a no-op function
+            debug_log = lambda msg: None
+
         processed_messages = self._prepare_messages(messages, style)
 
         try:
+            debug_log(f"OpenAI: preparing {len(processed_messages)} messages for stream")
+
+            # Safely prepare messages
+            try:
+                api_messages = []
+                for m in processed_messages:
+                    if isinstance(m, dict) and "role" in m and "content" in m:
+                        api_messages.append({"role": m["role"], "content": m["content"]})
+                    else:
+                        debug_log(f"OpenAI: skipping invalid message: {m}")
+
+                debug_log(f"OpenAI: prepared {len(api_messages)} valid messages")
+            except Exception as msg_error:
+                debug_log(f"OpenAI: error preparing messages: {str(msg_error)}")
+                # Fallback to a simpler message format if processing fails
+                api_messages = [{"role": "user", "content": "Please respond to my request."}]
+
+            debug_log("OpenAI: requesting stream")
             stream = await self.client.chat.completions.create(
                 model=model,
-                messages=[{"role": m["role"], "content": m["content"]} for m in processed_messages],
+                messages=api_messages,
                 temperature=temperature,
                 max_tokens=max_tokens,
                 stream=True,
             )
 
+            debug_log("OpenAI: stream created successfully, processing chunks")
             async for chunk in stream:
-                if chunk.choices and chunk.choices[0].delta.content is not None:
-                    yield chunk.choices[0].delta.content
+                try:
+                    if chunk.choices and hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
+                        content = chunk.choices[0].delta.content
+                        if content is not None:
+                            # Ensure we're returning a string
+                            text = str(content)
+                            debug_log(f"OpenAI: yielding chunk of length: {len(text)}")
+                            yield text
+                        else:
+                            debug_log("OpenAI: skipping None content chunk")
+                    else:
+                        debug_log("OpenAI: skipping chunk with missing content")
+                except Exception as chunk_error:
+                    debug_log(f"OpenAI: error processing chunk: {str(chunk_error)}")
+                    # Skip problematic chunks but continue processing
+                    continue
+
         except Exception as e:
+            debug_log(f"OpenAI: error in generate_stream: {str(e)}")
             raise Exception(f"OpenAI streaming error: {str(e)}")
 
     def get_available_models(self) -> List[Dict[str, Any]]:
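
For orientation, a hedged sketch of how the reworked generate_stream might be driven end to end; the demo coroutine, prompt, and model id are assumptions for illustration only:

    import asyncio
    from app.api.openai import OpenAIClient

    async def demo():
        client = await OpenAIClient.create()  # async factory introduced above
        async for text in client.generate_stream(
            messages=[{"role": "user", "content": "Say hello"}],
            model="gpt-3.5-turbo",  # assumed model id for the example
        ):
            print(text, end="", flush=True)

    asyncio.run(demo())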
app/config.py CHANGED
@@ -52,24 +52,30 @@ DEFAULT_CONFIG = {
            "max_tokens": 8192,
            "display_name": "GPT-4"
        },
-       "claude-3-opus": {
+       # Use the corrected keys from anthropic.py
+       "claude-3-opus-20240229": {
            "provider": "anthropic",
-           "max_tokens": 4096,
+           "max_tokens": 4096,  # Note: Max tokens might differ per model version
            "display_name": "Claude 3 Opus"
        },
-       "claude-3-sonnet": {
+       "claude-3-sonnet-20240229": {
            "provider": "anthropic",
-           "max_tokens": 4096,
+           "max_tokens": 4096,  # Note: Max tokens might differ per model version
            "display_name": "Claude 3 Sonnet"
        },
-       "claude-3-haiku": {
+       "claude-3-haiku-20240307": {
            "provider": "anthropic",
            "max_tokens": 4096,
            "display_name": "Claude 3 Haiku"
        },
-       "claude-3.7-sonnet": {
+       "claude-3-5-sonnet-20240620": {
            "provider": "anthropic",
-           "max_tokens": 4096,
+           "max_tokens": 4096,  # Note: Max tokens might differ per model version
+           "display_name": "Claude 3.5 Sonnet"  # Corrected display name
+       },
+       "claude-3-7-sonnet-20250219": {
+           "provider": "anthropic",
+           "max_tokens": 4096,  # Note: Max tokens might differ per model version
            "display_name": "Claude 3.7 Sonnet"
        },
        "llama2": {
@@ -166,3 +172,43 @@ def save_config(config):
 
 # Current configuration
 CONFIG = load_config()
+
+# --- Dynamically update Anthropic models after initial load ---
+def update_anthropic_models(config):
+    """Fetch models from Anthropic API and update the config dict."""
+    if AVAILABLE_PROVIDERS["anthropic"]:
+        try:
+            from app.api.anthropic import AnthropicClient  # Import here to avoid circular dependency at top level
+            client = AnthropicClient()
+            fetched_models = client.get_available_models()  # This now fetches (or uses fallback)
+
+            if fetched_models:
+                # Remove old hardcoded anthropic models first
+                models_to_remove = [
+                    model_id for model_id, info in config["available_models"].items()
+                    if info.get("provider") == "anthropic"
+                ]
+                for model_id in models_to_remove:
+                    del config["available_models"][model_id]
+
+                # Add fetched models
+                for model in fetched_models:
+                    config["available_models"][model["id"]] = {
+                        "provider": "anthropic",
+                        "max_tokens": 4096,  # Assign a default max_tokens
+                        "display_name": model["name"]
+                    }
+                print(f"Updated Anthropic models in config: {[m['id'] for m in fetched_models]}")  # Add print statement
+            else:
+                print("Could not fetch or find Anthropic models to update config.")  # Add print statement
+
+        except Exception as e:
+            print(f"Error updating Anthropic models in config: {e}")  # Add print statement
+            # Keep existing config if update fails
+
+    return config
+
+# Update the global CONFIG after loading it
+CONFIG = update_anthropic_models(CONFIG)
+# Optionally save the updated config back immediately (or rely on later saves)
+# save_config(CONFIG)  # Uncomment if you want to persist the fetched models immediately
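
Since this dynamic update runs at import time of app.config, a failed fetch simply leaves the hardcoded defaults in place. If a refresh is wanted later in a session, the same function can be re-applied; the wrapper below is an assumption for illustration, not part of the package:

    from app import config

    def refresh_anthropic_models():
        # Hypothetical helper: re-run the dynamic update against the already-loaded CONFIG
        config.CONFIG = config.update_anthropic_models(config.CONFIG)
        config.save_config(config.CONFIG)  # persist, mirroring the commented-out line above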