chat-console 0.3.8__py3-none-any.whl → 0.3.91__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -1
- app/api/anthropic.py +180 -195
- app/api/base.py +5 -0
- app/api/ollama.py +52 -0
- app/api/openai.py +31 -0
- app/config.py +29 -26
- app/main.py +20 -4
- app/ui/chat_interface.py +55 -48
- app/utils.py +548 -210
- {chat_console-0.3.8.dist-info → chat_console-0.3.91.dist-info}/METADATA +1 -1
- chat_console-0.3.91.dist-info/RECORD +24 -0
- chat_console-0.3.8.dist-info/RECORD +0 -24
- {chat_console-0.3.8.dist-info → chat_console-0.3.91.dist-info}/WHEEL +0 -0
- {chat_console-0.3.8.dist-info → chat_console-0.3.91.dist-info}/entry_points.txt +0 -0
- {chat_console-0.3.8.dist-info → chat_console-0.3.91.dist-info}/licenses/LICENSE +0 -0
- {chat_console-0.3.8.dist-info → chat_console-0.3.91.dist-info}/top_level.txt +0 -0
app/__init__.py
CHANGED
app/api/anthropic.py
CHANGED
@@ -1,13 +1,17 @@
 import anthropic
-import asyncio
+import asyncio
+import logging
 from typing import List, Dict, Any, Optional, Generator, AsyncGenerator
 from .base import BaseModelClient
 from ..config import ANTHROPIC_API_KEY
-
+
+# Set up logging
+logger = logging.getLogger(__name__)
 
 class AnthropicClient(BaseModelClient):
     def __init__(self):
         self.client = None # Initialize in create()
+        self._active_stream = None # Track active stream for cancellation
 
     @classmethod
     async def create(cls) -> 'AnthropicClient':
@@ -17,237 +21,218 @@ class AnthropicClient(BaseModelClient):
         return instance
 
     def _prepare_messages(self, messages: List[Dict[str, str]], style: Optional[str] = None) -> List[Dict[str, str]]:
-        """Prepare messages for
-        # Anthropic expects role to be 'user' or 'assistant'
+        """Prepare messages for Anthropic API"""
         processed_messages = []
 
-
-
-
-
+        # Add style instructions if provided
+        if style and style != "default":
+            style_instructions = self._get_style_instructions(style)
+            processed_messages.append({
+                "role": "system",
+                "content": style_instructions
+            })
+
+        # Add the rest of the messages
+        for message in messages:
+            # Ensure message has required fields
+            if "role" not in message or "content" not in message:
+                continue
+
+            # Map 'user' and 'assistant' roles directly
+            # Anthropic only supports 'user' and 'assistant' roles
+            if message["role"] in ["user", "assistant"]:
+                processed_messages.append(message)
+            elif message["role"] == "system":
+                # For system messages, we need to add them as system messages
                 processed_messages.append({
-                    "role": "
-                    "content":
+                    "role": "system",
+                    "content": message["content"]
                 })
             else:
-
-
-
-
-
-        for i, msg in enumerate(processed_messages):
-            if msg["role"] == "user":
-                content = msg["content"]
-                if "<userStyle>" not in content:
-                    style_instructions = self._get_style_instructions(style)
-                    msg["content"] = f"<userStyle>{style_instructions}</userStyle>\n\n{content}"
-                    break
+                # For any other role, treat as user message
+                processed_messages.append({
+                    "role": "user",
+                    "content": message["content"]
+                })
 
         return processed_messages
 
     def _get_style_instructions(self, style: str) -> str:
         """Get formatting instructions for different styles"""
         styles = {
-            "concise": "
-            "detailed": "
-            "technical": "
-            "friendly": "
+            "concise": "Please provide concise, to-the-point responses without unnecessary elaboration.",
+            "detailed": "Please provide comprehensive responses with thorough explanations and examples.",
+            "technical": "Please use precise technical language and focus on accuracy and technical details.",
+            "friendly": "Please use a warm, conversational tone and relatable examples.",
         }
 
         return styles.get(style, "")
 
-    async def generate_completion(self, messages: List[Dict[str, str]],
-                                  model: str,
-                                  style: Optional[str] = None,
-                                  temperature: float = 0.7,
+    async def generate_completion(self, messages: List[Dict[str, str]],
+                                  model: str,
+                                  style: Optional[str] = None,
+                                  temperature: float = 0.7,
                                   max_tokens: Optional[int] = None) -> str:
-        """Generate a text completion using
-        try:
-            from app.main import debug_log
-        except ImportError:
-            debug_log = lambda msg: None
-
-        # Resolve the model ID right before making the API call
-        original_model = model
-        resolved_model = resolve_model_id(model)
-        debug_log(f"Anthropic: Original model ID '{original_model}' resolved to '{resolved_model}' in generate_completion")
-
+        """Generate a text completion using Anthropic"""
         processed_messages = self._prepare_messages(messages, style)
 
-
-
-
-
-
-
-
-
+        try:
+            response = await self.client.messages.create(
+                model=model,
+                messages=processed_messages,
+                temperature=temperature,
+                max_tokens=max_tokens if max_tokens else 4096,
+            )
+
+            return response.content[0].text
+        except Exception as e:
+            logger.error(f"Error generating completion: {str(e)}")
+            raise Exception(f"Anthropic API error: {str(e)}")
 
-    async def generate_stream(self, messages: List[Dict[str, str]],
-                            model: str,
+    async def generate_stream(self, messages: List[Dict[str, str]],
+                             model: str,
                              style: Optional[str] = None,
-                            temperature: float = 0.7,
+                             temperature: float = 0.7,
                              max_tokens: Optional[int] = None) -> AsyncGenerator[str, None]:
-        """Generate a streaming text completion using
+        """Generate a streaming text completion using Anthropic"""
         try:
             from app.main import debug_log # Import debug logging if available
+            debug_log(f"Anthropic: starting streaming generation with model: {model}")
         except ImportError:
             # If debug_log not available, create a no-op function
             debug_log = lambda msg: None
 
-        # Resolve the model ID right before making the API call
-        original_model = model
-        resolved_model = resolve_model_id(model)
-        debug_log(f"Anthropic: Original model ID '{original_model}' resolved to '{resolved_model}'")
-        debug_log(f"Anthropic: starting streaming generation with model: {resolved_model}")
-
         processed_messages = self._prepare_messages(messages, style)
 
         try:
-            debug_log(f"Anthropic:
-
-
-
-
-                temperature=temperature,
-                max_tokens=max_tokens or 1024,
-            )
+            debug_log(f"Anthropic: preparing {len(processed_messages)} messages for stream")
+
+            # Use more robust error handling with retry for connection issues
+            max_retries = 2
+            retry_count = 0
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                except Exception as chunk_error: # Restore the except block for chunk processing
-                    debug_log(f"Anthropic: error processing chunk: {str(chunk_error)}")
-                    # Skip problematic chunks but continue processing
-                    continue # This continue is now correctly inside the loop and except block
+            while retry_count <= max_retries:
+                try:
+                    debug_log(f"Anthropic: creating stream with model {model}")
+
+                    # Create the stream
+                    stream = await self.client.messages.create(
+                        model=model,
+                        messages=processed_messages,
+                        temperature=temperature,
+                        max_tokens=max_tokens if max_tokens else 4096,
+                        stream=True
+                    )
+
+                    # Store the stream for potential cancellation
+                    self._active_stream = stream
 
+                    debug_log("Anthropic: stream created successfully")
+
+                    # Process stream chunks
+                    chunk_count = 0
+                    debug_log("Anthropic: starting to process chunks")
+
+                    async for chunk in stream:
+                        # Check if stream has been cancelled
+                        if self._active_stream is None:
+                            debug_log("Anthropic: stream was cancelled, stopping generation")
+                            break
+
+                        chunk_count += 1
+                        try:
+                            if hasattr(chunk, 'delta') and hasattr(chunk.delta, 'text'):
+                                content = chunk.delta.text
+                                if content is not None:
+                                    debug_log(f"Anthropic: yielding chunk {chunk_count} of length: {len(content)}")
+                                    yield content
+                                else:
+                                    debug_log(f"Anthropic: skipping None content chunk {chunk_count}")
+                            else:
+                                debug_log(f"Anthropic: skipping chunk {chunk_count} with missing content")
+                        except Exception as chunk_error:
+                            debug_log(f"Anthropic: error processing chunk {chunk_count}: {str(chunk_error)}")
+                            # Skip problematic chunks but continue processing
+                            continue
+
+                    debug_log(f"Anthropic: stream completed successfully with {chunk_count} chunks")
+
+                    # Clear the active stream reference when done
+                    self._active_stream = None
+
+                    # If we reach this point, we've successfully processed the stream
+                    break
+
+                except Exception as e:
+                    debug_log(f"Anthropic: error in attempt {retry_count+1}/{max_retries+1}: {str(e)}")
+                    retry_count += 1
+                    if retry_count <= max_retries:
+                        debug_log(f"Anthropic: retrying after error (attempt {retry_count+1})")
+                        # Simple exponential backoff
+                        await asyncio.sleep(1 * retry_count)
+                    else:
+                        debug_log("Anthropic: max retries reached, raising exception")
+                        raise Exception(f"Anthropic streaming error after {max_retries+1} attempts: {str(e)}")
+
         except Exception as e:
             debug_log(f"Anthropic: error in generate_stream: {str(e)}")
+            # Yield a simple error message as a last resort to ensure UI updates
+            yield f"Error: {str(e)}"
             raise Exception(f"Anthropic streaming error: {str(e)}")
-
-    async def
-        """
+
+    async def cancel_stream(self) -> None:
+        """Cancel any active streaming request"""
+        logger.info("Cancelling active Anthropic stream")
         try:
             from app.main import debug_log
+            debug_log("Anthropic: cancelling active stream")
         except ImportError:
-
-
-        # Always include a reliable fallback list in case API calls fail
-        fallback_models = [
-            {"id": "claude-3-opus-20240229", "name": "Claude 3 Opus"},
-            {"id": "claude-3-sonnet-20240229", "name": "Claude 3 Sonnet"},
-            {"id": "claude-3-haiku-20240307", "name": "Claude 3 Haiku"},
-            {"id": "claude-3-5-sonnet-20240620", "name": "Claude 3.5 Sonnet"},
-            {"id": "claude-3-7-sonnet-20250219", "name": "Claude 3.7 Sonnet"},
-        ]
-
-        # If no client is initialized, return fallback immediately
-        if not self.client:
-            debug_log("Anthropic: No client initialized, using fallback models")
-            return fallback_models
-
-        try:
-            debug_log("Anthropic: Fetching models from API...")
-
-            # Try using the models.list method if available in newer SDK versions
-            if hasattr(self.client, 'models') and hasattr(self.client.models, 'list'):
-                try:
-                    debug_log("Anthropic: Using client.models.list() method")
-                    models_response = await self.client.models.list()
-                    if hasattr(models_response, 'data') and isinstance(models_response.data, list):
-                        formatted_models = [
-                            {"id": model.id, "name": getattr(model, "name", model.id)}
-                            for model in models_response.data
-                        ]
-                        debug_log(f"Anthropic: Found {len(formatted_models)} models via SDK")
-                        return formatted_models
-                except Exception as sdk_err:
-                    debug_log(f"Anthropic: Error using models.list(): {str(sdk_err)}")
-                    # Continue to next method
+            pass
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Simply set the active stream to None
+        # This will cause the generate_stream method to stop processing chunks
+        self._active_stream = None
+        logger.info("Anthropic stream cancelled successfully")
+
+    async def get_available_models(self) -> List[Dict[str, Any]]:
+        """Get list of available Anthropic models"""
+        # Anthropic doesn't have a models endpoint, so we return a static list
+        models = [
+            {
+                "id": "claude-3-opus-20240229",
+                "name": "Claude 3 Opus",
+                "description": "Most powerful model for highly complex tasks",
+                "context_window": 200000,
+                "provider": "anthropic"
+            },
+            {
+                "id": "claude-3-sonnet-20240229",
+                "name": "Claude 3 Sonnet",
+                "description": "Balanced model for most tasks",
+                "context_window": 200000,
+                "provider": "anthropic"
+            },
+            {
+                "id": "claude-3-haiku-20240307",
+                "name": "Claude 3 Haiku",
+                "description": "Fastest and most compact model",
+                "context_window": 200000,
+                "provider": "anthropic"
+            },
+            {
+                "id": "claude-3-5-sonnet-20240620",
+                "name": "Claude 3.5 Sonnet",
+                "description": "Latest model with improved capabilities",
+                "context_window": 200000,
+                "provider": "anthropic"
+            },
+            {
+                "id": "claude-3-7-sonnet-20250219",
+                "name": "Claude 3.7 Sonnet",
+                "description": "Newest model with advanced reasoning",
+                "context_window": 200000,
+                "provider": "anthropic"
+            }
         ]
 
-
-            # Check if we're already in an event loop
-            try:
-                loop = asyncio.get_running_loop()
-                in_loop = True
-            except RuntimeError:
-                in_loop = False
-
-            if in_loop:
-                # We're already in an event loop, create a future
-                try:
-                    from app.main import debug_log
-                except ImportError:
-                    debug_log = lambda msg: None
-
-                debug_log("Anthropic: Already in event loop, using fallback models")
-                return fallback_models
-            else:
-                # Not in an event loop, we can use asyncio.run
-                models = asyncio.run(self._fetch_models_from_api())
-                return models
-        except Exception as e:
-            try:
-                from app.main import debug_log
-            except ImportError:
-                debug_log = lambda msg: None
-
-            debug_log(f"Anthropic: Error in get_available_models: {str(e)}")
-            return fallback_models
+        return models
app/api/base.py
CHANGED
@@ -22,6 +22,11 @@ class BaseModelClient(ABC):
         """Generate a streaming text completion"""
         yield "" # Placeholder implementation
 
+    @abstractmethod
+    async def cancel_stream(self) -> None:
+        """Cancel any active streaming request"""
+        pass
+
     @abstractmethod
     def get_available_models(self) -> List[Dict[str, Any]]:
         """Get list of available models from this provider"""
app/api/ollama.py
CHANGED
@@ -11,6 +11,14 @@ from .base import BaseModelClient
 # Set up logging
 logger = logging.getLogger(__name__)
 
+# Custom exception for Ollama API errors
+class OllamaApiError(Exception):
+    """Exception raised for errors in the Ollama API."""
+    def __init__(self, message: str, status_code: Optional[int] = None):
+        self.message = message
+        self.status_code = status_code
+        super().__init__(self.message)
+
 class OllamaClient(BaseModelClient):
     def __init__(self):
         from ..config import OLLAMA_BASE_URL
@@ -266,6 +274,29 @@ class OllamaClient(BaseModelClient):
         last_error = None
         self._active_stream_session = None # Track the active session
 
+        # First check if the model exists in our available models
+        try:
+            available_models = await self.get_available_models()
+            model_exists = False
+            available_model_names = []
+
+            for m in available_models:
+                model_id = m.get("id", "")
+                available_model_names.append(model_id)
+                if model_id == model:
+                    model_exists = True
+                    break
+
+            if not model_exists:
+                error_msg = f"Model '{model}' not found in available models. Available models include: {', '.join(available_model_names[:5])}"
+                if len(available_model_names) > 5:
+                    error_msg += f" and {len(available_model_names) - 5} more."
+                logger.error(error_msg)
+                raise OllamaApiError(error_msg)
+        except Exception as e:
+            debug_log(f"Error checking model availability: {str(e)}")
+            # Continue anyway, the main request will handle errors
+
         while retries >= 0:
             try:
                 # First try a quick test request to check if model is loaded
@@ -299,6 +330,17 @@
                         if response.status != 200:
                             logger.warning(f"Model test request failed with status {response.status}")
                             debug_log(f"Model test request failed with status {response.status}")
+
+                            # Check if this is a 404 Not Found error
+                            if response.status == 404:
+                                error_text = await response.text()
+                                debug_log(f"404 error details: {error_text}")
+                                error_msg = f"Error: Model '{model}' not found on the Ollama server. Please check if the model name is correct or try pulling it first."
+                                logger.error(error_msg)
+                                # Instead of raising, yield the error message for user display
+                                yield error_msg
+                                return # End the generation
+
                             raise aiohttp.ClientError("Model not ready")
             except (aiohttp.ClientError, asyncio.TimeoutError) as e:
                 logger.info(f"Model cold start detected: {str(e)}")
@@ -326,6 +368,16 @@
                             logger.error("Failed to pull model")
                             debug_log("Failed to pull model")
                             self._model_loading = False # Reset flag on failure
+
+                            # Check if this is a 404 Not Found error
+                            if pull_response.status == 404:
+                                error_text = await pull_response.text()
+                                debug_log(f"404 error details: {error_text}")
+                                # This is likely a model not found in registry
+                                error_msg = f"Error: Model '{model}' not found in the Ollama registry. Please check if the model name is correct or try a different model."
+                                logger.error(error_msg)
+                                raise OllamaApiError(error_msg, status_code=404)
+
                             raise Exception("Failed to pull model")
                         logger.info("Model pulled successfully")
                         debug_log("Model pulled successfully")
app/api/openai.py
CHANGED
@@ -3,10 +3,15 @@ import asyncio
 from typing import List, Dict, Any, Optional, Generator, AsyncGenerator
 from .base import BaseModelClient
 from ..config import OPENAI_API_KEY
+import logging
+
+# Set up logging
+logger = logging.getLogger(__name__)
 
 class OpenAIClient(BaseModelClient):
     def __init__(self):
         self.client = None # Initialize in create()
+        self._active_stream = None # Track active stream for cancellation
 
     @classmethod
     async def create(cls) -> 'OpenAIClient':
@@ -115,6 +120,10 @@
                         max_tokens=max_tokens,
                         stream=True,
                     )
+
+                    # Store the stream for potential cancellation
+                    self._active_stream = stream
+
                     debug_log("OpenAI: stream created successfully")
 
                     # Yield a small padding token at the beginning for very short prompts
@@ -128,6 +137,11 @@
                     debug_log("OpenAI: starting to process chunks")
 
                     async for chunk in stream:
+                        # Check if stream has been cancelled
+                        if self._active_stream is None:
+                            debug_log("OpenAI: stream was cancelled, stopping generation")
+                            break
+
                         chunk_count += 1
                         try:
                             if chunk.choices and hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
@@ -148,6 +162,9 @@
 
                     debug_log(f"OpenAI: stream completed successfully with {chunk_count} chunks")
 
+                    # Clear the active stream reference when done
+                    self._active_stream = None
+
                     # If we reach this point, we've successfully processed the stream
                     break
 
@@ -168,6 +185,20 @@
             yield f"Error: {str(e)}"
             raise Exception(f"OpenAI streaming error: {str(e)}")
 
+    async def cancel_stream(self) -> None:
+        """Cancel any active streaming request"""
+        logger.info("Cancelling active OpenAI stream")
+        try:
+            from app.main import debug_log
+            debug_log("OpenAI: cancelling active stream")
+        except ImportError:
+            pass
+
+        # Simply set the active stream to None
+        # This will cause the generate_stream method to stop processing chunks
+        self._active_stream = None
+        logger.info("OpenAI stream cancelled successfully")
+
     async def get_available_models(self) -> List[Dict[str, Any]]:
         """Fetch list of available OpenAI models from the /models endpoint"""
         try: