webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
@@ -1,22 +1,204 @@
+# from pickle import NONE
+import requests
 import requests
 import random
 import string
 import base64
 from datetime import datetime, timedelta
 from typing import Generator, List, Dict, Optional, Any, Union
-import json  # Not used directly in this snippet, but often useful
 import uuid
 import time
-
-
-
+import codecs
+import gzip
+import zstandard as zstd
+import brotli
+import zlib
 from webscout.Provider.OPENAI.utils import (
     ChatCompletion, Choice,
     ChatCompletionMessage, CompletionUsage, count_tokens,
-    ChatCompletionChunk, ChoiceDelta
+    ChatCompletionChunk, ChoiceDelta  # Ensure ChoiceDelta is always imported at the top
 )
-
-
+try:
+    from webscout.litagent import LitAgent
+    agent = LitAgent()
+except ImportError:
+    print("Warning: LitAgent not available, using default user agent")
+    class MockAgent:
+        def random(self):
+            return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
+    agent = MockAgent()
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+
+class StreamingDecompressor:
+    """
+    A streaming decompressor that can handle partial compressed data in real-time.
+    This allows for true streaming decompression without buffering entire response.
+    """
+    def __init__(self, content_encoding: str):
+        self.encoding = content_encoding.lower().strip() if content_encoding else None
+        self.decompressor = None
+        self.text_decoder = codecs.getincrementaldecoder("utf-8")("replace")
+        self.zstd_buffer = b""  # Buffer for zstd incomplete frames
+
+        if self.encoding == 'gzip':
+            self.decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)  # gzip format
+        elif self.encoding == 'deflate':
+            self.decompressor = zlib.decompressobj()  # deflate format
+        elif self.encoding == 'zstd':
+            self.decompressor = zstd.ZstdDecompressor()
+        elif self.encoding == 'br':
+            self.decompressor = brotli.Decompressor()
+
+    def decompress_chunk(self, chunk: bytes) -> str:
+        """
+        Decompress a chunk of data and return decoded text.
+        Handles partial compressed data properly for real-time streaming.
+        """
+        try:
+            if not chunk:
+                return ""
+
+            if not self.encoding or self.encoding not in ['gzip', 'deflate', 'zstd', 'br']:
+                # No compression or unsupported - decode directly
+                return self.text_decoder.decode(chunk, final=False)
+
+            if self.encoding in ['gzip', 'deflate']:
+                # Use zlib decompressor for gzip/deflate
+                decompressed_data = self.decompressor.decompress(chunk)
+                return self.text_decoder.decode(decompressed_data, final=False)
+
+            elif self.encoding == 'zstd':
+                # Zstandard streaming decompression with buffering for incomplete frames
+                self.zstd_buffer += chunk
+                try:
+                    # Try to decompress the current buffer
+                    decompressed_data = self.decompressor.decompress(self.zstd_buffer)
+                    # If successful, clear the buffer and return decoded text
+                    self.zstd_buffer = b""
+                    return self.text_decoder.decode(decompressed_data, final=False)
+                except zstd.ZstdError:
+                    # Frame is incomplete, keep buffering
+                    # Try to decompress any complete frames from buffer start
+                    try:
+                        # Process buffer in chunks to find complete frames
+                        buffer_len = len(self.zstd_buffer)
+                        if buffer_len > 4:  # Minimum zstd frame size
+                            # Try smaller chunks of the buffer
+                            for end_pos in range(4, buffer_len + 1):
+                                try:
+                                    partial_data = self.decompressor.decompress(self.zstd_buffer[:end_pos])
+                                    # If we got here, we found a complete frame
+                                    self.zstd_buffer = self.zstd_buffer[end_pos:]
+                                    return self.text_decoder.decode(partial_data, final=False)
+                                except zstd.ZstdError:
+                                    continue
+                    except Exception:
+                        pass
+                    return ""
+
+            elif self.encoding == 'br':
+                # Brotli streaming decompression
+                try:
+                    decompressed_data = self.decompressor.decompress(chunk)
+                    return self.text_decoder.decode(decompressed_data, final=False)
+                except brotli.error:
+                    # If brotli fails, it might need more data or be at end
+                    return ""
+
+        except Exception as e:
+            # If decompression fails, try to decode the chunk as-is (fallback)
+            try:
+                return self.text_decoder.decode(chunk, final=False)
+            except UnicodeDecodeError:
+                return ""
+
+    def finalize(self) -> str:
+        """
+        Finalize the decompression and return any remaining decoded text.
+        """
+        try:
+            remaining_text = ""
+
+            if self.encoding in ['gzip', 'deflate'] and self.decompressor:
+                # Flush any remaining compressed data
+                remaining_data = self.decompressor.flush()
+                if remaining_data:
+                    remaining_text = self.text_decoder.decode(remaining_data, final=True)
+                else:
+                    remaining_text = self.text_decoder.decode(b"", final=True)
+            elif self.encoding == 'zstd':
+                # Process any remaining buffered data
+                if self.zstd_buffer:
+                    try:
+                        remaining_data = self.decompressor.decompress(self.zstd_buffer)
+                        remaining_text = self.text_decoder.decode(remaining_data, final=True)
+                    except:
+                        # If buffered data can't be decompressed, finalize decoder
+                        remaining_text = self.text_decoder.decode(b"", final=True)
+                else:
+                    remaining_text = self.text_decoder.decode(b"", final=True)
+            else:
+                # Finalize the text decoder for other encodings
+                remaining_text = self.text_decoder.decode(b"", final=True)
+
+            return remaining_text
+        except Exception:
+            # Ensure we always finalize the text decoder
+            try:
+                return self.text_decoder.decode(b"", final=True)
+            except:
+                return ""
+
+def decompress_response(response_content: bytes, content_encoding: str) -> str:
+    """
+    Decompress response content based on the Content-Encoding header.
+
+    Args:
+        response_content: The raw response content as bytes
+        content_encoding: The Content-Encoding header value
+
+    Returns:
+        str: The decompressed and decoded content as UTF-8 string
+
+    Raises:
+        IOError: If decompression fails
+    """
+    try:
+        if not content_encoding:
+            # No compression, decode directly
+            return response_content.decode('utf-8')
+
+        encoding = content_encoding.lower().strip()
+
+        if encoding == 'zstd':
+            # Decompress using zstandard
+            dctx = zstd.ZstdDecompressor()
+            decompressed_data = dctx.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        elif encoding == 'gzip':
+            # Decompress using gzip
+            decompressed_data = gzip.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        elif encoding == 'br':
+            # Decompress using brotli
+            decompressed_data = brotli.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        elif encoding == 'deflate':
+            # Decompress using zlib (deflate)
+            import zlib
+            decompressed_data = zlib.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        else:
+            # Unknown or unsupported encoding, try to decode as-is
+            return response_content.decode('utf-8')
+
+    except Exception as e:
+        raise IOError(f"Failed to decompress response with encoding '{content_encoding}': {str(e)}") from e
 
 def to_data_uri(image_data):
     """Convert image data to a data URI format"""
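Note: the `StreamingDecompressor` added above pairs an incremental decompressor (zlib for gzip/deflate, `zstandard`, or `brotli`, selected from `Content-Encoding`) with an incremental UTF-8 decoder, so each HTTP chunk can be emitted as text as soon as it arrives instead of buffering the whole body. A minimal driving sketch, assuming the class as defined in this diff; the URL and chunk size here are illustrative placeholders:

```python
import requests

# Hypothetical compressed streaming endpoint; any URL that streams text works.
resp = requests.get("https://example.com/stream", stream=True)
dec = StreamingDecompressor(resp.headers.get("Content-Encoding"))

for raw in resp.iter_content(chunk_size=32):
    text = dec.decompress_chunk(raw)  # may return "" until a full frame arrives
    if text:
        print(text, end="", flush=True)

print(dec.finalize(), end="")  # flush any bytes still buffered at end of stream
```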
@@ -38,6 +220,19 @@ def to_data_uri(image_data):
 
     return f"data:{mime_type};base64,{encoded}"
 
+def clean_text(text):
+    """Clean text by removing null bytes and control characters except newlines and tabs."""
+    import re
+    if not isinstance(text, str):
+        return text
+
+    # Remove null bytes
+    text = text.replace('\x00', '')
+
+    # Keep newlines, tabs, and other printable characters, remove other control chars
+    # This regex matches control characters except \n, \r, \t
+    return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
+
 
 class Completions(BaseCompletions):
     def __init__(self, client: 'BLACKBOXAI'):
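The `clean_text` helper keeps `\n`, `\r`, and `\t` but strips null bytes and the remaining C0/DEL control characters. A quick check of the intended behavior, assuming the function as defined above:

```python
# Expected behavior of clean_text (sketch, not part of the diff):
assert clean_text("a\x00b") == "ab"              # null bytes removed
assert clean_text("l1\nl2\tok") == "l1\nl2\tok"  # \n and \t preserved
assert clean_text("x\x07y\x1bz") == "xyz"        # bell/escape stripped
assert clean_text(123) == 123                    # non-strings pass through
```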
@@ -52,6 +247,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
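With the two new keyword arguments, callers can set a per-request timeout and proxy map through the OpenAI-compatible `create()` signature. A hedged usage sketch; the model name and proxy URL are illustrative, and the `chat.completions.create` call path follows the provider's OpenAI-compatible interface shown in the `__main__` block at the end of this file:

```python
client = BLACKBOXAI()
resp = client.chat.completions.create(
    model="GPT-4.1",  # default_model in this file
    messages=[{"role": "user", "content": "hello"}],
    timeout=30,  # seconds; falls back to the class-level timeout when omitted
    proxies={"https": "http://127.0.0.1:8080"},  # illustrative proxy URL
)
print(resp.choices[0].message.content)
```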
@@ -117,7 +314,9 @@
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
-                media=media
+                media=media,
+                timeout=timeout,
+                proxies=proxies
             )
 
         # Use non-streaming implementation
@@ -130,7 +329,9 @@
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
-            media=media
+            media=media,
+            timeout=timeout,
+            proxies=proxies
         )
 
 
@@ -145,9 +346,18 @@
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
-        media: List = None
+        media: List = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
+        original_proxies = self._client.session.proxies.copy()
+        # Only use proxies if they are explicitly provided and not causing issues
+        if proxies is not None and proxies:
+            self._client.session.proxies.update(proxies)
+        else:
+            # Clear proxies to avoid connection issues
+            self._client.session.proxies = {}
         try:
             # Prepare user messages for BlackboxAI API format
             blackbox_messages = []
@@ -191,20 +401,39 @@
                 model=model
             )
 
-            # Make the API request with cookies
-
-
-
-
-
-
-
+            # Make the API request with cookies and retry logic
+            max_retries = 3
+            for attempt in range(max_retries):
+                try:
+                    response = self._client.session.post(
+                        self._client.api_endpoint,
+                        json=payload,
+                        headers=self._client.headers,
+                        cookies=self._client.cookies,
+                        timeout=timeout if timeout is not None else self._client.timeout
+                    )
+                    break  # Success, exit retry loop
+                except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError) as e:
+                    if attempt == max_retries - 1:
+                        raise IOError(f"BlackboxAI connection failed after {max_retries} attempts: {str(e)}") from e
+                    # Clear proxies and retry
+                    self._client.session.proxies = {}
+                    time.sleep(1)  # Wait before retry
 
             # Process the response
             full_content = ""
             if response.status_code == 200:
-                #
-
+                # Check for Content-Encoding header
+                content_encoding = response.headers.get('Content-Encoding')
+
+                # Decompress the response if needed
+                try:
+                    response_text = decompress_response(response.content, content_encoding)
+                except IOError as e:
+                    # If decompression fails, fall back to the original method
+                    print(f"Warning: {e}. Falling back to original decoding method.")
+                    decoder = codecs.getincrementaldecoder("utf-8")("replace")
+                    response_text = decoder.decode(response.content, final=True)
 
                 # Handle possible SSE format in response
                 if "data: " in response_text:
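The retry loop above drops the session proxies and sleeps a flat second between attempts. A common refinement is exponential backoff; a standalone sketch of that variant, not something this diff ships:

```python
import time
import requests

def post_with_backoff(session, url, max_retries=3, base_delay=1.0, **kwargs):
    """Retry POST on connection/proxy errors with 1s, 2s, 4s... delays."""
    for attempt in range(max_retries):
        try:
            return session.post(url, **kwargs)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.ProxyError):
            if attempt == max_retries - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))
```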
@@ -227,10 +456,11 @@
                 # Handle error response
                 raise IOError(f"BlackboxAI request failed with status code {response.status_code}")
 
-            #
+            # Clean and create the completion message
+            cleaned_content = clean_text(full_content)
             message = ChatCompletionMessage(
                 role="assistant",
-                content=
+                content=cleaned_content
             )
 
             # Create the choice with the message
@@ -242,7 +472,7 @@
 
             # Estimate token usage using count_tokens
             prompt_tokens = count_tokens([str(msg.get("content", "")) for msg in messages])
-            completion_tokens = count_tokens(
+            completion_tokens = count_tokens(cleaned_content)
 
             # Create the final completion object
             completion = ChatCompletion(
@@ -261,6 +491,9 @@
 
         except Exception as e:
             raise IOError(f"BlackboxAI request failed: {str(e)}") from e
+        finally:
+            # Restore original proxies
+            self._client.session.proxies = original_proxies
 
     def _create_streaming(
         self,
@@ -273,91 +506,146 @@
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
-        media: List = None
+        media: List = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None
     ):
         """Implementation for streaming chat completions (OpenAI-compatible chunks)."""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            session_data=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            while len(buffer) >= chunk_size:
-                out = buffer[:chunk_size]
-                buffer = buffer[chunk_size:]
-                if out.strip():
-                    # Wrap the chunk in OpenAI-compatible structure
-                    delta = ChoiceDelta(content=out, role="assistant")
-                    choice = Choice(index=0, delta=delta, finish_reason=None)
-                    chunk_obj = ChatCompletionChunk(
-                        id=request_id,
-                        choices=[choice],
-                        created=created_time,
-                        model=model,
-                        system_fingerprint=None
+        original_proxies = self._client.session.proxies.copy()
+        # Only use proxies if they are explicitly provided and not causing issues
+        if proxies is not None and proxies:
+            self._client.session.proxies.update(proxies)
+        else:
+            # Clear proxies to avoid connection issues
+            self._client.session.proxies = {}
+        try:
+            # Prepare user messages for BlackboxAI API format
+            blackbox_messages = []
+            for i, msg in enumerate(messages):
+                if msg["role"] == "system":
+                    continue  # System message handled separately
+                msg_id = self._client.generate_id() if i > 0 else request_id
+                blackbox_messages.append({
+                    "id": msg_id,
+                    "content": msg["content"],
+                    "role": msg["role"]
+                })
+            # Add image data if provided
+            if media and blackbox_messages:
+                blackbox_messages[-1]['data'] = {
+                    "imagesData": [
+                        {
+                            "filePath": f"/",
+                            "contents": to_data_uri(image[0])
+                        } for image in media
+                    ],
+                    "fileText": "",
+                    "title": ""
+                }
+            # Generate request payload with session
+            request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
+            session_data = self._client.generate_session(request_email)
+            payload = self._client.create_request_payload(
+                messages=blackbox_messages,
+                chat_id=request_id,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p,
+                session_data=session_data,
+                model=model
+            )
+            # Make the API request with cookies, stream=True and retry logic
+            max_retries = 3
+            for attempt in range(max_retries):
+                try:
+                    response = self._client.session.post(
+                        self._client.api_endpoint,
+                        json=payload,
+                        headers=self._client.headers,
+                        cookies=self._client.cookies,
+                        stream=True,
+                        timeout=timeout if timeout is not None else self._client.timeout
                     )
-
-
-
-
-
-
+                    break  # Success, exit retry loop
+                except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError) as e:
+                    if attempt == max_retries - 1:
+                        raise IOError(f"BlackboxAI connection failed after {max_retries} attempts: {str(e)}") from e
+                    # Clear proxies and retry
+                    self._client.session.proxies = {}
+                    time.sleep(1)  # Wait before retry
+            # Blackbox streams as raw text, no line breaks, so chunk manually
+            chunk_size = 32  # Tune as needed for smoothness
+            # ChoiceDelta is already imported at the top of the file
+
+            # Check if the response is compressed and create appropriate decompressor
+            content_encoding = response.headers.get('Content-Encoding')
+            streaming_decompressor = StreamingDecompressor(content_encoding)
+
+            # Stream with real-time decompression
+            for chunk in response.iter_content(chunk_size=chunk_size):
+                if not chunk:
+                    continue
+
+                # Decompress chunk in real-time
+                text = streaming_decompressor.decompress_chunk(chunk)
+
+                if text:
+                    cleaned_chunk = clean_text(text)
+                    if cleaned_chunk.strip():
+                        delta = ChoiceDelta(content=cleaned_chunk, role="assistant")
+                        choice = Choice(index=0, delta=delta, finish_reason=None)
+                        chunk_obj = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+                        yield chunk_obj
+
+            # Finalize decompression and get any remaining text
+            final_text = streaming_decompressor.finalize()
+            if final_text.strip():
+                cleaned_final = clean_text(final_text)
+                delta = ChoiceDelta(content=cleaned_final, role="assistant")
+                choice = Choice(index=0, delta=delta, finish_reason=None)
+                chunk_obj = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=None
+                )
+                yield chunk_obj
+
+            # Send final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content="", role="assistant")
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+            final_chunk = ChatCompletionChunk(
                 id=request_id,
                 choices=[choice],
                 created=created_time,
                 model=model,
                 system_fingerprint=None
             )
-            yield
+            yield final_chunk
+
+        except Exception as e:
+            # Handle errors gracefully by yielding an error chunk
+            error_delta = ChoiceDelta(content=f"Error: {str(e)}", role="assistant")
+            error_choice = Choice(index=0, delta=error_delta, finish_reason="stop")
+            error_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[error_choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            yield error_chunk
+        finally:
+            # Restore original proxies
+            self._client.session.proxies = original_proxies
 
 
 class Chat(BaseChat):
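Both `_create_non_streaming` and `_create_streaming` now repeat the same copy/override/restore sequence around `session.proxies`. The same guarantee could be expressed once as a context manager; a sketch of that alternative, assuming nothing beyond `requests` itself:

```python
from contextlib import contextmanager
from typing import Optional
import requests

@contextmanager
def proxy_override(session: requests.Session, proxies: Optional[dict]):
    """Temporarily swap a session's proxies, restoring them even on error."""
    original = session.proxies.copy()
    session.proxies = dict(proxies) if proxies else {}
    try:
        yield session
    finally:
        session.proxies = original
```

Each method body could then shrink to `with proxy_override(self._client.session, proxies): ...` around its request logic.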
@@ -381,12 +669,7 @@ class BLACKBOXAI(OpenAICompatibleProvider):
     default_model = "GPT-4.1"
     default_vision_model = default_model
     api_endpoint = "https://www.blackbox.ai/api/chat"
-    timeout =
-
-
-    # Default model (remains the same as per original class)
-    default_model = "GPT-4.1"
-    default_vision_model = default_model
+    timeout = None
 
     # New OpenRouter models list
     openrouter_models = [
@@ -437,6 +720,7 @@ class BLACKBOXAI(OpenAICompatibleProvider):
 
     # New base models list
     models = [
+        "gpt-4.1-mini",  # Added new model
        default_model,
         "o3-mini",
         "gpt-4.1-nano",
@@ -525,6 +809,7 @@ class BLACKBOXAI(OpenAICompatibleProvider):
         default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model},  # Assuming GPT-4.1 is agent-compatible
         'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"},  # Assuming o3-mini is agent-compatible
         'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"},  # Assuming gpt-4.1-nano is agent-compatible
+        'gpt-4.1-mini': {'mode': True, 'id': "gpt-4.1-mini", 'name': "gpt-4.1-mini"},  # Added agent mode for gpt-4.1-mini
     }
 
     # Trending agent modes
@@ -565,17 +850,15 @@ class BLACKBOXAI(OpenAICompatibleProvider):
 
 
     def __init__(
-        self,
-        proxies: dict = {}
+        self
     ):
         """
         Initialize the BlackboxAI provider with OpenAI compatibility.
-
-        Args:
-            proxies: Optional proxy configuration
         """
         # Initialize session
         self.session = requests.Session()
+        # Remove any proxy configuration to avoid connection issues
+        self.session.proxies = {}
 
         # Set headers based on GitHub reference
         self.headers = {
@@ -602,9 +885,6 @@ class BLACKBOXAI(OpenAICompatibleProvider):
             '__cf_bm': self.generate_id(32),
         }
 
-        # Set proxies if provided
-        self.session.proxies = proxies
-
         # Initialize chat interface with completions
         self.chat = Chat(self)
 
@@ -764,3 +1044,6 @@ if __name__ == "__main__":
     )
     for chunk in response:
         print(chunk.choices[0].delta.content, end='', flush=True)
+    print()
+    print("Proxies on instance:", client.proxies)
+    print("Proxies on session:", client.session.proxies)