webscout-8.2.8-py3-none-any.whl → webscout-8.3-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/toolbaz.py
@@ -1,413 +1,419 @@
-import time
-import uuid
-import base64
-import json
-import random
-import string
-import re
-import cloudscraper
-from datetime import datetime
-from typing import List, Dict, Optional, Union, Generator, Any
-
-from webscout.litagent import LitAgent
-from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
-from .utils import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    Choice,
-    ChatCompletionMessage,
-    ChoiceDelta,
-    CompletionUsage,
-    format_prompt,
-    get_system_prompt
-)
-
-# ANSI escape codes for formatting
-BOLD = "\033[1m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'Toolbaz'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
(removed lines 43-413 of the previous implementation are not recoverable from the rendered diff)
+import time
+import uuid
+import base64
+import json
+import random
+import string
+import re
+import cloudscraper
+from datetime import datetime
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.litagent import LitAgent
+from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    Choice,
+    ChatCompletionMessage,
+    ChoiceDelta,
+    CompletionUsage,
+    format_prompt,
+    get_system_prompt
+)
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Toolbaz'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Format the messages using the format_prompt utility
+        formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+        # Get authentication token
+        auth = self._client.get_auth()
+        if not auth:
+            raise IOError("Failed to authenticate with Toolbaz API")
+
+        # Prepare the request data
+        data = {
+            "text": formatted_prompt,
+            "capcha": auth["token"],
+            "model": model,
+            "session_id": auth["session_id"]
+        }
+
+        # Generate a unique request ID
+        request_id = f"chatcmpl-{uuid.uuid4().hex}"
+        created_time = int(time.time())
+
+        # Handle streaming response
+        if stream:
+            return self._handle_streaming_response(request_id, created_time, model, data, timeout, proxies)
+        else:
+            return self._handle_non_streaming_response(request_id, created_time, model, data, timeout, proxies)
+
+    def _handle_streaming_response(
+        self,
+        request_id: str,
+        created_time: int,
+        model: str,
+        data: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Handle streaming response from Toolbaz API"""
+        try:
+            resp = self._client.session.post(
+                "https://data.toolbaz.com/writing.php",
+                data=data,
+                stream=True,
+                proxies=proxies or getattr(self._client, "proxies", None),
+                timeout=timeout or self._client.timeout
+            )
+            resp.raise_for_status()
+
+            buffer = ""
+            tag_start = "[model:"
+            streaming_text = ""
+
+            for chunk in resp.iter_content(chunk_size=1):
+                if chunk:
+                    text = chunk.decode(errors="ignore")
+                    buffer += text
+
+                    # Remove all complete [model: ...] tags in buffer
+                    while True:
+                        match = re.search(r"\[model:.*?\]", buffer)
+                        if not match:
+                            break
+                        buffer = buffer[:match.start()] + buffer[match.end():]
+
+                    # Only yield up to the last possible start of a tag
+                    last_tag = buffer.rfind(tag_start)
+                    if last_tag == -1 or last_tag + len(tag_start) > len(buffer):
+                        if buffer:
+                            streaming_text += buffer
+
+                            # Create the delta object
+                            delta = ChoiceDelta(
+                                content=buffer,
+                                role="assistant"
+                            )
+
+                            # Create the choice object
+                            choice = Choice(
+                                index=0,
+                                delta=delta,
+                                finish_reason=None
+                            )
+
+                            # Create the chunk object
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model
+                            )
+
+                            yield chunk
+                            buffer = ""
+                    else:
+                        if buffer[:last_tag]:
+                            streaming_text += buffer[:last_tag]
+
+                            # Create the delta object
+                            delta = ChoiceDelta(
+                                content=buffer[:last_tag],
+                                role="assistant"
+                            )
+
+                            # Create the choice object
+                            choice = Choice(
+                                index=0,
+                                delta=delta,
+                                finish_reason=None
+                            )
+
+                            # Create the chunk object
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model
+                            )
+
+                            yield chunk
+                        buffer = buffer[last_tag:]
+
+            # Remove any remaining [model: ...] tag in the buffer
+            buffer = re.sub(r"\[model:.*?\]", "", buffer)
+            if buffer:
+                # Create the delta object
+                delta = ChoiceDelta(
+                    content=buffer,
+                    role="assistant"
+                )
+
+                # Create the choice object
+                choice = Choice(
+                    index=0,
+                    delta=delta,
+                    finish_reason="stop"
+                )
+
+                # Create the chunk object
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model
+                )
+
+                yield chunk
+
+            # Final chunk with finish_reason
+            delta = ChoiceDelta(
+                content=None,
+                role=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop"
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model
+            )
+
+            yield chunk
+
+        except Exception as e:
+            print(f"{RED}Error during Toolbaz streaming request: {e}{RESET}")
+            raise IOError(f"Toolbaz streaming request failed: {e}") from e
+
+    def _handle_non_streaming_response(
+        self,
+        request_id: str,
+        created_time: int,
+        model: str,
+        data: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        """Handle non-streaming response from Toolbaz API"""
+        try:
+            resp = self._client.session.post(
+                "https://data.toolbaz.com/writing.php",
+                data=data,
+                proxies=proxies or getattr(self._client, "proxies", None),
+                timeout=timeout or self._client.timeout
+            )
+            resp.raise_for_status()
+
+            text = resp.text
+            # Remove [model: ...] tags
+            text = re.sub(r"\[model:.*?\]", "", text)
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Usage data is not provided by this API in a standard way, set to 0
+            usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"{RED}Error during Toolbaz non-stream request: {e}{RESET}")
+            raise IOError(f"Toolbaz request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Toolbaz'):
+        self.completions = Completions(client)
+
+class Toolbaz(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for Toolbaz API.
+
+    Usage:
+        client = Toolbaz()
+        response = client.chat.completions.create(
+            model="gemini-2.0-flash",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        "gemini-2.5-flash",
+        "gemini-2.0-flash-thinking",
+        "gemini-2.0-flash",
+        "gemini-1.5-flash",
+        "o3-mini",
+        "gpt-4o-latest",
+        "gpt-4o",
+        "deepseek-r1",
+        "Llama-4-Maverick",
+        "Llama-4-Scout",
+        "Llama-3.3-70B",
+        "Qwen2.5-72B",
+        "Qwen2-72B",
+        "grok-2-1212",
+        "grok-3-beta",
+        "toolbaz_v3.5_pro",
+        "toolbaz_v3",
+        "mixtral_8x22b",
+        "L3-70B-Euryale-v2.1",
+        "midnight-rose",
+        "unity",
+        "unfiltered_x"
+    ]
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,  # Not used but kept for compatibility
+        timeout: int = 30,
+        proxies: dict = {},
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the Toolbaz client.
+
+        Args:
+            api_key: Not used but kept for compatibility with OpenAI interface
+            timeout: Request timeout in seconds
+            proxies: Proxy configuration for requests
+            browser: Browser name for LitAgent to generate User-Agent
+        """
+        self.timeout = timeout
+        self.proxies = proxies
+
+        # Initialize session with cloudscraper
+        self.session = cloudscraper.create_scraper()
+
+        # Set up headers
+        self.session.headers.update({
+            "user-agent": LitAgent().generate_fingerprint(browser=browser)["user_agent"],
+            "accept": "*/*",
+            "accept-language": "en-US",
+            "cache-control": "no-cache",
+            "connection": "keep-alive",
+            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
+            "origin": "https://toolbaz.com",
+            "pragma": "no-cache",
+            "referer": "https://toolbaz.com/",
+            "sec-fetch-mode": "cors"
+        })
+
+        # Initialize chat property
+        self.chat = Chat(self)
+
+    def random_string(self, length):
+        """Generate a random string of specified length"""
+        return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+    def generate_token(self):
+        """Generate authentication token for Toolbaz API"""
+        payload = {
+            "bR6wF": {
+                "nV5kP": self.session.headers.get("user-agent"),
+                "lQ9jX": "en-US",
+                "sD2zR": "431x958",
+                "tY4hL": time.tzname[0] if time.tzname else "UTC",
+                "pL8mC": "Linux armv81",
+                "cQ3vD": datetime.now().year,
+                "hK7jN": datetime.now().hour
+            },
+            "uT4bX": {
+                "mM9wZ": [],
+                "kP8jY": []
+            },
+            "tuTcS": int(time.time()),
+            "tDfxy": None,
+            "RtyJt": str(uuid.uuid4())
+        }
+        return "d8TW0v" + base64.b64encode(json.dumps(payload).encode()).decode()
+
+    def get_auth(self):
+        """Get authentication credentials for Toolbaz API"""
+        try:
+            session_id = self.random_string(36)
+            token = self.generate_token()
+            data = {
+                "session_id": session_id,
+                "token": token
+            }
+            resp = self.session.post("https://data.toolbaz.com/token.php", data=data)
+            resp.raise_for_status()
+            result = resp.json()
+            if result.get("success"):
+                return {"token": result["token"], "session_id": session_id}
+            return None
+        except Exception as e:
+            print(f"{RED}Error getting Toolbaz authentication: {e}{RESET}")
+            return None
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+# Example usage
+if __name__ == "__main__":
+    # Test the provider
+    client = Toolbaz()
+    response = client.chat.completions.create(
+        model="gemini-2.0-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Hello! How are you today?"}
+        ]
+    )
+    print(response.choices[0].message.content)
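
The rewritten provider keeps the OpenAI-style surface: Toolbaz().chat.completions.create(...) returns a ChatCompletion by default and a generator of ChatCompletionChunk objects when stream=True, with model names coming from Toolbaz.AVAILABLE_MODELS (also exposed via client.models.list()). The following is a minimal usage sketch based only on the code shown above; the import path is assumed from the diffed file location webscout/Provider/OPENAI/toolbaz.py and is not taken from the package's own documentation.

# Sketch, not official documentation: exercises the Toolbaz provider from the diff above.
# The import path is an assumption based on the file location webscout/Provider/OPENAI/toolbaz.py.
from webscout.Provider.OPENAI.toolbaz import Toolbaz

client = Toolbaz(timeout=30)

# Non-streaming: create() returns a ChatCompletion object.
response = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)

# Streaming: with stream=True, create() yields ChatCompletionChunk objects whose
# text arrives in choices[0].delta.content (None on the final chunk).
for chunk in client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Count to five."}],
    stream=True,
):
    piece = chunk.choices[0].delta.content
    if piece:
        print(piece, end="", flush=True)
print()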