webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +7 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +3 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/TogetherAI.py +2 -2
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -58
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +6 -6
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +1 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -11
- webscout/Provider/OPENAI/toolbaz.py +14 -11
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTS/__init__.py +18 -10
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -12
- webscout/Provider/TogetherAI.py +86 -87
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -86
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +115 -9
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -12
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/meta.py
CHANGED

@@ -3,7 +3,7 @@ import logging
 import time
 import urllib
 import uuid
-from typing import Dict, Generator,
+from typing import Dict, Generator, List, Union
 
 import random
 from curl_cffi import CurlError
@@ -12,7 +12,7 @@ from webscout.scout import Scout
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
@@ -299,7 +299,7 @@ class Meta(Provider):
     A class to interact with the Meta AI API to obtain and use access tokens for sending
     and receiving messages from the Meta AI Chat API.
     """
-
+    required_auth = False
     def __init__(
         self,
         fb_email: str = None,
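Several providers in this release (Meta above, plus oivscode, SciraAI, Toolbaz, TurboSeek, TypefullyAI, and YEPCHAT below) gain a required_auth = False class attribute. A minimal sketch of how a caller might use that flag to discover keyless providers; only the attribute itself appears in this diff, the discovery loop below is hypothetical:

# Hypothetical sketch: enumerate providers that declare required_auth = False.
# Only the required_auth attribute is part of this release; this loop is illustrative.
import inspect

import webscout.Provider as provider_pkg
from webscout.AIbase import Provider

def keyless_providers():
    """Yield (name, class) pairs for providers usable without credentials."""
    for name, obj in inspect.getmembers(provider_pkg, inspect.isclass):
        if issubclass(obj, Provider) and getattr(obj, "required_auth", True) is False:
            yield name, obj

if __name__ == "__main__":
    for name, _cls in keyless_providers():
        print(name)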
webscout/Provider/oivscode.py
CHANGED

@@ -1,9 +1,8 @@
 import secrets
 import requests
-import json
 import random
 import string
-from typing import Union, Any, Dict,
+from typing import Union, Any, Dict, Generator
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
@@ -16,6 +15,7 @@ class oivscode(Provider):
     """
     A class to interact with a test API.
     """
+    required_auth = False
     AVAILABLE_MODELS = [
         "*",
         "Qwen/Qwen2.5-72B-Instruct-Turbo",
webscout/Provider/scira_chat.py
CHANGED

@@ -16,52 +16,18 @@ class SciraAI(Provider):
     """
     A class to interact with the Scira AI chat API.
     """
-
+    required_auth = False
     # Model mapping: actual model names to Scira API format
     MODEL_MAPPING = {
         "grok-3-mini": "scira-default",
-        "
-        "
-        "
-        "
-        "grok-4": "scira-grok-4",
-        "grok-2-vision-1212": "scira-vision",
-        "grok-2-latest": "scira-g2",
-        "gpt-4o-mini": "scira-4o-mini",
-        "o4-mini-2025-04-16": "scira-o4-mini",
-        "o3": "scira-o3",
-        "qwen/qwen3-32b": "scira-qwen-32b",
-        "qwen3-30b-a3b": "scira-qwen-30b",
-        "deepseek-v3-0324": "scira-deepseek-v3",
-        "claude-3-5-haiku-20241022": "scira-haiku",
-        "mistral-small-latest": "scira-mistral",
-        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
-        "gemini-2.5-flash": "scira-google",
-        "gemini-2.5-pro": "scira-google-pro",
-        "claude-sonnet-4-20250514": "scira-anthropic",
-        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
-        "claude-4-opus-20250514": "scira-opus",
-        "claude-4-opus-20250514-pro": "scira-opus-pro",
-        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
-        "kimi-k2-instruct": "scira-kimi-k2",
-        "scira-kimi-k2": "kimi-k2-instruct",
+        "llama-4-maverick": "scira-llama-4",
+        "qwen3-4b": "scira-qwen-4b",
+        "qwen3-32b": "scira-qwen-32b",
+        "qwen3-4b-thinking": "scira-qwen-4b-thinking",
     }
 
     # Reverse mapping: Scira format to actual model names
     SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
-    # Add special cases for aliases and duplicate mappings
-    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
-    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
-    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
-    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
-    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
-    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
-    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
-    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
-    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
-    SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
-    SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
-    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
     # Available models list (actual model names + scira aliases)
     AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
 
@@ -139,19 +105,21 @@
 
         # Use the fingerprint for headers
         self.headers = {
-            "Accept":
+            "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language":
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
             "Content-Type": "application/json",
             "Origin": "https://scira.ai",
             "Referer": "https://scira.ai/",
-            "Sec-CH-UA":
+            "Sec-CH-UA": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
             "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform":
-            "User-Agent":
+            "Sec-CH-UA-Platform": '"Windows"',
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin"
+            "Sec-Fetch-Site": "same-origin",
+            "DNT": "1",
+            "Priority": "u=1, i"
         }
 
         self.session = Session()  # Use curl_cffi Session
@@ -196,13 +164,13 @@
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         self.fingerprint = self.agent.generate_fingerprint(browser)
 
-        # Update headers with new fingerprint
+        # Update headers with new fingerprint (keeping the updated values)
         self.headers.update({
-            "Accept":
-            "Accept-Language":
-            "Sec-CH-UA":
-            "Sec-CH-UA-Platform":
-            "User-Agent":
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Sec-CH-UA": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
+            "Sec-CH-UA-Platform": '"Windows"',
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0",
         })
 
         # Update session headers
@@ -213,19 +181,18 @@
 
     @staticmethod
     def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[dict]:
-        """Extracts
-        Returns a dict
+        """Extracts JSON chunks from the Scira stream format.
+        Returns a dict with the parsed JSON data.
         """
         if isinstance(chunk, str):
-
-
-
-
-
-
-
-            return result
+            if chunk.startswith("data: "):
+                json_str = chunk[6:].strip()  # Remove "data: " prefix
+                if json_str == "[DONE]":
+                    return {"type": "done"}
+                try:
+                    return json.loads(json_str)
+                except json.JSONDecodeError:
+                    return None
         return None
 
     def ask(
@@ -246,8 +213,7 @@ class SciraAI(Provider):
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         messages = [
-            {"role": "
-            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
+            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}], "id": str(uuid.uuid4())[:16]}
         ]
 
         # Prepare the request payload
@@ -257,7 +223,9 @@
             "model": self.model,
             "group": self.search_mode,
             "user_id": self.user_id,
-            "timezone": "Asia/Calcutta"
+            "timezone": "Asia/Calcutta",
+            "isCustomInstructionsEnabled": False,
+            "searchProvider": "parallel"
         }
 
         def for_stream():
@@ -306,41 +274,37 @@
                 if content is None:
                     continue
                 if isinstance(content, dict):
-
-
-                    zero_chunk = content.get("0")
-                    if g_chunks:
+                    event_type = content.get("type")
+                    if event_type == "reasoning-start":
                         if not in_think:
                             if raw:
                                 yield "<think>\n\n"
                             else:
                                 yield "<think>\n\n"
                             in_think = True
-
+                    elif event_type == "reasoning-delta":
+                        if in_think:
+                            delta = content.get("delta", "")
                             if raw:
-                                yield
+                                yield delta
                             else:
-                                yield dict(text=
-
+                                yield dict(text=delta)
+                    elif event_type == "reasoning-end":
                         if in_think:
                             if raw:
                                 yield "</think>\n\n"
                             else:
                                 yield "</think>\n\n"
                             in_think = False
+                    elif event_type == "text-delta":
+                        delta = content.get("delta", "")
                         if raw:
-                            yield
+                            yield delta
                         else:
-                            streaming_response +=
-                            yield dict(text=
-
-
-                    if raw:
-                        yield content
-                    else:
-                        if content and isinstance(content, str):
-                            streaming_response += content
-                            yield dict(text=content)
+                            streaming_response += delta
+                            yield dict(text=delta)
+                    elif event_type == "done":
+                        break  # End of stream
                 if not raw:
                     self.last_response = {"text": streaming_response}
                     self.conversation.update_chat_history(prompt, streaming_response)
@@ -415,43 +379,6 @@
         return response.get("text", "")
 
 if __name__ == "__main__":
-
-
-
-
-    # Test all available models
-    working = 0
-    total = len(SciraAI.AVAILABLE_MODELS)
-
-    for model in SciraAI.AVAILABLE_MODELS:
-        try:
-            test_ai = SciraAI(model=model, timeout=60)
-            # Test stream first
-            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
-            response_text = ""
-            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
-            for chunk in response_stream:
-                response_text += chunk
-                # Optional: print chunks as they arrive for visual feedback
-                # print(chunk, end="", flush=True)
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response_text.strip()  # Already decoded in get_message
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗ (Stream)"
-                display_text = "Empty or invalid stream response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-
-            # Optional: Add non-stream test if needed, but stream test covers basic functionality
-            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
-            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
-            # if not response_non_stream or len(response_non_stream.strip()) == 0:
-            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
-
-
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
+    ai = SciraAI(model="grok-3-mini", is_conversation=True, system_prompt="You are a helpful assistant.")
+    for resp in ai.chat("Explain the theory of relativity in simple terms.", stream=True, raw=False):
+        print(resp, end="", flush=True)
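The rewritten stream loop above replaces the old positional-key chunks ("0", "g") with typed events: reasoning-start, reasoning-delta, reasoning-end, text-delta, and done. A self-contained sketch of the same parsing logic, assuming each stream line arrives in the "data: {...}" form that _scira_extractor expects; the demo events below are invented for illustration:

# Self-contained sketch of the new Scira event handling; demo lines are invented.
import json

def parse_scira_line(line: str):
    """Mirror of _scira_extractor: strip the SSE prefix and decode JSON."""
    if line.startswith("data: "):
        payload = line[6:].strip()
        if payload == "[DONE]":
            return {"type": "done"}
        try:
            return json.loads(payload)
        except json.JSONDecodeError:
            return None
    return None

demo = [
    'data: {"type": "reasoning-start"}',
    'data: {"type": "reasoning-delta", "delta": "thinking..."}',
    'data: {"type": "reasoning-end"}',
    'data: {"type": "text-delta", "delta": "Hello"}',
    'data: [DONE]',
]
text = ""
for event in filter(None, map(parse_scira_line, demo)):
    if event["type"] == "text-delta":
        text += event.get("delta", "")
    elif event["type"] == "done":
        break
print(text)  # -> Hello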
webscout/Provider/searchchat.py
CHANGED
webscout/Provider/sonus.py
CHANGED
webscout/Provider/toolbaz.py
CHANGED

@@ -21,30 +21,33 @@ class Toolbaz(Provider):
     A class to interact with the Toolbaz API. Supports streaming responses.
     """
 
+    required_auth = False
     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
+        "gemini-2.5-pro",
         "gemini-2.0-flash-thinking",
-        "sonar",
         "gemini-2.0-flash",
-
+
+        "claude-sonnet-4",
+
+        "gpt-5",
+        "gpt-oss-120b",
         "o3-mini",
         "gpt-4o-latest",
-
+
+        "toolbaz_v4",
+        "toolbaz_v3.5_pro",
+
         "deepseek-r1",
+        "deepseek-v3.1",
+        "deepseek-v3",
+
         "Llama-4-Maverick",
-        "Llama-4-Scout",
         "Llama-3.3-70B",
-
-        "Qwen2.5-72B",
-        "grok-2-1212",
-        "grok-3-beta",
-        "toolbaz_v3",
-        "toolbaz_v3.5_pro",
-        "toolbaz_v4",
+
         "mixtral_8x22b",
         "L3-70B-Euryale-v2.1",
         "midnight-rose",
-        "unity",
         "unfiltered_x"
     ]
 
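The Toolbaz model list is refreshed: gemini-2.5-pro, claude-sonnet-4, gpt-5, gpt-oss-120b, deepseek-v3.1, and deepseek-v3 are added, while sonar, Llama-4-Scout, Qwen2.5-72B, the grok models, toolbaz_v3, and unity are dropped. A usage sketch under the assumption that Toolbaz keeps the same streaming chat() interface and model/timeout constructor keywords seen in the other providers of this diff:

# Usage sketch; assumes Toolbaz exposes chat(prompt, stream=...) like the
# other providers in this diff, and accepts model/timeout keywords.
from webscout.Provider.toolbaz import Toolbaz

ai = Toolbaz(model="gpt-5", timeout=60)  # "gpt-5" is newly listed in 8.3.7
for chunk in ai.chat("Name three uses of a diff tool.", stream=True):
    print(chunk, end="", flush=True)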
webscout/Provider/turboseek.py
CHANGED

@@ -1,19 +1,21 @@
+
+import re
+from typing import Optional, Union, Any, AsyncGenerator, Dict
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
-import json
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from typing import Optional, Union, Any, AsyncGenerator, Dict
 from webscout.litagent import LitAgent
 
 class TurboSeek(Provider):
     """
     This class provides methods for interacting with the TurboSeek API.
     """
+    required_auth = False
     AVAILABLE_MODELS = ["Llama 3.1 70B"]
 
     def __init__(
@@ -58,13 +60,14 @@
             "dnt": "1",
             "origin": "https://www.turboseek.io",
             "priority": "u=1, i",
-            "referer": "https://www.turboseek.io
-            "sec-ch-ua": '"
+            "referer": "https://www.turboseek.io/",
+            "sec-ch-ua": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
             "user-agent": LitAgent().random(),
         }
 
@@ -88,11 +91,27 @@
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _strip_html_tags(text: str) -> str:
+        """Remove HTML tags from text."""
+        import re
+        # Remove HTML tags and entities
+        text = re.sub(r'<[^>]*>', '', text)
+        text = re.sub(r'&[^;]+;', ' ', text)
+        text = re.sub(r'\s+', ' ', text).strip()
+        return text
+
     @staticmethod
     def _turboseek_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
         """Extracts content from TurboSeek stream JSON objects."""
         if isinstance(chunk, dict) and "text" in chunk:
-
+            text = chunk.get("text")
+            if text:
+                # Clean HTML tags from the response
+                return TurboSeek._strip_html_tags(str(text))
+        elif isinstance(chunk, str):
+            # Handle raw string content
+            return TurboSeek._strip_html_tags(chunk)
         return None
 
     def ask(
@@ -155,7 +174,9 @@
             to_json=True,
             content_extractor=self._turboseek_extractor,
             yield_raw_on_error=False,
-            raw=raw
+            raw=raw,
+            extract_regexes=[r'<[^>]*>([^<]*)<[^>]*>', r'([^<]+)'],
+            skip_regexes=[r'<script[^>]*>.*?</script>', r'<style[^>]*>.*?</style>']
         )
         for content_chunk in processed_stream:
             if isinstance(content_chunk, bytes):
@@ -247,19 +268,7 @@
 if __name__ == '__main__':
     # Ensure curl_cffi is installed
     from rich import print
-
-
-
-
-        for chunk in response_stream:
-            print(chunk, end="", flush=True)
-        # Optional: Test non-stream
-        # print("[bold blue]Testing Non-Stream:[/bold blue]")
-        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
-        # print(response_non_stream)
-        # print("[bold green]Non-Stream Test Complete.[/bold green]")
-
-    except exceptions.FailedToGenerateResponseError as e:
-        print(f"\n[bold red]API Error:[/bold red] {e}")
-    except Exception as e:
-        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
+    ai = TurboSeek(timeout=60)
+    response_stream = ai.chat("How can I get a 6 pack in 3 months?", stream=True, raw=False)
+    for chunk in response_stream:
+        print(chunk, end="", flush=True)
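TurboSeek responses evidently now arrive wrapped in HTML, hence the new _strip_html_tags helper and the extract/skip regexes passed to sanitize_stream. The tag-stripping logic can be checked in isolation; the sample input below is invented to exercise tags, entities, and whitespace collapsing:

# Isolated check of the tag-stripping logic added to TurboSeek; the sample
# input is invented for the demo.
import re

def strip_html_tags(text: str) -> str:
    text = re.sub(r'<[^>]*>', '', text)       # drop tags
    text = re.sub(r'&[^;]+;', ' ', text)      # replace entities with a space
    return re.sub(r'\s+', ' ', text).strip()  # collapse whitespace

print(strip_html_tags("<p>Build&nbsp;core</p> <b>strength</b>"))
# -> Build core strength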
webscout/Provider/typefully.py
CHANGED

@@ -12,6 +12,7 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 
 class TypefullyAI(Provider):
+    required_auth = False
     AVAILABLE_MODELS = ["openai:gpt-4o-mini", "openai:gpt-4o", "anthropic:claude-3-5-haiku-20241022", "groq:llama-3.3-70b-versatile"]
 
     def __init__(
@@ -204,4 +205,4 @@ if __name__ == "__main__":
             display_text = "Empty or invalid stream response"
         print(f"\r{model:<50} {status:<10} {display_text}")
     except Exception as e:
-        print(f"\r{model:<50} {'FAIL':<10} {str(e)}")
+        print(f"\r{model:<50} {'FAIL':<10} {str(e)}")
webscout/Provider/x0gpt.py
CHANGED
webscout/Provider/yep.py
CHANGED

@@ -21,6 +21,7 @@ class YEPCHAT(Provider):
         AVAILABLE_MODELS (list): List of available models for the provider.
     """
 
+    required_auth = False
     AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
 
     def __init__(
@@ -366,7 +367,7 @@ if __name__ == "__main__":
     # except Exception as e:
     #     print(f"{model:<50} {'✗':<10} {str(e)}")
     ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B", timeout=60)
-    response = ai.chat("Say 'Hello' in one word", raw=
+    response = ai.chat("Say 'Hello' in one word", raw=False, stream=True)
     for chunk in response:
 
         print(chunk, end='', flush=True)
webscout/tempid.py
CHANGED
webscout/version.py
CHANGED

@@ -1,2 +1,2 @@
-__version__ = "8.3.6"
+__version__ = "8.3.7"
 __prog__ = "webscout"
{webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: webscout
-Version: 8.3.6
+Version: 8.3.7
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author-email: OEvortex <helpingai5@gmail.com>
 License: HelpingAI
@@ -74,6 +74,7 @@ Requires-Dist: motor; extra == "api"
 Requires-Dist: jinja2; extra == "api"
 Requires-Dist: supabase; extra == "api"
 Requires-Dist: websockets>=11.0; extra == "api"
+Requires-Dist: starlette; extra == "api"
 Dynamic: license-file
 
 <div align="center">