webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/meta.py
CHANGED
```diff
@@ -6,7 +6,8 @@ import uuid
 from typing import Dict, Generator, Iterator, List, Union
 
 import random
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 from webscout.scout import Scout
 
 from webscout.AIutel import Optimizers
@@ -105,12 +106,17 @@ def get_fb_session(email, password, proxies=None):
         "upgrade-insecure-requests": "1",
         "user-agent": Lit().random(),
     }
+    # Create a session
+    session = Session()
+    if proxies:
+        session.proxies = proxies
+
     # Send the GET request
-    response = requests.get(login_url, headers=headers, proxies=proxies)
-
+    response = session.get(login_url, headers=headers)
+
     # Use Scout for parsing instead of BeautifulSoup
     scout = Scout(response.text)
-
+
     # Parse necessary parameters from the login form
     lsd = scout.find_first('input[name="lsd"]').get('value')
     jazoest = scout.find_first('input[name="jazoest"]').get('value')
@@ -151,9 +157,6 @@ def get_fb_session(email, password, proxies=None):
     }
 
     # Send the POST request
-    session = requests.session()
-    session.proxies = proxies
-
     result = session.post(post_url, headers=headers, data=data)
     if "sb" not in session.cookies:
         raise exceptions.FacebookInvalidCredentialsException(
@@ -195,7 +198,12 @@ def get_fb_session(email, password, proxies=None):
         "viewport-width": "1728",
     }
 
-    response = requests.post(url, headers=headers, data=payload, proxies=proxies)
+    # Create a new session for this request
+    req_session = Session()
+    if proxies:
+        req_session.proxies = proxies
+
+    response = req_session.post(url, headers=headers, data=payload)
 
     state = extract_value(response.text, start_str='"state":"', end_str='"')
 
@@ -214,9 +222,13 @@ def get_fb_session(email, password, proxies=None):
         "upgrade-insecure-requests": "1",
         "user-agent": Lit().random(),
     }
-
-    response = session.get(url, headers=headers, data=payload, allow_redirects=False)
-
+
+    # Create a new session for Facebook
+    fb_session = Session()
+    if proxies:
+        fb_session.proxies = proxies
+
+    response = fb_session.get(url, headers=headers, data=payload, allow_redirects=False)
 
     next_url = response.headers["Location"]
 
@@ -238,8 +250,8 @@ def get_fb_session(email, password, proxies=None):
         "Sec-Fetch-User": "?1",
         "TE": "trailers",
     }
-
-    cookies = session.get(url, headers=headers, data=payload).cookies.get_dict()
+    fb_session.get(url, headers=headers, data=payload)
+    cookies = fb_session.cookies.get_dict()
     if "abra_sess" not in cookies:
         raise exceptions.FacebookInvalidCredentialsException(
             "Was not able to login to Facebook. Please check your credentials. "
@@ -249,24 +261,20 @@
     return cookies
 
 
-def get_cookies(self) -> dict:
+def get_cookies() -> dict:
     """
     Extracts necessary cookies from the Meta AI main page.
 
     Returns:
         dict: A dictionary containing essential cookies.
     """
-
-
-
-
-
-    response = requests.get(
+    # Create a session
+    session = Session()
+
+    response = session.get(
         "https://www.meta.ai/",
-        headers=headers,
-        proxies=self.proxy,
     )
-
+
     cookies = {
         "_js_datr": extract_value(
             response.text, start_str='_js_datr":{"value":"', end_str='",'
@@ -280,14 +288,10 @@ def get_cookies(self) -> dict:
         "fb_dtsg": extract_value(
             response.text, start_str='DTSGInitData",[],{"token":"', end_str='"'
         ),
-    }
-
-    if len(headers) > 0:
-        cookies["abra_sess"] = fb_session["abra_sess"]
-    else:
-        cookies["abra_csrf"] = extract_value(
+        "abra_csrf": extract_value(
             response.text, start_str='abra_csrf":{"value":"', end_str='",'
         )
+    }
     return cookies
 
 class Meta(Provider):
@@ -328,7 +332,7 @@ class Meta(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
-        self.session = requests.Session()
+        self.session = Session()
         self.session.headers.update(
             {
                 "user-agent": Lit().random(),
@@ -385,7 +389,7 @@ class Meta(Provider):
                 self.session.proxies = self.proxy
                 return True
             return False
-        except requests.exceptions.RequestException:
+        except CurlError:
             return False
 
     def get_access_token(self) -> str:
@@ -513,7 +517,7 @@ class Meta(Provider):
         if self.is_authed:
            headers["cookie"] = f'abra_sess={self.cookies["abra_sess"]}'
             # Recreate the session to avoid cookie leakage when user is authenticated
-            self.session = requests.Session()
+            self.session = Session()
             self.session.proxies = self.proxy
 
         if stream:
@@ -700,13 +704,13 @@ class Meta(Provider):
         if self.fb_email is not None and self.fb_password is not None:
             fb_session = get_fb_session(self.fb_email, self.fb_password, self.proxy)
             headers = {"cookie": f"abra_sess={fb_session['abra_sess']}"}
-
-        response = requests.get(
-            "https://www.meta.ai/",
+
+        response = self.session.get(
+            url="https://www.meta.ai/",
             headers=headers,
             proxies=self.proxy,
        )
-
+
         cookies = {
             "_js_datr": extract_value(
                 response.text, start_str='_js_datr":{"value":"', end_str='",'
@@ -786,7 +790,7 @@ class Meta(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["message"]
-
+
 if __name__ == "__main__":
     Meta = Meta()
     ai = Meta.chat("hi")
```

(Removed lines shown as a bare `-` were not rendered by the diff viewer; their original content could not be recovered.)
webscout/Provider/multichat.py
CHANGED
```diff
@@ -1,9 +1,10 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import uuid
 from typing import Any, Dict, Union
 from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -108,7 +109,7 @@ class MultiChatAI(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 4000,
+        max_tokens: int = 4000, # Note: max_tokens is not directly used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -119,14 +120,15 @@
         model: str = "llama-3.3-70b-versatile",
         system_prompt: str = "You are a friendly, helpful AI assistant.",
         temperature: float = 0.5,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 1
+        presence_penalty: int = 0, # Note: presence_penalty is not used by this API
+        frequency_penalty: int = 0, # Note: frequency_penalty is not used by this API
+        top_p: float = 1 # Note: top_p is not used by this API
     ):
         """Initializes the MultiChatAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -138,21 +140,24 @@
         self.frequency_penalty = frequency_penalty
         self.top_p = top_p
 
-        # Initialize LitAgent for user agent generation
+        # Initialize LitAgent for user agent generation (keep if needed for other headers)
         self.agent = LitAgent()
 
         self.headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
-            "content-type": "text/plain;charset=UTF-8",
+            "content-type": "text/plain;charset=UTF-8", # Keep content-type
             "origin": "https://www.multichatai.com",
             "referer": "https://www.multichatai.com/",
             "user-agent": self.agent.random(),
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
 
+        # Update curl_cffi session headers, proxies, and cookies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-
+        self.session.proxies = proxies # Assign proxies directly
+        # Set cookies on the session object for curl_cffi
+        self.session.cookies.set("session", uuid.uuid4().hex)
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
@@ -225,34 +230,41 @@
             error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
             raise ValueError(error_msg)
 
-    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
+    def _make_request(self, payload: Dict[str, Any]) -> Any:
         """Make the API request with proper error handling."""
         try:
+            # Use curl_cffi session post with impersonate
+            # Cookies are handled by the session
             response = self.session.post(
                 self._get_endpoint(),
-                headers=self.headers,
+                # headers are set on the session
                 json=payload,
                 timeout=self.timeout,
+                # proxies are set on the session
+                impersonate="chrome110" # Use a common impersonation profile
             )
-            response.raise_for_status()
+            response.raise_for_status() # Check for HTTP errors
             return response
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
+        except CurlError as e: # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
+        except Exception as e: # Catch other potential exceptions (like HTTPError)
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
 
     def ask(
         self,
         prompt: str,
-        raw: bool = False,
+        raw: bool = False, # Keep raw param for interface consistency
         optimizer: str = None,
         conversationally: bool = False,
+        # Add stream parameter for consistency, though API doesn't stream
+        stream: bool = False
     ) -> Dict[str, Any]:
         """Sends a prompt to the MultiChatAI API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 error_msg = f"Optimizer is not one of {self.__available_optimizers}"
                 raise exceptions.FailedToGenerateResponseError(error_msg)
@@ -263,26 +275,53 @@
             "customModelId": "",
         }
 
+        # API does not stream, implement non-stream logic directly
         response = self._make_request(payload)
         try:
-            full_response = response.text.strip()
-            self.last_response = {"text": full_response}
+            # Use response.text which is already decoded
+            response_text_raw = response.text # Get raw text
+
+            # Process the text using sanitize_stream (even though it's not streaming)
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                intro_value=None, # No prefix
+                to_json=False # It's plain text
+            )
+            # Aggregate the single result
+            full_response = "".join(list(processed_stream)).strip()
+
+            self.last_response = {"text": full_response} # Store processed text
             self.conversation.update_chat_history(prompt, full_response)
-
-
-
+            # Return dict or raw string based on raw flag
+            return full_response if raw else self.last_response
+        except Exception as e: # Catch potential errors during text processing
+            raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e
 
     def chat(
         self,
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
+        # Add stream parameter for consistency
+        stream: bool = False
     ) -> str:
         """Generate response."""
-        return self.get_message(
-            self.ask(prompt, optimizer=optimizer, conversationally=conversationally)
+        # Since ask() now handles both stream=True/False by returning the full response dict/str:
+        response_data = self.ask(
+            prompt,
+            stream=False, # Call ask in non-stream mode internally
+            raw=False, # Ensure ask returns dict
+            optimizer=optimizer,
+            conversationally=conversationally
        )
-
+        # If stream=True was requested, simulate streaming by yielding the full message at once
+        if stream:
+            def stream_wrapper():
+                yield self.get_message(response_data)
+            return stream_wrapper()
+        else:
+            # If stream=False, return the full message directly
+            return self.get_message(response_data)
 
     def get_message(self, response: Union[Dict[str, Any], str]) -> str:
         """
```

(Removed lines shown as a bare `-` were not rendered by the diff viewer; their original content could not be recovered.)
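The rewritten `chat()` keeps a `stream` flag even though the MultiChatAI endpoint answers in a single response; `stream=True` is simulated by yielding the complete message once. A self-contained sketch of that pseudo-streaming wrapper (`complete` is a stand-in for the real request code, not webscout's API):

```python
# Sketch of the pseudo-streaming pattern chat() adopts above: the endpoint
# returns one complete string, and stream=True just wraps it in a generator.
from typing import Generator, Union


def complete(prompt: str) -> str:
    return f"full answer to: {prompt!r}"  # imagine one blocking API call here


def chat(prompt: str, stream: bool = False) -> Union[str, Generator[str, None, None]]:
    text = complete(prompt)
    if stream:
        def stream_wrapper() -> Generator[str, None, None]:
            yield text               # a single chunk: the whole message at once
        return stream_wrapper()
    return text


for chunk in chat("hi", stream=True):  # both modes share one calling convention
    print(chunk)
```

The payoff is a uniform interface: callers can iterate over `chat(..., stream=True)` exactly as they would over a provider that streams for real.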
webscout/Provider/scira_chat.py
CHANGED
```diff
@@ -1,11 +1,12 @@
 from os import system
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
 import uuid
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Optional, Union, List
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -17,15 +18,13 @@ class SciraAI(Provider):
     """
 
     AVAILABLE_MODELS = {
-        "scira-default": "Grok3",
-        "scira-grok-3-mini": "Grok3-mini",
+        "scira-default": "Grok3-mini", # thinking model
+        "scira-grok-3": "Grok3",
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
         "scira-google": "gemini 2.5 flash"
-
-
     }
 
     def __init__(
@@ -92,9 +91,9 @@
             "Sec-Fetch-Site": "same-origin"
         }
 
-        self.session = requests.Session()
+        self.session = Session() # Use curl_cffi Session
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies # Assign proxies directly
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -150,12 +149,23 @@
 
         return self.fingerprint
 
+    @staticmethod
+    def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Scira stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Dict[str, Any]: # Note: Stream parameter removed as API doesn't seem to support it
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -181,10 +191,16 @@
         }
 
         try:
-            response = self.session.post(self.url, json=payload, timeout=self.timeout)
+            # Use curl_cffi post with impersonate
+            response = self.session.post(
+                self.url,
+                json=payload,
+                timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
+            )
             if response.status_code != 200:
                 # Try to get response content for better error messages
-                try:
+                try: # Use try-except for reading response content
                     error_content = response.text
                 except:
                     error_content = "<could not read response content>"
@@ -192,7 +208,10 @@
             if response.status_code in [403, 429]:
                 print(f"Received status code {response.status_code}, refreshing identity...")
                 self.refresh_identity()
-                response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                response = self.session.post(
+                    self.url, json=payload, timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate to retry
+                )
                 if not response.ok:
                     raise exceptions.FailedToGenerateResponseError(
                         f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
@@ -203,28 +222,27 @@
                     f"Request failed with status code {response.status_code}. Response: {error_content}"
                 )
 
-            full_response = ""
-            debug_lines = []
-
-            # Collect the first few lines for debugging
-            for i, line in enumerate(response.iter_lines()):
-                if line:
-                    try:
-                        line_str = line.decode('utf-8')
-                        debug_lines.append(line_str)
+            response_text_raw = response.text # Get raw response text
 
-
-
-
-
-
-
+            # Process the text using sanitize_stream line by line
+            processed_stream = sanitize_stream(
+                data=response_text_raw.splitlines(), # Split into lines
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._scira_extractor # Use the specific extractor
+            )
 
+            # Aggregate the results from the generator
+            full_response = ""
+            for content in processed_stream:
+                if content and isinstance(content, str):
+                    full_response += content
 
-            except: pass
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
             return {"text": full_response}
+        except CurlError as e: # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
@@ -242,7 +260,8 @@
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        # Extractor handles formatting
+        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
 if __name__ == "__main__":
     print("-" * 100)
```

(Removed lines shown as a bare `-` were not rendered by the diff viewer; their original content could not be recovered.)
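The new `_scira_extractor` pulls assistant text out of stream lines shaped like `0:"..."` and decodes their escape sequences. A standalone check of that regex on made-up sample lines (the `f:`/`e:` records only mimic the wire format; they are not captured Scira traffic):

```python
# Standalone check of the 0:"..." extraction regex used by _scira_extractor above.
import re
from typing import Optional


def extract_chunk(line: str) -> Optional[str]:
    match = re.search(r'0:"(.*?)"(?=,|$)', line)      # payload lines look like 0:"..."
    if not match:
        return None                                   # metadata lines (f:, e:, ...) carry no text
    content = match.group(1).encode().decode("unicode_escape")
    return content.replace('\\\\', '\\').replace('\\"', '"')


lines = [
    'f:{"messageId":"msg-1"}',
    '0:"Hello"',
    '0:", world\\n"',
    'e:{"finishReason":"stop"}',
]
print("".join(c for c in map(extract_chunk, lines) if c), end="")  # prints: Hello, world
```

Feeding `response.text.splitlines()` through this extractor is exactly what the `sanitize_stream(..., content_extractor=self._scira_extractor)` call in the diff does: metadata lines yield `None` and are dropped, and the `0:` payload fragments are concatenated into the final reply.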