webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/meta.py
CHANGED
```diff
@@ -6,7 +6,8 @@ import uuid
 from typing import Dict, Generator, Iterator, List, Union
 
 import random
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 from webscout.scout import Scout
 
 from webscout.AIutel import Optimizers
@@ -105,12 +106,17 @@ def get_fb_session(email, password, proxies=None):
         "upgrade-insecure-requests": "1",
         "user-agent": Lit().random(),
     }
+    # Create a session
+    session = Session()
+    if proxies:
+        session.proxies = proxies
+
     # Send the GET request
-    response = requests.get(login_url, headers=headers, proxies=proxies)
-
+    response = session.get(login_url, headers=headers)
+
     # Use Scout for parsing instead of BeautifulSoup
     scout = Scout(response.text)
 
     # Parse necessary parameters from the login form
     lsd = scout.find_first('input[name="lsd"]').get('value')
     jazoest = scout.find_first('input[name="jazoest"]').get('value')
@@ -151,9 +157,6 @@ def get_fb_session(email, password, proxies=None):
     }
 
     # Send the POST request
-    session = requests.session()
-    session.proxies = proxies
-
     result = session.post(post_url, headers=headers, data=data)
     if "sb" not in session.cookies:
         raise exceptions.FacebookInvalidCredentialsException(
@@ -195,7 +198,12 @@ def get_fb_session(email, password, proxies=None):
         "viewport-width": "1728",
     }
 
-    response = session.post(url, headers=headers, data=payload)
+    # Create a new session for this request
+    req_session = Session()
+    if proxies:
+        req_session.proxies = proxies
+
+    response = req_session.post(url, headers=headers, data=payload)
 
     state = extract_value(response.text, start_str='"state":"', end_str='"')
 
@@ -214,9 +222,13 @@ def get_fb_session(email, password, proxies=None):
         "upgrade-insecure-requests": "1",
         "user-agent": Lit().random(),
     }
-
-    response = session.get(url, headers=headers, data=payload, allow_redirects=False)
-
+
+    # Create a new session for Facebook
+    fb_session = Session()
+    if proxies:
+        fb_session.proxies = proxies
+
+    response = fb_session.get(url, headers=headers, data=payload, allow_redirects=False)
 
     next_url = response.headers["Location"]
 
@@ -238,8 +250,8 @@ def get_fb_session(email, password, proxies=None):
         "Sec-Fetch-User": "?1",
         "TE": "trailers",
     }
-
-    cookies = session.cookies.get_dict()
+    fb_session.get(url, headers=headers, data=payload)
+    cookies = fb_session.cookies.get_dict()
     if "abra_sess" not in cookies:
         raise exceptions.FacebookInvalidCredentialsException(
             "Was not able to login to Facebook. Please check your credentials. "
@@ -249,24 +261,20 @@ def get_fb_session(email, password, proxies=None):
     return cookies
 
 
-def get_cookies(self) -> dict:
+def get_cookies() -> dict:
     """
     Extracts necessary cookies from the Meta AI main page.
 
     Returns:
         dict: A dictionary containing essential cookies.
     """
-    ...
-    response = requests.get(
+    # Create a session
+    session = Session()
+
+    response = session.get(
         "https://www.meta.ai/",
-        headers=headers,
-        proxies=self.proxy,
     )
-
+
     cookies = {
         "_js_datr": extract_value(
             response.text, start_str='_js_datr":{"value":"', end_str='",'
@@ -280,14 +288,10 @@ def get_cookies(self) -> dict:
         "fb_dtsg": extract_value(
             response.text, start_str='DTSGInitData",[],{"token":"', end_str='"'
         ),
-    }
-
-    if len(headers) > 0:
-        cookies["abra_sess"] = fb_session["abra_sess"]
-    else:
-        cookies["abra_csrf"] = extract_value(
+        "abra_csrf": extract_value(
             response.text, start_str='abra_csrf":{"value":"', end_str='",'
         )
+    }
     return cookies
 
 
 class Meta(Provider):
@@ -328,7 +332,7 @@ class Meta(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
-        self.session = requests.Session()
+        self.session = Session()
         self.session.headers.update(
             {
                 "user-agent": Lit().random(),
@@ -385,7 +389,7 @@ class Meta(Provider):
                 self.session.proxies = self.proxy
                 return True
             return False
-        except requests.exceptions.RequestException:
+        except CurlError:
             return False
 
     def get_access_token(self) -> str:
@@ -513,7 +517,7 @@ class Meta(Provider):
         if self.is_authed:
             headers["cookie"] = f'abra_sess={self.cookies["abra_sess"]}'
             # Recreate the session to avoid cookie leakage when user is authenticated
-            self.session = requests.Session()
+            self.session = Session()
             self.session.proxies = self.proxy
 
         if stream:
@@ -700,13 +704,13 @@ class Meta(Provider):
         if self.fb_email is not None and self.fb_password is not None:
            fb_session = get_fb_session(self.fb_email, self.fb_password, self.proxy)
            headers = {"cookie": f"abra_sess={fb_session['abra_sess']}"}
-
-        response = requests.get(
-            "https://www.meta.ai/",
+
+        response = self.session.get(
+            url="https://www.meta.ai/",
             headers=headers,
             proxies=self.proxy,
         )
-
+
         cookies = {
             "_js_datr": extract_value(
                 response.text, start_str='_js_datr":{"value":"', end_str='",'
@@ -786,7 +790,7 @@ class Meta(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["message"]
-
+
 
 if __name__ == "__main__":
     Meta = Meta()
     ai = Meta.chat("hi")
```
webscout/Provider/multichat.py
CHANGED
```diff
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import uuid
 from typing import Any, Dict, Union
@@ -108,7 +109,7 @@ class MultiChatAI(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 4000,
+        max_tokens: int = 4000,  # Note: max_tokens is not directly used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -119,14 +120,15 @@ class MultiChatAI(Provider):
         model: str = "llama-3.3-70b-versatile",
         system_prompt: str = "You are a friendly, helpful AI assistant.",
         temperature: float = 0.5,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 1
+        presence_penalty: int = 0,  # Note: presence_penalty is not used by this API
+        frequency_penalty: int = 0,  # Note: frequency_penalty is not used by this API
+        top_p: float = 1  # Note: top_p is not used by this API
     ):
         """Initializes the MultiChatAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -138,21 +140,24 @@ class MultiChatAI(Provider):
         self.frequency_penalty = frequency_penalty
         self.top_p = top_p
 
-        # Initialize LitAgent for user agent generation
+        # Initialize LitAgent for user agent generation (keep if needed for other headers)
         self.agent = LitAgent()
 
         self.headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
-            "content-type": "text/plain;charset=UTF-8",
+            "content-type": "text/plain;charset=UTF-8",  # Keep content-type
             "origin": "https://www.multichatai.com",
             "referer": "https://www.multichatai.com/",
             "user-agent": self.agent.random(),
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
 
+        # Update curl_cffi session headers, proxies, and cookies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-
+        self.session.proxies = proxies  # Assign proxies directly
+        # Set cookies on the session object for curl_cffi
+        self.session.cookies.set("session", uuid.uuid4().hex)
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
@@ -225,34 +230,41 @@ class MultiChatAI(Provider):
             error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
             raise ValueError(error_msg)
 
-    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
+    def _make_request(self, payload: Dict[str, Any]) -> Any:
         """Make the API request with proper error handling."""
         try:
+            # Use curl_cffi session post with impersonate
+            # Cookies are handled by the session
             response = self.session.post(
                 self._get_endpoint(),
-                headers=self.headers,
+                # headers are set on the session
                 json=payload,
                 timeout=self.timeout,
+                # proxies are set on the session
+                impersonate="chrome110"  # Use a common impersonation profile
             )
-            response.raise_for_status()
+            response.raise_for_status()  # Check for HTTP errors
             return response
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
+        except CurlError as e:  # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
+        except Exception as e:  # Catch other potential exceptions (like HTTPError)
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
 
     def ask(
         self,
         prompt: str,
-        raw: bool = False,
+        raw: bool = False,  # Keep raw param for interface consistency
         optimizer: str = None,
         conversationally: bool = False,
+        # Add stream parameter for consistency, though API doesn't stream
+        stream: bool = False
     ) -> Dict[str, Any]:
         """Sends a prompt to the MultiChatAI API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 error_msg = f"Optimizer is not one of {self.__available_optimizers}"
                 raise exceptions.FailedToGenerateResponseError(error_msg)
@@ -263,26 +275,43 @@ class MultiChatAI(Provider):
             "customModelId": "",
         }
 
+        # API does not stream, implement non-stream logic directly
         response = self._make_request(payload)
         try:
+            # Use response.text which is already decoded
             full_response = response.text.strip()
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-            return self.last_response
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e
+            # Return dict or raw string based on raw flag
+            return full_response if raw else self.last_response
+        except Exception as e:  # Catch potential errors during text processing
+            raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e
 
     def chat(
         self,
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
+        # Add stream parameter for consistency
+        stream: bool = False
    ) -> str:
         """Generate response."""
-        return self.get_message(
-            self.ask(prompt, optimizer=optimizer, conversationally=conversationally)
+        # Since ask() now handles both stream=True/False by returning the full response dict/str:
+        response_data = self.ask(
+            prompt,
+            stream=False,  # Call ask in non-stream mode internally
+            raw=False,  # Ensure ask returns dict
+            optimizer=optimizer,
+            conversationally=conversationally
         )
-
+        # If stream=True was requested, simulate streaming by yielding the full message at once
+        if stream:
+            def stream_wrapper():
+                yield self.get_message(response_data)
+            return stream_wrapper()
+        else:
+            # If stream=False, return the full message directly
+            return self.get_message(response_data)
 
     def get_message(self, response: Union[Dict[str, Any], str]) -> str:
         """
```
webscout/Provider/scnet.py
CHANGED
```diff
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import secrets
 from typing import Any, Dict, Optional, Generator, Union
@@ -26,13 +27,13 @@ class SCNet(Provider):
         self,
         model: str = "QWQ-32B",
         is_conversation: bool = True,
-        max_tokens: int = 2048,
+        max_tokens: int = 2048,  # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
         update_file: bool = True,
         proxies: Optional[dict] = None,
-        history_offset: int = 0,
+        history_offset: int = 0,  # Note: history_offset might not be fully effective due to API structure
         act: Optional[str] = None,
         system_prompt: str = (
             "You are a helpful, advanced LLM assistant. "
@@ -46,14 +47,15 @@ class SCNet(Provider):
         self.model = model
         self.modelId = self.MODEL_NAME_TO_ID[model]
         self.system_prompt = system_prompt
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response: Dict[str, Any] = {}
         self.proxies = proxies or {}
         self.cookies = {
-            "Token": secrets.token_hex(16),
+            "Token": secrets.token_hex(16),  # Keep cookie generation logic
         }
         self.headers = {
             "accept": "text/event-stream",
@@ -61,8 +63,17 @@ class SCNet(Provider):
             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
             "referer": "https://www.scnet.cn/ui/chatbot/temp_1744712663464",
             "origin": "https://www.scnet.cn",
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
         self.url = "https://www.scnet.cn/acx/chatbot/v1/chat/completion"
+
+        # Update curl_cffi session headers, proxies, and cookies
+        self.session.headers.update(self.headers)
+        self.session.proxies = self.proxies  # Assign proxies directly
+        # Set cookies on the session object for curl_cffi
+        for name, value in self.cookies.items():
+            self.session.cookies.set(name, value)
+
         self.__available_optimizers = (
             method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
@@ -86,9 +97,7 @@ class SCNet(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
@@ -105,39 +114,61 @@ class SCNet(Provider):
 
         def for_stream():
             try:
-                response = requests.post(
+                # Use curl_cffi session post with impersonate
+                # Cookies are now handled by the session object
+                response = self.session.post(
                     self.url,
-                    headers=self.headers,
-                    cookies=self.cookies,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    proxies=self.proxies,
-                )
-                response.raise_for_status()
-                streaming_text = ""
-                for line in response.iter_lines(decode_unicode=True):
-                    if line.startswith("data:"):
+                    impersonate="chrome120"  # Changed impersonation to chrome120
+                )
+                response.raise_for_status()  # Check for HTTP errors
+
+                streaming_text = ""
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        line = line_bytes.decode('utf-8')  # Decode bytes
+                        if line.startswith("data:"):
                             data = line[5:].strip()
                             if data and data != "[done]":
                                 try:
                                     obj = json.loads(data)
                                     content = obj.get("content", "")
                                     streaming_text += content
-                                    yield content
-                                except json.JSONDecodeError:
+                                    resp = {"text": content}
+                                    # Yield dict or raw string
+                                    yield resp if not raw else content
+                                except (json.JSONDecodeError, UnicodeDecodeError):
                                     continue
                             elif data == "[done]":
                                 break
-                self.last_response = {"text": streaming_text}
-                self.conversation.update_chat_history(prompt, streaming_text)
-            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}") from e
+
+                # Update history and last response after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
             text = ""
-            for chunk in for_stream():
-                text += chunk
-            return text
+            # Ensure raw=False so for_stream yields dicts
+            for chunk_data in for_stream():
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    text += chunk_data["text"]
+                # Handle raw string case if raw=True was passed
+                elif isinstance(chunk_data, str):
+                    text += chunk_data
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return text if raw else self.last_response
 
         return for_stream() if stream else for_non_stream()
 
@@ -148,40 +179,59 @@ class SCNet(Provider):
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
-        def for_stream():
-            for chunk in self.ask(
-                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield chunk
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-                )
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
-        return for_stream() if stream else for_non_stream()
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False,  # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)  # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'ModelId':<10} {'Model':<30} {'Status':<10} {'Response'}")
     print("-" * 80)
     for model in SCNet.AVAILABLE_MODELS:
         try:
             test_ai = SCNet(model=model["name"], timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-            for chunk in response:
+            print(f"\r{model['modelId']:<10} {model['name']:<30} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
+
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+                # Clean and truncate response
+                clean_text = response_text.strip()
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
+            print(f"\r{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model['modelId']:<10} {model['name']:<30} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model['modelId']:<10} {model['name']:<30} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
-            print(f"{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")
+            print(f"\r{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")
```