webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/ChatGPTClone.py

@@ -1,6 +1,7 @@
 import time
 import uuid
-import cloudscraper
+# import cloudscraper
+from curl_cffi.requests import Session, RequestsError
 import json
 import re
 from typing import Any, Dict, Optional, Generator, Union
@@ -9,10 +10,10 @@ from datetime import date

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import WEBS, exceptions
-from webscout.litagent import LitAgent
+# from webscout.litagent import LitAgent

 class ChatGPTClone(Provider):
     """
@@ -22,6 +23,11 @@ class ChatGPTClone(Provider):

     url = "https://chatgpt-clone-ten-nu.vercel.app"
     AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
+    SUPPORTED_IMPERSONATION = [
+        "chrome110", "chrome116", "chrome119", "chrome120",
+        "chrome99_android", "edge99", "edge101",
+        "safari15_3", "safari15_6_1", "safari17_0", "safari17_2_1"
+    ]

     def __init__(
         self,
@@ -37,15 +43,18 @@ class ChatGPTClone(Provider):
         model: str = "gpt-4",
         temperature: float = 0.6,
         top_p: float = 0.7,
-
+        impersonate: str = "chrome120",
         system_prompt: str = "You are a helpful assistant."
     ):
-        """Initialize the ChatGPT Clone client."""
+        """Initialize the ChatGPT Clone client using curl_cffi."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        if impersonate not in self.SUPPORTED_IMPERSONATION:
+            raise ValueError(f"Invalid impersonate browser: {impersonate}. Choose from: {self.SUPPORTED_IMPERSONATION}")

         self.model = model
-        self.
+        self.impersonate = impersonate
+        self.session = Session(impersonate=self.impersonate, proxies=proxies, timeout=timeout)
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
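The hunk above adds an impersonate parameter and builds a curl_cffi Session that mimics a real browser's TLS/HTTP fingerprint, replacing the cloudscraper session used before. A minimal standalone sketch of that pattern, outside webscout (the example URL is only a placeholder, not a webscout endpoint):

# Minimal sketch: curl_cffi Session with browser impersonation.
# "https://example.com" is a placeholder target, not part of webscout.
from curl_cffi.requests import Session

session = Session(impersonate="chrome120", timeout=30)  # mimic a Chrome 120 fingerprint
session.headers.update({"Accept": "application/json"})
resp = session.get("https://example.com")
print(resp.status_code, resp.headers.get("content-type"))

Switching to another profile (for example "safari17_0") simply means constructing a new Session, which is what refresh_identity() does in a later hunk.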
@@ -54,28 +63,17 @@ class ChatGPTClone(Provider):
         self.top_p = top_p
         self.system_prompt = system_prompt

-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-        # Use fingerprinting to create a consistent browser identity
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
         self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
             "Content-Type": "application/json",
-            "DNT": "1",
             "Origin": self.url,
             "Referer": f"{self.url}/",
-            "
-            "Sec-
-            "Sec-
-            "
+            "DNT": "1",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "TE": "trailers"
         }
-
-        # Create session cookies with unique identifiers
-        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+        self.session.headers.update(self.headers)

         self.__available_optimizers = (
             method
@@ -92,35 +90,32 @@ class ChatGPTClone(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-        # Set consistent headers for the scraper session
-        for header, value in self.headers.items():
-            self.session.headers[header] = value
-
-    def refresh_identity(self, browser: str = None):
-        """Refreshes the browser identity fingerprint."""
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-        })
-
-        # Update session headers
-        for header, value in self.headers.items():
-            self.session.headers[header] = value
-
-        # Generate new cookies
-        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
-
-        return self.fingerprint

+    def refresh_identity(self, impersonate: str = None):
+        """Re-initializes the curl_cffi session with a new impersonation target."""
+        impersonate = impersonate or self.impersonate
+        if impersonate not in self.SUPPORTED_IMPERSONATION:
+            raise ValueError(f"Invalid impersonate browser: {impersonate}. Choose from: {self.SUPPORTED_IMPERSONATION}")
+        self.impersonate = impersonate
+        self.session = Session(
+            impersonate=self.impersonate,
+            proxies=self.session.proxies,
+            timeout=self.timeout
+        )
+        self.session.headers.update(self.headers)
+        return self.impersonate
+
+    @staticmethod
+    def _chatgptclone_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the ChatGPTClone stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"((?:[^\\"]|\\.)*)"', chunk) # Use the existing regex
+            if match:
+                content = match.group(1)
+                # Decode JSON string escapes and then unicode escapes
+                decoded_content = json.loads(f'"{content}"').encode().decode('unicode_escape')
+                return decoded_content
+        return None
     def ask(
         self,
         prompt: str,
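The new _chatgptclone_extractor above parses the upstream chunk format, where the payload text arrives as 0:"..." segments. A self-contained sketch of the same decoding steps (the sample chunk string is invented for illustration):

# Standalone sketch of the 0:"..." extraction shown above; the sample chunk is illustrative only.
import json
import re

def extract_chunk(chunk: str):
    match = re.search(r'0:"((?:[^\\"]|\\.)*)"', chunk)
    if not match:
        return None
    # json.loads resolves \" and \n escapes; unicode_escape resolves \uXXXX sequences
    return json.loads(f'"{match.group(1)}"').encode().decode("unicode_escape")

print(extract_chunk('0:"Hello, \\"world\\"!"'))  # -> Hello, "world"!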
@@ -129,7 +124,7 @@ class ChatGPTClone(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
-        """Send a message to the ChatGPT Clone API"""
+        """Send a message to the ChatGPT Clone API using curl_cffi"""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -149,38 +144,53 @@ class ChatGPTClone(Provider):
             "model": self.model
         }

+        api_url = f"{self.url}/api/chat"
+
+        def _make_request(attempt_refresh=True):
+            try:
+                response = self.session.post(api_url, json=payload, stream=True)
+                response.raise_for_status()
+                return response
+            except RequestsError as e:
+                if attempt_refresh and e.response and e.response.status_code in [403, 429]:
+                    self.refresh_identity()
+                    return _make_request(attempt_refresh=False)
+                else:
+                    err_msg = f"Request failed: {e}"
+                    if e.response is not None:
+                        err_msg = f"Failed to generate response - ({e.response.status_code}, {e.response.reason}) - {e.response.text}"
+                    raise exceptions.FailedToGenerateResponseError(err_msg) from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}") from e
+
         def for_stream():
+            response = _make_request()
+            streaming_text = "" # Initialize outside try block
             try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                streaming_text = ""
-                for line in response.iter_lines(decode_unicode=True):
-                    if line:
-                        match = re.search(r'0:"(.*?)"', line)
-                        if match:
-                            content = match.group(1)
-                            streaming_text += content
-                            yield content if raw else dict(text=content)
-
-                self.last_response.update(dict(text=streaming_text))
-                self.conversation.update_chat_history(prompt, streaming_text)
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is text after extraction
+                    content_extractor=self._chatgptclone_extractor, # Use the specific extractor
+                    yield_raw_on_error=True # Yield even if extractor fails (might get metadata lines)
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _chatgptclone_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+
+            except RequestsError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Stream interrupted by request error: {e}") from e
             except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"
+                raise exceptions.FailedToGenerateResponseError(f"Error processing stream: {e}") from e
+            finally:
+                # Update history after stream finishes or fails
+                self.last_response.update(dict(text=streaming_text))
+                self.conversation.update_chat_history(prompt, streaming_text)
+                response.close()

         def for_non_stream():
             for _ in for_stream():
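The _make_request helper above retries exactly once when the server answers 403 or 429, refreshing the impersonation profile in between. A minimal sketch of that retry-once shape, with hypothetical send() and refresh() callables standing in for session.post() and refresh_identity():

# Sketch of the retry-once-on-block pattern; send() and refresh() are hypothetical stand-ins.
def request_with_refresh(send, refresh, retry_statuses=(403, 429)):
    def attempt(allow_retry=True):
        status, body = send()
        if allow_retry and status in retry_statuses:
            refresh()                          # rotate to a new browser fingerprint
            return attempt(allow_retry=False)  # second and final attempt
        if status >= 400:
            raise RuntimeError(f"request failed with status {status}")
        return body
    return attempt()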
@@ -202,25 +212,26 @@ class ChatGPTClone(Provider):
                 prompt, True, optimizer=optimizer, conversationally=conversationally
             ):
                 yield self.get_message(response)
-
         def for_non_stream():
             return self.get_message(
                 self.ask(
                     prompt, False, optimizer=optimizer, conversationally=conversationally
                 )
             )
-
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
         """Extract message text from response"""
         assert isinstance(response, dict)
-
+        if not isinstance(response, dict) or "text" not in response:
+            return str(response)
+        # Extractor handles formatting
+        formatted_text = response.get("text", "")
         return formatted_text

 if __name__ == "__main__":
     from rich import print
-    ai = ChatGPTClone(timeout=
+    ai = ChatGPTClone(timeout=120, impersonate="chrome120")
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
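Both rewritten providers now route raw response bytes through webscout's sanitize_stream helper with a provider-specific content_extractor, replacing the old hand-rolled iter_lines loops. A simplified stand-in for that pipeline (not the real sanitize_stream, whose keyword arguments appear in the hunks above):

# Simplified stand-in for the extractor-driven streaming used above; not the real sanitize_stream.
from typing import Callable, Iterable, Iterator, Optional, Union

def stream_with_extractor(
    data: Iterable[Union[bytes, str]],
    content_extractor: Callable[[str], Optional[str]],
) -> Iterator[str]:
    for chunk in data:
        text = chunk.decode("utf-8", errors="ignore") if isinstance(chunk, bytes) else chunk
        piece = content_extractor(text)
        if piece:
            yield piece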
webscout/Provider/ChatSandbox.py (new file)

@@ -0,0 +1,342 @@
+from typing import Optional, Union, Any, Dict, Generator, List
+from uuid import uuid4
+import json
+import re
+import random
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+from curl_cffi.const import CurlHttpVersion
+
+from webscout.AIutel import sanitize_stream
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class ChatSandbox(Provider):
+    """
+    Sends a chat message to the specified model via the chatsandbox API.
+
+    This provider allows you to interact with various AI models through the chatsandbox.com
+    interface, supporting different models/models like OpenAI, DeepSeek, Llama, etc.
+
+    Attributes:
+        model (str): The model to chat with (e.g., "openai", "deepseek", "llama").
+
+    Examples:
+        >>> from webscout.Provider.chatsandbox import ChatSandbox
+        >>> ai = ChatSandbox(model="openai")
+        >>> response = ai.chat("Hello, how are you?")
+        >>> print(response)
+        'I'm doing well, thank you for asking! How can I assist you today?'
+    """
+    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]
+
+    def __init__(
+        self,
+        model: str = "openai",
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """
+        Initializes the ChatSandbox API with given parameters.
+
+        Args:
+            model (str): The model to chat with (e.g., "openai", "deepseek", "llama").
+            is_conversation (bool): Whether the provider is in conversation mode.
+            max_tokens (int): Maximum number of tokens to sample.
+            timeout (int): Timeout for API requests.
+            intro (str): Introduction message for the conversation.
+            filepath (str): Filepath for storing conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxies for the API requests.
+            history_offset (int): Offset for conversation history.
+            act (str): Act for the conversation.
+
+        Examples:
+            >>> ai = ChatSandbox(model="openai", system_prompt="You are a friendly assistant.")
+            >>> print(ai.model)
+            'openai'
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.model = model
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://chatsandbox.com/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        # Set up headers
+        self.headers = {
+            'authority': 'chatsandbox.com',
+            'accept': '*/*',
+            'accept-encoding': 'gzip, deflate, br',
+            'accept-language': 'en-US,en;q=0.9',
+            'content-type': 'application/json',
+            'origin': 'https://chatsandbox.com',
+            'referer': f'https://chatsandbox.com/chat/{self.model}',
+            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': self.agent.random(),
+            'dnt': '1',
+            'sec-gpc': '1',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    @staticmethod
+    def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the chatsandbox stream format."""
+        if isinstance(chunk, str):
+            try:
+                data = json.loads(chunk)
+                if isinstance(data, dict) and "reasoning_content" in data:
+                    return data["reasoning_content"]
+                return chunk
+            except json.JSONDecodeError:
+                return chunk
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the ChatSandbox API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Union[Dict[str, Any], Generator]: The API response.
+
+        Examples:
+            >>> ai = ChatSandbox()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> print(response)
+            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        # Prepare the payload
+        payload = {
+            "messages": [conversation_prompt],
+            "character": self.model
+        }
+
+        def for_stream():
+            try:
+                # Use curl_cffi session post with updated impersonate and http_version
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120", # Try a different impersonation profile
+                    http_version=CurlHttpVersion.V1_1 # Force HTTP/1.1
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+
+                streaming_response = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix to remove here
+                    to_json=False, # Content is not JSON
+                    content_extractor=self._chatsandbox_extractor # Use the specific extractor
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_response += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                # Include the original exception type in the message for clarity
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+        def for_non_stream():
+            # This function implicitly uses the updated for_stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the ChatSandbox API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            str: The API response.
+
+        Examples:
+            >>> ai = ChatSandbox()
+            >>> response = ai.chat("What's the weather today?")
+            >>> print(response)
+            'I don't have real-time weather data, but I can help you find weather information online.'
+        """
+        def for_stream():
+            for response in self.ask(
+                prompt,
+                stream=True,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            ):
+                yield response.get("text", "")
+
+        if stream:
+            return for_stream()
+        else:
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,
+                    raw=False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """
+        Extract the message from the API response.
+
+        Args:
+            response (Dict[str, Any]): The API response.
+
+        Returns:
+            str: The extracted message.
+        """
+        if not isinstance(response, dict):
+            return str(response)
+
+        raw_text = response.get("text", "")
+
+        # Try to parse as JSON
+        try:
+            data = json.loads(raw_text)
+            if isinstance(data, dict):
+                # Check for different response formats
+                if "reasoning_content" in data:
+                    return data["reasoning_content"]
+                elif "content" in data:
+                    return data["content"]
+                elif "message" in data:
+                    return data["message"]
+                elif "response" in data:
+                    return data["response"]
+                elif "text" in data:
+                    return data["text"]
+            # Return the whole JSON if no specific field is found
+            return json.dumps(data, ensure_ascii=False)
+        except json.JSONDecodeError:
+            # If it's not JSON, return the raw text
+            pass
+
+        return raw_text.strip()
+
+# --- Example Usage ---
+if __name__ == "__main__":
+    from rich import print
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in ChatSandbox.AVAILABLE_MODELS:
+        try:
+            test_ai = ChatSandbox(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")