webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +53 -800
- webscout/Bard.py +2 -22
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +26 -11
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +81 -57
- webscout/Provider/ExaChat.py +9 -5
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/Netwrck.py +5 -8
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/README.md +1 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +1 -3
- webscout/Provider/OPENAI/autoproxy.py +1 -1
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +60 -24
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/monochat.py +3 -3
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +86 -49
- webscout/Provider/OPENAI/textpollinations.py +19 -14
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +478 -0
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/monochat.py +3 -3
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +19 -14
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/scira_chat.py +115 -21
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/Provider/x0gpt.py +325 -315
- webscout/__init__.py +4 -11
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +119 -5
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Provider/copilot.py
CHANGED

@@ -112,10 +112,18 @@ class Copilot(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-        images
+        images=None,
         api_key: str = None,
         **kwargs
     ) -> Union[Dict[str, Any], Generator]:
+        """
+        Enhanced Copilot.ask with:
+        - return_conversation support
+        - multiple image upload
+        - event dispatch for websocket events
+        - suggested followups and metadata
+        - improved error handling
+        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -125,7 +133,33 @@ class Copilot(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

-
+        def handle_event(msg, state):
+            event = msg.get("event")
+            if event == "appendText":
+                state["is_started"] = True
+                content = msg.get("text")
+                state["streaming_text"] += content
+                resp = {"text": content}
+                return resp if raw else resp
+            elif event == "generatingImage":
+                state["image_prompt"] = msg.get("prompt")
+            elif event == "imageGenerated":
+                return {"type": "image", "url": msg.get("url"), "prompt": state.get("image_prompt"), "preview": msg.get("thumbnailUrl")}
+            elif event == "done":
+                state["done"] = True
+            elif event == "suggestedFollowups":
+                return {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
+            elif event == "replaceText":
+                content = msg.get("text")
+                state["streaming_text"] += content
+                resp = {"text": content}
+                return resp if raw else resp
+            elif event == "error":
+                raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
+            elif event not in ["received", "startMessage", "citation", "partCompleted"]:
+                pass
+            return None
+
         def for_stream():
             try:
                 if not has_curl_cffi:
@@ -133,15 +167,15 @@ class Copilot(Provider):

                 websocket_url = self.websocket_url
                 headers = None
-
-
+
+                # Auth logic (token/cookies)
+                if images is not None or api_key is not None:
                     if api_key is not None:
                         self._access_token = api_key
                     if self._access_token is None:
                         try:
                             self._access_token, self._cookies = readHAR(self.url)
                         except NoValidHarFileError as h:
-                            # print(f"Copilot: {h}")
                             if has_nodriver:
                                 yield {"type": "login", "provider": self.label, "url": os.environ.get("webscout_login", "")}
                                 self._access_token, self._cookies = asyncio.run(get_access_token_and_cookies(self.url, self.proxies.get("https")))
@@ -159,7 +193,7 @@ class Copilot(Provider):
                 ) as session:
                     if self._access_token is not None:
                         self._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
-
+
                     response = session.get(f"{self.url}/c/api/user")
                     if response.status_code == 401:
                         raise exceptions.AuthenticationError("Status 401: Invalid access token")
@@ -168,9 +202,8 @@ class Copilot(Provider):
                         user = response.json().get('firstName')
                         if user is None:
                             self._access_token = None
-                    # print(f"Copilot: User: {user or 'null'}")

-                    #
+                    # Conversation management
                     conversation = kwargs.get("conversation", None)
                     if conversation is None:
                         response = session.post(self.conversation_url)
@@ -180,30 +213,26 @@ class Copilot(Provider):
                         conversation = CopilotConversation(conversation_id)
                         if kwargs.get("return_conversation", False):
                             yield conversation
-                        # print(f"Copilot: Created conversation: {conversation_id}")
                     else:
                         conversation_id = conversation.conversation_id
-                        # print(f"Copilot: Use conversation: {conversation_id}")

-                    #
+                    # Multiple image upload
                     uploaded_images = []
                     if images is not None:
-                        for
+                        for image_tuple in images:
+                            image = image_tuple[0] if isinstance(image_tuple, (tuple, list)) else image_tuple
                             # Convert image to bytes if needed
                             if isinstance(image, str):
                                 if image.startswith("data:"):
-                                    # Data URL
                                     header, encoded = image.split(",", 1)
                                     data = base64.b64decode(encoded)
                                 else:
-                                    # File path or URL
                                     with open(image, "rb") as f:
                                         data = f.read()
                             else:
                                 data = image
-
                             # Get content type
-                            content_type = "image/jpeg"
+                            content_type = "image/jpeg"
                             if data[:2] == b'\xff\xd8':
                                 content_type = "image/jpeg"
                             elif data[:8] == b'\x89PNG\r\n\x1a\n':
@@ -212,7 +241,6 @@ class Copilot(Provider):
                                 content_type = "image/gif"
                             elif data[:2] in (b'BM', b'BA'):
                                 content_type = "image/bmp"
-
                             response = session.post(
                                 f"{self.url}/c/api/attachments",
                                 headers={"content-type": content_type},
@@ -220,12 +248,11 @@ class Copilot(Provider):
                             )
                             if response.status_code != 200:
                                 raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
-                            uploaded_images.append({"type":"image", "url": response.json().get("url")})
-                            break
+                            uploaded_images.append({"type": "image", "url": response.json().get("url")})

-                    #
+                    # WebSocket connection
                     wss = session.ws_connect(websocket_url)
-                    wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}))
+                    wss.send(json.dumps({"event": "setOptions", "supportedCards": ["weather", "local", "image", "sports", "video", "ads", "finance"], "ads": {"supportedTypes": ["multimedia", "product", "tourActivity", "propertyPromotion", "text"]}}))
                     wss.send(json.dumps({
                         "event": "send",
                         "conversationId": conversation_id,
@@ -236,56 +263,26 @@ class Copilot(Provider):
                         "mode": "reasoning" if "Think" in self.model else "chat"
                     }).encode(), CurlWsFlag.TEXT)

-                    #
-
-                    msg = None
-                    image_prompt: str = None
+                    # Event-driven response loop
+                    state = {"is_started": False, "image_prompt": None, "done": False, "streaming_text": ""}
                     last_msg = None
-                    streaming_text = ""
-
                     try:
-                        while
+                        while not state["done"]:
                             try:
                                 msg = wss.recv()[0]
                                 msg = json.loads(msg)
-                            except:
+                            except Exception:
                                 break
                             last_msg = msg
-                            if msg.get("event") == "appendText":
-                                is_started = True
-                                content = msg.get("text")
-                                streaming_text += content
-                                resp = {"text": content}
-                                yield resp if raw else resp
-                            elif msg.get("event") == "generatingImage":
-                                image_prompt = msg.get("prompt")
-                            elif msg.get("event") == "imageGenerated":
-                                yield {"type": "image", "url": msg.get("url"), "prompt": image_prompt, "preview": msg.get("thumbnailUrl")}
-                            elif msg.get("event") == "done":
-                                break
-                            elif msg.get("event") == "suggestedFollowups":
-                                yield {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
-                                break
-                            elif msg.get("event") == "replaceText":
-                                content = msg.get("text")
-                                streaming_text += content
-                                resp = {"text": content}
-                                yield resp if raw else resp
-                            elif msg.get("event") == "error":
-                                raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
-                            elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
-                                print(f"Copilot Message: {msg}")
-
-                        if not is_started:
+                            result = handle_event(msg, state)
+                            if result is not None:
+                                yield result
+                        if not state["is_started"]:
                             raise exceptions.FailedToGenerateResponseError(f"Invalid response: {last_msg}")
-
-
-                        self.conversation.update_chat_history(prompt, streaming_text)
-                        self.last_response = {"text": streaming_text}
-
+                        self.conversation.update_chat_history(prompt, state["streaming_text"])
+                        self.last_response = {"text": state["streaming_text"]}
                     finally:
                         wss.close()
-
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
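Note: the hunks above replace Copilot's inline if/elif websocket handling with a single handle_event(msg, state) dispatcher that mutates a shared state dict and returns either a chunk to yield or None. A minimal standalone sketch of that pattern follows; the sample messages are made up, since real ones arrive over the Copilot websocket:

# Sketch of the state-dict event dispatch introduced in Copilot.ask.
# The sample events below are hypothetical stand-ins for websocket messages.
def handle_event(msg, state):
    event = msg.get("event")
    if event == "appendText":
        state["is_started"] = True
        state["streaming_text"] += msg.get("text", "")
        return {"text": msg.get("text", "")}  # chunk for the caller to yield
    if event == "done":
        state["done"] = True  # flips the loop condition; nothing to yield
    return None  # unhandled events produce no chunk

state = {"is_started": False, "done": False, "streaming_text": ""}
for msg in [{"event": "appendText", "text": "Hello"}, {"event": "done"}]:
    chunk = handle_event(msg, state)
    if chunk is not None:
        print(chunk)  # {'text': 'Hello'}
print(state["streaming_text"])  # Hello

Keeping per-stream state in one dict is what lets handle_event live outside the for_stream generator without nonlocal declarations.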
webscout/Provider/freeaichat.py
CHANGED

@@ -2,6 +2,8 @@ import re
 import requests
 import json
 import uuid
+import random
+import string
 from typing import Any, Dict, Optional, Generator, Union

 from webscout.AIutel import Optimizers
@@ -18,70 +20,74 @@ class FreeAIChat(Provider):

     AVAILABLE_MODELS = [
         # OpenAI Models
+        "Deepseek R1 Latest",
         "GPT 4o",
-        "GPT 4.5 Preview",
-        "GPT 4o Latest",
-        "GPT 4o mini",
-        "GPT 4o Search Preview",
-        "O1",
-        "O1 Mini",
-        "O3 Mini",
-        "O3 Mini High",
-        "O3 Mini Low",
         "O4 Mini",
         "O4 Mini High",
+        "QwQ Plus",
+        "Llama 4 Maverick",
+        "Grok 3",
+        "GPT 4o mini",
+        "Deepseek v3 0324",
+        "Grok 3 Mini",
         "GPT 4.1",
-        "o3",
         "GPT 4.1 Mini",
-
-        # Anthropic Models
-        "Claude 3.5 haiku",
-        "claude 3.5 sonnet",
-        "Claude 3.7 Sonnet",
         "Claude 3.7 Sonnet (Thinking)",
-
-        # Deepseek Models
-        "Deepseek R1",
-        "Deepseek R1 Fast",
-        "Deepseek V3",
-        "Deepseek v3 0324",
-
-        # Google Models
-        "Gemini 1.5 Flash",
-        "Gemini 1.5 Pro",
-        "Gemini 2.0 Flash",
-        "Gemini 2.0 Pro",
-        "Gemini 2.5 Pro",
-
-        # Llama Models
-        "Llama 3.1 405B",
-        "Llama 3.1 70B Fast",
-        "Llama 3.3 70B",
-        "Llama 3.2 90B Vision",
         "Llama 4 Scout",
-        "
-
-
-        "
-        "
-        "
-
-
-        "
-        "
-        "
-        "QwQ 32B",
-        "QwQ Plus",
-
-        # XAI Models
-        "Grok 2",
-        "Grok 3",
+        "O3 High",
+        "Gemini 2.5 Pro",
+        "Magistral Medium 2506",
+        "O3",
+        "Gemini 2.5 Flash",
+        "Qwen 3 235B A22B",
+        "Claude 4 Sonnet",
+        "Claude 4 Sonnet (Thinking)",
+        "Claude 4 Opus",
+        "Claude 4 Opus (Thinking)",
+        "Google: Gemini 2.5 Pro (thinking)",
     ]

+    def _auto_fetch_api_key(self, proxies=None, timeout=30):
+        """
+        Automatically register a new user and fetch an API key from FreeAIChat Playground.
+        """
+        session = requests.Session()
+        if proxies:
+            session.proxies.update(proxies)
+        def random_email():
+            user = ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
+            return f"{user}@bltiwd.com"
+        email = random_email()
+        payload = {"email": email, "password": email}
+        headers = {
+            'User-Agent': LitAgent().random(),
+            'Accept': '*/*',
+            'Content-Type': 'application/json',
+            'Origin': 'https://freeaichatplayground.com',
+            'Referer': 'https://freeaichatplayground.com/register',
+        }
+        try:
+            resp = session.post(
+                "https://freeaichatplayground.com/api/v1/auth/register",
+                headers=headers,
+                json=payload,
+                timeout=timeout
+            )
+            if resp.status_code == 201:
+                data = resp.json()
+                apikey = data.get("user", {}).get("apikey")
+                if apikey:
+                    return apikey
+                else:
+                    raise exceptions.FailedToGenerateResponseError("API key not found in registration response.")
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Registration failed: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"API key auto-fetch failed: {e}")
+
     def __init__(
         self,
-        api_key: str,
+        api_key: str = None,
         is_conversation: bool = True,
         max_tokens: int = 150,
         timeout: int = 30,
@@ -95,7 +101,7 @@ class FreeAIChat(Provider):
         system_prompt: str = "You are a helpful AI assistant.",
         temperature: float = 0.7,
     ):
-        """Initializes the FreeAIChat API client."""
+        """Initializes the FreeAIChat API client. If api_key is not provided, auto-register and fetch one."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

@@ -120,7 +126,10 @@ class FreeAIChat(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.temperature = temperature
-
+        if not api_key:
+            self.api_key = self._auto_fetch_api_key(proxies=proxies, timeout=timeout)
+        else:
+            self.api_key = api_key

         self.__available_optimizers = (
             method
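Note: with api_key now defaulting to None, constructing the provider without a key calls _auto_fetch_api_key, which registers a throwaway account (random 12-character mailbox at bltiwd.com, password equal to the email) and returns user.apikey from the 201 response. A hedged usage sketch, assuming only the constructor behavior shown in the hunks above:

# Sketch only: exercises the auto-registration path added in this release.
from webscout.Provider.freeaichat import FreeAIChat

ai = FreeAIChat(model="GPT 4o", timeout=60)  # no api_key given, so one is auto-fetched
print(ai.api_key)  # key parsed from the register endpoint's JSON response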
webscout/Provider/monochat.py
ADDED

@@ -0,0 +1,275 @@
+from typing import Generator, Optional, Union, Any, Dict
+from uuid import uuid4
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class MonoChat(Provider):
+    """
+    MonoChat provider for interacting with the gg.is-a-furry.dev API (OpenAI-compatible).
+    """
+    AVAILABLE_MODELS = [
+        "deepseek-r1",
+        "deepseek-v3",
+        "uncensored-r1-32b",
+        "o3-pro",
+        "o4-mini",
+        "o3",
+        "gpt-4.5-preview",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "gpt-4o-search-preview",
+        "gpt-4o-mini-search-preview",
+        "gpt-4-turbo"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4.1",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome"
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "origin": "https://gg.is-a-furry.dev",
+            "referer": "https://gg.is-a-furry.dev/",
+            "user-agent": self.fingerprint["user_agent"]
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers.update({
+            "accept-language": self.fingerprint["accept_language"],
+            "user-agent": self.fingerprint["user_agent"]
+        })
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the gg.is-a-furry.dev API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model,
+            "max_tokens": self.max_tokens_to_sample
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_response = ""
+                # Use sanitize_stream with regex-based extraction and filtering (like x0gpt)
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=False,
+                    extract_regexes=[r'0:"(.*?)"'],
+                    skip_regexes=[
+                        r'^f:',
+                        r'^e:',
+                        r'^d:',
+                        r'^\s*$',
+                        r'data:\s*\[DONE\]',
+                        r'event:\s*',
+                        r'^\d+:\s*$',
+                        r'^:\s*$',
+                        r'^\s*[\x00-\x1f]+\s*$',
+                    ],
+                    raw=raw
+                )
+
+                for content_chunk in processed_stream:
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            try:
+                                clean_content = content_chunk.encode().decode('unicode_escape')
+                                clean_content = clean_content.replace('\\\\', '\\').replace('\\"', '"')
+                                streaming_response += clean_content
+                                yield dict(text=clean_content)
+                            except (UnicodeDecodeError, UnicodeEncodeError):
+                                streaming_response += content_chunk
+                                yield dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+        def for_non_stream():
+            if stream:
+                return for_stream()
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the MonoChat API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+            raw (bool): Whether to return raw response chunks.
+
+        Returns:
+            str: The API response.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+
+        def for_non_stream():
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        text = response.get("text", "")
+        return text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = MonoChat(timeout=60)
+    response = ai.chat("In points tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)