webscout 8.3.3__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (46)
  1. webscout/AIutel.py +221 -4
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/scira_search.py +24 -11
  4. webscout/Provider/Deepinfra.py +75 -57
  5. webscout/Provider/ExaChat.py +9 -5
  6. webscout/Provider/Flowith.py +1 -1
  7. webscout/Provider/FreeGemini.py +2 -2
  8. webscout/Provider/Gemini.py +3 -10
  9. webscout/Provider/GeminiProxy.py +31 -5
  10. webscout/Provider/LambdaChat.py +39 -31
  11. webscout/Provider/Netwrck.py +5 -8
  12. webscout/Provider/OLLAMA.py +8 -9
  13. webscout/Provider/OPENAI/README.md +1 -1
  14. webscout/Provider/OPENAI/__init__.py +1 -1
  15. webscout/Provider/OPENAI/autoproxy.py +1 -1
  16. webscout/Provider/OPENAI/copilot.py +73 -26
  17. webscout/Provider/OPENAI/deepinfra.py +54 -24
  18. webscout/Provider/OPENAI/exachat.py +9 -5
  19. webscout/Provider/OPENAI/monochat.py +3 -3
  20. webscout/Provider/OPENAI/netwrck.py +4 -7
  21. webscout/Provider/OPENAI/qodo.py +630 -0
  22. webscout/Provider/OPENAI/scirachat.py +82 -49
  23. webscout/Provider/OPENAI/textpollinations.py +13 -12
  24. webscout/Provider/OPENAI/typegpt.py +3 -3
  25. webscout/Provider/Qodo.py +454 -0
  26. webscout/Provider/TTI/monochat.py +3 -3
  27. webscout/Provider/TextPollinationsAI.py +13 -12
  28. webscout/Provider/__init__.py +4 -4
  29. webscout/Provider/copilot.py +58 -61
  30. webscout/Provider/freeaichat.py +64 -55
  31. webscout/Provider/monochat.py +275 -0
  32. webscout/Provider/scira_chat.py +111 -21
  33. webscout/Provider/typegpt.py +2 -2
  34. webscout/Provider/x0gpt.py +325 -315
  35. webscout/__init__.py +7 -2
  36. webscout/auth/routes.py +20 -3
  37. webscout/version.py +1 -1
  38. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/METADATA +1 -2
  39. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/RECORD +43 -43
  40. webscout/Provider/AI21.py +0 -177
  41. webscout/Provider/HuggingFaceChat.py +0 -469
  42. webscout/Provider/OPENAI/freeaichat.py +0 -363
  43. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  44. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  46. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
@@ -175,15 +175,15 @@ class MonoChatAI(TTICompatibleProvider):
175
175
  AVAILABLE_MODELS = ["nextlm-image-1", "gpt-image-1", "dall-e-3", "dall-e-2"]
176
176
 
177
177
  def __init__(self):
178
- self.api_endpoint = "https://www.chatwithmono.xyz/api"
178
+ self.api_endpoint = "https://gg.is-a-furry.dev/api"
179
179
  self.session = requests.Session()
180
180
  self._setup_session_with_retries()
181
181
  self.user_agent = LitAgent().random()
182
182
  self.headers = {
183
183
  "accept": "*/*",
184
184
  "content-type": "application/json",
185
- "origin": "https://www.chatwithmono.xyz",
186
- "referer": "https://www.chatwithmono.xyz/",
185
+ "origin": "https://gg.is-a-furry.dev",
186
+ "referer": "https://gg.is-a-furry.dev/",
187
187
  "user-agent": self.user_agent,
188
188
  }
189
189
  self.session.headers.update(self.headers)
@@ -18,25 +18,26 @@ class TextPollinationsAI(Provider):
18
18
  "openai",
19
19
  "openai-fast",
20
20
  "openai-large",
21
+ "openai-reasoning",
21
22
  "openai-roblox",
22
- "qwen-coder",
23
- "llama",
23
+ "openai-audio",
24
+ "deepseek",
25
+ "deepseek-reasoning",
26
+ "grok",
24
27
  "llamascout",
25
28
  "mistral",
26
- "unity",
27
- "mirexa",
28
- "midijourney",
29
- "rtist",
29
+ "phi",
30
+ "qwen-coder",
30
31
  "searchgpt",
32
+ "bidara",
33
+ "elixposearch",
31
34
  "evil",
32
- "deepseek-reasoning",
33
- "phi",
34
- "hormoz",
35
35
  "hypnosis-tracy",
36
- "deepseek",
36
+ "midijourney",
37
+ "mirexa",
38
+ "rtist",
37
39
  "sur",
38
- "bidara",
39
- "openai-audio",
40
+ "unity",
40
41
  ]
41
42
  _models_url = "https://text.pollinations.ai/models"
42
43
 
@@ -24,7 +24,6 @@ from .yep import *
24
24
  from .Cloudflare import *
25
25
  from .turboseek import *
26
26
  from .TeachAnything import *
27
- from .AI21 import *
28
27
  from .x0gpt import *
29
28
  from .cerebras import *
30
29
  from .geminiapi import *
@@ -52,7 +51,6 @@ from .AllenAI import *
52
51
  from .HeckAI import *
53
52
  from .TwoAI import *
54
53
  from .Venice import *
55
- from .HuggingFaceChat import *
56
54
  from .GithubChat import *
57
55
  from .copilot import *
58
56
  from .sonus import *
@@ -87,9 +85,13 @@ from .deepseek_assistant import DeepSeekAssistant
87
85
  from .GeminiProxy import GeminiProxy
88
86
  from .TogetherAI import TogetherAI
89
87
  from .MiniMax import MiniMax
88
+ from .Qodo import *
89
+ from .monochat import MonoChat
90
90
  __all__ = [
91
91
  'SCNet',
92
+ 'MonoChat',
92
93
  'MiniMax',
94
+ 'QodoAI',
93
95
  'GeminiProxy',
94
96
  'TogetherAI',
95
97
  'oivscode',
@@ -109,7 +111,6 @@ __all__ = [
109
111
  'Venice',
110
112
  'ExaAI',
111
113
  'Copilot',
112
- 'HuggingFaceChat',
113
114
  'TwoAI',
114
115
  'HeckAI',
115
116
  'AllenAI',
@@ -145,7 +146,6 @@ __all__ = [
145
146
  'Cloudflare',
146
147
  'TurboSeek',
147
148
  'TeachAnything',
148
- 'AI21',
149
149
  'X0GPT',
150
150
  'Cerebras',
151
151
  'GEMINIAPI',
@@ -112,10 +112,18 @@ class Copilot(Provider):
112
112
  raw: bool = False,
113
113
  optimizer: str = None,
114
114
  conversationally: bool = False,
115
- images = None,
115
+ images=None,
116
116
  api_key: str = None,
117
117
  **kwargs
118
118
  ) -> Union[Dict[str, Any], Generator]:
119
+ """
120
+ Enhanced Copilot.ask with:
121
+ - return_conversation support
122
+ - multiple image upload
123
+ - event dispatch for websocket events
124
+ - suggested followups and metadata
125
+ - improved error handling
126
+ """
119
127
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
120
128
  if optimizer:
121
129
  if optimizer in self.__available_optimizers:
@@ -125,7 +133,33 @@ class Copilot(Provider):
125
133
  else:
126
134
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
127
135
 
128
- # Main logic for calling Copilot API
136
+ def handle_event(msg, state):
137
+ event = msg.get("event")
138
+ if event == "appendText":
139
+ state["is_started"] = True
140
+ content = msg.get("text")
141
+ state["streaming_text"] += content
142
+ resp = {"text": content}
143
+ return resp if raw else resp
144
+ elif event == "generatingImage":
145
+ state["image_prompt"] = msg.get("prompt")
146
+ elif event == "imageGenerated":
147
+ return {"type": "image", "url": msg.get("url"), "prompt": state.get("image_prompt"), "preview": msg.get("thumbnailUrl")}
148
+ elif event == "done":
149
+ state["done"] = True
150
+ elif event == "suggestedFollowups":
151
+ return {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
152
+ elif event == "replaceText":
153
+ content = msg.get("text")
154
+ state["streaming_text"] += content
155
+ resp = {"text": content}
156
+ return resp if raw else resp
157
+ elif event == "error":
158
+ raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
159
+ elif event not in ["received", "startMessage", "citation", "partCompleted"]:
160
+ pass
161
+ return None
162
+
129
163
  def for_stream():
130
164
  try:
131
165
  if not has_curl_cffi:
@@ -133,15 +167,15 @@ class Copilot(Provider):
133
167
 
134
168
  websocket_url = self.websocket_url
135
169
  headers = None
136
-
137
- if images is not None:
170
+
171
+ # Auth logic (token/cookies)
172
+ if images is not None or api_key is not None:
138
173
  if api_key is not None:
139
174
  self._access_token = api_key
140
175
  if self._access_token is None:
141
176
  try:
142
177
  self._access_token, self._cookies = readHAR(self.url)
143
178
  except NoValidHarFileError as h:
144
- # print(f"Copilot: {h}")
145
179
  if has_nodriver:
146
180
  yield {"type": "login", "provider": self.label, "url": os.environ.get("webscout_login", "")}
147
181
  self._access_token, self._cookies = asyncio.run(get_access_token_and_cookies(self.url, self.proxies.get("https")))
@@ -159,7 +193,7 @@ class Copilot(Provider):
159
193
  ) as session:
160
194
  if self._access_token is not None:
161
195
  self._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
162
-
196
+
163
197
  response = session.get(f"{self.url}/c/api/user")
164
198
  if response.status_code == 401:
165
199
  raise exceptions.AuthenticationError("Status 401: Invalid access token")
@@ -168,9 +202,8 @@ class Copilot(Provider):
168
202
  user = response.json().get('firstName')
169
203
  if user is None:
170
204
  self._access_token = None
171
- # print(f"Copilot: User: {user or 'null'}")
172
205
 
173
- # Create or use existing conversation
206
+ # Conversation management
174
207
  conversation = kwargs.get("conversation", None)
175
208
  if conversation is None:
176
209
  response = session.post(self.conversation_url)
@@ -180,30 +213,26 @@ class Copilot(Provider):
180
213
  conversation = CopilotConversation(conversation_id)
181
214
  if kwargs.get("return_conversation", False):
182
215
  yield conversation
183
- # print(f"Copilot: Created conversation: {conversation_id}")
184
216
  else:
185
217
  conversation_id = conversation.conversation_id
186
- # print(f"Copilot: Use conversation: {conversation_id}")
187
218
 
188
- # Handle image uploads if any
219
+ # Multiple image upload
189
220
  uploaded_images = []
190
221
  if images is not None:
191
- for image, _ in images:
222
+ for image_tuple in images:
223
+ image = image_tuple[0] if isinstance(image_tuple, (tuple, list)) else image_tuple
192
224
  # Convert image to bytes if needed
193
225
  if isinstance(image, str):
194
226
  if image.startswith("data:"):
195
- # Data URL
196
227
  header, encoded = image.split(",", 1)
197
228
  data = base64.b64decode(encoded)
198
229
  else:
199
- # File path or URL
200
230
  with open(image, "rb") as f:
201
231
  data = f.read()
202
232
  else:
203
233
  data = image
204
-
205
234
  # Get content type
206
- content_type = "image/jpeg" # Default
235
+ content_type = "image/jpeg"
207
236
  if data[:2] == b'\xff\xd8':
208
237
  content_type = "image/jpeg"
209
238
  elif data[:8] == b'\x89PNG\r\n\x1a\n':
@@ -212,7 +241,6 @@ class Copilot(Provider):
212
241
  content_type = "image/gif"
213
242
  elif data[:2] in (b'BM', b'BA'):
214
243
  content_type = "image/bmp"
215
-
216
244
  response = session.post(
217
245
  f"{self.url}/c/api/attachments",
218
246
  headers={"content-type": content_type},
@@ -220,12 +248,11 @@ class Copilot(Provider):
220
248
  )
221
249
  if response.status_code != 200:
222
250
  raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
223
- uploaded_images.append({"type":"image", "url": response.json().get("url")})
224
- break
251
+ uploaded_images.append({"type": "image", "url": response.json().get("url")})
225
252
 
226
- # Connect to WebSocket
253
+ # WebSocket connection
227
254
  wss = session.ws_connect(websocket_url)
228
- wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
255
+ wss.send(json.dumps({"event": "setOptions", "supportedCards": ["weather", "local", "image", "sports", "video", "ads", "finance"], "ads": {"supportedTypes": ["multimedia", "product", "tourActivity", "propertyPromotion", "text"]}}))
229
256
  wss.send(json.dumps({
230
257
  "event": "send",
231
258
  "conversationId": conversation_id,
@@ -236,56 +263,26 @@ class Copilot(Provider):
236
263
  "mode": "reasoning" if "Think" in self.model else "chat"
237
264
  }).encode(), CurlWsFlag.TEXT)
238
265
 
239
- # Process response
240
- is_started = False
241
- msg = None
242
- image_prompt: str = None
266
+ # Event-driven response loop
267
+ state = {"is_started": False, "image_prompt": None, "done": False, "streaming_text": ""}
243
268
  last_msg = None
244
- streaming_text = ""
245
-
246
269
  try:
247
- while True:
270
+ while not state["done"]:
248
271
  try:
249
272
  msg = wss.recv()[0]
250
273
  msg = json.loads(msg)
251
- except:
274
+ except Exception:
252
275
  break
253
276
  last_msg = msg
254
- if msg.get("event") == "appendText":
255
- is_started = True
256
- content = msg.get("text")
257
- streaming_text += content
258
- resp = {"text": content}
259
- yield resp if raw else resp
260
- elif msg.get("event") == "generatingImage":
261
- image_prompt = msg.get("prompt")
262
- elif msg.get("event") == "imageGenerated":
263
- yield {"type": "image", "url": msg.get("url"), "prompt": image_prompt, "preview": msg.get("thumbnailUrl")}
264
- elif msg.get("event") == "done":
265
- break
266
- elif msg.get("event") == "suggestedFollowups":
267
- yield {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
268
- break
269
- elif msg.get("event") == "replaceText":
270
- content = msg.get("text")
271
- streaming_text += content
272
- resp = {"text": content}
273
- yield resp if raw else resp
274
- elif msg.get("event") == "error":
275
- raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
276
- elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
277
- print(f"Copilot Message: {msg}")
278
-
279
- if not is_started:
277
+ result = handle_event(msg, state)
278
+ if result is not None:
279
+ yield result
280
+ if not state["is_started"]:
280
281
  raise exceptions.FailedToGenerateResponseError(f"Invalid response: {last_msg}")
281
-
282
- # Update conversation history
283
- self.conversation.update_chat_history(prompt, streaming_text)
284
- self.last_response = {"text": streaming_text}
285
-
282
+ self.conversation.update_chat_history(prompt, state["streaming_text"])
283
+ self.last_response = {"text": state["streaming_text"]}
286
284
  finally:
287
285
  wss.close()
288
-
289
286
  except Exception as e:
290
287
  raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
291
288
 
@@ -2,6 +2,8 @@ import re
2
2
  import requests
3
3
  import json
4
4
  import uuid
5
+ import random
6
+ import string
5
7
  from typing import Any, Dict, Optional, Generator, Union
6
8
 
7
9
  from webscout.AIutel import Optimizers
@@ -18,70 +20,74 @@ class FreeAIChat(Provider):
18
20
 
19
21
  AVAILABLE_MODELS = [
20
22
  # OpenAI Models
23
+ "Deepseek R1 Latest",
21
24
  "GPT 4o",
22
- "GPT 4.5 Preview",
23
- "GPT 4o Latest",
24
- "GPT 4o mini",
25
- "GPT 4o Search Preview",
26
- "O1",
27
- "O1 Mini",
28
- "O3 Mini",
29
- "O3 Mini High",
30
- "O3 Mini Low",
31
25
  "O4 Mini",
32
26
  "O4 Mini High",
27
+ "QwQ Plus",
28
+ "Llama 4 Maverick",
29
+ "Grok 3",
30
+ "GPT 4o mini",
31
+ "Deepseek v3 0324",
32
+ "Grok 3 Mini",
33
33
  "GPT 4.1",
34
- "o3",
35
34
  "GPT 4.1 Mini",
36
-
37
-
38
- # Anthropic Models
39
- "Claude 3.5 haiku",
40
- "claude 3.5 sonnet",
41
- "Claude 3.7 Sonnet",
42
35
  "Claude 3.7 Sonnet (Thinking)",
43
-
44
- # Deepseek Models
45
- "Deepseek R1",
46
- "Deepseek R1 Fast",
47
- "Deepseek V3",
48
- "Deepseek v3 0324",
49
-
50
- # Google Models
51
- "Gemini 1.5 Flash",
52
- "Gemini 1.5 Pro",
53
- "Gemini 2.0 Flash",
54
- "Gemini 2.0 Pro",
55
- "Gemini 2.5 Pro",
56
-
57
- # Llama Models
58
- "Llama 3.1 405B",
59
- "Llama 3.1 70B Fast",
60
- "Llama 3.3 70B",
61
- "Llama 3.2 90B Vision",
62
36
  "Llama 4 Scout",
63
- "Llama 4 Maverick",
64
-
65
- # Mistral Models
66
- "Mistral Large",
67
- "Mistral Nemo",
68
- "Mixtral 8x22B",
69
-
70
- # Qwen Models
71
- "Qwen Max",
72
- "Qwen Plus",
73
- "Qwen Turbo",
74
- "QwQ 32B",
75
- "QwQ Plus",
76
-
77
- # XAI Models
78
- "Grok 2",
79
- "Grok 3",
37
+ "O3 High",
38
+ "Gemini 2.5 Pro",
39
+ "Magistral Medium 2506",
40
+ "O3",
41
+ "Gemini 2.5 Flash",
42
+ "Qwen 3 235B A22B",
43
+ "Claude 4 Sonnet",
44
+ "Claude 4 Sonnet (Thinking)",
45
+ "Claude 4 Opus",
46
+ "Claude 4 Opus (Thinking)",
47
+ "Google: Gemini 2.5 Pro (thinking)",
80
48
  ]
81
49
 
50
+ def _auto_fetch_api_key(self, proxies=None, timeout=30):
51
+ """
52
+ Automatically register a new user and fetch an API key from FreeAIChat Playground.
53
+ """
54
+ session = requests.Session()
55
+ if proxies:
56
+ session.proxies.update(proxies)
57
+ def random_email():
58
+ user = ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
59
+ return f"{user}@bltiwd.com"
60
+ email = random_email()
61
+ payload = {"email": email, "password": email}
62
+ headers = {
63
+ 'User-Agent': LitAgent().random(),
64
+ 'Accept': '*/*',
65
+ 'Content-Type': 'application/json',
66
+ 'Origin': 'https://freeaichatplayground.com',
67
+ 'Referer': 'https://freeaichatplayground.com/register',
68
+ }
69
+ try:
70
+ resp = session.post(
71
+ "https://freeaichatplayground.com/api/v1/auth/register",
72
+ headers=headers,
73
+ json=payload,
74
+ timeout=timeout
75
+ )
76
+ if resp.status_code == 201:
77
+ data = resp.json()
78
+ apikey = data.get("user", {}).get("apikey")
79
+ if apikey:
80
+ return apikey
81
+ else:
82
+ raise exceptions.FailedToGenerateResponseError("API key not found in registration response.")
83
+ else:
84
+ raise exceptions.FailedToGenerateResponseError(f"Registration failed: {resp.status_code} {resp.text}")
85
+ except Exception as e:
86
+ raise exceptions.FailedToGenerateResponseError(f"API key auto-fetch failed: {e}")
87
+
82
88
  def __init__(
83
89
  self,
84
- api_key: str,
90
+ api_key: str = None,
85
91
  is_conversation: bool = True,
86
92
  max_tokens: int = 150,
87
93
  timeout: int = 30,
@@ -95,7 +101,7 @@ class FreeAIChat(Provider):
95
101
  system_prompt: str = "You are a helpful AI assistant.",
96
102
  temperature: float = 0.7,
97
103
  ):
98
- """Initializes the FreeAIChat API client."""
104
+ """Initializes the FreeAIChat API client. If api_key is not provided, auto-register and fetch one."""
99
105
  if model not in self.AVAILABLE_MODELS:
100
106
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
101
107
 
@@ -120,7 +126,10 @@ class FreeAIChat(Provider):
120
126
  self.model = model
121
127
  self.system_prompt = system_prompt
122
128
  self.temperature = temperature
123
- self.api_key = api_key
129
+ if not api_key:
130
+ self.api_key = self._auto_fetch_api_key(proxies=proxies, timeout=timeout)
131
+ else:
132
+ self.api_key = api_key
124
133
 
125
134
  self.__available_optimizers = (
126
135
  method