webscout-8.3.3-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (46):
  1. webscout/AIutel.py +221 -4
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/scira_search.py +24 -11
  4. webscout/Provider/Deepinfra.py +75 -57
  5. webscout/Provider/ExaChat.py +9 -5
  6. webscout/Provider/Flowith.py +1 -1
  7. webscout/Provider/FreeGemini.py +2 -2
  8. webscout/Provider/Gemini.py +3 -10
  9. webscout/Provider/GeminiProxy.py +31 -5
  10. webscout/Provider/LambdaChat.py +39 -31
  11. webscout/Provider/Netwrck.py +5 -8
  12. webscout/Provider/OLLAMA.py +8 -9
  13. webscout/Provider/OPENAI/README.md +1 -1
  14. webscout/Provider/OPENAI/__init__.py +1 -1
  15. webscout/Provider/OPENAI/autoproxy.py +1 -1
  16. webscout/Provider/OPENAI/copilot.py +73 -26
  17. webscout/Provider/OPENAI/deepinfra.py +54 -24
  18. webscout/Provider/OPENAI/exachat.py +9 -5
  19. webscout/Provider/OPENAI/monochat.py +3 -3
  20. webscout/Provider/OPENAI/netwrck.py +4 -7
  21. webscout/Provider/OPENAI/qodo.py +630 -0
  22. webscout/Provider/OPENAI/scirachat.py +82 -49
  23. webscout/Provider/OPENAI/textpollinations.py +13 -12
  24. webscout/Provider/OPENAI/typegpt.py +3 -3
  25. webscout/Provider/Qodo.py +454 -0
  26. webscout/Provider/TTI/monochat.py +3 -3
  27. webscout/Provider/TextPollinationsAI.py +13 -12
  28. webscout/Provider/__init__.py +4 -4
  29. webscout/Provider/copilot.py +58 -61
  30. webscout/Provider/freeaichat.py +64 -55
  31. webscout/Provider/monochat.py +275 -0
  32. webscout/Provider/scira_chat.py +111 -21
  33. webscout/Provider/typegpt.py +2 -2
  34. webscout/Provider/x0gpt.py +325 -315
  35. webscout/__init__.py +7 -2
  36. webscout/auth/routes.py +20 -3
  37. webscout/version.py +1 -1
  38. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/METADATA +1 -2
  39. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/RECORD +43 -43
  40. webscout/Provider/AI21.py +0 -177
  41. webscout/Provider/HuggingFaceChat.py +0 -469
  42. webscout/Provider/OPENAI/freeaichat.py +0 -363
  43. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  44. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  46. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
@@ -86,14 +86,8 @@ class Completions(BaseCompletions):
                     raise RuntimeError(f"Image upload failed: {r.text}")
                 images.append({"type": "image", "url": r.json().get("url")})

-            # Connect to websocket
-            # Note: ws_connect might not use timeout in the same way as POST/GET
             ws = s.ws_connect(self._client.websocket_url)
-
-            # Use model to set mode ("reasoning" for Think Deeper)
             mode = "reasoning" if "Think" in model else "chat"
-
-            # Send the message to Copilot
             ws.send(json.dumps({
                 "event": "send",
                 "conversationId": conv_id,
@@ -101,79 +95,132 @@ class Completions(BaseCompletions):
                 "mode": mode
             }).encode(), CurlWsFlag.TEXT)

-            # Track token usage using count_tokens
             prompt_tokens = count_tokens(prompt_text)
             completion_tokens = 0
             total_tokens = prompt_tokens
-
             started = False
+            image_prompt = None
             while True:
                 try:
                     msg = json.loads(ws.recv()[0])
                 except Exception:
                     break

-                if msg.get("event") == "appendText":
+                event = msg.get("event")
+                if event not in ["appendText", "done", "error", "generatingImage", "imageGenerated", "suggestedFollowups", "replaceText"]:
+                    print(f"[Copilot] Unhandled event: {event} | msg: {msg}")
+
+                if event == "appendText":
                     started = True
                     content = msg.get("text", "")
-
-                    # Update token counts using count_tokens
                     content_tokens = count_tokens(content)
                     completion_tokens += content_tokens
                     total_tokens = prompt_tokens + completion_tokens
-
-                    # Create the delta object
                     delta = ChoiceDelta(
                         content=content,
                         role="assistant"
                     )
-
-                    # Create the choice object
                     choice = Choice(
                         index=0,
                         delta=delta,
                         finish_reason=None
                     )
-
-                    # Create the chunk object
                     chunk = ChatCompletionChunk(
                         id=request_id,
                         choices=[choice],
                         created=created_time,
                         model=model
                     )
-
                     yield chunk
-                elif msg.get("event") == "done":
-                    # Final chunk with finish_reason
+                elif event == "replaceText":
+                    # treat as appendText for OpenAI compatibility
+                    content = msg.get("text", "")
+                    content_tokens = count_tokens(content)
+                    completion_tokens += content_tokens
+                    total_tokens = prompt_tokens + completion_tokens
+                    delta = ChoiceDelta(
+                        content=content,
+                        role="assistant"
+                    )
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+                    yield chunk
+                elif event == "generatingImage":
+                    image_prompt = msg.get("prompt")
+                elif event == "imageGenerated":
+                    # Yield a chunk with image metadata in the delta (custom extension)
+                    delta = ChoiceDelta(
+                        content=None,
+                        role=None
+                    )
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+                    chunk.image_url = msg.get("url")
+                    chunk.image_prompt = image_prompt
+                    chunk.image_preview = msg.get("thumbnailUrl")
+                    yield chunk
+                elif event == "suggestedFollowups":
+                    # Yield a chunk with followups in the delta (custom extension)
+                    delta = ChoiceDelta(
+                        content=None,
+                        role=None
+                    )
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+                    chunk.suggested_followups = msg.get("suggestions")
+                    yield chunk
+                elif event == "done":
                     delta = ChoiceDelta(
                         content=None,
                         role=None
                     )
-
                     choice = Choice(
                         index=0,
                         delta=delta,
                         finish_reason="stop"
                     )
-
                     chunk = ChatCompletionChunk(
                         id=request_id,
                         choices=[choice],
                         created=created_time,
                         model=model
                     )
-
                     yield chunk
                     break
-                elif msg.get("event") == "error":
+                elif event == "error":
+                    print(f"[Copilot] Error event: {msg}")
                     raise RuntimeError(f"Copilot error: {msg}")

             ws.close()
-
             if not started:
                 raise RuntimeError("No response received from Copilot")
-
         except Exception as e:
             raise RuntimeError(f"Stream error: {e}") from e
         finally:
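
The imageGenerated and suggestedFollowups events above are surfaced as non-standard attributes set directly on the ChatCompletionChunk (image_url, image_prompt, image_preview, suggested_followups) rather than inside choices[].delta, so OpenAI-style consumers must probe for them explicitly. A minimal consumer sketch; the import path, class name, and model string are illustrative assumptions, not confirmed by this diff:

    # Hypothetical consumer of the custom chunk attributes added in 8.3.4.
    from webscout.Provider.OPENAI.copilot import Copilot  # assumed export name

    client = Copilot()
    stream = client.chat.completions.create(
        model="Copilot",  # assumed id; a "Think..." variant would select mode="reasoning"
        messages=[{"role": "user", "content": "Draw a sunset over the sea"}],
        stream=True,
    )
    for chunk in stream:
        text = chunk.choices[0].delta.content
        if text:
            print(text, end="", flush=True)
        # The custom fields are plain attributes, absent on most chunks,
        # so getattr with a default is the safe way to read them.
        if getattr(chunk, "image_url", None):
            print(f"\n[image] {chunk.image_url} (prompt: {chunk.image_prompt})")
        if getattr(chunk, "suggested_followups", None):
            print(f"\n[followups] {chunk.suggested_followups}")
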
@@ -195,40 +195,68 @@ class Chat(BaseChat):

 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "deepseek-ai/DeepSeek-R1-0528",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-        "deepseek-ai/DeepSeek-R1-Turbo",
-        "deepseek-ai/DeepSeek-V3",
+        "anthropic/claude-4-opus",
+        "anthropic/claude-4-sonnet",
+        "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-14B",
+        "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
-        "google/gemma-3-12b-it",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        "microsoft/phi-4-reasoning-plus",
+        "Qwen/QwQ-32B",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
-        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/Phi-4-multimodal-instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-V3",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-Guard-4-12B",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/phi-4",
+        "Gryphe/MythoMax-L2-13b",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "NovaSky-AI/Sky-T1-32B-Preview",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-7B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Sao10K/L3-8B-Lunaris-v1-Turbo",
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Sao10K/L3.3-70B-Euryale-v2.3",
+        "anthropic/claude-3-7-sonnet-latest",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
+        "google/gemini-2.0-flash-001",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "microsoft/phi-4",
-        "microsoft/phi-4-reasoning-plus",
+        "mistralai/Devstral-Small-2505",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
-        "Qwen/Qwen3-235B-A22B",
     ]
-    def __init__(self, browser: str = "chrome"):
+    def __init__(self, browser: str = "chrome", api_key: str = None):
         self.timeout = None
         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
         self.session = requests.Session()
@@ -253,6 +281,8 @@ class DeepInfra(OpenAICompatibleProvider):
             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
             "User-Agent": fingerprint["user_agent"],
         }
+        if api_key is not None:
+            self.headers["Authorization"] = f"Bearer {api_key}"
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
     @property
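
The new api_key parameter is optional: when it is omitted the Authorization header is never set, so anonymous access behaves exactly as in 8.3.3. A usage sketch; the import path is assumed from the file list above, the model id comes from AVAILABLE_MODELS, and the call shape follows the OpenAI-compatible interface the class advertises:

    # Hypothetical usage of the new optional api_key parameter.
    from webscout.Provider.OPENAI.deepinfra import DeepInfra  # assumed export name

    client = DeepInfra(api_key="YOUR_DEEPINFRA_TOKEN")  # omit api_key for anonymous use
    response = client.chat.completions.create(
        model="deepseek-ai/DeepSeek-R1-0528",
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(response.choices[0].message.content)
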
@@ -34,11 +34,9 @@ MODEL_CONFIGS = {
            "gemini-2.0-flash",
            "gemini-2.0-flash-exp-image-generation",
            "gemini-2.0-flash-thinking-exp-01-21",
-           "gemini-2.5-pro-exp-03-25",
+           "gemini-2.5-flash-lite-preview-06-17",
            "gemini-2.0-pro-exp-02-05",
-           "gemini-2.5-flash-preview-04-17",
-
-
+           "gemini-2.5-flash",
        ],
    },
    "openrouter": {
@@ -75,7 +73,9 @@ MODEL_CONFIGS = {
        "endpoint": "https://ayle.chat/api/cerebras",
        "models": [
            "llama3.1-8b",
-           "llama-3.3-70b"
+           "llama-3.3-70b",
+           "llama-4-scout-17b-16e-instruct",
+           "qwen-3-32b"
        ],
    },
    "xai": {
@@ -299,6 +299,7 @@ class ExaChat(OpenAICompatibleProvider):
        "gemini-2.0-flash-exp-image-generation",
        "gemini-2.0-flash-thinking-exp-01-21",
        "gemini-2.5-pro-exp-03-25",
+       "gemini-2.5-flash-lite-preview-06-17",
        "gemini-2.0-pro-exp-02-05",
        "gemini-2.5-flash-preview-04-17",

@@ -330,6 +331,8 @@
        # Cerebras Models
        "llama3.1-8b",
        "llama-3.3-70b",
+       "llama-4-scout-17b-16e-instruct",
+       "qwen-3-32b",

    ]

@@ -446,3 +449,4 @@ if __name__ == "__main__":
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")
+           print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -251,7 +251,7 @@ class MonoChat(OpenAICompatibleProvider):
            browser: Browser to emulate in user agent
        """
        self.timeout = None
-       self.api_endpoint = "https://www.chatwithmono.xyz/api/chat"
+       self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
        self.session = requests.Session()

        agent = LitAgent()
@@ -262,8 +262,8 @@ class MonoChat(OpenAICompatibleProvider):
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
-           "origin": "https://www.chatwithmono.xyz",
-           "referer": "https://www.chatwithmono.xyz/",
+           "origin": "https://gg.is-a-furry.dev",
+           "referer": "https://gg.is-a-furry.dev/",
            "user-agent": self.fingerprint["user_agent"]
        }

@@ -204,18 +204,15 @@ class Netwrck(OpenAICompatibleProvider):
    """

    AVAILABLE_MODELS = [
-       "neversleep/llama-3-lumimaid-8b:extended",
-       "x-ai/grok-2",
-       "anthropic/claude-3-7-sonnet-20250219",
+       "thedrummer/valkyrie-49b-v1",
        "sao10k/l3-euryale-70b",
+       "deepseek/deepseek-chat",
+       "deepseek/deepseek-r1",
+       "anthropic/claude-sonnet-4-20250514",
        "openai/gpt-4.1-mini",
        "gryphe/mythomax-l2-13b",
-       "google/gemini-pro-1.5",
        "google/gemini-2.5-flash-preview-04-17",
        "nvidia/llama-3.1-nemotron-70b-instruct",
-       "deepseek/deepseek-r1",
-       "deepseek/deepseek-chat"
-
    ]

    # Default greeting used by Netwrck
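
Because the Netwrck model list changed in place, code that pinned a removed id such as "x-ai/grok-2" or "google/gemini-pro-1.5" may now be rejected. A defensive sketch; the import path is assumed from the file list above and the call shape follows the OpenAI-compatible interface:

    # Hypothetical guard against model ids removed in 8.3.4.
    from webscout.Provider.OPENAI.netwrck import Netwrck  # assumed export name

    preferred = "x-ai/grok-2"  # removed in this release
    model = preferred if preferred in Netwrck.AVAILABLE_MODELS else "deepseek/deepseek-r1"
    client = Netwrck()
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response.choices[0].message.content)
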