webscout-8.3.3-py3-none-any.whl → webscout-8.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (79)
  1. webscout/AIutel.py +53 -800
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/__init__.py +11 -10
  4. webscout/Provider/AISEARCH/felo_search.py +7 -3
  5. webscout/Provider/AISEARCH/scira_search.py +26 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  7. webscout/Provider/Deepinfra.py +81 -57
  8. webscout/Provider/ExaChat.py +9 -5
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/LambdaChat.py +39 -31
  14. webscout/Provider/Netwrck.py +5 -8
  15. webscout/Provider/OLLAMA.py +8 -9
  16. webscout/Provider/OPENAI/README.md +1 -1
  17. webscout/Provider/OPENAI/TogetherAI.py +57 -48
  18. webscout/Provider/OPENAI/TwoAI.py +94 -1
  19. webscout/Provider/OPENAI/__init__.py +1 -3
  20. webscout/Provider/OPENAI/autoproxy.py +1 -1
  21. webscout/Provider/OPENAI/copilot.py +73 -26
  22. webscout/Provider/OPENAI/deepinfra.py +60 -24
  23. webscout/Provider/OPENAI/exachat.py +9 -5
  24. webscout/Provider/OPENAI/monochat.py +3 -3
  25. webscout/Provider/OPENAI/netwrck.py +4 -7
  26. webscout/Provider/OPENAI/qodo.py +630 -0
  27. webscout/Provider/OPENAI/scirachat.py +86 -49
  28. webscout/Provider/OPENAI/textpollinations.py +19 -14
  29. webscout/Provider/OPENAI/venice.py +1 -0
  30. webscout/Provider/Perplexitylabs.py +163 -147
  31. webscout/Provider/Qodo.py +478 -0
  32. webscout/Provider/TTI/__init__.py +1 -0
  33. webscout/Provider/TTI/monochat.py +3 -3
  34. webscout/Provider/TTI/together.py +7 -6
  35. webscout/Provider/TTI/venice.py +368 -0
  36. webscout/Provider/TextPollinationsAI.py +19 -14
  37. webscout/Provider/TogetherAI.py +57 -44
  38. webscout/Provider/TwoAI.py +96 -2
  39. webscout/Provider/TypliAI.py +33 -27
  40. webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
  41. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  42. webscout/Provider/Venice.py +1 -0
  43. webscout/Provider/WiseCat.py +18 -20
  44. webscout/Provider/__init__.py +4 -10
  45. webscout/Provider/copilot.py +58 -61
  46. webscout/Provider/freeaichat.py +64 -55
  47. webscout/Provider/monochat.py +275 -0
  48. webscout/Provider/scira_chat.py +115 -21
  49. webscout/Provider/toolbaz.py +5 -10
  50. webscout/Provider/typefully.py +1 -11
  51. webscout/Provider/x0gpt.py +325 -315
  52. webscout/__init__.py +4 -11
  53. webscout/auth/__init__.py +19 -4
  54. webscout/auth/api_key_manager.py +189 -189
  55. webscout/auth/auth_system.py +25 -40
  56. webscout/auth/config.py +105 -6
  57. webscout/auth/database.py +377 -22
  58. webscout/auth/models.py +185 -130
  59. webscout/auth/request_processing.py +175 -11
  60. webscout/auth/routes.py +119 -5
  61. webscout/auth/server.py +9 -2
  62. webscout/auth/simple_logger.py +236 -0
  63. webscout/sanitize.py +1074 -0
  64. webscout/version.py +1 -1
  65. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
  66. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
  67. webscout/Provider/AI21.py +0 -177
  68. webscout/Provider/HuggingFaceChat.py +0 -469
  69. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  70. webscout/Provider/OPENAI/freeaichat.py +0 -363
  71. webscout/Provider/OPENAI/typegpt.py +0 -368
  72. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  73. webscout/Provider/WritingMate.py +0 -273
  74. webscout/Provider/typegpt.py +0 -284
  75. webscout/Provider/uncovr.py +0 -333
  76. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
  77. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
  78. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
  79. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
@@ -195,40 +195,74 @@ class Chat(BaseChat):
 
 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "deepseek-ai/DeepSeek-R1-0528",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-        "deepseek-ai/DeepSeek-R1-Turbo",
-        "deepseek-ai/DeepSeek-V3",
+        "anthropic/claude-4-opus",
+        "moonshotai/Kimi-K2-Instruct",
+        "anthropic/claude-4-sonnet",
+        "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507",
+        "Qwen/Qwen3-235B-A22B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-14B",
+        "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
-        "google/gemma-3-12b-it",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        "microsoft/phi-4-reasoning-plus",
+        "Qwen/QwQ-32B",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
-        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/Phi-4-multimodal-instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-V3",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-Guard-4-12B",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/phi-4",
+        "Gryphe/MythoMax-L2-13b",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "NovaSky-AI/Sky-T1-32B-Preview",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-7B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Sao10K/L3-8B-Lunaris-v1-Turbo",
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Sao10K/L3.3-70B-Euryale-v2.3",
+        "anthropic/claude-3-7-sonnet-latest",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
+        "google/gemini-2.0-flash-001",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "microsoft/phi-4",
-        "microsoft/phi-4-reasoning-plus",
+        "mistralai/Devstral-Small-2505",
+        "mistralai/Devstral-Small-2507",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
-        "Qwen/Qwen3-235B-A22B",
     ]
-    def __init__(self, browser: str = "chrome"):
+    def __init__(self, browser: str = "chrome", api_key: str = None):
         self.timeout = None
         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
         self.session = requests.Session()
@@ -253,6 +287,8 @@ class DeepInfra(OpenAICompatibleProvider):
             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
             "User-Agent": fingerprint["user_agent"],
         }
+        if api_key is not None:
+            self.headers["Authorization"] = f"Bearer {api_key}"
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
     @property
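The DeepInfra constructor now accepts an optional api_key and, when one is supplied, sends it as a Bearer token. A minimal usage sketch under stated assumptions: the import path and the OpenAI-style chat.completions.create call mirror webscout's other OpenAI-compatible providers and are not shown in this diff.

# Usage sketch (assumptions: DeepInfra is importable from webscout.Provider.OPENAI
# and exposes an OpenAI-style chat.completions.create; neither is shown in this diff).
from webscout.Provider.OPENAI import DeepInfra

client = DeepInfra(api_key="YOUR_DEEPINFRA_TOKEN")  # api_key is optional; omit it to keep the keyless behaviour
response = client.chat.completions.create(
    model="moonshotai/Kimi-K2-Instruct",  # one of the model IDs added in 8.3.5
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)  # response shape assumed to follow the OpenAI client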
@@ -34,11 +34,9 @@ MODEL_CONFIGS = {
             "gemini-2.0-flash",
             "gemini-2.0-flash-exp-image-generation",
             "gemini-2.0-flash-thinking-exp-01-21",
-            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.5-flash-lite-preview-06-17",
             "gemini-2.0-pro-exp-02-05",
-            "gemini-2.5-flash-preview-04-17",
-
-
+            "gemini-2.5-flash",
         ],
     },
     "openrouter": {
@@ -75,7 +73,9 @@ MODEL_CONFIGS = {
         "endpoint": "https://ayle.chat/api/cerebras",
         "models": [
             "llama3.1-8b",
-            "llama-3.3-70b"
+            "llama-3.3-70b",
+            "llama-4-scout-17b-16e-instruct",
+            "qwen-3-32b"
         ],
     },
     "xai": {
@@ -299,6 +299,7 @@ class ExaChat(OpenAICompatibleProvider):
         "gemini-2.0-flash-exp-image-generation",
         "gemini-2.0-flash-thinking-exp-01-21",
         "gemini-2.5-pro-exp-03-25",
+        "gemini-2.5-flash-lite-preview-06-17",
         "gemini-2.0-pro-exp-02-05",
         "gemini-2.5-flash-preview-04-17",
 
@@ -330,6 +331,8 @@ class ExaChat(OpenAICompatibleProvider):
         # Cerebras Models
         "llama3.1-8b",
         "llama-3.3-70b",
+        "llama-4-scout-17b-16e-instruct",
+        "qwen-3-32b",
 
     ]
 
@@ -446,3 +449,4 @@ if __name__ == "__main__":
             print(f"{model:<50} {status:<10} {display_text}")
         except Exception as e:
             print(f"{model:<50} {'✗':<10} {str(e)}")
+            print(f"{model:<50} {'✗':<10} {str(e)}")
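The Cerebras entry in MODEL_CONFIGS now lists four models, so anything that maps a model name to its backing endpoint has to account for them. A small lookup sketch over a dict with the same shape as MODEL_CONFIGS; the values are copied from this diff, while resolve_endpoint is an illustrative helper, not part of webscout.

# Illustrative helper over a MODEL_CONFIGS-shaped dict; the function itself is hypothetical.
MODEL_CONFIGS = {
    "cerebras": {
        "endpoint": "https://ayle.chat/api/cerebras",
        "models": [
            "llama3.1-8b",
            "llama-3.3-70b",
            "llama-4-scout-17b-16e-instruct",
            "qwen-3-32b",
        ],
    },
}

def resolve_endpoint(model: str) -> str:
    """Return the endpoint whose model list contains `model`."""
    for config in MODEL_CONFIGS.values():
        if model in config["models"]:
            return config["endpoint"]
    raise ValueError(f"unknown model: {model}")

print(resolve_endpoint("qwen-3-32b"))  # https://ayle.chat/api/cerebras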
@@ -251,7 +251,7 @@ class MonoChat(OpenAICompatibleProvider):
             browser: Browser to emulate in user agent
         """
         self.timeout = None
-        self.api_endpoint = "https://www.chatwithmono.xyz/api/chat"
+        self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
         self.session = requests.Session()
 
         agent = LitAgent()
@@ -262,8 +262,8 @@ class MonoChat(OpenAICompatibleProvider):
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": self.fingerprint["accept_language"],
             "content-type": "application/json",
-            "origin": "https://www.chatwithmono.xyz",
-            "referer": "https://www.chatwithmono.xyz/",
+            "origin": "https://gg.is-a-furry.dev",
+            "referer": "https://gg.is-a-furry.dev/",
             "user-agent": self.fingerprint["user_agent"]
         }
 
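MonoChat's backend moved from www.chatwithmono.xyz to gg.is-a-furry.dev, and the endpoint, origin, and referer headers change together. A hedged sketch of re-pointing an instance at a different host; the attribute names come from the diff, but the import path and whether the class is meant to be retargeted this way are assumptions.

from webscout.Provider.OPENAI import MonoChat  # import path is an assumption

mono = MonoChat()
host = "https://gg.is-a-furry.dev"  # the new default host in this release
# Keep api_endpoint, origin, and referer consistent when changing hosts.
# Assumes the constructor pushed its headers into the session, as DeepInfra does above.
mono.api_endpoint = f"{host}/api/chat"
mono.session.headers.update({"origin": host, "referer": f"{host}/"})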
@@ -204,18 +204,15 @@ class Netwrck(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = [
-        "neversleep/llama-3-lumimaid-8b:extended",
-        "x-ai/grok-2",
-        "anthropic/claude-3-7-sonnet-20250219",
+        "thedrummer/valkyrie-49b-v1",
         "sao10k/l3-euryale-70b",
+        "deepseek/deepseek-chat",
+        "deepseek/deepseek-r1",
+        "anthropic/claude-sonnet-4-20250514",
         "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
-        "google/gemini-pro-1.5",
         "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
-        "deepseek/deepseek-r1",
-        "deepseek/deepseek-chat"
-
     ]
 
     # Default greeting used by Netwrck
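Several Netwrck model IDs were dropped in this release (neversleep/llama-3-lumimaid-8b:extended, x-ai/grok-2, anthropic/claude-3-7-sonnet-20250219, google/gemini-pro-1.5), so callers that pin a model should validate it against AVAILABLE_MODELS first. A minimal guard sketch; the class attribute comes from the diff, while the import path and fallback policy are illustrative.

from webscout.Provider.OPENAI import Netwrck  # import path is an assumption

preferred = "x-ai/grok-2"  # removed in 8.3.5
if preferred in Netwrck.AVAILABLE_MODELS:
    model = preferred
else:
    model = "deepseek/deepseek-chat"  # still listed after this release
print(f"using {model}")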