webscout 8.3.3__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

Files changed (46)
  1. webscout/AIutel.py +221 -4
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/scira_search.py +24 -11
  4. webscout/Provider/Deepinfra.py +75 -57
  5. webscout/Provider/ExaChat.py +9 -5
  6. webscout/Provider/Flowith.py +1 -1
  7. webscout/Provider/FreeGemini.py +2 -2
  8. webscout/Provider/Gemini.py +3 -10
  9. webscout/Provider/GeminiProxy.py +31 -5
  10. webscout/Provider/LambdaChat.py +39 -31
  11. webscout/Provider/Netwrck.py +5 -8
  12. webscout/Provider/OLLAMA.py +8 -9
  13. webscout/Provider/OPENAI/README.md +1 -1
  14. webscout/Provider/OPENAI/__init__.py +1 -1
  15. webscout/Provider/OPENAI/autoproxy.py +1 -1
  16. webscout/Provider/OPENAI/copilot.py +73 -26
  17. webscout/Provider/OPENAI/deepinfra.py +54 -24
  18. webscout/Provider/OPENAI/exachat.py +9 -5
  19. webscout/Provider/OPENAI/monochat.py +3 -3
  20. webscout/Provider/OPENAI/netwrck.py +4 -7
  21. webscout/Provider/OPENAI/qodo.py +630 -0
  22. webscout/Provider/OPENAI/scirachat.py +82 -49
  23. webscout/Provider/OPENAI/textpollinations.py +13 -12
  24. webscout/Provider/OPENAI/typegpt.py +3 -3
  25. webscout/Provider/Qodo.py +454 -0
  26. webscout/Provider/TTI/monochat.py +3 -3
  27. webscout/Provider/TextPollinationsAI.py +13 -12
  28. webscout/Provider/__init__.py +4 -4
  29. webscout/Provider/copilot.py +58 -61
  30. webscout/Provider/freeaichat.py +64 -55
  31. webscout/Provider/monochat.py +275 -0
  32. webscout/Provider/scira_chat.py +111 -21
  33. webscout/Provider/typegpt.py +2 -2
  34. webscout/Provider/x0gpt.py +325 -315
  35. webscout/__init__.py +7 -2
  36. webscout/auth/routes.py +20 -3
  37. webscout/version.py +1 -1
  38. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/METADATA +1 -2
  39. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/RECORD +43 -43
  40. webscout/Provider/AI21.py +0 -177
  41. webscout/Provider/HuggingFaceChat.py +0 -469
  42. webscout/Provider/OPENAI/freeaichat.py +0 -363
  43. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  44. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  46. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
@@ -17,62 +17,66 @@ class DeepInfra(Provider):
     """
 
     AVAILABLE_MODELS = [
-        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
-        "deepseek-ai/DeepSeek-R1-0528",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-        "deepseek-ai/DeepSeek-R1-Turbo",
-        "deepseek-ai/DeepSeek-V3",
+        "anthropic/claude-4-opus",
+        "anthropic/claude-4-sonnet",
+        "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-14B",
+        "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
-        "google/gemma-3-12b-it",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        "microsoft/phi-4-reasoning-plus",
+        "Qwen/QwQ-32B",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
-        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/Phi-4-multimodal-instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-V3",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-Guard-4-12B",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/phi-4",
+        "Gryphe/MythoMax-L2-13b",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "NovaSky-AI/Sky-T1-32B-Preview",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-7B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Sao10K/L3-8B-Lunaris-v1-Turbo",
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Sao10K/L3.3-70B-Euryale-v2.3",
+        "anthropic/claude-3-7-sonnet-latest",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
+        "google/gemini-2.0-flash-001",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "microsoft/phi-4",
-        "microsoft/phi-4-reasoning-plus",
+        "mistralai/Devstral-Small-2505",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
-        "Qwen/Qwen3-235B-A22B",
-        # "google/gemini-1.5-flash", # >>>> NOT WORKING
-        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
 
     @staticmethod
@@ -84,6 +88,7 @@ class DeepInfra(Provider):
 
     def __init__(
         self,
+        api_key: Optional[str] = None,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,
@@ -107,21 +112,34 @@ class DeepInfra(Provider):
         self.agent = LitAgent()
         # Fingerprint generation might be less relevant with impersonate
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+        self.api = api_key
         # Use the fingerprint for headers (keep relevant ones)
         self.headers = {
-            "Accept": self.fingerprint["accept"], # Keep Accept
-            "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
             "Content-Type": "application/json",
-            "Cache-Control": "no-cache", # Keep Cache-Control
-            "Origin": "https://deepinfra.com", # Keep Origin
-            "Pragma": "no-cache", # Keep Pragma
-            "Referer": "https://deepinfra.com/", # Keep Referer
-            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
+            "Cache-Control": "no-cache",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-site",
-            "X-Deepinfra-Source": "web-embed", # Keep custom headers
+            "X-Deepinfra-Source": "web-embed",
+            # Additional headers from LitAgent.generate_fingerprint
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+            "Forwarded": self.fingerprint.get("forwarded", ""),
+            "X-Forwarded-Proto": self.fingerprint.get("x-forwarded-proto", ""),
+            "X-Request-Id": self.fingerprint.get("x-request-id", ""),
         }
+        if self.api is not None:
+            self.headers["Authorization"] = f"Bearer {self.api}"
 
         # Initialize curl_cffi Session
         self.session = Session()
@@ -321,7 +339,7 @@ if __name__ == "__main__":
 
     for model in DeepInfra.AVAILABLE_MODELS:
         try:
-            test_ai = DeepInfra(model=model, timeout=60)
+            test_ai = DeepInfra(model=model, timeout=60, api_key="jwt:eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJnaDoxNTg5ODg0NzgiLCJleHAiOjE3NTI3NDI5NDV9.qM93p6bPZYi_ejaOo1Dbe4UjYXrFiM7XvBLN4-9BWag")
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             for chunk in response:
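
Taken together, the Deepinfra hunks refresh the model list and add an optional api_key argument that, when set, is sent as a Bearer token (note that the published test harness above embeds a hardcoded JWT). A minimal usage sketch, assuming the module path from the file list above and using a placeholder key:

    from webscout.Provider.Deepinfra import DeepInfra  # path assumed from the file list

    # api_key is optional; when provided, __init__ adds
    # "Authorization: Bearer <key>" to the request headers.
    ai = DeepInfra(model="deepseek-ai/DeepSeek-V3-0324-Turbo", timeout=60,
                   api_key="YOUR_DEEPINFRA_KEY")  # placeholder, not a real key
    for chunk in ai.chat("Say 'Hello' in one word", stream=True):
        print(chunk, end="", flush=True)
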
@@ -21,9 +21,9 @@ MODEL_CONFIGS = {
             "gemini-2.0-flash",
             "gemini-2.0-flash-exp-image-generation",
             "gemini-2.0-flash-thinking-exp-01-21",
-            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.5-flash-lite-preview-06-17",
             "gemini-2.0-pro-exp-02-05",
-            "gemini-2.5-flash-preview-04-17",
+            "gemini-2.5-flash",
 
 
         ],
@@ -62,7 +62,9 @@ MODEL_CONFIGS = {
         "endpoint": "https://ayle.chat/api/cerebras",
         "models": [
             "llama3.1-8b",
-            "llama-3.3-70b"
+            "llama-3.3-70b",
+            "llama-4-scout-17b-16e-instruct",
+            "qwen-3-32b"
         ],
     },
     "xai": {
@@ -88,9 +90,9 @@ class ExaChat(Provider):
         "gemini-2.0-flash",
         "gemini-2.0-flash-exp-image-generation",
         "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-flash-preview-04-17",
+        "gemini-2.5-flash",
+        "gemini-2.5-flash-lite-preview-06-17",
 
         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",
@@ -120,6 +122,8 @@ class ExaChat(Provider):
         # Cerebras Models
         "llama3.1-8b",
         "llama-3.3-70b",
+        "llama-4-scout-17b-16e-instruct",
+        "qwen-3-32b",
 
     ]
 
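A short usage sketch for the newly listed Cerebras models, assuming ExaChat follows the same model= constructor convention seen in the other providers in this diff:

    from webscout.Provider.ExaChat import ExaChat  # path assumed from the file list

    # "llama-4-scout-17b-16e-instruct" and "qwen-3-32b" are new in 8.3.4
    ai = ExaChat(model="qwen-3-32b")
    print(ai.chat("Say 'Hello' in one word"))
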
@@ -15,7 +15,7 @@ class Flowith(Provider):
     """
     A provider class for interacting with the Flowith API.
     """
-    AVAILABLE_MODELS = ["gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
+    AVAILABLE_MODELS = ["gpt-4.1-nano", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
 
     def __init__(
         self,
@@ -83,7 +83,7 @@ class FreeGemini(Provider):
         self.last_response = {}
         self.system_prompt = system_prompt # Stored for consistency
 
-        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"
+        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse"
 
         self.agent = LitAgent()
         self.headers = {
@@ -246,5 +246,5 @@ class FreeGemini(Provider):
 if __name__ == "__main__":
     # Example usage
     free_gemini = FreeGemini()
-    response = free_gemini.chat("What is the capital of France?", stream=False)
+    response = free_gemini.chat("how many r in strawberry", stream=False)
     print(response) # Should print the response from the API
@@ -10,22 +10,15 @@ from ..Bard import Chatbot, Model
 
 warnings.simplefilter("ignore", category=UserWarning)
 
-# Define model aliases for easy usage
+# Define model aliases for easy usage (only supported models)
 MODEL_ALIASES: Dict[str, Model] = {
     "unspecified": Model.UNSPECIFIED,
-    "gemini-2.0-flash": Model.G_2_0_FLASH,
-    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
-    "gemini-2.5-pro": Model.G_2_5_PRO,
-    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
-    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
     "gemini-2.5-flash": Model.G_2_5_FLASH,
+    "gemini-2.5-pro": Model.G_2_5_PRO,
     # Add shorter aliases for convenience
-    "flash": Model.G_2_0_FLASH,
     "flash-2.5": Model.G_2_5_FLASH,
-    "thinking": Model.G_2_0_FLASH_THINKING,
     "pro": Model.G_2_5_PRO,
-    "advanced": Model.G_2_0_EXP_ADVANCED,
-    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
+    "unspecified": Model.UNSPECIFIED,
 }
 
 # List of available models (friendly names)
@@ -14,10 +14,11 @@ class GeminiProxy(Provider):
     AVAILABLE_MODELS = [
         "gemini-2.0-flash-lite",
         "gemini-2.0-flash",
-        "gemini-2.5-pro-preview-06-05",
-        "gemini-2.5-pro-preview-05-06",
         "gemini-2.5-flash-preview-04-17",
         "gemini-2.5-flash-preview-05-20",
+        "gemini-2.5-flash-lite-preview-06-17",
+        "gemini-2.5-pro",
+        "gemini-2.5-flash",
 
     ]
 
@@ -135,6 +136,31 @@ class GeminiProxy(Provider):
         return str(response)
 
 if __name__ == "__main__":
-    ai = GeminiProxy(timeout=30, model="gemini-2.5-flash-preview-05-20")
-    response = ai.chat("write a poem about AI")
-    print(response)
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(GeminiProxy.AVAILABLE_MODELS)
+
+    for model in GeminiProxy.AVAILABLE_MODELS:
+        try:
+            test_ai = GeminiProxy(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -93,49 +93,49 @@ class LambdaChat(Provider):
         self.session.proxies = proxies # Assign proxies directly
 
     def create_conversation(self, model: str):
-        """Create a new conversation with the specified model."""
+        """Create a new conversation with the specified model, using updated headers and cookies."""
         url = f"{self.url}/conversation"
         payload = {
             "model": model,
-            "preprompt": self.system_prompt,
-
+            "preprompt": self.system_prompt
         }
-
-        # Update referer for this specific request
+
+        # Update headers for this specific request
         headers = self.headers.copy()
-        headers["Referer"] = f"{self.url}/models/{model}"
-
+        headers["Referer"] = f"{self.url}/"
+        # Add browser-like headers for best compatibility
+        headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+        headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+        headers["Sec-GPC"] = "1"
+        headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+        headers["Sec-Ch-Ua-Mobile"] = "?0"
+        headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+        headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+        headers["Origin"] = self.url
+        # cookies are handled by curl_cffi session automatically
+
         try:
-            # Use curl_cffi session post with impersonate
             response = self.session.post(
-                url,
-                json=payload,
-                headers=headers, # Use updated headers with specific Referer
-                impersonate="chrome110" # Use a common impersonation profile
+                url,
+                json=payload,
+                headers=headers,
+                impersonate="chrome110"
             )
-
             if response.status_code == 401:
                 raise exceptions.AuthenticationError("Authentication failed.")
-
-            # Handle other error codes
             if response.status_code != 200:
                 return None
-
             data = response.json()
            conversation_id = data.get("conversationId")
-
-            # Store conversation data
            if model not in self._conversation_data:
                self._conversation_data[model] = {
                    "conversationId": conversation_id,
-                    "messageId": str(uuid.uuid4()) # Initial message ID
+                    "messageId": str(uuid.uuid4())
                }
-
            return conversation_id
-        except CurlError as e: # Catch CurlError
-            # Log or handle CurlError specifically if needed
+        except CurlError:
            return None
-        except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
+        except Exception:
            return None
 
    def fetch_message_id(self, conversation_id: str) -> str:
@@ -230,35 +230,43 @@ class LambdaChat(Provider):
        url = f"{self.url}/conversation/{conversation_id}"
        message_id = self._conversation_data[model]["messageId"]
 
-        # Data to send
+        # Data to send (tools should be empty list by default)
        request_data = {
            "inputs": prompt,
            "id": message_id,
            "is_retry": False,
            "is_continue": False,
            "web_search": web_search,
-            "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+            "tools": []
        }
-
+
        # Update headers for this specific request
        headers = self.headers.copy()
        headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
-
+        headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+        headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+        headers["Sec-GPC"] = "1"
+        headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+        headers["Sec-Ch-Ua-Mobile"] = "?0"
+        headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+        headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+        headers["Origin"] = self.url
+
        # Create multipart form data
        boundary = self.generate_boundary()
        multipart_headers = headers.copy()
        multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
-
+
        # Serialize the data to JSON
        data_json = json.dumps(request_data, separators=(',', ':'))
-
+
        # Create the multipart form data body
        body = f"--{boundary}\r\n"
        body += f'Content-Disposition: form-data; name="data"\r\n'
-        body += f"Content-Type: application/json\r\n\r\n"
+        body += f"\r\n"
        body += f"{data_json}\r\n"
        body += f"--{boundary}--\r\n"
-
+
        multipart_headers["Content-Length"] = str(len(body))
 
        def for_stream():
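
The second LambdaChat hunk empties the hardcoded tools list and drops the per-part Content-Type: application/json header from the multipart body. A standalone sketch of the body the client now builds, with a hypothetical boundary value standing in for generate_boundary():

    import json

    boundary = "----WebBoundaryEXAMPLE"  # hypothetical; generate_boundary() supplies the real one
    request_data = {"inputs": "Hi", "id": "msg-1", "is_retry": False,
                    "is_continue": False, "web_search": False, "tools": []}
    data_json = json.dumps(request_data, separators=(',', ':'))

    # Mirrors the diff: a single "data" part with no explicit Content-Type line.
    body = f"--{boundary}\r\n"
    body += 'Content-Disposition: form-data; name="data"\r\n'
    body += "\r\n"
    body += f"{data_json}\r\n"
    body += f"--{boundary}--\r\n"
    print(body)
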
@@ -14,23 +14,20 @@ class Netwrck(Provider):
     greeting = """Hello! I'm a helpful assistant. How can I help you today?"""
 
     AVAILABLE_MODELS = [
-        "neversleep/llama-3-lumimaid-8b:extended",
-        "x-ai/grok-2",
-        "anthropic/claude-3-7-sonnet-20250219",
+        "thedrummer/valkyrie-49b-v1",
         "sao10k/l3-euryale-70b",
+        "deepseek/deepseek-chat",
+        "deepseek/deepseek-r1",
+        "anthropic/claude-sonnet-4-20250514",
         "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
-        "google/gemini-pro-1.5",
         "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
-        "deepseek/deepseek-r1",
-        "deepseek/deepseek-chat"
-
     ]
 
     def __init__(
         self,
-        model: str = "anthropic/claude-3-7-sonnet-20250219",
+        model: str = "anthropic/claude-sonnet-4-20250514",
         is_conversation: bool = True,
         max_tokens: int = 4096, # Note: max_tokens is not used by this API
         timeout: int = 30,
@@ -1,14 +1,13 @@
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict, List, Optional, Union
-import ollama
-from ollama import AsyncClient, Client, ResponseError
-import asyncio
-import base64
-from pathlib import Path
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from typing import AsyncGenerator, Dict, List, Optional, Union
+
+try:
+    from ollama import AsyncClient, Client, ResponseError
+except ImportError as e:
+    pass
 
 class OLLAMA(Provider):
     def __init__(
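
Note that the new guarded import swallows the ImportError, so a missing ollama package only surfaces later as a NameError when Client is first referenced. A stricter variant of the same pattern, shown as a hypothetical sketch rather than what the diff does:

    try:
        from ollama import Client
    except ImportError:
        Client = None  # sentinel checked at first use

    def make_client(host: str = "http://localhost:11434"):
        # Fail loudly at the point of use instead of with a bare NameError.
        if Client is None:
            raise ImportError("The 'ollama' package is required: pip install ollama")
        return Client(host=host)
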
@@ -39,7 +39,6 @@ Currently, the following providers are implemented with OpenAI-compatible interf
 - TypeGPT
 - SciraChat
 - LLMChatCo
-- FreeAIChat
 - YEPCHAT
 - HeckAI
 - SonusAI
@@ -73,6 +72,7 @@ Currently, the following providers are implemented with OpenAI-compatible interf
 - MonoChat
 - Friendli
 - MiniMax
+- QodoAI
 
 ## 💻 Usage Examples
 
@@ -8,7 +8,6 @@ from .venice import *
 from .exaai import *
 from .typegpt import *
 from .scirachat import *
-from .freeaichat import *
 from .llmchatco import *
 from .yep import * # Add YEPCHAT
 from .heckai import *
@@ -46,6 +45,7 @@ from .GeminiProxy import * # Add GeminiProxy provider
 from .friendli import *
 from .monochat import *
 from .MiniMax import * # Add MiniMaxAI provider
+from .qodo import * # Add QodoAI provider
 # Export auto-proxy functionality
 from .autoproxy import (
     get_auto_proxy,
@@ -32,7 +32,7 @@ _proxy_cache = {
     'cache_duration': 300 # 5 minutes
 }
 
-PROXY_SOURCE_URL = "http://207.180.209.185:5000/ips.txt"
+PROXY_SOURCE_URL = "https://proxies.typegpt.net/ips.txt"
 
 # --- Static Proxy Lists ---
 # NordVPN proxies (format: https://host:port:user:pass)