webscout: 8.3.3-py3-none-any.whl → 8.3.5-py3-none-any.whl

This diff compares the contents of the publicly released webscout 8.3.3 and 8.3.5 wheels as they appear in their public registry; it is provided for informational purposes only.

Potentially problematic release.

Files changed (79)
  1. webscout/AIutel.py +53 -800
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/__init__.py +11 -10
  4. webscout/Provider/AISEARCH/felo_search.py +7 -3
  5. webscout/Provider/AISEARCH/scira_search.py +26 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  7. webscout/Provider/Deepinfra.py +81 -57
  8. webscout/Provider/ExaChat.py +9 -5
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/LambdaChat.py +39 -31
  14. webscout/Provider/Netwrck.py +5 -8
  15. webscout/Provider/OLLAMA.py +8 -9
  16. webscout/Provider/OPENAI/README.md +1 -1
  17. webscout/Provider/OPENAI/TogetherAI.py +57 -48
  18. webscout/Provider/OPENAI/TwoAI.py +94 -1
  19. webscout/Provider/OPENAI/__init__.py +1 -3
  20. webscout/Provider/OPENAI/autoproxy.py +1 -1
  21. webscout/Provider/OPENAI/copilot.py +73 -26
  22. webscout/Provider/OPENAI/deepinfra.py +60 -24
  23. webscout/Provider/OPENAI/exachat.py +9 -5
  24. webscout/Provider/OPENAI/monochat.py +3 -3
  25. webscout/Provider/OPENAI/netwrck.py +4 -7
  26. webscout/Provider/OPENAI/qodo.py +630 -0
  27. webscout/Provider/OPENAI/scirachat.py +86 -49
  28. webscout/Provider/OPENAI/textpollinations.py +19 -14
  29. webscout/Provider/OPENAI/venice.py +1 -0
  30. webscout/Provider/Perplexitylabs.py +163 -147
  31. webscout/Provider/Qodo.py +478 -0
  32. webscout/Provider/TTI/__init__.py +1 -0
  33. webscout/Provider/TTI/monochat.py +3 -3
  34. webscout/Provider/TTI/together.py +7 -6
  35. webscout/Provider/TTI/venice.py +368 -0
  36. webscout/Provider/TextPollinationsAI.py +19 -14
  37. webscout/Provider/TogetherAI.py +57 -44
  38. webscout/Provider/TwoAI.py +96 -2
  39. webscout/Provider/TypliAI.py +33 -27
  40. webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
  41. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  42. webscout/Provider/Venice.py +1 -0
  43. webscout/Provider/WiseCat.py +18 -20
  44. webscout/Provider/__init__.py +4 -10
  45. webscout/Provider/copilot.py +58 -61
  46. webscout/Provider/freeaichat.py +64 -55
  47. webscout/Provider/monochat.py +275 -0
  48. webscout/Provider/scira_chat.py +115 -21
  49. webscout/Provider/toolbaz.py +5 -10
  50. webscout/Provider/typefully.py +1 -11
  51. webscout/Provider/x0gpt.py +325 -315
  52. webscout/__init__.py +4 -11
  53. webscout/auth/__init__.py +19 -4
  54. webscout/auth/api_key_manager.py +189 -189
  55. webscout/auth/auth_system.py +25 -40
  56. webscout/auth/config.py +105 -6
  57. webscout/auth/database.py +377 -22
  58. webscout/auth/models.py +185 -130
  59. webscout/auth/request_processing.py +175 -11
  60. webscout/auth/routes.py +119 -5
  61. webscout/auth/server.py +9 -2
  62. webscout/auth/simple_logger.py +236 -0
  63. webscout/sanitize.py +1074 -0
  64. webscout/version.py +1 -1
  65. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
  66. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
  67. webscout/Provider/AI21.py +0 -177
  68. webscout/Provider/HuggingFaceChat.py +0 -469
  69. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  70. webscout/Provider/OPENAI/freeaichat.py +0 -363
  71. webscout/Provider/OPENAI/typegpt.py +0 -368
  72. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  73. webscout/Provider/WritingMate.py +0 -273
  74. webscout/Provider/typegpt.py +0 -284
  75. webscout/Provider/uncovr.py +0 -333
  76. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
  77. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
  78. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
  79. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
@@ -93,49 +93,49 @@ class LambdaChat(Provider):
  self.session.proxies = proxies # Assign proxies directly

  def create_conversation(self, model: str):
- """Create a new conversation with the specified model."""
+ """Create a new conversation with the specified model, using updated headers and cookies."""
  url = f"{self.url}/conversation"
  payload = {
  "model": model,
- "preprompt": self.system_prompt,
-
+ "preprompt": self.system_prompt
  }
-
- # Update referer for this specific request
+
+ # Update headers for this specific request
  headers = self.headers.copy()
- headers["Referer"] = f"{self.url}/models/{model}"
-
+ headers["Referer"] = f"{self.url}/"
+ # Add browser-like headers for best compatibility
+ headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+ headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+ headers["Sec-GPC"] = "1"
+ headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+ headers["Sec-Ch-Ua-Mobile"] = "?0"
+ headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+ headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+ headers["Origin"] = self.url
+ # cookies are handled by curl_cffi session automatically
+
  try:
- # Use curl_cffi session post with impersonate
  response = self.session.post(
- url,
- json=payload,
- headers=headers, # Use updated headers with specific Referer
- impersonate="chrome110" # Use a common impersonation profile
+ url,
+ json=payload,
+ headers=headers,
+ impersonate="chrome110"
  )
-
  if response.status_code == 401:
  raise exceptions.AuthenticationError("Authentication failed.")
-
- # Handle other error codes
  if response.status_code != 200:
  return None
-
  data = response.json()
  conversation_id = data.get("conversationId")
-
- # Store conversation data
  if model not in self._conversation_data:
  self._conversation_data[model] = {
  "conversationId": conversation_id,
- "messageId": str(uuid.uuid4()) # Initial message ID
+ "messageId": str(uuid.uuid4())
  }
-
  return conversation_id
- except CurlError as e: # Catch CurlError
- # Log or handle CurlError specifically if needed
+ except CurlError:
  return None
- except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
+ except Exception:
  return None

  def fetch_message_id(self, conversation_id: str) -> str:
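
Worth noting in this hunk: the conversation request now carries a full set of browser fingerprint headers, while the TLS-level disguise is left to curl_cffi's impersonate profile. A minimal sketch of that pattern (endpoint, payload, and header values are placeholders, not LambdaChat's real API):

```python
# Minimal sketch of the request pattern the hunk adopts: browser-like
# headers plus curl_cffi TLS impersonation. URL and payload are placeholders.
from curl_cffi.requests import Session

session = Session()
headers = {
    "Origin": "https://example.com",
    "Referer": "https://example.com/",
    "Accept-Language": "en-US,en;q=0.9",
    "Sec-GPC": "1",
}
response = session.post(
    "https://example.com/conversation",  # placeholder endpoint
    json={"model": "some-model", "preprompt": "You are helpful."},
    headers=headers,
    impersonate="chrome110",  # same impersonation profile as the diff
)
conversation_id = (
    response.json().get("conversationId") if response.status_code == 200 else None
)
print(conversation_id)
```
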
@@ -230,35 +230,43 @@ class LambdaChat(Provider):
  url = f"{self.url}/conversation/{conversation_id}"
  message_id = self._conversation_data[model]["messageId"]

- # Data to send
+ # Data to send (tools should be empty list by default)
  request_data = {
  "inputs": prompt,
  "id": message_id,
  "is_retry": False,
  "is_continue": False,
  "web_search": web_search,
- "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+ "tools": []
  }
-
+
  # Update headers for this specific request
  headers = self.headers.copy()
  headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
-
+ headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+ headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+ headers["Sec-GPC"] = "1"
+ headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+ headers["Sec-Ch-Ua-Mobile"] = "?0"
+ headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+ headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+ headers["Origin"] = self.url
+
  # Create multipart form data
  boundary = self.generate_boundary()
  multipart_headers = headers.copy()
  multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
-
+
  # Serialize the data to JSON
  data_json = json.dumps(request_data, separators=(',', ':'))
-
+
  # Create the multipart form data body
  body = f"--{boundary}\r\n"
  body += f'Content-Disposition: form-data; name="data"\r\n'
- body += f"Content-Type: application/json\r\n\r\n"
+ body += f"\r\n"
  body += f"{data_json}\r\n"
  body += f"--{boundary}--\r\n"
-
+
  multipart_headers["Content-Length"] = str(len(body))

  def for_stream():
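
Besides the hard-coded tool IDs giving way to an empty list, the JSON part of the multipart body loses its Content-Type sub-header. A self-contained sketch of the body this code now produces (boundary and payload values are illustrative):

```python
# Sketch of the multipart/form-data body assembled above; the boundary is
# illustrative (the provider generates its own), the payload mirrors the diff.
import json

boundary = "----WebscoutIllustrativeBoundary"
request_data = {
    "inputs": "Hello there",
    "id": "00000000-0000-0000-0000-000000000000",  # placeholder message id
    "is_retry": False,
    "is_continue": False,
    "web_search": False,
    "tools": [],  # now an empty list by default
}
data_json = json.dumps(request_data, separators=(',', ':'))

body = (
    f"--{boundary}\r\n"
    'Content-Disposition: form-data; name="data"\r\n'
    "\r\n"  # the Content-Type sub-header was dropped in this release
    f"{data_json}\r\n"
    f"--{boundary}--\r\n"
)
headers = {
    "Content-Type": f"multipart/form-data; boundary={boundary}",
    "Content-Length": str(len(body)),
}
print(body)
```
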
@@ -14,23 +14,20 @@ class Netwrck(Provider):
  greeting = """Hello! I'm a helpful assistant. How can I help you today?"""

  AVAILABLE_MODELS = [
- "neversleep/llama-3-lumimaid-8b:extended",
- "x-ai/grok-2",
- "anthropic/claude-3-7-sonnet-20250219",
+ "thedrummer/valkyrie-49b-v1",
  "sao10k/l3-euryale-70b",
+ "deepseek/deepseek-chat",
+ "deepseek/deepseek-r1",
+ "anthropic/claude-sonnet-4-20250514",
  "openai/gpt-4.1-mini",
  "gryphe/mythomax-l2-13b",
- "google/gemini-pro-1.5",
  "google/gemini-2.5-flash-preview-04-17",
  "nvidia/llama-3.1-nemotron-70b-instruct",
- "deepseek/deepseek-r1",
- "deepseek/deepseek-chat"
-
  ]

  def __init__(
  self,
- model: str = "anthropic/claude-3-7-sonnet-20250219",
+ model: str = "anthropic/claude-sonnet-4-20250514",
  is_conversation: bool = True,
  max_tokens: int = 4096, # Note: max_tokens is not used by this API
  timeout: int = 30,
@@ -1,14 +1,13 @@
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict, List, Optional, Union
- import ollama
- from ollama import AsyncClient, Client, ResponseError
- import asyncio
- import base64
- from pathlib import Path
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from typing import AsyncGenerator, Dict, List, Optional, Union
+
+ try:
+ from ollama import AsyncClient, Client, ResponseError
+ except ImportError as e:
+ pass

  class OLLAMA(Provider):
  def __init__(
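
The rewritten import block turns ollama into an optional dependency, although the `as e` binding is never used. A sketch of the same guard extended with an availability flag, so a missing package fails loudly at use time rather than silently (names here are hypothetical, not webscout's):

```python
# Sketch of the optional-dependency guard, extended with an availability
# flag so callers get a clear error at use time. Names are hypothetical.
try:
    from ollama import AsyncClient, Client, ResponseError
    OLLAMA_AVAILABLE = True
except ImportError:
    OLLAMA_AVAILABLE = False

def require_ollama() -> None:
    """Raise a descriptive error when the optional `ollama` package is missing."""
    if not OLLAMA_AVAILABLE:
        raise ImportError(
            "The OLLAMA provider needs the optional `ollama` package: pip install ollama"
        )
```
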
@@ -39,7 +39,6 @@ Currently, the following providers are implemented with OpenAI-compatible interfaces:
  - TypeGPT
  - SciraChat
  - LLMChatCo
- - FreeAIChat
  - YEPCHAT
  - HeckAI
  - SonusAI
@@ -73,6 +72,7 @@ Currently, the following providers are implemented with OpenAI-compatible interfaces:
  - MonoChat
  - Friendli
  - MiniMax
+ - QodoAI

  ## 💻 Usage Examples

@@ -199,68 +199,77 @@ class Chat(BaseChat):
  self.completions = Completions(client)


- class TogetherAI(OpenAICompatibleProvider):
- """
- OpenAI-compatible client for TogetherAI API.
- """
  class TogetherAI(OpenAICompatibleProvider):
  """
  OpenAI-compatible client for TogetherAI API.
  """
  AVAILABLE_MODELS = [
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
- "Qwen/QwQ-32B",
- "Qwen/Qwen2-72B-Instruct",
- "Qwen/Qwen2-VL-72B-Instruct",
- "Qwen/Qwen2.5-72B-Instruct-Turbo",
+ "mistralai/Mistral-7B-Instruct-v0.3",
+ "togethercomputer/MoA-1",
  "Qwen/Qwen2.5-7B-Instruct-Turbo",
+ "meta-llama/Llama-3-8b-chat-hf",
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ "togethercomputer/MoA-1-Turbo",
+ "eddiehou/meta-llama/Llama-3.1-405B",
+ "mistralai/Mistral-7B-Instruct-v0.2",
+ "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+ "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+ "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+ "meta-llama/Llama-3.3-70B-Instruct-Turbo",
  "Qwen/Qwen2.5-VL-72B-Instruct",
- "Qwen/Qwen3-235B-A22B-fp8-tput",
- "Salesforce/Llama-Rank-V1",
- "arcee-ai/arcee-blitz",
- "arcee-ai/caller",
- "arcee-ai/coder-large",
+ "arcee-ai/AFM-4.5B-Preview",
+ "lgai/exaone-3-5-32b-instruct",
+ "meta-llama/Llama-3-70b-chat-hf",
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "google/gemma-2-27b-it",
+ "Qwen/Qwen2-72B-Instruct",
+ "mistralai/Mistral-Small-24B-Instruct-2501",
+ "Qwen/Qwen2-VL-72B-Instruct",
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "meta-llama/Llama-Vision-Free",
+ "perplexity-ai/r1-1776",
+ "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
  "arcee-ai/maestro-reasoning",
- "arcee-ai/virtuoso-large",
- "arcee-ai/virtuoso-medium-v2",
+ "togethercomputer/Refuel-Llm-V2-Small",
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
+ "arcee-ai/coder-large",
+ "Qwen/QwQ-32B",
  "arcee_ai/arcee-spotlight",
- "blackbox/meta-llama-3-1-8b",
- "deepseek-ai/DeepSeek-R1",
- "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
- "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
- "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
- "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
- "deepseek-ai/DeepSeek-V3",
- "google/gemma-2-27b-it",
- "lgai/exaone-3-5-32b-instruct",
- "lgai/exaone-deep-32b",
+ "deepseek-ai/DeepSeek-R1-0528-tput",
  "marin-community/marin-8b-instruct",
- "meta-llama-llama-2-70b-hf",
- "meta-llama/Llama-2-70b-hf",
- "meta-llama/Llama-3-8b-chat-hf",
- "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
- "meta-llama/Llama-3.2-3B-Instruct-Turbo",
- "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
- "meta-llama/Llama-3.3-70B-Instruct-Turbo",
- "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+ "lgai/exaone-deep-32b",
+ "google/gemma-3-27b-it",
+ "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "mistralai/Mistral-7B-Instruct-v0.1",
  "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
- "meta-llama/Llama-4-Scout-17B-16E-Instruct",
- "meta-llama/Llama-Vision-Free",
- "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+ "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
+ "scb10x/scb10x-typhoon-2-1-gemma3-12b",
+ "togethercomputer/Refuel-Llm-V2",
+ "Qwen/Qwen2.5-72B-Instruct-Turbo",
  "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
- "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+ "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+ "deepseek-ai/DeepSeek-V3",
+ "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
  "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ "Qwen/Qwen3-32B-FP8",
+ "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+ "arcee-ai/virtuoso-large",
+ "google/gemma-3n-E4B-it",
+ "moonshotai/Kimi-K2-Instruct",
  "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
- "mistralai/Mistral-7B-Instruct-v0.1",
- "mistralai/Mistral-7B-Instruct-v0.2",
- "mistralai/Mistral-7B-Instruct-v0.3",
- "mistralai/Mistral-Small-24B-Instruct-2501",
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
- "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
- "perplexity-ai/r1-1776",
- "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
- "scb10x/scb10x-typhoon-2-1-gemma3-12b",
- "togethercomputer/Refuel-Llm-V2-Small",
+ "deepseek-ai/DeepSeek-R1",
+ "Qwen/Qwen3-235B-A22B-fp8-tput",
+ "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
+ "Rrrr/nim/nvidia/llama-3.3-nemotron-super-49b-v1-de6a6453",
+ "Rrrr/mistralai/Devstral-Small-2505-306f5881",
+ "Qwen/Qwen3-235B-A22B-Thinking-2507",
+ "Rrrr/ChatGPT-5",
+ "Rrrr/MeowGPT-3.5",
+ "blackbox/meta-llama-3-1-8b"
  ]

  def __init__(self, browser: str = "chrome"):
@@ -5,6 +5,9 @@ import time
  import uuid
  import re
  import urllib.parse
+ import os
+ import pickle
+ import tempfile
  from typing import List, Dict, Optional, Union, Generator, Any

  from webscout.Extra.tempmail import get_random_email
@@ -208,6 +211,96 @@ class TwoAI(OpenAICompatibleProvider):
  """OpenAI-compatible client for the TwoAI API."""

  AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]
+
+ # Class-level cache for API keys
+ _api_key_cache = None
+ _cache_file = os.path.join(tempfile.gettempdir(), "webscout_twoai_openai_cache.pkl")
+
+ @classmethod
+ def _load_cached_api_key(cls) -> Optional[str]:
+ """Load cached API key from file."""
+ try:
+ if os.path.exists(cls._cache_file):
+ with open(cls._cache_file, 'rb') as f:
+ cache_data = pickle.load(f)
+ # Check if cache is not too old (24 hours)
+ if time.time() - cache_data.get('timestamp', 0) < 86400:
+ return cache_data.get('api_key')
+ except Exception:
+ # If cache is corrupted or unreadable, ignore and regenerate
+ pass
+ return None
+
+ @classmethod
+ def _save_cached_api_key(cls, api_key: str):
+ """Save API key to cache file."""
+ try:
+ cache_data = {
+ 'api_key': api_key,
+ 'timestamp': time.time()
+ }
+ with open(cls._cache_file, 'wb') as f:
+ pickle.dump(cache_data, f)
+ except Exception:
+ # If caching fails, continue without caching
+ pass
+
+ @classmethod
+ def _validate_api_key(cls, api_key: str) -> bool:
+ """Validate if an API key is still working."""
+ try:
+ session = Session()
+ headers = {
+ 'User-Agent': LitAgent().random(),
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'Authorization': f'Bearer {api_key}',
+ }
+
+ # Test with a simple request
+ test_payload = {
+ "messages": [{"role": "user", "content": "test"}],
+ "model": "sutra-v2",
+ "max_tokens": 1,
+ "stream": False
+ }
+
+ response = session.post(
+ "https://api.two.ai/v2/chat/completions",
+ headers=headers,
+ json=test_payload,
+ timeout=10,
+ impersonate="chrome120"
+ )
+
+ # If we get a 200 or 400 (bad request but auth worked), key is valid
+ # If we get 401/403, key is invalid
+ return response.status_code not in [401, 403]
+ except Exception:
+ # If validation fails, assume key is invalid
+ return False
+
+ @classmethod
+ def get_cached_api_key(cls) -> str:
+ """Get a cached API key or generate a new one if needed."""
+ # First check class-level cache
+ if cls._api_key_cache:
+ if cls._validate_api_key(cls._api_key_cache):
+ return cls._api_key_cache
+ else:
+ cls._api_key_cache = None
+
+ # Then check file cache
+ cached_key = cls._load_cached_api_key()
+ if cached_key and cls._validate_api_key(cached_key):
+ cls._api_key_cache = cached_key
+ return cached_key
+
+ # Generate new key if no valid cached key
+ new_key = cls.generate_api_key()
+ cls._api_key_cache = new_key
+ cls._save_cached_api_key(new_key)
+ return new_key

  @staticmethod
  def generate_api_key() -> str:
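
Taken together, the new classmethods give key acquisition a three-step fallback: the in-process class attribute, then a pickle file in the system temp directory (honored for 24 hours and re-validated with a one-token test call), then a freshly generated key. The TTL-file-cache core of that flow, isolated as a runnable sketch (the file name and generate_key are placeholders for the class's own pieces):

```python
# The TTL file-cache pattern in isolation. CACHE_FILE and generate_key()
# are placeholders; the real class additionally re-validates cached keys
# against the API before trusting them.
import os
import pickle
import tempfile
import time
from typing import Optional

CACHE_FILE = os.path.join(tempfile.gettempdir(), "example_api_key_cache.pkl")
TTL_SECONDS = 86400  # 24 hours, matching the diff

def load_cached_key() -> Optional[str]:
    try:
        with open(CACHE_FILE, "rb") as f:
            data = pickle.load(f)
        if time.time() - data.get("timestamp", 0) < TTL_SECONDS:
            return data.get("api_key")
    except Exception:
        pass  # missing or corrupt cache: fall through and regenerate
    return None

def save_cached_key(api_key: str) -> None:
    try:
        with open(CACHE_FILE, "wb") as f:
            pickle.dump({"api_key": api_key, "timestamp": time.time()}, f)
    except Exception:
        pass  # caching is best-effort

def generate_key() -> str:
    return "fresh-key"  # stand-in for TwoAI.generate_api_key()

def get_key() -> str:
    key = load_cached_key()
    if key is None:
        key = generate_key()
        save_cached_key(key)
    return key
```
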
@@ -302,7 +395,7 @@ class TwoAI(OpenAICompatibleProvider):
  return api_key

  def __init__(self, browser: str = "chrome"):
- api_key = self.generate_api_key()
+ api_key = self.get_cached_api_key()
  self.timeout = 30
  self.base_url = "https://api.two.ai/v2/chat/completions"
  self.api_key = api_key
@@ -6,9 +6,7 @@ from .x0gpt import *
  from .wisecat import *
  from .venice import *
  from .exaai import *
- from .typegpt import *
  from .scirachat import *
- from .freeaichat import *
  from .llmchatco import *
  from .yep import * # Add YEPCHAT
  from .heckai import *
@@ -18,7 +16,6 @@ from .netwrck import *
  from .standardinput import *
  from .writecream import *
  from .toolbaz import *
- from .uncovrAI import *
  from .opkfc import *
  from .chatgpt import *
  from .textpollinations import *
@@ -46,6 +43,7 @@ from .GeminiProxy import * # Add GeminiProxy provider
  from .friendli import *
  from .monochat import *
  from .MiniMax import * # Add MiniMaxAI provider
+ from .qodo import * # Add QodoAI provider
  # Export auto-proxy functionality
  from .autoproxy import (
  get_auto_proxy,
@@ -32,7 +32,7 @@ _proxy_cache = {
  'cache_duration': 300 # 5 minutes
  }

- PROXY_SOURCE_URL = "http://207.180.209.185:5000/ips.txt"
+ PROXY_SOURCE_URL = "https://proxies.typegpt.net/ips.txt"

  # --- Static Proxy Lists ---
  # NordVPN proxies (format: https://host:port:user:pass)
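
Only the proxy source URL changes in this hunk; the module keeps the 5-minute in-memory cache visible at the top. A sketch of that fetch-with-TTL shape (the function name and the cache's other keys are assumptions, not read from the diff):

```python
# Sketch of the fetch-with-TTL shape around PROXY_SOURCE_URL. The function
# name and the cache's other keys are assumptions; only 'cache_duration'
# and the URL come from the diff.
import time
import requests

PROXY_SOURCE_URL = "https://proxies.typegpt.net/ips.txt"
_proxy_cache = {"proxies": [], "last_fetch": 0.0, "cache_duration": 300}

def get_proxy_list() -> list:
    now = time.time()
    if now - _proxy_cache["last_fetch"] < _proxy_cache["cache_duration"]:
        return _proxy_cache["proxies"]  # cache still fresh
    text = requests.get(PROXY_SOURCE_URL, timeout=10).text
    _proxy_cache["proxies"] = [line.strip() for line in text.splitlines() if line.strip()]
    _proxy_cache["last_fetch"] = now
    return _proxy_cache["proxies"]
```
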
@@ -86,14 +86,8 @@ class Completions(BaseCompletions):
  raise RuntimeError(f"Image upload failed: {r.text}")
  images.append({"type": "image", "url": r.json().get("url")})

- # Connect to websocket
- # Note: ws_connect might not use timeout in the same way as POST/GET
  ws = s.ws_connect(self._client.websocket_url)
-
- # Use model to set mode ("reasoning" for Think Deeper)
  mode = "reasoning" if "Think" in model else "chat"
-
- # Send the message to Copilot
  ws.send(json.dumps({
  "event": "send",
  "conversationId": conv_id,
@@ -101,79 +95,132 @@ class Completions(BaseCompletions):
  "mode": mode
  }).encode(), CurlWsFlag.TEXT)

- # Track token usage using count_tokens
  prompt_tokens = count_tokens(prompt_text)
  completion_tokens = 0
  total_tokens = prompt_tokens
-
  started = False
+ image_prompt = None
  while True:
  try:
  msg = json.loads(ws.recv()[0])
  except Exception:
  break

- if msg.get("event") == "appendText":
+ event = msg.get("event")
+ if event not in ["appendText", "done", "error", "generatingImage", "imageGenerated", "suggestedFollowups", "replaceText"]:
+ print(f"[Copilot] Unhandled event: {event} | msg: {msg}")
+
+ if event == "appendText":
  started = True
  content = msg.get("text", "")
-
- # Update token counts using count_tokens
  content_tokens = count_tokens(content)
  completion_tokens += content_tokens
  total_tokens = prompt_tokens + completion_tokens
-
- # Create the delta object
  delta = ChoiceDelta(
  content=content,
  role="assistant"
  )
-
- # Create the choice object
  choice = Choice(
  index=0,
  delta=delta,
  finish_reason=None
  )
-
- # Create the chunk object
  chunk = ChatCompletionChunk(
  id=request_id,
  choices=[choice],
  created=created_time,
  model=model
  )
-
  yield chunk
- elif msg.get("event") == "done":
- # Final chunk with finish_reason
+ elif event == "replaceText":
+ # treat as appendText for OpenAI compatibility
+ content = msg.get("text", "")
+ content_tokens = count_tokens(content)
+ completion_tokens += content_tokens
+ total_tokens = prompt_tokens + completion_tokens
+ delta = ChoiceDelta(
+ content=content,
+ role="assistant"
+ )
+ choice = Choice(
+ index=0,
+ delta=delta,
+ finish_reason=None
+ )
+ chunk = ChatCompletionChunk(
+ id=request_id,
+ choices=[choice],
+ created=created_time,
+ model=model
+ )
+ yield chunk
+ elif event == "generatingImage":
+ image_prompt = msg.get("prompt")
+ elif event == "imageGenerated":
+ # Yield a chunk with image metadata in the delta (custom extension)
+ delta = ChoiceDelta(
+ content=None,
+ role=None
+ )
+ choice = Choice(
+ index=0,
+ delta=delta,
+ finish_reason=None
+ )
+ chunk = ChatCompletionChunk(
+ id=request_id,
+ choices=[choice],
+ created=created_time,
+ model=model
+ )
+ chunk.image_url = msg.get("url")
+ chunk.image_prompt = image_prompt
+ chunk.image_preview = msg.get("thumbnailUrl")
+ yield chunk
+ elif event == "suggestedFollowups":
+ # Yield a chunk with followups in the delta (custom extension)
+ delta = ChoiceDelta(
+ content=None,
+ role=None
+ )
+ choice = Choice(
+ index=0,
+ delta=delta,
+ finish_reason=None
+ )
+ chunk = ChatCompletionChunk(
+ id=request_id,
+ choices=[choice],
+ created=created_time,
+ model=model
+ )
+ chunk.suggested_followups = msg.get("suggestions")
+ yield chunk
+ elif event == "done":
  delta = ChoiceDelta(
  content=None,
  role=None
  )
-
  choice = Choice(
  index=0,
  delta=delta,
  finish_reason="stop"
  )
-
  chunk = ChatCompletionChunk(
  id=request_id,
  choices=[choice],
  created=created_time,
  model=model
  )
-
  yield chunk
  break
- elif msg.get("event") == "error":
+ elif event == "error":
+ print(f"[Copilot] Error event: {msg}")
  raise RuntimeError(f"Copilot error: {msg}")

  ws.close()
-
  if not started:
  raise RuntimeError("No response received from Copilot")
-
  except Exception as e:
  raise RuntimeError(f"Stream error: {e}") from e
  finally:
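
Because imageGenerated and suggestedFollowups arrive as extra attributes bolted onto otherwise empty chunks rather than as standard delta fields, downstream code has to probe for them. A consumer sketch (the stream source is assumed; the attribute names match those set in the diff):

```python
# Consumer sketch for the custom chunk extensions above. The stream source
# is assumed; image_url, image_prompt, and suggested_followups are exactly
# the attributes the diff attaches to ChatCompletionChunk objects.
from typing import Any, Iterable

def consume(stream: Iterable[Any]) -> None:
    """Print text deltas and surface Copilot's non-standard chunk fields."""
    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)
        if getattr(chunk, "image_url", None):
            print(f"\n[image] {chunk.image_url} (prompt: {chunk.image_prompt})")
        if getattr(chunk, "suggested_followups", None):
            print(f"\n[followups] {chunk.suggested_followups}")
```
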