webscout 8.3.4__py3-none-any.whl → 8.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (98)
  1. webscout/AIutel.py +52 -1016
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
  5. webscout/Provider/AISEARCH/__init__.py +11 -10
  6. webscout/Provider/AISEARCH/felo_search.py +7 -3
  7. webscout/Provider/AISEARCH/scira_search.py +2 -0
  8. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  9. webscout/Provider/Deepinfra.py +13 -1
  10. webscout/Provider/Flowith.py +6 -1
  11. webscout/Provider/GithubChat.py +1 -0
  12. webscout/Provider/GptOss.py +207 -0
  13. webscout/Provider/Kimi.py +445 -0
  14. webscout/Provider/Netwrck.py +3 -6
  15. webscout/Provider/OPENAI/README.md +2 -1
  16. webscout/Provider/OPENAI/TogetherAI.py +12 -8
  17. webscout/Provider/OPENAI/TwoAI.py +94 -1
  18. webscout/Provider/OPENAI/__init__.py +4 -4
  19. webscout/Provider/OPENAI/copilot.py +20 -4
  20. webscout/Provider/OPENAI/deepinfra.py +12 -0
  21. webscout/Provider/OPENAI/e2b.py +60 -8
  22. webscout/Provider/OPENAI/flowith.py +4 -3
  23. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  24. webscout/Provider/OPENAI/gptoss.py +288 -0
  25. webscout/Provider/OPENAI/kimi.py +469 -0
  26. webscout/Provider/OPENAI/netwrck.py +8 -12
  27. webscout/Provider/OPENAI/refact.py +274 -0
  28. webscout/Provider/OPENAI/scirachat.py +4 -0
  29. webscout/Provider/OPENAI/textpollinations.py +11 -10
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/OPENAI/venice.py +1 -0
  32. webscout/Provider/Perplexitylabs.py +163 -147
  33. webscout/Provider/Qodo.py +30 -6
  34. webscout/Provider/TTI/__init__.py +1 -0
  35. webscout/Provider/TTI/bing.py +14 -2
  36. webscout/Provider/TTI/together.py +11 -9
  37. webscout/Provider/TTI/venice.py +368 -0
  38. webscout/Provider/TTS/README.md +0 -1
  39. webscout/Provider/TTS/__init__.py +0 -1
  40. webscout/Provider/TTS/base.py +479 -159
  41. webscout/Provider/TTS/deepgram.py +409 -156
  42. webscout/Provider/TTS/elevenlabs.py +425 -111
  43. webscout/Provider/TTS/freetts.py +317 -140
  44. webscout/Provider/TTS/gesserit.py +192 -128
  45. webscout/Provider/TTS/murfai.py +248 -113
  46. webscout/Provider/TTS/openai_fm.py +347 -129
  47. webscout/Provider/TTS/speechma.py +620 -586
  48. webscout/Provider/TextPollinationsAI.py +11 -10
  49. webscout/Provider/TogetherAI.py +12 -4
  50. webscout/Provider/TwoAI.py +96 -2
  51. webscout/Provider/TypliAI.py +33 -27
  52. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  53. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  54. webscout/Provider/Venice.py +1 -0
  55. webscout/Provider/WiseCat.py +18 -20
  56. webscout/Provider/__init__.py +2 -96
  57. webscout/Provider/cerebras.py +83 -33
  58. webscout/Provider/copilot.py +42 -23
  59. webscout/Provider/scira_chat.py +4 -0
  60. webscout/Provider/toolbaz.py +6 -10
  61. webscout/Provider/typefully.py +1 -11
  62. webscout/__init__.py +3 -15
  63. webscout/auth/__init__.py +19 -4
  64. webscout/auth/api_key_manager.py +189 -189
  65. webscout/auth/auth_system.py +25 -40
  66. webscout/auth/config.py +105 -6
  67. webscout/auth/database.py +377 -22
  68. webscout/auth/models.py +185 -130
  69. webscout/auth/request_processing.py +175 -11
  70. webscout/auth/routes.py +99 -2
  71. webscout/auth/server.py +9 -2
  72. webscout/auth/simple_logger.py +236 -0
  73. webscout/conversation.py +22 -20
  74. webscout/sanitize.py +1078 -0
  75. webscout/scout/README.md +20 -23
  76. webscout/scout/core/crawler.py +125 -38
  77. webscout/scout/core/scout.py +26 -5
  78. webscout/version.py +1 -1
  79. webscout/webscout_search.py +13 -6
  80. webscout/webscout_search_async.py +10 -8
  81. webscout/yep_search.py +13 -5
  82. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
  83. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
  84. webscout/Provider/Glider.py +0 -225
  85. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  86. webscout/Provider/OPENAI/c4ai.py +0 -394
  87. webscout/Provider/OPENAI/glider.py +0 -330
  88. webscout/Provider/OPENAI/typegpt.py +0 -368
  89. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  90. webscout/Provider/TTS/sthir.py +0 -94
  91. webscout/Provider/WritingMate.py +0 -273
  92. webscout/Provider/typegpt.py +0 -284
  93. webscout/Provider/uncovr.py +0 -333
  94. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  95. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
  96. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
  97. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
  98. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
@@ -15,22 +15,23 @@ class TextPollinationsAI(Provider):
     """
 
     AVAILABLE_MODELS = [
+        "deepseek-reasoning",
+        "glm",
+        "gpt-5-nano",
+        "llama-fast-roblox",
+        "llama-roblox",
+        "llamascout",
+        "mistral",
+        "mistral-nemo-roblox",
+        "mistral-roblox",
+        "nova-fast",
         "openai",
+        "openai-audio",
         "openai-fast",
         "openai-large",
-        "openai-reasoning",
         "openai-roblox",
-        "openai-audio",
-        "deepseek",
-        "deepseek-reasoning",
-        "grok",
-        "llamascout",
-        "mistral",
-        "phi",
         "qwen-coder",
-        "searchgpt",
         "bidara",
-        "elixposearch",
         "evil",
         "hypnosis-tracy",
         "midijourney",
@@ -21,29 +21,35 @@ class TogetherAI(Provider):
         "Qwen/Qwen2-VL-72B-Instruct",
         "Qwen/Qwen2.5-72B-Instruct-Turbo",
         "Qwen/Qwen2.5-7B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
         "Qwen/Qwen2.5-VL-72B-Instruct",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
         "Salesforce/Llama-Rank-V1",
-        "arcee-ai/arcee-blitz",
-        "arcee-ai/caller",
+        "Virtue-AI/VirtueGuard-Text-Lite",
+        "arcee-ai/AFM-4.5B",
         "arcee-ai/coder-large",
         "arcee-ai/maestro-reasoning",
         "arcee-ai/virtuoso-large",
-        "arcee-ai/virtuoso-medium-v2",
         "arcee_ai/arcee-spotlight",
         "blackbox/meta-llama-3-1-8b",
+        "deepcogito/cogito-v2-preview-deepseek-671b",
         "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-0528-tput",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-V3",
         "google/gemma-2-27b-it",
+        "google/gemma-3n-E4B-it",
         "lgai/exaone-3-5-32b-instruct",
         "lgai/exaone-deep-32b",
         "marin-community/marin-8b-instruct",
-        "meta-llama-llama-2-70b-hf",
         "meta-llama/Llama-2-70b-hf",
+        "meta-llama/Llama-3-70b-chat-hf",
         "meta-llama/Llama-3-8b-chat-hf",
         "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
         "meta-llama/Llama-3.2-3B-Instruct-Turbo",
@@ -63,11 +69,13 @@ class TogetherAI(Provider):
         "mistralai/Mistral-7B-Instruct-v0.3",
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "moonshotai/Kimi-K2-Instruct",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "perplexity-ai/r1-1776",
         "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
         "scb10x/scb10x-typhoon-2-1-gemma3-12b",
         "togethercomputer/Refuel-Llm-V2-Small",
+        "zai-org/GLM-4.5-Air-FP8"
     ]
 
     @staticmethod
@@ -3,6 +3,9 @@ from curl_cffi import CurlError
 import json
 import base64
 import time
+import os
+import pickle
+import tempfile
 from typing import Any, Dict, Optional, Generator, Union
 import re  # Import re for parsing SSE
 import urllib.parse
@@ -25,12 +28,103 @@ class TwoAI(Provider):
 
     API keys can be generated using the generate_api_key() method, which uses a temporary email
     to register for the Two AI service and extract the API key from the confirmation email.
+    API keys are cached to avoid regenerating them on every initialization.
     """
 
     AVAILABLE_MODELS = [
         "sutra-v2",  # Multilingual AI model for instruction execution and conversational intelligence
         "sutra-r0",  # Advanced reasoning model for complex problem-solving and deep contextual understanding
     ]
+
+    # Class-level cache for API keys
+    _api_key_cache = None
+    _cache_file = os.path.join(tempfile.gettempdir(), "webscout_twoai_cache.pkl")
+
+    @classmethod
+    def _load_cached_api_key(cls) -> Optional[str]:
+        """Load cached API key from file."""
+        try:
+            if os.path.exists(cls._cache_file):
+                with open(cls._cache_file, 'rb') as f:
+                    cache_data = pickle.load(f)
+                # Check if cache is not too old (24 hours)
+                if time.time() - cache_data.get('timestamp', 0) < 86400:
+                    return cache_data.get('api_key')
+        except Exception:
+            # If cache is corrupted or unreadable, ignore and regenerate
+            pass
+        return None
+
+    @classmethod
+    def _save_cached_api_key(cls, api_key: str):
+        """Save API key to cache file."""
+        try:
+            cache_data = {
+                'api_key': api_key,
+                'timestamp': time.time()
+            }
+            with open(cls._cache_file, 'wb') as f:
+                pickle.dump(cache_data, f)
+        except Exception:
+            # If caching fails, continue without caching
+            pass
+
+    @classmethod
+    def _validate_api_key(cls, api_key: str) -> bool:
+        """Validate if an API key is still working."""
+        try:
+            session = Session()
+            headers = {
+                'User-Agent': LitAgent().random(),
+                'Accept': 'application/json',
+                'Content-Type': 'application/json',
+                'Authorization': f'Bearer {api_key}',
+            }
+
+            # Test with a simple request
+            test_payload = {
+                "messages": [{"role": "user", "content": "test"}],
+                "model": "sutra-v2",
+                "max_tokens": 1,
+                "stream": False
+            }
+
+            response = session.post(
+                "https://api.two.ai/v2/chat/completions",
+                headers=headers,
+                json=test_payload,
+                timeout=10,
+                impersonate="chrome120"
+            )
+
+            # If we get a 200 or 400 (bad request but auth worked), key is valid
+            # If we get 401/403, key is invalid
+            return response.status_code not in [401, 403]
+        except Exception:
+            # If validation fails, assume key is invalid
+            return False
+
+    @classmethod
+    def get_cached_api_key(cls) -> str:
+        """Get a cached API key or generate a new one if needed."""
+        # First check class-level cache
+        if cls._api_key_cache:
+            if cls._validate_api_key(cls._api_key_cache):
+                return cls._api_key_cache
+            else:
+                cls._api_key_cache = None
+
+        # Then check file cache
+        cached_key = cls._load_cached_api_key()
+        if cached_key and cls._validate_api_key(cached_key):
+            cls._api_key_cache = cached_key
+            return cached_key
+
+        # Generate new key if no valid cached key
+        new_key = cls.generate_api_key()
+        cls._api_key_cache = new_key
+        cls._save_cached_api_key(new_key)
+        return new_key
 
     @staticmethod
     def generate_api_key() -> str:
@@ -193,8 +287,8 @@ class TwoAI(Provider):
         if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        # Always auto-generate API key
-        api_key = self.generate_api_key()
+        # Use cached API key or generate new one if needed
+        api_key = self.get_cached_api_key()
 
         self.url = "https://api.two.ai/v2/chat/completions"  # API endpoint
         self.headers = {
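
Note on the TwoAI hunks above: key generation now runs only when both the in-process cache and the on-disk pickle cache miss or fail validation, so repeated instantiations within 24 hours reuse one key. A minimal, self-contained sketch of the same timestamped file-cache pattern, with illustrative names that are not part of webscout:

import json
import os
import tempfile
import time
from typing import Optional

CACHE_FILE = os.path.join(tempfile.gettempdir(), "demo_key_cache.json")  # illustrative path
TTL_SECONDS = 24 * 60 * 60  # same 24-hour freshness window the diff uses

def load_key() -> Optional[str]:
    """Return the cached key if the cache file exists and is still fresh."""
    try:
        with open(CACHE_FILE) as f:
            data = json.load(f)
        if time.time() - data.get("timestamp", 0) < TTL_SECONDS:
            return data.get("api_key")
    except (OSError, ValueError):
        pass  # missing or corrupt cache: fall through and regenerate
    return None

def save_key(api_key: str) -> None:
    """Persist the key together with the time it was stored."""
    with open(CACHE_FILE, "w") as f:
        json.dump({"api_key": api_key, "timestamp": time.time()}, f)

The sketch uses json instead of pickle purely to stay human-readable; the diff itself pickles a dict with the same two fields and additionally revalidates the key against the live endpoint before trusting it.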
@@ -33,7 +33,7 @@ class TypliAI(Provider):
     >>> print(response)
     'I don't have access to real-time weather information...'
     """
-    AVAILABLE_MODELS = ["free-no-sign-up-chatgpt"]
+    AVAILABLE_MODELS = ["gpt-4o-mini"]
 
     def __init__(
         self,
@@ -47,7 +47,7 @@ class TypliAI(Provider):
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful assistant.",
-        model: str = "free-no-sign-up-chatgpt"
+        model: str = "gpt-4o-mini"
     ):
         """
         Initializes the TypliAI API with given parameters.
@@ -119,16 +119,6 @@ class TypliAI(Provider):
         self.conversation.history_offset = history_offset
 
 
-    @staticmethod
-    def _typli_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from the Typli.ai stream format '0:"..."'."""
-        if isinstance(chunk, str):
-            match = re.search(r'0:"(.*?)"', chunk)
-            if match:
-                # Decode potential unicode escapes like \u00e9
-                content = match.group(1).encode().decode('unicode_escape')
-                return content.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes and quotes
-        return None
 
     def ask(
         self,
@@ -182,7 +172,7 @@ class TypliAI(Provider):
                     ]
                 }
             ],
-            "slug": self.model
+            "slug": "free-no-sign-up-chatgpt"
         }
 
         def for_stream():
@@ -202,13 +192,21 @@ class TypliAI(Provider):
                 raise exceptions.FailedToGenerateResponseError(error_msg)
 
             streaming_response = ""
-            # Use sanitize_stream with the custom extractor
+            # Use sanitize_stream with extract_regexes
             processed_stream = sanitize_stream(
                 data=response.iter_content(chunk_size=None),  # Pass byte iterator
                 intro_value=None,  # No simple prefix like 'data:'
                 to_json=False,  # Content is extracted as string, not JSON object per line
-                content_extractor=self._typli_extractor,  # Use the specific extractor
-                skip_markers=["f:{", "e:{", "d:{", "8:[", "2:["]  # Skip metadata lines based on observed format
+                extract_regexes=[r'0:"(.*?)"'],  # Extract content from '0:"..."' format
+                skip_regexes=[
+                    r'^f:\{.*\}$',  # Skip metadata lines starting with f:{
+                    r'^e:\{.*\}$',  # Skip metadata lines starting with e:{
+                    r'^d:\{.*\}$',  # Skip metadata lines starting with d:{
+                    r'^8:\[.*\]$',  # Skip metadata lines starting with 8:[
+                    r'^2:\[.*\]$',  # Skip metadata lines starting with 2:[
+                    r'^\s*$'  # Skip empty lines
+                ],
+                raw=raw  # Pass the raw parameter to sanitize_stream
            )
 
             for content_chunk in processed_stream:
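
The hunk above swaps TypliAI's bespoke _typli_extractor callback for declarative extract_regexes/skip_regexes arguments. The same classification can be reproduced with plain re, which is a quick way to sanity-check the patterns; the sample lines below are invented to mimic the '0:"..."' wire format the diff targets:

import re
from typing import Optional

EXTRACT = re.compile(r'0:"(.*?)"')
SKIP = [re.compile(p) for p in (
    r'^f:\{.*\}$', r'^e:\{.*\}$', r'^d:\{.*\}$',
    r'^8:\[.*\]$', r'^2:\[.*\]$', r'^\s*$',
)]

def classify(line: str) -> Optional[str]:
    """Return extracted text, or None if the line is metadata/blank."""
    if any(p.match(line) for p in SKIP):
        return None
    m = EXTRACT.search(line)
    return m.group(1) if m else None

# Invented sample stream lines:
for line in ['f:{"messageId":"abc"}', '0:"Hello, "', '0:"world!"', 'd:{"finishReason":"stop"}']:
    print(repr(line), "->", classify(line))
# Only the 0:"..." lines yield text: 'Hello, ' and 'world!'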
@@ -244,6 +242,7 @@ class TypliAI(Provider):
         self,
         prompt: str,
         stream: bool = False,
+        raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
@@ -253,6 +252,7 @@ class TypliAI(Provider):
         Args:
             prompt (str): The prompt to send to the API.
             stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
             optimizer (str): Optimizer to use for the prompt.
             conversationally (bool): Whether to generate the prompt conversationally.
 
@@ -262,19 +262,25 @@ class TypliAI(Provider):
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
 
         return for_stream() if stream else for_non_stream()
 
@@ -290,7 +296,7 @@ class TypliAI(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         # Ensure text exists before processing
-        return response.get("text", "")
+        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
 
 
@@ -298,7 +304,7 @@ class TypliAI(Provider):
     from rich import print
     try:
         ai = TypliAI(timeout=60)
-        response = ai.chat("Write a short poem about AI", stream=True)
+        response = ai.chat("Write a short poem about AI", stream=True, raw=False)
         for chunk in response:
             print(chunk, end="", flush=True)
     except Exception as e:
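
With raw now threaded through both ask() and chat(), callers can opt into the unprocessed stream chunks. Assuming the provider otherwise behaves as in the demo above (the import path is inferred from the file layout, not shown in this diff), usage would look like:

from webscout.Provider.TypliAI import TypliAI  # import path assumed from webscout/Provider/TypliAI.py

ai = TypliAI(timeout=60)
# raw=True yields chunks as sanitize_stream produced them,
# bypassing the get_message() newline cleanup shown in the diff.
for chunk in ai.chat("Write a short poem about AI", stream=True, raw=True):
    print(chunk, end="", flush=True)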
@@ -0,0 +1,339 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import random
+import string
+from typing import Any, Dict, Optional, Generator, Union, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+# Using LitProxy for intelligent proxy management
+try:
+    from litproxy import (
+        get_auto_proxy, get_proxy_dict, test_proxy, get_working_proxy,
+        refresh_proxy_cache, get_proxy_stats, set_proxy_cache_duration,
+        patch, use_proxy, proxyify, list_proxies, test_all_proxies,
+        current_proxy, make_request_with_auto_retry, create_auto_retry_session
+    )
+    LITPROXY_AVAILABLE = True
+except ImportError:
+    LITPROXY_AVAILABLE = False
+
+import requests
+
+class VercelAIGateway(Provider):
+    """
+    A class to interact with the Vercel AI SDK Gateway Demo API with intelligent proxy management using LitProxy.
+
+    Install LitProxy for advanced proxy features:
+        pip install litproxy
+
+    Features:
+    - Intelligent proxy rotation and health monitoring
+    - Automatic retry with proxy fallback on failures
+    - Support for multiple proxy sources (Webshare, NordVPN, Remote lists)
+    - Seamless curl_cffi session integration
+    - Comprehensive proxy diagnostics and statistics
+    """
+
+    AVAILABLE_MODELS = [
+        "amazon/nova-lite",
+        "amazon/nova-micro",
+        "anthropic/claude-3.5-haiku",
+        "google/gemini-2.0-flash",
+        "meta/llama-3.1-8b",
+        "mistral/ministral-3b",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4o-mini",
+        "xai/grok-3"
+    ]
+
+    @staticmethod
+    def _vercel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Vercel AI Gateway stream JSON objects."""
+        if isinstance(chunk, dict):
+            if chunk.get("type") == "text-delta":
+                return chunk.get("delta")
+        return None
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "openai/gpt-4o-mini",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome",
+        use_proxy: bool = True,
+        max_proxy_attempts: int = 3,
+        proxy_cache_duration: int = 300
+    ):
+        """
+        Initializes the Vercel AI Gateway API client with LitProxy integration.
+
+        Args:
+            use_proxy (bool): Enable proxy usage via LitProxy (default: True)
+            max_proxy_attempts (int): Maximum proxy retry attempts (default: 3)
+            proxy_cache_duration (int): Proxy cache duration in seconds (default: 300)
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://ai-sdk-gateway-demo.labs.vercel.dev/api/chat"
+
+        # Initialize LitAgent
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://ai-sdk-gateway-demo.labs.vercel.dev",
+            "Priority": "u=1, i",
+            "Referer": f"https://ai-sdk-gateway-demo.labs.vercel.dev/?modelId={model.replace('/', '%2F')}",
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.session.headers.update(self.headers)
+
+        # Configure proxy settings
+        self.use_proxy = use_proxy
+        self.max_proxy_attempts = max_proxy_attempts
+        self.proxy_cache_duration = proxy_cache_duration
+
+        # Integrate LitProxy for intelligent proxy management
+        if use_proxy and LITPROXY_AVAILABLE:
+            try:
+                # Configure proxy cache duration
+                set_proxy_cache_duration(proxy_cache_duration)
+                # Patch the session with proxy support
+                patch(self.session)
+                self.proxy_enabled = True
+            except Exception as e:
+                self.proxy_enabled = False
+        else:
+            self.proxy_enabled = False
+            if use_proxy and not LITPROXY_AVAILABLE:
+                # Silently disable proxy if LitProxy not available
+                pass
+
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+        })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def _make_request(self, payload: dict, stream: bool = False):
+        """
+        Make a request to the API. The session is already patched with LitProxy auto-retry if enabled.
+
+        Args:
+            payload: Request payload
+            stream: Whether to stream the response
+
+        Returns:
+            Response object
+        """
+        # Use the session directly - it's already patched with proxy auto-retry if enabled
+        response = self.session.post(
+            self.url,
+            data=json.dumps(payload),
+            stream=stream,
+            timeout=self.timeout,
+            impersonate="chrome110"
+        )
+        response.raise_for_status()
+        return response
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate random IDs
+        conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+        message_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+
+        # Payload construction
+        payload = {
+            "modelId": self.model,
+            "id": conversation_id,
+            "messages": [
+                {
+                    "parts": [{"type": "text", "text": conversation_prompt}],
+                    "id": message_id,
+                    "role": "user"
+                }
+            ],
+            "trigger": "submit-message"
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self._make_request(payload, stream=True)
+
+                # Use sanitize_stream for SSE format
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            try:
+                response = self._make_request(payload, stream=False)
+
+                # Collect all streaming chunks for non-stream mode
+                full_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_text += content_chunk
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return self.last_response if not raw else full_text
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    test_ai = VercelAIGateway(use_proxy=True, max_proxy_attempts=3, proxy_cache_duration=300)
+    print(test_ai.chat("Hello, how are you?"))
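
Beyond the __main__ smoke test at the end of the new file, a usage sketch for the provider might look like the following. The import path is inferred from the files-changed list (webscout/Provider/UNFINISHED/VercelAIGateway.py), not confirmed by this diff, and the class degrades to direct requests when litproxy is not installed:

from webscout.Provider.UNFINISHED.VercelAIGateway import VercelAIGateway  # path assumed

# Pick any entry from AVAILABLE_MODELS; use_proxy=False skips LitProxy entirely.
ai = VercelAIGateway(model="google/gemini-2.0-flash", use_proxy=False, timeout=30)
for piece in ai.chat("Summarize what an AI gateway does.", stream=True):
    print(piece, end="", flush=True)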