webscout 8.3.6-py3-none-any.whl → 2025.10.11-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,339 +1,339 @@
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-import json
-import random
-import string
-from typing import Any, Dict, Optional, Generator, Union, List
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-# Using LitProxy for intelligent proxy management
-try:
-    from litproxy import (
-        get_auto_proxy, get_proxy_dict, test_proxy, get_working_proxy,
-        refresh_proxy_cache, get_proxy_stats, set_proxy_cache_duration,
-        patch, use_proxy, proxyify, list_proxies, test_all_proxies,
-        current_proxy, make_request_with_auto_retry, create_auto_retry_session
-    )
-    LITPROXY_AVAILABLE = True
-except ImportError:
-    LITPROXY_AVAILABLE = False
-
-import requests
-
-class VercelAIGateway(Provider):
-    """
-    A class to interact with the Vercel AI SDK Gateway Demo API with intelligent proxy management using LitProxy.
-
-    Install LitProxy for advanced proxy features:
-    pip install litproxy
-
-    Features:
-    - Intelligent proxy rotation and health monitoring
-    - Automatic retry with proxy fallback on failures
-    - Support for multiple proxy sources (Webshare, NordVPN, Remote lists)
-    - Seamless curl_cffi session integration
-    - Comprehensive proxy diagnostics and statistics
-    """
-
-    AVAILABLE_MODELS = [
-        "amazon/nova-lite",
-        "amazon/nova-micro",
-        "anthropic/claude-3.5-haiku",
-        "google/gemini-2.0-flash",
-        "meta/llama-3.1-8b",
-        "mistral/ministral-3b",
-        "openai/gpt-3.5-turbo",
-        "openai/gpt-4o-mini",
-        "xai/grok-3"
-    ]
-
-    @staticmethod
-    def _vercel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from Vercel AI Gateway stream JSON objects."""
-        if isinstance(chunk, dict):
-            if chunk.get("type") == "text-delta":
-                return chunk.get("delta")
-        return None
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "openai/gpt-4o-mini",
-        system_prompt: str = "You are a helpful assistant.",
-        browser: str = "chrome",
-        use_proxy: bool = True,
-        max_proxy_attempts: int = 3,
-        proxy_cache_duration: int = 300
-    ):
-        """
-        Initializes the Vercel AI Gateway API client with LitProxy integration.
-
-        Args:
-            use_proxy (bool): Enable proxy usage via LitProxy (default: True)
-            max_proxy_attempts (int): Maximum proxy retry attempts (default: 3)
-            proxy_cache_duration (int): Proxy cache duration in seconds (default: 300)
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.url = "https://ai-sdk-gateway-demo.labs.vercel.dev/api/chat"
-
-        # Initialize LitAgent
-        self.agent = LitAgent()
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        self.headers = {
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "DNT": "1",
-            "Origin": "https://ai-sdk-gateway-demo.labs.vercel.dev",
-            "Priority": "u=1, i",
-            "Referer": f"https://ai-sdk-gateway-demo.labs.vercel.dev/?modelId={model.replace('/', '%2F')}",
-            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Sec-GPC": "1",
-            "User-Agent": self.fingerprint.get("user_agent", ""),
-            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
-            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
-            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
-        }
-
-        # Initialize curl_cffi Session
-        self.session = Session()
-        self.session.headers.update(self.headers)
-
-        # Configure proxy settings
-        self.use_proxy = use_proxy
-        self.max_proxy_attempts = max_proxy_attempts
-        self.proxy_cache_duration = proxy_cache_duration
-
-        # Integrate LitProxy for intelligent proxy management
-        if use_proxy and LITPROXY_AVAILABLE:
-            try:
-                # Configure proxy cache duration
-                set_proxy_cache_duration(proxy_cache_duration)
-                # Patch the session with proxy support
-                patch(self.session)
-                self.proxy_enabled = True
-            except Exception as e:
-                self.proxy_enabled = False
-        else:
-            self.proxy_enabled = False
-            if use_proxy and not LITPROXY_AVAILABLE:
-                # Silently disable proxy if LitProxy not available
-                pass
-
-        self.system_prompt = system_prompt
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept-Language": self.fingerprint["accept_language"],
-            "User-Agent": self.fingerprint.get("user_agent", ""),
-            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
-            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
-        })
-
-        # Update session headers
-        self.session.headers.update(self.headers)
-        return self.fingerprint
-
-    def _make_request(self, payload: dict, stream: bool = False):
-        """
-        Make a request to the API. The session is already patched with LitProxy auto-retry if enabled.
-
-        Args:
-            payload: Request payload
-            stream: Whether to stream the response
-
-        Returns:
-            Response object
-        """
-        # Use the session directly - it's already patched with proxy auto-retry if enabled
-        response = self.session.post(
-            self.url,
-            data=json.dumps(payload),
-            stream=stream,
-            timeout=self.timeout,
-            impersonate="chrome110"
-        )
-        response.raise_for_status()
-        return response
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Generate random IDs
-        conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
-        message_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
-
-        # Payload construction
-        payload = {
-            "modelId": self.model,
-            "id": conversation_id,
-            "messages": [
-                {
-                    "parts": [{"type": "text", "text": conversation_prompt}],
-                    "id": message_id,
-                    "role": "user"
-                }
-            ],
-            "trigger": "submit-message"
-        }
-
-        def for_stream():
-            streaming_text = ""
-            try:
-                response = self._make_request(payload, stream=True)
-
-                # Use sanitize_stream for SSE format
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
-                    intro_value="data:",
-                    to_json=True,
-                    skip_markers=["[DONE]"],
-                    content_extractor=self._vercel_extractor,
-                    yield_raw_on_error=False
-                )
-
-                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        resp = dict(text=content_chunk)
-                        yield resp if not raw else content_chunk
-
-            except CurlError as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
-            finally:
-                if streaming_text:
-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-        def for_non_stream():
-            try:
-                response = self._make_request(payload, stream=False)
-
-                # Collect all streaming chunks for non-stream mode
-                full_text = ""
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
-                    intro_value="data:",
-                    to_json=True,
-                    skip_markers=["[DONE]"],
-                    content_extractor=self._vercel_extractor,
-                    yield_raw_on_error=False
-                )
-
-                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        full_text += content_chunk
-
-                self.last_response = {"text": full_text}
-                self.conversation.update_chat_history(prompt, full_text)
-                return self.last_response if not raw else full_text
-
-            except CurlError as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-            except Exception as e:
-                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        def for_stream_chat():
-            gen = self.ask(
-                prompt, stream=True, raw=False,
-                optimizer=optimizer, conversationally=conversationally
-            )
-            for response_dict in gen:
-                yield self.get_message(response_dict)
-
-        def for_non_stream_chat():
-            response_data = self.ask(
-                prompt, stream=False, raw=False,
-                optimizer=optimizer, conversationally=conversationally
-            )
-            return self.get_message(response_data)
-
-        return for_stream_chat() if stream else for_non_stream_chat()
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-if __name__ == "__main__":
-    test_ai = VercelAIGateway(use_proxy=True, max_proxy_attempts=3, proxy_cache_duration=300)
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import random
+import string
+from typing import Any, Dict, Optional, Generator, Union, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+# Using LitProxy for intelligent proxy management
+try:
+    from litproxy import (
+        get_auto_proxy, get_proxy_dict, test_proxy, get_working_proxy,
+        refresh_proxy_cache, get_proxy_stats, set_proxy_cache_duration,
+        patch, use_proxy, proxyify, list_proxies, test_all_proxies,
+        current_proxy, make_request_with_auto_retry, create_auto_retry_session
+    )
+    LITPROXY_AVAILABLE = True
+except ImportError:
+    LITPROXY_AVAILABLE = False
+
+import requests
+
+class VercelAIGateway(Provider):
+    """
+    A class to interact with the Vercel AI SDK Gateway Demo API with intelligent proxy management using LitProxy.
+
+    Install LitProxy for advanced proxy features:
+    pip install litproxy
+
+    Features:
+    - Intelligent proxy rotation and health monitoring
+    - Automatic retry with proxy fallback on failures
+    - Support for multiple proxy sources (Webshare, NordVPN, Remote lists)
+    - Seamless curl_cffi session integration
+    - Comprehensive proxy diagnostics and statistics
+    """
+
+    AVAILABLE_MODELS = [
+        "amazon/nova-lite",
+        "amazon/nova-micro",
+        "anthropic/claude-3.5-haiku",
+        "google/gemini-2.0-flash",
+        "meta/llama-3.1-8b",
+        "mistral/ministral-3b",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4o-mini",
+        "xai/grok-3"
+    ]
+
+    @staticmethod
+    def _vercel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Vercel AI Gateway stream JSON objects."""
+        if isinstance(chunk, dict):
+            if chunk.get("type") == "text-delta":
+                return chunk.get("delta")
+        return None
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "openai/gpt-4o-mini",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome",
+        use_proxy: bool = True,
+        max_proxy_attempts: int = 3,
+        proxy_cache_duration: int = 300
+    ):
+        """
+        Initializes the Vercel AI Gateway API client with LitProxy integration.
+
+        Args:
+            use_proxy (bool): Enable proxy usage via LitProxy (default: True)
+            max_proxy_attempts (int): Maximum proxy retry attempts (default: 3)
+            proxy_cache_duration (int): Proxy cache duration in seconds (default: 300)
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://ai-sdk-gateway-demo.labs.vercel.dev/api/chat"
+
+        # Initialize LitAgent
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://ai-sdk-gateway-demo.labs.vercel.dev",
+            "Priority": "u=1, i",
+            "Referer": f"https://ai-sdk-gateway-demo.labs.vercel.dev/?modelId={model.replace('/', '%2F')}",
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.session.headers.update(self.headers)
+
+        # Configure proxy settings
+        self.use_proxy = use_proxy
+        self.max_proxy_attempts = max_proxy_attempts
+        self.proxy_cache_duration = proxy_cache_duration
+
+        # Integrate LitProxy for intelligent proxy management
+        if use_proxy and LITPROXY_AVAILABLE:
+            try:
+                # Configure proxy cache duration
+                set_proxy_cache_duration(proxy_cache_duration)
+                # Patch the session with proxy support
+                patch(self.session)
+                self.proxy_enabled = True
+            except Exception as e:
+                self.proxy_enabled = False
+        else:
+            self.proxy_enabled = False
+            if use_proxy and not LITPROXY_AVAILABLE:
+                # Silently disable proxy if LitProxy not available
+                pass
+
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+        })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def _make_request(self, payload: dict, stream: bool = False):
+        """
+        Make a request to the API. The session is already patched with LitProxy auto-retry if enabled.
+
+        Args:
+            payload: Request payload
+            stream: Whether to stream the response
+
+        Returns:
+            Response object
+        """
+        # Use the session directly - it's already patched with proxy auto-retry if enabled
+        response = self.session.post(
+            self.url,
+            data=json.dumps(payload),
+            stream=stream,
+            timeout=self.timeout,
+            impersonate="chrome110"
+        )
+        response.raise_for_status()
+        return response
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate random IDs
+        conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+        message_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+
+        # Payload construction
+        payload = {
+            "modelId": self.model,
+            "id": conversation_id,
+            "messages": [
+                {
+                    "parts": [{"type": "text", "text": conversation_prompt}],
+                    "id": message_id,
+                    "role": "user"
+                }
+            ],
+            "trigger": "submit-message"
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self._make_request(payload, stream=True)
+
+                # Use sanitize_stream for SSE format
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            try:
+                response = self._make_request(payload, stream=False)
+
+                # Collect all streaming chunks for non-stream mode
+                full_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_text += content_chunk
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return self.last_response if not raw else full_text
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == "__main__":
+    test_ai = VercelAIGateway(use_proxy=True, max_proxy_attempts=3, proxy_cache_duration=300)
     print(test_ai.chat("Hello, how are you?"))
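For orientation, the provider in the hunk above can be exercised roughly as follows. This is a minimal usage sketch rather than part of the published diff: it assumes webscout 2025.10.11 is installed, that the class is importable from the path shown in the file listing (webscout/Provider/UNFINISHED/VercelAIGateway.py), and that the demo gateway endpoint is still reachable.

# Hypothetical usage sketch for the VercelAIGateway provider shown above.
# Assumptions: webscout 2025.10.11 is installed and the import path matches
# the file listing; the Vercel demo endpoint may change or go away.
from webscout.Provider.UNFINISHED.VercelAIGateway import VercelAIGateway

# use_proxy=False skips LitProxy entirely; if litproxy is not installed the
# provider falls back to an unproxied curl_cffi session anyway.
ai = VercelAIGateway(model="openai/gpt-4o-mini", use_proxy=False, timeout=30)

# Non-streaming: chat() returns the complete reply as a string.
print(ai.chat("Explain what a unified diff is in one sentence."))

# Streaming: chat(stream=True) yields text deltas extracted from the
# gateway's "text-delta" SSE events by _vercel_extractor.
for piece in ai.chat("Count from 1 to 5.", stream=True):
    print(piece, end="", flush=True)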