webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281)
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/UNFINISHED/test_lmarena.py
@@ -0,0 +1,119 @@
+ import cloudscraper
+
+ def main():
+     print("Testing cloudscraper access to LMArena...")
+     try:
+         scraper = cloudscraper.create_scraper(browser={
+             'browser': 'chrome',
+             'platform': 'windows',
+             'desktop': True
+         })
+
+         # Test basic access
+         response = scraper.get("https://lmarena.ai")
+         print(f"Status code: {response.status_code}")
+         print(f"Response length: {len(response.text)}")
+         print("Cloudscraper test successful!")
+
+         # Generate a session hash
+         import uuid
+         session_hash = str(uuid.uuid4()).replace("-", "")
+         print(f"Session hash: {session_hash}")
+
+         # Create payloads
+         model_id = "gpt-4o"
+         prompt = "Hello, what is your name?"
+
+         first_payload = {
+             "data": [
+                 None,
+                 model_id,
+                 {"text": prompt, "files": []},
+                 {
+                     "text_models": [model_id],
+                     "all_text_models": [model_id],
+                     "vision_models": [],
+                     "all_vision_models": [],
+                     "image_gen_models": [],
+                     "all_image_gen_models": [],
+                     "search_models": [],
+                     "all_search_models": [],
+                     "models": [model_id],
+                     "all_models": [model_id],
+                     "arena_type": "text-arena"
+                 }
+             ],
+             "event_data": None,
+             "fn_index": 117,
+             "trigger_id": 159,
+             "session_hash": session_hash
+         }
+
+         second_payload = {
+             "data": [],
+             "event_data": None,
+             "fn_index": 118,
+             "trigger_id": 159,
+             "session_hash": session_hash
+         }
+
+         third_payload = {
+             "data": [None, 0.7, 1, 2048],
+             "event_data": None,
+             "fn_index": 119,
+             "trigger_id": 159,
+             "session_hash": session_hash
+         }
+
+         # Set up headers
+         headers = {
+             "Content-Type": "application/json",
+             "Accept": "application/json",
+         }
+
+         # Make requests
+         print("Sending first request...")
+         response = scraper.post(
+             "https://lmarena.ai/queue/join?",
+             json=first_payload,
+             headers=headers
+         )
+         print(f"First response status: {response.status_code}")
+
+         print("Sending second request...")
+         response = scraper.post(
+             "https://lmarena.ai/queue/join?",
+             json=second_payload,
+             headers=headers
+         )
+         print(f"Second response status: {response.status_code}")
+
+         print("Sending third request...")
+         response = scraper.post(
+             "https://lmarena.ai/queue/join?",
+             json=third_payload,
+             headers=headers
+         )
+         print(f"Third response status: {response.status_code}")
+
+         # Stream the response
+         stream_url = f"https://lmarena.ai/queue/data?session_hash={session_hash}"
+         print(f"Streaming from: {stream_url}")
+
+         with scraper.get(stream_url, headers={"Accept": "text/event-stream"}, stream=True) as response:
+             print(f"Stream response status: {response.status_code}")
+             text_position = 0
+             response_text = ""
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     print(line)
+
+
+     except Exception as e:
+         print(f"Error: {e}")
+         import traceback
+         traceback.print_exc()
+
+ if __name__ == "__main__":
+     main()
webscout/Provider/Venice.py
@@ -0,0 +1,258 @@
+ from curl_cffi import CurlError
+ from curl_cffi.requests import Session # Import Session
+ import json
+ from typing import Generator, Dict, Any, List, Optional, Union
+ from uuid import uuid4
+ import random
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class Venice(Provider):
+     """
+     A class to interact with the Venice AI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "mistral-31-24b",
+         "dolphin-3.0-mistral-24b",
+         "llama-3.2-3b-akash",
+         "qwen2dot5-coder-32b",
+         "deepseek-coder-v2-lite",
+
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 30,
+         temperature: float = 0.8, # Keep temperature, user might want to adjust
+         top_p: float = 0.9, # Keep top_p
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "mistral-31-24b",
+         # System prompt is empty in the example, but keep it configurable
+         system_prompt: str = ""
+     ):
+         """Initialize Venice AI client"""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Update API endpoint
+         self.api_endpoint = "https://outerface.venice.ai/api/inference/chat"
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.timeout = timeout
+         self.model = model
+         self.system_prompt = system_prompt
+         self.last_response = {}
+
+         # Update Headers based on successful request
+         self.headers = {
+             "User-Agent": LitAgent().random(), # Keep using LitAgent
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9", # Keep existing
+             "content-type": "application/json",
+             "origin": "https://venice.ai",
+             "referer": "https://venice.ai/", # Update referer
+             # Update sec-ch-ua to match example
+             "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             # Update sec-fetch-site to match example
+             "sec-fetch-site": "same-site",
+             # Add missing headers from example
+             "priority": "u=1, i",
+             "sec-gpc": "1",
+             "x-venice-version": "interface@20250424.065523+50bac27" # Add version header
+         }
+
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     @staticmethod
+     def _venice_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from Venice stream JSON objects."""
+         if isinstance(chunk, dict) and chunk.get("kind") == "content":
+             return chunk.get("content")
+         return None
+
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Update Payload construction based on successful request
+         payload = {
+             "requestId": str(uuid4())[:7], # Keep generating request ID
+             "modelId": self.model,
+             "prompt": [{"content": conversation_prompt, "role": "user"}],
+             "systemPrompt": self.system_prompt, # Use configured system prompt
+             "conversationType": "text",
+             "temperature": self.temperature, # Use configured temperature
+             "webEnabled": True, # Keep webEnabled
+             "topP": self.top_p, # Use configured topP
+             "includeVeniceSystemPrompt": True, # Set to True as per example
+             "isCharacter": False, # Keep as False
+             # Add missing fields from example payload
+             "userId": "user_anon_" + str(random.randint(1000000000, 9999999999)), # Generate anon user ID
+             "isDefault": True,
+             "textToSpeech": {"voiceId": "af_sky", "speed": 1},
+             "clientProcessingTime": random.randint(10, 50) # Randomize slightly
+         }
+
+         def for_stream():
+             try:
+                 # Use curl_cffi session post
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="edge101" # Match impersonation closer to headers
+                 )
+                 # Check response status after the call
+                 if response.status_code != 200:
+                     # Include response text in error
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Request failed with status code {response.status_code} - {response.text}"
+                     )
+
+                 streaming_text = ""
+                 # Use sanitize_stream with the custom extractor
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value=None, # No simple prefix
+                     to_json=True, # Each line is JSON
+                     content_extractor=self._venice_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _venice_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         yield content_chunk if raw else dict(text=content_chunk)
+
+                 # Update history and last response after stream finishes
+                 self.conversation.update_chat_history(prompt, streaming_text)
+                 self.last_response = {"text": streaming_text}
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+             # Catch requests.exceptions.RequestException if needed, but CurlError is primary for curl_cffi
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+         def for_non_stream():
+             full_text = ""
+             # Iterate through the generator provided by for_stream
+             for chunk_data in for_stream():
+                 # Check if chunk_data is a dict (not raw) and has 'text'
+                 if isinstance(chunk_data, dict) and "text" in chunk_data:
+                     full_text += chunk_data["text"]
+                 # If raw=True, chunk_data is the string content itself
+                 elif isinstance(chunk_data, str):
+                     full_text += chunk_data
+             # Update last_response after aggregation
+             self.last_response = {"text": full_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     # Ensure curl_cffi is installed
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(Venice.AVAILABLE_MODELS)
+
+     for model in Venice.AVAILABLE_MODELS:
+         try:
+             test_ai = Venice(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/VercelAI.py
@@ -0,0 +1,253 @@
+ import re
+ import time
+ from curl_cffi import requests
+ import json
+ from typing import Union, Any, Dict, Generator, Optional
+ import uuid
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class VercelAI(Provider):
+     """
+     A class to interact with the Vercel AI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "chat-model",
+         "chat-model-reasoning"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "chat-model",
+         system_prompt: str = "You are a helpful AI assistant."
+     ):
+         """Initializes the Vercel AI API client."""
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://chat.vercel.ai/api/chat"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.litagent = LitAgent()
+         self.headers = self.litagent.generate_fingerprint()
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+
+         # Add Vercel AI specific headers
+         self.session.headers.update({
+             "authority": "chat.vercel.ai",
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://chat.vercel.ai",
+             "priority": "u=1, i",
+             "referer": "https://chat.vercel.ai/",
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "x-kpsdk-c": "1-Cl4OUDwFNA",
+             "x-kpsdk-cd": json.dumps({
+                 "workTime": int(time.time() * 1000),
+                 "id": str(uuid.uuid4()),
+                 "answers": [5, 5],
+                 "duration": 26.9,
+                 "d": 1981,
+                 "st": int(time.time() * 1000) - 1000,
+                 "rst": int(time.time() * 1000) - 500
+             }),
+             "x-kpsdk-ct": str(uuid.uuid4()),
+             "x-kpsdk-r": "1-B1NfB2A",
+             "x-kpsdk-v": "j-1.0.0"
+         })
+
+         # Add cookies
+         self.session.cookies.update({
+             "KP_UIDz": str(uuid.uuid4()),
+             "KP_UIDz-ssn": str(uuid.uuid4())
+         })
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     @staticmethod
+     def _vercelai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from the VercelAI stream format '0:"..."'."""
+         if isinstance(chunk, str):
+             match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+             if match:
+                 # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                 content = match.group(1).encode().decode('unicode_escape')
+                 return content.replace('\\\\', '\\').replace('\\"', '"')
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+         """Chat with AI"""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "id": "guest",
+             "messages": [
+                 {
+                     "id": str(uuid.uuid4()),
+                     "createdAt": "2025-03-29T09:13:16.992Z",
+                     "role": "user",
+                     "content": conversation_prompt,
+                     "parts": [{"type": "text", "text": conversation_prompt}]
+                 }
+             ],
+             "selectedChatModelId": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 raise exceptions.FailedToGenerateResponseError(error_msg)
+
+             streaming_text = ""
+             # Use sanitize_stream with the custom extractor
+             processed_stream = sanitize_stream(
+                 data=response.iter_content(chunk_size=None), # Pass byte iterator
+                 intro_value=None, # No simple prefix
+                 to_json=False, # Content is not JSON
+                 content_extractor=self._vercelai_extractor # Use the specific extractor
+             )
+
+             for content_chunk in processed_stream:
+                 if content_chunk and isinstance(content_chunk, str):
+                     streaming_text += content_chunk
+                     yield content_chunk if raw else dict(text=content_chunk)
+
+             self.last_response.update(dict(text=streaming_text))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         # Formatting is handled by the extractor now
+         text = response.get("text", "")
+         return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement if needed
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(VercelAI.AVAILABLE_MODELS)
+
+     for model in VercelAI.AVAILABLE_MODELS:
+         try:
+             test_ai = VercelAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")