webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281)
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,342 @@
1
+ from typing import Optional, Union, Any, Dict, Generator, List
2
+ from uuid import uuid4
3
+ import json
4
+ import re
5
+ import random
6
+ from curl_cffi import CurlError
7
+ from curl_cffi.requests import Session
8
+ from curl_cffi.const import CurlHttpVersion
9
+
10
+ from webscout.AIutel import sanitize_stream
11
+ from webscout.AIutel import Optimizers
12
+ from webscout.AIutel import Conversation
13
+ from webscout.AIutel import AwesomePrompts
14
+ from webscout.AIbase import Provider
15
+ from webscout import exceptions
16
+ from webscout.litagent import LitAgent
17
+
18
class ChatSandbox(Provider):
    """
    Provider for the chatsandbox.com chat API.

    Sends chat messages to the specified backend model via the
    chatsandbox.com interface, supporting several AI models such as
    OpenAI, DeepSeek, Llama, Gemini and Mistral.

    Attributes:
        model (str): The model to chat with (e.g., "openai", "deepseek", "llama").

    Examples:
        >>> from webscout.Provider.ChatSandbox import ChatSandbox
        >>> ai = ChatSandbox(model="openai")
        >>> response = ai.chat("Hello, how are you?")
        >>> print(response)
        'I'm doing well, thank you for asking! How can I assist you today?'
    """
    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]

    def __init__(
        self,
        model: str = "openai",
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
    ):
        """
        Initializes the ChatSandbox provider.

        Args:
            model (str): The model to chat with (e.g., "openai", "deepseek", "llama").
            is_conversation (bool): Whether the provider is in conversation mode.
            max_tokens (int): Maximum number of tokens to sample.
            timeout (int): Timeout (seconds) for API requests.
            intro (str): Introduction message for the conversation.
            filepath (str): Filepath for storing conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxies for the API requests. Defaults to no proxies.
            history_offset (int): Offset for conversation history.
            act (str): Awesome-prompt key/index used as the conversation intro.

        Raises:
            ValueError: If ``model`` is not one of AVAILABLE_MODELS.

        Examples:
            >>> ai = ChatSandbox(model="openai")
            >>> print(ai.model)
            'openai'
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # curl_cffi session allows impersonating a real browser TLS fingerprint.
        self.session = Session()
        self.model = model
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chatsandbox.com/api/chat"
        self.timeout = timeout
        self.last_response = {}

        # LitAgent generates a realistic random User-Agent string.
        self.agent = LitAgent()

        # Browser-like headers expected by the chatsandbox.com endpoint.
        self.headers = {
            'authority': 'chatsandbox.com',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': 'https://chatsandbox.com',
            'referer': f'https://chatsandbox.com/chat/{self.model}',
            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': self.agent.random(),
            'dnt': '1',
            'sec-gpc': '1',
        }

        # Materialize as a tuple: the previous generator expression was
        # exhausted by the first `in` membership test, silently rejecting
        # every optimizer on subsequent ask() calls.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Apply headers and proxies to the curl_cffi session.
        self.session.headers.update(self.headers)
        # `proxies` defaults to None to avoid the shared mutable-default pitfall.
        self.session.proxies = proxies or {}

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extract content from one chunk of the chatsandbox stream.

        Chunks are usually plain text, but may be a JSON object carrying a
        "reasoning_content" field; anything non-string yields None.
        """
        if isinstance(chunk, str):
            try:
                data = json.loads(chunk)
                if isinstance(data, dict) and "reasoning_content" in data:
                    return data["reasoning_content"]
                return chunk
            except json.JSONDecodeError:
                # Not JSON: treat the chunk as raw text content.
                return chunk
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a prompt to the ChatSandbox API and returns the response.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            raw (bool): Whether to yield raw text chunks instead of dicts.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Union[Dict[str, Any], Generator]: The API response, or a generator
            of chunks when ``stream`` is True.

        Raises:
            Exception: If ``optimizer`` is not a known optimizer name.
            exceptions.FailedToGenerateResponseError: On HTTP or transport errors.

        Examples:
            >>> ai = ChatSandbox()
            >>> response = ai.ask("Tell me a joke!")
            >>> print(response)
            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Payload shape expected by chatsandbox: a single-message list plus
        # the backend "character" (model) name.
        payload = {
            "messages": [conversation_prompt],
            "character": self.model
        }

        def for_stream():
            try:
                # Use curl_cffi session post with updated impersonate and http_version
                response = self.session.post(
                    self.api_endpoint,
                    headers=self.headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120",  # Try a different impersonation profile
                    http_version=CurlHttpVersion.V1_1  # Force HTTP/1.1
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

                streaming_response = ""
                # Use sanitize_stream with the custom extractor
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value=None,  # No simple prefix to remove here
                    to_json=False,  # Content is not JSON
                    content_extractor=self._chatsandbox_extractor  # Use the specific extractor
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_response += content_chunk
                        yield content_chunk if raw else dict(text=content_chunk)

                self.last_response.update(dict(text=streaming_response))
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
            except CurlError as e:  # Transport-level failures
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:  # Catch other potential exceptions
                # Include the original exception type in the message for clarity
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            # Drain the streaming generator; it populates self.last_response.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """
        Generates a response from the ChatSandbox API.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            str: The API response text (or a generator of text chunks when streaming).

        Examples:
            >>> ai = ChatSandbox()
            >>> response = ai.chat("What's the weather today?")
            >>> print(response)
            'I don't have real-time weather data, but I can help you find weather information online.'
        """
        def for_stream():
            for response in self.ask(
                prompt,
                stream=True,
                raw=False,
                optimizer=optimizer,
                conversationally=conversationally,
            ):
                yield response.get("text", "")

        if stream:
            return for_stream()
        else:
            return self.get_message(
                self.ask(
                    prompt,
                    stream=False,
                    raw=False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

    def get_message(self, response: Dict[str, Any]) -> str:
        """
        Extract the message from the API response.

        Args:
            response (Dict[str, Any]): The API response.

        Returns:
            str: The extracted message.
        """
        if not isinstance(response, dict):
            return str(response)

        raw_text = response.get("text", "")

        # Some backends return a JSON object instead of plain text; try to
        # pull a known content field out of it.
        try:
            data = json.loads(raw_text)
            if isinstance(data, dict):
                # Check for different response formats
                if "reasoning_content" in data:
                    return data["reasoning_content"]
                elif "content" in data:
                    return data["content"]
                elif "message" in data:
                    return data["message"]
                elif "response" in data:
                    return data["response"]
                elif "text" in data:
                    return data["text"]
                # Return the whole JSON if no specific field is found
                return json.dumps(data, ensure_ascii=False)
        except json.JSONDecodeError:
            # If it's not JSON, return the raw text
            pass

        return raw_text.strip()
318
+
319
# --- Example Usage ---
if __name__ == "__main__":
    from rich import print
    # Ensure curl_cffi is installed

    # Smoke-test every available backend model and print a summary table.
    divider = "-" * 80
    print(divider)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(divider)

    for model in ChatSandbox.AVAILABLE_MODELS:
        try:
            client = ChatSandbox(model=model, timeout=60)
            reply = client.chat("Say 'Hello' in one word")
            trimmed = reply.strip() if reply else ""

            if trimmed:
                status = "✓"
                # Truncate long replies so the table stays readable.
                display_text = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as exc:
            print(f"{model:<50} {'✗':<10} {str(exc)}")
@@ -0,0 +1,324 @@
1
+ import json
2
+ from uuid import uuid4
3
+
4
+ import re # Import re
5
+ from curl_cffi import CurlError
6
+ from webscout.AIutel import Optimizers
7
+ from webscout.AIutel import Conversation
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
9
+ from webscout.AIbase import Provider, AsyncProvider
10
+ from webscout import exceptions
11
+ from typing import Optional, Union, Any, AsyncGenerator, Dict
12
+ from curl_cffi.requests import Session
13
+ from webscout.litagent import LitAgent
14
+
15
class Cloudflare(Provider):
    """
    Cloudflare provider to interact with Cloudflare's text generation API
    (the playground.ai.cloudflare.com inference endpoint).
    """

    # Updated AVAILABLE_MODELS from given JSON data.
    # Model identifiers as exposed by the Cloudflare Workers AI playground;
    # "@cf/..." entries are Cloudflare-hosted builds, "@hf/..." entries
    # mirror Hugging Face model builds.
    AVAILABLE_MODELS = [
        "@hf/thebloke/deepseek-coder-6.7b-base-awq",
        "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
        "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
        "@cf/thebloke/discolm-german-7b-v1-awq",
        "@cf/tiiuae/falcon-7b-instruct",
        "@cf/google/gemma-3-12b-it",
        "@hf/google/gemma-7b-it",
        "@hf/nousresearch/hermes-2-pro-mistral-7b",
        "@hf/thebloke/llama-2-13b-chat-awq",
        "@cf/meta/llama-2-7b-chat-fp16",
        "@cf/meta/llama-2-7b-chat-int8",
        "@cf/meta/llama-3-8b-instruct",
        "@cf/meta/llama-3-8b-instruct-awq",
        "@cf/meta/llama-3.1-8b-instruct-awq",
        "@cf/meta/llama-3.1-8b-instruct-fp8",
        "@cf/meta/llama-3.2-11b-vision-instruct",
        "@cf/meta/llama-3.2-1b-instruct",
        "@cf/meta/llama-3.2-3b-instruct",
        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
        "@cf/meta/llama-4-scout-17b-16e-instruct",
        "@cf/meta/llama-guard-3-8b",
        "@hf/thebloke/llamaguard-7b-awq",
        "@hf/meta-llama/meta-llama-3-8b-instruct",
        "@cf/mistral/mistral-7b-instruct-v0.1",
        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
        "@hf/mistral/mistral-7b-instruct-v0.2",
        "@cf/mistralai/mistral-small-3.1-24b-instruct",
        "@hf/thebloke/neural-chat-7b-v3-1-awq",
        "@cf/openchat/openchat-3.5-0106",
        "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
        "@cf/microsoft/phi-2",
        "@cf/qwen/qwen1.5-0.5b-chat",
        "@cf/qwen/qwen1.5-1.8b-chat",
        "@cf/qwen/qwen1.5-14b-chat-awq",
        "@cf/qwen/qwen1.5-7b-chat-awq",
        "@cf/qwen/qwen2.5-coder-32b-instruct",
        "@cf/qwen/qwq-32b",
        "@cf/defog/sqlcoder-7b-2",
        "@hf/nexusflow/starling-lm-7b-beta",
        "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
        "@cf/fblgit/una-cybertron-7b-v2-bf16",
        "@hf/thebloke/zephyr-7b-beta-awq"
    ]
66
+
67
+ def __init__(
68
+ self,
69
+ is_conversation: bool = True,
70
+ max_tokens: int = 600,
71
+ timeout: int = 30,
72
+ intro: str = None,
73
+ filepath: str = None,
74
+ update_file: bool = True,
75
+ proxies: dict = {},
76
+ history_offset: int = 10250,
77
+ act: str = None,
78
+ model: str = "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
79
+ system_prompt: str = "You are a helpful assistant.",
80
+ ):
81
+ """Instantiates Cloudflare Provider
82
+
83
+ Args:
84
+ is_conversation (bool, optional): Flag for conversational mode. Defaults to True.
85
+ max_tokens (int, optional): Max tokens to generate. Defaults to 600.
86
+ timeout (int, optional): HTTP request timeout. Defaults to 30.
87
+ intro (str, optional): Introductory prompt. Defaults to None.
88
+ filepath (str, optional): File path for conversation history. Defaults to None.
89
+ update_file (bool, optional): Update history file flag. Defaults to True.
90
+ proxies (dict, optional): Request proxies. Defaults to {}.
91
+ history_offset (int, optional): Chat history limit. Defaults to 10250.
92
+ act (str, optional): Awesome prompt key/index. Defaults to None.
93
+ model (str, optional): Model to use. Defaults to "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b".
94
+ system_prompt (str, optional): System prompt for conversation. Defaults to "You are a helpful assistant.".
95
+ logging (bool, optional): Enable logging if True. Defaults to False.
96
+ """
97
+ if model not in self.AVAILABLE_MODELS:
98
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
99
+
100
+ self.session = Session() # Use curl_cffi Session
101
+ self.is_conversation = is_conversation
102
+ self.max_tokens_to_sample = max_tokens
103
+ self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"
104
+ self.stream_chunk_size = 64
105
+ self.timeout = timeout
106
+ self.last_response = {}
107
+ self.model = model
108
+ self.system_prompt = system_prompt
109
+
110
+ self.headers = {
111
+ 'Accept': 'text/event-stream',
112
+ 'Accept-Encoding': 'gzip, deflate, br, zstd',
113
+ 'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
114
+ 'Content-Type': 'application/json',
115
+ 'DNT': '1',
116
+ 'Origin': 'https://playground.ai.cloudflare.com',
117
+ 'Referer': 'https://playground.ai.cloudflare.com/',
118
+ 'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
119
+ 'Sec-CH-UA-Mobile': '?0',
120
+ 'Sec-CH-UA-Platform': '"Windows"',
121
+ 'Sec-Fetch-Dest': 'empty',
122
+ 'Sec-Fetch-Mode': 'cors',
123
+ 'Sec-Fetch-Site': 'same-origin',
124
+ 'User-Agent': LitAgent().random()
125
+ }
126
+
127
+ self.cookies = {
128
+ 'cfzs_amplitude': uuid4().hex,
129
+ 'cfz_amplitude': uuid4().hex,
130
+ '__cf_bm': uuid4().hex,
131
+ }
132
+
133
+ self.__available_optimizers = (
134
+ method
135
+ for method in dir(Optimizers)
136
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
137
+ )
138
+
139
+ # Initialize session and apply proxies
140
+ # self.session = cloudscraper.create_scraper() # Replaced above
141
+ self.session.headers.update(self.headers)
142
+ self.session.proxies = proxies
143
+
144
+ Conversation.intro = (
145
+ AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
146
+ if act else intro or Conversation.intro
147
+ )
148
+ self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
149
+ self.conversation.history_offset = history_offset
150
+
151
+ # Initialize logger if logging is enabled
152
+ # self.logger = Logger(
153
+ # name="Cloudflare",
154
+ # format=LogFormat.MODERN_EMOJI,
155
+ # ) if logging else None
156
+
157
+ # if self.logger:
158
+ # self.logger.info("Cloudflare initialized successfully")
159
+
160
+ @staticmethod
161
+ def _cloudflare_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
162
+ """Extracts content from Cloudflare stream JSON objects."""
163
+ # Updated for the 0:"..." format
164
+ if isinstance(chunk, str):
165
+ # Use re.search to find the pattern 0:"<content>"
166
+ match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
167
+ if match:
168
+ # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
169
+ content = match.group(1).encode().decode('unicode_escape')
170
+ return content.replace('\\\\', '\\').replace('\\"', '"')
171
+ return None
172
+
173
+ def ask(
174
+ self,
175
+ prompt: str,
176
+ stream: bool = False,
177
+ raw: bool = False,
178
+ optimizer: str = None,
179
+ conversationally: bool = False,
180
+ ) -> dict:
181
+ """Chat with AI
182
+
183
+ Args:
184
+ prompt (str): Prompt to be sent.
185
+ stream (bool, optional): Whether to stream the response. Defaults to False.
186
+ raw (bool, optional): Return raw response. Defaults to False.
187
+ optimizer (str, optional): Optimizer to use. Defaults to None.
188
+ conversationally (bool, optional): Conversational mode flag. Defaults to False.
189
+ Returns:
190
+ dict: Response from the API.
191
+ """
192
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
193
+ if optimizer:
194
+ if optimizer in self.__available_optimizers:
195
+ conversation_prompt = getattr(Optimizers, optimizer)(
196
+ conversation_prompt if conversationally else prompt
197
+ )
198
+ # if self.logger:
199
+ # self.logger.debug(f"Applied optimizer: {optimizer}")
200
+ else:
201
+ # if self.logger:
202
+ # self.logger.error(f"Invalid optimizer requested: {optimizer}")
203
+ raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
204
+
205
+ payload = {
206
+ "messages": [
207
+ {"role": "system", "content": self.system_prompt},
208
+ {"role": "user", "content": conversation_prompt}
209
+ ],
210
+ "lora": None,
211
+ "model": self.model,
212
+ "max_tokens": self.max_tokens_to_sample,
213
+ "stream": True
214
+ }
215
+
216
+ def for_stream():
217
+ # if self.logger:
218
+ # self.logger.debug("Sending streaming request to Cloudflare API...")
219
+ streaming_text = "" # Initialize outside try block
220
+ try:
221
+ response = self.session.post(
222
+ self.chat_endpoint,
223
+ headers=self.headers,
224
+ cookies=self.cookies,
225
+ data=json.dumps(payload),
226
+ stream=True,
227
+ timeout=self.timeout,
228
+ impersonate="chrome120" # Add impersonate
229
+ )
230
+ response.raise_for_status()
231
+
232
+ # Use sanitize_stream
233
+ processed_stream = sanitize_stream(
234
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
235
+ intro_value=None,
236
+ to_json=False,
237
+ skip_markers=None,
238
+ content_extractor=self._cloudflare_extractor, # Use the specific extractor
239
+ yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
240
+ )
241
+
242
+ for content_chunk in processed_stream:
243
+ if content_chunk and isinstance(content_chunk, str):
244
+ streaming_text += content_chunk
245
+ yield content_chunk if raw else dict(text=content_chunk)
246
+
247
+ except CurlError as e:
248
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
249
+ except Exception as e:
250
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
251
+ finally:
252
+ # Update history after stream finishes or fails
253
+ self.last_response.update(dict(text=streaming_text))
254
+ self.conversation.update_chat_history(prompt, streaming_text)
255
+
256
+ def for_non_stream():
257
+ # Aggregate the stream using the updated for_stream logic
258
+ full_text = ""
259
+ last_response_dict = {}
260
+ self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
261
+ # if self.logger:
262
+ # self.logger.info("Streaming response completed successfully")
263
+ try:
264
+ # Ensure raw=False so for_stream yields dicts
265
+ for chunk_data in for_stream():
266
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
267
+ full_text += chunk_data["text"]
268
+ last_response_dict = {"text": full_text} # Keep track of last dict structure
269
+ # Handle raw string case if raw=True was passed
270
+ elif raw and isinstance(chunk_data, str):
271
+ full_text += chunk_data
272
+ last_response_dict = {"text": full_text} # Update dict even for raw
273
+ except Exception as e:
274
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
275
+ if not full_text:
276
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
277
+
278
+ # last_response and history are updated within for_stream's finally block
279
+ # Return the final aggregated response dict or raw text
280
+ return full_text if raw else last_response_dict
281
+
282
+ return for_stream() if stream else for_non_stream()
283
+
284
+ def chat(
285
+ self,
286
+ prompt: str,
287
+ stream: bool = False,
288
+ optimizer: str = None,
289
+ conversationally: bool = False,
290
+ ) -> str:
291
+ """Generate response string from chat
292
+
293
+ Args:
294
+ prompt (str): Prompt to be sent.
295
+ stream (bool, optional): Stream response flag. Defaults to False.
296
+ optimizer (str, optional): Optimizer name. Defaults to None.
297
+ conversationally (bool, optional): Conversational mode flag. Defaults to False.
298
+ Returns:
299
+ str: Generated response.
300
+ """
301
+ def for_stream():
302
+ for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
303
+ yield self.get_message(response)
304
+ def for_non_stream():
305
+ return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
306
+ return for_stream() if stream else for_non_stream()
307
+
308
+ def get_message(self, response: dict) -> str:
309
+ """Extracts the message text from the response
310
+
311
+ Args:
312
+ response (dict): API response.
313
+ Returns:
314
+ str: Extracted text.
315
+ """
316
+ assert isinstance(response, dict), "Response should be of dict data-type only"
317
+ return response["text"]
318
+
319
if __name__ == '__main__':
    from rich import print

    # Quick manual smoke test: stream a short completion to stdout.
    bot = Cloudflare(timeout=5000)
    for piece in bot.chat("write a poem about AI", stream=True):
        print(piece, end="", flush=True)