webscout-8.2.7-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (323)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/Blackboxai.py (new file)
@@ -0,0 +1,673 @@
+ import requests
+ import random
+ import string
+ import base64
+ from datetime import datetime, timedelta
+ from typing import Any, Dict, Union, Generator, List
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+ def to_data_uri(image_data):
+     """Convert image data to a data URI format"""
+     if isinstance(image_data, str):
+         # Assume it's already a data URI
+         return image_data
+
+     # Encode binary data to base64
+     encoded = base64.b64encode(image_data).decode('utf-8')
+
+     # Determine MIME type (simplified)
+     mime_type = "image/jpeg" # Default
+     if image_data.startswith(b'\x89PNG'):
+         mime_type = "image/png"
+     elif image_data.startswith(b'\xff\xd8'):
+         mime_type = "image/jpeg"
+     elif image_data.startswith(b'GIF'):
+         mime_type = "image/gif"
+
+     return f"data:{mime_type};base64,{encoded}"
+
+
+ class BLACKBOXAI(Provider):
+     """
+     BlackboxAI provider for interacting with the Blackbox API.
+     Supports synchronous operations with multiple models.
+     """
+     url = "https://www.blackbox.ai"
+     api_endpoint = "https://www.blackbox.ai/api/chat"
+
+
+     # Default model (remains the same as per original class)
+     default_model = "GPT-4.1"
+     default_vision_model = default_model
+
+     # New OpenRouter models list
+     openrouter_models = [
+         "Deepcoder 14B Preview",
+         "DeepHermes 3 Llama 3 8B Preview",
+         "DeepSeek R1 Zero",
+         "Dolphin3.0 Mistral 24B",
+         "Dolphin3.0 R1 Mistral 24B",
+         "Flash 3",
+         "Gemini 2.0 Flash Experimental",
+         "Gemma 2 9B",
+         "Gemma 3 12B",
+         "Gemma 3 1B",
+         "Gemma 3 27B",
+         "Gemma 3 4B",
+         "Kimi VL A3B Thinking",
+         "Llama 3.1 8B Instruct",
+         "Llama 3.1 Nemotron Ultra 253B v1",
+         "Llama 3.2 11B Vision Instruct",
+         "Llama 3.2 1B Instruct",
+         "Llama 3.2 3B Instruct",
+         "Llama 3.3 70B Instruct",
+         "Llama 3.3 Nemotron Super 49B v1",
+         "Llama 4 Maverick",
+         "Llama 4 Scout",
+         "Mistral 7B Instruct",
+         "Mistral Nemo",
+         "Mistral Small 3",
+         "Mistral Small 3.1 24B",
+         "Molmo 7B D",
+         "Moonlight 16B A3B Instruct",
+         "Qwen2.5 72B Instruct",
+         "Qwen2.5 7B Instruct",
+         "Qwen2.5 Coder 32B Instruct",
+         "Qwen2.5 VL 32B Instruct",
+         "Qwen2.5 VL 3B Instruct",
+         "Qwen2.5 VL 72B Instruct",
+         "Qwen2.5-VL 7B Instruct",
+         "Qwerky 72B",
+         "QwQ 32B",
+         "QwQ 32B Preview",
+         "QwQ 32B RpR v1",
+         "R1",
+         "R1 Distill Llama 70B",
+         "R1 Distill Qwen 14B",
+         "R1 Distill Qwen 32B",
+     ]
+
+     # New base models list
+     models = [
+         default_model,
+         "o3-mini",
+         "gpt-4.1-nano",
+         "Claude-sonnet-3.7",
+         "Claude-sonnet-3.5",
+         "DeepSeek-R1",
+         "Mistral-Small-24B-Instruct-2501",
+         *openrouter_models,
+         # Trending agent modes (names)
+         'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
+         'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
+         'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
+         'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
+         'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
+         'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
+     ]
+
+     # Models that support vision capabilities
+     vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct"] # Added Llama vision
+
+     # Models that can be directly selected by users
+     userSelectedModel = ['o3-mini','Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
+
+     # Agent mode configurations
+     agentMode = {
+         # OpenRouter Free
+         'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
+         'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
+         'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
+         'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
+         'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
+         'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
+         'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
+         'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
+         'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
+         'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
+         'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
+         'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
+         'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
+         'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
+         'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
+         'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
+         'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
+         'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
+         'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
+         'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
+         'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
+         'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
+         'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
+         'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
+         'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"}, # Matches Mistral-Small-24B-Instruct-2501
+         'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
+         'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
+         'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
+         'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
+         'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
+         'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
+         'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
+         'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
+         'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
+         'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
+         'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
+         'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
+         'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
+         'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
+         'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"}, # Matches DeepSeek-R1
+         'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
+         'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
+         'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
+         # Default models from the new list
+         'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
+         'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
+         'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"}, # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
+         'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
+         # Add default_model if it's not covered and has an agent mode
+         default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model}, # Assuming GPT-4.1 is agent-compatible
+         'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"}, # Assuming o3-mini is agent-compatible
+         'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"}, # Assuming gpt-4.1-nano is agent-compatible
+     }
+
+     # Trending agent modes
+     trendingAgentMode = {
+         'Python Agent': {'mode': True, 'id': "python"},
+         'HTML Agent': {'mode': True, 'id': "html"},
+         'Builder Agent': {'mode': True, 'id': "builder"},
+         'Java Agent': {'mode': True, 'id': "java"},
+         'JavaScript Agent': {'mode': True, 'id': "javascript"},
+         'React Agent': {'mode': True, 'id': "react"},
+         'Android Agent': {'mode': True, 'id': "android"},
+         'Flutter Agent': {'mode': True, 'id': "flutter"},
+         'Next.js Agent': {'mode': True, 'id': "next.js"},
+         'AngularJS Agent': {'mode': True, 'id': "angularjs"},
+         'Swift Agent': {'mode': True, 'id': "swift"},
+         'MongoDB Agent': {'mode': True, 'id': "mongodb"},
+         'PyTorch Agent': {'mode': True, 'id': "pytorch"},
+         'Xcode Agent': {'mode': True, 'id': "xcode"},
+         'Azure Agent': {'mode': True, 'id': "azure"},
+         'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
+         'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
+         'Docker Agent': {'mode': True, 'id': "docker"},
+         'Electron Agent': {'mode': True, 'id': "electron"},
+         'Erlang Agent': {'mode': True, 'id': "erlang"},
+         'FastAPI Agent': {'mode': True, 'id': "fastapi"},
+         'Firebase Agent': {'mode': True, 'id': "firebase"},
+         'Flask Agent': {'mode': True, 'id': "flask"},
+         'Git Agent': {'mode': True, 'id': "git"},
+         'Gitlab Agent': {'mode': True, 'id': "gitlab"},
+         'Go Agent': {'mode': True, 'id': "go"},
+         'Godot Agent': {'mode': True, 'id': "godot"},
+         'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
+         'Heroku Agent': {'mode': True, 'id': "heroku"},
+     }
+
+     # Complete list of all models (for authorized users) - used for AVAILABLE_MODELS
+     _all_models = list(dict.fromkeys([
+         *models, # Includes default_model, o3-mini, etc., and openrouter_models and agent names
+         *list(agentMode.keys()), # Ensure all agentMode keys are included
+         *list(trendingAgentMode.keys()) # Ensure all trendingAgentMode keys are included
+     ]))
+
+     AVAILABLE_MODELS = {name: name for name in _all_models}
+     # Update AVAILABLE_MODELS to use names from agentMode if available
+     for model_name_key in agentMode:
+         if model_name_key in AVAILABLE_MODELS: # Check if the key from agentMode is in _all_models
+             AVAILABLE_MODELS[model_name_key] = agentMode[model_name_key].get('name', model_name_key)
+
+
+     # Model aliases for easier reference
+     model_aliases = {
+         "gpt-4": default_model, # default_model is "GPT-4.1"
+         "gpt-4.1": default_model,
+         "gpt-4o": default_model, # Defaulting to GPT-4.1 as per previous logic if specific GPT-4o handling isn't defined elsewhere
+         "gpt-4o-mini": default_model, # Defaulting
+         "claude-3.7-sonnet": "Claude-sonnet-3.7",
+         "claude-3.5-sonnet": "Claude-sonnet-3.5",
+         # "deepseek-r1": "DeepSeek-R1", # This is in base models, maps to R1 or DeepSeek R1 Zero in agentMode
+         #
+         "deepcoder-14b": "Deepcoder 14B Preview",
+         "deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
+         "deepseek-r1-zero": "DeepSeek R1 Zero",
+         "deepseek-r1": "R1", # Alias for R1 (which is deepseek/deepseek-r1:free)
+         "dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
+         "dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
+         "reka-flash": "Flash 3",
+         "gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
+         "gemma-2-9b": "Gemma 2 9B",
+         "gemma-3-12b": "Gemma 3 12B",
+         "gemma-3-1b": "Gemma 3 1B",
+         "gemma-3-27b": "Gemma 3 27B",
+         "gemma-3-4b": "Gemma 3 4B",
+         "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
+         "llama-3.1-8b": "Llama 3.1 8B Instruct",
+         "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
+         "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
+         "llama-3.2-1b": "Llama 3.2 1B Instruct",
+         "llama-3.2-3b": "Llama 3.2 3B Instruct",
+         "llama-3.3-70b": "Llama 3.3 70B Instruct",
+         "nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
+         "llama-4-maverick": "Llama 4 Maverick",
+         "llama-4-scout": "Llama 4 Scout",
+         "mistral-7b": "Mistral 7B Instruct",
+         "mistral-nemo": "Mistral Nemo",
+         "mistral-small-24b": "Mistral Small 3", # Alias for "Mistral Small 3"
+         "mistral-small-24b-instruct-2501": "Mistral-Small-24B-Instruct-2501", # Specific name
+         "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
+         "molmo-7b": "Molmo 7B D",
+         "moonlight-16b": "Moonlight 16B A3B Instruct",
+         "qwen-2.5-72b": "Qwen2.5 72B Instruct",
+         "qwen-2.5-7b": "Qwen2.5 7B Instruct",
+         "qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
+         "qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
+         "qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
+         "qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
+         "qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
+         "qwerky-72b": "Qwerky 72B",
+         "qwq-32b": "QwQ 32B",
+         "qwq-32b-preview": "QwQ 32B Preview",
+         "qwq-32b-arliai": "QwQ 32B RpR v1",
+         "deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
+         "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
+         "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
+     }
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 8000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4.1",
+         system_message: str = "You are a helpful AI assistant."
+     ):
+         """Initialize BlackboxAI with enhanced configuration options."""
+         self.session = requests.Session()
+         self.max_tokens_to_sample = max_tokens
+         self.is_conversation = is_conversation
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = self.get_model(model)
+         self.system_message = system_message
+
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "*/*",
+         }
+         self.cookies = {
+             'cfzs_amplitude': self.generate_id(32),
+             'cfz_amplitude': self.generate_id(32),
+             '__cf_bm': self.generate_id(32),
+         }
+
+         self.__available_optimizers = (
+             method for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     @classmethod
+     def get_model(cls, model: str) -> str:
+         """Resolve model name from alias"""
+         # Convert to lowercase for case-insensitive matching
+         model_lower = model.lower()
+
+         # Check aliases (case-insensitive)
+         for alias, target in cls.model_aliases.items():
+             if model_lower == alias.lower():
+                 model = target
+                 break
+
+         # Check available models (case-insensitive)
+         for available_model, target in cls.AVAILABLE_MODELS.items():
+             if model_lower == available_model.lower() or model == target:
+                 return target
+
+         # If we get here, the model wasn't found
+         raise ValueError(f"Unknown model: {model}. Available models: {', '.join(cls.AVAILABLE_MODELS)}")
+
+     @classmethod
+     def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
+         """
+         Generate a dynamic session with proper ID and expiry format using a specific email.
+
+         Args:
+             email: The email to use for this session
+             id_length: Length of the numeric ID (default: 21)
+             days_ahead: Number of days ahead for expiry (default: 30)
+
+         Returns:
+             dict: A session dictionary with user information and expiry
+         """
+         # Generate a random name
+         first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
+         last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
+         name = f"{random.choice(first_names)} {random.choice(last_names)}"
+
+         # Generate numeric ID - using Google-like ID format
+         numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
+
+         # Generate future expiry date
+         future_date = datetime.now() + timedelta(days=days_ahead)
+         expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+
+         # Generate random image ID for the new URL format
+         chars = string.ascii_letters + string.digits + "-"
+         random_img_id = ''.join(random.choice(chars) for _ in range(48))
+         image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
+
+         return {
+             "user": {
+                 "name": name,
+                 "email": email,
+                 "image": image_url,
+                 "id": numeric_id
+             },
+             "expires": expiry,
+             "isNewUser": False
+         }
+
+     @classmethod
+     def generate_id(cls, length: int = 7) -> str:
+         """Generate a random ID of specified length"""
+         chars = string.ascii_letters + string.digits
+         return ''.join(random.choice(chars) for _ in range(length))
+
+     def _make_request(
+         self,
+         messages: List[Dict[str, str]],
+         stream: bool = False,
+         temperature: float = None,
+         top_p: float = None,
+         max_tokens: int = None,
+         media: List = None
+     ) -> Generator[str, None, None]:
+         """Make synchronous request to BlackboxAI API."""
+         # Generate a chat ID for this conversation
+         chat_id = self.generate_id()
+
+         # Format messages for the API
+         current_messages = []
+         for i, msg in enumerate(messages):
+             msg_id = chat_id if i == 0 and msg["role"] == "user" else self.generate_id()
+             current_msg = {
+                 "id": msg_id,
+                 "content": msg["content"],
+                 "role": msg["role"]
+             }
+             current_messages.append(current_msg)
+
+         # Add image data if provided
+         if media:
+             current_messages[-1]['data'] = {
+                 "imagesData": [
+                     {
+                         "filePath": f"/{image_name}",
+                         "contents": to_data_uri(image)
+                     } for image, image_name in media
+                 ],
+                 "fileText": "",
+                 "title": ""
+             }
+
+         # Generate a random email for the session
+         chars = string.ascii_lowercase + string.digits
+         random_team = ''.join(random.choice(chars) for _ in range(8))
+         request_email = f"{random_team}@blackbox.ai"
+
+         # Generate a session with the email
+         session_data = self.generate_session(request_email)
+
+         # Prepare the request data based on the working example
+         data = {
+             "messages": current_messages,
+             "agentMode": self.agentMode.get(self.model, {}) if self.model in self.agentMode else {},
+             "id": chat_id,
+             "previewToken": None,
+             "userId": None,
+             "codeModelMode": True,
+             "trendingAgentMode": {},
+             "isMicMode": False,
+             "userSystemPrompt": self.system_message,
+             "maxTokens": max_tokens or self.max_tokens_to_sample,
+             "playgroundTopP": top_p,
+             "playgroundTemperature": temperature,
+             "isChromeExt": False,
+             "githubToken": "",
+             "clickedAnswer2": False,
+             "clickedAnswer3": False,
+             "clickedForceWebSearch": False,
+             "visitFromDelta": False,
+             "isMemoryEnabled": False,
+             "mobileClient": False,
+             "userSelectedModel": self.model if self.model in self.userSelectedModel else None,
+             "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94", # Using a fixed validated value from the example
+             "imageGenerationMode": False,
+             "webSearchModePrompt": False,
+             "deepSearchMode": False,
+             "designerMode": False,
+             "domains": None,
+             "vscodeClient": False,
+             "codeInterpreterMode": False,
+             "customProfile": {
+                 "name": "",
+                 "occupation": "",
+                 "traits": [],
+                 "additionalInfo": "",
+                 "enableNewChats": False
+             },
+             "webSearchModeOption": {
+                 "autoMode": True,
+                 "webMode": False,
+                 "offlineMode": False
+             },
+             "session": session_data,
+             "isPremium": True,
+             "subscriptionCache": {
+                 "status": "PREMIUM",
+                 "customerId": "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14)),
+                 "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
+                 "lastChecked": int(datetime.now().timestamp() * 1000),
+                 "isTrialSubscription": True
+             },
+             "beastMode": False,
+             "reasoningMode": False,
+             "designerMode": False,
+             "workspaceId": ""
+         }
+
+         # Use LitAgent to generate a realistic browser fingerprint for headers
+         agent = LitAgent()
+         fingerprint = agent.generate_fingerprint("chrome")
+         headers = {
+             'accept': fingerprint['accept'],
+             'accept-encoding': 'gzip, deflate, br, zstd',
+             'accept-language': fingerprint['accept_language'],
+             'content-type': 'application/json',
+             'origin': 'https://www.blackbox.ai',
+             'referer': 'https://www.blackbox.ai/',
+             'sec-ch-ua': fingerprint['sec_ch_ua'],
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': f'"{fingerprint["platform"]}"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'user-agent': fingerprint['user_agent']
+         }
+
+         try:
+             response = self.session.post(
+                 self.api_endpoint,
+                 json=data,
+                 headers=headers,
+                 stream=stream,
+                 timeout=self.timeout
+             )
+
+             if not response.ok:
+                 error_msg = f"API request failed: {response.status_code} - {response.text}"
+
+                 # Check for service suspension
+                 if response.status_code == 503 and "service has been suspended" in response.text.lower():
+                     error_msg = "BlackboxAI service has been suspended by its owner. Please try again later or use a different provider."
+
+                 # Check for API endpoint issues
+                 if response.status_code == 403 and "replace" in response.text.lower() and "api.blackbox.ai" in response.text:
+                     error_msg = "BlackboxAI API endpoint issue. Please check the API endpoint configuration."
+
+                 raise exceptions.FailedToGenerateResponseError(error_msg)
+
+             if stream:
+                 for line in response.iter_lines(decode_unicode=True):
+                     if line:
+                         if "You have reached your request limit for the hour" in line:
+                             raise exceptions.RateLimitError("Rate limit exceeded")
+                         yield line
+             else:
+                 response_text = response.text
+                 if "You have reached your request limit for the hour" in response_text:
+                     raise exceptions.RateLimitError("Rate limit exceeded")
+                 yield response_text
+
+         except requests.exceptions.RequestException as e:
+             raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         temperature: float = None,
+         top_p: float = None,
+         max_tokens: int = None,
+         optimizer: str = None,
+         conversationally: bool = False,
+         media: List = None
+     ) -> Union[Dict[str, str], Generator[Dict[str, str], None, None]]:
+         """Send a prompt to BlackboxAI API and return the response."""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")
+
+         messages = [
+             {"role": "system", "content": self.system_message},
+             {"role": "user", "content": conversation_prompt}
+         ]
+
+         def for_stream():
+             for text in self._make_request(
+                 messages,
+                 stream=True,
+                 temperature=temperature,
+                 top_p=top_p,
+                 max_tokens=max_tokens,
+                 media=media
+             ):
+                 yield {"text": text}
+
+         def for_non_stream():
+             response_text = next(self._make_request(
+                 messages,
+                 stream=False,
+                 temperature=temperature,
+                 top_p=top_p,
+                 max_tokens=max_tokens,
+                 media=media
+             ))
+             self.last_response = {"text": response_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         temperature: float = None,
+         top_p: float = None,
+         max_tokens: int = None,
+         optimizer: str = None,
+         conversationally: bool = False,
+         media: List = None
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response as string."""
+
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 stream=True,
+                 temperature=temperature,
+                 top_p=top_p,
+                 max_tokens=max_tokens,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 media=media
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     stream=False,
+                     temperature=temperature,
+                     top_p=top_p,
+                     max_tokens=max_tokens,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     media=media
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Extract message from response dictionary."""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in BLACKBOXAI.AVAILABLE_MODELS:
+         try:
+             test_ai = BLACKBOXAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word")
+             response_text = response
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")