webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483)
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
@@ -1,18 +1,30 @@
+ import base64
+ import hashlib
+ import json
+ import random
  import time
  import uuid
+ from datetime import datetime, timezone
+ from typing import Any, Dict, Generator, List, Optional, Union, cast
+
  import requests
- import json
- import random
- import base64
- import hashlib
- from datetime import datetime, timedelta
- from typing import List, Dict, Optional, Union, Generator, Any

  # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
- ChatCompletionMessage, CompletionUsage
+ from webscout.Provider.OPENAI.base import (
+ BaseChat,
+ BaseCompletions,
+ OpenAICompatibleProvider,
+ SimpleModelList,
+ Tool,
+ )
+ from webscout.Provider.OPENAI.utils import (
+ ChatCompletion,
+ ChatCompletionChunk,
+ ChatCompletionMessage,
+ Choice,
+ ChoiceDelta,
+ CompletionUsage,
+ count_tokens,
  )

  # ANSI escape codes for formatting
@@ -20,14 +32,50 @@ BOLD = "\033[1m"
  RED = "\033[91m"
  RESET = "\033[0m"

+
  class ChatGPTReversed:
+ AVAILABLE_MODELS = [
+ "auto",
+ "gpt-5-1",
+ "gpt-5-1-instant",
+ "gpt-5-1-thinking",
+ "gpt-5",
+ "gpt-5-instant",
+ "gpt-5-thinking",
+ "gpt-4",
+ "gpt-4.1",
+ "gpt-4-1",
+ "gpt-4.1-mini",
+ "gpt-4-1-mini",
+ "gpt-4.5",
+ "gpt-4-5",
+ "gpt-4o",
+ "gpt-4o-mini",
+ "o1",
+ "o1-mini",
+ "o3-mini",
+ "o3-mini-high",
+ "o4-mini",
+ "o4-mini-high",
+ ]
  csrf_token = None
  initialized = False
- AVAILABLE_MODELS = ["auto", "gpt-4o-mini", "gpt-4o", "o4-mini"]
+
+ _instance = None
+
+ def __new__(cls, model="auto"):
+ if cls._instance is None:
+ cls._instance = super(ChatGPTReversed, cls).__new__(cls)
+ cls._instance.initialized = False
+ return cls._instance

  def __init__(self, model="auto"):
- if ChatGPTReversed.initialized:
- raise Exception("ChatGPTReversed has already been initialized.")
+ if self.initialized:
+ # Already initialized, just update model if needed
+ if model not in self.AVAILABLE_MODELS:
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+ self.model = model
+ return

  if model not in self.AVAILABLE_MODELS:
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
@@ -56,7 +104,7 @@ class ChatGPTReversed:
  "agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
  "platform": "Windows",
  "mobile": "?0",
- "ua": 'Not A(Brand";v="8", "Chromium";v="132", "Google Chrome";v="132'
+ "ua": 'Not A(Brand";v="8", "Chromium";v="132", "Google Chrome";v="132',
  }

  ip = self.random_ip()
@@ -80,58 +128,108 @@ class ChatGPTReversed:
  }

  if spoof_address:
- headers.update({
- "X-Forwarded-For": ip,
- "X-Originating-IP": ip,
- "X-Remote-IP": ip,
- "X-Remote-Addr": ip,
- "X-Host": ip,
- "X-Forwarded-Host": ip
- })
+ headers.update(
+ {
+ "X-Forwarded-For": ip,
+ "X-Originating-IP": ip,
+ "X-Remote-IP": ip,
+ "X-Remote-Addr": ip,
+ "X-Host": ip,
+ "X-Forwarded-Host": ip,
+ }
+ )

  return headers

- def solve_sentinel_challenge(self, seed, difficulty):
- """Solve the sentinel challenge for authentication."""
- cores = [8, 12, 16, 24]
- screens = [3000, 4000, 6000]
- core = random.choice(cores)
- screen = random.choice(screens)
-
- # Adjust time to match expected timezone
- now = datetime.now() - timedelta(hours=8)
- parse_time = now.strftime("%a, %d %b %Y %H:%M:%S GMT+0100 (Central European Time)")
+ def generate_proof_token(self, seed: str, difficulty: str, user_agent: Optional[str] = None):
+ """
+ Improved proof-of-work implementation based on gpt4free/g4f/Provider/openai/proofofwork.py

- config = [core + screen, parse_time, 4294705152, 0,
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36"]
+ Args:
+ seed: The seed string for the challenge
+ difficulty: The difficulty hex string
+ user_agent: Optional user agent string

- diff_len = len(difficulty) // 2
+ Returns:
+ The proof token starting with 'gAAAAAB'
+ """
+ if user_agent is None:
+ user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36"
+
+ screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
+
+ # Get current UTC time
+ now_utc = datetime.now(timezone.utc)
+ parse_time = now_utc.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
+ proof_token = [
+ screen,
+ parse_time,
+ None,
+ 0,
+ user_agent,
+ "https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
+ "dpl=1440a687921de39ff5ee56b92807faaadce73f13",
+ "en",
+ "en-US",
+ None,
+ "plugins−[object PluginArray]",
+ random.choice(
+ [
+ "_reactListeningcfilawjnerp",
+ "_reactListening9ne2dfo1i47",
+ "_reactListening410nzwhan2a",
+ ]
+ ),
+ random.choice(["alert", "ontransitionend", "onprogress"]),
+ ]

+ diff_len = len(difficulty)
  for i in range(100000):
- config[3] = i
- json_data = json.dumps(config)
+ proof_token[3] = i
+ json_data = json.dumps(proof_token)
  base = base64.b64encode(json_data.encode()).decode()
- hash_value = hashlib.sha3_512((seed + base).encode()).hexdigest()
+ hash_value = hashlib.sha3_512((seed + base).encode()).digest()

- if hash_value[:diff_len] <= difficulty:
- result = "gAAAAAB" + base
- return result
+ if hash_value.hex()[:diff_len] <= difficulty:
+ return "gAAAAAB" + base

  # Fallback
- fallback_base = base64.b64encode(seed.encode()).decode()
+ fallback_base = base64.b64encode(f'"{seed}"'.encode()).decode()
  return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base

+ def solve_sentinel_challenge(self, seed, difficulty):
+ """Solve the sentinel challenge for authentication using improved algorithm."""
+ return self.generate_proof_token(seed, difficulty)
+
  def generate_fake_sentinel_token(self):
  """Generate a fake sentinel token for initial authentication."""
  prefix = "gAAAAAC"
+
+ # More realistic screen sizes
+ screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
+
+ # Get current UTC time
+ now_utc = datetime.now(timezone.utc)
+ parse_time = now_utc.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
  config = [
- random.randint(3000, 6000),
- datetime.now().strftime("%a, %d %b %Y %H:%M:%S GMT+0100 (Central European Time)"),
- 4294705152, 0,
+ screen,
+ parse_time,
+ 4294705152,
+ 0,
  "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
- "de", "de", 401, "mediaSession", "location", "scrollX",
+ "de",
+ "de",
+ 401,
+ "mediaSession",
+ "location",
+ "scrollX",
  self.random_float(1000, 5000),
- str(uuid.uuid4()), "", 12, int(time.time() * 1000)
+ str(uuid.uuid4()),
+ "",
+ 12,
+ int(time.time() * 1000),
  ]

  base64_str = base64.b64encode(json.dumps(config).encode()).decode()
@@ -152,14 +250,16 @@ class ChatGPTReversed:
  try:
  if part.startswith("data: "):
  json_data = json.loads(part[6:])
- if (json_data.get("message") and
- json_data["message"].get("status") == "finished_successfully" and
- json_data["message"].get("metadata", {}).get("is_complete")):
+ if (
+ json_data.get("message")
+ and json_data["message"].get("status") == "finished_successfully"
+ and json_data["message"].get("metadata", {}).get("is_complete")
+ ):
  return json_data["message"]["content"]["parts"][0]
- except:
+ except Exception:
  pass

- return input_text # Return raw text if parsing fails or no complete message found
+ return input_text  # Return raw text if parsing fails or no complete message found

  def rotate_session_data(self):
  """Rotate session data to maintain fresh authentication."""
@@ -169,11 +269,7 @@ class ChatGPTReversed:

  ChatGPTReversed.csrf_token = csrf_token

- return {
- "uuid": uuid_val,
- "csrf": csrf_token,
- "sentinel": sentinel_token
- }
+ return {"uuid": uuid_val, "csrf": csrf_token, "sentinel": sentinel_token}

  def get_csrf_token(self, uuid_val):
  """Get CSRF token for authentication."""
@@ -181,15 +277,10 @@ class ChatGPTReversed:
  return ChatGPTReversed.csrf_token

  headers = self.simulate_bypass_headers(
- accept="application/json",
- spoof_address=True,
- pre_oai_uuid=uuid_val
+ accept="application/json", spoof_address=True, pre_oai_uuid=uuid_val
  )

- response = requests.get(
- "https://chatgpt.com/api/auth/csrf",
- headers=headers
- )
+ response = requests.get("https://chatgpt.com/api/auth/csrf", headers=headers)

  data = response.json()
  if "csrfToken" not in data:
@@ -200,9 +291,7 @@ class ChatGPTReversed:
  def get_sentinel_token(self, uuid_val, csrf):
  """Get sentinel token for authentication."""
  headers = self.simulate_bypass_headers(
- accept="application/json",
- spoof_address=True,
- pre_oai_uuid=uuid_val
+ accept="application/json", spoof_address=True, pre_oai_uuid=uuid_val
  )

  test = self.generate_fake_sentinel_token()
@@ -212,8 +301,8 @@ class ChatGPTReversed:
  json={"p": test},
  headers={
  **headers,
- "Cookie": f"__Host-next-auth.csrf-token={csrf}; oai-did={uuid_val}; oai-nav-state=1;"
- }
+ "Cookie": f"__Host-next-auth.csrf-token={csrf}; oai-did={uuid_val}; oai-nav-state=1;",
+ },
  )

  data = response.json()
@@ -230,15 +319,10 @@ class ChatGPTReversed:
  raise Exception("Failed to fetch required oai-sc token")

  challenge_token = self.solve_sentinel_challenge(
- data["proofofwork"]["seed"],
- data["proofofwork"]["difficulty"]
+ data["proofofwork"]["seed"], data["proofofwork"]["difficulty"]
  )

- return {
- "token": data["token"],
- "proof": challenge_token,
- "oaiSc": oai_sc
- }
+ return {"token": data["token"], "proof": challenge_token, "oaiSc": oai_sc}

  def complete(self, message, model=None):
  """Complete a message using ChatGPT.
@@ -252,42 +336,45 @@ class ChatGPTReversed:
  The complete response as a string.
  """
  if not ChatGPTReversed.initialized:
- raise Exception("ChatGPTReversed has not been initialized. Please initialize the instance before calling this method.")
+ raise Exception(
+ "ChatGPTReversed has not been initialized. Please initialize the instance before calling this method."
+ )

  # Use the provided model or fall back to the instance model
  selected_model = model if model else self.model

  # Validate the model
  if selected_model not in self.AVAILABLE_MODELS:
- raise ValueError(f"Invalid model: {selected_model}. Choose from: {self.AVAILABLE_MODELS}")
+ raise ValueError(
+ f"Invalid model: {selected_model}. Choose from: {self.AVAILABLE_MODELS}"
+ )

  session_data = self.rotate_session_data()

  headers = self.simulate_bypass_headers(
- accept="plain/text", # Changed accept header as we expect full response now
+ accept="plain/text",  # Changed accept header as we expect full response now
  spoof_address=True,
- pre_oai_uuid=session_data["uuid"]
+ pre_oai_uuid=session_data["uuid"],
  )

- headers.update({
- "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
- "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
- "openai-sentinel-proof-token": session_data["sentinel"]["proof"]
- })
+ headers.update(
+ {
+ "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
+ "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
+ "openai-sentinel-proof-token": session_data["sentinel"]["proof"],
+ }
+ )

  payload = {
  "action": "next",
- "messages": [{
- "id": self.random_uuid(),
- "author": {
- "role": "user"
- },
- "content": {
- "content_type": "text",
- "parts": [message]
- },
- "metadata": {}
- }],
+ "messages": [
+ {
+ "id": self.random_uuid(),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [message]},
+ "metadata": {},
+ }
+ ],
  "parent_message_id": self.random_uuid(),
  "model": selected_model, # Use the selected model
  "timezone_offset_min": -120,
@@ -295,7 +382,7 @@ class ChatGPTReversed:
  "history_and_training_disabled": False,
  "conversation_mode": {
  "kind": "primary_assistant",
- "plugin_ids": None # Ensure web search is not used
+ "plugin_ids": None,  # Ensure web search is not used
  },
  "force_paragen": False,
  "force_paragen_model_slug": "",
@@ -303,13 +390,11 @@ class ChatGPTReversed:
  "force_rate_limit": False,
  "reset_rate_limits": False,
  "websocket_request_id": self.random_uuid(),
- "force_use_sse": True # Keep SSE for receiving the full response
+ "force_use_sse": True,  # Keep SSE for receiving the full response
  }

  response = requests.post(
- "https://chatgpt.com/backend-anon/conversation",
- json=payload,
- headers=headers
+ "https://chatgpt.com/backend-anon/conversation", json=payload, headers=headers
  )

  if response.status_code != 200:
@@ -319,7 +404,7 @@ class ChatGPTReversed:


  class Completions(BaseCompletions):
- def __init__(self, client: 'ChatGPT'):
+ def __init__(self, client: "ChatGPT"):
  self._client = client
  self._chatgpt_reversed = None

@@ -332,7 +417,13 @@ class Completions(BaseCompletions):
  stream: bool = False,
  temperature: Optional[float] = None,
  top_p: Optional[float] = None,
- **kwargs: Any
+ tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None, # Support for tool definitions
+ tool_choice: Optional[
+ Union[str, Dict[str, Any]]
+ ] = None, # Support for tool_choice parameter
+ timeout: Optional[int] = None,
+ proxies: Optional[dict] = None,
+ **kwargs: Any,
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
  """
  Create a chat completion with ChatGPT API.
@@ -344,6 +435,8 @@ class Completions(BaseCompletions):
  stream: Whether to stream the response
  temperature: Sampling temperature (0-1)
  top_p: Nucleus sampling parameter (0-1)
+ tools: List of tool definitions available for the model to use
+ tool_choice: Control over which tool the model should use
  **kwargs: Additional parameters to pass to the API

  Returns:
@@ -362,9 +455,13 @@ class Completions(BaseCompletions):
  max_tokens=max_tokens,
  temperature=temperature,
  top_p=top_p,
- **kwargs
+ tools=tools,
+ tool_choice=tool_choice,
+ timeout=timeout,
+ proxies=proxies,
+ **kwargs,
  )
-
+
  # Otherwise use non-streaming implementation
  return self._create_non_streaming(
  model=model,
@@ -372,7 +469,11 @@ class Completions(BaseCompletions):
  max_tokens=max_tokens,
  temperature=temperature,
  top_p=top_p,
- **kwargs
+ tools=tools,
+ tool_choice=tool_choice,
+ timeout=timeout,
+ proxies=proxies,
+ **kwargs,
  )

  def _create_streaming(
@@ -383,62 +484,182 @@ class Completions(BaseCompletions):
  max_tokens: Optional[int] = None,
  temperature: Optional[float] = None,
  top_p: Optional[float] = None,
- **kwargs: Any
+ tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,
+ tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+ timeout: Optional[int] = None,
+ proxies: Optional[dict] = None,
+ **kwargs: Any,
  ) -> Generator[ChatCompletionChunk, None, None]:
  """Implementation for streaming chat completions."""
  try:
  # Generate request ID and timestamp
  request_id = str(uuid.uuid4())
  created_time = int(time.time())
-
+
  # Get the last user message
  last_user_message = None
  for msg in reversed(messages):
  if msg["role"] == "user":
  last_user_message = msg["content"]
  break
-
+
  if not last_user_message:
  raise ValueError("No user message found in the conversation")
-
- # Get the response from ChatGPT
- response = self._chatgpt_reversed.complete(last_user_message, model=model)
-
- # Split the response into chunks for streaming simulation
- chunk_size = 10 # Characters per chunk
- for i in range(0, len(response), chunk_size):
- chunk_text = response[i:i+chunk_size]
-
- # Create and yield a chunk
- delta = ChoiceDelta(content=chunk_text)
- choice = Choice(index=0, delta=delta, finish_reason=None)
- chunk = ChatCompletionChunk(
- id=request_id,
- choices=[choice],
- created=created_time,
- model=model
+
+ # Initialize ChatGPTReversed if not already initialized
+ if self._chatgpt_reversed is None:
+ self._chatgpt_reversed = ChatGPTReversed(model=model)
+
+ # Create a proper streaming request to ChatGPT
+ session_data = self._chatgpt_reversed.rotate_session_data()
+
+ headers = self._chatgpt_reversed.simulate_bypass_headers(
+ accept="text/event-stream", spoof_address=True, pre_oai_uuid=session_data["uuid"]
+ )
+
+ headers.update(
+ {
+ "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
+ "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
+ "openai-sentinel-proof-token": session_data["sentinel"]["proof"],
+ }
+ )
+
+ # Format messages properly for ChatGPT
+ formatted_messages = []
+ for i, msg in enumerate(messages):
+ formatted_messages.append(
+ {
+ "id": str(uuid.uuid4()),
+ "author": {"role": msg["role"]},
+ "content": {"content_type": "text", "parts": [msg["content"]]},
+ "metadata": {},
+ }
  )
-
- yield chunk
-
- # Add a small delay to simulate streaming
- time.sleep(0.05)
-
- # Final chunk with finish_reason
- delta = ChoiceDelta(content=None)
- choice = Choice(index=0, delta=delta, finish_reason="stop")
- chunk = ChatCompletionChunk(
- id=request_id,
- choices=[choice],
- created=created_time,
- model=model
+
+ payload = {
+ "action": "next",
+ "messages": formatted_messages,
+ "parent_message_id": str(uuid.uuid4()),
+ "model": model,
+ "timezone_offset_min": -120,
+ "suggestions": [],
+ "history_and_training_disabled": False,
+ "conversation_mode": {"kind": "primary_assistant", "plugin_ids": None},
+ "force_paragem": False,
+ "force_paragem_model_slug": "",
+ "force_nulligen": False,
+ "force_rate_limit": False,
+ "reset_rate_limits": False,
+ "websocket_request_id": str(uuid.uuid4()),
+ "force_use_sse": True,
+ }
+
+ # Add optional parameters if provided
+ if max_tokens is not None:
+ payload["max_tokens"] = max_tokens
+ if temperature is not None:
+ payload["temperature"] = temperature
+ if top_p is not None:
+ payload["top_p"] = top_p
+
+ # Make the actual streaming request
+ response = requests.post(
+ "https://chatgpt.com/backend-anon/conversation",
+ json=payload,
+ headers=headers,
+ stream=True,
+ timeout=timeout or 30,
  )
-
- yield chunk
-
+
+ response.raise_for_status()
+
+ # Track conversation state
+ full_content = ""
+ prompt_tokens = count_tokens(str(messages))
+ completion_tokens = 0
+ total_tokens = prompt_tokens
+
+ # Process the streaming response
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ if line.startswith("data: "):
+ data_str = line[6:] # Remove "data: " prefix
+
+ # Handle [DONE] message
+ if data_str.strip() == "[DONE]":
+ # Final chunk with finish_reason
+ delta = ChoiceDelta(content=None)
+ choice = Choice(index=0, delta=delta, finish_reason="stop")
+ chunk = ChatCompletionChunk(
+ id=request_id, choices=[choice], created=created_time, model=model
+ )
+ chunk.usage = {
+ "prompt_tokens": prompt_tokens,
+ "completion_tokens": completion_tokens,
+ "total_tokens": total_tokens,
+ }
+ yield chunk
+ break
+
+ try:
+ data = json.loads(data_str)
+
+ # Handle different types of messages
+ if data.get("message"):
+ message = data["message"]
+
+ # Handle assistant responses
+ if message.get("author", {}).get("role") == "assistant":
+ content_parts = message.get("content", {}).get("parts", [])
+ if content_parts:
+ new_content = content_parts[0]
+
+ # Get the delta (new content since last chunk)
+ delta_content = (
+ new_content[len(full_content) :]
+ if new_content.startswith(full_content)
+ else new_content
+ )
+ full_content = new_content
+ completion_tokens = count_tokens(full_content)
+ total_tokens = prompt_tokens + completion_tokens
+
+ # Only yield chunk if there's new content
+ if delta_content:
+ delta = ChoiceDelta(
+ content=delta_content, role="assistant"
+ )
+ choice = Choice(
+ index=0, delta=delta, finish_reason=None
+ )
+ chunk = ChatCompletionChunk(
+ id=request_id,
+ choices=[choice],
+ created=created_time,
+ model=model,
+ )
+ chunk.usage = {
+ "prompt_tokens": prompt_tokens,
+ "completion_tokens": completion_tokens,
+ "total_tokens": total_tokens,
+ }
+ yield chunk
+
+ # Handle finish status
+ if message.get("status") == "finished_successfully":
+ pass
+
+ elif data.get("type") == "message_stream_complete":
+ # Stream is complete
+ pass
+
+ except json.JSONDecodeError:
+ # Skip invalid JSON lines
+ continue
+
  except Exception as e:
- print(f"{RED}Error during ChatGPT streaming request: {e}{RESET}")
- raise IOError(f"ChatGPT streaming request failed: {e}") from e
+ raise IOError(f"ChatGPT request failed: {e}") from e

  def _create_non_streaming(
  self,
@@ -448,50 +669,133 @@ class Completions(BaseCompletions):
  max_tokens: Optional[int] = None,
  temperature: Optional[float] = None,
  top_p: Optional[float] = None,
- **kwargs: Any
+ tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,
+ tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+ timeout: Optional[int] = None,
+ proxies: Optional[dict] = None,
+ **kwargs: Any,
  ) -> ChatCompletion:
  """Implementation for non-streaming chat completions."""
  try:
  # Generate request ID and timestamp
  request_id = str(uuid.uuid4())
  created_time = int(time.time())
-
- # Get the last user message
- last_user_message = None
- for msg in reversed(messages):
- if msg["role"] == "user":
- last_user_message = msg["content"]
- break
-
- if not last_user_message:
- raise ValueError("No user message found in the conversation")
-
- # Get the response from ChatGPT
- full_content = self._chatgpt_reversed.complete(last_user_message, model=model)
-
- # Create the completion message
- message = ChatCompletionMessage(
- role="assistant",
- content=full_content
+
+ # Initialize ChatGPTReversed if not already initialized
+ if self._chatgpt_reversed is None:
+ self._chatgpt_reversed = ChatGPTReversed(model=model)
+
+ # Create a proper request to ChatGPT
+ session_data = self._chatgpt_reversed.rotate_session_data()
+
+ headers = self._chatgpt_reversed.simulate_bypass_headers(
+ accept="text/event-stream", spoof_address=True, pre_oai_uuid=session_data["uuid"]
  )
-
- # Create the choice
- choice = Choice(
- index=0,
- message=message,
- finish_reason="stop"
+
+ headers.update(
+ {
+ "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
+ "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
+ "openai-sentinel-proof-token": session_data["sentinel"]["proof"],
+ }
  )
-
- # Estimate token usage (very rough estimate)
- prompt_tokens = sum(len(msg.get("content", "")) // 4 for msg in messages)
- completion_tokens = len(full_content) // 4
+
+ # Format messages properly for ChatGPT
+ formatted_messages = []
+ for i, msg in enumerate(messages):
+ formatted_messages.append(
+ {
+ "id": str(uuid.uuid4()),
+ "author": {"role": msg["role"]},
+ "content": {"content_type": "text", "parts": [msg["content"]]},
+ "metadata": {},
+ }
+ )
+
+ payload = {
+ "action": "next",
+ "messages": formatted_messages,
+ "parent_message_id": str(uuid.uuid4()),
+ "model": model,
+ "timezone_offset_min": -120,
+ "suggestions": [],
+ "history_and_training_disabled": False,
+ "conversation_mode": {"kind": "primary_assistant", "plugin_ids": None},
+ "force_paragem": False,
+ "force_paragem_model_slug": "",
+ "force_nulligen": False,
+ "force_rate_limit": False,
+ "reset_rate_limits": False,
+ "websocket_request_id": str(uuid.uuid4()),
+ "force_use_sse": True,
+ }
+
+ # Add optional parameters if provided
+ if max_tokens is not None:
+ payload["max_tokens"] = max_tokens
+ if temperature is not None:
+ payload["temperature"] = temperature
+ if top_p is not None:
+ payload["top_p"] = top_p
+
+ # Make the request and collect full response
+ response = requests.post(
+ "https://chatgpt.com/backend-anon/conversation",
+ json=payload,
+ headers=headers,
+ stream=True,
+ timeout=timeout or 30,
+ )
+
+ response.raise_for_status()
+
+ # Collect and parse the full response
+ full_response = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ if line.startswith("data: "):
+ data_str = line[6:] # Remove "data: " prefix
+
+ # Handle [DONE] message
+ if data_str.strip() == "[DONE]":
+ break
+
+ try:
+ data = json.loads(data_str)
+
+ # Handle assistant responses
+ if (
+ data.get("message")
+ and data["message"].get("author", {}).get("role") == "assistant"
+ ):
+ content_parts = data["message"].get("content", {}).get("parts", [])
+ if content_parts:
+ full_response = content_parts[0]
+
+ except json.JSONDecodeError:
+ # Skip invalid JSON lines
+ continue
+
+ # Create the completion message
+ message = ChatCompletionMessage(role="assistant", content=full_response)
+
+ # Create the choice
+ choice = Choice(index=0, message=message, finish_reason="stop")
+
+ # Calculate token usage using count_tokens
+ # Count tokens in the input messages (prompt)
+ prompt_tokens = count_tokens(str(messages))
+ # Count tokens in the response (completion)
+ completion_tokens = count_tokens(full_response)
+ total_tokens = prompt_tokens + completion_tokens
+
  usage = CompletionUsage(
  prompt_tokens=prompt_tokens,
  completion_tokens=completion_tokens,
- total_tokens=prompt_tokens + completion_tokens
+ total_tokens=total_tokens,
  )
-
- # Create the completion object
+
+ # Create the completion object with correct OpenAI format
  completion = ChatCompletion(
  id=request_id,
  choices=[choice],
@@ -499,17 +803,19 @@ class Completions(BaseCompletions):
  model=model,
  usage=usage,
  )
-
+
  return completion
-
+
  except Exception as e:
  print(f"{RED}Error during ChatGPT non-stream request: {e}{RESET}")
  raise IOError(f"ChatGPT request failed: {e}") from e

+
  class Chat(BaseChat):
- def __init__(self, client: 'ChatGPT'):
+ def __init__(self, client: "ChatGPT"):
  self.completions = Completions(client)

+
  class ChatGPT(OpenAICompatibleProvider):
  """
  OpenAI-compatible client for ChatGPT API.
@@ -523,27 +829,42 @@ class ChatGPT(OpenAICompatibleProvider):
  print(response.choices[0].message.content)
  """

- AVAILABLE_MODELS = [
- "auto",
- "gpt-4o-mini",
- "gpt-4o",
- "o4-mini"
- ]
+ required_auth = False

  def __init__(
  self,
- timeout: int = 60,
- proxies: dict = {}
+ api_key: Optional[str] = None,
+ tools: Optional[List[Tool]] = None,
+ proxies: Optional[Dict[str, str]] = None,
  ):
  """
  Initialize the ChatGPT client.

  Args:
- timeout: Request timeout in seconds
- proxies: Optional proxy configuration
+ api_key: Optional API key (not used by ChatGPTReversed but included for interface compatibility)
+ tools: Optional list of tools to register with the provider
+ proxies: Optional proxy configuration dict, e.g. {"http": "http://proxy:8080", "https": "https://proxy:8080"}
  """
- self.timeout = timeout
- self.proxies = proxies
-
+ super().__init__(api_key=api_key, tools=tools, proxies=proxies)
  # Initialize chat interface
  self.chat = Chat(self)
+
+ @property
+ def AVAILABLE_MODELS(self):
+ return ChatGPTReversed.AVAILABLE_MODELS
+
+ @property
+ def models(self) -> SimpleModelList:
+ return SimpleModelList(self.AVAILABLE_MODELS)
+
+
+ if __name__ == "__main__":
+ # Example usage
+ client = ChatGPT()
+ response = client.chat.completions.create(
+ model="o4-mini-high", messages=[{"role": "user", "content": "How many r in strawberry"}]
+ )
+ if isinstance(response, ChatCompletion):
+ if response.choices[0].message and response.choices[0].message.content:
+ print(response.choices[0].message.content)
+ print()