webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (413) hide show
  1. webscout/AIauto.py +524 -251
  2. webscout/AIbase.py +247 -319
  3. webscout/AIutel.py +68 -703
  4. webscout/Bard.py +1072 -1026
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +403 -232
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -684
  37. webscout/Extra/tempmail/README.md +487 -487
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +292 -333
  49. webscout/Provider/AISEARCH/README.md +106 -279
  50. webscout/Provider/AISEARCH/__init__.py +16 -9
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +357 -410
  53. webscout/Provider/AISEARCH/monica_search.py +200 -220
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -342
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +467 -340
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +175 -169
  67. webscout/Provider/GithubChat.py +385 -369
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -801
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -375
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -291
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -384
  77. webscout/Provider/Netwrck.py +273 -270
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -952
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -357
  85. webscout/Provider/OPENAI/__init__.py +148 -40
  86. webscout/Provider/OPENAI/ai4chat.py +348 -293
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
  90. webscout/Provider/OPENAI/base.py +253 -249
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +870 -556
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -173
  94. webscout/Provider/OPENAI/deepinfra.py +403 -322
  95. webscout/Provider/OPENAI/e2b.py +2370 -1414
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +452 -417
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -364
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +333 -308
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -335
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +374 -357
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -287
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +305 -304
  115. webscout/Provider/OPENAI/textpollinations.py +370 -339
  116. webscout/Provider/OPENAI/toolbaz.py +375 -413
  117. webscout/Provider/OPENAI/typefully.py +419 -355
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -318
  120. webscout/Provider/OPENAI/wisecat.py +359 -387
  121. webscout/Provider/OPENAI/writecream.py +185 -163
  122. webscout/Provider/OPENAI/x0gpt.py +462 -365
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -429
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -82
  133. webscout/Provider/TTI/__init__.py +37 -7
  134. webscout/Provider/TTI/base.py +147 -64
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -201
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -221
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -11
  141. webscout/Provider/TTS/README.md +186 -192
  142. webscout/Provider/TTS/__init__.py +43 -10
  143. webscout/Provider/TTS/base.py +523 -159
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -129
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -580
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +331 -308
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -475
  158. webscout/Provider/TypliAI.py +311 -305
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -209
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
  161. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
  162. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
  165. webscout/Provider/UNFINISHED/Youchat.py +347 -330
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
  170. webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
  171. webscout/Provider/WiseCat.py +256 -233
  172. webscout/Provider/WrDoChat.py +390 -370
  173. webscout/Provider/__init__.py +115 -174
  174. webscout/Provider/ai4chat.py +181 -174
  175. webscout/Provider/akashgpt.py +330 -335
  176. webscout/Provider/cerebras.py +397 -290
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -283
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -325
  182. webscout/Provider/llama3mitril.py +230 -215
  183. webscout/Provider/llmchat.py +308 -258
  184. webscout/Provider/llmchatco.py +321 -306
  185. webscout/Provider/meta.py +996 -801
  186. webscout/Provider/oivscode.py +332 -309
  187. webscout/Provider/searchchat.py +316 -292
  188. webscout/Provider/sonus.py +264 -258
  189. webscout/Provider/toolbaz.py +359 -353
  190. webscout/Provider/turboseek.py +332 -266
  191. webscout/Provider/typefully.py +262 -202
  192. webscout/Provider/x0gpt.py +332 -299
  193. webscout/__init__.py +31 -39
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -524
  196. webscout/client.py +1497 -70
  197. webscout/conversation.py +140 -436
  198. webscout/exceptions.py +383 -362
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +74 -420
  204. webscout/prompt_manager.py +376 -288
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -404
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -210
  210. webscout/scout/core/scout.py +800 -607
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -478
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -95
  284. webscout/swiftcli/core/__init__.py +7 -7
  285. webscout/swiftcli/core/cli.py +574 -297
  286. webscout/swiftcli/core/context.py +98 -104
  287. webscout/swiftcli/core/group.py +268 -241
  288. webscout/swiftcli/decorators/__init__.py +28 -28
  289. webscout/swiftcli/decorators/command.py +243 -221
  290. webscout/swiftcli/decorators/options.py +247 -220
  291. webscout/swiftcli/decorators/output.py +392 -252
  292. webscout/swiftcli/exceptions.py +21 -21
  293. webscout/swiftcli/plugins/__init__.py +9 -9
  294. webscout/swiftcli/plugins/base.py +134 -135
  295. webscout/swiftcli/plugins/manager.py +269 -269
  296. webscout/swiftcli/utils/__init__.py +58 -59
  297. webscout/swiftcli/utils/formatting.py +251 -252
  298. webscout/swiftcli/utils/parsing.py +368 -267
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -135
  304. webscout/zeroart/base.py +70 -66
  305. webscout/zeroart/effects.py +155 -101
  306. webscout/zeroart/fonts.py +1799 -1239
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
  311. webscout/DWEBS.py +0 -520
  312. webscout/Extra/Act.md +0 -309
  313. webscout/Extra/GitToolkit/gitapi/README.md +0 -110
  314. webscout/Extra/autocoder/__init__.py +0 -9
  315. webscout/Extra/autocoder/autocoder.py +0 -1105
  316. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  317. webscout/Extra/gguf.md +0 -430
  318. webscout/Extra/weather.md +0 -281
  319. webscout/Litlogger/README.md +0 -10
  320. webscout/Litlogger/__init__.py +0 -15
  321. webscout/Litlogger/formats.py +0 -4
  322. webscout/Litlogger/handlers.py +0 -103
  323. webscout/Litlogger/levels.py +0 -13
  324. webscout/Litlogger/logger.py +0 -92
  325. webscout/Provider/AI21.py +0 -177
  326. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  327. webscout/Provider/AISEARCH/felo_search.py +0 -202
  328. webscout/Provider/AISEARCH/genspark_search.py +0 -324
  329. webscout/Provider/AISEARCH/hika_search.py +0 -186
  330. webscout/Provider/AISEARCH/scira_search.py +0 -298
  331. webscout/Provider/Aitopia.py +0 -316
  332. webscout/Provider/AllenAI.py +0 -440
  333. webscout/Provider/Blackboxai.py +0 -791
  334. webscout/Provider/ChatGPTClone.py +0 -237
  335. webscout/Provider/ChatGPTGratis.py +0 -194
  336. webscout/Provider/Cloudflare.py +0 -324
  337. webscout/Provider/ExaChat.py +0 -358
  338. webscout/Provider/Flowith.py +0 -217
  339. webscout/Provider/FreeGemini.py +0 -250
  340. webscout/Provider/Glider.py +0 -225
  341. webscout/Provider/HF_space/__init__.py +0 -0
  342. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  343. webscout/Provider/HuggingFaceChat.py +0 -469
  344. webscout/Provider/Hunyuan.py +0 -283
  345. webscout/Provider/LambdaChat.py +0 -411
  346. webscout/Provider/Llama3.py +0 -259
  347. webscout/Provider/Nemotron.py +0 -218
  348. webscout/Provider/OLLAMA.py +0 -396
  349. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
  350. webscout/Provider/OPENAI/Cloudflare.py +0 -378
  351. webscout/Provider/OPENAI/FreeGemini.py +0 -283
  352. webscout/Provider/OPENAI/NEMOTRON.py +0 -232
  353. webscout/Provider/OPENAI/Qwen3.py +0 -283
  354. webscout/Provider/OPENAI/api.py +0 -969
  355. webscout/Provider/OPENAI/c4ai.py +0 -373
  356. webscout/Provider/OPENAI/chatgptclone.py +0 -494
  357. webscout/Provider/OPENAI/copilot.py +0 -242
  358. webscout/Provider/OPENAI/flowith.py +0 -162
  359. webscout/Provider/OPENAI/freeaichat.py +0 -359
  360. webscout/Provider/OPENAI/mcpcore.py +0 -389
  361. webscout/Provider/OPENAI/multichat.py +0 -376
  362. webscout/Provider/OPENAI/opkfc.py +0 -496
  363. webscout/Provider/OPENAI/scirachat.py +0 -477
  364. webscout/Provider/OPENAI/standardinput.py +0 -433
  365. webscout/Provider/OPENAI/typegpt.py +0 -364
  366. webscout/Provider/OPENAI/uncovrAI.py +0 -463
  367. webscout/Provider/OPENAI/venice.py +0 -431
  368. webscout/Provider/OPENAI/yep.py +0 -382
  369. webscout/Provider/OpenGPT.py +0 -209
  370. webscout/Provider/Perplexitylabs.py +0 -415
  371. webscout/Provider/Reka.py +0 -214
  372. webscout/Provider/StandardInput.py +0 -290
  373. webscout/Provider/TTI/aiarta.py +0 -365
  374. webscout/Provider/TTI/artbit.py +0 -0
  375. webscout/Provider/TTI/fastflux.py +0 -200
  376. webscout/Provider/TTI/piclumen.py +0 -203
  377. webscout/Provider/TTI/pixelmuse.py +0 -225
  378. webscout/Provider/TTS/gesserit.py +0 -128
  379. webscout/Provider/TTS/sthir.py +0 -94
  380. webscout/Provider/TeachAnything.py +0 -229
  381. webscout/Provider/UNFINISHED/puterjs.py +0 -635
  382. webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
  383. webscout/Provider/Venice.py +0 -258
  384. webscout/Provider/VercelAI.py +0 -253
  385. webscout/Provider/Writecream.py +0 -246
  386. webscout/Provider/WritingMate.py +0 -269
  387. webscout/Provider/asksteve.py +0 -220
  388. webscout/Provider/chatglm.py +0 -215
  389. webscout/Provider/copilot.py +0 -425
  390. webscout/Provider/freeaichat.py +0 -285
  391. webscout/Provider/granite.py +0 -235
  392. webscout/Provider/hermes.py +0 -266
  393. webscout/Provider/koala.py +0 -170
  394. webscout/Provider/lmarena.py +0 -198
  395. webscout/Provider/multichat.py +0 -364
  396. webscout/Provider/scira_chat.py +0 -299
  397. webscout/Provider/scnet.py +0 -243
  398. webscout/Provider/talkai.py +0 -194
  399. webscout/Provider/typegpt.py +0 -289
  400. webscout/Provider/uncovr.py +0 -368
  401. webscout/Provider/yep.py +0 -389
  402. webscout/litagent/Readme.md +0 -276
  403. webscout/litprinter/__init__.py +0 -59
  404. webscout/swiftcli/Readme.md +0 -323
  405. webscout/tempid.py +0 -128
  406. webscout/webscout_search.py +0 -1184
  407. webscout/webscout_search_async.py +0 -654
  408. webscout/yep_search.py +0 -347
  409. webscout/zeroart/README.md +0 -89
  410. webscout-8.2.9.dist-info/METADATA +0 -1033
  411. webscout-8.2.9.dist-info/RECORD +0 -289
  412. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
  413. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
@@ -1,556 +1,870 @@
1
- import time
2
- import uuid
3
- import requests
4
- import json
5
- import random
6
- import base64
7
- import hashlib
8
- from datetime import datetime, timedelta
9
- from typing import List, Dict, Optional, Union, Generator, Any
10
-
11
- # Import base classes and utility structures
12
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
13
- from .utils import (
14
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
15
- ChatCompletionMessage, CompletionUsage, count_tokens
16
- )
17
-
18
- # ANSI escape codes for formatting
19
- BOLD = "\033[1m"
20
- RED = "\033[91m"
21
- RESET = "\033[0m"
22
-
23
- class ChatGPTReversed:
24
- csrf_token = None
25
- initialized = False
26
- AVAILABLE_MODELS = ["auto", "gpt-4o-mini", "gpt-4o", "o4-mini"]
27
-
28
- def __init__(self, model="auto"):
29
- if ChatGPTReversed.initialized:
30
- raise Exception("ChatGPTReversed has already been initialized.")
31
-
32
- if model not in self.AVAILABLE_MODELS:
33
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
34
-
35
- self.model = model
36
- self.initialize()
37
-
38
- def initialize(self):
39
- ChatGPTReversed.initialized = True
40
-
41
- def random_ip(self):
42
- """Generate a random IP address."""
43
- return ".".join(str(random.randint(0, 255)) for _ in range(4))
44
-
45
- def random_uuid(self):
46
- """Generate a random UUID."""
47
- return str(uuid.uuid4())
48
-
49
- def random_float(self, min_val, max_val):
50
- """Generate a random float between min and max."""
51
- return round(random.uniform(min_val, max_val), 4)
52
-
53
- def simulate_bypass_headers(self, accept, spoof_address=False, pre_oai_uuid=None):
54
- """Simulate browser headers to bypass detection."""
55
- simulated = {
56
- "agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
57
- "platform": "Windows",
58
- "mobile": "?0",
59
- "ua": 'Not A(Brand";v="8", "Chromium";v="132", "Google Chrome";v="132'
60
- }
61
-
62
- ip = self.random_ip()
63
- uuid_val = pre_oai_uuid or self.random_uuid()
64
-
65
- headers = {
66
- "accept": accept,
67
- "Content-Type": "application/json",
68
- "cache-control": "no-cache",
69
- "Referer": "https://chatgpt.com/",
70
- "Referrer-Policy": "strict-origin-when-cross-origin",
71
- "oai-device-id": uuid_val,
72
- "User-Agent": simulated["agent"],
73
- "pragma": "no-cache",
74
- "priority": "u=1, i",
75
- "sec-ch-ua": f"{simulated['ua']}",
76
- "sec-ch-ua-mobile": simulated["mobile"],
77
- "sec-ch-ua-platform": f"{simulated['platform']}",
78
- "sec-fetch-site": "same-origin",
79
- "sec-fetch-mode": "cors",
80
- }
81
-
82
- if spoof_address:
83
- headers.update({
84
- "X-Forwarded-For": ip,
85
- "X-Originating-IP": ip,
86
- "X-Remote-IP": ip,
87
- "X-Remote-Addr": ip,
88
- "X-Host": ip,
89
- "X-Forwarded-Host": ip
90
- })
91
-
92
- return headers
93
-
94
- def solve_sentinel_challenge(self, seed, difficulty):
95
- """Solve the sentinel challenge for authentication."""
96
- cores = [8, 12, 16, 24]
97
- screens = [3000, 4000, 6000]
98
- core = random.choice(cores)
99
- screen = random.choice(screens)
100
-
101
- # Adjust time to match expected timezone
102
- now = datetime.now() - timedelta(hours=8)
103
- parse_time = now.strftime("%a, %d %b %Y %H:%M:%S GMT+0100 (Central European Time)")
104
-
105
- config = [core + screen, parse_time, 4294705152, 0,
106
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36"]
107
-
108
- diff_len = len(difficulty) // 2
109
-
110
- for i in range(100000):
111
- config[3] = i
112
- json_data = json.dumps(config)
113
- base = base64.b64encode(json_data.encode()).decode()
114
- hash_value = hashlib.sha3_512((seed + base).encode()).hexdigest()
115
-
116
- if hash_value[:diff_len] <= difficulty:
117
- result = "gAAAAAB" + base
118
- return result
119
-
120
- # Fallback
121
- fallback_base = base64.b64encode(seed.encode()).decode()
122
- return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base
123
-
124
- def generate_fake_sentinel_token(self):
125
- """Generate a fake sentinel token for initial authentication."""
126
- prefix = "gAAAAAC"
127
- config = [
128
- random.randint(3000, 6000),
129
- datetime.now().strftime("%a, %d %b %Y %H:%M:%S GMT+0100 (Central European Time)"),
130
- 4294705152, 0,
131
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
132
- "de", "de", 401, "mediaSession", "location", "scrollX",
133
- self.random_float(1000, 5000),
134
- str(uuid.uuid4()), "", 12, int(time.time() * 1000)
135
- ]
136
-
137
- base64_str = base64.b64encode(json.dumps(config).encode()).decode()
138
- return prefix + base64_str
139
-
140
- def parse_response(self, input_text):
141
- """Parse the response from ChatGPT.
142
-
143
- Args:
144
- input_text (str): The response text from ChatGPT.
145
-
146
- Returns:
147
- The complete response as a string.
148
- """
149
- parts = [part.strip() for part in input_text.split("\n") if part.strip()]
150
-
151
- for part in parts:
152
- try:
153
- if part.startswith("data: "):
154
- json_data = json.loads(part[6:])
155
- if (json_data.get("message") and
156
- json_data["message"].get("status") == "finished_successfully" and
157
- json_data["message"].get("metadata", {}).get("is_complete")):
158
- return json_data["message"]["content"]["parts"][0]
159
- except:
160
- pass
161
-
162
- return input_text # Return raw text if parsing fails or no complete message found
163
-
164
- def rotate_session_data(self):
165
- """Rotate session data to maintain fresh authentication."""
166
- uuid_val = self.random_uuid()
167
- csrf_token = self.get_csrf_token(uuid_val)
168
- sentinel_token = self.get_sentinel_token(uuid_val, csrf_token)
169
-
170
- ChatGPTReversed.csrf_token = csrf_token
171
-
172
- return {
173
- "uuid": uuid_val,
174
- "csrf": csrf_token,
175
- "sentinel": sentinel_token
176
- }
177
-
178
- def get_csrf_token(self, uuid_val):
179
- """Get CSRF token for authentication."""
180
- if ChatGPTReversed.csrf_token is not None:
181
- return ChatGPTReversed.csrf_token
182
-
183
- headers = self.simulate_bypass_headers(
184
- accept="application/json",
185
- spoof_address=True,
186
- pre_oai_uuid=uuid_val
187
- )
188
-
189
- response = requests.get(
190
- "https://chatgpt.com/api/auth/csrf",
191
- headers=headers
192
- )
193
-
194
- data = response.json()
195
- if "csrfToken" not in data:
196
- raise Exception("Failed to fetch required CSRF token")
197
-
198
- return data["csrfToken"]
199
-
200
- def get_sentinel_token(self, uuid_val, csrf):
201
- """Get sentinel token for authentication."""
202
- headers = self.simulate_bypass_headers(
203
- accept="application/json",
204
- spoof_address=True,
205
- pre_oai_uuid=uuid_val
206
- )
207
-
208
- test = self.generate_fake_sentinel_token()
209
-
210
- response = requests.post(
211
- "https://chatgpt.com/backend-anon/sentinel/chat-requirements",
212
- json={"p": test},
213
- headers={
214
- **headers,
215
- "Cookie": f"__Host-next-auth.csrf-token={csrf}; oai-did={uuid_val}; oai-nav-state=1;"
216
- }
217
- )
218
-
219
- data = response.json()
220
- if "token" not in data or "proofofwork" not in data:
221
- raise Exception("Failed to fetch required sentinel token")
222
-
223
- oai_sc = None
224
- for cookie in response.cookies:
225
- if cookie.name == "oai-sc":
226
- oai_sc = cookie.value
227
- break
228
-
229
- if not oai_sc:
230
- raise Exception("Failed to fetch required oai-sc token")
231
-
232
- challenge_token = self.solve_sentinel_challenge(
233
- data["proofofwork"]["seed"],
234
- data["proofofwork"]["difficulty"]
235
- )
236
-
237
- return {
238
- "token": data["token"],
239
- "proof": challenge_token,
240
- "oaiSc": oai_sc
241
- }
242
-
243
- def complete(self, message, model=None):
244
- """Complete a message using ChatGPT.
245
-
246
- Args:
247
- message (str): The message to send to ChatGPT.
248
- model (str, optional): The model to use. If None, uses the model specified during initialization.
249
- Defaults to None.
250
-
251
- Returns:
252
- The complete response as a string.
253
- """
254
- if not ChatGPTReversed.initialized:
255
- raise Exception("ChatGPTReversed has not been initialized. Please initialize the instance before calling this method.")
256
-
257
- # Use the provided model or fall back to the instance model
258
- selected_model = model if model else self.model
259
-
260
- # Validate the model
261
- if selected_model not in self.AVAILABLE_MODELS:
262
- raise ValueError(f"Invalid model: {selected_model}. Choose from: {self.AVAILABLE_MODELS}")
263
-
264
- session_data = self.rotate_session_data()
265
-
266
- headers = self.simulate_bypass_headers(
267
- accept="plain/text", # Changed accept header as we expect full response now
268
- spoof_address=True,
269
- pre_oai_uuid=session_data["uuid"]
270
- )
271
-
272
- headers.update({
273
- "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
274
- "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
275
- "openai-sentinel-proof-token": session_data["sentinel"]["proof"]
276
- })
277
-
278
- payload = {
279
- "action": "next",
280
- "messages": [{
281
- "id": self.random_uuid(),
282
- "author": {
283
- "role": "user"
284
- },
285
- "content": {
286
- "content_type": "text",
287
- "parts": [message]
288
- },
289
- "metadata": {}
290
- }],
291
- "parent_message_id": self.random_uuid(),
292
- "model": selected_model, # Use the selected model
293
- "timezone_offset_min": -120,
294
- "suggestions": [],
295
- "history_and_training_disabled": False,
296
- "conversation_mode": {
297
- "kind": "primary_assistant",
298
- "plugin_ids": None # Ensure web search is not used
299
- },
300
- "force_paragen": False,
301
- "force_paragen_model_slug": "",
302
- "force_nulligen": False,
303
- "force_rate_limit": False,
304
- "reset_rate_limits": False,
305
- "websocket_request_id": self.random_uuid(),
306
- "force_use_sse": True # Keep SSE for receiving the full response
307
- }
308
-
309
- response = requests.post(
310
- "https://chatgpt.com/backend-anon/conversation",
311
- json=payload,
312
- headers=headers
313
- )
314
-
315
- if response.status_code != 200:
316
- raise Exception(f"HTTP error! status: {response.status_code}")
317
-
318
- return self.parse_response(response.text)
319
-
320
-
321
- class Completions(BaseCompletions):
322
- def __init__(self, client: 'ChatGPT'):
323
- self._client = client
324
- self._chatgpt_reversed = None
325
-
326
- def create(
327
- self,
328
- *,
329
- model: str,
330
- messages: List[Dict[str, str]],
331
- max_tokens: Optional[int] = None,
332
- stream: bool = False,
333
- temperature: Optional[float] = None,
334
- top_p: Optional[float] = None,
335
- **kwargs: Any
336
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
337
- """
338
- Create a chat completion with ChatGPT API.
339
-
340
- Args:
341
- model: The model to use (from AVAILABLE_MODELS)
342
- messages: List of message dictionaries with 'role' and 'content'
343
- max_tokens: Maximum number of tokens to generate
344
- stream: Whether to stream the response
345
- temperature: Sampling temperature (0-1)
346
- top_p: Nucleus sampling parameter (0-1)
347
- **kwargs: Additional parameters to pass to the API
348
-
349
- Returns:
350
- If stream=False, returns a ChatCompletion object
351
- If stream=True, returns a Generator yielding ChatCompletionChunk objects
352
- """
353
- # Initialize ChatGPTReversed if not already initialized
354
- if self._chatgpt_reversed is None:
355
- self._chatgpt_reversed = ChatGPTReversed(model=model)
356
-
357
- # Use streaming implementation if requested
358
- if stream:
359
- return self._create_streaming(
360
- model=model,
361
- messages=messages,
362
- max_tokens=max_tokens,
363
- temperature=temperature,
364
- top_p=top_p,
365
- **kwargs
366
- )
367
-
368
- # Otherwise use non-streaming implementation
369
- return self._create_non_streaming(
370
- model=model,
371
- messages=messages,
372
- max_tokens=max_tokens,
373
- temperature=temperature,
374
- top_p=top_p,
375
- **kwargs
376
- )
377
-
378
- def _create_streaming(
379
- self,
380
- *,
381
- model: str,
382
- messages: List[Dict[str, str]],
383
- max_tokens: Optional[int] = None,
384
- temperature: Optional[float] = None,
385
- top_p: Optional[float] = None,
386
- **kwargs: Any
387
- ) -> Generator[ChatCompletionChunk, None, None]:
388
- """Implementation for streaming chat completions."""
389
- try:
390
- # Generate request ID and timestamp
391
- request_id = str(uuid.uuid4())
392
- created_time = int(time.time())
393
-
394
- # Get the last user message
395
- last_user_message = None
396
- for msg in reversed(messages):
397
- if msg["role"] == "user":
398
- last_user_message = msg["content"]
399
- break
400
-
401
- if not last_user_message:
402
- raise ValueError("No user message found in the conversation")
403
-
404
- # Get the response from ChatGPT
405
- response = self._chatgpt_reversed.complete(last_user_message, model=model)
406
-
407
- # Split the response into chunks for streaming simulation
408
- chunk_size = 10 # Characters per chunk
409
- for i in range(0, len(response), chunk_size):
410
- chunk_text = response[i:i+chunk_size]
411
-
412
- # Create and yield a chunk
413
- delta = ChoiceDelta(content=chunk_text)
414
- choice = Choice(index=0, delta=delta, finish_reason=None)
415
- chunk = ChatCompletionChunk(
416
- id=request_id,
417
- choices=[choice],
418
- created=created_time,
419
- model=model
420
- )
421
-
422
- yield chunk
423
-
424
- # Add a small delay to simulate streaming
425
- time.sleep(0.05)
426
-
427
- # Final chunk with finish_reason
428
- delta = ChoiceDelta(content=None)
429
- choice = Choice(index=0, delta=delta, finish_reason="stop")
430
- chunk = ChatCompletionChunk(
431
- id=request_id,
432
- choices=[choice],
433
- created=created_time,
434
- model=model
435
- )
436
-
437
- yield chunk
438
-
439
- except Exception as e:
440
- print(f"{RED}Error during ChatGPT streaming request: {e}{RESET}")
441
- raise IOError(f"ChatGPT streaming request failed: {e}") from e
442
-
443
- def _create_non_streaming(
444
- self,
445
- *,
446
- model: str,
447
- messages: List[Dict[str, str]],
448
- max_tokens: Optional[int] = None,
449
- temperature: Optional[float] = None,
450
- top_p: Optional[float] = None,
451
- **kwargs: Any
452
- ) -> ChatCompletion:
453
- """Implementation for non-streaming chat completions."""
454
- try:
455
- # Generate request ID and timestamp
456
- request_id = str(uuid.uuid4())
457
- created_time = int(time.time())
458
-
459
- # Get the last user message
460
- last_user_message = None
461
- for msg in reversed(messages):
462
- if msg["role"] == "user":
463
- last_user_message = msg["content"]
464
- break
465
-
466
- if not last_user_message:
467
- raise ValueError("No user message found in the conversation")
468
-
469
- # Get the response from ChatGPT
470
- full_content = self._chatgpt_reversed.complete(last_user_message, model=model)
471
-
472
- # Create the completion message
473
- message = ChatCompletionMessage(
474
- role="assistant",
475
- content=full_content
476
- )
477
-
478
- # Create the choice
479
- choice = Choice(
480
- index=0,
481
- message=message,
482
- finish_reason="stop"
483
- )
484
-
485
- # Estimate token usage using count_tokens
486
- prompt_tokens = count_tokens([msg.get("content", "") for msg in messages])
487
- completion_tokens = count_tokens(full_content)
488
- usage = CompletionUsage(
489
- prompt_tokens=prompt_tokens,
490
- completion_tokens=completion_tokens,
491
- total_tokens=prompt_tokens + completion_tokens
492
- )
493
-
494
- # Create the completion object
495
- completion = ChatCompletion(
496
- id=request_id,
497
- choices=[choice],
498
- created=created_time,
499
- model=model,
500
- usage=usage,
501
- )
502
-
503
- return completion
504
-
505
- except Exception as e:
506
- print(f"{RED}Error during ChatGPT non-stream request: {e}{RESET}")
507
- raise IOError(f"ChatGPT request failed: {e}") from e
508
-
509
- class Chat(BaseChat):
510
- def __init__(self, client: 'ChatGPT'):
511
- self.completions = Completions(client)
512
-
513
- class ChatGPT(OpenAICompatibleProvider):
514
- """
515
- OpenAI-compatible client for ChatGPT API.
516
-
517
- Usage:
518
- client = ChatGPT()
519
- response = client.chat.completions.create(
520
- model="auto",
521
- messages=[{"role": "user", "content": "Hello!"}]
522
- )
523
- print(response.choices[0].message.content)
524
- """
525
-
526
- AVAILABLE_MODELS = [
527
- "auto",
528
- "gpt-4o-mini",
529
- "gpt-4o",
530
- "o4-mini"
531
- ]
532
-
533
- def __init__(
534
- self,
535
- timeout: int = 60,
536
- proxies: dict = {}
537
- ):
538
- """
539
- Initialize the ChatGPT client.
540
-
541
- Args:
542
- timeout: Request timeout in seconds
543
- proxies: Optional proxy configuration
544
- """
545
- self.timeout = timeout
546
- self.proxies = proxies
547
-
548
- # Initialize chat interface
549
- self.chat = Chat(self)
550
-
551
- @property
552
- def models(self):
553
- class _ModelList:
554
- def list(inner_self):
555
- return type(self).AVAILABLE_MODELS
556
- return _ModelList()
1
+ import base64
2
+ import hashlib
3
+ import json
4
+ import random
5
+ import time
6
+ import uuid
7
+ from datetime import datetime, timezone
8
+ from typing import Any, Dict, Generator, List, Optional, Union, cast
9
+
10
+ import requests
11
+
12
+ # Import base classes and utility structures
13
+ from webscout.Provider.OPENAI.base import (
14
+ BaseChat,
15
+ BaseCompletions,
16
+ OpenAICompatibleProvider,
17
+ SimpleModelList,
18
+ Tool,
19
+ )
20
+ from webscout.Provider.OPENAI.utils import (
21
+ ChatCompletion,
22
+ ChatCompletionChunk,
23
+ ChatCompletionMessage,
24
+ Choice,
25
+ ChoiceDelta,
26
+ CompletionUsage,
27
+ count_tokens,
28
+ )
29
+
30
# ANSI escape codes used to colorize console error output.
BOLD = "\033[1m"  # bold text (not referenced elsewhere in this module)
RED = "\033[91m"  # bright red — wraps error messages printed on request failure
RESET = "\033[0m"  # restores default terminal attributes
34
+
35
+
36
class ChatGPTReversed:
    """Reverse-engineered client for the anonymous (login-free) ChatGPT web backend.

    Performs the unauthenticated handshake — CSRF token, sentinel
    chat-requirements token and a SHA3-512 proof-of-work challenge — then posts
    a single-turn conversation to ``/backend-anon/conversation``.

    The class behaves as a process-wide singleton (see ``__new__``).

    NOTE(review): ``initialized`` exists both as a class attribute (set True by
    ``initialize()``) and as an instance attribute (set False in ``__new__``).
    ``__init__`` reads the instance attribute, which is never flipped to True,
    so the full init path re-runs on every construction — confirm intended.
    """

    # Model slugs accepted by the anonymous backend; "auto" lets the server choose.
    AVAILABLE_MODELS = [
        "auto",
        "gpt-5-1",
        "gpt-5-1-instant",
        "gpt-5-1-thinking",
        "gpt-5",
        "gpt-5-instant",
        "gpt-5-thinking",
        "gpt-4",
        "gpt-4.1",
        "gpt-4-1",
        "gpt-4.1-mini",
        "gpt-4-1-mini",
        "gpt-4.5",
        "gpt-4-5",
        "gpt-4o",
        "gpt-4o-mini",
        "o1",
        "o1-mini",
        "o3-mini",
        "o3-mini-high",
        "o4-mini",
        "o4-mini-high",
    ]
    # Process-wide CSRF token cache, reused by get_csrf_token() across calls.
    csrf_token = None
    # Class-level init flag checked by complete(); see NOTE(review) above.
    initialized = False

    # Singleton instance holder.
    _instance = None

    def __new__(cls, model="auto"):
        """Return the shared singleton instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = super(ChatGPTReversed, cls).__new__(cls)
            cls._instance.initialized = False
        return cls._instance

    def __init__(self, model="auto"):
        """Validate and store the requested model; run initialize() on first call.

        Raises:
            ValueError: if ``model`` is not in :data:`AVAILABLE_MODELS`.
        """
        if self.initialized:
            # Already initialized, just update model if needed
            if model not in self.AVAILABLE_MODELS:
                raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
            self.model = model
            return

        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.model = model
        self.initialize()

    def initialize(self):
        """Mark the class as initialized; complete() refuses to run before this."""
        ChatGPTReversed.initialized = True

    def random_ip(self):
        """Generate a random IP address."""
        return ".".join(str(random.randint(0, 255)) for _ in range(4))

    def random_uuid(self):
        """Generate a random UUID."""
        return str(uuid.uuid4())

    def random_float(self, min_val, max_val):
        """Generate a random float between min and max."""
        return round(random.uniform(min_val, max_val), 4)

    def simulate_bypass_headers(self, accept, spoof_address=False, pre_oai_uuid=None):
        """Simulate browser headers to bypass detection.

        Args:
            accept: Value for the ``accept`` header.
            spoof_address: When True, adds X-Forwarded-For/X-Remote-IP-style
                headers carrying one random IP.
            pre_oai_uuid: Reuse an existing device id instead of minting one,
                keeping the oai-device-id consistent across a session.

        Returns:
            dict of HTTP headers mimicking a desktop Chrome 132 client.
        """
        simulated = {
            "agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
            "platform": "Windows",
            "mobile": "?0",
            "ua": 'Not A(Brand";v="8", "Chromium";v="132", "Google Chrome";v="132',
        }

        ip = self.random_ip()
        uuid_val = pre_oai_uuid or self.random_uuid()

        headers = {
            "accept": accept,
            "Content-Type": "application/json",
            "cache-control": "no-cache",
            "Referer": "https://chatgpt.com/",
            "Referrer-Policy": "strict-origin-when-cross-origin",
            "oai-device-id": uuid_val,
            "User-Agent": simulated["agent"],
            "pragma": "no-cache",
            "priority": "u=1, i",
            "sec-ch-ua": f"{simulated['ua']}",
            "sec-ch-ua-mobile": simulated["mobile"],
            "sec-ch-ua-platform": f"{simulated['platform']}",
            "sec-fetch-site": "same-origin",
            "sec-fetch-mode": "cors",
        }

        if spoof_address:
            # All spoofing headers carry the same random IP.
            headers.update(
                {
                    "X-Forwarded-For": ip,
                    "X-Originating-IP": ip,
                    "X-Remote-IP": ip,
                    "X-Remote-Addr": ip,
                    "X-Host": ip,
                    "X-Forwarded-Host": ip,
                }
            )

        return headers

    def generate_proof_token(self, seed: str, difficulty: str, user_agent: Optional[str] = None):
        """
        Improved proof-of-work implementation based on gpt4free/g4f/Provider/openai/proofofwork.py

        Args:
            seed: The seed string for the challenge
            difficulty: The difficulty hex string
            user_agent: Optional user agent string

        Returns:
            The proof token starting with 'gAAAAAB'
        """
        if user_agent is None:
            user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36"

        # Plausible screen-pixel count used in the browser fingerprint payload.
        screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])

        # Get current UTC time
        now_utc = datetime.now(timezone.utc)
        parse_time = now_utc.strftime("%a, %d %b %Y %H:%M:%S GMT")

        # Browser-fingerprint array; index 3 is the nonce mutated in the loop below.
        proof_token = [
            screen,
            parse_time,
            None,
            0,
            user_agent,
            "https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
            "dpl=1440a687921de39ff5ee56b92807faaadce73f13",
            "en",
            "en-US",
            None,
            # NOTE(review): this literal contains U+2212 (minus sign), not "-";
            # presumably faithful to the captured fingerprint — confirm.
            "plugins−[object PluginArray]",
            random.choice(
                [
                    "_reactListeningcfilawjnerp",
                    "_reactListening9ne2dfo1i47",
                    "_reactListening410nzwhan2a",
                ]
            ),
            random.choice(["alert", "ontransitionend", "onprogress"]),
        ]

        diff_len = len(difficulty)
        # Brute-force the nonce: accept when the hex digest's prefix compares
        # lexicographically <= the difficulty string.
        for i in range(100000):
            proof_token[3] = i
            json_data = json.dumps(proof_token)
            base = base64.b64encode(json_data.encode()).decode()
            hash_value = hashlib.sha3_512((seed + base).encode()).digest()

            if hash_value.hex()[:diff_len] <= difficulty:
                return "gAAAAAB" + base

        # Fallback
        fallback_base = base64.b64encode(f'"{seed}"'.encode()).decode()
        return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base

    def solve_sentinel_challenge(self, seed, difficulty):
        """Solve the sentinel challenge for authentication using improved algorithm."""
        return self.generate_proof_token(seed, difficulty)

    def generate_fake_sentinel_token(self):
        """Generate a fake sentinel token for initial authentication."""
        prefix = "gAAAAAC"

        # More realistic screen sizes
        screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])

        # Get current UTC time
        now_utc = datetime.now(timezone.utc)
        parse_time = now_utc.strftime("%a, %d %b %Y %H:%M:%S GMT")

        config = [
            screen,
            parse_time,
            4294705152,
            0,
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
            "de",
            "de",
            401,
            "mediaSession",
            "location",
            "scrollX",
            self.random_float(1000, 5000),
            str(uuid.uuid4()),
            "",
            12,
            int(time.time() * 1000),
        ]

        base64_str = base64.b64encode(json.dumps(config).encode()).decode()
        return prefix + base64_str

    def parse_response(self, input_text):
        """Parse the response from ChatGPT.

        Scans the SSE body for the final "finished_successfully" message and
        extracts its first content part.

        Args:
            input_text (str): The response text from ChatGPT.

        Returns:
            The complete response as a string.
        """
        parts = [part.strip() for part in input_text.split("\n") if part.strip()]

        for part in parts:
            try:
                if part.startswith("data: "):
                    json_data = json.loads(part[6:])
                    if (
                        json_data.get("message")
                        and json_data["message"].get("status") == "finished_successfully"
                        and json_data["message"].get("metadata", {}).get("is_complete")
                    ):
                        return json_data["message"]["content"]["parts"][0]
            except Exception:
                # Non-JSON SSE lines (e.g. keep-alives) are expected; skip them.
                pass

        return input_text  # Return raw text if parsing fails or no complete message found

    def rotate_session_data(self):
        """Rotate session data to maintain fresh authentication.

        Returns:
            dict with ``uuid`` (device id), ``csrf`` token, and ``sentinel``
            bundle produced by :meth:`get_sentinel_token`.
        """
        uuid_val = self.random_uuid()
        csrf_token = self.get_csrf_token(uuid_val)
        sentinel_token = self.get_sentinel_token(uuid_val, csrf_token)

        # Cache the CSRF token at class level so later rotations skip the fetch.
        ChatGPTReversed.csrf_token = csrf_token

        return {"uuid": uuid_val, "csrf": csrf_token, "sentinel": sentinel_token}

    def get_csrf_token(self, uuid_val):
        """Get CSRF token for authentication.

        Returns the cached class-level token when available; otherwise fetches
        a fresh one from the auth endpoint.

        Raises:
            Exception: if the response carries no ``csrfToken`` field.
        """
        if ChatGPTReversed.csrf_token is not None:
            return ChatGPTReversed.csrf_token

        headers = self.simulate_bypass_headers(
            accept="application/json", spoof_address=True, pre_oai_uuid=uuid_val
        )

        # NOTE(review): no timeout is set on this request — it can hang; confirm.
        response = requests.get("https://chatgpt.com/api/auth/csrf", headers=headers)

        data = response.json()
        if "csrfToken" not in data:
            raise Exception("Failed to fetch required CSRF token")

        return data["csrfToken"]

    def get_sentinel_token(self, uuid_val, csrf):
        """Get sentinel token for authentication.

        Posts a fake sentinel token to the chat-requirements endpoint, then
        solves the returned proof-of-work challenge.

        Returns:
            dict with ``token`` (requirements token), ``proof`` (solved PoW)
            and ``oaiSc`` (the ``oai-sc`` cookie value).

        Raises:
            Exception: if the token, proof-of-work data, or oai-sc cookie is missing.
        """
        headers = self.simulate_bypass_headers(
            accept="application/json", spoof_address=True, pre_oai_uuid=uuid_val
        )

        test = self.generate_fake_sentinel_token()

        response = requests.post(
            "https://chatgpt.com/backend-anon/sentinel/chat-requirements",
            json={"p": test},
            headers={
                **headers,
                "Cookie": f"__Host-next-auth.csrf-token={csrf}; oai-did={uuid_val}; oai-nav-state=1;",
            },
        )

        data = response.json()
        if "token" not in data or "proofofwork" not in data:
            raise Exception("Failed to fetch required sentinel token")

        oai_sc = None
        for cookie in response.cookies:
            if cookie.name == "oai-sc":
                oai_sc = cookie.value
                break

        if not oai_sc:
            raise Exception("Failed to fetch required oai-sc token")

        challenge_token = self.solve_sentinel_challenge(
            data["proofofwork"]["seed"], data["proofofwork"]["difficulty"]
        )

        return {"token": data["token"], "proof": challenge_token, "oaiSc": oai_sc}

    def complete(self, message, model=None):
        """Complete a message using ChatGPT.

        Args:
            message (str): The message to send to ChatGPT.
            model (str, optional): The model to use. If None, uses the model specified during initialization.
                Defaults to None.

        Returns:
            The complete response as a string.

        Raises:
            Exception: if the class has not been initialized or the HTTP
                request returns a non-200 status.
            ValueError: if ``model`` is not a known model slug.
        """
        if not ChatGPTReversed.initialized:
            raise Exception(
                "ChatGPTReversed has not been initialized. Please initialize the instance before calling this method."
            )

        # Use the provided model or fall back to the instance model
        selected_model = model if model else self.model

        # Validate the model
        if selected_model not in self.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {selected_model}. Choose from: {self.AVAILABLE_MODELS}"
            )

        session_data = self.rotate_session_data()

        headers = self.simulate_bypass_headers(
            accept="plain/text",  # Changed accept header as we expect full response now
            spoof_address=True,
            pre_oai_uuid=session_data["uuid"],
        )

        headers.update(
            {
                "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
                "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
                "openai-sentinel-proof-token": session_data["sentinel"]["proof"],
            }
        )

        payload = {
            "action": "next",
            "messages": [
                {
                    "id": self.random_uuid(),
                    "author": {"role": "user"},
                    "content": {"content_type": "text", "parts": [message]},
                    "metadata": {},
                }
            ],
            "parent_message_id": self.random_uuid(),
            "model": selected_model,  # Use the selected model
            "timezone_offset_min": -120,
            "suggestions": [],
            "history_and_training_disabled": False,
            "conversation_mode": {
                "kind": "primary_assistant",
                "plugin_ids": None,  # Ensure web search is not used
            },
            "force_paragen": False,
            "force_paragen_model_slug": "",
            "force_nulligen": False,
            "force_rate_limit": False,
            "reset_rate_limits": False,
            "websocket_request_id": self.random_uuid(),
            "force_use_sse": True,  # Keep SSE for receiving the full response
        }

        response = requests.post(
            "https://chatgpt.com/backend-anon/conversation", json=payload, headers=headers
        )

        if response.status_code != 200:
            raise Exception(f"HTTP error! status: {response.status_code}")

        return self.parse_response(response.text)
404
+
405
+
406
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` implementation backed by the
    anonymous ChatGPT web backend (via :class:`ChatGPTReversed`).

    Both the streaming and non-streaming paths share the same session
    handshake and payload construction, factored into ``_prepare_request``.
    """

    # Anonymous conversation endpoint used by both request paths.
    _CONVERSATION_URL = "https://chatgpt.com/backend-anon/conversation"

    def __init__(self, client: "ChatGPT"):
        self._client = client
        # Created lazily on first request so the session handshake only runs when needed.
        self._chatgpt_reversed = None

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,  # Support for tool definitions
        tool_choice: Optional[
            Union[str, Dict[str, Any]]
        ] = None,  # Support for tool_choice parameter
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any,
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion with ChatGPT API.

        Args:
            model: The model to use (from AVAILABLE_MODELS)
            messages: List of message dictionaries with 'role' and 'content'
            max_tokens: Maximum number of tokens to generate
            stream: Whether to stream the response
            temperature: Sampling temperature (0-1)
            top_p: Nucleus sampling parameter (0-1)
            tools: List of tool definitions available for the model to use.
                NOTE: accepted for interface compatibility but not forwarded
                to the anonymous backend.
            tool_choice: Control over which tool the model should use
                (accepted but not forwarded; see ``tools``).
            timeout: Per-request HTTP timeout in seconds (defaults to 30).
            proxies: Optional proxy configuration (currently unused by the
                underlying requests calls).
            **kwargs: Additional parameters (ignored).

        Returns:
            If stream=False, returns a ChatCompletion object
            If stream=True, returns a Generator yielding ChatCompletionChunk objects
        """
        self._ensure_backend(model)

        if stream:
            return self._create_streaming(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                tools=tools,
                tool_choice=tool_choice,
                timeout=timeout,
                proxies=proxies,
                **kwargs,
            )

        return self._create_non_streaming(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            tools=tools,
            tool_choice=tool_choice,
            timeout=timeout,
            proxies=proxies,
            **kwargs,
        )

    def _ensure_backend(self, model: str) -> None:
        """Instantiate the ChatGPTReversed singleton on first use."""
        if self._chatgpt_reversed is None:
            self._chatgpt_reversed = ChatGPTReversed(model=model)

    def _prepare_request(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int],
        temperature: Optional[float],
        top_p: Optional[float],
    ) -> tuple:
        """Rotate session data and build the (headers, payload) pair shared by
        both the streaming and non-streaming request paths.
        """
        session_data = self._chatgpt_reversed.rotate_session_data()

        headers = self._chatgpt_reversed.simulate_bypass_headers(
            accept="text/event-stream", spoof_address=True, pre_oai_uuid=session_data["uuid"]
        )
        headers.update(
            {
                "Cookie": f"__Host-next-auth.csrf-token={session_data['csrf']}; oai-did={session_data['uuid']}; oai-nav-state=1; oai-sc={session_data['sentinel']['oaiSc']};",
                "openai-sentinel-chat-requirements-token": session_data["sentinel"]["token"],
                "openai-sentinel-proof-token": session_data["sentinel"]["proof"],
            }
        )

        # Re-shape OpenAI-style messages into the ChatGPT backend's schema.
        formatted_messages = [
            {
                "id": str(uuid.uuid4()),
                "author": {"role": msg["role"]},
                "content": {"content_type": "text", "parts": [msg["content"]]},
                "metadata": {},
            }
            for msg in messages
        ]

        payload = {
            "action": "next",
            "messages": formatted_messages,
            "parent_message_id": str(uuid.uuid4()),
            "model": model,
            "timezone_offset_min": -120,
            "suggestions": [],
            "history_and_training_disabled": False,
            "conversation_mode": {"kind": "primary_assistant", "plugin_ids": None},
            # Fixed: a previous revision spelled these "force_paragem", which
            # does not match the field names sent by ChatGPTReversed.complete().
            "force_paragen": False,
            "force_paragen_model_slug": "",
            "force_nulligen": False,
            "force_rate_limit": False,
            "reset_rate_limits": False,
            "websocket_request_id": str(uuid.uuid4()),
            "force_use_sse": True,
        }

        # Optional sampling parameters are only attached when explicitly set.
        if max_tokens is not None:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p

        return headers, payload

    def _final_chunk(
        self,
        request_id: str,
        created_time: int,
        model: str,
        prompt_tokens: int,
        completion_tokens: int,
        total_tokens: int,
    ) -> ChatCompletionChunk:
        """Build the terminal stream chunk carrying finish_reason="stop" and usage totals."""
        delta = ChoiceDelta(content=None)
        choice = Choice(index=0, delta=delta, finish_reason="stop")
        chunk = ChatCompletionChunk(
            id=request_id, choices=[choice], created=created_time, model=model
        )
        chunk.usage = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": total_tokens,
        }
        return chunk

    def _create_streaming(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any,
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Stream chat completion chunks from the ChatGPT SSE endpoint.

        The backend re-sends the full assistant text on each event, so each
        chunk's delta is computed as the suffix beyond what was already yielded.

        Raises:
            IOError: wrapping any underlying failure (HTTP error, missing user
                message, connection problems).
        """
        try:
            request_id = str(uuid.uuid4())
            created_time = int(time.time())

            # The conversation must contain at least one non-empty user message.
            last_user_message = next(
                (msg["content"] for msg in reversed(messages) if msg["role"] == "user"), None
            )
            if not last_user_message:
                raise ValueError("No user message found in the conversation")

            self._ensure_backend(model)
            headers, payload = self._prepare_request(
                model, messages, max_tokens, temperature, top_p
            )

            response = requests.post(
                self._CONVERSATION_URL,
                json=payload,
                headers=headers,
                stream=True,
                timeout=timeout or 30,
            )
            response.raise_for_status()

            full_content = ""
            prompt_tokens = count_tokens(str(messages))
            completion_tokens = 0
            total_tokens = prompt_tokens
            finished = False

            for line in response.iter_lines(decode_unicode=True):
                if not line or not line.startswith("data: "):
                    continue
                data_str = line[6:]  # Strip the "data: " SSE prefix

                if data_str.strip() == "[DONE]":
                    finished = True
                    yield self._final_chunk(
                        request_id,
                        created_time,
                        model,
                        prompt_tokens,
                        completion_tokens,
                        total_tokens,
                    )
                    break

                try:
                    data = json.loads(data_str)
                except json.JSONDecodeError:
                    # Skip keep-alives and other non-JSON SSE lines.
                    continue

                message = data.get("message")
                if not message or message.get("author", {}).get("role") != "assistant":
                    continue

                content_parts = message.get("content", {}).get("parts", [])
                if not content_parts:
                    continue

                new_content = content_parts[0]
                # The event carries the cumulative text; emit only the new suffix.
                delta_content = (
                    new_content[len(full_content):]
                    if new_content.startswith(full_content)
                    else new_content
                )
                full_content = new_content
                completion_tokens = count_tokens(full_content)
                total_tokens = prompt_tokens + completion_tokens

                if delta_content:
                    delta = ChoiceDelta(content=delta_content, role="assistant")
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    chunk = ChatCompletionChunk(
                        id=request_id, choices=[choice], created=created_time, model=model
                    )
                    chunk.usage = {
                        "prompt_tokens": prompt_tokens,
                        "completion_tokens": completion_tokens,
                        "total_tokens": total_tokens,
                    }
                    yield chunk

            if not finished:
                # Robustness: the server closed the stream without [DONE];
                # still emit a terminal chunk so consumers see finish_reason.
                yield self._final_chunk(
                    request_id, created_time, model, prompt_tokens, completion_tokens, total_tokens
                )

        except Exception as e:
            # Mirror the non-streaming path's error reporting for consistency.
            print(f"{RED}Error during ChatGPT streaming request: {e}{RESET}")
            raise IOError(f"ChatGPT request failed: {e}") from e

    def _create_non_streaming(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any,
    ) -> ChatCompletion:
        """Collect a full SSE response and return it as one ChatCompletion.

        Raises:
            IOError: wrapping any underlying failure.
        """
        try:
            request_id = str(uuid.uuid4())
            created_time = int(time.time())

            self._ensure_backend(model)
            headers, payload = self._prepare_request(
                model, messages, max_tokens, temperature, top_p
            )

            response = requests.post(
                self._CONVERSATION_URL,
                json=payload,
                headers=headers,
                stream=True,
                timeout=timeout or 30,
            )
            response.raise_for_status()

            # Each assistant event carries the cumulative text, so keeping only
            # the latest one yields the complete response.
            full_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if not line or not line.startswith("data: "):
                    continue
                data_str = line[6:]  # Strip the "data: " SSE prefix

                if data_str.strip() == "[DONE]":
                    break

                try:
                    data = json.loads(data_str)
                except json.JSONDecodeError:
                    continue

                message = data.get("message")
                if message and message.get("author", {}).get("role") == "assistant":
                    content_parts = message.get("content", {}).get("parts", [])
                    if content_parts:
                        full_response = content_parts[0]

            message = ChatCompletionMessage(role="assistant", content=full_response)
            choice = Choice(index=0, message=message, finish_reason="stop")

            # Estimate token usage; the anonymous backend reports none itself.
            prompt_tokens = count_tokens(str(messages))
            completion_tokens = count_tokens(full_response)
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            )

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except Exception as e:
            print(f"{RED}Error during ChatGPT non-stream request: {e}{RESET}")
            raise IOError(f"ChatGPT request failed: {e}") from e
812
+
813
+
814
class Chat(BaseChat):
    """Namespace object exposing ``client.chat.completions`` (OpenAI client layout)."""

    def __init__(self, client: "ChatGPT"):
        # Completions carries the per-client request machinery.
        self.completions = Completions(client)
817
+
818
+
819
class ChatGPT(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for ChatGPT API.

    Usage:
        client = ChatGPT()
        response = client.chat.completions.create(
            model="auto",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # No API key is needed: requests go through the anonymous web backend.
    required_auth = False

    def __init__(
        self,
        api_key: Optional[str] = None,
        tools: Optional[List[Tool]] = None,
        proxies: Optional[Dict[str, str]] = None,
    ):
        """
        Initialize the ChatGPT client.

        Args:
            api_key: Optional API key (not used by ChatGPTReversed but included for interface compatibility)
            tools: Optional list of tools to register with the provider
            proxies: Optional proxy configuration dict, e.g. {"http": "http://proxy:8080", "https": "https://proxy:8080"}
        """
        super().__init__(api_key=api_key, tools=tools, proxies=proxies)
        # Initialize chat interface
        self.chat = Chat(self)

    @property
    def AVAILABLE_MODELS(self):
        # Delegates to ChatGPTReversed so there is a single source of truth
        # for the supported model slugs.
        return ChatGPTReversed.AVAILABLE_MODELS

    @property
    def models(self) -> SimpleModelList:
        # OpenAI-style ``client.models.list()`` support.
        return SimpleModelList(self.AVAILABLE_MODELS)
859
+
860
+
861
+ if __name__ == "__main__":
862
+ # Example usage
863
+ client = ChatGPT()
864
+ response = client.chat.completions.create(
865
+ model="o4-mini-high", messages=[{"role": "user", "content": "How many r in strawberry"}]
866
+ )
867
+ if isinstance(response, ChatCompletion):
868
+ if response.choices[0].message and response.choices[0].message.content:
869
+ print(response.choices[0].message.content)
870
+ print()