webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483) hide show
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
@@ -1,496 +1,337 @@
1
- import json
2
- from ..AIutel import Optimizers
3
- from ..AIutel import Conversation
4
- from ..AIutel import AwesomePrompts, sanitize_stream
5
- from ..AIbase import Provider, AsyncProvider
6
-
7
- from webscout import exceptions
8
- from typing import Any, AsyncGenerator, Dict, List, Optional, Union
9
- import requests
10
- import httpx
11
- #----------------------------------------------------------OpenAI-----------------------------------
12
- class OPENAI(Provider):
13
- def __init__(
14
- self,
15
- api_key: str,
16
- is_conversation: bool = True,
17
- max_tokens: int = 600,
18
- temperature: float = 1,
19
- presence_penalty: int = 0,
20
- frequency_penalty: int = 0,
21
- top_p: float = 1,
22
- model: str = "gpt-3.5-turbo",
23
- timeout: int = 30,
24
- intro: str = None,
25
- filepath: str = None,
26
- update_file: bool = True,
27
- proxies: dict = {},
28
- history_offset: int = 10250,
29
- act: str = None,
30
- base_url: str = "https://api.openai.com/v1/chat/completions",
31
- ):
32
- """Instantiates OPENAI
33
-
34
- Args:
35
- api_key (key): OpenAI's API key.
36
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
37
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
38
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
39
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
40
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
41
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
42
- model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
43
- timeout (int, optional): Http request timeout. Defaults to 30.
44
- intro (str, optional): Conversation introductory prompt. Defaults to None.
45
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
46
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
47
- proxies (dict, optional): Http request proxies. Defaults to {}.
48
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
49
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
50
- """
51
- self.is_conversation = is_conversation
52
- self.max_tokens_to_sample = max_tokens
53
- self.api_key = api_key
54
- self.model = model
55
- self.temperature = temperature
56
- self.presence_penalty = presence_penalty
57
- self.frequency_penalty = frequency_penalty
58
- self.top_p = top_p
59
- self.chat_endpoint = base_url
60
- self.stream_chunk_size = 64
61
- self.timeout = timeout
62
- self.last_response = {}
63
- self.headers = {
64
- "Content-Type": "application/json",
65
- "Authorization": f"Bearer {self.api_key}",
66
- }
67
- self.session = requests.session()
68
- self.__available_optimizers = (
69
- method
70
- for method in dir(Optimizers)
71
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
72
- )
73
- self.session.headers.update(self.headers)
74
- Conversation.intro = (
75
- AwesomePrompts().get_act(
76
- act, raise_not_found=True, default=None, case_insensitive=True
77
- )
78
- if act
79
- else intro or Conversation.intro
80
- )
81
- self.conversation = Conversation(
82
- is_conversation, self.max_tokens_to_sample, filepath, update_file
83
- )
84
- self.conversation.history_offset = history_offset
85
- self.session.proxies = proxies
86
-
87
- def ask(
88
- self,
89
- prompt: str,
90
- stream: bool = False,
91
- raw: bool = False,
92
- optimizer: str = None,
93
- conversationally: bool = False,
94
- ) -> dict:
95
- """Chat with AI
96
-
97
- Args:
98
- prompt (str): Prompt to be send.
99
- stream (bool, optional): Flag for streaming response. Defaults to False.
100
- raw (bool, optional): Stream back raw response as received. Defaults to False.
101
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
102
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
103
- Returns:
104
- dict : {}
105
- ```json
106
- {
107
- "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
108
- "object": "chat.completion",
109
- "created": 1704623244,
110
- "model": "gpt-3.5-turbo",
111
- "usage": {
112
- "prompt_tokens": 0,
113
- "completion_tokens": 0,
114
- "total_tokens": 0
115
- },
116
- "choices": [
117
- {
118
- "message": {
119
- "role": "assistant",
120
- "content": "Hello! How can I assist you today?"
121
- },
122
- "finish_reason": "stop",
123
- "index": 0
124
- }
125
- ]
126
- }
127
- ```
128
- """
129
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
130
- if optimizer:
131
- if optimizer in self.__available_optimizers:
132
- conversation_prompt = getattr(Optimizers, optimizer)(
133
- conversation_prompt if conversationally else prompt
134
- )
135
- else:
136
- raise exceptions.FailedToGenerateResponseError(
137
- f"Optimizer is not one of {self.__available_optimizers}"
138
- )
139
- self.session.headers.update(self.headers)
140
- payload = {
141
- "frequency_penalty": self.frequency_penalty,
142
- "messages": [{"content": conversation_prompt, "role": "user"}],
143
- "model": self.model,
144
- "presence_penalty": self.presence_penalty,
145
- "stream": stream,
146
- "temperature": self.temperature,
147
- "top_p": self.top_p,
148
- }
149
-
150
- def for_stream():
151
- response = self.session.post(
152
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
153
- )
154
- if not response.ok:
155
- raise exceptions.FailedToGenerateResponseError(
156
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
157
- )
158
-
159
- message_load = ""
160
- for value in response.iter_lines(
161
- decode_unicode=True,
162
- delimiter="" if raw else "data:",
163
- chunk_size=self.stream_chunk_size,
164
- ):
165
- try:
166
- resp = json.loads(value)
167
- incomplete_message = self.get_message(resp)
168
- if incomplete_message:
169
- message_load += incomplete_message
170
- resp["choices"][0]["delta"]["content"] = message_load
171
- self.last_response.update(resp)
172
- yield value if raw else resp
173
- elif raw:
174
- yield value
175
- except json.decoder.JSONDecodeError:
176
- pass
177
- self.conversation.update_chat_history(
178
- prompt, self.get_message(self.last_response)
179
- )
180
-
181
- def for_non_stream():
182
- response = self.session.post(
183
- self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
184
- )
185
- if (
186
- not response.ok
187
- or not response.headers.get("Content-Type", "") == "application/json"
188
- ):
189
- raise exceptions.FailedToGenerateResponseError(
190
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
191
- )
192
- resp = response.json()
193
- self.last_response.update(resp)
194
- self.conversation.update_chat_history(
195
- prompt, self.get_message(self.last_response)
196
- )
197
- return resp
198
-
199
- return for_stream() if stream else for_non_stream()
200
-
201
- def chat(
202
- self,
203
- prompt: str,
204
- stream: bool = False,
205
- optimizer: str = None,
206
- conversationally: bool = False,
207
- ) -> str:
208
- """Generate response `str`
209
- Args:
210
- prompt (str): Prompt to be send.
211
- stream (bool, optional): Flag for streaming response. Defaults to False.
212
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
213
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
214
- Returns:
215
- str: Response generated
216
- """
217
-
218
- def for_stream():
219
- for response in self.ask(
220
- prompt, True, optimizer=optimizer, conversationally=conversationally
221
- ):
222
- yield self.get_message(response)
223
-
224
- def for_non_stream():
225
- return self.get_message(
226
- self.ask(
227
- prompt,
228
- False,
229
- optimizer=optimizer,
230
- conversationally=conversationally,
231
- )
232
- )
233
-
234
- return for_stream() if stream else for_non_stream()
235
-
236
- def get_message(self, response: dict) -> str:
237
- """Retrieves message only from response
238
-
239
- Args:
240
- response (dict): Response generated by `self.ask`
241
-
242
- Returns:
243
- str: Message extracted
244
- """
245
- assert isinstance(response, dict), "Response should be of dict data-type only"
246
- try:
247
- if response["choices"][0].get("delta"):
248
- return response["choices"][0]["delta"]["content"]
249
- return response["choices"][0]["message"]["content"]
250
- except KeyError:
251
- return ""
252
- class AsyncOPENAI(AsyncProvider):
253
- def __init__(
254
- self,
255
- api_key: str,
256
- is_conversation: bool = True,
257
- max_tokens: int = 600,
258
- temperature: float = 1,
259
- presence_penalty: int = 0,
260
- frequency_penalty: int = 0,
261
- top_p: float = 1,
262
- model: str = "gpt-3.5-turbo",
263
- timeout: int = 30,
264
- intro: str = None,
265
- filepath: str = None,
266
- update_file: bool = True,
267
- proxies: dict = {},
268
- history_offset: int = 10250,
269
- act: str = None,
270
- ):
271
- """Instantiates OPENAI
272
-
273
- Args:
274
- api_key (key): OpenAI's API key.
275
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
276
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
277
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
278
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
279
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
280
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
281
- model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
282
- timeout (int, optional): Http request timeout. Defaults to 30.
283
- intro (str, optional): Conversation introductory prompt. Defaults to None.
284
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
285
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
286
- proxies (dict, optional): Http request proxies. Defaults to {}.
287
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
288
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
289
- """
290
- self.is_conversation = is_conversation
291
- self.max_tokens_to_sample = max_tokens
292
- self.api_key = api_key
293
- self.model = model
294
- self.temperature = temperature
295
- self.presence_penalty = presence_penalty
296
- self.frequency_penalty = frequency_penalty
297
- self.top_p = top_p
298
- self.chat_endpoint = "https://api.openai.com/v1/chat/completions"
299
- self.stream_chunk_size = 64
300
- self.timeout = timeout
301
- self.last_response = {}
302
- self.headers = {
303
- "Content-Type": "application/json",
304
- "Authorization": f"Bearer {self.api_key}",
305
- }
306
-
307
- self.__available_optimizers = (
308
- method
309
- for method in dir(Optimizers)
310
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
311
- )
312
- Conversation.intro = (
313
- AwesomePrompts().get_act(
314
- act, raise_not_found=True, default=None, case_insensitive=True
315
- )
316
- if act
317
- else intro or Conversation.intro
318
- )
319
- self.conversation = Conversation(
320
- is_conversation, self.max_tokens_to_sample, filepath, update_file
321
- )
322
- self.conversation.history_offset = history_offset
323
- self.session = httpx.AsyncClient(
324
- headers=self.headers,
325
- proxies=proxies,
326
- )
327
-
328
- async def ask(
329
- self,
330
- prompt: str,
331
- stream: bool = False,
332
- raw: bool = False,
333
- optimizer: str = None,
334
- conversationally: bool = False,
335
- tools: Optional[List[Dict[str, Any]]] = None,
336
- ) -> Union[dict, AsyncGenerator]:
337
- """Chat with AI asynchronously.
338
-
339
- Args:
340
- prompt (str): Prompt to be send.
341
- stream (bool, optional): Flag for streaming response. Defaults to False.
342
- raw (bool, optional): Stream back raw response as received. Defaults to False.
343
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
344
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
345
- tools (Optional[List[Dict[str, Any]]], optional): List of tools to be used. Defaults to None.
346
- Returns:
347
- dict|AsyncGenerator : ai content.
348
- ```json
349
- {
350
- "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
351
- "object": "chat.completion",
352
- "created": 1704623244,
353
- "model": "gpt-3.5-turbo",
354
- "usage": {
355
- "prompt_tokens": 0,
356
- "completion_tokens": 0,
357
- "total_tokens": 0
358
- },
359
- "choices": [
360
- {
361
- "message": {
362
- "role": "assistant",
363
- "content": "Hello! How can I assist you today?"
364
- },
365
- "finish_reason": "stop",
366
- "index": 0
367
- }
368
- ]
369
- }
370
- ```
371
- """
372
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
373
- if optimizer:
374
- if optimizer in self.__available_optimizers:
375
- conversation_prompt = getattr(Optimizers, optimizer)(
376
- conversation_prompt if conversationally else prompt
377
- )
378
- else:
379
- raise Exception(
380
- f"Optimizer is not one of {self.__available_optimizers}"
381
- )
382
- payload = {
383
- "frequency_penalty": self.frequency_penalty,
384
- "messages": [{"content": conversation_prompt, "role": "user"}],
385
- "model": self.model,
386
- "presence_penalty": self.presence_penalty,
387
- "stream": stream,
388
- "temperature": self.temperature,
389
- "top_p": self.top_p,
390
- }
391
-
392
- async def for_stream():
393
- async with self.session.stream(
394
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
395
- ) as response:
396
- if not response.is_success:
397
- raise Exception(
398
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
399
- )
400
-
401
- message_load = ""
402
- async for value in response.aiter_lines():
403
- try:
404
-
405
- resp = sanitize_stream(value)
406
- incomplete_message = await self.get_message(resp)
407
- if incomplete_message:
408
- message_load += incomplete_message
409
- resp["choices"][0]["delta"]["content"] = message_load
410
- self.last_response.update(resp)
411
- yield value if raw else resp
412
- elif raw:
413
- yield value
414
- except json.decoder.JSONDecodeError:
415
- pass
416
- self.conversation.update_chat_history(
417
- prompt, await self.get_message(self.last_response)
418
- )
419
-
420
- async def for_non_stream():
421
- response = httpx.post(
422
- self.chat_endpoint,
423
- json=payload,
424
- timeout=self.timeout,
425
- headers=self.headers,
426
- )
427
- if (
428
- not response.is_success
429
- or not response.headers.get("Content-Type", "") == "application/json"
430
- ):
431
- raise Exception(
432
- f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
433
- )
434
- resp = response.json()
435
- self.last_response.update(resp)
436
- self.conversation.update_chat_history(
437
- prompt, await self.get_message(self.last_response)
438
- )
439
- return resp
440
-
441
- return for_stream() if stream else await for_non_stream()
442
-
443
- async def chat(
444
- self,
445
- prompt: str,
446
- stream: bool = False,
447
- optimizer: str = None,
448
- conversationally: bool = False,
449
- tools: Optional[List[Dict[str, Any]]] = None,
450
- ) -> Union[str, AsyncGenerator]:
451
- """Generate response `str` asynchronously.
452
- Args:
453
- prompt (str): Prompt to be send.
454
- stream (bool, optional): Flag for streaming response. Defaults to False.
455
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
456
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
457
- tools (Optional[List[Dict[str, Any]]], optional): List of tools to be used. Defaults to None.
458
- Returns:
459
- str|AsyncGenerator: Response generated
460
- """
461
-
462
- async def for_stream():
463
- async_ask = await self.ask(
464
- prompt, True, optimizer=optimizer, conversationally=conversationally
465
- )
466
- async for response in async_ask:
467
- yield await self.get_message(response)
468
-
469
- async def for_non_stream():
470
- return await self.get_message(
471
- await self.ask(
472
- prompt,
473
- False,
474
- optimizer=optimizer,
475
- conversationally=conversationally,
476
- )
477
- )
478
-
479
- return for_stream() if stream else await for_non_stream()
480
-
481
- async def get_message(self, response: dict) -> str:
482
- """Retrieves message only from response asynchronously.
483
-
484
- Args:
485
- response (dict): Response generated by `self.ask`
486
-
487
- Returns:
488
- str: Message extracted
489
- """
490
- assert isinstance(response, dict), "Response should be of dict data-type only"
491
- try:
492
- if response["choices"][0].get("delta"):
493
- return response["choices"][0]["delta"]["content"]
494
- return response["choices"][0]["message"]["content"]
495
- except KeyError:
496
- return ""
1
+ import json
2
+ from typing import Any, Dict, Generator, Optional, Union, cast
3
+
4
+ from curl_cffi import CurlError
5
+ from curl_cffi.requests import Session
6
+
7
+ from webscout import exceptions
8
+ from webscout.AIbase import Provider, Response
9
+ from webscout.AIutel import AwesomePrompts, Conversation, Optimizers, sanitize_stream
10
+ from webscout.litagent import LitAgent
11
+
12
+
13
+ class OpenAI(Provider):
14
+ """
15
+ A class to interact with the OpenAI API with LitAgent user-agent.
16
+ """
17
+
18
+ required_auth = True
19
+
20
+ @classmethod
21
+ def get_models(cls, api_key: Optional[str] = None):
22
+ """Fetch available models from OpenAI API.
23
+
24
+ Args:
25
+ api_key (str, optional): OpenAI API key
26
+
27
+ Returns:
28
+ list: List of available model IDs
29
+ """
30
+ if not api_key:
31
+ return []
32
+ try:
33
+ # Use a temporary curl_cffi session for this class method
34
+ temp_session = Session()
35
+ headers = {
36
+ "Authorization": f"Bearer {api_key}",
37
+ }
38
+
39
+ response = temp_session.get(
40
+ "https://api.openai.com/v1/models", headers=headers, impersonate="chrome110"
41
+ )
42
+
43
+ if response.status_code != 200:
44
+ raise Exception(
45
+ f"API request failed with status {response.status_code}: {response.text}"
46
+ )
47
+
48
+ data = response.json()
49
+ if "data" in data and isinstance(data["data"], list):
50
+ return [model["id"] for model in data["data"] if "id" in model]
51
+ raise Exception("Invalid response format from API")
52
+
53
+ except (CurlError, Exception) as e:
54
+ raise Exception(f"Failed to fetch models: {str(e)}")
55
+
56
+ @staticmethod
57
+ def _openai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
58
+ """Extracts content from OpenAI stream JSON objects."""
59
+ if isinstance(chunk, dict):
60
+ return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
61
+ return None
62
+
63
    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 600,
        temperature: float = 1,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1,
        model: str = "gpt-3.5-turbo",
        timeout: int = 30,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: Optional[str] = None,
        base_url: str = "https://api.openai.com/v1/chat/completions",
        system_prompt: str = "You are a helpful assistant.",
        browser: str = "chrome",
    ):
        """Initializes the OpenAI API client.

        Args:
            api_key (str): OpenAI API key; set as a Bearer ``Authorization`` header when truthy.
            is_conversation (bool, optional): Keep conversational history. Defaults to True.
            max_tokens (int, optional): Token budget handed to the Conversation helper. Defaults to 600.
            temperature (float, optional): Sampling temperature sent in the payload. Defaults to 1.
            presence_penalty (int, optional): Presence penalty sent in the payload. Defaults to 0.
            frequency_penalty (int, optional): Frequency penalty sent in the payload. Defaults to 0.
            top_p (float, optional): Nucleus-sampling threshold sent in the payload. Defaults to 1.
            model (str, optional): Model ID. Defaults to "gpt-3.5-turbo".
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
            intro (str, optional): Conversation introductory prompt (used only when ``act`` is not given).
            filepath (str, optional): Path to the conversation-history file.
            update_file (bool, optional): Append new turns to the history file. Defaults to True.
            proxies (dict, optional): Proxies applied to the HTTP session. Defaults to {}.
            history_offset (int, optional): History size limit passed to Conversation. Defaults to 10250.
            act (str, optional): AwesomePrompts act key used as the conversation intro.
            base_url (str, optional): Chat-completions endpoint URL.
            system_prompt (str, optional): System message sent with every request.
            browser (str, optional): Browser name used to generate the fingerprint. Defaults to "chrome".

        Raises:
            ValueError: If ``model`` is not among the models reported by the API
                (only enforced when the model list could be fetched).
        """
        self.url = base_url

        # Initialize LitAgent and derive browser-like request headers from the
        # generated fingerprint so requests resemble a real browser.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.api_key = api_key
        # Use the fingerprint for headers
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "User-Agent": self.fingerprint.get("user_agent", ""),
        }
        if self.api_key:
            self.headers["Authorization"] = f"Bearer {self.api_key}"

        # Initialize curl_cffi Session
        self.session = Session()
        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies.update(cast(Any, proxies))  # Assign proxies directly
        self.system_prompt = system_prompt
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}  # most recent reply recorded by ask()
        self.model = model
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p

        # Fetch available models; network failure is tolerated so offline
        # construction still works (model validation is then skipped).
        try:
            self.available_models = self.get_models(self.api_key)
        except Exception:
            self.available_models = []

        if self.available_models and self.model not in self.available_models:
            raise ValueError(f"Invalid model: {self.model}. Choose from: {self.available_models}")

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        # `act` takes precedence over `intro` when choosing the intro prompt.
        if act:
            self.conversation.intro = (
                AwesomePrompts().get_act(
                    cast(Union[str, int], act),
                    default=self.conversation.intro,
                    case_insensitive=True,
                )
                or self.conversation.intro
            )
        elif intro:
            self.conversation.intro = intro
147
+
148
+ def refresh_identity(self, browser: Optional[str] = None):
149
+ """
150
+ Refreshes the browser identity fingerprint.
151
+
152
+ Args:
153
+ browser: Specific browser to use for the new fingerprint
154
+ """
155
+ browser = browser or self.fingerprint.get("browser_type", "chrome")
156
+ self.fingerprint = self.agent.generate_fingerprint(browser)
157
+
158
+ # Update headers with new fingerprint (only relevant ones)
159
+ self.headers.update(
160
+ {
161
+ "Accept": self.fingerprint["accept"],
162
+ "Accept-Language": self.fingerprint["accept_language"],
163
+ }
164
+ )
165
+
166
+ # Update session headers
167
+ self.session.headers.update(self.headers)
168
+
169
+ return self.fingerprint
170
+
171
    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
        **kwargs: Any,
    ) -> Response:
        """Send `prompt` to the chat-completions endpoint.

        Args:
            prompt (str): User prompt; combined with conversation history.
            stream (bool, optional): If True, return a generator of chunks;
                otherwise a single response. Defaults to False.
            raw (bool, optional): Yield/return the raw sanitize_stream output
                instead of ``{"text": ...}`` dicts. Defaults to False.
            optimizer (str, optional): Name of an Optimizers method applied to
                the prompt. Defaults to None.
            conversationally (bool, optional): Apply the optimizer to the full
                conversation prompt rather than the bare prompt. Defaults to False.
            **kwargs: Accepted for interface compatibility; unused here.

        Returns:
            dict | generator: ``{"text": ...}`` (non-stream) or a generator of
            per-chunk dicts/raw values (stream).

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP/transport failure
                or an unknown optimizer name.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Payload construction
        payload = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt},
            ],
            "stream": stream,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
        }

        def for_stream():
            # Generator: yields per-chunk dicts (or raw values) as SSE data arrives.
            streaming_text = ""
            try:
                # Use curl_cffi session post with impersonate
                # NOTE(review): payload is JSON-encoded via data= but no explicit
                # Content-Type: application/json header is set — confirm the
                # server accepts this.
                response = self.session.post(
                    self.url,
                    data=json.dumps(payload),
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110",
                )
                response.raise_for_status()

                # Use sanitize_stream
                # Presumably strips the "data:" SSE prefix, skips "[DONE]" and
                # JSON-decodes each line before applying the extractor — verify
                # against sanitize_stream's contract in AIutel.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=self._openai_extractor,
                    yield_raw_on_error=False,
                    raw=raw,
                )

                for content_chunk in processed_stream:
                    if raw:
                        yield content_chunk
                    else:
                        # Only accumulate non-empty string chunks.
                        if content_chunk and isinstance(content_chunk, str):
                            streaming_text += content_chunk
                            resp = dict(text=content_chunk)
                            yield resp if not raw else content_chunk

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"Request failed (CurlError): {str(e)}"
                ) from e
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"Request failed ({type(e).__name__}): {str(e)}"
                ) from e
            finally:
                # Record history even if the stream stops early, but only when
                # at least one chunk was received.
                if streaming_text:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            try:
                # Use curl_cffi session post with impersonate for non-streaming
                response = self.session.post(
                    self.url,
                    data=json.dumps(payload),
                    timeout=self.timeout,
                    impersonate="chrome110",
                )
                response.raise_for_status()

                response_text = response.text

                # Use sanitize_stream to parse the non-streaming JSON response
                processed_stream = sanitize_stream(
                    data=response_text,
                    to_json=True,
                    intro_value=None,
                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0]
                    .get("message", {})
                    .get("content")
                    if isinstance(chunk, dict)
                    else None,
                    yield_raw_on_error=False,
                    raw=raw,
                )
                # Extract the single result
                content = next(processed_stream, None)
                if raw:
                    return content
                # Normalize non-string extraction results to an empty string.
                content = content if isinstance(content, str) else ""

                self.last_response = {"text": content}
                self.conversation.update_chat_history(prompt, content)
                return self.last_response if not raw else content

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"Request failed (CurlError): {e}"
                ) from e
            except Exception as e:
                # Surface the server's error body when the exception carries a
                # response object (e.g. raise_for_status failures).
                err_text = ""
                if hasattr(e, "response"):
                    response_obj = getattr(e, "response")
                    if hasattr(response_obj, "text"):
                        err_text = getattr(response_obj, "text")
                raise exceptions.FailedToGenerateResponseError(
                    f"Request failed ({type(e).__name__}): {e} - {err_text}"
                ) from e

        return for_stream() if stream else for_non_stream()
302
+
303
+ def chat(
304
+ self,
305
+ prompt: str,
306
+ stream: bool = False,
307
+ optimizer: Optional[str] = None,
308
+ conversationally: bool = False,
309
+ **kwargs: Any,
310
+ ) -> Union[str, Generator[str, None, None]]:
311
+ def for_stream_chat():
312
+ gen = self.ask(
313
+ prompt,
314
+ stream=True,
315
+ raw=False,
316
+ optimizer=optimizer,
317
+ conversationally=conversationally,
318
+ )
319
+ for response_dict in gen:
320
+ yield self.get_message(response_dict)
321
+
322
+ def for_non_stream_chat():
323
+ response_data = self.ask(
324
+ prompt,
325
+ stream=False,
326
+ raw=False,
327
+ optimizer=optimizer,
328
+ conversationally=conversationally,
329
+ )
330
+ return self.get_message(response_data)
331
+
332
+ return for_stream_chat() if stream else for_non_stream_chat()
333
+
334
+ def get_message(self, response: Response) -> str:
335
+ if not isinstance(response, dict):
336
+ return str(response)
337
+ return cast(Dict[str, Any], response).get("text", "")