webscout 8.2.2-py3-none-any.whl → 2026.1.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483)
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/llmchatco.py
@@ -1,311 +1,321 @@
- import requests
- import json
- import uuid
- import re
- from typing import Union, Any, Dict, Optional, Generator, List
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent as Lit
-
- class LLMChatCo(Provider):
-     """
-     A class to interact with the LLMChat.co API
-     """
-
-     AVAILABLE_MODELS = [
-         "gemini-flash-2.0", # Default model
-         "llama-4-scout",
-         "gpt-4o-mini",
-         "gpt-4.1-nano",
-
-
-         # "gpt-4.1",
-         # "gpt-4.1-mini",
-         # "o3-mini",
-         # "claude-3-5-sonnet",
-         # "deepseek-r1",
-         # "claude-3-7-sonnet",
-         # "deep", # deep research mode
-         # "pro" # pro research mode
-
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2048,
-         timeout: int = 60,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "gemini-flash-2.0",
-         system_prompt: str = "You are a helpful assistant."
-     ):
-         """
-         Initializes the LLMChat.co API with given parameters.
-         """
-
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://llmchat.co/api/completion"
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-         self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations
-
-         # Create LitAgent instance for user agent generation
-         lit_agent = Lit()
-
-         # Headers based on the provided request
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "text/event-stream",
-             "User-Agent": lit_agent.random(),
-             "Accept-Language": "en-US,en;q=0.9",
-             "Origin": "https://llmchat.co",
-             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
-             "DNT": "1",
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin"
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-         # Store message history for conversation context
-         self.last_assistant_response = ""
-
-     def parse_sse(self, data):
-         """Parse Server-Sent Events data"""
-         if not data or not data.strip():
-             return None
-
-         # Check if it's an event line
-         if data.startswith('event:'):
-             return {'event': data[6:].strip()}
-
-         # Check if it's data
-         if data.startswith('data:'):
-             data_content = data[5:].strip()
-             if data_content:
-                 try:
-                     return {'data': json.loads(data_content)}
-                 except json.JSONDecodeError:
-                     return {'data': data_content}
-
-         return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True, # Default to stream as the API uses SSE
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         web_search: bool = False,
-     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-         """Chat with LLMChat.co with streaming capabilities"""
-
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-
-         # Generate a unique ID for this message
-         thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
-         messages = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": prompt},
-         ]
-         # Prepare payload for the API request based on observed request format
-         payload = {
-             "mode": self.model,
-             "prompt": prompt,
-             "threadId": self.thread_id,
-             "messages": messages,
-             "mcpConfig": {},
-             "threadItemId": thread_item_id,
-             "parentThreadItemId": "",
-             "webSearch": web_search,
-             "showSuggestions": True
-         }
-
-         def for_stream():
-             try:
-                 # Set up the streaming request
-                 response = self.session.post(
-                     self.api_endpoint,
-                     json=payload,
-                     headers=self.headers,
-                     stream=True,
-                     timeout=self.timeout
-                 )
-                 response.raise_for_status()
-
-                 # Process the SSE stream
-                 full_response = ""
-                 current_event = None
-                 buffer = ""
-
-                 # Use a raw read approach to handle SSE
-                 for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
-                     if not chunk:
-                         continue
-
-                     # Decode the chunk and add to buffer
-                     buffer += chunk.decode('utf-8')
-
-                     # Process complete lines in the buffer
-                     while '\n' in buffer:
-                         line, buffer = buffer.split('\n', 1)
-                         line = line.strip()
-
-                         if not line:
-                             continue
-
-                         if line.startswith('event:'):
-                             current_event = line[6:].strip()
-                         elif line.startswith('data:'):
-                             data_content = line[5:].strip()
-                             if data_content and current_event == 'answer':
-                                 try:
-                                     json_data = json.loads(data_content)
-                                     if "answer" in json_data and "text" in json_data["answer"]:
-                                         text_chunk = json_data["answer"]["text"]
-                                         # If there's a fullText, use it as it's more complete
-                                         if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
-                                             text_chunk = json_data["answer"]["fullText"]
-
-                                         # Extract only new content since last chunk
-                                         new_text = text_chunk[len(full_response):]
-                                         if new_text:
-                                             full_response = text_chunk
-                                             yield new_text if raw else dict(text=new_text)
-                                 except json.JSONDecodeError:
-                                     continue
-                             elif data_content and current_event == 'done':
-                                 break
-
-                 self.last_response.update(dict(text=full_response))
-                 self.last_assistant_response = full_response
-                 self.conversation.update_chat_history(
-                     prompt, self.get_message(self.last_response)
-                 )
-
-             except requests.exceptions.RequestException as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}")
-
-         def for_non_stream():
-             full_response = ""
-             try:
-                 for chunk in for_stream():
-                     if not raw:
-                         full_response += chunk.get('text', '')
-                     else:
-                         full_response += chunk
-             except Exception as e:
-                 if not full_response:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")
-
-             return dict(text=full_response)
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         web_search: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Generate response with streaming capabilities"""
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally,
-                 web_search=web_search
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                     web_search=web_search
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: Dict[str, Any]) -> str:
-         """Retrieves message from response with validation"""
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     # Test all available models
-     working = 0
-     total = len(LLMChatCo.AVAILABLE_MODELS)
-
-     for model in LLMChatCo.AVAILABLE_MODELS:
-         try:
-             test_ai = LLMChatCo(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word")
-             response_text = response
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
+ import uuid
+ from typing import Any, Dict, Generator, Optional, Union, cast
+
+ from curl_cffi import CurlError
+ from curl_cffi.requests import Session
+
+ from webscout import exceptions
+ from webscout.AIbase import Provider, Response
+ from webscout.AIutel import ( # Import sanitize_stream
+     AwesomePrompts,
+     Conversation,
+     Optimizers,
+     sanitize_stream,
+ )
+ from webscout.litagent import LitAgent as Lit
+
+
+ class LLMChatCo(Provider):
+     """
+     A class to interact with the LLMChat.co API
+     """
+
+     required_auth = False
+     AVAILABLE_MODELS = [
+         "gemini-flash-2.0", # Default model
+         "llama-4-scout",
+         "gpt-4o-mini",
+         "gpt-4.1-nano",
+         # "gpt-4.1",
+         # "gpt-4.1-mini",
+         # "o3-mini",
+         # "claude-3-5-sonnet",
+         # "deepseek-r1",
+         # "claude-3-7-sonnet",
+         # "deep", # deep research mode
+         # "pro" # pro research mode
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048, # Note: max_tokens is not used by this API
+         timeout: int = 60,
+         intro: Optional[str] = None,
+         filepath: Optional[str] = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: Optional[str] = None,
+         model: str = "gemini-flash-2.0",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         """
+         Initializes the LLMChat.co API with given parameters.
+         """
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://llmchat.co/api/completion"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations
+
+         # Create LitAgent instance (keep if needed for other headers)
+         lit_agent = Lit()
+
+         # Headers based on the provided request
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "text/event-stream",
+             "User-Agent": lit_agent.random(),
+             "Accept-Language": "en-US,en;q=0.9",
+             "Origin": "https://llmchat.co",
+             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
+             "DNT": "1",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             # Add sec-ch-ua headers if needed for impersonation consistency
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         act_prompt = (
+             AwesomePrompts().get_act(
+                 cast(Union[str, int], act), default=None, case_insensitive=True
+             )
+             if act
+             else intro
+         )
+         if act_prompt:
+             self.conversation.intro = act_prompt
+         self.conversation.history_offset = history_offset
+         # Update curl_cffi session headers and proxies
+         self.session = Session(impersonate="chrome110")
+         self.session.headers.update(self.headers)
+         if proxies:
+             self.session.proxies.update(proxies)
+         self.system_prompt = system_prompt
+         # Store message history for conversation context
+         self.last_assistant_response = ""
+
+     @staticmethod
+     def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts text content from LLMChat.co stream JSON objects."""
+         if isinstance(chunk, dict) and "answer" in chunk:
+             answer = chunk["answer"]
+             # Prefer fullText if available and status is COMPLETED
+             if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                 return answer["fullText"]
+             elif "text" in answer:
+                 return answer["text"]
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True, # Default to stream as the API uses SSE
+         raw: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+         **kwargs: Any,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None], str]:
+         """Chat with LLMChat.co with streaming capabilities and raw output support using sanitize_stream."""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Generate a unique ID for this message
+         thread_item_id = "".join(str(uuid.uuid4()).split("-"))[:20]
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": prompt},
+         ]
+         # Prepare payload for the API request based on observed request format
+         payload = {
+             "mode": self.model,
+             "prompt": prompt,
+             "threadId": self.thread_id,
+             "messages": messages,
+             "mcpConfig": {},
+             "threadItemId": thread_item_id,
+             "parentThreadItemId": "",
+             "webSearch": web_search,
+             "showSuggestions": True,
+         }
+
+         def for_stream():
+             full_response = ""
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110",
+                 )
+                 response.raise_for_status()
+
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     content_extractor=self._llmchatco_extractor,
+                     yield_raw_on_error=False,
+                     raw=raw,
+                 )
+
+                 last_yielded_text = ""
+                 for current_full_text in processed_stream:
+                     if current_full_text and isinstance(current_full_text, str):
+                         new_text = current_full_text[len(last_yielded_text) :]
+                         if new_text:
+                             full_response = current_full_text
+                             last_yielded_text = current_full_text
+                             if raw:
+                                 yield new_text
+                             else:
+                                 yield dict(text=new_text)
+                 self.last_response = dict(text=full_response)
+                 self.last_assistant_response = full_response
+                 self.conversation.update_chat_history(prompt, full_response)
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Request failed (CurlError): {e}"
+                 ) from e
+             except Exception as e:
+                 err_text = ""
+                 if hasattr(e, "response"):
+                     response_obj = getattr(e, "response")
+                     if hasattr(response_obj, "text"):
+                         err_text = getattr(response_obj, "text")
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}"
+                 ) from e
+
+         def for_non_stream():
+             full_response_text = ""
+             try:
+                 for chunk_data in for_stream():
+                     if raw and isinstance(chunk_data, str):
+                         full_response_text += chunk_data
+                     elif isinstance(chunk_data, dict) and "text" in chunk_data:
+                         full_response_text += chunk_data["text"]
+             except Exception as e:
+                 if not full_response_text:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to get non-stream response: {str(e)}"
+                     ) from e
+             return full_response_text if raw else self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+         raw: bool = False,
+         **kwargs: Any,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response with streaming capabilities and raw output support"""
+
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt,
+                 stream=True,
+                 raw=raw,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 web_search=web_search,
+             )
+             for response in gen:
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
+
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=raw,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 web_search=web_search,
+             )
+             if raw:
+                 return (
+                     response_data
+                     if isinstance(response_data, str)
+                     else self.get_message(response_data)
+                 )
+             else:
+                 return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: Response) -> str:
+         """Retrieves message from response with validation"""
+         if not isinstance(response, dict):
+             return str(response)
+         response_dict = cast(Dict[str, Any], response)
+         return response_dict.get("text", "")
+
+
+ if __name__ == "__main__":
+     # # Ensure curl_cffi is installed
+     # print("-" * 80)
+     # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     # print("-" * 80)
+
+     # # Test all available models
+     # working = 0
+     # total = len(LLMChatCo.AVAILABLE_MODELS)
+
+     # for model in LLMChatCo.AVAILABLE_MODELS:
+     #     try:
+     #         test_ai = LLMChatCo(model=model, timeout=60)
+     #         response = test_ai.chat("Say 'Hello' in one word")
+     #         response_text = response
+
+     #         if response_text and len(response_text.strip()) > 0:
+     #             status = "✓"
+     #             # Truncate response if too long
+     #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+     #         else:
+     #             status = "✗"
+     #             display_text = "Empty or invalid response"
+     #         print(f"{model:<50} {status:<10} {display_text}")
+     #     except Exception as e:
+     #         print(f"{model:<50} {'✗':<10} {str(e)}")
+     ai = LLMChatCo()
+     response = ai.chat("yooo", stream=True, raw=False)
+     for chunk in response:
+         print(chunk, end="", flush=True)