webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483)
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/OPENAI/ibm.py
@@ -0,0 +1,425 @@
+ import json
+ import time
+ import uuid
+ from datetime import datetime
+ from typing import Any, Dict, Generator, List, Optional, Union, cast
+
+ from curl_cffi import CurlError
+
+ # Import curl_cffi for improved request handling
+ from curl_cffi.requests import Session
+
+ # Import base classes and utility structures
+ from webscout.Provider.OPENAI.base import (
+     BaseChat,
+     BaseCompletions,
+     OpenAICompatibleProvider,
+     SimpleModelList,
+ )
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ChatCompletionMessage,
+     Choice,
+     ChoiceDelta,
+     CompletionUsage,
+     count_tokens,
+     format_prompt,
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ from ...litagent import LitAgent
+
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: "IBM"):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2049,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any,
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         formatted_prompt = format_prompt(
+             messages, add_special_tokens=False, do_continue=True, include_system=True
+         )
+
+         if not formatted_prompt:
+             raise ValueError("No valid prompt could be generated from messages")
+
+         # Use count_tokens to estimate prompt tokens
+         try:
+             prompt_tokens = count_tokens(formatted_prompt)
+         except Exception:
+             # Fallback to simple estimation if tiktoken not available
+             prompt_tokens = int(len(formatted_prompt.split()) * 1.3)
+
+         now = datetime.now().isoformat()
+         payload = {
+             "agent_name": model,
+             "input": [
+                 {
+                     "role": "user",
+                     "parts": [
+                         {
+                             "content_type": "text/plain",
+                             "content": formatted_prompt,
+                             "content_encoding": "plain",
+                             "role": "user",
+                         }
+                     ],
+                     "created_at": now,
+                     "completed_at": now,
+                 }
+             ],
+             "mode": "stream",
+             "session_id": str(uuid.uuid4()),
+         }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, prompt_tokens)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload, prompt_tokens)
+
+     def _create_stream(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any],
+         prompt_tokens: int,
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Stream chat completions using manual SSE parsing (no sanitize_stream)"""
+         try:
+             response = self._client.session.post(
+                 self._client.base_url,
+                 data=json.dumps(payload),
+                 stream=True,
+                 timeout=self._client.timeout,
+                 impersonate="chrome110",
+             )
+
+             if response.status_code in [401, 403]:
+                 # Token expired, refresh and retry once
+                 self._client.get_token()
+                 response = self._client.session.post(
+                     self._client.base_url,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self._client.timeout,
+                     impersonate="chrome110",
+                 )
+
+             if response.status_code != 200:
+                 raise IOError(
+                     f"IBM request failed with status code {response.status_code}: {response.text}"
+                 )
+
+             # Track completion tokens
+             completion_tokens = 0
+
+             buffer = ""
+             for chunk in response.iter_content(chunk_size=None):
+                 if not chunk:
+                     continue
+
+                 # Decode bytes to string
+                 try:
+                     chunk_str = chunk.decode("utf-8") if isinstance(chunk, bytes) else chunk
+                 except UnicodeDecodeError:
+                     continue
+
+                 buffer += chunk_str
+
+                 # Process complete lines
+                 while "\n" in buffer:
+                     line, buffer = buffer.split("\n", 1)
+                     line = line.strip()
+
+                     if not line:
+                         continue
+
+                     # Parse SSE format: "data: {...}"
+                     if line.startswith("data:"):
+                         json_str = line[5:].strip()  # Remove "data:" prefix
+
+                         # Skip [DONE] marker
+                         if json_str == "[DONE]":
+                             break
+
+                         try:
+                             # Parse JSON
+                             data = json.loads(json_str)
+
+                             # Extract content from IBM format
+                             if data.get("type") == "message.part":
+                                 part = data.get("part", {})
+                                 content = part.get("content")
+
+                                 if content:
+                                     completion_tokens += 1
+
+                                     # Create the delta object
+                                     delta = ChoiceDelta(content=content, role="assistant")
+
+                                     # Create the choice object
+                                     choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                                     # Create the chunk object
+                                     chunk = ChatCompletionChunk(
+                                         id=request_id,
+                                         choices=[choice],
+                                         created=created_time,
+                                         model=model,
+                                         system_fingerprint=None,
+                                     )
+
+                                     yield chunk
+
+                         except json.JSONDecodeError:
+                             # Skip malformed JSON lines
+                             continue
+
+             # Send final chunk with finish_reason
+             final_delta = ChoiceDelta(content=None, role=None)
+             final_choice = Choice(index=0, delta=final_delta, finish_reason="stop")
+             final_chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[final_choice],
+                 created=created_time,
+                 model=model,
+                 system_fingerprint=None,
+             )
+             yield final_chunk
+
+         except CurlError as e:
+             print(f"Error during IBM stream request: {e}")
+             raise IOError(f"IBM request failed: {e}") from e
+         except Exception as e:
+             print(f"Error processing IBM stream: {e}")
+             raise
+
+     def _create_non_stream(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any],
+         prompt_tokens: int,
+     ) -> ChatCompletion:
+         """Create non-streaming chat completion"""
+         try:
+             # Collect all content from stream
+             accumulated_content = ""
+             completion_tokens = 0
+
+             for chunk in self._create_stream(
+                 request_id, created_time, model, payload, prompt_tokens
+             ):
+                 if chunk.choices and chunk.choices[0].delta.content:
+                     accumulated_content += chunk.choices[0].delta.content
+                     completion_tokens += 1
+
+             # Use count_tokens for more accurate completion token count
+             try:
+                 completion_tokens = count_tokens(accumulated_content)
+             except Exception:
+                 # Fallback if tiktoken not available
+                 pass
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant", content=accumulated_content, tool_calls=None
+             )
+
+             # Create the choice object
+             choice = Choice(index=0, message=message, finish_reason="stop")
+
+             # Create usage object with proper token counts
+             usage = CompletionUsage(
+                 prompt_tokens=int(prompt_tokens),
+                 completion_tokens=completion_tokens,
+                 total_tokens=int(prompt_tokens) + completion_tokens,
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+             return completion
+
+         except CurlError as e:
+             print(f"Error during IBM non-stream request: {e}")
+             raise IOError(f"IBM request failed: {e}") from e
+         except Exception as e:
+             print(f"Error processing IBM response: {e}")
+             raise
+
+
+ class Chat(BaseChat):
+     def __init__(self, client: "IBM"):
+         self.completions = Completions(client)
+
+
+ class IBM(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for IBM Granite Playground API.
+     Provides a familiar interface for interacting with IBM's Granite models.
+     """
+
+     required_auth = False  # No API key required for IBM Granite Playground
+     AVAILABLE_MODELS = [
+         "granite-chat",
+         "granite-thinking",
+         "granite-search",
+         "granite-research",
+     ]
+
+     def get_token(self) -> str:
+         """Fetches a fresh dynamic Bearer token from the IBM UI auth endpoint."""
+         auth_url = "https://www.ibm.com/granite/playground/api/v1/ui/auth"
+         try:
+             # Use the existing session to benefit from cookies/headers
+             response = self.session.get(auth_url, timeout=self.timeout, impersonate="chrome110")
+             if response.ok:
+                 data = response.json()
+                 token = data.get("token")
+                 if token:
+                     self.headers["Authorization"] = f"Bearer {token}"
+                     self.session.headers.update(self.headers)
+                     return token
+             raise IOError(f"Failed to fetch auth token: {response.status_code}")
+         except Exception as e:
+             raise IOError(f"Error fetching auth token: {str(e)}")
+
+     def __init__(
+         self, api_key: Optional[str] = None, timeout: Optional[int] = 30, browser: str = "chrome"
+     ):
+         """
+         Initialize IBM client.
+
+         Args:
+             api_key: Not required for IBM Granite Playground (uses dynamic bearer token)
+             timeout: Request timeout in seconds
+             browser: Browser type for fingerprinting
+         """
+         self.timeout = timeout
+         self.base_url = "https://d1eh1ubv87xmm5.cloudfront.net/granite/playground/api/v1/acp/runs"
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+
+         # Initialize LitAgent for browser fingerprinting
+         try:
+             agent = LitAgent()
+             fingerprint = agent.generate_fingerprint(browser)
+
+             self.headers = {
+                 "Accept": "text/event-stream",
+                 "Accept-Language": fingerprint.get("accept_language", "en-US,en;q=0.9"),
+                 "Authorization": "",
+                 "Content-Type": "application/json",
+                 "Cache-Control": "no-cache",
+                 "Origin": "https://www.ibm.com",
+                 "Pragma": "no-cache",
+                 "Referer": "https://www.ibm.com/granite/playground",
+                 "Sec-Fetch-Dest": "empty",
+                 "Sec-Fetch-Mode": "cors",
+                 "Sec-Fetch-Site": "cross-site",
+                 "User-Agent": fingerprint.get("user_agent", ""),
+                 "Sec-CH-UA": fingerprint.get("sec_ch_ua", ""),
+                 "Sec-CH-UA-Mobile": "?0",
+                 "Sec-CH-UA-Platform": f'"{fingerprint.get("platform", "")}"',
+             }
+         except (NameError, Exception):
+             # Fallback to basic headers if LitAgent is not available
+             self.headers = {
+                 "Accept": "text/event-stream",
+                 "Accept-Language": "en-US,en;q=0.9",
+                 "Authorization": "",
+                 "Content-Type": "application/json",
+                 "Cache-Control": "no-cache",
+                 "Origin": "https://www.ibm.com",
+                 "Pragma": "no-cache",
+                 "Referer": "https://www.ibm.com/granite/playground",
+                 "Sec-Fetch-Dest": "empty",
+                 "Sec-Fetch-Mode": "cors",
+                 "Sec-Fetch-Site": "cross-site",
+                 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+                 "Sec-CH-UA": '"Not)A;Brand";v="99", "Google Chrome";v="120", "Chromium";v="120"',
+                 "Sec-CH-UA-Mobile": "?0",
+                 "Sec-CH-UA-Platform": '"Windows"',
+             }
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         # Fetch initial token
+         self.get_token()
+
+         # Initialize chat interface
+         self.chat = Chat(self)
+
+     @classmethod
+     def get_models(cls, api_key: Optional[str] = None):
+         """Get available models.
+
+         Args:
+             api_key: Not used for IBM (kept for compatibility)
+
+         Returns:
+             list: List of available model IDs
+         """
+         return cls.AVAILABLE_MODELS
+
+     @property
+     def models(self) -> SimpleModelList:
+         return SimpleModelList(type(self).AVAILABLE_MODELS)
+
+
+ # Example usage
+ if __name__ == "__main__":
+     # Test the IBM client
+     client = IBM()
+
+     # Test streaming
+     print("Testing streaming:")
+     response = client.chat.completions.create(
+         model="granite-chat",
+         messages=[{"role": "user", "content": "Say 'Hello World' in one sentence"}],
+         stream=True,
+     )
+
+     for chunk in cast(Generator[ChatCompletionChunk, None, None], response):
+         if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+             print(chunk.choices[0].delta.content, end="", flush=True)
+     print("\n")
+
+     # Test non-streaming
+     print("Testing non-streaming:")
+     response = client.chat.completions.create(
+         model="granite-chat",
+         messages=[{"role": "user", "content": "Say 'Hello' in one word"}],
+         stream=False,
+     )
+
+     if isinstance(response, ChatCompletion):
+         if response.choices[0].message and response.choices[0].message.content:
+             print(response.choices[0].message.content)
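The stream parser in ibm.py above walks the response byte stream manually and only emits text from "message.part" events. As a reading aid, here is a minimal sketch of the event shape that parser handles; the field names (type, part, content) come from the hunk above, while the sample payload value is invented:

# Illustrative only: one SSE line in the shape _create_stream expects.
# Field names mirror the code above; the content value is made up.
import json

sample_line = 'data: {"type": "message.part", "part": {"content": "Hello"}}'
if sample_line.startswith("data:"):
    event = json.loads(sample_line[5:].strip())
    if event.get("type") == "message.part":
        print(event.get("part", {}).get("content"))  # -> Hello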
webscout/Provider/OPENAI/llmchat.py
@@ -0,0 +1,253 @@
+ import json
+ import time
+ import uuid
+ from typing import Any, Dict, Generator, List, Optional, Union, cast
+
+ from curl_cffi.requests import Session
+
+ # Import LitAgent for user agent generation
+ # Import base classes and utility structures
+ from webscout.Provider.OPENAI.base import (
+     BaseChat,
+     BaseCompletions,
+     OpenAICompatibleProvider,
+     SimpleModelList,
+ )
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ChatCompletionMessage,
+     Choice,
+     ChoiceDelta,
+     CompletionUsage,
+     count_tokens,
+ )
+
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: "LLMChat"):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2048,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[dict] = None,
+         **kwargs: Any,
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         # In this case, we pass messages directly to the API
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_streaming(
+                 request_id, created_time, model, messages, max_tokens, timeout, proxies
+             )
+         else:
+             return self._create_non_streaming(
+                 request_id, created_time, model, messages, max_tokens, timeout, proxies
+             )
+
+     def _create_streaming(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int],
+         timeout: Optional[int],
+         proxies: Optional[dict],
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             prompt_tokens = count_tokens(json.dumps(messages))
+             completion_tokens = 0
+
+             url = f"{self._client.api_endpoint}?model={model}"
+             payload = {"messages": messages, "max_tokens": max_tokens or 2048, "stream": True}
+
+             response = self._client.session.post(
+                 url,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 impersonate="chrome110",
+             )
+             response.raise_for_status()
+
+             full_content = ""
+             for line in response.iter_lines():
+                 if line:
+                     line = line.decode("utf-8")
+                     if line.startswith("data: "):
+                         data_str = line[6:]
+                         if data_str.strip() == "[DONE]":
+                             break
+
+                         try:
+                             data = json.loads(data_str)
+                             content = data.get("response", "")
+                             if content:
+                                 full_content += content
+                                 completion_tokens += 1
+
+                                 delta = ChoiceDelta(content=content, role="assistant")
+                                 choice = Choice(index=0, delta=delta, finish_reason=None)
+                                 chunk = ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[choice],
+                                     created=created_time,
+                                     model=model,
+                                 )
+                                 yield chunk
+                         except json.JSONDecodeError:
+                             continue
+
+             # Final chunk
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+             final_chunk = ChatCompletionChunk(
+                 id=request_id, choices=[choice], created=created_time, model=model
+             )
+             usage_obj = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=count_tokens(full_content),
+                 total_tokens=prompt_tokens + count_tokens(full_content),
+             )
+             final_chunk.usage = usage_obj.model_dump(exclude_none=True)
+             yield final_chunk
+
+         except Exception as e:
+             raise IOError(f"LLMChat streaming request failed: {e}") from e
+
+     def _create_non_streaming(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int],
+         timeout: Optional[int],
+         proxies: Optional[dict],
+     ) -> ChatCompletion:
+         try:
+             full_content = ""
+             prompt_tokens = count_tokens(json.dumps(messages))
+
+             for chunk in self._create_streaming(
+                 request_id, created_time, model, messages, max_tokens, timeout, proxies
+             ):
+                 if chunk.choices[0].delta and chunk.choices[0].delta.content:
+                     full_content += chunk.choices[0].delta.content
+
+             message = ChatCompletionMessage(role="assistant", content=full_content)
+             choice = Choice(index=0, message=message, finish_reason="stop")
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=count_tokens(full_content),
+                 total_tokens=prompt_tokens + count_tokens(full_content),
+             )
+
+             return ChatCompletion(
+                 id=request_id, choices=[choice], created=created_time, model=model, usage=usage
+             )
+         except Exception as e:
+             raise IOError(f"LLMChat request failed: {e}") from e
+
+
+ class Chat(BaseChat):
+     def __init__(self, client: "LLMChat"):
+         self.completions = Completions(client)
+
+
+ class LLMChat(OpenAICompatibleProvider):
+     required_auth = False
+     AVAILABLE_MODELS = [
+         "@cf/aisingapore/gemma-sea-lion-v4-27b-it",
+         "@cf/deepseek-ai/deepseek-math-7b-instruct",
+         "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+         "@cf/defog/sqlcoder-7b-2",
+         "@cf/fblgit/una-cybertron-7b-v2-bf16",
+         "@cf/google/gemma-2b-it-lora",
+         "@cf/google/gemma-3-12b-it",
+         "@cf/ibm-granite/granite-4.0-h-micro",
+         "@cf/meta-llama/llama-2-7b-chat-hf-lora",
+         "@cf/meta/llama-2-7b-chat-fp16",
+         "@cf/meta/llama-2-7b-chat-int8",
+         "@cf/meta/llama-3-8b-instruct",
+         "@cf/meta/llama-3-8b-instruct-awq",
+         "@cf/meta/llama-3.1-70b-instruct",
+         "@cf/meta/llama-3.1-8b-instruct",
+         "@cf/meta/llama-3.2-1b-instruct",
+         "@cf/meta/llama-3.2-3b-instruct",
+         "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
+         "@cf/meta/llama-4-scout-17b-16e-instruct",
+         "@cf/meta/llama/llama-2-7b-chat-hf-lora",
+         "@cf/meta/meta-llama-3-8b-instruct",
+         "@cf/microsoft/phi-2",
+         "@cf/mistral/mistral-7b-instruct-v0.1-vllm",
+         "@cf/mistral/mistral-7b-instruct-v0.2-lora",
+         "@cf/mistralai/mistral-small-3.1-24b-instruct",
+         "@cf/openchat/openchat-3.5-0106",
+         "@cf/qwen/qwen1.5-0.5b-chat",
+         "@cf/qwen/qwen1.5-1.8b-chat",
+         "@cf/qwen/qwen1.5-14b-chat-awq",
+         "@cf/qwen/qwen1.5-7b-chat-awq",
+         "@cf/qwen/qwen2.5-coder-32b-instruct",
+         "@cf/qwen/qwen3-30b-a3b-fp8",
+         "@cf/qwen/qwq-32b",
+         "@cf/tiiuae/falcon-7b-instruct",
+         "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
+         "@hf/google/gemma-7b-it",
+         "@hf/meta-llama/meta-llama-3-8b-instruct",
+         "@hf/mistral/mistral-7b-instruct-v0.2",
+         "@hf/nexusflow/starling-lm-7b-beta",
+         "@hf/thebloke/deepseek-coder-6.7b-base-awq",
+         "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
+         "@hf/thebloke/llama-2-13b-chat-awq",
+         "@hf/thebloke/llamaguard-7b-awq",
+         "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+         "@hf/thebloke/neural-chat-7b-v3-1-awq",
+         "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
+         "@hf/thebloke/zephyr-7b-beta-awq",
+     ]
+
+     def __init__(self, proxies: dict = {}, timeout: int = 30):
+         self.session = Session()
+         self.timeout = timeout
+         self.api_endpoint = "https://llmchat.in/inference/stream"
+         self.proxies = proxies
+         if proxies:
+             self.session.proxies.update(cast(Any, proxies))
+
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "*/*",
+             "Origin": "https://llmchat.in",
+             "Referer": "https://llmchat.in/",
+         }
+         self.session.headers.update(self.headers)
+         self.chat = Chat(self)
+
+     @property
+     def models(self) -> SimpleModelList:
+         return SimpleModelList(type(self).AVAILABLE_MODELS)
+
+
+ if __name__ == "__main__":
+     client = LLMChat()
+     response = client.chat.completions.create(
+         model="@cf/meta/llama-3.1-70b-instruct",
+         messages=[{"role": "user", "content": "Say 'Hello' in one word"}],
+         stream=True,
+     )
+     for chunk in response:
+         if hasattr(chunk, "choices") and chunk.choices:
+             delta = chunk.choices[0].delta
+             if delta and delta.content:
+                 print(delta.content, end="", flush=True)
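The __main__ block above only exercises streaming. A minimal non-streaming sketch against the same LLMChat client, assuming the import path webscout.Provider.OPENAI.llmchat implied by the file list; the model ID is taken from AVAILABLE_MODELS and the prompt text is illustrative:

# Hedged sketch: non-streaming use of the LLMChat provider added above.
from webscout.Provider.OPENAI.llmchat import LLMChat
from webscout.Provider.OPENAI.utils import ChatCompletion

client = LLMChat(timeout=30)
completion = client.chat.completions.create(
    model="@cf/meta/llama-3.1-8b-instruct",
    messages=[{"role": "user", "content": "Say 'Hello' in one word"}],
    stream=False,
)
if isinstance(completion, ChatCompletion):
    print(completion.choices[0].message.content)
    print(completion.usage)  # prompt/completion/total token estimates via count_tokens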